import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC

import quapy as qp
from quapy import method
from quapy.model_selection import GridSearchQ
from quapy.method.base import BinaryQuantifier
from quapy.data import Dataset, LabelledCollection
from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS
from quapy.method.meta import Ensemble, MedianEstimator
from quapy.protocol import APP
from quapy.method.aggregative import DMy

# datasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True), id='hcr'),
#             pytest.param(qp.datasets.fetch_UCIDataset('ionosphere'), id='ionosphere')]

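# reduced versions of the datasets, to keep the tests lightweight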
tinydatasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True).reduce(), id='tiny_hcr'),
                pytest.param(qp.datasets.fetch_UCIBinaryDataset('ionosphere').reduce(), id='tiny_ionosphere')]

learners = [LogisticRegression, LinearSVC]

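# smoke test: every aggregative method should fit on a (tiny) dataset and return a well-formed prevalence estimate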
@pytest.mark.parametrize('dataset', tinydatasets)
@pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS)
@pytest.mark.parametrize('learner', learners)
def test_aggregative_methods(dataset: Dataset, aggregative_method, learner):
    model = aggregative_method(learner())

    if isinstance(model, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {type(model)} on non-binary dataset {dataset}')
        return
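    # fit on the training split, then estimate the class prevalences of the test instances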
    model.fit(dataset.training)
    estim_prevalences = model.quantify(dataset.test.instances)

    true_prevalences = dataset.test.prevalence()
    error = qp.error.mae(true_prevalences, estim_prevalences)

    assert isinstance(error, np.float64)
|
2021-05-04 12:14:14 +02:00
|
|
|
|
|
|
|
|
2024-01-25 14:33:41 +01:00
|
|
|
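# same smoke test for the non-aggregative methods, which do not wrap a classifier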
@pytest.mark.parametrize('dataset', tinydatasets)
@pytest.mark.parametrize('non_aggregative_method', NON_AGGREGATIVE_METHODS)
def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method):
    model = non_aggregative_method()

    if isinstance(model, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
        return

    model.fit(dataset.training)
    estim_prevalences = model.quantify(dataset.test.instances)

    true_prevalences = dataset.test.prevalence()
    error = qp.error.mae(true_prevalences, estim_prevalences)

    assert isinstance(error, np.float64)
|
2021-05-04 12:14:14 +02:00
|
|
|
|
|
|
|
|
2024-01-25 14:33:41 +01:00
|
|
|
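# ensembles of quantifiers should work with every valid member-selection policy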
@pytest.mark.parametrize('base_method', [method.aggregative.ACC, method.aggregative.PACC])
@pytest.mark.parametrize('learner', [LogisticRegression])
@pytest.mark.parametrize('dataset', tinydatasets)
@pytest.mark.parametrize('policy', Ensemble.VALID_POLICIES)
def test_ensemble_method(base_method, learner, dataset: Dataset, policy):
    qp.environ['SAMPLE_SIZE'] = 20

    base_quantifier = base_method(learner())

    if not dataset.binary and policy == 'ds':
        print(f'skipping the test of policy "ds" (binary-only) on non-binary dataset {dataset}')
        return
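    # a small ensemble (size=3) suffices to exercise the policy logic without slowing down the test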
    model = Ensemble(quantifier=base_quantifier, size=3, policy=policy, n_jobs=-1)

    model.fit(dataset.training)
    estim_prevalences = model.quantify(dataset.test.instances)

    true_prevalences = dataset.test.prevalence()
    error = qp.error.mae(true_prevalences, estim_prevalences)

    assert isinstance(error, np.float64)
|
2021-05-04 12:14:14 +02:00
|
|
|
|
|
|
|
|
|
|
|
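# QuaNet depends on torch; the test is skipped gracefully when the package is not installed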
def test_quanet_method():
    try:
        import quapy.classification.neural
    except ModuleNotFoundError:
        print('skipping QuaNet test due to missing torch package')
        return

    qp.environ['SAMPLE_SIZE'] = 100

    # load the kindle dataset as text, and convert words to numerical indexes
    dataset = qp.datasets.fetch_reviews('kindle', pickle=True).reduce(200, 200)
    qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
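    # build a CNN-based neural classifier over the indexed text (the test assumes a CUDA device is available)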
    from quapy.classification.neural import CNNnet
    cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes)

    from quapy.classification.neural import NeuralClassifierTrainer
    learner = NeuralClassifierTrainer(cnn, device='cuda')

    from quapy.method.meta import QuaNet
    model = QuaNet(learner, device='cuda')

    if isinstance(model, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
        return

    model.fit(dataset.training)
    estim_prevalences = model.quantify(dataset.test.instances)

    true_prevalences = dataset.test.prevalence()
    error = qp.error.mae(true_prevalences, estim_prevalences)

    assert isinstance(error, np.float64)
|
2021-05-05 17:12:44 +02:00
|
|
|
|
|
|
|
|
2023-02-13 19:27:48 +01:00
|
|
|
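# string class labels should yield exactly the same prevalence estimates as integer labels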
def test_str_label_names():
    model = qp.method.aggregative.CC(LogisticRegression())

    dataset = qp.datasets.fetch_reviews('imdb', pickle=True)
    dataset = Dataset(dataset.training.sampling(1000, *dataset.training.prevalence()),
                      dataset.test.sampling(1000, 0.25, 0.75))
    qp.data.preprocessing.text2tfidf(dataset, min_df=5, inplace=True)

    np.random.seed(0)
    model.fit(dataset.training)

    int_estim_prevalences = model.quantify(dataset.test.instances)
    true_prevalences = dataset.test.prevalence()

    error = qp.error.mae(true_prevalences, int_estim_prevalences)
    assert isinstance(error, np.float64)
|
2021-05-05 17:12:44 +02:00
|
|
|
|
|
|
|
dataset_str = Dataset(LabelledCollection(dataset.training.instances,
|
|
|
|
['one' if label == 1 else 'zero' for label in dataset.training.labels]),
|
|
|
|
LabelledCollection(dataset.test.instances,
|
|
|
|
['one' if label == 1 else 'zero' for label in dataset.test.labels]))
|
2023-02-08 19:06:53 +01:00
|
|
|
assert all(dataset_str.training.classes_ == dataset_str.test.classes_), 'wrong indexation'
|
2023-11-09 14:20:41 +01:00
|
|
|
np.random.seed(0)
|
2021-05-05 17:12:44 +02:00
|
|
|
model.fit(dataset_str.training)
|
|
|
|
|
|
|
|
str_estim_prevalences = model.quantify(dataset_str.test.instances)
|
|
|
|
true_prevalences = dataset_str.test.prevalence()
|
|
|
|
|
|
|
|
error = qp.error.mae(true_prevalences, str_estim_prevalences)
|
2023-11-09 14:20:41 +01:00
|
|
|
assert type(error) == np.float64
|
2021-05-05 17:12:44 +02:00
|
|
|
|
|
|
|
print(true_prevalences)
|
|
|
|
print(int_estim_prevalences)
|
|
|
|
print(str_estim_prevalences)
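    # the estimate for class 1 must coincide with the estimate for class 'one', modulo the ordering of classes_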
    np.testing.assert_almost_equal(int_estim_prevalences[1],
                                   str_estim_prevalences[list(model.classes_).index('one')])
|
2023-11-09 14:20:41 +01:00
|
|
|
|
|
|
|
# helper
|
|
|
|
def __fit_test(quantifier, train, test):
|
|
|
|
quantifier.fit(train)
|
|
|
|
test_samples = APP(test)
|
|
|
|
true_prevs, estim_prevs = qp.evaluation.prediction(quantifier, test_samples)
|
|
|
|
return qp.error.mae(true_prevs, estim_prevs), estim_prevs
|
|
|
|
|
|
|
|
|
|
|
|
def test_median_meta():
|
|
|
|
"""
|
|
|
|
This test compares the performance of the MedianQuantifier with respect to computing the median of the predictions
|
|
|
|
of a differently parameterized quantifier. We use the DistributionMatching base quantifier and the median is
|
|
|
|
computed across different values of nbins
|
|
|
|
"""
|
|
|
|
|
|
|
|
qp.environ['SAMPLE_SIZE'] = 100
    # grid of values
    nbins_grid = list(range(2, 11))

    dataset = 'kindle'
    train, test = qp.datasets.fetch_reviews(dataset, tfidf=True, min_df=10).train_test
    prevs = []
    errors = []
    for nbins in nbins_grid:
        with qp.util.temp_seed(0):
            q = DMy(LogisticRegression(), nbins=nbins)
            mae, estim_prevs = __fit_test(q, train, test)
            prevs.append(estim_prevs)
            errors.append(mae)
            print(f'{dataset} DistributionMatching(nbins={nbins}) got MAE {mae:.4f}')
    prevs = np.asarray(prevs)
    mae = np.mean(errors)
    print(f'\tMAE={mae:.4f}')
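    # the MedianEstimator must reproduce exactly the median of the individual predictions, and should not
    # perform worse (in MAE) than the average of the individual quantifiers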
    q = DMy(LogisticRegression())
    q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
    median_mae, prev = __fit_test(q, train, test)
    print(f'\tMAE={median_mae:.4f}')

    np.testing.assert_almost_equal(np.median(prevs, axis=0), prev)
    assert median_mae < mae, 'the median-based quantifier provided a higher error...'


def test_median_meta_modsel():
    """
    This test checks that the median-meta quantifier (MedianEstimator) remains compatible with model selection,
    i.e., that it can be nested within a GridSearchQ.
    """
    qp.environ['SAMPLE_SIZE'] = 100

    dataset = 'kindle'
    train, test = qp.datasets.fetch_reviews(dataset, tfidf=True, min_df=10).train_test
    train, val = train.split_stratified(random_state=0)

    nbins_grid = [2, 4, 5, 10, 15]

    q = DMy(LogisticRegression())
    q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
    median_mae, _ = __fit_test(q, train, test)
    print(f'\tMAE={median_mae:.4f}')
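    # model selection on top of the median: tune the classifier's regularization strength C on a held-out
    # validation set, using the artificial prevalence protocol (APP)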
    q = DMy(LogisticRegression())
    lr_params = {'classifier__C': np.logspace(-1, 1, 3)}
    q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
    q = GridSearchQ(q, param_grid=lr_params, protocol=APP(val), n_jobs=-1)
    optimized_median_mae, _ = __fit_test(q, train, test)
    print(f'\tMAE={optimized_median_mae:.4f}')

    assert optimized_median_mae < median_mae, 'the optimized method yielded worse performance...'