redo experiments with auto bandwidth selection

commit 5bcaa8d1bb (parent 77df9112a3)
@@ -17,6 +17,8 @@ import quapy.functional as F
 epsilon = 1e-10
 
+BANDWIDTH_RANGE = (0.001, 0.2)
+
 
 class KDEyMLauto(KDEyML):
     def __init__(self, classifier: BaseEstimator = None, val_split=5, random_state=None, optim='two_steps'):
         self.classifier = qp._get_classifier(classifier)

@@ -218,7 +220,7 @@ class KDEyMLauto(KDEyML):
 
     def choose_bandwidth_maxlikelihood_search(self, tr_posteriors, tr_y, te_posteriors, classes):
         n_classes = len(classes)
-        init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
+        init_prev = F.uniform_prevalence(n_classes)
 
         def neglikelihood_band(bandwidth):
             mix_densities = self.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth[0])
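The np.full-to-F.uniform_prevalence change is a pure refactor; a minimal equivalence check, assuming only that quapy.functional.uniform_prevalence returns the uniform prevalence vector used above:

import numpy as np
import quapy.functional as F

n_classes = 4
old_init = np.full(fill_value=1 / n_classes, shape=(n_classes,))  # replaced expression
new_init = F.uniform_prevalence(n_classes)                        # refactored form
assert np.allclose(old_init, new_init)  # both yield [0.25, 0.25, 0.25, 0.25]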
@@ -268,7 +270,7 @@ def optim_minimize(loss: Callable, init_prev: np.ndarray, return_loss=False):
 
 class KDEyMLauto2(KDEyML):
 
-    def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1, random_state=None, reduction=100, max_reduced=500, target='likelihood'):
+    def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1, random_state=None, reduction=100, max_reduced=500, target='likelihood', search='grid'):
         """
         reduction: number of examples per class for automatically setting the bandwidth
         """

@@ -281,8 +283,10 @@ class KDEyMLauto2(KDEyML):
         self.reduction = reduction
         self.max_reduced = max_reduced
         self.random_state = random_state
-        assert target in ['likelihood', 'likelihood+'] or target in qp.error.QUANTIFICATION_ERROR_NAMES, 'unknown target for auto'
+        assert target in ['likelihood'] or target in qp.error.QUANTIFICATION_ERROR_NAMES, 'unknown target for auto'
+        assert search in ['grid', 'optim'], 'unknown value for search'
         self.target = target
+        self.search = search
 
     def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
         if self.bandwidth == 'auto':
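The new search parameter selects between the two bandwidth-selection strategies implemented below; typical instantiations, mirroring the METHODS table later in this commit (assuming KDEyMLauto2 is imported from the module patched above):

from sklearn.linear_model import LogisticRegression

# 'grid': evaluate 20 log-spaced bandwidth candidates and keep the best
q_grid = KDEyMLauto2(LogisticRegression(), bandwidth='auto', target='likelihood', search='grid')

# 'optim': bounded scalar minimization of the same objective
q_optim = KDEyMLauto2(LogisticRegression(), bandwidth='auto', target='mae', search='optim')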
@@ -303,65 +307,42 @@ class KDEyMLauto2(KDEyML):
         if len(train) > tr_length:
             train = train.sampling(tr_length)
 
-        init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
+        init_prev = F.uniform_prevalence(n_classes=n_classes)
         repeats = 25
         prot = UPP(val, sample_size=self.reduction, repeats=repeats, random_state=self.random_state)
 
-        if self.target == 'likelihood+':
+        def eval_bandwidth(bandwidth):
+            mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
+            loss_accum = 0
+            for (sample, prevtrue) in prot():
+                test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
 
-            def neg_loglikelihood_bandwidth(bandwidth):
-                mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
+                def neg_loglikelihood_prev(prev):
+                    test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
+                    test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
+                    nll = -np.sum(test_loglikelihood)
+                    return nll
 
-                loss_accum = 0
-                for (sample, prevtrue) in prot():
-                    test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
+                if self.target == 'likelihood':
+                    loss_fn = neg_loglikelihood_prev
+                else:
+                    loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prev, prev_hat)
 
-                    def neg_loglikelihood_prev(prev):
-                        test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
-                        test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
-                        nll = -np.sum(test_loglikelihood)
-                        return nll
-
-                    pred_prev, neglikelihood = optim_minimize(neg_loglikelihood_prev, init_prev, return_loss=True)
-                    # print(f'\t\tprev={F.strprev(pred_prev)} (true={F.strprev(prev)}) got {neglikelihood=}')
-                    loss_accum += neglikelihood
-                return loss_accum
+                pred_prev, neglikelihood = optim_minimize(loss_fn, init_prev, return_loss=True)
+                loss_accum += neglikelihood
+            return loss_accum
 
-            r = optimize.minimize_scalar(neg_loglikelihood_bandwidth, bounds=(0.00001, 0.2))
+        if self.search == 'optim':
+            r = optimize.minimize_scalar(eval_bandwidth, bounds=(0.001, 0.2), options={'xatol': 0.005})
             best_band = r.x
             best_loss_value = r.fun
             nit = r.nit
             # assert r.success, 'Process did not converge!'
-            # found bandwidth=0.00994664 after nit=3 iterations loss_val=-212247.24305)
 
-        else:
-            best_band = None
-            best_loss_value = None
-            init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
-            for bandwidth in np.logspace(-4, np.log10(0.2), 20):
-                mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
-
-                loss_accum = 0
-                for (sample, prev) in tqdm(prot(), total=repeats):
-                    test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
-
-                    def neg_loglikelihood_prev_(prev):
-                        test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
-                        test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
-                        return -np.sum(test_loglikelihood)
-
-                    if self.target == 'likelihood':
-                        loss_fn = neg_loglikelihood_prev_
-                    else:
-                        loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prev, prev_hat)
-
-                    pred_prev, loss_val = optim_minimize(loss_fn, init_prev, return_loss=True)
-                    loss_accum += loss_val
-
-                if best_loss_value is None or loss_accum < best_loss_value:
-                    best_loss_value = loss_accum
-                    best_band = bandwidth
+        elif self.search == 'grid':
             nit = 20
+            band_evals = [(band, eval_bandwidth(band)) for band in np.logspace(-4, np.log10(0.2), num=nit)]
+            best_band, best_loss_value = sorted(band_evals, key=lambda x: x[1])[0]
 
         print(f'found bandwidth={best_band:.8f} after {nit=} iterations loss_val={best_loss_value:.5f})')
         self.bandwidth_ = best_band
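This hunk replaces the duplicated objective with a single eval_bandwidth closure shared by both search modes: 'optim' runs SciPy's bounded scalar minimization over the bandwidth, while 'grid' evaluates 20 log-spaced candidates and keeps the lowest loss. A self-contained sketch of the two strategies; toy_loss is a hypothetical stand-in (in the class above, the objective is the negative log-likelihood accumulated over validation bags):

import numpy as np
from scipy import optimize

def toy_loss(bandwidth):
    # hypothetical smooth objective with a minimum near 0.05
    return (np.log10(bandwidth) - np.log10(0.05)) ** 2

# search='optim': bounded scalar minimization; xatol=0.005 stops early,
# trading bandwidth precision for fewer objective evaluations
r = optimize.minimize_scalar(toy_loss, bounds=(0.001, 0.2), method='bounded',
                             options={'xatol': 0.005})
print(f'optim: bandwidth={r.x:.5f} loss={r.fun:.5f}')

# search='grid': 20 log-spaced candidates in [1e-4, 0.2]; keep the lowest loss
candidates = np.logspace(-4, np.log10(0.2), num=20)
best_band, best_loss = min(((b, toy_loss(b)) for b in candidates), key=lambda t: t[1])
print(f'grid:  bandwidth={best_band:.5f} loss={best_loss:.5f}')

Note the bounds asymmetry: the grid starts at 1e-4, whereas the 'optim' search is bounded below by 0.001, matching the new BANDWIDTH_RANGE constant.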
@@ -22,7 +22,7 @@ def newLR():
 
 # typical hyperparameters explored for Logistic Regression
 logreg_grid = {
-    'C': np.logspace(-3,3,7),
+    'C': np.logspace(-4,4,9),
     'class_weight': [None, 'balanced']
 }
 
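The C grid for Logistic Regression is widened from seven to nine powers of ten; a quick check of the two grids:

import numpy as np
print(np.logspace(-3, 3, 7))  # old: seven exact powers of ten, 1e-3 .. 1e3
print(np.logspace(-4, 4, 9))  # new: adds 1e-4 and 1e4 at the extremes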

@@ -34,14 +34,16 @@ def wrap_hyper(classifier_hyper_grid: dict):
 METHODS = [
     ('PACC', PACC(newLR()), wrap_hyper(logreg_grid)),
     ('EMQ', EMQ(newLR()), wrap_hyper(logreg_grid)),
-    ('KDEy-ML', KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
+    ('KDEy', KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
     # ('KDEy-MLred', KDEyMLred(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
-    ('KDEy-ML-scott', KDEyML(newLR(), bandwidth='scott'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-silver', KDEyML(newLR(), bandwidth='silverman'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoLike', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoLike+', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood+'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mae'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoRAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae'), wrap_hyper(logreg_grid)),
+    ('KDEy-scott', KDEyML(newLR(), bandwidth='scott'), wrap_hyper(logreg_grid)),
+    ('KDEy-silver', KDEyML(newLR(), bandwidth='silverman'), wrap_hyper(logreg_grid)),
+    ('KDEy-NLL', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-NLL+', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='optim'), wrap_hyper(logreg_grid)),
+    ('KDEy-AE', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-AE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='optim'), wrap_hyper(logreg_grid)),
+    ('KDEy-RAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-RAE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='optim'), wrap_hyper(logreg_grid)),
 ]
 
 
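Model selection for the plain 'KDEy' entry explores the cross-product of the widened classifier grid and the 20 bandwidth candidates; a quick count of the search space:

import numpy as np

logreg_grid = {'C': np.logspace(-4, 4, 9), 'class_weight': [None, 'balanced']}
bandwidth_grid = np.logspace(-4, np.log10(0.2), 20)
n_configs = len(logreg_grid['C']) * len(logreg_grid['class_weight']) * len(bandwidth_grid)
print(n_configs)  # 9 * 2 * 20 = 360 configurations per dataset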

@@ -80,12 +82,80 @@ def show_results(result_path):
     print(pv)
 
 
+def run_experiment(method_name, quantifier, param_grid):
+
+    print('Init method', method_name)
+
+    with open(global_result_path + '.csv', 'at') as csv:
+        for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
+            print('init', dataset)
+
+            # run_experiment(global_result_path, method_name, quantifier, param_grid, dataset)
+            local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
+
+            if os.path.exists(local_result_path):
+                print(f'result file {local_result_path} already exists; skipping')
+                report = qp.util.load_report(local_result_path)
+
+            else:
+                with qp.util.temp_seed(SEED):
+                    data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
+                    train, test = data.train_test
+
+                    transductive_names = [name for (name, *_) in TRANSDUCTIVE_METHODS]
+
+                    if method_name not in transductive_names:
+                        if len(param_grid) == 0:
+                            t_init = time()
+                            quantifier.fit(train)
+                            train_time = time() - t_init
+                        else:
+                            # model selection (train)
+                            train, val = train.split_stratified(random_state=SEED)
+                            protocol = UPP(val, repeats=n_bags_val)
+                            modsel = GridSearchQ(
+                                quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
+                            )
+                            t_init = time()
+                            try:
+                                modsel.fit(train)
+                                print(f'best params {modsel.best_params_}')
+                                print(f'best score {modsel.best_score_}')
+                                quantifier = modsel.best_model()
+                            except:
+                                print('something went wrong... trying to fit the default model')
+                                quantifier.fit(train)
+                            train_time = time() - t_init
+                    else:
+                        # transductive
+                        t_init = time()
+                        quantifier.fit(train)  # <-- does nothing actually (only projects X into posteriors)
+                        train_time = time() - t_init
+
+                    # test
+                    t_init = time()
+                    protocol = UPP(test, repeats=n_bags_test)
+                    report = qp.evaluation.evaluation_report(
+                        quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True
+                    )
+                    test_time = time() - t_init
+                    report['tr_time'] = train_time
+                    report['te_time'] = test_time
+                    report.to_csv(local_result_path)
+
+            means = report.mean(numeric_only=True)
+            csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
+            csv.flush()
+
+
 if __name__ == '__main__':
 
     qp.environ['SAMPLE_SIZE'] = 500
     qp.environ['N_JOBS'] = -1
-    n_bags_val = 25
-    n_bags_test = 100
+    n_bags_val = 100
+    n_bags_test = 500
     result_dir = f'results_quantification/ucimulti'
 
     os.makedirs(result_dir, exist_ok=True)
@@ -95,69 +165,6 @@ if __name__ == '__main__':
     csv.write(f'Method\tDataset\tMAE\tMRAE\tKLD\tTR-TIME\tTE-TIME\n')
 
     for method_name, quantifier, param_grid in METHODS + TRANSDUCTIVE_METHODS:
+        run_experiment(method_name, quantifier, param_grid)
-        print('Init method', method_name)
-
-        with open(global_result_path + '.csv', 'at') as csv:
-            for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
-                print('init', dataset)
-
-                # run_experiment(global_result_path, method_name, quantifier, param_grid, dataset)
-                local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
-
-                if os.path.exists(local_result_path):
-                    print(f'result file {local_result_path} already exists; skipping')
-                    report = qp.util.load_report(local_result_path)
-
-                else:
-                    with qp.util.temp_seed(SEED):
-                        data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
-                        train, test = data.train_test
-
-                        transductive_names = [name for (name, *_) in TRANSDUCTIVE_METHODS]
-
-                        if method_name not in transductive_names:
-                            if len(param_grid) == 0:
-                                t_init = time()
-                                quantifier.fit(train)
-                                train_time = time() - t_init
-                            else:
-                                # model selection (train)
-                                train, val = train.split_stratified(random_state=SEED)
-                                protocol = UPP(val, repeats=n_bags_val)
-                                modsel = GridSearchQ(
-                                    quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
-                                )
-                                t_init = time()
-                                try:
-                                    modsel.fit(train)
-                                    print(f'best params {modsel.best_params_}')
-                                    print(f'best score {modsel.best_score_}')
-                                    quantifier = modsel.best_model()
-                                except:
-                                    print('something went wrong... trying to fit the default model')
-                                    quantifier.fit(train)
-                                train_time = time() - t_init
-                        else:
-                            # transductive
-                            t_init = time()
-                            quantifier.fit(train)  # <-- does nothing actually (only projects X into posteriors)
-                            train_time = time() - t_init
-
-                        # test
-                        t_init = time()
-                        protocol = UPP(test, repeats=n_bags_test)
-                        report = qp.evaluation.evaluation_report(
-                            quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True
-                        )
-                        test_time = time() - t_init
-                        report['tr_time'] = train_time
-                        report['te_time'] = test_time
-                        report.to_csv(local_result_path)
-
-                means = report.mean(numeric_only=True)
-                csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
-                csv.flush()
 
     show_results(global_result_path)
@@ -182,8 +182,10 @@ for i, dataset in enumerate(tqdm(DATASETS, desc='processing datasets', total=len
     if show_mse:
         measurements.append(MSE_error_te)
         measurement_names.append('MSE')
-    measurements.append(LIKE_value_te)
-    measurement_names.append('NLL')
+    measurements.append(normalize_metric(LIKE_value_te))
+    measurements.append(normalize_metric(LIKE_value_tr))
+    measurement_names.append('NLL(te)')
+    measurement_names.append('NLL(tr)')
 
     if normalize:
         measurements = [normalize_metric(m) for m in measurements]
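normalize_metric is applied to the likelihood curves here so that train and test NLL can share an axis with the other measurements. Its definition lives elsewhere in this script; a plausible min-max rescaling, stated as an assumption rather than the script's actual code:

import numpy as np

def normalize_metric(values):
    # hypothetical min-max rescaling to [0, 1]; the script's real
    # normalize_metric may differ
    values = np.asarray(values, dtype=float)
    vmin, vmax = values.min(), values.max()
    if vmax == vmin:
        return np.zeros_like(values)
    return (values - vmin) / (vmax - vmin)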

@@ -192,12 +194,12 @@ for i, dataset in enumerate(tqdm(DATASETS, desc='processing datasets', total=len
 
     # Train-Test measurements
     # ----------------------------------------------------------------------------------------------------
-    measurements = []
-    measurement_names = []
-    measurements.append(normalize_metric(LIKE_value_te))
-    measurements.append(normalize_metric(LIKE_value_tr))
-    measurement_names.append('NLL(te)')
-    measurement_names.append('NLL(tr)')
+    # measurements = []
+    # measurement_names = []
+    # measurements.append(normalize_metric(LIKE_value_te))
+    # measurements.append(normalize_metric(LIKE_value_tr))
+    # measurement_names.append('NLL(te)')
+    # measurement_names.append('NLL(tr)')
     plot(xaxis, measurements, measurement_names, suffix='AVEtr')