"""Experiment driver for classifier-accuracy-prediction (CAP) methods.

For every classifier and every dataset, this script:
  1. fits the classifier on the training split L,
  2. generates NUM_TEST test samples from U via the UPP protocol,
  3. computes the true value of each accuracy measure on each sample,
  4. fits every CAP method on the validation split V and predicts the
     accuracy on each test sample,
  5. saves one JSON result file per (classifier, accuracy, dataset, method),
  6. plots true-vs-estimated diagonal figures and builds the final tables.
"""

from collections import defaultdict
from time import time

from utils import *
from models_multiclass import *
from quapy.protocol import UPP
from commons import *


# Sample size used by quapy's sampling protocols.
qp.environ['SAMPLE_SIZE'] = 250

# Number of test samples drawn per dataset by the UPP protocol.
NUM_TEST = 100

for cls_name, h in gen_classifiers():
    print(cls_name)

    # Accumulators across all datasets (fresh per classifier):
    #   acc_trues:     acc_name -> list of true accuracy values
    #   acc_predicted: acc_name -> method_name -> list of estimated values
    acc_trues = defaultdict(list)
    acc_predicted = defaultdict(lambda: defaultdict(list))

    for dataset_name, (L, V, U) in gen_datasets():
        print(dataset_name)

        h.fit(*L.Xy)

        # Test generation protocol: NUM_TEST samples from U with uniform
        # prevalence; fixed random_state for reproducibility.
        test_prot = UPP(U, repeats=NUM_TEST, return_type='labelled_collection', random_state=0)

        # Compute some stats of the dataset.
        get_dataset_stats(f'dataset_stats/{dataset_name}.json', test_prot, L, V)

        # Precompute the actual accuracy values for every measure.
        dataset_true_accs = {}
        for acc_name, acc_fn in gen_acc_measure():
            dataset_true_accs[acc_name] = [true_acc(h, acc_fn, Ui) for Ui in test_prot()]
            acc_trues[acc_name].extend(dataset_true_accs[acc_name])

        for method_name, method in gen_CAP(h, vanilla_acc_fn):
            print('PARCHEADO con vanilla accuracy')

            # Training.
            tinit = time()
            method.fit(V)
            t_train = time() - tinit

            # Predictions on every test sample, for every accuracy measure.
            dataset_method_accs, t_test_ave = get_method_predictions(method, test_prot, gen_acc_measure)

            # Accumulate results across datasets and persist one JSON file
            # per accuracy measure. (Fix: the result dict used to be built
            # and saved *after* this loop, relying on the leaked loop
            # variable, so only the last accuracy measure was ever saved
            # despite the path being parameterized by acc_name.)
            for acc_name, _ in gen_acc_measure():
                acc_predicted[acc_name][method_name].extend(dataset_method_accs[acc_name])
                result = {
                    't_train': t_train,
                    't_test_ave': t_test_ave,
                    'true_acc': dataset_true_accs[acc_name],
                    'estim_acc': dataset_method_accs[acc_name]
                }
                save_json_file(f"results/{cls_name}/{acc_name}/{dataset_name}/{method_name}.json", result)

            print(f'\t{method_name} took train={t_train:.2f}s test(ave)={t_test_ave:.2f}s')

    # One true-vs-estimated diagonal plot per accuracy measure, comparing
    # all methods for this classifier across all datasets.
    for acc_name, _ in gen_acc_measure():
        acc_predicted_ = list(acc_predicted[acc_name].items())
        plot_diagonal(cls_name, acc_name, acc_trues[acc_name], acc_predicted_)

gen_tables()