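"""
Experiment driver comparing KDEy quantification under different bandwidth-selection
strategies (hyperparameter grid, Scott's and Silverman's rules, and automatic selection
targeting NLL/AE/RAE via grid search or numerical optimization) on the UCI multiclass
datasets. Per-method results are written to a tab-separated CSV and compiled into
LaTeX/PDF tables.
"""
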
import pickle
import os
from time import time
from collections import defaultdict

import numpy as np
from sklearn.linear_model import LogisticRegression

import quapy as qp
from KDEy.kdey_devel import KDEyMLauto, KDEyMLauto2
from quapy.method.aggregative import PACC, EMQ, KDEyML
from quapy.model_selection import GridSearchQ
from quapy.protocol import UPP
from pathlib import Path

from result_table.src.table import Table

SEED = 1


def newLR():
    return LogisticRegression(max_iter=3000)


# typical hyperparameters explored for Logistic Regression
logreg_grid = {
    'C': np.logspace(-4, 4, 9),
    'class_weight': [None, 'balanced']
}


def wrap_hyper(classifier_hyper_grid: dict):
    return {'classifier__' + k: v for k, v in classifier_hyper_grid.items()}


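# each METHODS entry is a (name, quantifier, hyperparameter grid) triple; the grid defines the
# model-selection search space (classifier hyperparameters are prefixed with 'classifier__' by
# wrap_hyper, and the plain KDEy entry additionally searches over its kernel bandwidth)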
METHODS = [
    # ('PACC', PACC(newLR()), wrap_hyper(logreg_grid)),
    # ('EMQ', EMQ(newLR()), wrap_hyper(logreg_grid)),
    ('KDEy', KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
    # ('KDEy-MLred', KDEyMLred(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
    ('KDEy-scott', KDEyML(newLR(), bandwidth='scott'), wrap_hyper(logreg_grid)),
    ('KDEy-silver', KDEyML(newLR(), bandwidth='silverman'), wrap_hyper(logreg_grid)),
    ('KDEy-NLL', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='grid'), wrap_hyper(logreg_grid)),
    ('KDEy-NLL+', KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='optim'), wrap_hyper(logreg_grid)),
    ('KDEy-AE', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='grid'), wrap_hyper(logreg_grid)),
    ('KDEy-AE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='optim'), wrap_hyper(logreg_grid)),
    ('KDEy-RAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='grid'), wrap_hyper(logreg_grid)),
    ('KDEy-RAE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='optim'), wrap_hyper(logreg_grid)),
]

"""
|
|
TKDEyML era primero bandwidth (init 0.05) y luego prevalence (init uniform)
|
|
TKDEyML2 era primero prevalence (init uniform) y luego bandwidth (init 0.05)
|
|
TKDEyML3 era primero prevalence (init uniform) y luego bandwidth (init 0.1)
|
|
TKDEyML4 es como ML2 pero max 5 iteraciones por optimización
|
|
"""
|
|
TRANSDUCTIVE_METHODS = [
|
|
#('TKDEy-ML', KDEyMLauto(newLR()), None),
|
|
# ('TKDEy-both', KDEyMLauto(newLR(), optim='both'), None),
|
|
# ('TKDEy-bothfine', KDEyMLauto(newLR(), optim='both_fine'), None),
|
|
# ('TKDEy-two', KDEyMLauto(newLR(), optim='two_steps'), None),
|
|
# ('TKDEy-MLike', KDEyMLauto(newLR(), optim='max_likelihood'), None),
|
|
# ('TKDEy-MLike2', KDEyMLauto(newLR(), optim='max_likelihood2'), None),
|
|
#('TKDEy-ML3', KDEyMLauto(newLR()), None),
|
|
#('TKDEy-ML4', KDEyMLauto(newLR()), None),
|
|
]
|
|
|
|
def show_results(result_path, tables, tables_path='./tables/main.pdf'):
    import pandas as pd
    df = pd.read_csv(result_path + '.csv', sep='\t')
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('display.width', 1000)  # adjust the maximum display width
    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE"], margins=True)
    print(pv)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["MRAE"], margins=True)
    print(pv)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["KLD"], margins=True)
    print(pv)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["TR-TIME"], margins=True)
    print(pv)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["TE-TIME"], margins=True)
    print(pv)

    os.makedirs(Path(tables_path).parent, exist_ok=True)
    tables = [table for table in tables.values()]

    # pretty-printed method names used in the LaTeX tables
    method_replace = {
        'KDEy': 'KDEy(orig)',
        'KDEy-scott': 'Scott',
        'KDEy-silver': 'Silver',
        'KDEy-NLL': 'NLL(grid)',
        'KDEy-NLL+': 'NLL(search)',
        'KDEy-AE': 'AE(grid)',
        'KDEy-AE+': 'AE(search)',
        'KDEy-RAE': 'RAE(grid)',
        'KDEy-RAE+': 'RAE(search)',
    }

    Table.LatexPDF(tables_path, tables, method_replace=method_replace, verbose=True, clean=False)


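# Results are exchanged through the global tab-separated CSV created in __main__ and appended to
# by collect_results below, with one row of bag-averaged scores per (method, dataset) pair:
#   Method  Dataset  MAE  MRAE  KLD  TR-TIME  TE-TIME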
def collect_results(method_name, tables):

    print('Init method', method_name)

    with open(global_result_path + '.csv', 'at') as csv:
        for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
            print('init', dataset)

            # run_experiment(global_result_path, method_name, quantifier, param_grid, dataset)
            # (see the sketch after this function for what such an experiment might look like)
            local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')

            if os.path.exists(local_result_path):
                print(f'result file {local_result_path} already exists; skipping')
                report = qp.util.load_report(local_result_path)
                for metric, table in tables.items():
                    # timing columns are added for every method; for the error tables, a method
                    # tuned for one error measure is excluded from the table of the other one
                    # (e.g., the MAE table omits the *-RAE variants and vice versa)
                    add_column = metric in ['tr_time', 'te_time']
                    if not add_column:
                        add_column = (metric == 'mrae' and '-AE' not in method_name) or (metric == 'mae' and '-RAE' not in method_name)
                    if add_column:
                        tables[metric].add(benchmark=dataset, method=method_name, v=report[metric])
                    # tables['mrae'].add(benchmark=dataset, method=method_name, v=report['mrae'])
            else:
                continue

            means = report.mean(numeric_only=True)
            csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
            csv.flush()


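# The per-(method, dataset) '.dataframe' reports loaded above are produced elsewhere (the
# run_experiment call is commented out and not defined in this file). The following is only a
# minimal sketch of what such an experiment could look like with quapy's GridSearchQ and the
# UPP protocol; the function name, the validation split, and the way the report is persisted
# are assumptions, not the actual implementation.
def run_experiment_sketch(method_name, quantifier, param_grid, dataset, n_bags_val=100, n_bags_test=500):
    # fetch one of the UCI multiclass datasets and carve a validation split out of training
    data = qp.datasets.fetch_UCIMulticlassDataset(dataset)
    train, test = data.train_test
    train, val = train.split_stratified(random_state=SEED)

    # model selection: draw validation bags with the uniform-prevalence protocol (UPP; bag size
    # taken from qp.environ['SAMPLE_SIZE']) and pick the hyperparameters minimizing MAE
    model = GridSearchQ(
        quantifier, param_grid,
        protocol=UPP(val, repeats=n_bags_val),
        error='mae', refit=False, n_jobs=-1, verbose=True
    ).fit(train)

    # evaluation: draw test bags with UPP and compute the error metrics used in the tables
    report = qp.evaluation.evaluation_report(
        model.best_model(), protocol=UPP(test, repeats=n_bags_test),
        error_metrics=['mae', 'mrae', 'kld'], verbose=True
    )
    return report  # the original pipeline stores this where qp.util.load_report can read it

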
if __name__ == '__main__':

    qp.environ['SAMPLE_SIZE'] = 500
    qp.environ['N_JOBS'] = -1
    n_bags_val = 100
    n_bags_test = 500
    result_dir = f'results_quantification/ucimulti'

    os.makedirs(result_dir, exist_ok=True)

    tables = {
        'mae': Table('inductive-mae'),
        'mrae': Table('inductive-mrae'),
        'tr_time': Table('inductive-tr-time'),
        # 'te_time': Table('inductive-te-time'),
    }

    tables['tr_time'].format.show_std = False
    # tables['te_time'].format.show_std = False

    global_result_path = f'{result_dir}/allmethods'
    with open(global_result_path + '.csv', 'wt') as csv:
        csv.write(f'Method\tDataset\tMAE\tMRAE\tKLD\tTR-TIME\tTE-TIME\n')

    for method_name, _, _ in METHODS + TRANSDUCTIVE_METHODS:
        collect_results(method_name, tables)

    show_results(global_result_path, tables)