switching

Alejandro Moreo Fernandez 2023-11-03 09:54:36 +01:00
parent 0f4008e18d
commit 50d0ed2e84
7 changed files with 99 additions and 135 deletions

View File

@@ -8,7 +8,7 @@ from distribution_matching.method_dirichlety import DIRy
 from sklearn.linear_model import LogisticRegression
 from method_kdey_closed_efficient import KDEyclosed_efficient
-METHODS = ['ACC', 'PACC', 'HDy-OvA', 'DIR', 'DM-T', 'DM-HD', 'KDEy-DMhd3', 'DM-CS', 'KDEy-closed++', 'EMQ', 'KDEy-ML'] #['ACC', 'PACC', 'HDy-OvA', 'DIR', 'DM', 'KDEy-DMhd3', 'KDEy-closed++', 'EMQ', 'KDEy-ML'] #, 'KDEy-DMhd2'] #, 'KDEy-DMhd2', 'DM-HD'] 'KDEy-DMjs', 'KDEy-DM', 'KDEy-ML+', 'KDEy-DMhd3+', 'EMQ-C',
+METHODS = ['ACC', 'PACC', 'HDy-OvA', 'DM-T', 'DM-HD', 'KDEy-DMhd3', 'DM-CS', 'KDEy-closed++', 'DIR', 'EMQ', 'KDEy-ML'] #['ACC', 'PACC', 'HDy-OvA', 'DIR', 'DM', 'KDEy-DMhd3', 'KDEy-closed++', 'EMQ', 'KDEy-ML'] #, 'KDEy-DMhd2'] #, 'KDEy-DMhd2', 'DM-HD'] 'KDEy-DMjs', 'KDEy-DM', 'KDEy-ML+', 'KDEy-DMhd3+', 'EMQ-C',
 BIN_METHODS = [x.replace('-OvA', '') for x in METHODS]

View File

@@ -9,18 +9,42 @@ Plots results for MAE, MRAE, and KLD
 The rest of hyperparameters were set to their default values
 """
-df_tweet = pd.read_csv('../results/tweet/sensibility/KDEy-ML.csv', sep='\t')
-df_lequa = pd.read_csv('../results/lequa/sensibility/KDEy-ML.csv', sep='\t')
-df = pd.concat([df_tweet, df_lequa])
-for err in ['MAE', 'MRAE', 'KLD']:
-    piv = df.pivot_table(index='Bandwidth', columns='Dataset', values=err)
-    g = sns.lineplot(data=piv, markers=True, dashes=False)
-    g.set(xlim=(0.01, 0.2))
-    g.legend(loc="center left", bbox_to_anchor=(1, 0.5))
-    g.set_ylabel(err)
-    g.set_xticks(np.linspace(0.01, 0.2, 20))
-    plt.xticks(rotation=90)
-    plt.grid()
-    plt.savefig(f'./sensibility_{err}.pdf', bbox_inches='tight')
-    plt.clf()
+log_mrae = True
+
+for method, param, xlim, xticks in [
+    ('KDEy-ML', 'Bandwidth', (0.01, 0.2), np.linspace(0.01, 0.2, 20)),
+    ('DM-HD', 'nbins', (2,32), list(range(2,10)) + list(range(10,34,2)))
+]:
+
+    for dataset in ['tweet', 'lequa', 'uciml']:
+
+        if dataset == 'tweet':
+            df = pd.read_csv(f'../results/tweet/sensibility/{method}.csv', sep='\t')
+            ylim = (0.03, 0.21)
+        elif dataset == 'lequa':
+            df = pd.read_csv(f'../results/lequa/T1B/sensibility/{method}.csv', sep='\t')
+            ylim = (0.0125, 0.03)
+        elif dataset == 'uciml':
+            ylim = (0, 0.23)
+            df = pd.read_csv(f'../results/ucimulti/sensibility/{method}.csv', sep='\t')
+
+        for err in ['MAE']: #, 'MRAE']:
+            piv = df.pivot_table(index=param, columns='Dataset', values=err)
+            g = sns.lineplot(data=piv, markers=True, dashes=False)
+            g.set(xlim=xlim)
+            g.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+            if log_mrae and err=='MRAE':
+                plt.yscale('log')
+                g.set_ylabel('log('+err+')')
+            else:
+                g.set_ylabel(err)
+            g.set_ylim(ylim)
+            g.set_xticks(xticks)
+            plt.xticks(rotation=90)
+            plt.grid()
+            plt.savefig(f'./sensibility_{method}_{dataset}_{err}.pdf', bbox_inches='tight')
+            plt.clf()

View File

@@ -1,10 +1,8 @@
-import ternary
 import math
 import numpy as np
 from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import train_test_split
 from sklearn.neighbors import KernelDensity
-import plotly.figure_factory as ff
 from data import LabelledCollection
@@ -15,6 +13,7 @@ scale = 200
 # with plotly the contours come out nicely, but it is a bit of a hassle because it uses the browser...
 def plot_simplex_(ax, density, title='', fontsize=9, points=None):
+    import ternary
     tax = ternary.TernaryAxesSubplot(ax=ax, scale=scale)
     tax.heatmapf(density, boundary=True, style="triangular", colorbar=False, cmap='viridis') #cmap='magma')
@@ -34,6 +33,7 @@ def plot_simplex_(ax, density, title='', fontsize=9, points=None):
 def plot_simplex(ax, coord, kde_scores, title='', fontsize=11, points=None, savepath=None):
+    import plotly.figure_factory as ff
     tax = ff.create_ternary_contour(coord.T, kde_scores, pole_labels=['y=1', 'y=2', 'y=3'],
                                     interp_mode='cartesian',
@ -49,6 +49,8 @@ def plot_simplex(ax, coord, kde_scores, title='', fontsize=11, points=None, save
from mpl_toolkits.axes_grid1 import make_axes_locatable from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_3class_problem(post_c1, post_c2, post_c3, post_test, alpha, bandwidth): def plot_3class_problem(post_c1, post_c2, post_c3, post_test, alpha, bandwidth):
import ternary
post_c1 = np.flip(post_c1, axis=1) post_c1 = np.flip(post_c1, axis=1)
post_c2 = np.flip(post_c2, axis=1) post_c2 = np.flip(post_c2, axis=1)
post_c3 = np.flip(post_c3, axis=1) post_c3 = np.flip(post_c3, axis=1)

View File

@@ -1,56 +0,0 @@
import numpy as np
from sklearn.linear_model import LogisticRegression
import os
import pandas as pd
import quapy as qp
from method_kdey import KDEy

SEED=1

def task(bandwidth):
    print('job-init', dataset, bandwidth)
    train, val_gen, test_gen = qp.datasets.fetch_lequa2022(dataset)

    with qp.util.temp_seed(SEED):
        quantifier = KDEy(LogisticRegression(), target='max_likelihood', val_split=10, bandwidth=bandwidth)
        quantifier.fit(train)
        report = qp.evaluation.evaluation_report(
            quantifier, protocol=test_gen, error_metrics=['mae', 'mrae', 'kld'], verbose=True)
        return report

if __name__ == '__main__':

    qp.environ['SAMPLE_SIZE'] = qp.datasets.LEQUA2022_SAMPLE_SIZE['T1B']
    qp.environ['N_JOBS'] = -1
    result_dir = f'results_lequa_sensibility'

    os.makedirs(result_dir, exist_ok=True)

    method = 'KDEy-MLE'
    global_result_path = f'{result_dir}/{method}'
    if not os.path.exists(global_result_path+'.csv'):
        with open(global_result_path+'.csv', 'wt') as csv:
            csv.write(f'Method\tDataset\tBandwidth\tMAE\tMRAE\tKLD\n')

    dataset = 'T1B'
    bandwidths = np.linspace(0.01, 0.2, 20)

    reports = qp.util.parallel(task, bandwidths, n_jobs=-1)
    with open(global_result_path + '.csv', 'at') as csv:
        for bandwidth, report in zip(bandwidths, reports):
            means = report.mean()
            local_result_path = global_result_path + '_' + dataset + f'_{bandwidth:.3f}'
            report.to_csv(f'{local_result_path}.dataframe')
            csv.write(f'{method}\tLeQua-T1B\t{bandwidth}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\n')
            csv.flush()

    df = pd.read_csv(global_result_path + '.csv', sep='\t')
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE", "MRAE"])
    print(pv)

View File

@@ -0,0 +1,56 @@
import numpy as np
from sklearn.linear_model import LogisticRegression
import os
import quapy as qp
from distribution_matching.commons import show_results
from method_kdey import KDEy
from quapy.method.aggregative import DistributionMatching

SEED=1

def task(val):
    print('job-init', val)
    train, val_gen, test_gen = qp.datasets.fetch_lequa2022('T1B')

    with qp.util.temp_seed(SEED):
        if method=='KDEy-ML':
            quantifier = KDEy(LogisticRegression(), target='max_likelihood', val_split=10, bandwidth=val)
        elif method == 'DM-HD':
            quantifier = DistributionMatching(LogisticRegression(), val_split=10, nbins=val, divergence='HD')
        quantifier.fit(train)
        report = qp.evaluation.evaluation_report(
            quantifier, protocol=test_gen, error_metrics=['mae', 'mrae', 'kld'], verbose=True)
        return report

if __name__ == '__main__':

    qp.environ['SAMPLE_SIZE'] = qp.datasets.LEQUA2022_SAMPLE_SIZE['T1B']
    qp.environ['N_JOBS'] = -1
    result_dir = f'results/lequa/T1B/sensibility'

    os.makedirs(result_dir, exist_ok=True)

    for method, param, grid in [
        ('KDEy-ML', 'Bandwidth', np.linspace(0.01, 0.2, 20)),
        ('DM-HD', 'nbins', list(range(2, 10)) + list(range(10, 34, 2)))
    ]:

        global_result_path = f'{result_dir}/{method}'
        if not os.path.exists(global_result_path+'.csv'):
            with open(global_result_path+'.csv', 'wt') as csv:
                csv.write(f'Method\tDataset\t{param}\tMAE\tMRAE\tKLD\n')

        reports = qp.util.parallel(task, grid, n_jobs=-1)
        with open(global_result_path + '.csv', 'at') as csv:
            for val, report in zip(grid, reports):
                means = report.mean()
                local_result_path = global_result_path + '_T1B' + (f'_{val:.3f}' if isinstance(val, float) else f'{val}')
                report.to_csv(f'{local_result_path}.dataframe')
                csv.write(f'{method}\tLeQua-T1B\t{val}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\n')
                csv.flush()

        show_results(global_result_path)
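
Note: show_results is imported from distribution_matching.commons, which is not part of this diff. Judging from the inline summary code it replaces in the deleted sensibility scripts (load the result TSV and print a Dataset-by-Method pivot table of MAE/MRAE), a minimal sketch of what it is assumed to do is:

import pandas as pd

def show_results(result_path):
    # hypothetical sketch, not the actual commons.py code: assumed to mirror the removed inline summary
    df = pd.read_csv(result_path + '.csv', sep='\t')
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE", "MRAE"])
    print(pv)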

View File

@@ -1,59 +0,0 @@
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
import os
import sys
import pandas as pd
import quapy as qp
from quapy.method.aggregative import EMQ, DistributionMatching, PACC, ACC, CC, PCC, HDy, OneVsAllAggregative
from method_kdey import KDEy
from method_dirichlety import DIRy
from quapy.model_selection import GridSearchQ
from quapy.protocol import UPP

SEED=1

if __name__ == '__main__':

    qp.environ['SAMPLE_SIZE'] = 100
    qp.environ['N_JOBS'] = -1
    n_bags_val = 250
    n_bags_test = 1000
    result_dir = f'results_tweet_sensibility'

    os.makedirs(result_dir, exist_ok=True)

    method = 'KDEy-MLE'
    global_result_path = f'{result_dir}/{method}'
    if not os.path.exists(global_result_path+'.csv'):
        with open(global_result_path+'.csv', 'wt') as csv:
            csv.write(f'Method\tDataset\tBandwidth\tMAE\tMRAE\tKLD\n')

    with open(global_result_path+'.csv', 'at') as csv:
        for bandwidth in np.linspace(0.01, 0.2, 20):
            for dataset in qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST:
                print('init', dataset)

                local_result_path = global_result_path + '_' + dataset + f'_{bandwidth:.3f}'

                with qp.util.temp_seed(SEED):
                    data = qp.datasets.fetch_twitter(dataset, min_df=3, pickle=True, for_model_selection=False)
                    quantifier = KDEy(LogisticRegression(), target='max_likelihood', val_split=10, bandwidth=bandwidth)
                    quantifier.fit(data.training)
                    protocol = UPP(data.test, repeats=n_bags_test)
                    report = qp.evaluation.evaluation_report(quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True)
                    report.to_csv(f'{local_result_path}.dataframe')
                    means = report.mean()
                    csv.write(f'{method}\t{data.name}\t{bandwidth}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\n')
                    csv.flush()

    df = pd.read_csv(global_result_path+'.csv', sep='\t')
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE", "MRAE"])
    print(pv)

View File

@@ -25,9 +25,6 @@ if __name__ == '__main__':
     os.makedirs(result_dir, exist_ok=True)
     for method in METHODS:
-        #if method == 'HDy-OvA': continue
-        #if method == 'DIR': continue
-        # if method != 'EMQ-C': continue
         print('Init method', method)