experiments for report

Alejandro Moreo Fernandez 2025-07-08 14:34:43 +02:00
parent f063e4f5dc
commit ccae7746ce
5 changed files with 173 additions and 119 deletions

.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
+[submodule "result_path"]
+	path = result_path
+	url = gitea@gitea-s2i2s.isti.cnr.it:moreo/result_table.git
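The new submodule pins the external result_table repository at ./result_path (the exact commit it points to appears at the end of this diff), presumably so that experiment results can be versioned separately from the experiment code.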

commons.py Normal file

@@ -0,0 +1,22 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class DataConfig:
+    n_features: int
+    n_informative: int
+    n_redundant: int
+    n_clusters_per_class: int
+    flip_y: float
+
+
+config_easy = DataConfig(n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, flip_y=0.0)
+config_medium = DataConfig(n_features=10, n_informative=5, n_redundant=2, n_clusters_per_class=2, flip_y=0.01)
+config_hard = DataConfig(n_features=50, n_informative=20, n_redundant=10, n_clusters_per_class=4, flip_y=0.05)
+config_veryhard = DataConfig(n_features=100, n_informative=30, n_redundant=20, n_clusters_per_class=8, flip_y=0.05)
+
+configs = [
+    config_easy,
+    config_medium,
+    config_hard,
+    config_veryhard
+]
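The four presets scale task difficulty by growing the feature space, lowering the ratio of informative features, multiplying clusters per class, and adding label noise via flip_y. A minimal usage sketch of how one preset plugs into sklearn's make_classification; the keyword mapping mirrors the generation script below, the sample size here is illustrative, and the commons module name is inferred from the `from commons import configs` lines in the other files:

from sklearn.datasets import make_classification
from commons import config_easy

# Illustrative sample size; the generation script below uses population_size * 100.
X, y = make_classification(
    n_samples=1_000,
    n_features=config_easy.n_features,
    n_informative=config_easy.n_informative,
    n_redundant=config_easy.n_redundant,
    n_clusters_per_class=config_easy.n_clusters_per_class,
    flip_y=config_easy.flip_y,
    random_state=0,
)
print(X.shape, y.mean())  # balanced binary labels over the chosen feature space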

Data-generation script (file name not shown)

@@ -6,17 +6,20 @@ from quapy.data import LabelledCollection
 from quapy.protocol import UniformPrevalenceProtocol
 import quapy.functional as F
 import pandas as pd
+from commons import configs

 random_state = 0
-n_features = 10
 n_areas = 50
 n_per_area = 1_000
 population_size = n_areas * n_per_area
 n_experiments = 100
 n_survey = population_size//n_experiments

-print(f'{n_features=}')
+for config in configs:
+    print(f'{config.n_features=}')
 print(f'{n_areas=}')
 print(f'{n_per_area=}')
 print(f'{population_size=}')
@@ -25,14 +28,14 @@ print(f'{n_survey=}')
 X, y = make_classification(
     n_samples=population_size * 100,
-    n_features=n_features,
-    n_informative=n_features//2,
-    n_redundant=2,
+    n_features=config.n_features,
+    n_informative=config.n_informative,
+    n_redundant=config.n_redundant,
     n_repeated=0,
     n_classes=2,
-    n_clusters_per_class=2,
+    n_clusters_per_class=config.n_clusters_per_class,
     weights=[0.5, 0.5],
-    flip_y=0.01,
+    flip_y=config.flip_y,
     class_sep=1.0,
     hypercube=True,
     shift=0.0,
@@ -73,7 +76,7 @@ data_dic = {
     'ID': idx,
     'Y': data_y,
 }
-for feat_id in range(n_features):
+for feat_id in range(config.n_features):
     data_dic[f'X_{feat_id}'] = data_X[:,feat_id]

 data_dic['area'] = data_area
@@ -82,7 +85,7 @@ for experiment_id, experiment_selection in enumerate(experiment_selections):
 df = pd.DataFrame(data_dic)

-data_path = f'./data/data_nF{n_features}_nA{n_areas}_P{population_size}_nExp{n_experiments}.csv'
+data_path = f'./data/data_nF{config.n_features}_nA{n_areas}_P{population_size}_nExp{n_experiments}.csv'
 os.makedirs(Path(data_path).parent, exist_ok=True)
 df.to_csv(data_path, index=0)
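A sketch of reading one generated file back, assuming only the columns visible in the hunks above (ID, Y, the X_i features, and area; the script may add more, e.g. an experiment-selection column, in parts not shown). The file name corresponds to config_easy, whose n_features is 2:

import pandas as pd

# One row per individual; the nF value in the file name tracks config.n_features.
df = pd.read_csv('./data/data_nF2_nA50_P50000_nExp100.csv')
print(df.head())  # expected columns include ID, Y, X_0, X_1, area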

Experiment script (file name not shown)

@@ -3,15 +3,14 @@ from os.path import join
 import numpy as np
 import pandas as pd
-from sklearn.calibration import CalibratedClassifierCV
 from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
 from pathlib import Path
 from quapy.data import LabelledCollection
-from quapy.model_selection import GridSearchQ
-from quapy.protocol import APP
 from quapy.method.aggregative import PACC, PCC, EMQ, DMy, ACC, KDEyML, CC
-import quapy.functional as F
 from tqdm import tqdm
+from commons import configs
+from src.new_table import LatexTable

 pd.set_option('display.max_columns', None)
 pd.set_option('display.width', 1000)
@@ -42,13 +41,17 @@ def methods():
     yield 'PACC', PACC(classifier=LogisticRegression())
     yield 'EMQ', EMQ(classifier=LogisticRegression())
     yield 'KDEy', KDEyML(classifier=LogisticRegression(), bandwidth=0.05)
-    yield 'KDEy01', KDEyML(classifier=LogisticRegression())
+    # yield 'KDEy01', KDEyML(classifier=LogisticRegression())

-data_path = './data/data_nF10_nA50_P50000_nExp100.csv'
-config = Path(data_path).name.replace('.csv','')
-result_dir = f'./results/{config}'
+for config in configs:
+    print(f'Running {config}')
+
+    config_name = f'data_nF{config.n_features}_nA50_P50000_nExp100'
+    data_path = f'./data/{config_name}.csv'
+    result_dir = f'./results/{config_name}'

 os.makedirs(result_dir, exist_ok=True)
 X, y, A, numExperiments, df = load_data(data_path)
@@ -58,8 +61,18 @@ n_areas = len(areas)
 methods_results = []

-for q_name, quantifier in methods():
+# load baseline result from UniPI
+baseline_path = join(result_dir, 'Risultati_SAE.csv')
+if os.path.exists(baseline_path):
+    unipi_baseline_df = pd.read_csv(baseline_path, index_col=0, sep=';')
+    unipi_baseline_df = unipi_baseline_df.rename(columns={'AE(SAE)': 'AE'})
+    unipi_baseline_name = "SAE"
+    methods_results.append(unipi_baseline_df)
+else:
+    unipi_baseline_name = None
+
+# run quantification methods
+for q_name, quantifier in methods():
     result_path = join(result_dir, f'{q_name}.csv')
     if os.path.exists(result_path):
         method_results = pd.read_csv(result_path, index_col=0)
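The UniPI baseline is merged in as just another block of per-area results. A hypothetical stand-in (all values invented) showing the minimal shape the loader seems to expect: a semicolon-separated file whose 'AE(SAE)' error column gets renamed to 'AE', plus the 'method' and 'area' columns the later pivot relies on:

import pandas as pd

# Hypothetical stand-in for Risultati_SAE.csv; values invented for illustration.
unipi_baseline_df = pd.DataFrame({
    'method': ['SAE', 'SAE'],
    'area': ['00', '01'],
    'AE(SAE)': [0.02, 0.03],
})
unipi_baseline_df = unipi_baseline_df.rename(columns={'AE(SAE)': 'AE'})
print(unipi_baseline_df)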
@@ -107,6 +120,18 @@ for q_name, quantifier in methods():
     methods_results.append(method_results)

 methods_results = pd.concat(methods_results)
+methods_results["area"] = methods_results["area"].astype(str).str.zfill(2)
+
+latex_table = LatexTable.from_dataframe(methods_results, method='method', benchmark='area', value='AE')
+latex_table.format.configuration.resizebox=True
+
+methods_order = [m for m, _ in methods()]
+if unipi_baseline_name is not None:
+    methods_order = [unipi_baseline_name] + methods_order
+latex_table.reorder_methods(methods_order)
+
+latex_table.latexPDF(pdf_path=join('./tables', f'{config_name}.pdf'), tabular_dir=f'tabular_{config_name}', landscape=False)
+
 pv = methods_results.pivot_table(
     index='area',
     columns='method',
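The hunk cuts off inside the pivot_table call. A self-contained sketch of what it presumably computes, assuming values='AE' (not visible above) and pandas' default mean aggregation:

import pandas as pd

# Invented values for illustration: mean AE per (area, method) cell.
methods_results = pd.DataFrame({
    'method': ['CC', 'CC', 'PACC', 'PACC'],
    'area':   ['00', '01', '00', '01'],
    'AE':     [0.08, 0.10, 0.03, 0.04],
})
pv = methods_results.pivot_table(index='area', columns='method', values='AE')
print(pv)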

result_path Submodule

@@ -0,0 +1 @@
+Subproject commit 816a4c675e2919ea0ec4dd2ba9bf0d518d53dc17