Compare commits

20 Commits (5bcaa8d1bb ... 9aecdad66f)

| Author | SHA1 | Date |
|---|---|---|
| Alejandro Moreo Fernandez | 9aecdad66f | |
| Alejandro Moreo Fernandez | cdf0200430 | |
| Alejandro Moreo Fernandez | 3686e820fe | |
| Alejandro Moreo Fernandez | 14ff3c9884 | |
| Alejandro Moreo Fernandez | 641228bf62 | |
| Alejandro Moreo Fernandez | 04c1f286ce | |
| Alejandro Moreo Fernandez | a271fe1231 | |
| Mirko Bunse | 5e2fc07fc5 | |
| Mirko Bunse | 73755b73e8 | |
| Mirko Bunse | db8a870495 | |
| Alejandro Moreo Fernandez | b485205c7c | |
| Mirko Bunse | 9be729386a | |
| Alejandro Moreo Fernandez | ffcfd64957 | |
| Alejandro Moreo Fernandez | 1f1757f0ee | |
| Alejandro Moreo Fernandez | cea96e87c6 | |
| Alejandro Moreo Fernandez | 584a4d07d4 | |
| Mirko Bunse | 3895cba610 | |
| Mirko Bunse | de3f8fd300 | |
| Mirko Bunse | 2311bb6649 | |
| Alejandro Moreo Fernandez | 55c62a9dd2 | |
```diff
@@ -20,15 +20,16 @@ jobs:
     env:
       QUAPY_TESTS_OMIT_LARGE_DATASETS: True
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v5
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip setuptools wheel
-        python -m pip install -e .[bayes,composable,tests]
+        python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+        python -m pip install -e .[bayes,tests]
     - name: Test with unittest
       run: python -m unittest

```
```diff
@@ -38,15 +39,18 @@ jobs:
     runs-on: ubuntu-latest
     if: github.ref == 'refs/heads/master'
     steps:
-    - uses: actions/checkout@v1
-    - name: Build documentation
-      uses: ammaraskar/sphinx-action@master
+    - uses: actions/checkout@v4
+    - name: Set up Python
+      uses: actions/setup-python@v5
       with:
-        pre-build-command: |
-          apt-get --allow-releaseinfo-change update -y && apt-get install -y git && git --version
-          python -m pip install --upgrade pip setuptools wheel "jax[cpu]"
-          python -m pip install -e .[composable,neural,docs]
-        docs-folder: "docs/"
+        python-version: 3.11
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip setuptools wheel "jax[cpu]"
+        python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+        python -m pip install -e .[neural,docs]
+    - name: Build documentation
+      run: sphinx-build -M html docs/source docs/build
     - name: Publish documentation
       run: |
         git clone ${{ github.server_url }}/${{ github.repository }}.git --branch gh-pages --single-branch __gh-pages/
```
```diff
@@ -1,23 +0,0 @@
-name: Pylint
-
-on: [push]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: ["3.8", "3.9", "3.10"]
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v3
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install pylint
-    - name: Analysing the code with pylint
-      run: |
-        pylint $(git ls-files '*.py')
```
```diff
@@ -326,7 +326,7 @@ class KDEyMLauto2(KDEyML):
         if self.target == 'likelihood':
             loss_fn = neg_loglikelihood_prev
         else:
-            loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prev, prev_hat)
+            loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prevtrue, prev_hat)

         pred_prev, neglikelihood = optim_minimize(loss_fn, init_prev, return_loss=True)
         loss_accum += neglikelihood
```
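The fix above replaces `prev` with `prevtrue` in the loss closure, so candidate prevalences are scored against the intended reference vector. A minimal sketch of how such a closure behaves, assuming only QuaPy's `qp.error.from_name` resolver (the prevalence values here are made up):

```python
import numpy as np
import quapy as qp

# hypothetical prevalence vectors, for illustration only
prev_true = np.asarray([0.2, 0.3, 0.5])
error_fn = qp.error.from_name('mae')  # resolves the error name to a callable

# the lambda captures prev_true, so every candidate prev_hat is scored against it
loss_fn = lambda prev_hat: error_fn(prev_true, prev_hat)
print(loss_fn(np.asarray([0.25, 0.25, 0.50])))  # small but nonzero error (~0.033)
```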
```diff
@@ -43,7 +43,7 @@ METHODS = [
     ('KDEy-AE', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='grid'), wrap_hyper(logreg_grid)),
     ('KDEy-AE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='optim'), wrap_hyper(logreg_grid)),
     ('KDEy-RAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='grid'), wrap_hyper(logreg_grid)),
-    ('KDEy-RAE', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='optim'), wrap_hyper(logreg_grid)),
+    ('KDEy-RAE+', KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='optim'), wrap_hyper(logreg_grid)),
 ]

```
```diff
@@ -28,7 +28,7 @@ def plot(xaxis, metrics_measurements, metrics_names, suffix):
     fig, ax1 = plt.subplots(figsize=(8, 6))

     def add_plot(ax, mean_error, std_error, name, color, marker):
-        ax.plot(xaxis, mean_error, label=name, marker=marker, color=color)
+        ax.plot(xaxis, mean_error, label=name, marker=marker, color=color, markersize=3)
         if std_error is not None:
             ax.fill_between(xaxis, mean_error - std_error, mean_error + std_error, color=color, alpha=0.2)

```
```diff
@@ -74,6 +74,56 @@ def plot(xaxis, metrics_measurements, metrics_names, suffix):
     plt.close()


+def plot_stack(xaxis, metrics_measurements, metrics_names, suffix):
+
+    # create the figure and the axes (4 vertically stacked panels)
+    fig, axs = plt.subplots(4, 1, figsize=(8, 12))
+
+    x = xaxis
+
+    indexes = np.arange(len(metrics_measurements))
+    axs_idx = 0
+    # colors = ['b', 'g', 'r', 'c', 'purple']
+    for m_te, m_tr in zip(indexes[:-1:2], indexes[1::2]):
+        metric_te, metric_tr = metrics_measurements[m_te], metrics_measurements[m_tr]
+        metric_te_name, metric_tr_name = metrics_names[m_te], metrics_names[m_tr]
+
+        metric_mean_tr = np.mean(metric_tr, axis=0)
+        metric_std_tr = np.std(metric_tr, axis=0)
+        metric_mean_te = np.mean(metric_te, axis=0)
+        metric_std_te = np.std(metric_te, axis=0)
+
+        axs[axs_idx].plot(xaxis, metric_mean_tr, label=metric_tr_name, marker='o', color='r', markersize=3)
+        axs[axs_idx].fill_between(xaxis, metric_mean_tr - metric_std_tr, metric_mean_tr + metric_std_tr, color='r', alpha=0.2)
+        minx = np.argmin(metric_mean_tr)
+        axs[axs_idx].axvline(xaxis[minx], color='r', linestyle='--', linewidth=1)
+
+        axs[axs_idx].plot(xaxis, metric_mean_te, label=metric_te_name, marker='o', color='b', markersize=3)
+        axs[axs_idx].fill_between(xaxis, metric_mean_te - metric_std_te, metric_mean_te + metric_std_te, color='b', alpha=0.2)
+        minx = np.argmin(metric_mean_te)
+        axs[axs_idx].axvline(xaxis[minx], color='b', linestyle='--', linewidth=1)
+
+        # axs[axs_idx].set_title(f'{metric_te_name} and {metric_tr_name}')
+        axs[axs_idx].legend(loc='lower right')
+        if axs_idx < len(indexes)//2 - 1:
+            axs[axs_idx].set_xticks([])
+
+        axs_idx += 1
+
+    # adjust the spacing between subplots
+    plt.tight_layout()
+
+    # save the plot
+    # plt.title(dataset)
+    # plt.show()
+    os.makedirs('./plots/likelihood/', exist_ok=True)
+    plt.savefig(f'./plots/likelihood/{dataset}-fig{suffix}.png')
+    plt.close()
+
+
 def generate_data(from_train=False):
     data = qp.datasets.fetch_UCIMulticlassDataset(dataset)
     n_classes = data.n_classes
```
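The pairing loop in `plot_stack` above walks the measurement list two at a time, treating even positions as test curves and the following odd positions as their training counterparts; a small sketch of that indexing:

```python
import numpy as np

# e.g., measurements ordered as AE(te), AE(tr), RAE(te), RAE(tr), ...
indexes = np.arange(8)
pairs = list(zip(indexes[:-1:2], indexes[1::2]))
print(pairs)  # [(0, 1), (2, 3), (4, 5), (6, 7)] -> one subplot per (te, tr) pair
```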
```diff
@@ -110,7 +160,7 @@ def generate_data(from_train=False):
     likelihood_value = []

     # for bandwidth in np.linspace(0.01, 0.2, 50):
-    for bandwidth in np.logspace(-5, np.log10(0.2), 50):
+    for bandwidth in np.logspace(-4, np.log10(0.2), 50):
         mix_densities = kde.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth)
         test_densities = [kde.pdf(kde_i, te_posteriors) for kde_i in mix_densities]

```
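The only change in this hunk raises the lower end of the bandwidth grid by an order of magnitude; both grids keep 50 log-spaced points ending at 0.2:

```python
import numpy as np

old_grid = np.logspace(-5, np.log10(0.2), 50)  # starts at 1e-05
new_grid = np.logspace(-4, np.log10(0.2), 50)  # starts at 1e-04
print(old_grid[0], old_grid[-1])  # 1e-05 0.2
print(new_grid[0], new_grid[-1])  # 0.0001 0.2
```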
```diff
@@ -172,16 +222,24 @@ for i, dataset in enumerate(tqdm(DATASETS, desc='processing datasets', total=len
     measurement_names = []
     if show_ae:
         measurements.append(AE_error_te)
-        measurement_names.append('AE')
+        measurement_names.append('AE(te)')
+        measurements.append(AE_error_tr)
+        measurement_names.append('AE(tr)')
     if show_rae:
         measurements.append(RAE_error_te)
-        measurement_names.append('RAE')
+        measurement_names.append('RAE(te)')
+        measurements.append(RAE_error_tr)
+        measurement_names.append('RAE(tr)')
     if show_kld:
         measurements.append(KLD_error_te)
-        measurement_names.append('KLD')
+        measurement_names.append('KLD(te)')
+        measurements.append(KLD_error_tr)
+        measurement_names.append('KLD(tr)')
     if show_mse:
         measurements.append(MSE_error_te)
-        measurement_names.append('MSE')
+        measurement_names.append('MSE(te)')
+        measurements.append(MSE_error_tr)
+        measurement_names.append('MSE(tr)')
     measurements.append(normalize_metric(LIKE_value_te))
     measurements.append(normalize_metric(LIKE_value_tr))
     measurement_names.append('NLL(te)')
```
```diff
@@ -200,7 +258,8 @@ for i, dataset in enumerate(tqdm(DATASETS, desc='processing datasets', total=len
     # measurements.append(normalize_metric(LIKE_value_tr))
     # measurement_names.append('NLL(te)')
     # measurement_names.append('NLL(tr)')
-    plot(xaxis, measurements, measurement_names, suffix='AVEtr')
+    # plot(xaxis, measurements, measurement_names, suffix='AVEtr')
+    plot_stack(xaxis, measurements, measurement_names, suffix='AVEtr')

```
```diff
@@ -0,0 +1,126 @@
+import os
+from time import time
+import numpy as np
+from sklearn.linear_model import LogisticRegression
+import quapy as qp
+#from KDEy.kdey_devel import KDEyMLauto, KDEyMLauto2, KDEyMLred
+from LocalStack.method import LocalStackingQuantification, LocalStackingQuantification2
+from quapy.method.aggregative import PACC, EMQ, KDEyML
+from quapy.model_selection import GridSearchQ
+from quapy.protocol import UPP
+from pathlib import Path
+
+SEED = 1
+
+
+METHODS = [
+    ('PACC', PACC(), {}),
+    ('EMQ', EMQ(), {}),
+    ('KDEy-ML', KDEyML(), {}),
+]
+
+TRANSDUCTIVE_METHODS = [
+    ('LSQ', LocalStackingQuantification(EMQ()), {}),
+    ('LSQ2', LocalStackingQuantification2(EMQ()), {})
+]
+
+
+def show_results(result_path):
+    import pandas as pd
+    df = pd.read_csv(result_path + '.csv', sep='\t')
+    pd.set_option('display.max_columns', None)
+    pd.set_option('display.max_rows', None)
+    pd.set_option('display.width', 1000)  # adjust the maximum display width
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE"], margins=True)
+    print(pv)
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["MRAE"], margins=True)
+    print(pv)
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["KLD"], margins=True)
+    print(pv)
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["TR-TIME"], margins=True)
+    print(pv)
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["TE-TIME"], margins=True)
+    print(pv)
+
+
+if __name__ == '__main__':
+
+    qp.environ['SAMPLE_SIZE'] = 500
+    qp.environ['N_JOBS'] = -1
+    n_bags_val = 25
+    n_bags_test = 100
+    result_dir = f'results_quantification/localstack'
+
+    os.makedirs(result_dir, exist_ok=True)
+
+    global_result_path = f'{result_dir}/allmethods'
+    with open(global_result_path + '.csv', 'wt') as csv:
+        csv.write(f'Method\tDataset\tMAE\tMRAE\tKLD\tTR-TIME\tTE-TIME\n')
+
+    for method_name, quantifier, param_grid in METHODS + TRANSDUCTIVE_METHODS:
+
+        print('Init method', method_name)
+
+        with open(global_result_path + '.csv', 'at') as csv:
+            for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
+                print('init', dataset)
+
+                # run_experiment(global_result_path, method_name, quantifier, param_grid, dataset)
+                local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
+
+                if os.path.exists(local_result_path):
+                    print(f'result file {local_result_path} already exists; skipping')
+                    report = qp.util.load_report(local_result_path)
+
+                else:
+                    with qp.util.temp_seed(SEED):
+                        data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
+                        train, test = data.train_test
+
+                        transductive_names = [name for (name, *_) in TRANSDUCTIVE_METHODS]
+
+                        if method_name not in transductive_names:
+                            if len(param_grid) == 0:
+                                t_init = time()
+                                quantifier.fit(train)
+                                train_time = time() - t_init
+                            else:
+                                # model selection (train)
+                                train, val = train.split_stratified(random_state=SEED)
+                                protocol = UPP(val, repeats=n_bags_val)
+                                modsel = GridSearchQ(
+                                    quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
+                                )
+                                t_init = time()
+                                try:
+                                    modsel.fit(train)
+                                    print(f'best params {modsel.best_params_}')
+                                    print(f'best score {modsel.best_score_}')
+                                    quantifier = modsel.best_model()
+                                except:
+                                    print('something went wrong... trying to fit the default model')
+                                    quantifier.fit(train)
+                                train_time = time() - t_init
+                        else:
+                            # transductive
+                            t_init = time()
+                            quantifier.fit(train)  # <-- does nothing, actually (only projects X into posteriors)
+                            train_time = time() - t_init
+
+                        # test
+                        t_init = time()
+                        protocol = UPP(test, repeats=n_bags_test)
+                        report = qp.evaluation.evaluation_report(
+                            quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True
+                        )
+                        test_time = time() - t_init
+                        report['tr_time'] = train_time
+                        report['te_time'] = test_time
+                        report.to_csv(local_result_path)
+
+                means = report.mean(numeric_only=True)
+                csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
+                csv.flush()
+
+    show_results(global_result_path)
```
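For readers unfamiliar with the evaluation pattern used in this script, the inductive branch boils down to the following sketch; it assumes a working QuaPy installation, and 'dry-bean' stands in for any UCI multiclass dataset identifier:

```python
import quapy as qp
from quapy.method.aggregative import PACC
from quapy.protocol import UPP

qp.environ['SAMPLE_SIZE'] = 500
data = qp.datasets.fetch_UCIMulticlassDataset('dry-bean')  # example dataset
train, test = data.train_test

quantifier = PACC().fit(train)    # induce the quantifier on training data
protocol = UPP(test, repeats=10)  # artificial-prevalence test bags
report = qp.evaluation.evaluation_report(quantifier, protocol, error_metrics=['mae', 'mrae'])
print(report.mean(numeric_only=True))  # average errors over the bags
```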
```diff
@@ -0,0 +1,112 @@
+import numpy as np
+import quapy as qp
+from sklearn.multioutput import MultiOutputRegressor
+from sklearn.svm import SVR
+
+from quapy.data import LabelledCollection
+from quapy.method.base import BaseQuantifier
+from quapy.method.aggregative import AggregativeSoftQuantifier
+
+
+class LocalStackingQuantification(BaseQuantifier):
+
+    def __init__(self, surrogate_quantifier, n_samples_gen=200, n_samples_sel=50, comparison_measure='ae', random_state=None):
+        assert isinstance(surrogate_quantifier, AggregativeSoftQuantifier), \
+            f'the surrogate quantifier must be of type {AggregativeSoftQuantifier.__name__}'
+        self.surrogate_quantifier = surrogate_quantifier
+        self.n_samples_gen = n_samples_gen
+        self.n_samples_sel = n_samples_sel
+        self.comparison_measure = qp.error.from_name(comparison_measure)
+        self.random_state = random_state
+
+    def fit(self, data: LabelledCollection):
+        train, val = data.split_stratified()
+        self.surrogate_quantifier.fit(train)
+        self.val_data = val
+        return self
+
+    def normalize(self, out_simplex: np.ndarray):
+        in_simplex = out_simplex / out_simplex.sum()
+        return in_simplex
+
+    def quantify(self, instances: np.ndarray):
+        assert hasattr(self, 'val_data'), 'quantify called before fit'
+        pred_prevs = self.surrogate_quantifier.quantify(instances)
+        test_size = instances.shape[0]
+
+        samples = []
+        samples_pred_prevs = []
+        samples_distance = []
+        for i in range(self.n_samples_gen):
+            sample_i = self.val_data.sampling(test_size, *pred_prevs, random_state=self.random_state)
+            pred_prev_sample_i = self.surrogate_quantifier.quantify(sample_i.X)
+            err_dist = self.comparison_measure(pred_prevs, pred_prev_sample_i)
+
+            samples.append(sample_i)
+            samples_pred_prevs.append(pred_prev_sample_i)
+            samples_distance.append(err_dist)
+
+        ord_distances = np.argsort(samples_distance)
+        samples_sel = np.asarray(samples)[ord_distances][:self.n_samples_sel]
+        samples_pred_prevs_sel = np.asarray(samples_pred_prevs)[ord_distances][:self.n_samples_sel]
+
+        reg = MultiOutputRegressor(SVR())
+        reg_X = samples_pred_prevs_sel
+        reg_y = [s.prevalence() for s in samples_sel]
+        reg.fit(reg_X, reg_y)
+
+        corrected_prev = reg.predict([pred_prevs])[0]
+
+        corrected_prev = self.normalize(corrected_prev)
+        return corrected_prev
+
+
+class LocalStackingQuantification2(BaseQuantifier):
+
+    """
+    Instead of selecting training samples for which the predicted prevalence resembles the
+    prevalence predicted for the test set, this variant directly draws training samples with
+    the prevalence predicted for the test set.
+    """
+
+    def __init__(self, surrogate_quantifier, n_samples_gen=200, n_samples_sel=50, comparison_measure='ae', random_state=None):
+        assert isinstance(surrogate_quantifier, AggregativeSoftQuantifier), \
+            f'the surrogate quantifier must be of type {AggregativeSoftQuantifier.__name__}'
+        self.surrogate_quantifier = surrogate_quantifier
+        self.n_samples_gen = n_samples_gen
+        self.n_samples_sel = n_samples_sel
+        self.comparison_measure = qp.error.from_name(comparison_measure)
+        self.random_state = random_state
+
+    def fit(self, data: LabelledCollection):
+        train, val = data.split_stratified()
+        self.surrogate_quantifier.fit(train)
+        self.val_data = val
+        return self
+
+    def normalize(self, out_simplex: np.ndarray):
+        in_simplex = out_simplex / out_simplex.sum()
+        return in_simplex
+
+    def quantify(self, instances: np.ndarray):
+        assert hasattr(self, 'val_data'), 'quantify called before fit'
+        pred_prevs = self.surrogate_quantifier.quantify(instances)
+        test_size = instances.shape[0]
+
+        samples = []
+        samples_pred_prevs = []
+        for i in range(self.n_samples_gen):
+            sample_i = self.val_data.sampling(test_size, *pred_prevs, random_state=self.random_state)
+            pred_prev_sample_i = self.surrogate_quantifier.quantify(sample_i.X)
+            samples.append(sample_i)
+            samples_pred_prevs.append(pred_prev_sample_i)
+
+        reg = MultiOutputRegressor(SVR())
+        reg_X = samples_pred_prevs
+        reg_y = [s.prevalence() for s in samples]
+        reg.fit(reg_X, reg_y)
+
+        corrected_prev = reg.predict([pred_prevs])[0]
+
+        corrected_prev = self.normalize(corrected_prev)
+        return corrected_prev
```
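A usage sketch for the two classes above, assuming they are importable as in the accompanying experiment script (`LocalStack.method`); the surrogate must be an aggregative soft quantifier such as EMQ:

```python
import quapy as qp
from quapy.method.aggregative import EMQ
from LocalStack.method import LocalStackingQuantification  # path as in the experiment script

data = qp.datasets.fetch_UCIMulticlassDataset('dry-bean')  # example dataset
train, test = data.train_test

lsq = LocalStackingQuantification(EMQ(), n_samples_gen=200, n_samples_sel=50)
lsq.fit(train)                    # fits the surrogate on one split, keeps the rest for sampling
estimated = lsq.quantify(test.X)  # regression-corrected prevalence estimate
print(estimated)
```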
```diff
@@ -11,9 +11,14 @@ import sys
 from os.path import join
 quapy_path = join(pathlib.Path(__file__).parents[2].resolve().as_posix(), 'quapy')
 wiki_path = join(pathlib.Path(__file__).parents[0].resolve().as_posix(), 'wiki')
+source_path = pathlib.Path(__file__).parents[2].resolve().as_posix()
 print(f'quapy path={quapy_path}')
+print(f'quapy source path={source_path}')
 sys.path.insert(0, quapy_path)
 sys.path.insert(0, wiki_path)
+sys.path.insert(0, source_path)
+
+print(sys.path)


 project = 'QuaPy: A Python-based open-source framework for quantification'
```
````diff
@@ -447,7 +447,7 @@ The [](quapy.method.composable) module allows the composition of quantification
 ```sh
 pip install --upgrade pip setuptools wheel
 pip install "jax[cpu]"
-pip install quapy[composable]
+pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
 ```

 ### Basics
````
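With qunfold installed as above, a composed quantifier can be built from a loss and a feature transformer. A minimal sketch, assuming `LeastSquaresLoss` and `ClassTransformer` are re-exported by `quapy.method.composable` alongside the transformers listed in the module's `__all__` (shown in a later hunk):

```python
from sklearn.ensemble import RandomForestClassifier
from quapy.method.composable import ComposableQuantifier, LeastSquaresLoss, ClassTransformer

# an ACC-like method: least-squares loss over classifier-based features
quantifier = ComposableQuantifier(
    LeastSquaresLoss(),
    ClassTransformer(RandomForestClassifier(oob_score=True, random_state=0)),
)
# quantifier.fit(train); quantifier.quantify(test.X)  # standard QuaPy workflow
```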
```diff
@@ -2,6 +2,13 @@
 This example illustrates the composition of quantification methods from
 arbitrary loss functions and feature transformations. It will extend the basic
 example on the usage of quapy with this composition.
+
+This example requires the installation of qunfold, the back-end of QuaPy's
+composition module:
+
+    pip install --upgrade pip setuptools wheel
+    pip install "jax[cpu]"
+    pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
 """

 import numpy as np
```
```diff
@@ -1,5 +1,15 @@
 """This module allows the composition of quantification methods from loss functions and feature transformations. This functionality is realized through an integration of the qunfold package: https://github.com/mirkobunse/qunfold."""

+_import_error_message = """qunfold, the back-end of quapy.method.composable, is not properly installed.
+
+To fix this error, call:
+
+    pip install --upgrade pip setuptools wheel
+    pip install "jax[cpu]"
+    pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+"""
+
+try:
 import qunfold
 from qunfold.quapy import QuaPyWrapper
 from qunfold.sklearn import CVClassifier
@@ -40,6 +50,8 @@ __all__ = [ # control public members, e.g., for auto-documentation in sphinx; om
     "GaussianKernelTransformer",
     "GaussianRFFKernelTransformer",
 ]
+except ImportError as e:
+    raise ImportError(_import_error_message) from e

 def ComposableQuantifier(loss, transformer, **kwargs):
     """A generic quantification / unfolding method that solves a linear system of equations.
```
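The hunk wraps the qunfold imports in a guard so that a missing optional dependency raises the actionable message defined above rather than a bare `ImportError`; the pattern in isolation:

```python
# generic sketch of the guarded-import pattern used by this module
_import_error_message = "optional dependency missing; see the install instructions"

try:
    import qunfold  # optional back-end
except ImportError as e:
    raise ImportError(_import_error_message) from e
```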
setup.py (1 line changed):

```diff
@@ -125,7 +125,6 @@ setup(
     # projects.
     extras_require={ # Optional
         'bayes': ['jax', 'jaxlib', 'numpyro'],
-        'composable': ['qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4'],
         'neural': ['torch'],
         'tests': ['certifi'],
         'docs' : ['sphinx-rtd-theme', 'myst-parser'],
```