qp.environment was not reachable within Parallel processes; changing backend to threading
parent e7527bd7ed
commit 2fda46fc13

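The rationale in the commit title is that qp.environ is a plain module-level dictionary: a value assigned to it in the main process is not seen by joblib's default process-based workers, whereas threads of the same process share it. The snippet below is a minimal sketch of that difference; it is not part of the commit and only assumes quapy and joblib are installed.

from joblib import Parallel, delayed

import quapy as qp

qp.environ['SAMPLE_SIZE'] = 100  # set in the main process


def read_sample_size(_):
    # joblib's default process-based backend ("loky") runs this in a worker
    # process with its own freshly imported copy of quapy, so the assignment
    # above is typically not visible there and SAMPLE_SIZE is still None.
    return qp.environ['SAMPLE_SIZE']


# process-based workers: usually [None, None]
print(Parallel(n_jobs=2)(delayed(read_sample_size)(i) for i in range(2)))

# threads share the parent's modules: [100, 100]
print(Parallel(n_jobs=2, backend="threading")(delayed(read_sample_size)(i) for i in range(2)))
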
@@ -16,7 +16,6 @@ import argparse
import torch
import shutil

qp.environ['SAMPLE_SIZE'] = settings.SAMPLE_SIZE

def newLR():
    return LogisticRegression(max_iter=1000, solver='lbfgs', n_jobs=-1)

@@ -116,12 +115,14 @@ def save_results(dataset_name, model_name, optim_loss, *results):

def run(experiment):

    qp.environ['SAMPLE_SIZE'] = settings.SAMPLE_SIZE

    optim_loss, dataset_name, (model_name, model, hyperparams) = experiment

    if is_already_computed(dataset_name, model_name, optim_loss=optim_loss):
        print(f'result for dataset={dataset_name} model={model_name} loss={optim_loss} already computed.')
        return
-   elif (optim_loss=='mae' and 'mrae' in model_name) or (optim_loss=='mrae' and 'mae' in model_name):
+   elif (optim_loss == 'mae' and 'mrae' in model_name) or (optim_loss=='mrae' and 'mae' in model_name):
        print(f'skipping model={model_name} for optim_loss={optim_loss}')
        return
    else:

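Setting qp.environ['SAMPLE_SIZE'] at the top of run() means each experiment re-establishes the sample size inside whichever worker ends up executing it, which holds independently of the Parallel backend in use.
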
@@ -163,7 +164,8 @@ def run(experiment):
        test=benchmark_eval.test,
        sample_size=settings.SAMPLE_SIZE,
        n_prevpoints=21,
-       n_repetitions=25
+       n_repetitions=25,
+       n_jobs=-1 if isinstance(model, qp.method.meta.Ensemble) else 1
    )
    test_estim_prevalence = model.quantify(benchmark_eval.test.instances)
    test_true_prevalence = benchmark_eval.test.prevalence()

@@ -10,7 +10,6 @@ from . import model_selection
from . import classification
from quapy.method.base import isprobabilistic, isaggregative


environ = {
    'SAMPLE_SIZE': None,
    'UNK_TOKEN': '[UNK]',

@@ -21,6 +20,4 @@ environ = {


def isbinary(x):
    return x.binary


    return x.binary

@@ -1,8 +1,6 @@
import numpy as np
from sklearn.metrics import f1_score

-import quapy as qp


def f1e(y_true, y_pred):
    return 1. - f1_score(y_true, y_pred, average='macro')

@@ -66,6 +64,7 @@ def smooth(p, eps):


def __check_eps(eps):
+   import quapy as qp
    sample_size = qp.environ['SAMPLE_SIZE']
    if eps is None:
        if sample_size is None:

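The local import inside __check_eps (while the module-level import quapy as qp is dropped in the error-module hunk above) looks like a way to avoid importing the quapy package while it is still initialising, since quapy.error is itself pulled in by quapy/__init__.py; the environment lookup then happens at call time in whichever thread runs the evaluation. Below is a hedged sketch of the resulting pattern: everything past the last line shown in the hunk, including the error message and the 1/(2*sample_size) default, is an assumption rather than the committed code.

def __check_eps(eps):
    import quapy as qp  # deferred import: quapy may still be initialising at module-load time
    sample_size = qp.environ['SAMPLE_SIZE']
    if eps is None:
        if sample_size is None:
            # assumed behaviour: without an explicit eps or a configured
            # sample size there is nothing sensible to smooth with
            raise ValueError('eps is None and qp.environ["SAMPLE_SIZE"] is not set')
        eps = 1. / (2. * sample_size)  # a common smoothing choice for relative errors
    return eps
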
@@ -82,7 +82,7 @@ class Ensemble(BaseQuantifier):

        is_static_policy = (self.policy in qp.error.QUANTIFICATION_ERROR_NAMES)

-       self.ensemble = Parallel(n_jobs=self.n_jobs)(
+       self.ensemble = Parallel(n_jobs=self.n_jobs, backend="threading")(
            delayed(_delayed_new_instance)(
                self.base_quantifier, data, val_split, prev, posteriors, keep_samples=is_static_policy,
                verbose=self.verbose, sample_size=sample_size

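A trade-off worth noting (not stated in the commit): with backend="threading" the ensemble workers share the parent's memory, which is exactly what makes the module-level qp.environ visible to every _delayed_new_instance call, but they are also subject to Python's GIL, so the speed-up depends on how much of the training work runs inside numpy/scikit-learn routines that release it. If that ever became a bottleneck, an alternative would be to keep a process-based backend and pass the required settings to the workers explicitly, as the sample_size=sample_size argument above already does for the sample size.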