forked from moreo/QuaPy

merged
commit e55caf82fd

 TODO.txt | 3 +++
@@ -6,3 +6,6 @@ Add prediction - artificial sampling
 Add readers for typical datasets used in Quantification
 Add NAE, NRAE
 Add "measures for evaluating ordinal"?
+Document methods with paper references
+The parallel training in svmperf seems not to work

@@ -2,6 +2,7 @@ from sklearn.metrics import f1_score
 import numpy as np

 SAMPLE_SIZE = None

@@ -77,6 +77,7 @@ def normalize_prevalence(prevalences):
         return np.ones_like(prevalences) / prevalences.size


 def num_prevalence_combinations(nclasses:int, nprevpoints:int, nrepeats:int):
     """
     Computes the number of prevalence combinations in the nclasses-dimensional simplex if nprevpoints equally distant

@@ -118,3 +119,4 @@ def get_nprevpoints_approximation(nclasses, nrepeats, combinations_budget):
         return nprevpoints-1
     else:
         nprevpoints+=1

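The two helpers touched in the hunks above count points on a prevalence grid: num_prevalence_combinations counts how many prevalence vectors can be formed when each of nclasses prevalence values is drawn from nprevpoints equally spaced points and the vector must sum to 1 (times nrepeats repetitions per combination), and get_nprevpoints_approximation searches for the largest nprevpoints whose combination count still fits a budget. The function bodies are not shown in this diff; the following is a minimal illustrative sketch, assuming the standard stars-and-bars count and the incremental search suggested by the "return nprevpoints-1" / "nprevpoints+=1" context lines:

from scipy.special import binom

def num_prevalence_combinations_sketch(nclasses: int, nprevpoints: int, nrepeats: int) -> int:
    # A prevalence vector on this grid distributes (nprevpoints - 1) mass units
    # over nclasses classes, so the count is a stars-and-bars binomial coefficient,
    # repeated nrepeats times.
    return int(binom(nprevpoints - 1 + nclasses - 1, nclasses - 1) * nrepeats)

def get_nprevpoints_approximation_sketch(nclasses, nrepeats, combinations_budget):
    # Grow the grid resolution until the count exceeds the budget, then return
    # the last resolution that still fit.
    nprevpoints = 1
    while True:
        combinations = num_prevalence_combinations_sketch(nclasses, nprevpoints, nrepeats)
        if combinations > combinations_budget:
            return nprevpoints - 1
        else:
            nprevpoints += 1

For example, with 3 classes, 1 repeat and a budget of 5000 combinations, the sketch returns 99 (4,950 combinations fit the budget; 5,050 would not).
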
@@ -337,6 +337,10 @@ class OneVsAll(AggregativeQuantifier):
                 delayed(self._delayed_binary_predict)(c, self.class_method, X) for c in self.classes
             )
         )
+<<<<<<< HEAD
+=======
+        print('one vs all: ', prevalences)
+>>>>>>> 2361186a01c53e744f4291e2e2299700216ff139
         return F.normalize_prevalence(prevalences)

     @property

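The added lines above are unresolved merge-conflict markers around a debug print; the surrounding context collects one binary prevalence estimate per class in parallel and then normalizes the resulting vector with F.normalize_prevalence. As an illustration of that last step (not the exact library code), here is a sketch of an L1 normalization with the uniform fallback visible in the "np.ones_like(prevalences) / prevalences.size" context line of the earlier hunk:

import numpy as np

def normalize_prevalence_sketch(prevalences):
    # Rescale per-class prevalence estimates so they sum to 1; if every binary
    # quantifier returned 0, fall back to the uniform distribution.
    prevalences = np.asarray(prevalences, dtype=float)
    total = prevalences.sum()
    if total > 0:
        return prevalences / total
    return np.ones_like(prevalences) / prevalences.size

# e.g. normalize_prevalence_sketch([0.2, 0.5, 0.5]) -> array([0.1667, 0.4167, 0.4167])
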
@@ -5,6 +5,7 @@ import contextlib
 import numpy as np


 def get_parallel_slices(n_tasks, n_jobs=-1):
     if n_jobs == -1:
         n_jobs = multiprocessing.cpu_count()

@@ -32,4 +33,3 @@ def temp_seed(seed):
     finally:
         np.random.set_state(state)

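Only context lines of the two utility functions appear above, but their roles are readable from them: get_parallel_slices splits n_tasks into one index range per worker (defaulting n_jobs to multiprocessing.cpu_count()), and temp_seed is a context manager that seeds NumPy's global RNG and restores the saved state in its finally block. A hedged sketch of both, as an illustration rather than the exact implementation:

import contextlib
import multiprocessing
import numpy as np

def get_parallel_slices_sketch(n_tasks, n_jobs=-1):
    # Split range(n_tasks) into contiguous slices, one per job; the last slice
    # absorbs the remainder.
    if n_jobs == -1:
        n_jobs = multiprocessing.cpu_count()
    batch = n_tasks // n_jobs
    remainder = n_tasks % n_jobs
    return [slice(job * batch, (job + 1) * batch + (remainder if job == n_jobs - 1 else 0))
            for job in range(n_jobs)]

@contextlib.contextmanager
def temp_seed_sketch(seed):
    # Temporarily seed the global NumPy RNG; the finally block restores the
    # previous state, mirroring the np.random.set_state(state) line above.
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)
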
 test.py | 3 +++
@@ -3,6 +3,7 @@ from sklearn.svm import LinearSVC
 import quapy as qp
 import quapy.functional as F

 SAMPLE_SIZE=500
 binary = False

@@ -28,6 +29,7 @@ model = qp.method.aggregative.ClassifyAndCount(learner)
 # model = qp.method.aggregative.ProbabilisticClassifyAndCount(learner)
 # model = qp.method.aggregative.ProbabilisticAdjustedClassifyAndCount(learner)
 # model = qp.method.aggregative.ExpectationMaximizationQuantifier(learner)

 model.fit(dataset.training)

 # estimating class prevalences

@@ -51,3 +53,4 @@ print(f'Evaluation according to the artificial sampling protocol ({len(true_prev
 for error in qp.error.QUANTIFICATION_ERROR:
     score = error(true_prev, estim_prev)
     print(f'{error.__name__}={score:.5f}')

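The loop above applies every measure registered in qp.error.QUANTIFICATION_ERROR to the true and estimated prevalence vectors. As an illustration of the kind of function being called (assumed signature; the library's own measures may add details such as smoothing), here is a sketch of a mean-absolute-error-style quantification error:

import numpy as np

def mae_sketch(true_prev, estim_prev):
    # Mean absolute difference between true and estimated prevalence values,
    # averaged over classes (and over samples if 2-D arrays are passed).
    true_prev = np.asarray(true_prev, dtype=float)
    estim_prev = np.asarray(estim_prev, dtype=float)
    return np.abs(true_prev - estim_prev).mean()

# e.g. mae_sketch([0.5, 0.3, 0.2], [0.4, 0.4, 0.2]) -> 0.0667 (to 4 decimals)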