bugfix in bayesian cc, merged
commit ea92c45405
@@ -20,15 +20,16 @@ jobs:
     env:
       QUAPY_TESTS_OMIT_LARGE_DATASETS: True
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v5
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip setuptools wheel
-        python -m pip install -e .[bayes,composable,tests]
+        python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+        python -m pip install -e .[bayes,tests]
    - name: Test with unittest
      run: python -m unittest
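The QUAPY_TESTS_OMIT_LARGE_DATASETS flag exported above can be read from the test suite itself; the sketch below is illustrative only (the test class and skip message are not taken from QuaPy's actual tests):

```python
import os
import unittest

# read the flag set in the workflow; any value other than "True" keeps the tests enabled
OMIT_LARGE_DATASETS = os.environ.get("QUAPY_TESTS_OMIT_LARGE_DATASETS", "False") == "True"


class TestDatasets(unittest.TestCase):

    @unittest.skipIf(OMIT_LARGE_DATASETS, "large-dataset tests omitted in CI")
    def test_fetch_large_dataset(self):
        ...  # download and sanity-check a large dataset here
```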
@@ -38,15 +39,18 @@ jobs:
     runs-on: ubuntu-latest
     if: github.ref == 'refs/heads/master'
     steps:
-    - uses: actions/checkout@v1
-    - name: Build documentation
-      uses: ammaraskar/sphinx-action@master
+    - uses: actions/checkout@v4
+    - name: Set up Python
+      uses: actions/setup-python@v5
      with:
-        pre-build-command: |
-          apt-get --allow-releaseinfo-change update -y && apt-get install -y git && git --version
-          python -m pip install --upgrade pip setuptools wheel "jax[cpu]"
-          python -m pip install -e .[composable,neural,docs]
-        docs-folder: "docs/"
+        python-version: 3.11
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip setuptools wheel "jax[cpu]"
+        python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+        python -m pip install -e .[neural,docs]
+    - name: Build documentation
+      run: sphinx-build -M html docs/source docs/build
    - name: Publish documentation
      run: |
        git clone ${{ github.server_url }}/${{ github.repository }}.git --branch gh-pages --single-branch __gh-pages/
@@ -1,23 +0,0 @@
-name: Pylint
-
-on: [push]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: ["3.8", "3.9", "3.10"]
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v3
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install pylint
-      - name: Analysing the code with pylint
-        run: |
-          pylint $(git ls-files '*.py')
@@ -167,3 +167,4 @@ TweetSentQuant

 *.png
 .idea
@@ -1,9 +1,10 @@
+Change Log 0.1.10
+-----------------
+
+- ...
+
 Change Log 0.1.9
 ----------------
-- [TODO] add LeQua2024 and normalized match distance to qp.error
-- [TODO] add CDE-iteration and Bayes-CDE methods
-- [TODO] add Friedman's method and DeBias
-- [TODO] check ignore warning stuff (check https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings)

+- Added LeQua 2024 datasets and normalized match distance to qp.error
13 README.md
@@ -45,19 +45,18 @@ of the test set.

 ```python
 import quapy as qp
 from sklearn.linear_model import LogisticRegression

-dataset = qp.datasets.fetch_twitter('semeval16')
+dataset = qp.datasets.fetch_UCIBinaryDataset("yeast")
+training, test = dataset.train_test

 # create an "Adjusted Classify & Count" quantifier
-model = qp.method.aggregative.ACC(LogisticRegression())
-model.fit(dataset.training)
+model = qp.method.aggregative.ACC()
+model.fit(training)

-estim_prevalence = model.quantify(dataset.test.instances)
-true_prevalence = dataset.test.prevalence()
+estim_prevalence = model.quantify(test.X)
+true_prevalence = test.prevalence()

 error = qp.error.mae(true_prevalence, estim_prevalence)

 print(f'Mean Absolute Error (MAE)={error:.3f}')
 ```
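For reference, the added lines assemble into the following self-contained snippet (a sketch; it assumes ACC() falls back to a default classifier when none is passed, as the new call implies):

```python
import quapy as qp

# fetch a small UCI binary dataset and split it as in the updated README
dataset = qp.datasets.fetch_UCIBinaryDataset("yeast")
training, test = dataset.train_test

# "Adjusted Classify & Count" with its default classifier
model = qp.method.aggregative.ACC()
model.fit(training)

estim_prevalence = model.quantify(test.X)
true_prevalence = test.prevalence()

error = qp.error.mae(true_prevalence, estim_prevalence)
print(f'Mean Absolute Error (MAE)={error:.3f}')
```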
11 TODO.txt
@@ -1,3 +1,8 @@
-- Test the return_type="index" in protocols and finish the "distributin_samples.py" example
-- Add EDy (an implementation is available at quantificationlib)
--
+- [TODO] Test the return_type="index" in protocols and finish the "distributin_samples.py" example
+- [TODO] Add EDy (an implementation is available at quantificationlib)
+- [TODO] add ensemble methods SC-MQ, MC-SQ, MC-MQ
+- [TODO] add HistNetQ
+- [TODO] add CDE-iteration and Bayes-CDE methods
+- [TODO] add Friedman's method and DeBias
+- [TODO] check ignore warning stuff
+  check https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings
@@ -11,9 +11,14 @@ import sys
 from os.path import join
 quapy_path = join(pathlib.Path(__file__).parents[2].resolve().as_posix(), 'quapy')
+wiki_path = join(pathlib.Path(__file__).parents[0].resolve().as_posix(), 'wiki')
+source_path = pathlib.Path(__file__).parents[2].resolve().as_posix()
 print(f'quapy path={quapy_path}')
+print(f'quapy source path={source_path}')
 sys.path.insert(0, quapy_path)
+sys.path.insert(0, wiki_path)
+sys.path.insert(0, source_path)

 print(sys.path)


 project = 'QuaPy: A Python-based open-source framework for quantification'
@@ -447,7 +447,7 @@ The [](quapy.method.composable) module allows the composition of quantification
 ```sh
 pip install --upgrade pip setuptools wheel
 pip install "jax[cpu]"
-pip install quapy[composable]
+pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
 ```

 ### Basics
@@ -2,6 +2,13 @@
 This example illustrates the composition of quantification methods from
 arbitrary loss functions and feature transformations. It will extend the basic
 example on the usage of quapy with this composition.
+
+This example requires the installation of qunfold, the back-end of QuaPy's
+composition module:
+
+    pip install --upgrade pip setuptools wheel
+    pip install "jax[cpu]"
+    pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
 """

 import numpy as np
@@ -33,9 +33,9 @@ quantifier = KDEyML(classifier=LogisticRegression())

 # model selection
 param_grid = {
-    'classifier__C': np.logspace(-3, 3, 7), # classifier-dependent: inverse of regularization strength
-    'classifier__class_weight': ['balanced', None], # classifier-dependent: weights of each class
-    'bandwidth': np.linspace(0.01, 0.2, 20) # quantifier-dependent: bandwidth of the kernel
+    'classifier__C': np.logspace(-3, 3, 7),          # classifier-dependent: inverse of regularization strength
+    'classifier__class_weight': ['balanced', None],  # classifier-dependent: weights of each class
+    'bandwidth': np.linspace(0.01, 0.2, 20)          # quantifier-dependent: bandwidth of the kernel
 }
 model_selection = GridSearchQ(quantifier, param_grid, protocol=val_generator, error='mrae', refit=False, verbose=True)
 quantifier = model_selection.fit(training)
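Once fitted as above, the returned object can be used like any other quantifier. The continuation below is a sketch only: the held-out `test` collection is assumed to come from earlier in the example, and `best_params_` is assumed to be the attribute exposing the winning configuration:

```python
# inspect the configuration selected by the grid search (attribute name assumed)
print(f'best hyper-parameters: {model_selection.best_params_}')

# the fitted search object quantifies by delegating to its best model;
# `test` is assumed to be a held-out LabelledCollection from earlier in the example
estim_prevalence = quantifier.quantify(test.X)
print(f'estimated test prevalence: {estim_prevalence}')
```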
@@ -637,7 +637,7 @@ def fetch_UCIMulticlassDataset(
     if n_train > max_train_instances:
         train_prop = (max_train_instances / n)

-    data = Dataset(*data.split_stratified(train_prop, random_state=0))
+    data = Dataset(*data.split_stratified(train_prop, random_state=0), name=dataset_name)

     if standardize:
         data = standardizer(data)
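A quick way to see the effect of passing `name=dataset_name` through the re-split is sketched below; 'dry-bean' is only an illustrative dataset identifier, and `name` is the attribute set by the `Dataset` constructor above:

```python
import quapy as qp

# fetch any UCI multiclass dataset; 'dry-bean' is just an example identifier
data = qp.datasets.fetch_UCIMulticlassDataset('dry-bean')

# with the fix, the dataset keeps its name even when the training set was re-split and capped
print(data.name)
```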
@@ -1,45 +1,57 @@
 """This module allows the composition of quantification methods from loss functions and feature transformations. This functionality is realized through an integration of the qunfold package: https://github.com/mirkobunse/qunfold."""

-import qunfold
-from qunfold.quapy import QuaPyWrapper
-from qunfold.sklearn import CVClassifier
-from qunfold import (
-    LeastSquaresLoss, # losses
-    BlobelLoss,
-    EnergyLoss,
-    HellingerSurrogateLoss,
-    CombinedLoss,
-    TikhonovRegularization,
-    TikhonovRegularized,
-    ClassTransformer, # transformers
-    HistogramTransformer,
-    DistanceTransformer,
-    KernelTransformer,
-    EnergyKernelTransformer,
-    LaplacianKernelTransformer,
-    GaussianKernelTransformer,
-    GaussianRFFKernelTransformer,
-)
+_import_error_message = """qunfold, the back-end of quapy.method.composable, is not properly installed.

-__all__ = [ # control public members, e.g., for auto-documentation in sphinx; omit QuaPyWrapper
-    "ComposableQuantifier",
-    "CVClassifier",
-    "LeastSquaresLoss",
-    "BlobelLoss",
-    "EnergyLoss",
-    "HellingerSurrogateLoss",
-    "CombinedLoss",
-    "TikhonovRegularization",
-    "TikhonovRegularized",
-    "ClassTransformer",
-    "HistogramTransformer",
-    "DistanceTransformer",
-    "KernelTransformer",
-    "EnergyKernelTransformer",
-    "LaplacianKernelTransformer",
-    "GaussianKernelTransformer",
-    "GaussianRFFKernelTransformer",
-]
+To fix this error, call:
+
+    pip install --upgrade pip setuptools wheel
+    pip install "jax[cpu]"
+    pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
+"""
+
+try:
+    import qunfold
+    from qunfold.quapy import QuaPyWrapper
+    from qunfold.sklearn import CVClassifier
+    from qunfold import (
+        LeastSquaresLoss, # losses
+        BlobelLoss,
+        EnergyLoss,
+        HellingerSurrogateLoss,
+        CombinedLoss,
+        TikhonovRegularization,
+        TikhonovRegularized,
+        ClassTransformer, # transformers
+        HistogramTransformer,
+        DistanceTransformer,
+        KernelTransformer,
+        EnergyKernelTransformer,
+        LaplacianKernelTransformer,
+        GaussianKernelTransformer,
+        GaussianRFFKernelTransformer,
+    )
+
+    __all__ = [ # control public members, e.g., for auto-documentation in sphinx; omit QuaPyWrapper
+        "ComposableQuantifier",
+        "CVClassifier",
+        "LeastSquaresLoss",
+        "BlobelLoss",
+        "EnergyLoss",
+        "HellingerSurrogateLoss",
+        "CombinedLoss",
+        "TikhonovRegularization",
+        "TikhonovRegularized",
+        "ClassTransformer",
+        "HistogramTransformer",
+        "DistanceTransformer",
+        "KernelTransformer",
+        "EnergyKernelTransformer",
+        "LaplacianKernelTransformer",
+        "GaussianKernelTransformer",
+        "GaussianRFFKernelTransformer",
+    ]
+except ImportError as e:
+    raise ImportError(_import_error_message) from e

 def ComposableQuantifier(loss, transformer, **kwargs):
     """A generic quantification / unfolding method that solves a linear system of equations.