Compare commits
99 Commits
99bc8508ac
...
a271fe1231
Author | SHA1 | Date |
---|---|---|
|
a271fe1231 | |
|
5e2fc07fc5 | |
|
73755b73e8 | |
|
db8a870495 | |
|
b485205c7c | |
|
9be729386a | |
|
ffcfd64957 | |
|
1f1757f0ee | |
|
cea96e87c6 | |
|
584a4d07d4 | |
|
3895cba610 | |
|
de3f8fd300 | |
|
2311bb6649 | |
|
55c62a9dd2 | |
|
a6ff00f96b | |
|
365a9e626c | |
|
88541976e9 | |
|
e580e33b83 | |
|
4474653a25 | |
|
13beb45274 | |
|
73d53820c2 | |
|
3f20aa06b1 | |
|
9642808cf3 | |
|
89d02043be | |
|
9a7e50f6c5 | |
|
2140aedf6a | |
|
b543857c08 | |
|
5da9fa0b09 | |
|
b4571d96c7 | |
|
2034845988 | |
|
b06a1532c2 | |
|
7e4e0e20a1 | |
|
b8252d0272 | |
|
daa275d325 | |
|
76b38cb81c | |
|
8237c121de | |
|
e83966f1ff | |
|
2dcc086ec2 | |
|
bf65c00349 | |
|
8e64e5446e | |
|
7fb41028d5 | |
|
868aa34cf5 | |
|
781ce82b90 | |
|
8142131205 | |
|
1730d5a1a9 | |
|
7f05f8dd41 | |
|
c99c9903a3 | |
|
b8b3cf540e | |
|
415c92f803 | |
|
c668d0b3d8 | |
|
d2209afab5 | |
|
8e9e7fa199 | |
|
449618c42e | |
|
b1414b2a04 | |
|
4e0e747d47 | |
|
02365e4bee | |
|
04e7805445 | |
|
fedf9b492b | |
|
6ea15c30b8 | |
|
21a466adf1 | |
|
e1f99eb201 | |
|
c408deacae | |
|
ad11b86168 | |
|
9ad36ef008 | |
|
acfb02c51f | |
|
4db21b6945 | |
|
c7419d81fc | |
|
817aab1d99 | |
|
7f39f4df66 | |
|
b3860b3b83 | |
|
8517338765 | |
|
19524f9aa8 | |
|
93dd6cb1c1 | |
|
498fd8b050 | |
|
244d1045ce | |
|
e92264c280 | |
|
f1462897ef | |
|
f74b048e2d | |
|
ecfc175622 | |
|
522d074087 | |
|
bf33c134fc | |
|
e111860128 | |
|
da99f78c0c | |
|
2000c33372 | |
|
e6f380dc5f | |
|
bee1c4e678 | |
|
a64620c377 | |
|
72b43bd2f8 | |
|
f3e543152c | |
|
31a697559c | |
|
69b8327fe9 | |
|
db6ff4ab9e | |
|
561b672200 | |
|
e0b80167b9 | |
|
4abec6629b | |
|
3095d7092c | |
|
b53d417240 | |
|
f69fca32b4 | |
|
f5603135a7 |
|
@ -0,0 +1,68 @@
|
|||
name: CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- devel
|
||||
|
||||
jobs:
|
||||
|
||||
# take out unit tests
|
||||
test:
|
||||
name: Unit tests (Python ${{ matrix.python-version }})
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- "3.11"
|
||||
env:
|
||||
QUAPY_TESTS_OMIT_LARGE_DATASETS: True
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip setuptools wheel
|
||||
python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
|
||||
python -m pip install -e .[bayes,tests]
|
||||
- name: Test with unittest
|
||||
run: python -m unittest
|
||||
|
||||
# build and push documentation to gh-pages (only if pushed to the master branch)
|
||||
docs:
|
||||
name: Documentation
|
||||
runs-on: ubuntu-latest
|
||||
if: github.ref == 'refs/heads/master'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.11
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip setuptools wheel "jax[cpu]"
|
||||
python -m pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
|
||||
python -m pip install -e .[neural,docs]
|
||||
- name: Build documentation
|
||||
run: sphinx-build -M html docs/source docs/build
|
||||
- name: Publish documentation
|
||||
run: |
|
||||
git clone ${{ github.server_url }}/${{ github.repository }}.git --branch gh-pages --single-branch __gh-pages/
|
||||
cp -r docs/build/html/* __gh-pages/
|
||||
cd __gh-pages/
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git add .
|
||||
git commit -am "Documentation based on ${{ github.sha }}" || true
|
||||
- name: Push changes
|
||||
uses: ad-m/github-push-action@master
|
||||
with:
|
||||
branch: gh-pages
|
||||
directory: __gh-pages/
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
|
@ -1,23 +0,0 @@
|
|||
name: Pylint
|
||||
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10"]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pylint
|
||||
- name: Analysing the code with pylint
|
||||
run: |
|
||||
pylint $(git ls-files '*.py')
|
|
@ -69,6 +69,9 @@ instance/
|
|||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# vscode config:
|
||||
.vscode/
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
|
@ -85,6 +88,11 @@ ipython_config.py
|
|||
# pyenv
|
||||
.python-version
|
||||
|
||||
# poetry
|
||||
poetry.toml
|
||||
pyproject.toml
|
||||
poetry.lock
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
|
@ -159,3 +167,4 @@ TweetSentQuant
|
|||
|
||||
|
||||
*.png
|
||||
.idea
|
||||
|
|
|
@ -1,10 +1,47 @@
|
|||
Change Log 0.1.9
|
||||
----------------
|
||||
|
||||
- Added LeQua 2024 datasets and normalized match distance to qp.error
|
||||
|
||||
- Improved data loaders for UCI binary and UCI multiclass datasets (thanks to Lorenzo Volpi!); these datasets
|
||||
can be loaded with standardised covariates (default)
|
||||
|
||||
- Added a default classifier for aggregative quantifiers, which now can be instantiated without specifying
|
||||
the classifier. The default classifier can be accessed in qp.environ['DEFAULT_CLS'] and is assigned to
|
||||
sklearn.linear_model.LogisticRegression(max_iter=3000). If the classifier is not specified, then a clone
|
||||
of said classifier is returned. E.g.:
|
||||
> pacc = PACC()
|
||||
is equivalent to:
|
||||
> pacc = PACC(classifier=LogisticRegression(max_iter=3000))
|
||||
|
||||
- Improved error loging in model selection. In v0.1.8 only Status.INVALID was reported; in v0.1.9 it is
|
||||
now accompanied by a textual description of the error
|
||||
|
||||
- The number of parallel workers can now be set via an environment variable by running, e.g.:
|
||||
> N_JOBS=10 python3 your_script.py
|
||||
which has the same effect as writing the following code at the beginning of your_script.py:
|
||||
> import quapy as qp
|
||||
> qp.environ["N_JOBS"] = 10
|
||||
|
||||
- Some examples have been added to the ./examples/ dir, which now contains numbered examples from basics (0)
|
||||
to advanced topics (higher numbers)
|
||||
|
||||
- Moved the wiki documents to the ./docs/ folder so that they become editable via PR for the community
|
||||
|
||||
- Added Composable methods from Mirko Bunse's qunfold library! (thanks to Mirko Bunse!)
|
||||
|
||||
- Added Continuous Integration with GitHub Actions (thanks to Mirko Bunse!)
|
||||
|
||||
- Added Bayesian CC method (thanks to Pawel Czyz!). The method is described in detail in the paper
|
||||
Ziegler, Albert, and Paweł Czyż. "Bayesian Quantification with Black-Box Estimators."
|
||||
arXiv preprint arXiv:2302.09159 (2023).
|
||||
|
||||
- Removed binary UCI datasets {acute.a, acute.b, balance.2} from the list qp.data.datasets.UCI_BINARY_DATASETS
|
||||
(the datasets are still loadable from the fetch_UCIBinaryLabelledCollection and fetch_UCIBinaryDataset
|
||||
functions, though). The reason is that these datasets tend to yield results (for all methods) that are
|
||||
one or two orders of magnitude greater than for other datasets, and this has a disproportionate impact in
|
||||
methods average (I suspect there is something wrong in those datasets).
|
||||
|
||||
|
||||
Change Log 0.1.8
|
||||
----------------
|
||||
|
|
21
README.md
|
@ -13,7 +13,7 @@ for facilitating the analysis and interpretation of the experimental results.
|
|||
|
||||
### Last updates:
|
||||
|
||||
* Version 0.1.8 is released! major changes can be consulted [here](CHANGE_LOG.txt).
|
||||
* Version 0.1.9 is released! major changes can be consulted [here](CHANGE_LOG.txt).
|
||||
* The developer API documentation is available [here](https://hlt-isti.github.io/QuaPy/build/html/modules.html)
|
||||
|
||||
### Installation
|
||||
|
@ -24,7 +24,7 @@ pip install quapy
|
|||
|
||||
### Cite QuaPy
|
||||
|
||||
If you find QuaPy useful (and we hope you will), plese consider citing the original paper in your research:
|
||||
If you find QuaPy useful (and we hope you will), please consider citing the original paper in your research:
|
||||
|
||||
```
|
||||
@inproceedings{moreo2021quapy,
|
||||
|
@ -45,19 +45,18 @@ of the test set.
|
|||
|
||||
```python
|
||||
import quapy as qp
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
dataset = qp.datasets.fetch_twitter('semeval16')
|
||||
dataset = qp.datasets.fetch_UCIBinaryDataset("yeast")
|
||||
training, test = dataset.train_test
|
||||
|
||||
# create an "Adjusted Classify & Count" quantifier
|
||||
model = qp.method.aggregative.ACC(LogisticRegression())
|
||||
model.fit(dataset.training)
|
||||
model = qp.method.aggregative.ACC()
|
||||
model.fit(training)
|
||||
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
true_prevalence = dataset.test.prevalence()
|
||||
estim_prevalence = model.quantify(test.X)
|
||||
true_prevalence = test.prevalence()
|
||||
|
||||
error = qp.error.mae(true_prevalence, estim_prevalence)
|
||||
|
||||
print(f'Mean Absolute Error (MAE)={error:.3f}')
|
||||
```
|
||||
|
||||
|
@ -68,7 +67,7 @@ class prevalence of the training set. For this reason, any quantification model
|
|||
should be tested across many samples, even ones characterized by class prevalence
|
||||
values different or very different from those found in the training set.
|
||||
QuaPy implements sampling procedures and evaluation protocols that automate this workflow.
|
||||
See the [Wiki](https://github.com/HLT-ISTI/QuaPy/wiki) for detailed examples.
|
||||
See the [documentation](https://hlt-isti.github.io/QuaPy/build/html/) for detailed examples.
|
||||
|
||||
## Features
|
||||
|
||||
|
@ -116,4 +115,4 @@ are provided:
|
|||
|
||||
## Acknowledgments:
|
||||
|
||||
<img src="SoBigData.png" alt="SoBigData++" width="250"/>
|
||||
<img src="docs/source/SoBigData.png" alt="SoBigData++" width="250"/>
|
||||
|
|
6
TODO.txt
|
@ -0,0 +1,6 @@
|
|||
- [TODO] add ensemble methods SC-MQ, MC-SQ, MC-MQ
|
||||
- [TODO] add HistNetQ
|
||||
- [TODO] add CDE-iteration and Bayes-CDE methods
|
||||
- [TODO] add Friedman's method and DeBias
|
||||
- [TODO] check ignore warning stuff
|
||||
check https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings
|
|
@ -0,0 +1 @@
|
|||
build/
|
|
@ -1,41 +0,0 @@
|
|||
.. QuaPy: A Python-based open-source framework for quantification documentation master file, created by
|
||||
sphinx-quickstart on Wed Feb 7 16:26:46 2024.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to QuaPy's documentation!
|
||||
==========================================================================================
|
||||
|
||||
QuaPy is a Python-based open-source framework for quantification.
|
||||
|
||||
This document contains the API of the modules included in QuaPy.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
`pip install quapy`
|
||||
|
||||
GitHub
|
||||
------------
|
||||
|
||||
QuaPy is hosted in GitHub at `https://github.com/HLT-ISTI/QuaPy <https://github.com/HLT-ISTI/QuaPy>`_
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
|
||||
modules
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
|
@ -1,7 +0,0 @@
|
|||
quapy
|
||||
=====
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 4
|
||||
|
||||
quapy
|
|
@ -1,45 +0,0 @@
|
|||
quapy.classification package
|
||||
============================
|
||||
|
||||
Submodules
|
||||
----------
|
||||
|
||||
quapy.classification.calibration module
|
||||
---------------------------------------
|
||||
|
||||
.. automodule:: quapy.classification.calibration
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.classification.methods module
|
||||
-----------------------------------
|
||||
|
||||
.. automodule:: quapy.classification.methods
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.classification.neural module
|
||||
----------------------------------
|
||||
|
||||
.. automodule:: quapy.classification.neural
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.classification.svmperf module
|
||||
-----------------------------------
|
||||
|
||||
.. automodule:: quapy.classification.svmperf
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Module contents
|
||||
---------------
|
||||
|
||||
.. automodule:: quapy.classification
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,46 +0,0 @@
|
|||
quapy.data package
|
||||
==================
|
||||
|
||||
Submodules
|
||||
----------
|
||||
|
||||
quapy.data.base module
|
||||
----------------------
|
||||
|
||||
.. automodule:: quapy.data.base
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.data.datasets module
|
||||
--------------------------
|
||||
|
||||
.. automodule:: quapy.data.datasets
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
|
||||
quapy.data.preprocessing module
|
||||
-------------------------------
|
||||
|
||||
.. automodule:: quapy.data.preprocessing
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.data.reader module
|
||||
------------------------
|
||||
|
||||
.. automodule:: quapy.data.reader
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Module contents
|
||||
---------------
|
||||
|
||||
.. automodule:: quapy.data
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,61 +0,0 @@
|
|||
quapy.method package
|
||||
====================
|
||||
|
||||
Submodules
|
||||
----------
|
||||
|
||||
quapy.method.aggregative module
|
||||
-------------------------------
|
||||
|
||||
.. automodule:: quapy.method.aggregative
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: quapy.method._kdey
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: quapy.method._neural
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: quapy.method._threshold_optim
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
|
||||
quapy.method.base module
|
||||
------------------------
|
||||
|
||||
.. automodule:: quapy.method.base
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.method.meta module
|
||||
------------------------
|
||||
|
||||
.. automodule:: quapy.method.meta
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.method.non\_aggregative module
|
||||
------------------------------------
|
||||
|
||||
.. automodule:: quapy.method.non_aggregative
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Module contents
|
||||
---------------
|
||||
|
||||
.. automodule:: quapy.method
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,80 +0,0 @@
|
|||
quapy package
|
||||
=============
|
||||
|
||||
Subpackages
|
||||
-----------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 4
|
||||
|
||||
quapy.classification
|
||||
quapy.data
|
||||
quapy.method
|
||||
|
||||
|
||||
Submodules
|
||||
----------
|
||||
|
||||
quapy.error module
|
||||
------------------
|
||||
|
||||
.. automodule:: quapy.error
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.evaluation module
|
||||
-----------------------
|
||||
|
||||
.. automodule:: quapy.evaluation
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.functional module
|
||||
-----------------------
|
||||
|
||||
.. automodule:: quapy.functional
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.model\_selection module
|
||||
-----------------------------
|
||||
|
||||
.. automodule:: quapy.model_selection
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.plot module
|
||||
-----------------
|
||||
|
||||
.. automodule:: quapy.plot
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.protocol module
|
||||
---------------------
|
||||
|
||||
.. automodule:: quapy.protocol
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.util module
|
||||
-----------------
|
||||
|
||||
.. automodule:: quapy.util
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Module contents
|
||||
---------------
|
||||
|
||||
.. automodule:: quapy
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,900 +0,0 @@
|
|||
/*
|
||||
* basic.css
|
||||
* ~~~~~~~~~
|
||||
*
|
||||
* Sphinx stylesheet -- basic theme.
|
||||
*
|
||||
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
/* -- main layout ----------------------------------------------------------- */
|
||||
|
||||
div.clearer {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
div.section::after {
|
||||
display: block;
|
||||
content: '';
|
||||
clear: left;
|
||||
}
|
||||
|
||||
/* -- relbar ---------------------------------------------------------------- */
|
||||
|
||||
div.related {
|
||||
width: 100%;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
div.related h3 {
|
||||
display: none;
|
||||
}
|
||||
|
||||
div.related ul {
|
||||
margin: 0;
|
||||
padding: 0 0 0 10px;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
div.related li {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
div.related li.right {
|
||||
float: right;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
/* -- sidebar --------------------------------------------------------------- */
|
||||
|
||||
div.sphinxsidebarwrapper {
|
||||
padding: 10px 5px 0 10px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar {
|
||||
float: left;
|
||||
width: 230px;
|
||||
margin-left: -100%;
|
||||
font-size: 90%;
|
||||
word-wrap: break-word;
|
||||
overflow-wrap : break-word;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul,
|
||||
div.sphinxsidebar ul.want-points {
|
||||
margin-left: 20px;
|
||||
list-style: square;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form {
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar input {
|
||||
border: 1px solid #98dbcc;
|
||||
font-family: sans-serif;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
div.sphinxsidebar #searchbox form.search {
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
div.sphinxsidebar #searchbox input[type="text"] {
|
||||
float: left;
|
||||
width: 80%;
|
||||
padding: 0.25em;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
div.sphinxsidebar #searchbox input[type="submit"] {
|
||||
float: left;
|
||||
width: 20%;
|
||||
border-left: none;
|
||||
padding: 0.25em;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
/* -- search page ----------------------------------------------------------- */
|
||||
|
||||
ul.search {
|
||||
margin: 10px 0 0 20px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ul.search li {
|
||||
padding: 5px 0 5px 20px;
|
||||
background-image: url(file.png);
|
||||
background-repeat: no-repeat;
|
||||
background-position: 0 7px;
|
||||
}
|
||||
|
||||
ul.search li a {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
ul.search li p.context {
|
||||
color: #888;
|
||||
margin: 2px 0 0 30px;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
ul.keywordmatches li.goodmatch a {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
/* -- index page ------------------------------------------------------------ */
|
||||
|
||||
table.contentstable {
|
||||
width: 90%;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
table.contentstable p.biglink {
|
||||
line-height: 150%;
|
||||
}
|
||||
|
||||
a.biglink {
|
||||
font-size: 1.3em;
|
||||
}
|
||||
|
||||
span.linkdescr {
|
||||
font-style: italic;
|
||||
padding-top: 5px;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
/* -- general index --------------------------------------------------------- */
|
||||
|
||||
table.indextable {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
table.indextable td {
|
||||
text-align: left;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
table.indextable ul {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
list-style-type: none;
|
||||
}
|
||||
|
||||
table.indextable > tbody > tr > td > ul {
|
||||
padding-left: 0em;
|
||||
}
|
||||
|
||||
table.indextable tr.pcap {
|
||||
height: 10px;
|
||||
}
|
||||
|
||||
table.indextable tr.cap {
|
||||
margin-top: 10px;
|
||||
background-color: #f2f2f2;
|
||||
}
|
||||
|
||||
img.toggler {
|
||||
margin-right: 3px;
|
||||
margin-top: 3px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
div.modindex-jumpbox {
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
margin: 1em 0 1em 0;
|
||||
padding: 0.4em;
|
||||
}
|
||||
|
||||
div.genindex-jumpbox {
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
margin: 1em 0 1em 0;
|
||||
padding: 0.4em;
|
||||
}
|
||||
|
||||
/* -- domain module index --------------------------------------------------- */
|
||||
|
||||
table.modindextable td {
|
||||
padding: 2px;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
/* -- general body styles --------------------------------------------------- */
|
||||
|
||||
div.body {
|
||||
min-width: 360px;
|
||||
max-width: 800px;
|
||||
}
|
||||
|
||||
div.body p, div.body dd, div.body li, div.body blockquote {
|
||||
-moz-hyphens: auto;
|
||||
-ms-hyphens: auto;
|
||||
-webkit-hyphens: auto;
|
||||
hyphens: auto;
|
||||
}
|
||||
|
||||
a.headerlink {
|
||||
visibility: hidden;
|
||||
}
|
||||
|
||||
h1:hover > a.headerlink,
|
||||
h2:hover > a.headerlink,
|
||||
h3:hover > a.headerlink,
|
||||
h4:hover > a.headerlink,
|
||||
h5:hover > a.headerlink,
|
||||
h6:hover > a.headerlink,
|
||||
dt:hover > a.headerlink,
|
||||
caption:hover > a.headerlink,
|
||||
p.caption:hover > a.headerlink,
|
||||
div.code-block-caption:hover > a.headerlink {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
div.body p.caption {
|
||||
text-align: inherit;
|
||||
}
|
||||
|
||||
div.body td {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.first {
|
||||
margin-top: 0 !important;
|
||||
}
|
||||
|
||||
p.rubric {
|
||||
margin-top: 30px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
img.align-left, figure.align-left, .figure.align-left, object.align-left {
|
||||
clear: left;
|
||||
float: left;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
img.align-right, figure.align-right, .figure.align-right, object.align-right {
|
||||
clear: right;
|
||||
float: right;
|
||||
margin-left: 1em;
|
||||
}
|
||||
|
||||
img.align-center, figure.align-center, .figure.align-center, object.align-center {
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
img.align-default, figure.align-default, .figure.align-default {
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
.align-left {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.align-center {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.align-default {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.align-right {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
/* -- sidebars -------------------------------------------------------------- */
|
||||
|
||||
div.sidebar,
|
||||
aside.sidebar {
|
||||
margin: 0 0 0.5em 1em;
|
||||
border: 1px solid #ddb;
|
||||
padding: 7px;
|
||||
background-color: #ffe;
|
||||
width: 40%;
|
||||
float: right;
|
||||
clear: right;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
p.sidebar-title {
|
||||
font-weight: bold;
|
||||
}
|
||||
nav.contents,
|
||||
aside.topic,
|
||||
div.admonition, div.topic, blockquote {
|
||||
clear: left;
|
||||
}
|
||||
|
||||
/* -- topics ---------------------------------------------------------------- */
|
||||
nav.contents,
|
||||
aside.topic,
|
||||
div.topic {
|
||||
border: 1px solid #ccc;
|
||||
padding: 7px;
|
||||
margin: 10px 0 10px 0;
|
||||
}
|
||||
|
||||
p.topic-title {
|
||||
font-size: 1.1em;
|
||||
font-weight: bold;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
/* -- admonitions ----------------------------------------------------------- */
|
||||
|
||||
div.admonition {
|
||||
margin-top: 10px;
|
||||
margin-bottom: 10px;
|
||||
padding: 7px;
|
||||
}
|
||||
|
||||
div.admonition dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
p.admonition-title {
|
||||
margin: 0px 10px 5px 0px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
div.body p.centered {
|
||||
text-align: center;
|
||||
margin-top: 25px;
|
||||
}
|
||||
|
||||
/* -- content of sidebars/topics/admonitions -------------------------------- */
|
||||
|
||||
div.sidebar > :last-child,
|
||||
aside.sidebar > :last-child,
|
||||
nav.contents > :last-child,
|
||||
aside.topic > :last-child,
|
||||
div.topic > :last-child,
|
||||
div.admonition > :last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.sidebar::after,
|
||||
aside.sidebar::after,
|
||||
nav.contents::after,
|
||||
aside.topic::after,
|
||||
div.topic::after,
|
||||
div.admonition::after,
|
||||
blockquote::after {
|
||||
display: block;
|
||||
content: '';
|
||||
clear: both;
|
||||
}
|
||||
|
||||
/* -- tables ---------------------------------------------------------------- */
|
||||
|
||||
table.docutils {
|
||||
margin-top: 10px;
|
||||
margin-bottom: 10px;
|
||||
border: 0;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
table.align-center {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
table.align-default {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
table caption span.caption-number {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
table caption span.caption-text {
|
||||
}
|
||||
|
||||
table.docutils td, table.docutils th {
|
||||
padding: 1px 8px 1px 5px;
|
||||
border-top: 0;
|
||||
border-left: 0;
|
||||
border-right: 0;
|
||||
border-bottom: 1px solid #aaa;
|
||||
}
|
||||
|
||||
th {
|
||||
text-align: left;
|
||||
padding-right: 5px;
|
||||
}
|
||||
|
||||
table.citation {
|
||||
border-left: solid 1px gray;
|
||||
margin-left: 1px;
|
||||
}
|
||||
|
||||
table.citation td {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
th > :first-child,
|
||||
td > :first-child {
|
||||
margin-top: 0px;
|
||||
}
|
||||
|
||||
th > :last-child,
|
||||
td > :last-child {
|
||||
margin-bottom: 0px;
|
||||
}
|
||||
|
||||
/* -- figures --------------------------------------------------------------- */
|
||||
|
||||
div.figure, figure {
|
||||
margin: 0.5em;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
div.figure p.caption, figcaption {
|
||||
padding: 0.3em;
|
||||
}
|
||||
|
||||
div.figure p.caption span.caption-number,
|
||||
figcaption span.caption-number {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
div.figure p.caption span.caption-text,
|
||||
figcaption span.caption-text {
|
||||
}
|
||||
|
||||
/* -- field list styles ----------------------------------------------------- */
|
||||
|
||||
table.field-list td, table.field-list th {
|
||||
border: 0 !important;
|
||||
}
|
||||
|
||||
.field-list ul {
|
||||
margin: 0;
|
||||
padding-left: 1em;
|
||||
}
|
||||
|
||||
.field-list p {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.field-name {
|
||||
-moz-hyphens: manual;
|
||||
-ms-hyphens: manual;
|
||||
-webkit-hyphens: manual;
|
||||
hyphens: manual;
|
||||
}
|
||||
|
||||
/* -- hlist styles ---------------------------------------------------------- */
|
||||
|
||||
table.hlist {
|
||||
margin: 1em 0;
|
||||
}
|
||||
|
||||
table.hlist td {
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
/* -- object description styles --------------------------------------------- */
|
||||
|
||||
.sig {
|
||||
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
|
||||
}
|
||||
|
||||
.sig-name, code.descname {
|
||||
background-color: transparent;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.sig-name {
|
||||
font-size: 1.1em;
|
||||
}
|
||||
|
||||
code.descname {
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
.sig-prename, code.descclassname {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
.optional {
|
||||
font-size: 1.3em;
|
||||
}
|
||||
|
||||
.sig-paren {
|
||||
font-size: larger;
|
||||
}
|
||||
|
||||
.sig-param.n {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
/* C++ specific styling */
|
||||
|
||||
.sig-inline.c-texpr,
|
||||
.sig-inline.cpp-texpr {
|
||||
font-family: unset;
|
||||
}
|
||||
|
||||
.sig.c .k, .sig.c .kt,
|
||||
.sig.cpp .k, .sig.cpp .kt {
|
||||
color: #0033B3;
|
||||
}
|
||||
|
||||
.sig.c .m,
|
||||
.sig.cpp .m {
|
||||
color: #1750EB;
|
||||
}
|
||||
|
||||
.sig.c .s, .sig.c .sc,
|
||||
.sig.cpp .s, .sig.cpp .sc {
|
||||
color: #067D17;
|
||||
}
|
||||
|
||||
|
||||
/* -- other body styles ----------------------------------------------------- */
|
||||
|
||||
ol.arabic {
|
||||
list-style: decimal;
|
||||
}
|
||||
|
||||
ol.loweralpha {
|
||||
list-style: lower-alpha;
|
||||
}
|
||||
|
||||
ol.upperalpha {
|
||||
list-style: upper-alpha;
|
||||
}
|
||||
|
||||
ol.lowerroman {
|
||||
list-style: lower-roman;
|
||||
}
|
||||
|
||||
ol.upperroman {
|
||||
list-style: upper-roman;
|
||||
}
|
||||
|
||||
:not(li) > ol > li:first-child > :first-child,
|
||||
:not(li) > ul > li:first-child > :first-child {
|
||||
margin-top: 0px;
|
||||
}
|
||||
|
||||
:not(li) > ol > li:last-child > :last-child,
|
||||
:not(li) > ul > li:last-child > :last-child {
|
||||
margin-bottom: 0px;
|
||||
}
|
||||
|
||||
ol.simple ol p,
|
||||
ol.simple ul p,
|
||||
ul.simple ol p,
|
||||
ul.simple ul p {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
ol.simple > li:not(:first-child) > p,
|
||||
ul.simple > li:not(:first-child) > p {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
ol.simple p,
|
||||
ul.simple p {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
aside.footnote > span,
|
||||
div.citation > span {
|
||||
float: left;
|
||||
}
|
||||
aside.footnote > span:last-of-type,
|
||||
div.citation > span:last-of-type {
|
||||
padding-right: 0.5em;
|
||||
}
|
||||
aside.footnote > p {
|
||||
margin-left: 2em;
|
||||
}
|
||||
div.citation > p {
|
||||
margin-left: 4em;
|
||||
}
|
||||
aside.footnote > p:last-of-type,
|
||||
div.citation > p:last-of-type {
|
||||
margin-bottom: 0em;
|
||||
}
|
||||
aside.footnote > p:last-of-type:after,
|
||||
div.citation > p:last-of-type:after {
|
||||
content: "";
|
||||
clear: both;
|
||||
}
|
||||
|
||||
dl.field-list {
|
||||
display: grid;
|
||||
grid-template-columns: fit-content(30%) auto;
|
||||
}
|
||||
|
||||
dl.field-list > dt {
|
||||
font-weight: bold;
|
||||
word-break: break-word;
|
||||
padding-left: 0.5em;
|
||||
padding-right: 5px;
|
||||
}
|
||||
|
||||
dl.field-list > dd {
|
||||
padding-left: 0.5em;
|
||||
margin-top: 0em;
|
||||
margin-left: 0em;
|
||||
margin-bottom: 0em;
|
||||
}
|
||||
|
||||
dl {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
dd > :first-child {
|
||||
margin-top: 0px;
|
||||
}
|
||||
|
||||
dd ul, dd table {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
dd {
|
||||
margin-top: 3px;
|
||||
margin-bottom: 10px;
|
||||
margin-left: 30px;
|
||||
}
|
||||
|
||||
dl > dd:last-child,
|
||||
dl > dd:last-child > :last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
dt:target, span.highlighted {
|
||||
background-color: #fbe54e;
|
||||
}
|
||||
|
||||
rect.highlighted {
|
||||
fill: #fbe54e;
|
||||
}
|
||||
|
||||
dl.glossary dt {
|
||||
font-weight: bold;
|
||||
font-size: 1.1em;
|
||||
}
|
||||
|
||||
.versionmodified {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.system-message {
|
||||
background-color: #fda;
|
||||
padding: 5px;
|
||||
border: 3px solid red;
|
||||
}
|
||||
|
||||
.footnote:target {
|
||||
background-color: #ffa;
|
||||
}
|
||||
|
||||
.line-block {
|
||||
display: block;
|
||||
margin-top: 1em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
.line-block .line-block {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
margin-left: 1.5em;
|
||||
}
|
||||
|
||||
.guilabel, .menuselection {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
|
||||
.accelerator {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
.classifier {
|
||||
font-style: oblique;
|
||||
}
|
||||
|
||||
.classifier:before {
|
||||
font-style: normal;
|
||||
margin: 0 0.5em;
|
||||
content: ":";
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
abbr, acronym {
|
||||
border-bottom: dotted 1px;
|
||||
cursor: help;
|
||||
}
|
||||
|
||||
/* -- code displays --------------------------------------------------------- */
|
||||
|
||||
pre {
|
||||
overflow: auto;
|
||||
overflow-y: hidden; /* fixes display issues on Chrome browsers */
|
||||
}
|
||||
|
||||
pre, div[class*="highlight-"] {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
span.pre {
|
||||
-moz-hyphens: none;
|
||||
-ms-hyphens: none;
|
||||
-webkit-hyphens: none;
|
||||
hyphens: none;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
div[class*="highlight-"] {
|
||||
margin: 1em 0;
|
||||
}
|
||||
|
||||
td.linenos pre {
|
||||
border: 0;
|
||||
background-color: transparent;
|
||||
color: #aaa;
|
||||
}
|
||||
|
||||
table.highlighttable {
|
||||
display: block;
|
||||
}
|
||||
|
||||
table.highlighttable tbody {
|
||||
display: block;
|
||||
}
|
||||
|
||||
table.highlighttable tr {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
table.highlighttable td {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
table.highlighttable td.linenos {
|
||||
padding-right: 0.5em;
|
||||
}
|
||||
|
||||
table.highlighttable td.code {
|
||||
flex: 1;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.highlight .hll {
|
||||
display: block;
|
||||
}
|
||||
|
||||
div.highlight pre,
|
||||
table.highlighttable pre {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
div.code-block-caption + div {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
div.code-block-caption {
|
||||
margin-top: 1em;
|
||||
padding: 2px 5px;
|
||||
font-size: small;
|
||||
}
|
||||
|
||||
div.code-block-caption code {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
table.highlighttable td.linenos,
|
||||
span.linenos,
|
||||
div.highlight span.gp { /* gp: Generic.Prompt */
|
||||
user-select: none;
|
||||
-webkit-user-select: text; /* Safari fallback only */
|
||||
-webkit-user-select: none; /* Chrome/Safari */
|
||||
-moz-user-select: none; /* Firefox */
|
||||
-ms-user-select: none; /* IE10+ */
|
||||
}
|
||||
|
||||
div.code-block-caption span.caption-number {
|
||||
padding: 0.1em 0.3em;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
div.code-block-caption span.caption-text {
|
||||
}
|
||||
|
||||
div.literal-block-wrapper {
|
||||
margin: 1em 0;
|
||||
}
|
||||
|
||||
code.xref, a code {
|
||||
background-color: transparent;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
.viewcode-link {
|
||||
float: right;
|
||||
}
|
||||
|
||||
.viewcode-back {
|
||||
float: right;
|
||||
font-family: sans-serif;
|
||||
}
|
||||
|
||||
div.viewcode-block:target {
|
||||
margin: -1px -10px;
|
||||
padding: 0 10px;
|
||||
}
|
||||
|
||||
/* -- math display ---------------------------------------------------------- */
|
||||
|
||||
img.math {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
div.body div.math p {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
span.eqno {
|
||||
float: right;
|
||||
}
|
||||
|
||||
span.eqno a.headerlink {
|
||||
position: absolute;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
div.math:hover a.headerlink {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
/* -- printout stylesheet --------------------------------------------------- */
|
||||
|
||||
@media print {
|
||||
div.document,
|
||||
div.documentwrapper,
|
||||
div.bodywrapper {
|
||||
margin: 0 !important;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.sphinxsidebar,
|
||||
div.related,
|
||||
div.footer,
|
||||
#top-link {
|
||||
display: none;
|
||||
}
|
||||
}
|
|
@ -1,156 +0,0 @@
|
|||
/*
|
||||
* doctools.js
|
||||
* ~~~~~~~~~~~
|
||||
*
|
||||
* Base JavaScript utilities for all Sphinx HTML documentation.
|
||||
*
|
||||
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
"use strict";
|
||||
|
||||
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
|
||||
"TEXTAREA",
|
||||
"INPUT",
|
||||
"SELECT",
|
||||
"BUTTON",
|
||||
]);
|
||||
|
||||
const _ready = (callback) => {
|
||||
if (document.readyState !== "loading") {
|
||||
callback();
|
||||
} else {
|
||||
document.addEventListener("DOMContentLoaded", callback);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Small JavaScript module for the documentation.
|
||||
*/
|
||||
const Documentation = {
|
||||
init: () => {
|
||||
Documentation.initDomainIndexTable();
|
||||
Documentation.initOnKeyListeners();
|
||||
},
|
||||
|
||||
/**
|
||||
* i18n support
|
||||
*/
|
||||
TRANSLATIONS: {},
|
||||
PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
|
||||
LOCALE: "unknown",
|
||||
|
||||
// gettext and ngettext don't access this so that the functions
|
||||
// can safely bound to a different name (_ = Documentation.gettext)
|
||||
gettext: (string) => {
|
||||
const translated = Documentation.TRANSLATIONS[string];
|
||||
switch (typeof translated) {
|
||||
case "undefined":
|
||||
return string; // no translation
|
||||
case "string":
|
||||
return translated; // translation exists
|
||||
default:
|
||||
return translated[0]; // (singular, plural) translation tuple exists
|
||||
}
|
||||
},
|
||||
|
||||
ngettext: (singular, plural, n) => {
|
||||
const translated = Documentation.TRANSLATIONS[singular];
|
||||
if (typeof translated !== "undefined")
|
||||
return translated[Documentation.PLURAL_EXPR(n)];
|
||||
return n === 1 ? singular : plural;
|
||||
},
|
||||
|
||||
addTranslations: (catalog) => {
|
||||
Object.assign(Documentation.TRANSLATIONS, catalog.messages);
|
||||
Documentation.PLURAL_EXPR = new Function(
|
||||
"n",
|
||||
`return (${catalog.plural_expr})`
|
||||
);
|
||||
Documentation.LOCALE = catalog.locale;
|
||||
},
|
||||
|
||||
/**
|
||||
* helper function to focus on search bar
|
||||
*/
|
||||
focusSearchBar: () => {
|
||||
document.querySelectorAll("input[name=q]")[0]?.focus();
|
||||
},
|
||||
|
||||
/**
|
||||
* Initialise the domain index toggle buttons
|
||||
*/
|
||||
initDomainIndexTable: () => {
|
||||
const toggler = (el) => {
|
||||
const idNumber = el.id.substr(7);
|
||||
const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
|
||||
if (el.src.substr(-9) === "minus.png") {
|
||||
el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
|
||||
toggledRows.forEach((el) => (el.style.display = "none"));
|
||||
} else {
|
||||
el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
|
||||
toggledRows.forEach((el) => (el.style.display = ""));
|
||||
}
|
||||
};
|
||||
|
||||
const togglerElements = document.querySelectorAll("img.toggler");
|
||||
togglerElements.forEach((el) =>
|
||||
el.addEventListener("click", (event) => toggler(event.currentTarget))
|
||||
);
|
||||
togglerElements.forEach((el) => (el.style.display = ""));
|
||||
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
|
||||
},
|
||||
|
||||
initOnKeyListeners: () => {
|
||||
// only install a listener if it is really needed
|
||||
if (
|
||||
!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
|
||||
!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
|
||||
)
|
||||
return;
|
||||
|
||||
document.addEventListener("keydown", (event) => {
|
||||
// bail for input elements
|
||||
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
|
||||
// bail with special keys
|
||||
if (event.altKey || event.ctrlKey || event.metaKey) return;
|
||||
|
||||
if (!event.shiftKey) {
|
||||
switch (event.key) {
|
||||
case "ArrowLeft":
|
||||
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
|
||||
|
||||
const prevLink = document.querySelector('link[rel="prev"]');
|
||||
if (prevLink && prevLink.href) {
|
||||
window.location.href = prevLink.href;
|
||||
event.preventDefault();
|
||||
}
|
||||
break;
|
||||
case "ArrowRight":
|
||||
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
|
||||
|
||||
const nextLink = document.querySelector('link[rel="next"]');
|
||||
if (nextLink && nextLink.href) {
|
||||
window.location.href = nextLink.href;
|
||||
event.preventDefault();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// some keyboard layouts may need Shift to get /
|
||||
switch (event.key) {
|
||||
case "/":
|
||||
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
|
||||
Documentation.focusSearchBar();
|
||||
event.preventDefault();
|
||||
}
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
// quick alias for translations
|
||||
const _ = Documentation.gettext;
|
||||
|
||||
_ready(Documentation.init);
|
|
@ -1,14 +0,0 @@
|
|||
var DOCUMENTATION_OPTIONS = {
|
||||
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
|
||||
VERSION: '0.1.9',
|
||||
LANGUAGE: 'en',
|
||||
COLLAPSE_INDEX: false,
|
||||
BUILDER: 'html',
|
||||
FILE_SUFFIX: '.html',
|
||||
LINK_SUFFIX: '.html',
|
||||
HAS_SOURCE: true,
|
||||
SOURCELINK_SUFFIX: '.txt',
|
||||
NAVIGATION_WITH_KEYS: false,
|
||||
SHOW_SEARCH_SUMMARY: true,
|
||||
ENABLE_SEARCH_SHORTCUTS: true,
|
||||
};
|
Before Width: | Height: | Size: 286 B |
|
@ -1,199 +0,0 @@
|
|||
/*
|
||||
* language_data.js
|
||||
* ~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* This script contains the language-specific data used by searchtools.js,
|
||||
* namely the list of stopwords, stemmer, scorer and splitter.
|
||||
*
|
||||
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
|
||||
|
||||
|
||||
/* Non-minified version is copied as a separate JS file, is available */
|
||||
|
||||
/**
|
||||
* Porter Stemmer
|
||||
*/
|
||||
var Stemmer = function() {
|
||||
|
||||
var step2list = {
|
||||
ational: 'ate',
|
||||
tional: 'tion',
|
||||
enci: 'ence',
|
||||
anci: 'ance',
|
||||
izer: 'ize',
|
||||
bli: 'ble',
|
||||
alli: 'al',
|
||||
entli: 'ent',
|
||||
eli: 'e',
|
||||
ousli: 'ous',
|
||||
ization: 'ize',
|
||||
ation: 'ate',
|
||||
ator: 'ate',
|
||||
alism: 'al',
|
||||
iveness: 'ive',
|
||||
fulness: 'ful',
|
||||
ousness: 'ous',
|
||||
aliti: 'al',
|
||||
iviti: 'ive',
|
||||
biliti: 'ble',
|
||||
logi: 'log'
|
||||
};
|
||||
|
||||
var step3list = {
|
||||
icate: 'ic',
|
||||
ative: '',
|
||||
alize: 'al',
|
||||
iciti: 'ic',
|
||||
ical: 'ic',
|
||||
ful: '',
|
||||
ness: ''
|
||||
};
|
||||
|
||||
var c = "[^aeiou]"; // consonant
|
||||
var v = "[aeiouy]"; // vowel
|
||||
var C = c + "[^aeiouy]*"; // consonant sequence
|
||||
var V = v + "[aeiou]*"; // vowel sequence
|
||||
|
||||
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
|
||||
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
|
||||
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
|
||||
var s_v = "^(" + C + ")?" + v; // vowel in stem
|
||||
|
||||
this.stemWord = function (w) {
|
||||
var stem;
|
||||
var suffix;
|
||||
var firstch;
|
||||
var origword = w;
|
||||
|
||||
if (w.length < 3)
|
||||
return w;
|
||||
|
||||
var re;
|
||||
var re2;
|
||||
var re3;
|
||||
var re4;
|
||||
|
||||
firstch = w.substr(0,1);
|
||||
if (firstch == "y")
|
||||
w = firstch.toUpperCase() + w.substr(1);
|
||||
|
||||
// Step 1a
|
||||
re = /^(.+?)(ss|i)es$/;
|
||||
re2 = /^(.+?)([^s])s$/;
|
||||
|
||||
if (re.test(w))
|
||||
w = w.replace(re,"$1$2");
|
||||
else if (re2.test(w))
|
||||
w = w.replace(re2,"$1$2");
|
||||
|
||||
// Step 1b
|
||||
re = /^(.+?)eed$/;
|
||||
re2 = /^(.+?)(ed|ing)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(fp[1])) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
}
|
||||
else if (re2.test(w)) {
|
||||
var fp = re2.exec(w);
|
||||
stem = fp[1];
|
||||
re2 = new RegExp(s_v);
|
||||
if (re2.test(stem)) {
|
||||
w = stem;
|
||||
re2 = /(at|bl|iz)$/;
|
||||
re3 = new RegExp("([^aeiouylsz])\\1$");
|
||||
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
|
||||
if (re2.test(w))
|
||||
w = w + "e";
|
||||
else if (re3.test(w)) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
else if (re4.test(w))
|
||||
w = w + "e";
|
||||
}
|
||||
}
|
||||
|
||||
// Step 1c
|
||||
re = /^(.+?)y$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(s_v);
|
||||
if (re.test(stem))
|
||||
w = stem + "i";
|
||||
}
|
||||
|
||||
// Step 2
|
||||
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
suffix = fp[2];
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(stem))
|
||||
w = stem + step2list[suffix];
|
||||
}
|
||||
|
||||
// Step 3
|
||||
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
suffix = fp[2];
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(stem))
|
||||
w = stem + step3list[suffix];
|
||||
}
|
||||
|
||||
// Step 4
|
||||
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
|
||||
re2 = /^(.+?)(s|t)(ion)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(mgr1);
|
||||
if (re.test(stem))
|
||||
w = stem;
|
||||
}
|
||||
else if (re2.test(w)) {
|
||||
var fp = re2.exec(w);
|
||||
stem = fp[1] + fp[2];
|
||||
re2 = new RegExp(mgr1);
|
||||
if (re2.test(stem))
|
||||
w = stem;
|
||||
}
|
||||
|
||||
// Step 5
|
||||
re = /^(.+?)e$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(mgr1);
|
||||
re2 = new RegExp(meq1);
|
||||
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
|
||||
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
|
||||
w = stem;
|
||||
}
|
||||
re = /ll$/;
|
||||
re2 = new RegExp(mgr1);
|
||||
if (re.test(w) && re2.test(w)) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
|
||||
// and turn initial Y back to y
|
||||
if (firstch == "y")
|
||||
w = firstch.toLowerCase() + w.substr(1);
|
||||
return w;
|
||||
}
|
||||
}
|
||||
|
Before Width: | Height: | Size: 90 B |
Before Width: | Height: | Size: 90 B |
|
@ -1,74 +0,0 @@
|
|||
pre { line-height: 125%; }
|
||||
td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
|
||||
span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
|
||||
td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
|
||||
span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
|
||||
.highlight .hll { background-color: #ffffcc }
|
||||
.highlight { background: #f8f8f8; }
|
||||
.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
|
||||
.highlight .err { border: 1px solid #FF0000 } /* Error */
|
||||
.highlight .k { color: #008000; font-weight: bold } /* Keyword */
|
||||
.highlight .o { color: #666666 } /* Operator */
|
||||
.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
|
||||
.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
|
||||
.highlight .cp { color: #9C6500 } /* Comment.Preproc */
|
||||
.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
|
||||
.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
|
||||
.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
|
||||
.highlight .gd { color: #A00000 } /* Generic.Deleted */
|
||||
.highlight .ge { font-style: italic } /* Generic.Emph */
|
||||
.highlight .gr { color: #E40000 } /* Generic.Error */
|
||||
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
|
||||
.highlight .gi { color: #008400 } /* Generic.Inserted */
|
||||
.highlight .go { color: #717171 } /* Generic.Output */
|
||||
.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
|
||||
.highlight .gs { font-weight: bold } /* Generic.Strong */
|
||||
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
|
||||
.highlight .gt { color: #0044DD } /* Generic.Traceback */
|
||||
.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
|
||||
.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
|
||||
.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
|
||||
.highlight .kp { color: #008000 } /* Keyword.Pseudo */
|
||||
.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
|
||||
.highlight .kt { color: #B00040 } /* Keyword.Type */
|
||||
.highlight .m { color: #666666 } /* Literal.Number */
|
||||
.highlight .s { color: #BA2121 } /* Literal.String */
|
||||
.highlight .na { color: #687822 } /* Name.Attribute */
|
||||
.highlight .nb { color: #008000 } /* Name.Builtin */
|
||||
.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */
|
||||
.highlight .no { color: #880000 } /* Name.Constant */
|
||||
.highlight .nd { color: #AA22FF } /* Name.Decorator */
|
||||
.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
|
||||
.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
|
||||
.highlight .nf { color: #0000FF } /* Name.Function */
|
||||
.highlight .nl { color: #767600 } /* Name.Label */
|
||||
.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
|
||||
.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
|
||||
.highlight .nv { color: #19177C } /* Name.Variable */
|
||||
.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
|
||||
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
|
||||
.highlight .mb { color: #666666 } /* Literal.Number.Bin */
|
||||
.highlight .mf { color: #666666 } /* Literal.Number.Float */
|
||||
.highlight .mh { color: #666666 } /* Literal.Number.Hex */
|
||||
.highlight .mi { color: #666666 } /* Literal.Number.Integer */
|
||||
.highlight .mo { color: #666666 } /* Literal.Number.Oct */
|
||||
.highlight .sa { color: #BA2121 } /* Literal.String.Affix */
|
||||
.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */
|
||||
.highlight .sc { color: #BA2121 } /* Literal.String.Char */
|
||||
.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
|
||||
.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
|
||||
.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
|
||||
.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
|
||||
.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
|
||||
.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
|
||||
.highlight .sx { color: #008000 } /* Literal.String.Other */
|
||||
.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
|
||||
.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
|
||||
.highlight .ss { color: #19177C } /* Literal.String.Symbol */
|
||||
.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
|
||||
.highlight .fm { color: #0000FF } /* Name.Function.Magic */
|
||||
.highlight .vc { color: #19177C } /* Name.Variable.Class */
|
||||
.highlight .vg { color: #19177C } /* Name.Variable.Global */
|
||||
.highlight .vi { color: #19177C } /* Name.Variable.Instance */
|
||||
.highlight .vm { color: #19177C } /* Name.Variable.Magic */
|
||||
.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */
|
|
@ -1,566 +0,0 @@
|
|||
/*
|
||||
* searchtools.js
|
||||
* ~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx JavaScript utilities for the full-text search.
|
||||
*
|
||||
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
* Simple result scoring code.
|
||||
*/
|
||||
if (typeof Scorer === "undefined") {
|
||||
var Scorer = {
|
||||
// Implement the following function to further tweak the score for each result
|
||||
// The function takes a result array [docname, title, anchor, descr, score, filename]
|
||||
// and returns the new score.
|
||||
/*
|
||||
score: result => {
|
||||
const [docname, title, anchor, descr, score, filename] = result
|
||||
return score
|
||||
},
|
||||
*/
|
||||
|
||||
// query matches the full name of an object
|
||||
objNameMatch: 11,
|
||||
// or matches in the last dotted part of the object name
|
||||
objPartialMatch: 6,
|
||||
// Additive scores depending on the priority of the object
|
||||
objPrio: {
|
||||
0: 15, // used to be importantResults
|
||||
1: 5, // used to be objectResults
|
||||
2: -5, // used to be unimportantResults
|
||||
},
|
||||
// Used when the priority is not in the mapping.
|
||||
objPrioDefault: 0,
|
||||
|
||||
// query found in title
|
||||
title: 15,
|
||||
partialTitle: 7,
|
||||
// query found in terms
|
||||
term: 5,
|
||||
partialTerm: 2,
|
||||
};
|
||||
}
|
||||
|
||||
const _removeChildren = (element) => {
|
||||
while (element && element.lastChild) element.removeChild(element.lastChild);
|
||||
};
|
||||
|
||||
/**
|
||||
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
|
||||
*/
|
||||
const _escapeRegExp = (string) =>
|
||||
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
|
||||
|
||||
const _displayItem = (item, searchTerms) => {
|
||||
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
|
||||
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
|
||||
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
|
||||
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
|
||||
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
|
||||
|
||||
const [docName, title, anchor, descr, score, _filename] = item;
|
||||
|
||||
let listItem = document.createElement("li");
|
||||
let requestUrl;
|
||||
let linkUrl;
|
||||
if (docBuilder === "dirhtml") {
|
||||
// dirhtml builder
|
||||
let dirname = docName + "/";
|
||||
if (dirname.match(/\/index\/$/))
|
||||
dirname = dirname.substring(0, dirname.length - 6);
|
||||
else if (dirname === "index/") dirname = "";
|
||||
requestUrl = docUrlRoot + dirname;
|
||||
linkUrl = requestUrl;
|
||||
} else {
|
||||
// normal html builders
|
||||
requestUrl = docUrlRoot + docName + docFileSuffix;
|
||||
linkUrl = docName + docLinkSuffix;
|
||||
}
|
||||
let linkEl = listItem.appendChild(document.createElement("a"));
|
||||
linkEl.href = linkUrl + anchor;
|
||||
linkEl.dataset.score = score;
|
||||
linkEl.innerHTML = title;
|
||||
if (descr)
|
||||
listItem.appendChild(document.createElement("span")).innerHTML =
|
||||
" (" + descr + ")";
|
||||
else if (showSearchSummary)
|
||||
fetch(requestUrl)
|
||||
.then((responseData) => responseData.text())
|
||||
.then((data) => {
|
||||
if (data)
|
||||
listItem.appendChild(
|
||||
Search.makeSearchSummary(data, searchTerms)
|
||||
);
|
||||
});
|
||||
Search.output.appendChild(listItem);
|
||||
};
|
||||
const _finishSearch = (resultCount) => {
|
||||
Search.stopPulse();
|
||||
Search.title.innerText = _("Search Results");
|
||||
if (!resultCount)
|
||||
Search.status.innerText = Documentation.gettext(
|
||||
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
|
||||
);
|
||||
else
|
||||
Search.status.innerText = _(
|
||||
`Search finished, found ${resultCount} page(s) matching the search query.`
|
||||
);
|
||||
};
|
||||
const _displayNextItem = (
|
||||
results,
|
||||
resultCount,
|
||||
searchTerms
|
||||
) => {
|
||||
// results left, load the summary and display it
|
||||
// this is intended to be dynamic (don't sub resultsCount)
|
||||
if (results.length) {
|
||||
_displayItem(results.pop(), searchTerms);
|
||||
setTimeout(
|
||||
() => _displayNextItem(results, resultCount, searchTerms),
|
||||
5
|
||||
);
|
||||
}
|
||||
// search finished, update title and status message
|
||||
else _finishSearch(resultCount);
|
||||
};
|
||||
|
||||
/**
|
||||
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
|
||||
* custom function per language.
|
||||
*
|
||||
* The regular expression works by splitting the string on consecutive characters
|
||||
* that are not Unicode letters, numbers, underscores, or emoji characters.
|
||||
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
|
||||
*/
|
||||
if (typeof splitQuery === "undefined") {
|
||||
var splitQuery = (query) => query
|
||||
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
|
||||
.filter(term => term) // remove remaining empty strings
|
||||
}
|
||||
|
||||
/**
|
||||
* Search Module
|
||||
*/
|
||||
const Search = {
|
||||
_index: null,
|
||||
_queued_query: null,
|
||||
_pulse_status: -1,
|
||||
|
||||
htmlToText: (htmlString) => {
|
||||
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
|
||||
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
|
||||
const docContent = htmlElement.querySelector('[role="main"]');
|
||||
if (docContent !== undefined) return docContent.textContent;
|
||||
console.warn(
|
||||
"Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
|
||||
);
|
||||
return "";
|
||||
},
|
||||
|
||||
init: () => {
|
||||
const query = new URLSearchParams(window.location.search).get("q");
|
||||
document
|
||||
.querySelectorAll('input[name="q"]')
|
||||
.forEach((el) => (el.value = query));
|
||||
if (query) Search.performSearch(query);
|
||||
},
|
||||
|
||||
loadIndex: (url) =>
|
||||
(document.body.appendChild(document.createElement("script")).src = url),
|
||||
|
||||
setIndex: (index) => {
|
||||
Search._index = index;
|
||||
if (Search._queued_query !== null) {
|
||||
const query = Search._queued_query;
|
||||
Search._queued_query = null;
|
||||
Search.query(query);
|
||||
}
|
||||
},
|
||||
|
||||
hasIndex: () => Search._index !== null,
|
||||
|
||||
deferQuery: (query) => (Search._queued_query = query),
|
||||
|
||||
stopPulse: () => (Search._pulse_status = -1),
|
||||
|
||||
startPulse: () => {
|
||||
if (Search._pulse_status >= 0) return;
|
||||
|
||||
const pulse = () => {
|
||||
Search._pulse_status = (Search._pulse_status + 1) % 4;
|
||||
Search.dots.innerText = ".".repeat(Search._pulse_status);
|
||||
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
|
||||
};
|
||||
pulse();
|
||||
},
|
||||
|
||||
/**
|
||||
* perform a search for something (or wait until index is loaded)
|
||||
*/
|
||||
performSearch: (query) => {
|
||||
// create the required interface elements
|
||||
const searchText = document.createElement("h2");
|
||||
searchText.textContent = _("Searching");
|
||||
const searchSummary = document.createElement("p");
|
||||
searchSummary.classList.add("search-summary");
|
||||
searchSummary.innerText = "";
|
||||
const searchList = document.createElement("ul");
|
||||
searchList.classList.add("search");
|
||||
|
||||
const out = document.getElementById("search-results");
|
||||
Search.title = out.appendChild(searchText);
|
||||
Search.dots = Search.title.appendChild(document.createElement("span"));
|
||||
Search.status = out.appendChild(searchSummary);
|
||||
Search.output = out.appendChild(searchList);
|
||||
|
||||
const searchProgress = document.getElementById("search-progress");
|
||||
// Some themes don't use the search progress node
|
||||
if (searchProgress) {
|
||||
searchProgress.innerText = _("Preparing search...");
|
||||
}
|
||||
Search.startPulse();
|
||||
|
||||
// index already loaded, the browser was quick!
|
||||
if (Search.hasIndex()) Search.query(query);
|
||||
else Search.deferQuery(query);
|
||||
},
|
||||
|
||||
/**
|
||||
* execute search (requires search index to be loaded)
|
||||
*/
|
||||
// Execute a search for `query` against the loaded index (Search._index
// must already be set). Candidate results are gathered from four sources
// (document titles, explicit index entries, object names, full-text terms),
// optionally re-scored, sorted, de-duplicated, and handed to
// _displayNextItem for rendering.
query: (query) => {
  const filenames = Search._index.filenames;
  const docNames = Search._index.docnames;
  const titles = Search._index.titles;
  const allTitles = Search._index.alltitles;
  const indexEntries = Search._index.indexentries;

  // stem the search terms and add them to the correct list
  const stemmer = new Stemmer();
  const searchTerms = new Set();    // stemmed words that must match
  const excludedTerms = new Set();  // stemmed words prefixed with "-"
  const highlightTerms = new Set(); // original (unstemmed) words, for highlighting
  const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
  splitQuery(query.trim()).forEach((queryTerm) => {
    const queryTermLower = queryTerm.toLowerCase();

    // maybe skip this "word"
    // stopwords array is from language_data.js
    if (
      stopwords.indexOf(queryTermLower) !== -1 ||
      queryTerm.match(/^\d+$/)
    )
      return;

    // stem the word
    let word = stemmer.stemWord(queryTermLower);
    // select the correct list
    if (word[0] === "-") excludedTerms.add(word.substr(1));
    else {
      searchTerms.add(word);
      highlightTerms.add(queryTermLower);
    }
  });

  if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
    localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
  }

  // console.debug("SEARCH: searching for:");
  // console.info("required: ", [...searchTerms]);
  // console.info("excluded: ", [...excludedTerms]);

  // array of [docname, title, anchor, descr, score, filename]
  let results = [];
  _removeChildren(document.getElementById("search-progress"));

  const queryLower = query.toLowerCase();
  // match the raw query against document/section titles; a title only
  // qualifies when the query covers at least half of it, and the score is
  // the covered fraction (0-100)
  for (const [title, foundTitles] of Object.entries(allTitles)) {
    if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
      for (const [file, id] of foundTitles) {
        let score = Math.round(100 * queryLower.length / title.length)
        results.push([
          docNames[file],
          // section titles are shown as "page title > section title"
          titles[file] !== title ? `${titles[file]} > ${title}` : title,
          id !== null ? "#" + id : "",
          null,
          score,
          filenames[file],
        ]);
      }
    }
  }

  // search for explicit entries in index directives
  for (const [entry, foundEntries] of Object.entries(indexEntries)) {
    if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
      for (const [file, id] of foundEntries) {
        let score = Math.round(100 * queryLower.length / entry.length)
        results.push([
          docNames[file],
          titles[file],
          id ? "#" + id : "",
          null,
          score,
          filenames[file],
        ]);
      }
    }
  }

  // lookup as object
  objectTerms.forEach((term) =>
    results.push(...Search.performObjectSearch(term, objectTerms))
  );

  // lookup as search terms in fulltext
  results.push(...Search.performTermsSearch(searchTerms, excludedTerms));

  // let the scorer override scores with a custom scoring function
  if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));

  // now sort the results by score (in opposite order of appearance, since the
  // display function below uses pop() to retrieve items) and then
  // alphabetically
  results.sort((a, b) => {
    const leftScore = a[4];
    const rightScore = b[4];
    if (leftScore === rightScore) {
      // same score: sort alphabetically
      const leftTitle = a[1].toLowerCase();
      const rightTitle = b[1].toLowerCase();
      if (leftTitle === rightTitle) return 0;
      return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
    }
    return leftScore > rightScore ? 1 : -1;
  });

  // remove duplicate search results
  // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
  let seen = new Set();
  results = results.reverse().reduce((acc, result) => {
    // dedup key is docname,title,anchor,descr,filename — the score (index 4)
    // is deliberately excluded so differently-scored duplicates collapse
    let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
    if (!seen.has(resultStr)) {
      acc.push(result);
      seen.add(resultStr);
    }
    return acc;
  }, []);

  results = results.reverse();

  // for debugging
  //Search.lastresults = results.slice(); // a copy
  // console.info("search results:", Search.lastresults);

  // print the results
  _displayNextItem(results, results.length, searchTerms);
},
|
||||
|
||||
/**
|
||||
* search for object names
|
||||
*/
|
||||
performObjectSearch: (object, objectTerms) => {
|
||||
const filenames = Search._index.filenames;
|
||||
const docNames = Search._index.docnames;
|
||||
const objects = Search._index.objects;
|
||||
const objNames = Search._index.objnames;
|
||||
const titles = Search._index.titles;
|
||||
|
||||
const results = [];
|
||||
|
||||
const objectSearchCallback = (prefix, match) => {
|
||||
const name = match[4]
|
||||
const fullname = (prefix ? prefix + "." : "") + name;
|
||||
const fullnameLower = fullname.toLowerCase();
|
||||
if (fullnameLower.indexOf(object) < 0) return;
|
||||
|
||||
let score = 0;
|
||||
const parts = fullnameLower.split(".");
|
||||
|
||||
// check for different match types: exact matches of full name or
|
||||
// "last name" (i.e. last dotted part)
|
||||
if (fullnameLower === object || parts.slice(-1)[0] === object)
|
||||
score += Scorer.objNameMatch;
|
||||
else if (parts.slice(-1)[0].indexOf(object) > -1)
|
||||
score += Scorer.objPartialMatch; // matches in last name
|
||||
|
||||
const objName = objNames[match[1]][2];
|
||||
const title = titles[match[0]];
|
||||
|
||||
// If more than one term searched for, we require other words to be
|
||||
// found in the name/title/description
|
||||
const otherTerms = new Set(objectTerms);
|
||||
otherTerms.delete(object);
|
||||
if (otherTerms.size > 0) {
|
||||
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
|
||||
if (
|
||||
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
|
||||
)
|
||||
return;
|
||||
}
|
||||
|
||||
let anchor = match[3];
|
||||
if (anchor === "") anchor = fullname;
|
||||
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
|
||||
|
||||
const descr = objName + _(", in ") + title;
|
||||
|
||||
// add custom score for some objects according to scorer
|
||||
if (Scorer.objPrio.hasOwnProperty(match[2]))
|
||||
score += Scorer.objPrio[match[2]];
|
||||
else score += Scorer.objPrioDefault;
|
||||
|
||||
results.push([
|
||||
docNames[match[0]],
|
||||
fullname,
|
||||
"#" + anchor,
|
||||
descr,
|
||||
score,
|
||||
filenames[match[0]],
|
||||
]);
|
||||
};
|
||||
Object.keys(objects).forEach((prefix) =>
|
||||
objects[prefix].forEach((array) =>
|
||||
objectSearchCallback(prefix, array)
|
||||
)
|
||||
);
|
||||
return results;
|
||||
},
|
||||
|
||||
/**
|
||||
* search for full-text terms in the index
|
||||
*/
|
||||
performTermsSearch: (searchTerms, excludedTerms) => {
|
||||
// prepare search
|
||||
const terms = Search._index.terms;
|
||||
const titleTerms = Search._index.titleterms;
|
||||
const filenames = Search._index.filenames;
|
||||
const docNames = Search._index.docnames;
|
||||
const titles = Search._index.titles;
|
||||
|
||||
const scoreMap = new Map();
|
||||
const fileMap = new Map();
|
||||
|
||||
// perform the search on the required terms
|
||||
searchTerms.forEach((word) => {
|
||||
const files = [];
|
||||
const arr = [
|
||||
{ files: terms[word], score: Scorer.term },
|
||||
{ files: titleTerms[word], score: Scorer.title },
|
||||
];
|
||||
// add support for partial matches
|
||||
if (word.length > 2) {
|
||||
const escapedWord = _escapeRegExp(word);
|
||||
Object.keys(terms).forEach((term) => {
|
||||
if (term.match(escapedWord) && !terms[word])
|
||||
arr.push({ files: terms[term], score: Scorer.partialTerm });
|
||||
});
|
||||
Object.keys(titleTerms).forEach((term) => {
|
||||
if (term.match(escapedWord) && !titleTerms[word])
|
||||
arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
|
||||
});
|
||||
}
|
||||
|
||||
// no match but word was a required one
|
||||
if (arr.every((record) => record.files === undefined)) return;
|
||||
|
||||
// found search word in contents
|
||||
arr.forEach((record) => {
|
||||
if (record.files === undefined) return;
|
||||
|
||||
let recordFiles = record.files;
|
||||
if (recordFiles.length === undefined) recordFiles = [recordFiles];
|
||||
files.push(...recordFiles);
|
||||
|
||||
// set score for the word in each file
|
||||
recordFiles.forEach((file) => {
|
||||
if (!scoreMap.has(file)) scoreMap.set(file, {});
|
||||
scoreMap.get(file)[word] = record.score;
|
||||
});
|
||||
});
|
||||
|
||||
// create the mapping
|
||||
files.forEach((file) => {
|
||||
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
|
||||
fileMap.get(file).push(word);
|
||||
else fileMap.set(file, [word]);
|
||||
});
|
||||
});
|
||||
|
||||
// now check if the files don't contain excluded terms
|
||||
const results = [];
|
||||
for (const [file, wordList] of fileMap) {
|
||||
// check if all requirements are matched
|
||||
|
||||
// as search terms with length < 3 are discarded
|
||||
const filteredTermCount = [...searchTerms].filter(
|
||||
(term) => term.length > 2
|
||||
).length;
|
||||
if (
|
||||
wordList.length !== searchTerms.size &&
|
||||
wordList.length !== filteredTermCount
|
||||
)
|
||||
continue;
|
||||
|
||||
// ensure that none of the excluded terms is in the search result
|
||||
if (
|
||||
[...excludedTerms].some(
|
||||
(term) =>
|
||||
terms[term] === file ||
|
||||
titleTerms[term] === file ||
|
||||
(terms[term] || []).includes(file) ||
|
||||
(titleTerms[term] || []).includes(file)
|
||||
)
|
||||
)
|
||||
break;
|
||||
|
||||
// select one (max) score for the file.
|
||||
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
|
||||
// add result to the result list
|
||||
results.push([
|
||||
docNames[file],
|
||||
titles[file],
|
||||
"",
|
||||
null,
|
||||
score,
|
||||
filenames[file],
|
||||
]);
|
||||
}
|
||||
return results;
|
||||
},
|
||||
|
||||
/**
|
||||
* helper function to return a node containing the
|
||||
* search summary for a given text. keywords is a list
|
||||
* of stemmed words.
|
||||
*/
|
||||
makeSearchSummary: (htmlText, keywords) => {
|
||||
const text = Search.htmlToText(htmlText);
|
||||
if (text === "") return null;
|
||||
|
||||
const textLower = text.toLowerCase();
|
||||
const actualStartPosition = [...keywords]
|
||||
.map((k) => textLower.indexOf(k.toLowerCase()))
|
||||
.filter((i) => i > -1)
|
||||
.slice(-1)[0];
|
||||
const startWithContext = Math.max(actualStartPosition - 120, 0);
|
||||
|
||||
const top = startWithContext === 0 ? "" : "...";
|
||||
const tail = startWithContext + 240 < text.length ? "..." : "";
|
||||
|
||||
let summary = document.createElement("p");
|
||||
summary.classList.add("context");
|
||||
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
|
||||
|
||||
return summary;
|
||||
},
|
||||
};
|
||||
|
||||
// initialize the Search module once the DOM is ready
_ready(Search.init);
|
|
@ -1,744 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" /><meta name="generator" content="Docutils 0.19: https://docutils.sourceforge.io/" />
|
||||
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Welcome to QuaPy’s documentation! — QuaPy: A Python-based open-source framework for quantification 0.1.9 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
|
||||
<link rel="stylesheet" type="text/css" href="_static/css/theme.css" />
|
||||
|
||||
|
||||
<!--[if lt IE 9]>
|
||||
<script src="_static/js/html5shiv.min.js"></script>
|
||||
<![endif]-->
|
||||
|
||||
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
|
||||
<script src="_static/jquery.js"></script>
|
||||
<script src="_static/underscore.js"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
|
||||
<script src="_static/doctools.js"></script>
|
||||
<script src="_static/sphinx_highlight.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
<link rel="next" title="quapy" href="modules.html" />
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="#" class="icon icon-home">
|
||||
QuaPy: A Python-based open-source framework for quantification
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">quapy</a></li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="#">QuaPy: A Python-based open-source framework for quantification</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="#" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item active">Welcome to QuaPy’s documentation!</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
<a href="_sources/index.rst.txt" rel="nofollow"> View page source</a>
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<section id="welcome-to-quapy-s-documentation">
|
||||
<h1>Welcome to QuaPy’s documentation!<a class="headerlink" href="#welcome-to-quapy-s-documentation" title="Permalink to this heading"></a></h1>
|
||||
<p>QuaPy is a Python-based open-source framework for quantification.</p>
|
||||
<p>This document contains the API of the modules included in QuaPy.</p>
|
||||
<section id="installation">
|
||||
<h2>Installation<a class="headerlink" href="#installation" title="Permalink to this heading"></a></h2>
|
||||
<p><cite>pip install quapy</cite></p>
|
||||
</section>
|
||||
<section id="github">
|
||||
<h2>GitHub<a class="headerlink" href="#github" title="Permalink to this heading"></a></h2>
|
||||
<p>QuaPy is hosted in GitHub at <a class="reference external" href="https://github.com/HLT-ISTI/QuaPy">https://github.com/HLT-ISTI/QuaPy</a></p>
|
||||
<div class="toctree-wrapper compound">
|
||||
</div>
|
||||
</section>
|
||||
<section id="contents">
|
||||
<h2>Contents<a class="headerlink" href="#contents" title="Permalink to this heading"></a></h2>
|
||||
<div class="toctree-wrapper compound">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">quapy</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html">quapy package</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#subpackages">Subpackages</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html">quapy.classification package</a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.calibration">quapy.classification.calibration module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.BCTSCalibration"><code class="docutils literal notranslate"><span class="pre">BCTSCalibration</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.NBVSCalibration"><code class="docutils literal notranslate"><span class="pre">NBVSCalibration</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifier"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifier</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.classes_"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.classes_</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_cv"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.fit_cv()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_tr_val"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.fit_tr_val()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.predict()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict_proba"><code class="docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase.predict_proba()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.TSCalibration"><code class="docutils literal notranslate"><span class="pre">TSCalibration</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.calibration.VSCalibration"><code class="docutils literal notranslate"><span class="pre">VSCalibration</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.methods">quapy.classification.methods module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.fit"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.get_params"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.predict"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.predict()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.predict_proba"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.predict_proba()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.set_params"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.set_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.methods.LowRankLogisticRegression.transform"><code class="docutils literal notranslate"><span class="pre">LowRankLogisticRegression.transform()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.neural">quapy.classification.neural module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.CNNnet"><code class="docutils literal notranslate"><span class="pre">CNNnet</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.CNNnet.document_embedding"><code class="docutils literal notranslate"><span class="pre">CNNnet.document_embedding()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.CNNnet.get_params"><code class="docutils literal notranslate"><span class="pre">CNNnet.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.CNNnet.training"><code class="docutils literal notranslate"><span class="pre">CNNnet.training</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.CNNnet.vocabulary_size"><code class="docutils literal notranslate"><span class="pre">CNNnet.vocabulary_size</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.LSTMnet"><code class="docutils literal notranslate"><span class="pre">LSTMnet</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.LSTMnet.document_embedding"><code class="docutils literal notranslate"><span class="pre">LSTMnet.document_embedding()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.LSTMnet.get_params"><code class="docutils literal notranslate"><span class="pre">LSTMnet.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.LSTMnet.training"><code class="docutils literal notranslate"><span class="pre">LSTMnet.training</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.LSTMnet.vocabulary_size"><code class="docutils literal notranslate"><span class="pre">LSTMnet.vocabulary_size</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.device"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.device</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.fit"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.get_params"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.predict"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.predict()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.predict_proba"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.predict_proba()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.reset_net_params"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.reset_net_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.set_params"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.set_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.transform"><code class="docutils literal notranslate"><span class="pre">NeuralClassifierTrainer.transform()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.dimensions"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.dimensions()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.document_embedding"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.document_embedding()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.forward"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.forward()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.get_params"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.predict_proba"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.predict_proba()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.training"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.training</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.vocabulary_size"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.vocabulary_size</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TextClassifierNet.xavier_uniform"><code class="docutils literal notranslate"><span class="pre">TextClassifierNet.xavier_uniform()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TorchDataset"><code class="docutils literal notranslate"><span class="pre">TorchDataset</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.neural.TorchDataset.asDataloader"><code class="docutils literal notranslate"><span class="pre">TorchDataset.asDataloader()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.svmperf">quapy.classification.svmperf module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf"><code class="docutils literal notranslate"><span class="pre">SVMperf</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.decision_function"><code class="docutils literal notranslate"><span class="pre">SVMperf.decision_function()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.fit"><code class="docutils literal notranslate"><span class="pre">SVMperf.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.predict"><code class="docutils literal notranslate"><span class="pre">SVMperf.predict()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.valid_losses"><code class="docutils literal notranslate"><span class="pre">SVMperf.valid_losses</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.classification.html#module-quapy.classification">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html">quapy.data package</a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#module-quapy.data.base">quapy.data.base module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset"><code class="docutils literal notranslate"><span class="pre">Dataset</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.SplitStratified"><code class="docutils literal notranslate"><span class="pre">Dataset.SplitStratified()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.binary"><code class="docutils literal notranslate"><span class="pre">Dataset.binary</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.classes_"><code class="docutils literal notranslate"><span class="pre">Dataset.classes_</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.kFCV"><code class="docutils literal notranslate"><span class="pre">Dataset.kFCV()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.load"><code class="docutils literal notranslate"><span class="pre">Dataset.load()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.n_classes"><code class="docutils literal notranslate"><span class="pre">Dataset.n_classes</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.reduce"><code class="docutils literal notranslate"><span class="pre">Dataset.reduce()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.stats"><code class="docutils literal notranslate"><span class="pre">Dataset.stats()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.train_test"><code class="docutils literal notranslate"><span class="pre">Dataset.train_test</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.Dataset.vocabulary_size"><code class="docutils literal notranslate"><span class="pre">Dataset.vocabulary_size</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection"><code class="docutils literal notranslate"><span class="pre">LabelledCollection</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.X"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.X</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.Xp"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.Xp</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.Xy"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.Xy</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.binary"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.binary</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.counts"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.counts()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.join"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.join()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.kFCV"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.kFCV()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.load"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.load()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.n_classes"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.n_classes</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.p"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.p</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.prevalence"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.prevalence()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.sampling"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.sampling()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.sampling_from_index"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.sampling_from_index()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.sampling_index"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.sampling_index()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.split_random"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.split_random()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.split_stratified"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.split_stratified()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.stats"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.stats()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.uniform_sampling"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.uniform_sampling()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.uniform_sampling_index"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.uniform_sampling_index()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection.y"><code class="docutils literal notranslate"><span class="pre">LabelledCollection.y</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#module-quapy.data.datasets">quapy.data.datasets module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_IFCB"><code class="docutils literal notranslate"><span class="pre">fetch_IFCB()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_UCIBinaryDataset"><code class="docutils literal notranslate"><span class="pre">fetch_UCIBinaryDataset()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_UCIBinaryLabelledCollection"><code class="docutils literal notranslate"><span class="pre">fetch_UCIBinaryLabelledCollection()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_UCIMulticlassDataset"><code class="docutils literal notranslate"><span class="pre">fetch_UCIMulticlassDataset()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_UCIMulticlassLabelledCollection"><code class="docutils literal notranslate"><span class="pre">fetch_UCIMulticlassLabelledCollection()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_lequa2022"><code class="docutils literal notranslate"><span class="pre">fetch_lequa2022()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_reviews"><code class="docutils literal notranslate"><span class="pre">fetch_reviews()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.fetch_twitter"><code class="docutils literal notranslate"><span class="pre">fetch_twitter()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.datasets.warn"><code class="docutils literal notranslate"><span class="pre">warn()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#module-quapy.data.preprocessing">quapy.data.preprocessing module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer"><code class="docutils literal notranslate"><span class="pre">IndexTransformer</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer.add_word"><code class="docutils literal notranslate"><span class="pre">IndexTransformer.add_word()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer.fit"><code class="docutils literal notranslate"><span class="pre">IndexTransformer.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer.fit_transform"><code class="docutils literal notranslate"><span class="pre">IndexTransformer.fit_transform()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer.transform"><code class="docutils literal notranslate"><span class="pre">IndexTransformer.transform()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.IndexTransformer.vocabulary_size"><code class="docutils literal notranslate"><span class="pre">IndexTransformer.vocabulary_size()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.index"><code class="docutils literal notranslate"><span class="pre">index()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.reduce_columns"><code class="docutils literal notranslate"><span class="pre">reduce_columns()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.standardize"><code class="docutils literal notranslate"><span class="pre">standardize()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.preprocessing.text2tfidf"><code class="docutils literal notranslate"><span class="pre">text2tfidf()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#module-quapy.data.reader">quapy.data.reader module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.reader.binarize"><code class="docutils literal notranslate"><span class="pre">binarize()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.reader.from_csv"><code class="docutils literal notranslate"><span class="pre">from_csv()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.reader.from_sparse"><code class="docutils literal notranslate"><span class="pre">from_sparse()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.reader.from_text"><code class="docutils literal notranslate"><span class="pre">from_text()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.data.html#quapy.data.reader.reindex_labels"><code class="docutils literal notranslate"><span class="pre">reindex_labels()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.data.html#module-quapy.data">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html">quapy.method package</a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#module-quapy.method.aggregative">quapy.method.aggregative module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC"><code class="docutils literal notranslate"><span class="pre">ACC</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.METHODS"><code class="docutils literal notranslate"><span class="pre">ACC.METHODS</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.NORMALIZATIONS"><code class="docutils literal notranslate"><span class="pre">ACC.NORMALIZATIONS</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.SOLVERS"><code class="docutils literal notranslate"><span class="pre">ACC.SOLVERS</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.aggregate"><code class="docutils literal notranslate"><span class="pre">ACC.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">ACC.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.getPteCondEstim"><code class="docutils literal notranslate"><span class="pre">ACC.getPteCondEstim()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ACC.newInvariantRatioEstimation"><code class="docutils literal notranslate"><span class="pre">ACC.newInvariantRatioEstimation()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AdjustedClassifyAndCount"><code class="docutils literal notranslate"><span class="pre">AdjustedClassifyAndCount</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeCrispQuantifier"><code class="docutils literal notranslate"><span class="pre">AggregativeCrispQuantifier</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeMedianEstimator"><code class="docutils literal notranslate"><span class="pre">AggregativeMedianEstimator</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeMedianEstimator.fit"><code class="docutils literal notranslate"><span class="pre">AggregativeMedianEstimator.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeMedianEstimator.get_params"><code class="docutils literal notranslate"><span class="pre">AggregativeMedianEstimator.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeMedianEstimator.quantify"><code class="docutils literal notranslate"><span class="pre">AggregativeMedianEstimator.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeMedianEstimator.set_params"><code class="docutils literal notranslate"><span class="pre">AggregativeMedianEstimator.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.aggregate"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classes_"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.classes_</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classifier"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.classifier</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classifier_fit_predict"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.classifier_fit_predict()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classify"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.classify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.fit"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.quantify"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.val_split"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.val_split</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.val_split_"><code class="docutils literal notranslate"><span class="pre">AggregativeQuantifier.val_split_</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.AggregativeSoftQuantifier"><code class="docutils literal notranslate"><span class="pre">AggregativeSoftQuantifier</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC"><code class="docutils literal notranslate"><span class="pre">BayesianCC</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC.aggregate"><code class="docutils literal notranslate"><span class="pre">BayesianCC.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">BayesianCC.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC.get_conditional_probability_samples"><code class="docutils literal notranslate"><span class="pre">BayesianCC.get_conditional_probability_samples()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC.get_prevalence_samples"><code class="docutils literal notranslate"><span class="pre">BayesianCC.get_prevalence_samples()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BayesianCC.sample_from_posterior"><code class="docutils literal notranslate"><span class="pre">BayesianCC.sample_from_posterior()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BinaryAggregativeQuantifier"><code class="docutils literal notranslate"><span class="pre">BinaryAggregativeQuantifier</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BinaryAggregativeQuantifier.fit"><code class="docutils literal notranslate"><span class="pre">BinaryAggregativeQuantifier.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BinaryAggregativeQuantifier.neg_label"><code class="docutils literal notranslate"><span class="pre">BinaryAggregativeQuantifier.neg_label</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.BinaryAggregativeQuantifier.pos_label"><code class="docutils literal notranslate"><span class="pre">BinaryAggregativeQuantifier.pos_label</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.CC"><code class="docutils literal notranslate"><span class="pre">CC</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.CC.aggregate"><code class="docutils literal notranslate"><span class="pre">CC.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.CC.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">CC.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ClassifyAndCount"><code class="docutils literal notranslate"><span class="pre">ClassifyAndCount</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DMy"><code class="docutils literal notranslate"><span class="pre">DMy</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DMy.aggregate"><code class="docutils literal notranslate"><span class="pre">DMy.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DMy.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">DMy.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DistributionMatchingY"><code class="docutils literal notranslate"><span class="pre">DistributionMatchingY</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DyS"><code class="docutils literal notranslate"><span class="pre">DyS</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DyS.aggregate"><code class="docutils literal notranslate"><span class="pre">DyS.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.DyS.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">DyS.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ"><code class="docutils literal notranslate"><span class="pre">EMQ</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.EM"><code class="docutils literal notranslate"><span class="pre">EMQ.EM()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.EMQ_BCTS"><code class="docutils literal notranslate"><span class="pre">EMQ.EMQ_BCTS()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.EPSILON"><code class="docutils literal notranslate"><span class="pre">EMQ.EPSILON</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.MAX_ITER"><code class="docutils literal notranslate"><span class="pre">EMQ.MAX_ITER</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.aggregate"><code class="docutils literal notranslate"><span class="pre">EMQ.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">EMQ.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.classify"><code class="docutils literal notranslate"><span class="pre">EMQ.classify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.EMQ.predict_proba"><code class="docutils literal notranslate"><span class="pre">EMQ.predict_proba()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ExpectationMaximizationQuantifier"><code class="docutils literal notranslate"><span class="pre">ExpectationMaximizationQuantifier</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.HDy"><code class="docutils literal notranslate"><span class="pre">HDy</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.HDy.aggregate"><code class="docutils literal notranslate"><span class="pre">HDy.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.HDy.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">HDy.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.HellingerDistanceY"><code class="docutils literal notranslate"><span class="pre">HellingerDistanceY</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.OneVsAllAggregative"><code class="docutils literal notranslate"><span class="pre">OneVsAllAggregative</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.OneVsAllAggregative.aggregate"><code class="docutils literal notranslate"><span class="pre">OneVsAllAggregative.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.OneVsAllAggregative.classify"><code class="docutils literal notranslate"><span class="pre">OneVsAllAggregative.classify()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PACC"><code class="docutils literal notranslate"><span class="pre">PACC</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PACC.aggregate"><code class="docutils literal notranslate"><span class="pre">PACC.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PACC.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">PACC.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PACC.getPteCondEstim"><code class="docutils literal notranslate"><span class="pre">PACC.getPteCondEstim()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PCC"><code class="docutils literal notranslate"><span class="pre">PCC</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PCC.aggregate"><code class="docutils literal notranslate"><span class="pre">PCC.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.PCC.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">PCC.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ProbabilisticAdjustedClassifyAndCount"><code class="docutils literal notranslate"><span class="pre">ProbabilisticAdjustedClassifyAndCount</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.ProbabilisticClassifyAndCount"><code class="docutils literal notranslate"><span class="pre">ProbabilisticClassifyAndCount</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.SLD"><code class="docutils literal notranslate"><span class="pre">SLD</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.SMM"><code class="docutils literal notranslate"><span class="pre">SMM</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.SMM.aggregate"><code class="docutils literal notranslate"><span class="pre">SMM.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.SMM.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">SMM.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.newELM"><code class="docutils literal notranslate"><span class="pre">newELM()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.newSVMAE"><code class="docutils literal notranslate"><span class="pre">newSVMAE()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.newSVMKLD"><code class="docutils literal notranslate"><span class="pre">newSVMKLD()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.newSVMQ"><code class="docutils literal notranslate"><span class="pre">newSVMQ()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.aggregative.newSVMRAE"><code class="docutils literal notranslate"><span class="pre">newSVMRAE()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEBase"><code class="docutils literal notranslate"><span class="pre">KDEBase</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEBase.BANDWIDTH_METHOD"><code class="docutils literal notranslate"><span class="pre">KDEBase.BANDWIDTH_METHOD</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEBase.get_kde_function"><code class="docutils literal notranslate"><span class="pre">KDEBase.get_kde_function()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEBase.get_mixture_components"><code class="docutils literal notranslate"><span class="pre">KDEBase.get_mixture_components()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEBase.pdf"><code class="docutils literal notranslate"><span class="pre">KDEBase.pdf()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyCS"><code class="docutils literal notranslate"><span class="pre">KDEyCS</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyCS.aggregate"><code class="docutils literal notranslate"><span class="pre">KDEyCS.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyCS.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">KDEyCS.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyCS.gram_matrix_mix_sum"><code class="docutils literal notranslate"><span class="pre">KDEyCS.gram_matrix_mix_sum()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyHD"><code class="docutils literal notranslate"><span class="pre">KDEyHD</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyHD.aggregate"><code class="docutils literal notranslate"><span class="pre">KDEyHD.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyHD.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">KDEyHD.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyML"><code class="docutils literal notranslate"><span class="pre">KDEyML</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyML.aggregate"><code class="docutils literal notranslate"><span class="pre">KDEyML.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._kdey.KDEyML.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">KDEyML.aggregation_fit()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetModule"><code class="docutils literal notranslate"><span class="pre">QuaNetModule</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetModule.device"><code class="docutils literal notranslate"><span class="pre">QuaNetModule.device</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetModule.forward"><code class="docutils literal notranslate"><span class="pre">QuaNetModule.forward()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetModule.training"><code class="docutils literal notranslate"><span class="pre">QuaNetModule.training</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.classes_"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.classes_</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.clean_checkpoint"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.clean_checkpoint()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.clean_checkpoint_dir"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.clean_checkpoint_dir()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.fit"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.get_params"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.quantify"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._neural.QuaNetTrainer.set_params"><code class="docutils literal notranslate"><span class="pre">QuaNetTrainer.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._neural.mae_loss"><code class="docutils literal notranslate"><span class="pre">mae_loss()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MAX"><code class="docutils literal notranslate"><span class="pre">MAX</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MAX.condition"><code class="docutils literal notranslate"><span class="pre">MAX.condition()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS"><code class="docutils literal notranslate"><span class="pre">MS</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS.aggregate"><code class="docutils literal notranslate"><span class="pre">MS.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">MS.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS.condition"><code class="docutils literal notranslate"><span class="pre">MS.condition()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS2"><code class="docutils literal notranslate"><span class="pre">MS2</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.MS2.discard"><code class="docutils literal notranslate"><span class="pre">MS2.discard()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.T50"><code class="docutils literal notranslate"><span class="pre">T50</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.T50.condition"><code class="docutils literal notranslate"><span class="pre">T50.condition()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization.aggregate"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization.aggregate()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization.aggregate_with_threshold"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization.aggregate_with_threshold()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization.aggregation_fit"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization.aggregation_fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization.condition"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization.condition()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.ThresholdOptimization.discard"><code class="docutils literal notranslate"><span class="pre">ThresholdOptimization.discard()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.X"><code class="docutils literal notranslate"><span class="pre">X</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method._threshold_optim.X.condition"><code class="docutils literal notranslate"><span class="pre">X.condition()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#module-quapy.method.base">quapy.method.base module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.base.BaseQuantifier"><code class="docutils literal notranslate"><span class="pre">BaseQuantifier</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.base.BaseQuantifier.fit"><code class="docutils literal notranslate"><span class="pre">BaseQuantifier.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.base.BaseQuantifier.quantify"><code class="docutils literal notranslate"><span class="pre">BaseQuantifier.quantify()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.base.BinaryQuantifier"><code class="docutils literal notranslate"><span class="pre">BinaryQuantifier</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.base.OneVsAll"><code class="docutils literal notranslate"><span class="pre">OneVsAll</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.base.OneVsAllGeneric"><code class="docutils literal notranslate"><span class="pre">OneVsAllGeneric</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.base.OneVsAllGeneric.classes_"><code class="docutils literal notranslate"><span class="pre">OneVsAllGeneric.classes_</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.base.OneVsAllGeneric.fit"><code class="docutils literal notranslate"><span class="pre">OneVsAllGeneric.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.base.OneVsAllGeneric.quantify"><code class="docutils literal notranslate"><span class="pre">OneVsAllGeneric.quantify()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.base.newOneVsAll"><code class="docutils literal notranslate"><span class="pre">newOneVsAll()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#module-quapy.method.meta">quapy.method.meta module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.EACC"><code class="docutils literal notranslate"><span class="pre">EACC()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.ECC"><code class="docutils literal notranslate"><span class="pre">ECC()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.EEMQ"><code class="docutils literal notranslate"><span class="pre">EEMQ()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.EHDy"><code class="docutils literal notranslate"><span class="pre">EHDy()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.EPACC"><code class="docutils literal notranslate"><span class="pre">EPACC()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble"><code class="docutils literal notranslate"><span class="pre">Ensemble</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.VALID_POLICIES"><code class="docutils literal notranslate"><span class="pre">Ensemble.VALID_POLICIES</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.aggregative"><code class="docutils literal notranslate"><span class="pre">Ensemble.aggregative</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.fit"><code class="docutils literal notranslate"><span class="pre">Ensemble.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.get_params"><code class="docutils literal notranslate"><span class="pre">Ensemble.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.probabilistic"><code class="docutils literal notranslate"><span class="pre">Ensemble.probabilistic</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.quantify"><code class="docutils literal notranslate"><span class="pre">Ensemble.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.Ensemble.set_params"><code class="docutils literal notranslate"><span class="pre">Ensemble.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator"><code class="docutils literal notranslate"><span class="pre">MedianEstimator</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator.fit"><code class="docutils literal notranslate"><span class="pre">MedianEstimator.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator.get_params"><code class="docutils literal notranslate"><span class="pre">MedianEstimator.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator.quantify"><code class="docutils literal notranslate"><span class="pre">MedianEstimator.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator.set_params"><code class="docutils literal notranslate"><span class="pre">MedianEstimator.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator2"><code class="docutils literal notranslate"><span class="pre">MedianEstimator2</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator2.fit"><code class="docutils literal notranslate"><span class="pre">MedianEstimator2.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator2.get_params"><code class="docutils literal notranslate"><span class="pre">MedianEstimator2.get_params()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator2.quantify"><code class="docutils literal notranslate"><span class="pre">MedianEstimator2.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.meta.MedianEstimator2.set_params"><code class="docutils literal notranslate"><span class="pre">MedianEstimator2.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.ensembleFactory"><code class="docutils literal notranslate"><span class="pre">ensembleFactory()</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.meta.get_probability_distribution"><code class="docutils literal notranslate"><span class="pre">get_probability_distribution()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#module-quapy.method.non_aggregative">quapy.method.non_aggregative module</a><ul>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.DMx"><code class="docutils literal notranslate"><span class="pre">DMx</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.DMx.HDx"><code class="docutils literal notranslate"><span class="pre">DMx.HDx()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.DMx.fit"><code class="docutils literal notranslate"><span class="pre">DMx.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.DMx.quantify"><code class="docutils literal notranslate"><span class="pre">DMx.quantify()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.DistributionMatchingX"><code class="docutils literal notranslate"><span class="pre">DistributionMatchingX</span></code></a></li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation"><code class="docutils literal notranslate"><span class="pre">MaximumLikelihoodPrevalenceEstimation</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.fit"><code class="docutils literal notranslate"><span class="pre">MaximumLikelihoodPrevalenceEstimation.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.quantify"><code class="docutils literal notranslate"><span class="pre">MaximumLikelihoodPrevalenceEstimation.quantify()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l6"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.ReadMe"><code class="docutils literal notranslate"><span class="pre">ReadMe</span></code></a><ul>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.ReadMe.fit"><code class="docutils literal notranslate"><span class="pre">ReadMe.fit()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.ReadMe.quantify"><code class="docutils literal notranslate"><span class="pre">ReadMe.quantify()</span></code></a></li>
|
||||
<li class="toctree-l7"><a class="reference internal" href="quapy.method.html#quapy.method.non_aggregative.ReadMe.std_constrained_linear_ls"><code class="docutils literal notranslate"><span class="pre">ReadMe.std_constrained_linear_ls()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.method.html#module-quapy.method">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.error">quapy.error module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.absolute_error"><code class="docutils literal notranslate"><span class="pre">absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.acc_error"><code class="docutils literal notranslate"><span class="pre">acc_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.acce"><code class="docutils literal notranslate"><span class="pre">acce()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.ae"><code class="docutils literal notranslate"><span class="pre">ae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.f1_error"><code class="docutils literal notranslate"><span class="pre">f1_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.f1e"><code class="docutils literal notranslate"><span class="pre">f1e()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.from_name"><code class="docutils literal notranslate"><span class="pre">from_name()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.kld"><code class="docutils literal notranslate"><span class="pre">kld()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mae"><code class="docutils literal notranslate"><span class="pre">mae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mean_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mean_normalized_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_normalized_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mean_normalized_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_normalized_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mean_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mkld"><code class="docutils literal notranslate"><span class="pre">mkld()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mnae"><code class="docutils literal notranslate"><span class="pre">mnae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mnkld"><code class="docutils literal notranslate"><span class="pre">mnkld()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mnrae"><code class="docutils literal notranslate"><span class="pre">mnrae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mrae"><code class="docutils literal notranslate"><span class="pre">mrae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.mse"><code class="docutils literal notranslate"><span class="pre">mse()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.nae"><code class="docutils literal notranslate"><span class="pre">nae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.nkld"><code class="docutils literal notranslate"><span class="pre">nkld()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.normalized_absolute_error"><code class="docutils literal notranslate"><span class="pre">normalized_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.normalized_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">normalized_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.nrae"><code class="docutils literal notranslate"><span class="pre">nrae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.rae"><code class="docutils literal notranslate"><span class="pre">rae()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.se"><code class="docutils literal notranslate"><span class="pre">se()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.error.smooth"><code class="docutils literal notranslate"><span class="pre">smooth()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.evaluation">quapy.evaluation module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluate"><code class="docutils literal notranslate"><span class="pre">evaluate()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluate_on_samples"><code class="docutils literal notranslate"><span class="pre">evaluate_on_samples()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluation_report"><code class="docutils literal notranslate"><span class="pre">evaluation_report()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.evaluation.prediction"><code class="docutils literal notranslate"><span class="pre">prediction()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.functional">quapy.functional module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.HellingerDistance"><code class="docutils literal notranslate"><span class="pre">HellingerDistance()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.TopsoeDistance"><code class="docutils literal notranslate"><span class="pre">TopsoeDistance()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.argmin_prevalence"><code class="docutils literal notranslate"><span class="pre">argmin_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.as_binary_prevalence"><code class="docutils literal notranslate"><span class="pre">as_binary_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.check_prevalence_vector"><code class="docutils literal notranslate"><span class="pre">check_prevalence_vector()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.clip"><code class="docutils literal notranslate"><span class="pre">clip()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.condsoftmax"><code class="docutils literal notranslate"><span class="pre">condsoftmax()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.counts_from_labels"><code class="docutils literal notranslate"><span class="pre">counts_from_labels()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.get_divergence"><code class="docutils literal notranslate"><span class="pre">get_divergence()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.get_nprevpoints_approximation"><code class="docutils literal notranslate"><span class="pre">get_nprevpoints_approximation()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.l1_norm"><code class="docutils literal notranslate"><span class="pre">l1_norm()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.linear_search"><code class="docutils literal notranslate"><span class="pre">linear_search()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.normalize_prevalence"><code class="docutils literal notranslate"><span class="pre">normalize_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.num_prevalence_combinations"><code class="docutils literal notranslate"><span class="pre">num_prevalence_combinations()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.optim_minimize"><code class="docutils literal notranslate"><span class="pre">optim_minimize()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_from_labels"><code class="docutils literal notranslate"><span class="pre">prevalence_from_labels()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_from_probabilities"><code class="docutils literal notranslate"><span class="pre">prevalence_from_probabilities()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_linspace"><code class="docutils literal notranslate"><span class="pre">prevalence_linspace()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.projection_simplex_sort"><code class="docutils literal notranslate"><span class="pre">projection_simplex_sort()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.softmax"><code class="docutils literal notranslate"><span class="pre">softmax()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.solve_adjustment"><code class="docutils literal notranslate"><span class="pre">solve_adjustment()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.solve_adjustment_binary"><code class="docutils literal notranslate"><span class="pre">solve_adjustment_binary()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.strprev"><code class="docutils literal notranslate"><span class="pre">strprev()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.ternary_search"><code class="docutils literal notranslate"><span class="pre">ternary_search()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.uniform_prevalence_sampling"><code class="docutils literal notranslate"><span class="pre">uniform_prevalence_sampling()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.functional.uniform_simplex_sampling"><code class="docutils literal notranslate"><span class="pre">uniform_simplex_sampling()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.model_selection">quapy.model_selection module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus"><code class="docutils literal notranslate"><span class="pre">ConfigStatus</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus.failed"><code class="docutils literal notranslate"><span class="pre">ConfigStatus.failed()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus.success"><code class="docutils literal notranslate"><span class="pre">ConfigStatus.success()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ"><code class="docutils literal notranslate"><span class="pre">GridSearchQ</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.best_model"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.best_model()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.fit"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.fit()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.get_params"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.get_params()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.quantify"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.quantify()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.set_params"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.Status"><code class="docutils literal notranslate"><span class="pre">Status</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.ERROR"><code class="docutils literal notranslate"><span class="pre">Status.ERROR</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.INVALID"><code class="docutils literal notranslate"><span class="pre">Status.INVALID</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.SUCCESS"><code class="docutils literal notranslate"><span class="pre">Status.SUCCESS</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.TIMEOUT"><code class="docutils literal notranslate"><span class="pre">Status.TIMEOUT</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.cross_val_predict"><code class="docutils literal notranslate"><span class="pre">cross_val_predict()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.expand_grid"><code class="docutils literal notranslate"><span class="pre">expand_grid()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.group_params"><code class="docutils literal notranslate"><span class="pre">group_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.plot">quapy.plot module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.plot.binary_bias_bins"><code class="docutils literal notranslate"><span class="pre">binary_bias_bins()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.plot.binary_bias_global"><code class="docutils literal notranslate"><span class="pre">binary_bias_global()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.plot.binary_diagonal"><code class="docutils literal notranslate"><span class="pre">binary_diagonal()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.plot.brokenbar_supremacy_by_drift"><code class="docutils literal notranslate"><span class="pre">brokenbar_supremacy_by_drift()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.plot.error_by_drift"><code class="docutils literal notranslate"><span class="pre">error_by_drift()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.protocol">quapy.protocol module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.APP"><code class="docutils literal notranslate"><span class="pre">APP</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.APP.prevalence_grid"><code class="docutils literal notranslate"><span class="pre">APP.prevalence_grid()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.APP.sample"><code class="docutils literal notranslate"><span class="pre">APP.sample()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.APP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">APP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.APP.total"><code class="docutils literal notranslate"><span class="pre">APP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractProtocol"><code class="docutils literal notranslate"><span class="pre">AbstractProtocol</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractProtocol.total"><code class="docutils literal notranslate"><span class="pre">AbstractProtocol.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.collator"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.collator()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.random_state"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.random_state</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.sample"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.sample()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.samples_parameters"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.samples_parameters()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.ArtificialPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">ArtificialPrevalenceProtocol</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer"><code class="docutils literal notranslate"><span class="pre">DomainMixer</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.sample"><code class="docutils literal notranslate"><span class="pre">DomainMixer.sample()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.samples_parameters"><code class="docutils literal notranslate"><span class="pre">DomainMixer.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.total"><code class="docutils literal notranslate"><span class="pre">DomainMixer.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.IterateProtocol"><code class="docutils literal notranslate"><span class="pre">IterateProtocol</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.IterateProtocol.total"><code class="docutils literal notranslate"><span class="pre">IterateProtocol.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.NPP"><code class="docutils literal notranslate"><span class="pre">NPP</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.sample"><code class="docutils literal notranslate"><span class="pre">NPP.sample()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">NPP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.total"><code class="docutils literal notranslate"><span class="pre">NPP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.NaturalPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">NaturalPrevalenceProtocol</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.RETURN_TYPES</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.get_collator"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.get_collator()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.get_labelled_collection()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.on_preclassified_instances()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.UPP"><code class="docutils literal notranslate"><span class="pre">UPP</span></code></a><ul>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.sample"><code class="docutils literal notranslate"><span class="pre">UPP.sample()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">UPP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l5"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.total"><code class="docutils literal notranslate"><span class="pre">UPP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.UniformPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">UniformPrevalenceProtocol</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.util">quapy.util module</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.EarlyStop"><code class="docutils literal notranslate"><span class="pre">EarlyStop</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.create_if_not_exist"><code class="docutils literal notranslate"><span class="pre">create_if_not_exist()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.create_parent_dir"><code class="docutils literal notranslate"><span class="pre">create_parent_dir()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.download_file"><code class="docutils literal notranslate"><span class="pre">download_file()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.download_file_if_not_exists"><code class="docutils literal notranslate"><span class="pre">download_file_if_not_exists()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.get_quapy_home"><code class="docutils literal notranslate"><span class="pre">get_quapy_home()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.map_parallel"><code class="docutils literal notranslate"><span class="pre">map_parallel()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.parallel"><code class="docutils literal notranslate"><span class="pre">parallel()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.parallel_unpack"><code class="docutils literal notranslate"><span class="pre">parallel_unpack()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.pickled_resource"><code class="docutils literal notranslate"><span class="pre">pickled_resource()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.save_text_file"><code class="docutils literal notranslate"><span class="pre">save_text_file()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.temp_seed"><code class="docutils literal notranslate"><span class="pre">temp_seed()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.util.timeout"><code class="docutils literal notranslate"><span class="pre">timeout()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
</section>
|
||||
<section id="indices-and-tables">
|
||||
<h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this heading"></a></h1>
|
||||
<ul class="simple">
|
||||
<li><p><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></p></li>
|
||||
<li><p><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></p></li>
|
||||
<li><p><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></p></li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
|
||||
<a href="modules.html" class="btn btn-neutral float-right" title="quapy" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2024, Alejandro Moreo.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -1,323 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" /><meta name="generator" content="Docutils 0.19: https://docutils.sourceforge.io/" />
|
||||
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>quapy — QuaPy: A Python-based open-source framework for quantification 0.1.9 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
|
||||
<link rel="stylesheet" type="text/css" href="_static/css/theme.css" />
|
||||
|
||||
|
||||
<!--[if lt IE 9]>
|
||||
<script src="_static/js/html5shiv.min.js"></script>
|
||||
<![endif]-->
|
||||
|
||||
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
|
||||
<script src="_static/jquery.js"></script>
|
||||
<script src="_static/underscore.js"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
|
||||
<script src="_static/doctools.js"></script>
|
||||
<script src="_static/sphinx_highlight.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
<link rel="next" title="quapy package" href="quapy.html" />
|
||||
<link rel="prev" title="Welcome to QuaPy’s documentation!" href="index.html" />
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="index.html" class="icon icon-home">
|
||||
QuaPy: A Python-based open-source framework for quantification
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<ul class="current">
|
||||
<li class="toctree-l1 current"><a class="current reference internal" href="#">quapy</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html">quapy package</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="index.html">QuaPy: A Python-based open-source framework for quantification</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="index.html" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item active">quapy</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
<a href="_sources/modules.rst.txt" rel="nofollow"> View page source</a>
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<section id="quapy">
|
||||
<h1>quapy<a class="headerlink" href="#quapy" title="Permalink to this heading"></a></h1>
|
||||
<div class="toctree-wrapper compound">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="quapy.html">quapy package</a><ul>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#subpackages">Subpackages</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.classification.html">quapy.classification package</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.calibration">quapy.classification.calibration module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.methods">quapy.classification.methods module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.neural">quapy.classification.neural module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#module-quapy.classification.svmperf">quapy.classification.svmperf module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.classification.html#module-quapy.classification">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.data.html">quapy.data package</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#module-quapy.data.base">quapy.data.base module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#module-quapy.data.datasets">quapy.data.datasets module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#module-quapy.data.preprocessing">quapy.data.preprocessing module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#module-quapy.data.reader">quapy.data.reader module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html#module-quapy.data">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.method.html">quapy.method package</a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#module-quapy.method.aggregative">quapy.method.aggregative module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#module-quapy.method.base">quapy.method.base module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#module-quapy.method.meta">quapy.method.meta module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#module-quapy.method.non_aggregative">quapy.method.non_aggregative module</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html#module-quapy.method">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.error">quapy.error module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.absolute_error"><code class="docutils literal notranslate"><span class="pre">absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.acc_error"><code class="docutils literal notranslate"><span class="pre">acc_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.acce"><code class="docutils literal notranslate"><span class="pre">acce()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.ae"><code class="docutils literal notranslate"><span class="pre">ae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.f1_error"><code class="docutils literal notranslate"><span class="pre">f1_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.f1e"><code class="docutils literal notranslate"><span class="pre">f1e()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.from_name"><code class="docutils literal notranslate"><span class="pre">from_name()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.kld"><code class="docutils literal notranslate"><span class="pre">kld()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mae"><code class="docutils literal notranslate"><span class="pre">mae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mean_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mean_normalized_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_normalized_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mean_normalized_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_normalized_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mean_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">mean_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mkld"><code class="docutils literal notranslate"><span class="pre">mkld()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mnae"><code class="docutils literal notranslate"><span class="pre">mnae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mnkld"><code class="docutils literal notranslate"><span class="pre">mnkld()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mnrae"><code class="docutils literal notranslate"><span class="pre">mnrae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mrae"><code class="docutils literal notranslate"><span class="pre">mrae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.mse"><code class="docutils literal notranslate"><span class="pre">mse()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.nae"><code class="docutils literal notranslate"><span class="pre">nae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.nkld"><code class="docutils literal notranslate"><span class="pre">nkld()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.normalized_absolute_error"><code class="docutils literal notranslate"><span class="pre">normalized_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.normalized_relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">normalized_relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.nrae"><code class="docutils literal notranslate"><span class="pre">nrae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.rae"><code class="docutils literal notranslate"><span class="pre">rae()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.relative_absolute_error"><code class="docutils literal notranslate"><span class="pre">relative_absolute_error()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.se"><code class="docutils literal notranslate"><span class="pre">se()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.error.smooth"><code class="docutils literal notranslate"><span class="pre">smooth()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.evaluation">quapy.evaluation module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluate"><code class="docutils literal notranslate"><span class="pre">evaluate()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluate_on_samples"><code class="docutils literal notranslate"><span class="pre">evaluate_on_samples()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.evaluation.evaluation_report"><code class="docutils literal notranslate"><span class="pre">evaluation_report()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.evaluation.prediction"><code class="docutils literal notranslate"><span class="pre">prediction()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.functional">quapy.functional module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.HellingerDistance"><code class="docutils literal notranslate"><span class="pre">HellingerDistance()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.TopsoeDistance"><code class="docutils literal notranslate"><span class="pre">TopsoeDistance()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.argmin_prevalence"><code class="docutils literal notranslate"><span class="pre">argmin_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.as_binary_prevalence"><code class="docutils literal notranslate"><span class="pre">as_binary_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.check_prevalence_vector"><code class="docutils literal notranslate"><span class="pre">check_prevalence_vector()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.clip"><code class="docutils literal notranslate"><span class="pre">clip()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.condsoftmax"><code class="docutils literal notranslate"><span class="pre">condsoftmax()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.counts_from_labels"><code class="docutils literal notranslate"><span class="pre">counts_from_labels()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.get_divergence"><code class="docutils literal notranslate"><span class="pre">get_divergence()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.get_nprevpoints_approximation"><code class="docutils literal notranslate"><span class="pre">get_nprevpoints_approximation()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.l1_norm"><code class="docutils literal notranslate"><span class="pre">l1_norm()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.linear_search"><code class="docutils literal notranslate"><span class="pre">linear_search()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.normalize_prevalence"><code class="docutils literal notranslate"><span class="pre">normalize_prevalence()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.num_prevalence_combinations"><code class="docutils literal notranslate"><span class="pre">num_prevalence_combinations()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.optim_minimize"><code class="docutils literal notranslate"><span class="pre">optim_minimize()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_from_labels"><code class="docutils literal notranslate"><span class="pre">prevalence_from_labels()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_from_probabilities"><code class="docutils literal notranslate"><span class="pre">prevalence_from_probabilities()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.prevalence_linspace"><code class="docutils literal notranslate"><span class="pre">prevalence_linspace()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.projection_simplex_sort"><code class="docutils literal notranslate"><span class="pre">projection_simplex_sort()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.softmax"><code class="docutils literal notranslate"><span class="pre">softmax()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.solve_adjustment"><code class="docutils literal notranslate"><span class="pre">solve_adjustment()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.solve_adjustment_binary"><code class="docutils literal notranslate"><span class="pre">solve_adjustment_binary()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.strprev"><code class="docutils literal notranslate"><span class="pre">strprev()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.ternary_search"><code class="docutils literal notranslate"><span class="pre">ternary_search()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.uniform_prevalence_sampling"><code class="docutils literal notranslate"><span class="pre">uniform_prevalence_sampling()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.functional.uniform_simplex_sampling"><code class="docutils literal notranslate"><span class="pre">uniform_simplex_sampling()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.model_selection">quapy.model_selection module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus"><code class="docutils literal notranslate"><span class="pre">ConfigStatus</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus.failed"><code class="docutils literal notranslate"><span class="pre">ConfigStatus.failed()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.ConfigStatus.success"><code class="docutils literal notranslate"><span class="pre">ConfigStatus.success()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ"><code class="docutils literal notranslate"><span class="pre">GridSearchQ</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.best_model"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.best_model()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.fit"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.fit()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.get_params"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.get_params()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.quantify"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.quantify()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.GridSearchQ.set_params"><code class="docutils literal notranslate"><span class="pre">GridSearchQ.set_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.Status"><code class="docutils literal notranslate"><span class="pre">Status</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.ERROR"><code class="docutils literal notranslate"><span class="pre">Status.ERROR</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.INVALID"><code class="docutils literal notranslate"><span class="pre">Status.INVALID</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.SUCCESS"><code class="docutils literal notranslate"><span class="pre">Status.SUCCESS</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.model_selection.Status.TIMEOUT"><code class="docutils literal notranslate"><span class="pre">Status.TIMEOUT</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.cross_val_predict"><code class="docutils literal notranslate"><span class="pre">cross_val_predict()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.expand_grid"><code class="docutils literal notranslate"><span class="pre">expand_grid()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.model_selection.group_params"><code class="docutils literal notranslate"><span class="pre">group_params()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.plot">quapy.plot module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.plot.binary_bias_bins"><code class="docutils literal notranslate"><span class="pre">binary_bias_bins()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.plot.binary_bias_global"><code class="docutils literal notranslate"><span class="pre">binary_bias_global()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.plot.binary_diagonal"><code class="docutils literal notranslate"><span class="pre">binary_diagonal()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.plot.brokenbar_supremacy_by_drift"><code class="docutils literal notranslate"><span class="pre">brokenbar_supremacy_by_drift()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.plot.error_by_drift"><code class="docutils literal notranslate"><span class="pre">error_by_drift()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.protocol">quapy.protocol module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.APP"><code class="docutils literal notranslate"><span class="pre">APP</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.APP.prevalence_grid"><code class="docutils literal notranslate"><span class="pre">APP.prevalence_grid()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.APP.sample"><code class="docutils literal notranslate"><span class="pre">APP.sample()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.APP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">APP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.APP.total"><code class="docutils literal notranslate"><span class="pre">APP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractProtocol"><code class="docutils literal notranslate"><span class="pre">AbstractProtocol</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractProtocol.total"><code class="docutils literal notranslate"><span class="pre">AbstractProtocol.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.collator"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.collator()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.random_state"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.random_state</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.sample"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.sample()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.AbstractStochasticSeededProtocol.samples_parameters"><code class="docutils literal notranslate"><span class="pre">AbstractStochasticSeededProtocol.samples_parameters()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.ArtificialPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">ArtificialPrevalenceProtocol</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer"><code class="docutils literal notranslate"><span class="pre">DomainMixer</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.sample"><code class="docutils literal notranslate"><span class="pre">DomainMixer.sample()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.samples_parameters"><code class="docutils literal notranslate"><span class="pre">DomainMixer.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.DomainMixer.total"><code class="docutils literal notranslate"><span class="pre">DomainMixer.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.IterateProtocol"><code class="docutils literal notranslate"><span class="pre">IterateProtocol</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.IterateProtocol.total"><code class="docutils literal notranslate"><span class="pre">IterateProtocol.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.NPP"><code class="docutils literal notranslate"><span class="pre">NPP</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.sample"><code class="docutils literal notranslate"><span class="pre">NPP.sample()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">NPP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.NPP.total"><code class="docutils literal notranslate"><span class="pre">NPP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.NaturalPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">NaturalPrevalenceProtocol</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.RETURN_TYPES</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.get_collator"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.get_collator()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.get_labelled_collection()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances"><code class="docutils literal notranslate"><span class="pre">OnLabelledCollectionProtocol.on_preclassified_instances()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.UPP"><code class="docutils literal notranslate"><span class="pre">UPP</span></code></a><ul>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.sample"><code class="docutils literal notranslate"><span class="pre">UPP.sample()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.samples_parameters"><code class="docutils literal notranslate"><span class="pre">UPP.samples_parameters()</span></code></a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.html#quapy.protocol.UPP.total"><code class="docutils literal notranslate"><span class="pre">UPP.total()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.protocol.UniformPrevalenceProtocol"><code class="docutils literal notranslate"><span class="pre">UniformPrevalenceProtocol</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy.util">quapy.util module</a><ul>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.EarlyStop"><code class="docutils literal notranslate"><span class="pre">EarlyStop</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.create_if_not_exist"><code class="docutils literal notranslate"><span class="pre">create_if_not_exist()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.create_parent_dir"><code class="docutils literal notranslate"><span class="pre">create_parent_dir()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.download_file"><code class="docutils literal notranslate"><span class="pre">download_file()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.download_file_if_not_exists"><code class="docutils literal notranslate"><span class="pre">download_file_if_not_exists()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.get_quapy_home"><code class="docutils literal notranslate"><span class="pre">get_quapy_home()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.map_parallel"><code class="docutils literal notranslate"><span class="pre">map_parallel()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.parallel"><code class="docutils literal notranslate"><span class="pre">parallel()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.parallel_unpack"><code class="docutils literal notranslate"><span class="pre">parallel_unpack()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.pickled_resource"><code class="docutils literal notranslate"><span class="pre">pickled_resource()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.save_text_file"><code class="docutils literal notranslate"><span class="pre">save_text_file()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.temp_seed"><code class="docutils literal notranslate"><span class="pre">temp_seed()</span></code></a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#quapy.util.timeout"><code class="docutils literal notranslate"><span class="pre">timeout()</span></code></a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l2"><a class="reference internal" href="quapy.html#module-quapy">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
|
||||
<a href="index.html" class="btn btn-neutral float-left" title="Welcome to QuaPy’s documentation!" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
|
||||
<a href="quapy.html" class="btn btn-neutral float-right" title="quapy package" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2024, Alejandro Moreo.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -1,247 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Python Module Index — QuaPy: A Python-based open-source framework for quantification 0.1.9 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
|
||||
<link rel="stylesheet" type="text/css" href="_static/css/theme.css" />
|
||||
|
||||
|
||||
<!--[if lt IE 9]>
|
||||
<script src="_static/js/html5shiv.min.js"></script>
|
||||
<![endif]-->
|
||||
|
||||
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
|
||||
<script src="_static/jquery.js"></script>
|
||||
<script src="_static/underscore.js"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
|
||||
<script src="_static/doctools.js"></script>
|
||||
<script src="_static/sphinx_highlight.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
|
||||
|
||||
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="index.html" class="icon icon-home">
|
||||
QuaPy: A Python-based open-source framework for quantification
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">quapy</a></li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="index.html">QuaPy: A Python-based open-source framework for quantification</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="index.html" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item active">Python Module Index</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
|
||||
<h1>Python Module Index</h1>
|
||||
|
||||
<div class="modindex-jumpbox">
|
||||
<a href="#cap-q"><strong>q</strong></a>
|
||||
</div>
|
||||
|
||||
<table class="indextable modindextable">
|
||||
<tr class="pcap"><td></td><td> </td><td></td></tr>
|
||||
<tr class="cap" id="cap-q"><td></td><td>
|
||||
<strong>q</strong></td><td></td></tr>
|
||||
<tr>
|
||||
<td><img src="_static/minus.png" class="toggler"
|
||||
id="toggle-1" style="display: none" alt="-" /></td>
|
||||
<td>
|
||||
<a href="quapy.html#module-quapy"><code class="xref">quapy</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.classification.html#module-quapy.classification"><code class="xref">quapy.classification</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.classification.html#module-quapy.classification.calibration"><code class="xref">quapy.classification.calibration</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.classification.html#module-quapy.classification.methods"><code class="xref">quapy.classification.methods</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.classification.html#module-quapy.classification.neural"><code class="xref">quapy.classification.neural</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.classification.html#module-quapy.classification.svmperf"><code class="xref">quapy.classification.svmperf</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.data.html#module-quapy.data"><code class="xref">quapy.data</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.data.html#module-quapy.data.base"><code class="xref">quapy.data.base</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.data.html#module-quapy.data.datasets"><code class="xref">quapy.data.datasets</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.data.html#module-quapy.data.preprocessing"><code class="xref">quapy.data.preprocessing</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.data.html#module-quapy.data.reader"><code class="xref">quapy.data.reader</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.error"><code class="xref">quapy.error</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.evaluation"><code class="xref">quapy.evaluation</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.functional"><code class="xref">quapy.functional</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method"><code class="xref">quapy.method</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method._kdey"><code class="xref">quapy.method._kdey</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method._neural"><code class="xref">quapy.method._neural</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method._threshold_optim"><code class="xref">quapy.method._threshold_optim</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method.aggregative"><code class="xref">quapy.method.aggregative</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method.base"><code class="xref">quapy.method.base</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method.meta"><code class="xref">quapy.method.meta</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.method.html#module-quapy.method.non_aggregative"><code class="xref">quapy.method.non_aggregative</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.model_selection"><code class="xref">quapy.model_selection</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.plot"><code class="xref">quapy.plot</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.protocol"><code class="xref">quapy.protocol</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
<tr class="cg-1">
|
||||
<td></td>
|
||||
<td>   
|
||||
<a href="quapy.html#module-quapy.util"><code class="xref">quapy.util</code></a></td><td>
|
||||
<em></em></td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2024, Alejandro Moreo.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -1,958 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" /><meta name="generator" content="Docutils 0.19: https://docutils.sourceforge.io/" />
|
||||
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>quapy.classification package — QuaPy: A Python-based open-source framework for quantification 0.1.9 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
|
||||
<link rel="stylesheet" type="text/css" href="_static/css/theme.css" />
|
||||
|
||||
|
||||
<!--[if lt IE 9]>
|
||||
<script src="_static/js/html5shiv.min.js"></script>
|
||||
<![endif]-->
|
||||
|
||||
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
|
||||
<script src="_static/jquery.js"></script>
|
||||
<script src="_static/underscore.js"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
|
||||
<script src="_static/doctools.js"></script>
|
||||
<script src="_static/sphinx_highlight.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="search.html" />
|
||||
<link rel="next" title="quapy.data package" href="quapy.data.html" />
|
||||
<link rel="prev" title="quapy package" href="quapy.html" />
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="index.html" class="icon icon-home">
|
||||
QuaPy: A Python-based open-source framework for quantification
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<ul class="current">
|
||||
<li class="toctree-l1 current"><a class="reference internal" href="modules.html">quapy</a><ul class="current">
|
||||
<li class="toctree-l2 current"><a class="reference internal" href="quapy.html">quapy package</a><ul class="current">
|
||||
<li class="toctree-l3 current"><a class="reference internal" href="quapy.html#subpackages">Subpackages</a><ul class="current">
|
||||
<li class="toctree-l4 current"><a class="current reference internal" href="#">quapy.classification package</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.data.html">quapy.data package</a></li>
|
||||
<li class="toctree-l4"><a class="reference internal" href="quapy.method.html">quapy.method package</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#submodules">Submodules</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.error">quapy.error module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.evaluation">quapy.evaluation module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.functional">quapy.functional module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.model_selection">quapy.model_selection module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.plot">quapy.plot module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.protocol">quapy.protocol module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy.util">quapy.util module</a></li>
|
||||
<li class="toctree-l3"><a class="reference internal" href="quapy.html#module-quapy">Module contents</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="index.html">QuaPy: A Python-based open-source framework for quantification</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="index.html" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item"><a href="modules.html">quapy</a></li>
|
||||
<li class="breadcrumb-item"><a href="quapy.html">quapy package</a></li>
|
||||
<li class="breadcrumb-item active">quapy.classification package</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
<a href="_sources/quapy.classification.rst.txt" rel="nofollow"> View page source</a>
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<section id="quapy-classification-package">
|
||||
<h1>quapy.classification package<a class="headerlink" href="#quapy-classification-package" title="Permalink to this heading"></a></h1>
|
||||
<section id="submodules">
|
||||
<h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this heading"></a></h2>
|
||||
</section>
|
||||
<section id="module-quapy.classification.calibration">
|
||||
<span id="quapy-classification-calibration-module"></span><h2>quapy.classification.calibration module<a class="headerlink" href="#module-quapy.classification.calibration" title="Permalink to this heading"></a></h2>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.BCTSCalibration">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">BCTSCalibration</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#BCTSCalibration"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.BCTSCalibration" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase" title="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase</span></code></a></p>
|
||||
<p>Applies the Bias-Corrected Temperature Scaling (BCTS) calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari et al. paper</a>:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>classifier</strong> – a scikit-learn probabilistic classifier</p></li>
|
||||
<li><p><strong>val_split</strong> – indicate an integer k for performing kFCV to obtain the posterior prevalences, or a float p
|
||||
in (0,1) to indicate that the posteriors are obtained in a stratified validation split containing p% of the
|
||||
training instances (the rest is used for training). In any case, the classifier is retrained in the whole
|
||||
training set afterwards. Default value is 5.</p></li>
|
||||
<li><p><strong>n_jobs</strong> – indicate the number of parallel workers (only when val_split is an integer)</p></li>
|
||||
<li><p><strong>verbose</strong> – whether or not to display information in the standard output</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.NBVSCalibration">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">NBVSCalibration</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#NBVSCalibration"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.NBVSCalibration" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase" title="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase</span></code></a></p>
|
||||
<p>Applies the No-Bias Vector Scaling (NBVS) calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari et al. paper</a>:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>classifier</strong> – a scikit-learn probabilistic classifier</p></li>
|
||||
<li><p><strong>val_split</strong> – indicate an integer k for performing kFCV to obtain the posterior prevalences, or a float p
|
||||
in (0,1) to indicate that the posteriors are obtained in a stratified validation split containing p% of the
|
||||
training instances (the rest is used for training). In any case, the classifier is retrained in the whole
|
||||
training set afterwards. Default value is 5.</p></li>
|
||||
<li><p><strong>n_jobs</strong> – indicate the number of parallel workers (only when val_split is an integer)</p></li>
|
||||
<li><p><strong>verbose</strong> – whether or not to display information in the standard output</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifier">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">RecalibratedProbabilisticClassifier</span></span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifier"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifier" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<p>Abstract class for (re)calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari, A., Kundaje, A., & Shrikumar, A. (2020, November). Maximum likelihood with bias-corrected calibration
|
||||
is hard-to-beat at label shift adaptation. In International Conference on Machine Learning (pp. 222-232). PMLR.</a>:</p>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">RecalibratedProbabilisticClassifierBase</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">calibrator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">BaseEstimator</span></code>, <a class="reference internal" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifier" title="quapy.classification.calibration.RecalibratedProbabilisticClassifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifier</span></code></a></p>
|
||||
<p>Applies a (re)calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari et al. paper</a>.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>classifier</strong> – a scikit-learn probabilistic classifier</p></li>
|
||||
<li><p><strong>calibrator</strong> – the calibration object (an instance of abstention.calibration.CalibratorFactory)</p></li>
|
||||
<li><p><strong>val_split</strong> – indicate an integer k for performing kFCV to obtain the posterior probabilities, or a float p
|
||||
in (0,1) to indicate that the posteriors are obtained in a stratified validation split containing p% of the
|
||||
training instances (the rest is used for training). In any case, the classifier is retrained in the whole
|
||||
training set afterwards. Default value is 5.</p></li>
|
||||
<li><p><strong>n_jobs</strong> – indicate the number of parallel workers (only when val_split is an integer); default=None</p></li>
|
||||
<li><p><strong>verbose</strong> – whether or not to display information in the standard output</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py property">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.classes_">
|
||||
<em class="property"><span class="pre">property</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">classes_</span></span><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.classes_" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Returns the classes on which the classifier has been trained on</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>array-like of shape <cite>(n_classes)</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit">
|
||||
<span class="sig-name descname"><span class="pre">fit</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase.fit"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Fits the calibration for the probabilistic classifier.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the data instances</p></li>
|
||||
<li><p><strong>y</strong> – array-like of shape <cite>(n_samples,)</cite> with the class labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>self</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_cv">
|
||||
<span class="sig-name descname"><span class="pre">fit_cv</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase.fit_cv"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_cv" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Fits the calibration in a cross-validation manner, i.e., it generates posterior probabilities for all
|
||||
training instances via cross-validation, and then retrains the classifier on all training instances.
|
||||
The posterior probabilities thus generated are used for calibrating the outputs of the classifier.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the data instances</p></li>
|
||||
<li><p><strong>y</strong> – array-like of shape <cite>(n_samples,)</cite> with the class labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>self</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_tr_val">
|
||||
<span class="sig-name descname"><span class="pre">fit_tr_val</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase.fit_tr_val"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_tr_val" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Fits the calibration in a train/val-split manner, i.e.t, it partitions the training instances into a
|
||||
training and a validation set, and then uses the training samples to learn classifier which is then used
|
||||
to generate posterior probabilities for the held-out validation data. These posteriors are used to calibrate
|
||||
the classifier. The classifier is not retrained on the whole dataset.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the data instances</p></li>
|
||||
<li><p><strong>y</strong> – array-like of shape <cite>(n_samples,)</cite> with the class labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>self</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict">
|
||||
<span class="sig-name descname"><span class="pre">predict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase.predict"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts class labels for the data instances in <cite>X</cite></p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the data instances</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples,)</cite> with the class label predictions</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict_proba">
|
||||
<span class="sig-name descname"><span class="pre">predict_proba</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#RecalibratedProbabilisticClassifierBase.predict_proba"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict_proba" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Generates posterior probabilities for the data instances in <cite>X</cite></p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the data instances</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, n_classes)</cite> with posterior probabilities</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.TSCalibration">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">TSCalibration</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#TSCalibration"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.TSCalibration" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase" title="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase</span></code></a></p>
|
||||
<p>Applies the Temperature Scaling (TS) calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari et al. paper</a>:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>classifier</strong> – a scikit-learn probabilistic classifier</p></li>
|
||||
<li><p><strong>val_split</strong> – indicate an integer k for performing kFCV to obtain the posterior prevalences, or a float p
|
||||
in (0,1) to indicate that the posteriors are obtained in a stratified validation split containing p% of the
|
||||
training instances (the rest is used for training). In any case, the classifier is retrained in the whole
|
||||
training set afterwards. Default value is 5.</p></li>
|
||||
<li><p><strong>n_jobs</strong> – indicate the number of parallel workers (only when val_split is an integer)</p></li>
|
||||
<li><p><strong>verbose</strong> – whether or not to display information in the standard output</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.calibration.VSCalibration">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.calibration.</span></span><span class="sig-name descname"><span class="pre">VSCalibration</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/calibration.html#VSCalibration"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.calibration.VSCalibration" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.calibration.RecalibratedProbabilisticClassifierBase" title="quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecalibratedProbabilisticClassifierBase</span></code></a></p>
|
||||
<p>Applies the Vector Scaling (VS) calibration method from <cite>abstention.calibration</cite>, as defined in
|
||||
<a class="reference external" href="http://proceedings.mlr.press/v119/alexandari20a.html">Alexandari et al. paper</a>:</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>classifier</strong> – a scikit-learn probabilistic classifier</p></li>
|
||||
<li><p><strong>val_split</strong> – indicate an integer k for performing kFCV to obtain the posterior prevalences, or a float p
|
||||
in (0,1) to indicate that the posteriors are obtained in a stratified validation split containing p% of the
|
||||
training instances (the rest is used for training). In any case, the classifier is retrained in the whole
|
||||
training set afterwards. Default value is 5.</p></li>
|
||||
<li><p><strong>n_jobs</strong> – indicate the number of parallel workers (only when val_split is an integer)</p></li>
|
||||
<li><p><strong>verbose</strong> – whether or not to display information in the standard output</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</section>
|
||||
<section id="module-quapy.classification.methods">
|
||||
<span id="quapy-classification-methods-module"></span><h2>quapy.classification.methods module<a class="headerlink" href="#module-quapy.classification.methods" title="Permalink to this heading"></a></h2>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.methods.</span></span><span class="sig-name descname"><span class="pre">LowRankLogisticRegression</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">n_components</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">100</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">BaseEstimator</span></code></p>
|
||||
<p>An example of a classification method (i.e., an object that implements <cite>fit</cite>, <cite>predict</cite>, and <cite>predict_proba</cite>)
|
||||
that also generates embedded inputs (i.e., that implements <cite>transform</cite>), as those required for
|
||||
<code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.method.neural.QuaNet</span></code>. This is a mock method to allow for easily instantiating
|
||||
<code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.method.neural.QuaNet</span></code> on array-like real-valued instances.
|
||||
The transformation consists of applying <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.decomposition.TruncatedSVD</span></code>
|
||||
while classification is performed using <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.linear_model.LogisticRegression</span></code> on the low-rank space.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>n_components</strong> – the number of principal components to retain</p></li>
|
||||
<li><p><strong>kwargs</strong> – parameters for the
|
||||
<a class="reference external" href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html">Logistic Regression</a> classifier</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.fit">
|
||||
<span class="sig-name descname"><span class="pre">fit</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.fit"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.fit" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Fit the model according to the given training data. The fit consists of
|
||||
fitting <cite>TruncatedSVD</cite> and then <cite>LogisticRegression</cite> on the low-rank representation.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> with the instances</p></li>
|
||||
<li><p><strong>y</strong> – array-like of shape <cite>(n_samples, n_classes)</cite> with the class labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><cite>self</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.get_params">
|
||||
<span class="sig-name descname"><span class="pre">get_params</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.get_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.get_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Get hyper-parameters for this estimator.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>a dictionary with parameter names mapped to their values</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.predict">
|
||||
<span class="sig-name descname"><span class="pre">predict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.predict"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.predict" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts labels for the instances <cite>X</cite> embedded into the low-rank space.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> instances to classify</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a <cite>numpy</cite> array of length <cite>n</cite> containing the label predictions, where <cite>n</cite> is the number of
|
||||
instances in <cite>X</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.predict_proba">
|
||||
<span class="sig-name descname"><span class="pre">predict_proba</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.predict_proba"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.predict_proba" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts posterior probabilities for the instances <cite>X</cite> embedded into the low-rank space.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> instances to classify</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, n_classes)</cite> with the posterior probabilities</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.set_params">
|
||||
<span class="sig-name descname"><span class="pre">set_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">params</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.set_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.set_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Set the parameters of this estimator.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>params</strong> – a <cite>**kwargs</cite> dictionary with the estimator parameters for
|
||||
<a class="reference external" href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html">Logistic Regression</a>
|
||||
and eventually also <cite>n_components</cite> for <cite>TruncatedSVD</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.methods.LowRankLogisticRegression.transform">
|
||||
<span class="sig-name descname"><span class="pre">transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/methods.html#LowRankLogisticRegression.transform"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.methods.LowRankLogisticRegression.transform" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Returns the low-rank approximation of <cite>X</cite> with <cite>n_components</cite> dimensions, or <cite>X</cite> unaltered if
|
||||
<cite>n_components</cite> >= <cite>X.shape[1]</cite>.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> instances to embed</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, n_components)</cite> with the embedded instances</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
</section>
|
||||
<section id="module-quapy.classification.neural">
|
||||
<span id="quapy-classification-neural-module"></span><h2>quapy.classification.neural module<a class="headerlink" href="#module-quapy.classification.neural" title="Permalink to this heading"></a></h2>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.CNNnet">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.neural.</span></span><span class="sig-name descname"><span class="pre">CNNnet</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">vocabulary_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_classes</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">256</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">repr_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_heights</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">[3,</span> <span class="pre">5,</span> <span class="pre">7]</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">drop_p</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.5</span></span></em><span class="sig-paren">)</span><a class="reference internal" 
href="_modules/quapy/classification/neural.html#CNNnet"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.CNNnet" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.neural.TextClassifierNet" title="quapy.classification.neural.TextClassifierNet"><code class="xref py py-class docutils literal notranslate"><span class="pre">TextClassifierNet</span></code></a></p>
|
||||
<p>An implementation of <a class="reference internal" href="#quapy.classification.neural.TextClassifierNet" title="quapy.classification.neural.TextClassifierNet"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TextClassifierNet</span></code></a> based on
|
||||
Convolutional Neural Networks.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>vocabulary_size</strong> – the size of the vocabulary</p></li>
|
||||
<li><p><strong>n_classes</strong> – number of target classes</p></li>
|
||||
<li><p><strong>embedding_size</strong> – the dimensionality of the word embeddings space (default 100)</p></li>
|
||||
<li><p><strong>hidden_size</strong> – the dimensionality of the hidden space (default 256)</p></li>
|
||||
<li><p><strong>repr_size</strong> – the dimensionality of the document embeddings space (default 100)</p></li>
|
||||
<li><p><strong>kernel_heights</strong> – list of kernel lengths (default [3,5,7]), i.e., the number of
|
||||
consecutive tokens that each kernel covers</p></li>
|
||||
<li><p><strong>stride</strong> – convolutional stride (default 1)</p></li>
|
||||
<li><p><strong>padding</strong> – convolutional pad (default 0)</p></li>
|
||||
<li><p><strong>drop_p</strong> – drop probability for dropout (default 0.5)</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.CNNnet.document_embedding">
|
||||
<span class="sig-name descname"><span class="pre">document_embedding</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#CNNnet.document_embedding"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.CNNnet.document_embedding" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Embeds documents (i.e., performs the forward pass up to the
|
||||
next-to-last layer).</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>input</strong> – a batch of instances, typically generated by a torch’s <cite>DataLoader</cite>
|
||||
instance (see <a class="reference internal" href="#quapy.classification.neural.TorchDataset" title="quapy.classification.neural.TorchDataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TorchDataset</span></code></a>)</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a torch tensor of shape <cite>(n_samples, n_dimensions)</cite>, where
|
||||
<cite>n_samples</cite> is the number of documents, and <cite>n_dimensions</cite> is the
|
||||
dimensionality of the embedding</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.CNNnet.get_params">
|
||||
<span class="sig-name descname"><span class="pre">get_params</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#CNNnet.get_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.CNNnet.get_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Get hyper-parameters for this estimator</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>a dictionary with parameter names mapped to their values</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.CNNnet.training">
|
||||
<span class="sig-name descname"><span class="pre">training</span></span><em class="property"><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="pre">bool</span></em><a class="headerlink" href="#quapy.classification.neural.CNNnet.training" title="Permalink to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py property">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.CNNnet.vocabulary_size">
|
||||
<em class="property"><span class="pre">property</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">vocabulary_size</span></span><a class="headerlink" href="#quapy.classification.neural.CNNnet.vocabulary_size" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Return the size of the vocabulary</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>integer</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.LSTMnet">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.neural.</span></span><span class="sig-name descname"><span class="pre">LSTMnet</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">vocabulary_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_classes</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">256</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">repr_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lstm_class_nlayers</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">drop_p</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.5</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#LSTMnet"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.LSTMnet" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <a class="reference internal" href="#quapy.classification.neural.TextClassifierNet" title="quapy.classification.neural.TextClassifierNet"><code class="xref py py-class docutils literal notranslate"><span class="pre">TextClassifierNet</span></code></a></p>
|
||||
<p>An implementation of <a class="reference internal" href="#quapy.classification.neural.TextClassifierNet" title="quapy.classification.neural.TextClassifierNet"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TextClassifierNet</span></code></a> based on
|
||||
Long Short Term Memory networks.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>vocabulary_size</strong> – the size of the vocabulary</p></li>
|
||||
<li><p><strong>n_classes</strong> – number of target classes</p></li>
|
||||
<li><p><strong>embedding_size</strong> – the dimensionality of the word embeddings space (default 100)</p></li>
|
||||
<li><p><strong>hidden_size</strong> – the dimensionality of the hidden space (default 256)</p></li>
|
||||
<li><p><strong>repr_size</strong> – the dimensionality of the document embeddings space (default 100)</p></li>
|
||||
<li><p><strong>lstm_class_nlayers</strong> – number of LSTM layers (default 1)</p></li>
|
||||
<li><p><strong>drop_p</strong> – drop probability for dropout (default 0.5)</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.LSTMnet.document_embedding">
|
||||
<span class="sig-name descname"><span class="pre">document_embedding</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#LSTMnet.document_embedding"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.LSTMnet.document_embedding" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Embeds documents (i.e., performs the forward pass up to the
|
||||
next-to-last layer).</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>x</strong> – a batch of instances, typically generated by a torch’s <cite>DataLoader</cite>
|
||||
instance (see <a class="reference internal" href="#quapy.classification.neural.TorchDataset" title="quapy.classification.neural.TorchDataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TorchDataset</span></code></a>)</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a torch tensor of shape <cite>(n_samples, n_dimensions)</cite>, where
|
||||
<cite>n_samples</cite> is the number of documents, and <cite>n_dimensions</cite> is the
|
||||
dimensionality of the embedding</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.LSTMnet.get_params">
|
||||
<span class="sig-name descname"><span class="pre">get_params</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#LSTMnet.get_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.LSTMnet.get_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Get hyper-parameters for this estimator</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>a dictionary with parameter names mapped to their values</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.LSTMnet.training">
|
||||
<span class="sig-name descname"><span class="pre">training</span></span><em class="property"><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="pre">bool</span></em><a class="headerlink" href="#quapy.classification.neural.LSTMnet.training" title="Permalink to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py property">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.LSTMnet.vocabulary_size">
|
||||
<em class="property"><span class="pre">property</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">vocabulary_size</span></span><a class="headerlink" href="#quapy.classification.neural.LSTMnet.vocabulary_size" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Return the size of the vocabulary</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>integer</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.neural.</span></span><span class="sig-name descname"><span class="pre">NeuralClassifierTrainer</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">net</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="#quapy.classification.neural.TextClassifierNet" title="quapy.classification.neural.TextClassifierNet"><span class="pre">TextClassifierNet</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">lr</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.001</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight_decay</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">patience</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">10</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">epochs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">200</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">batch_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">64</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">batch_size_test</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">512</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding_length</span></span><span class="o"><span class="pre">=</span></span><span 
class="default_value"><span class="pre">300</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">device</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'cuda'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">checkpointpath</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'../checkpoint/classifier_net.dat'</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||||
<p>Trains a neural network for text classification.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>net</strong> – an instance of <cite>TextClassifierNet</cite> implementing the forward pass</p></li>
|
||||
<li><p><strong>lr</strong> – learning rate (default 1e-3)</p></li>
|
||||
<li><p><strong>weight_decay</strong> – weight decay (default 0)</p></li>
|
||||
<li><p><strong>patience</strong> – number of epochs that do not show any improvement in validation
|
||||
to wait before applying early stop (default 10)</p></li>
|
||||
<li><p><strong>epochs</strong> – maximum number of training epochs (default 200)</p></li>
|
||||
<li><p><strong>batch_size</strong> – batch size for training (default 64)</p></li>
|
||||
<li><p><strong>batch_size_test</strong> – batch size for test (default 512)</p></li>
|
||||
<li><p><strong>padding_length</strong> – maximum number of tokens to consider in a document (default 300)</p></li>
|
||||
<li><p><strong>device</strong> – specify ‘cpu’ or ‘cuda’ (default) for enabling gpu</p></li>
|
||||
<li><p><strong>checkpointpath</strong> – where to store the parameters of the best model found so far
|
||||
according to the evaluation in the held-out validation split (default ‘../checkpoint/classifier_net.dat’)</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py property">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.device">
|
||||
<em class="property"><span class="pre">property</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">device</span></span><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.device" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Gets the device in which the network is allocated</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>device</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.fit">
|
||||
<span class="sig-name descname"><span class="pre">fit</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">instances</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">labels</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.3</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.fit"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.fit" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Fits the model according to the given training data.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>instances</strong> – list of lists of indexed tokens</p></li>
|
||||
<li><p><strong>labels</strong> – array-like of shape <cite>(n_samples, n_classes)</cite> with the class labels</p></li>
|
||||
<li><p><strong>val_split</strong> – proportion of training documents to be taken as the validation set (default 0.3)</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.get_params">
|
||||
<span class="sig-name descname"><span class="pre">get_params</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.get_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.get_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Get hyper-parameters for this estimator</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>a dictionary with parameter names mapped to their values</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.predict">
|
||||
<span class="sig-name descname"><span class="pre">predict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">instances</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.predict"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.predict" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts labels for the instances</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>instances</strong> – list of lists of indexed tokens</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a <cite>numpy</cite> array of length <cite>n</cite> containing the label predictions, where <cite>n</cite> is the number of
|
||||
instances in <cite>X</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.predict_proba">
|
||||
<span class="sig-name descname"><span class="pre">predict_proba</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">instances</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.predict_proba"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.predict_proba" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts posterior probabilities for the instances</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> instances to classify</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, n_classes)</cite> with the posterior probabilities</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.reset_net_params">
|
||||
<span class="sig-name descname"><span class="pre">reset_net_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_classes</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.reset_net_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.reset_net_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Reinitialize the network parameters</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>vocab_size</strong> – the size of the vocabulary</p></li>
|
||||
<li><p><strong>n_classes</strong> – the number of target classes</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.set_params">
|
||||
<span class="sig-name descname"><span class="pre">set_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">params</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.set_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.set_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Set the parameters of this trainer and the learner it is training.
|
||||
In this current version, parameter names for the trainer and learner should
|
||||
be disjoint.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>params</strong> – a <cite>**kwargs</cite> dictionary with the parameters</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.NeuralClassifierTrainer.transform">
|
||||
<span class="sig-name descname"><span class="pre">transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">instances</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#NeuralClassifierTrainer.transform"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.NeuralClassifierTrainer.transform" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Returns the embeddings of the instances</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>instances</strong> – list of lists of indexed tokens</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, embed_size)</cite> with the embedded instances,
|
||||
where <cite>embed_size</cite> is defined by the classification network</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.neural.</span></span><span class="sig-name descname"><span class="pre">TextClassifierNet</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">*</span></span><span class="n"><span class="pre">args</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
|
||||
<p>Abstract Text classifier (<cite>torch.nn.Module</cite>)</p>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.dimensions">
|
||||
<span class="sig-name descname"><span class="pre">dimensions</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.dimensions"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.dimensions" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Gets the number of dimensions of the embedding space</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>integer</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.document_embedding">
|
||||
<em class="property"><span class="pre">abstract</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">document_embedding</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.document_embedding"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.document_embedding" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Embeds documents (i.e., performs the forward pass up to the
|
||||
next-to-last layer).</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>x</strong> – a batch of instances, typically generated by a torch’s <cite>DataLoader</cite>
|
||||
instance (see <a class="reference internal" href="#quapy.classification.neural.TorchDataset" title="quapy.classification.neural.TorchDataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TorchDataset</span></code></a>)</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a torch tensor of shape <cite>(n_samples, n_dimensions)</cite>, where
|
||||
<cite>n_samples</cite> is the number of documents, and <cite>n_dimensions</cite> is the
|
||||
dimensionality of the embedding</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.forward">
|
||||
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.forward" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Performs the forward pass.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>x</strong> – a batch of instances, typically generated by a torch’s <cite>DataLoader</cite>
|
||||
instance (see <a class="reference internal" href="#quapy.classification.neural.TorchDataset" title="quapy.classification.neural.TorchDataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.classification.neural.TorchDataset</span></code></a>)</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a tensor of shape <cite>(n_instances, n_classes)</cite> with the decision scores
|
||||
for each of the instances and classes</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.get_params">
|
||||
<em class="property"><span class="pre">abstract</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">get_params</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.get_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.get_params" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Get hyper-parameters for this estimator</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>a dictionary with parameter names mapped to their values</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.predict_proba">
|
||||
<span class="sig-name descname"><span class="pre">predict_proba</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.predict_proba"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.predict_proba" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts posterior probabilities for the instances in <cite>x</cite></p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>x</strong> – a torch tensor of indexed tokens with shape <cite>(n_instances, pad_length)</cite>
|
||||
where <cite>n_instances</cite> is the number of instances in the batch, and <cite>pad_length</cite>
|
||||
is length of the pad in the batch</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples, n_classes)</cite> with the posterior probabilities</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.training">
|
||||
<span class="sig-name descname"><span class="pre">training</span></span><em class="property"><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="pre">bool</span></em><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.training" title="Permalink to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
<dl class="py property">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.vocabulary_size">
|
||||
<em class="property"><span class="pre">property</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">vocabulary_size</span></span><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.vocabulary_size" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Return the size of the vocabulary</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p>integer</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TextClassifierNet.xavier_uniform">
|
||||
<span class="sig-name descname"><span class="pre">xavier_uniform</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TextClassifierNet.xavier_uniform"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TextClassifierNet.xavier_uniform" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Performs Xavier initialization of the network parameters</p>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TorchDataset">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.neural.</span></span><span class="sig-name descname"><span class="pre">TorchDataset</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">instances</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">labels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TorchDataset"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TorchDataset" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Dataset</span></code></p>
|
||||
<p>Transforms labelled instances into a Torch’s <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.utils.data.DataLoader</span></code> object</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>instances</strong> – list of lists of indexed tokens</p></li>
|
||||
<li><p><strong>labels</strong> – array-like of shape <cite>(n_samples, n_classes)</cite> with the class labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.neural.TorchDataset.asDataloader">
|
||||
<span class="sig-name descname"><span class="pre">asDataloader</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">shuffle</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_length</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">device</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/neural.html#TorchDataset.asDataloader"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.neural.TorchDataset.asDataloader" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Converts the labelled collection into a Torch DataLoader with dynamic padding for
|
||||
the batch</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>batch_size</strong> – batch size</p></li>
|
||||
<li><p><strong>shuffle</strong> – whether or not to shuffle instances</p></li>
|
||||
<li><p><strong>pad_length</strong> – the maximum length for the list of tokens (dynamic padding is
|
||||
applied, meaning that if the longest document in the batch is shorter than
|
||||
<cite>pad_length</cite>, then the batch is padded up to its length, and not to <cite>pad_length</cite>.</p></li>
|
||||
<li><p><strong>device</strong> – whether to allocate tensors in cpu or in cuda</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.utils.data.DataLoader</span></code> object</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
</section>
|
||||
<section id="module-quapy.classification.svmperf">
|
||||
<span id="quapy-classification-svmperf-module"></span><h2>quapy.classification.svmperf module<a class="headerlink" href="#module-quapy.classification.svmperf" title="Permalink to this heading"></a></h2>
|
||||
<dl class="py class">
|
||||
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf">
|
||||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.svmperf.</span></span><span class="sig-name descname"><span class="pre">SVMperf</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.01</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loss</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'01'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_folder</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/svmperf.html#SVMperf"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.svmperf.SVMperf" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">BaseEstimator</span></code>, <code class="xref py py-class docutils literal notranslate"><span class="pre">ClassifierMixin</span></code></p>
|
||||
<p>A wrapper for the <a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">SVM-perf package</a> by Thorsten Joachims.
|
||||
When using losses for quantification, the source code has to be patched. See
|
||||
the <a class="reference external" href="https://hlt-isti.github.io/QuaPy/build/html/Installation.html#svm-perf-with-quantification-oriented-losses">installation documentation</a>
|
||||
for further details.</p>
|
||||
<p class="rubric">References</p>
|
||||
<ul class="simple">
|
||||
<li><p><a class="reference external" href="https://dl.acm.org/doi/abs/10.1145/2700406?casa_token=8D2fHsGCVn0AAAAA:ZfThYOvrzWxMGfZYlQW_y8Cagg-o_l6X_PcF09mdETQ4Tu7jK98mxFbGSXp9ZSO14JkUIYuDGFG0">Esuli et al.2015</a></p></li>
|
||||
<li><p><a class="reference external" href="https://www.sciencedirect.com/science/article/abs/pii/S003132031400291X">Barranquero et al.2015</a></p></li>
|
||||
</ul>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>svmperf_base</strong> – path to directory containing the binary files <cite>svm_perf_learn</cite> and <cite>svm_perf_classify</cite></p></li>
|
||||
<li><p><strong>C</strong> – trade-off between training error and margin (default 0.01)</p></li>
|
||||
<li><p><strong>verbose</strong> – set to True to print svm-perf std outputs</p></li>
|
||||
<li><p><strong>loss</strong> – the loss to optimize for. Available losses are “01”, “f1”, “kld”, “nkld”, “q”, “qacc”, “qf1”, “qgm”, “mae”, “mrae”.</p></li>
|
||||
<li><p><strong>host_folder</strong> – directory where to store the trained model; set to None (default) for using a tmp directory
|
||||
(temporal directories are automatically deleted)</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.decision_function">
|
||||
<span class="sig-name descname"><span class="pre">decision_function</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/svmperf.html#SVMperf.decision_function"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.decision_function" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Evaluate the decision function for the samples in <cite>X</cite>.</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> containing the instances to classify</p></li>
|
||||
<li><p><strong>y</strong> – unused</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>array-like of shape <cite>(n_samples,)</cite> containing the decision scores of the instances</p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.fit">
|
||||
<span class="sig-name descname"><span class="pre">fit</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/svmperf.html#SVMperf.fit"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.fit" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Trains the SVM for the multivariate performance loss</p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><ul class="simple">
|
||||
<li><p><strong>X</strong> – training instances</p></li>
|
||||
<li><p><strong>y</strong> – a binary vector of labels</p></li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p><cite>self</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py method">
|
||||
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.predict">
|
||||
<span class="sig-name descname"><span class="pre">predict</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/quapy/classification/svmperf.html#SVMperf.predict"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.predict" title="Permalink to this definition"></a></dt>
|
||||
<dd><p>Predicts labels for the instances <cite>X</cite></p>
|
||||
<dl class="field-list simple">
|
||||
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
|
||||
<dd class="field-odd"><p><strong>X</strong> – array-like of shape <cite>(n_samples, n_features)</cite> instances to classify</p>
|
||||
</dd>
|
||||
<dt class="field-even">Returns<span class="colon">:</span></dt>
|
||||
<dd class="field-even"><p>a <cite>numpy</cite> array of length <cite>n</cite> containing the label predictions, where <cite>n</cite> is the number of
|
||||
instances in <cite>X</cite></p>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd></dl>
|
||||
|
||||
<dl class="py attribute">
|
||||
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.valid_losses">
|
||||
<span class="sig-name descname"><span class="pre">valid_losses</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">{'01':</span> <span class="pre">0,</span> <span class="pre">'f1':</span> <span class="pre">1,</span> <span class="pre">'kld':</span> <span class="pre">12,</span> <span class="pre">'mae':</span> <span class="pre">26,</span> <span class="pre">'mrae':</span> <span class="pre">27,</span> <span class="pre">'nkld':</span> <span class="pre">13,</span> <span class="pre">'q':</span> <span class="pre">22,</span> <span class="pre">'qacc':</span> <span class="pre">23,</span> <span class="pre">'qf1':</span> <span class="pre">24,</span> <span class="pre">'qgm':</span> <span class="pre">25}</span></em><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.valid_losses" title="Permalink to this definition"></a></dt>
|
||||
<dd></dd></dl>
|
||||
|
||||
</dd></dl>
|
||||
|
||||
</section>
|
||||
<section id="module-quapy.classification">
|
||||
<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-quapy.classification" title="Permalink to this heading"></a></h2>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
|
||||
<a href="quapy.html" class="btn btn-neutral float-left" title="quapy package" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
|
||||
<a href="quapy.data.html" class="btn btn-neutral float-right" title="quapy.data package" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2024, Alejandro Moreo.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -1,122 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html class="writer-html5" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Search — QuaPy: A Python-based open-source framework for quantification 0.1.9 documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
|
||||
<link rel="stylesheet" type="text/css" href="_static/css/theme.css" />
|
||||
|
||||
|
||||
|
||||
<!--[if lt IE 9]>
|
||||
<script src="_static/js/html5shiv.min.js"></script>
|
||||
<![endif]-->
|
||||
|
||||
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
|
||||
<script src="_static/jquery.js"></script>
|
||||
<script src="_static/underscore.js"></script>
|
||||
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
|
||||
<script src="_static/doctools.js"></script>
|
||||
<script src="_static/sphinx_highlight.js"></script>
|
||||
<script src="_static/js/theme.js"></script>
|
||||
<script src="_static/searchtools.js"></script>
|
||||
<script src="_static/language_data.js"></script>
|
||||
<link rel="index" title="Index" href="genindex.html" />
|
||||
<link rel="search" title="Search" href="#" />
|
||||
</head>
|
||||
|
||||
<body class="wy-body-for-nav">
|
||||
<div class="wy-grid-for-nav">
|
||||
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
|
||||
<div class="wy-side-scroll">
|
||||
<div class="wy-side-nav-search" >
|
||||
|
||||
|
||||
|
||||
<a href="index.html" class="icon icon-home">
|
||||
QuaPy: A Python-based open-source framework for quantification
|
||||
</a>
|
||||
<div role="search">
|
||||
<form id="rtd-search-form" class="wy-form" action="#" method="get">
|
||||
<input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
</div>
|
||||
</div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
|
||||
<ul>
|
||||
<li class="toctree-l1"><a class="reference internal" href="modules.html">quapy</a></li>
|
||||
</ul>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu" >
|
||||
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
|
||||
<a href="index.html">QuaPy: A Python-based open-source framework for quantification</a>
|
||||
</nav>
|
||||
|
||||
<div class="wy-nav-content">
|
||||
<div class="rst-content">
|
||||
<div role="navigation" aria-label="Page navigation">
|
||||
<ul class="wy-breadcrumbs">
|
||||
<li><a href="index.html" class="icon icon-home" aria-label="Home"></a></li>
|
||||
<li class="breadcrumb-item active">Search</li>
|
||||
<li class="wy-breadcrumbs-aside">
|
||||
</li>
|
||||
</ul>
|
||||
<hr/>
|
||||
</div>
|
||||
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
|
||||
<div itemprop="articleBody">
|
||||
|
||||
<noscript>
|
||||
<div id="fallback" class="admonition warning">
|
||||
<p class="last">
|
||||
Please activate JavaScript to enable the search functionality.
|
||||
</p>
|
||||
</div>
|
||||
</noscript>
|
||||
|
||||
|
||||
<div id="search-results">
|
||||
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer>
|
||||
|
||||
<hr/>
|
||||
|
||||
<div role="contentinfo">
|
||||
<p>© Copyright 2024, Alejandro Moreo.</p>
|
||||
</div>
|
||||
|
||||
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
|
||||
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
|
||||
provided by <a href="https://readthedocs.org">Read the Docs</a>.
|
||||
|
||||
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
<script>
|
||||
jQuery(function () {
|
||||
SphinxRtdTheme.Navigation.enable(true);
|
||||
});
|
||||
</script>
|
||||
<script>
|
||||
jQuery(function() { Search.loadIndex("searchindex.js"); });
|
||||
</script>
|
||||
|
||||
<script id="searchindexloader"></script>
|
||||
|
||||
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1 @@
|
|||
!*.png
|
Before Width: | Height: | Size: 128 KiB After Width: | Height: | Size: 128 KiB |
|
@ -10,8 +10,15 @@ import pathlib
|
|||
import sys
|
||||
from os.path import join
|
||||
quapy_path = join(pathlib.Path(__file__).parents[2].resolve().as_posix(), 'quapy')
|
||||
wiki_path = join(pathlib.Path(__file__).parents[0].resolve().as_posix(), 'wiki')
|
||||
source_path = pathlib.Path(__file__).parents[2].resolve().as_posix()
|
||||
print(f'quapy path={quapy_path}')
|
||||
print(f'quapy source path={source_path}')
|
||||
sys.path.insert(0, quapy_path)
|
||||
sys.path.insert(0, wiki_path)
|
||||
sys.path.insert(0, source_path)
|
||||
|
||||
print(sys.path)
|
||||
|
||||
|
||||
project = 'QuaPy: A Python-based open-source framework for quantification'
|
||||
|
@ -28,14 +35,21 @@ release = quapy.__version__
|
|||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||
|
||||
extensions = [
|
||||
'sphinx.ext.autosectionlabel',
|
||||
'sphinx.ext.duration',
|
||||
'sphinx.ext.doctest',
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.autosummary',
|
||||
'sphinx.ext.viewcode',
|
||||
'sphinx.ext.napoleon'
|
||||
'sphinx.ext.napoleon',
|
||||
'sphinx.ext.intersphinx',
|
||||
'myst_parser',
|
||||
]
|
||||
|
||||
autosectionlabel_prefix_document = True
|
||||
|
||||
source_suffix = ['.rst', '.md']
|
||||
|
||||
templates_path = ['_templates']
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
|
@ -50,6 +64,10 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
|||
html_theme = 'sphinx_rtd_theme'
|
||||
# html_theme = 'furo'
|
||||
# need to be installed: pip install furo (not working...)
|
||||
html_static_path = ['_static']
|
||||
# html_static_path = ['_static']
|
||||
|
||||
# intersphinx configuration
|
||||
intersphinx_mapping = {
|
||||
"sklearn": ("https://scikit-learn.org/stable/", None),
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,100 @@
|
|||
```{toctree}
|
||||
:hidden:
|
||||
|
||||
self
|
||||
```
|
||||
|
||||
# Quickstart
|
||||
|
||||
QuaPy is an open source framework for quantification (a.k.a. supervised prevalence estimation, or learning to quantify) written in Python.
|
||||
|
||||
QuaPy is based on the concept of "data sample", and provides implementations of the most important aspects of the quantification workflow, such as (baseline and advanced) quantification methods, quantification-oriented model selection mechanisms, evaluation measures, and evaluations protocols used for evaluating quantification methods. QuaPy also makes available commonly used datasets, and offers visualization tools for facilitating the analysis and interpretation of the experimental results.
|
||||
|
||||
QuaPy is hosted on GitHub at [https://github.com/HLT-ISTI/QuaPy](https://github.com/HLT-ISTI/QuaPy).
|
||||
|
||||
## Installation
|
||||
|
||||
```sh
|
||||
pip install quapy
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The following script fetches a dataset of tweets, trains, applies, and evaluates a quantifier based on the *Adjusted Classify & Count* quantification method, using, as the evaluation measure, the *Mean Absolute Error* (MAE) between the predicted and the true class prevalence values of the test set:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
dataset = qp.datasets.fetch_twitter('semeval16')
|
||||
|
||||
# create an "Adjusted Classify & Count" quantifier
|
||||
model = qp.method.aggregative.ACC(LogisticRegression())
|
||||
model.fit(dataset.training)
|
||||
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
true_prevalence = dataset.test.prevalence()
|
||||
|
||||
error = qp.error.mae(true_prevalence, estim_prevalence)
|
||||
|
||||
print(f'Mean Absolute Error (MAE)={error:.3f}')
|
||||
```
|
||||
|
||||
Quantification is useful in scenarios characterized by prior probability shift. In other words, we would be little interested in estimating the class prevalence values of the test set if we could assume the IID assumption to hold, as this prevalence would be roughly equivalent to the class prevalence of the training set. For this reason, any quantification model should be tested across many samples, even ones characterized by class prevalence values different or very different from those found in the training set. QuaPy implements sampling procedures and evaluation protocols that automate this workflow. See the [manuals](./manuals) for detailed examples.
|
||||
|
||||
## Manuals
|
||||
|
||||
The following manuals illustrate several aspects of QuaPy through examples:
|
||||
|
||||
```{toctree}
|
||||
:maxdepth: 3
|
||||
|
||||
manuals
|
||||
```
|
||||
|
||||
```{toctree}
|
||||
:hidden:
|
||||
|
||||
API <quapy>
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- Implementation of many popular quantification methods (Classify-&-Count and its variants, Expectation Maximization, quantification methods based on structured output learning, HDy, QuaNet, quantification ensembles, among others).
|
||||
- Versatile functionality for performing evaluation based on sampling generation protocols (e.g., APP, NPP, etc.).
|
||||
- Implementation of most commonly used evaluation metrics (e.g., AE, RAE, NAE, NRAE, SE, KLD, NKLD, etc.).
|
||||
- Datasets frequently used in quantification (textual and numeric), including:
|
||||
- 32 UCI Machine Learning binary datasets.
|
||||
- 5 UCI Machine Learning multiclass datasets (new in v0.1.8!).
|
||||
- 11 Twitter quantification-by-sentiment datasets.
|
||||
- 3 product reviews quantification-by-sentiment datasets.
|
||||
- 4 tasks from LeQua competition (new in v0.1.7!)
|
||||
- IFCB dataset of plankton water samples (new in v0.1.8!).
|
||||
- Native support for binary and single-label multiclass quantification scenarios.
|
||||
- Model selection functionality that minimizes quantification-oriented loss functions.
|
||||
- Visualization tools for analysing the experimental results.
|
||||
|
||||
## Citing QuaPy
|
||||
|
||||
If you find QuaPy useful (and we hope you will), please consider citing the original paper in your research.
|
||||
|
||||
```bibtex
|
||||
@inproceedings{moreo2021quapy,
|
||||
title={QuaPy: a python-based framework for quantification},
|
||||
author={Moreo, Alejandro and Esuli, Andrea and Sebastiani, Fabrizio},
|
||||
booktitle={Proceedings of the 30th ACM International Conference on Information \& Knowledge Management},
|
||||
pages={4534--4543},
|
||||
year={2021}
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
In case you want to contribute improvements to quapy, please generate pull request to the "devel" branch.
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
```{image} SoBigData.png
|
||||
:width: 250px
|
||||
:alt: SoBigData++
|
||||
```
|
|
@ -1,41 +0,0 @@
|
|||
.. QuaPy: A Python-based open-source framework for quantification documentation master file, created by
|
||||
sphinx-quickstart on Wed Feb 7 16:26:46 2024.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to QuaPy's documentation!
|
||||
==========================================================================================
|
||||
|
||||
QuaPy is a Python-based open-source framework for quantification.
|
||||
|
||||
This document contains the API of the modules included in QuaPy.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
`pip install quapy`
|
||||
|
||||
GitHub
|
||||
------------
|
||||
|
||||
QuaPy is hosted in GitHub at `https://github.com/HLT-ISTI/QuaPy <https://github.com/HLT-ISTI/QuaPy>`_
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
|
||||
modules
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
|
@ -0,0 +1,14 @@
|
|||
Manuals
|
||||
=======
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:numbered:
|
||||
|
||||
manuals/datasets
|
||||
manuals/evaluation
|
||||
manuals/explicit-loss-minimization
|
||||
manuals/methods
|
||||
manuals/model-selection
|
||||
manuals/plotting
|
||||
manuals/protocols
|
|
@ -0,0 +1,467 @@
|
|||
# Datasets
|
||||
|
||||
QuaPy makes available several datasets that have been used in
|
||||
quantification literature, as well as an interface to allow
|
||||
anyone import their custom datasets.
|
||||
|
||||
A _Dataset_ object in QuaPy is roughly a pair of _LabelledCollection_ objects,
|
||||
one playing the role of the training set, another the test set.
|
||||
_LabelledCollection_ is a data class consisting of the (iterable)
|
||||
instances and labels. This class handles most of the sampling functionality in QuaPy.
|
||||
Take a look at the following code:
|
||||
|
||||
```python
|
||||
|
||||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
|
||||
instances = [
|
||||
'1st positive document', '2nd positive document',
|
||||
'the only negative document',
|
||||
'1st neutral document', '2nd neutral document', '3rd neutral document'
|
||||
]
|
||||
labels = [2, 2, 0, 1, 1, 1]
|
||||
|
||||
data = qp.data.LabelledCollection(instances, labels)
|
||||
print(F.strprev(data.prevalence(), prec=2))
|
||||
|
||||
```
|
||||
|
||||
This outputs the class prevalences (shown with 2-digit precision):
|
||||
```
|
||||
[0.17, 0.50, 0.33]
|
||||
```
|
||||
|
||||
One can easily produce new samples at desired class prevalence values:
|
||||
|
||||
```python
|
||||
sample_size = 10
|
||||
prev = [0.4, 0.1, 0.5]
|
||||
sample = data.sampling(sample_size, *prev)
|
||||
|
||||
print('instances:', sample.instances)
|
||||
print('labels:', sample.labels)
|
||||
print('prevalence:', F.strprev(sample.prevalence(), prec=2))
|
||||
```
|
||||
|
||||
Which outputs:
|
||||
```
|
||||
instances: ['the only negative document' '2nd positive document'
|
||||
'2nd positive document' '2nd neutral document' '1st positive document'
|
||||
'the only negative document' 'the only negative document'
|
||||
'the only negative document' '2nd positive document'
|
||||
'1st positive document']
|
||||
labels: [0 2 2 1 2 0 0 0 2 2]
|
||||
prevalence: [0.40, 0.10, 0.50]
|
||||
```
|
||||
|
||||
Samples can be made consistent across different runs (e.g., to test
|
||||
different methods on the same exact samples) by sampling and retaining
|
||||
the indexes, that can then be used to generate the sample:
|
||||
|
||||
```python
|
||||
index = data.sampling_index(sample_size, *prev)
|
||||
for method in methods:
|
||||
sample = data.sampling_from_index(index)
|
||||
...
|
||||
```
|
||||
|
||||
However, generating samples for evaluation purposes is tackled in QuaPy
|
||||
by means of the evaluation protocols (see the dedicated entries in the manuals
|
||||
for [evaluation](./evaluation) and [protocols](./protocols)).
|
||||
|
||||
|
||||
## Reviews Datasets
|
||||
|
||||
Three datasets of reviews about Kindle devices, Harry Potter's series, and
|
||||
the well-known IMDb movie reviews can be fetched using a unified interface.
|
||||
For example:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
data = qp.datasets.fetch_reviews('kindle')
|
||||
```
|
||||
|
||||
These datasets have been used in:
|
||||
```
|
||||
Esuli, A., Moreo, A., & Sebastiani, F. (2018, October).
|
||||
A recurrent neural network for sentiment quantification.
|
||||
In Proceedings of the 27th ACM International Conference on
|
||||
Information and Knowledge Management (pp. 1775-1778).
|
||||
```
|
||||
|
||||
The list of reviews ids is available in:
|
||||
|
||||
```python
|
||||
qp.datasets.REVIEWS_SENTIMENT_DATASETS
|
||||
```
|
||||
|
||||
Some statistics of the fhe available datasets are summarized below:
|
||||
|
||||
| Dataset | classes | train size | test size | train prev | test prev | type |
|
||||
|---|:---:|:---:|:---:|:---:|:---:|---|
|
||||
| hp | 2 | 9533 | 18399 | \[0.018, 0.982\] | \[0.065, 0.935\] | text |
|
||||
| kindle | 2 | 3821 | 21591 | \[0.081, 0.919\] | \[0.063, 0.937\] | text |
|
||||
| imdb | 2 | 25000 | 25000 | \[0.500, 0.500\] | \[0.500, 0.500\] | text |
|
||||
|
||||
## Twitter Sentiment Datasets
|
||||
|
||||
11 Twitter datasets for sentiment analysis.
|
||||
Text is not accessible, and the documents were made available
|
||||
in tf-idf format. Each dataset presents two splits: a train/val
|
||||
split for model selection purposes, and a train+val/test split
|
||||
for model evaluation. The following code exemplifies how to load
|
||||
a twitter dataset for model selection.
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
data = qp.datasets.fetch_twitter('gasp', for_model_selection=True)
|
||||
```
|
||||
|
||||
The datasets were used in:
|
||||
|
||||
```
|
||||
Gao, W., & Sebastiani, F. (2015, August).
|
||||
Tweet sentiment: From classification to quantification.
|
||||
In 2015 IEEE/ACM International Conference on Advances in
|
||||
Social Networks Analysis and Mining (ASONAM) (pp. 97-104). IEEE.
|
||||
```
|
||||
|
||||
Three of the datasets (semeval13, semeval14, and semeval15) share the
|
||||
same training set (semeval), meaning that the training split one would get
|
||||
when requesting any of them is the same. The dataset "semeval" can only
|
||||
be requested with "for_model_selection=True".
|
||||
The lists of the Twitter dataset's ids can be consulted in:
|
||||
|
||||
```python
|
||||
# a list of 11 dataset ids that can be used for model selection or model evaluation
|
||||
qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
|
||||
|
||||
# 9 dataset ids in which "semeval13", "semeval14", and "semeval15" are replaced with "semeval"
|
||||
qp.datasets.TWITTER_SENTIMENT_DATASETS_TRAIN
|
||||
```
|
||||
|
||||
Some details can be found below:
|
||||
|
||||
| Dataset | classes | train size | test size | features | train prev | test prev | type |
|
||||
|---|:---:|:---:|:---:|:---:|:---:|:---:|---|
|
||||
| gasp | 3 | 8788 | 3765 | 694582 | [0.421, 0.496, 0.082] | [0.407, 0.507, 0.086] | sparse |
|
||||
| hcr | 3 | 1594 | 798 | 222046 | [0.546, 0.211, 0.243] | [0.640, 0.167, 0.193] | sparse |
|
||||
| omd | 3 | 1839 | 787 | 199151 | [0.463, 0.271, 0.266] | [0.437, 0.283, 0.280] | sparse |
|
||||
| sanders | 3 | 2155 | 923 | 229399 | [0.161, 0.691, 0.148] | [0.164, 0.688, 0.148] | sparse |
|
||||
| semeval13 | 3 | 11338 | 3813 | 1215742 | [0.159, 0.470, 0.372] | [0.158, 0.430, 0.412] | sparse |
|
||||
| semeval14 | 3 | 11338 | 1853 | 1215742 | [0.159, 0.470, 0.372] | [0.109, 0.361, 0.530] | sparse |
|
||||
| semeval15 | 3 | 11338 | 2390 | 1215742 | [0.159, 0.470, 0.372] | [0.153, 0.413, 0.434] | sparse |
|
||||
| semeval16 | 3 | 8000 | 2000 | 889504 | [0.157, 0.351, 0.492] | [0.163, 0.341, 0.497] | sparse |
|
||||
| sst | 3 | 2971 | 1271 | 376132 | [0.261, 0.452, 0.288] | [0.207, 0.481, 0.312] | sparse |
|
||||
| wa | 3 | 2184 | 936 | 248563 | [0.305, 0.414, 0.281] | [0.282, 0.446, 0.272] | sparse |
|
||||
| wb | 3 | 4259 | 1823 | 404333 | [0.270, 0.392, 0.337] | [0.274, 0.392, 0.335] | sparse |
|
||||
|
||||
|
||||
## UCI Machine Learning
|
||||
|
||||
### Binary datasets
|
||||
|
||||
A set of 32 datasets from the [UCI Machine Learning repository](https://archive.ics.uci.edu/ml/datasets.php)
|
||||
used in:
|
||||
|
||||
```
|
||||
Pérez-Gállego, P., Quevedo, J. R., & del Coz, J. J. (2017).
|
||||
Using ensembles for problems with characterizable changes
|
||||
in data distribution: A case study on quantification.
|
||||
Information Fusion, 34, 87-100.
|
||||
```
|
||||
|
||||
The list does not exactly coincide with that used in Pérez-Gállego et al. 2017
|
||||
since we were unable to find the datasets with ids "diabetes" and "phoneme".
|
||||
|
||||
These dataset can be loaded by calling, e.g.:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
|
||||
data = qp.datasets.fetch_UCIBinaryDataset('yeast', verbose=True)
|
||||
```
|
||||
|
||||
This call will return a _Dataset_ object in which the training and
|
||||
test splits are randomly drawn, in a stratified manner, from the whole
|
||||
collection at 70% and 30%, respectively. The _verbose=True_ option indicates
|
||||
that the dataset description should be printed in standard output.
|
||||
The original data is not split,
|
||||
and some papers submit the entire collection to a kFCV validation.
|
||||
In order to accommodate with these practices, one could first instantiate
|
||||
the entire collection, and then creating a generator that will return one
|
||||
training+test dataset at a time, following a kFCV protocol:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
|
||||
collection = qp.datasets.fetch_UCIBinaryLabelledCollection("yeast")
|
||||
for data in qp.data.Dataset.kFCV(collection, nfolds=5, nrepeats=2):
|
||||
...
|
||||
```
|
||||
|
||||
The above code allows conducting a 2x5FCV evaluation on the "yeast" dataset.
|
||||
|
||||
All datasets come in numerical form (dense matrices); some statistics
|
||||
are summarized below.
|
||||
|
||||
| Dataset | classes | instances | features | prev | type |
|
||||
|---|:---:|:---:|:---:|:---:|---|
|
||||
| acute.a | 2 | 120 | 6 | [0.508, 0.492] | dense |
|
||||
| acute.b | 2 | 120 | 6 | [0.583, 0.417] | dense |
|
||||
| balance.1 | 2 | 625 | 4 | [0.539, 0.461] | dense |
|
||||
| balance.2 | 2 | 625 | 4 | [0.922, 0.078] | dense |
|
||||
| balance.3 | 2 | 625 | 4 | [0.539, 0.461] | dense |
|
||||
| breast-cancer | 2 | 683 | 9 | [0.350, 0.650] | dense |
|
||||
| cmc.1 | 2 | 1473 | 9 | [0.573, 0.427] | dense |
|
||||
| cmc.2 | 2 | 1473 | 9 | [0.774, 0.226] | dense |
|
||||
| cmc.3 | 2 | 1473 | 9 | [0.653, 0.347] | dense |
|
||||
| ctg.1 | 2 | 2126 | 21 | [0.222, 0.778] | dense |
|
||||
| ctg.2 | 2 | 2126 | 21 | [0.861, 0.139] | dense |
|
||||
| ctg.3 | 2 | 2126 | 21 | [0.917, 0.083] | dense |
|
||||
| german | 2 | 1000 | 24 | [0.300, 0.700] | dense |
|
||||
| haberman | 2 | 306 | 3 | [0.735, 0.265] | dense |
|
||||
| ionosphere | 2 | 351 | 34 | [0.641, 0.359] | dense |
|
||||
| iris.1 | 2 | 150 | 4 | [0.667, 0.333] | dense |
|
||||
| iris.2 | 2 | 150 | 4 | [0.667, 0.333] | dense |
|
||||
| iris.3 | 2 | 150 | 4 | [0.667, 0.333] | dense |
|
||||
| mammographic | 2 | 830 | 5 | [0.514, 0.486] | dense |
|
||||
| pageblocks.5 | 2 | 5473 | 10 | [0.979, 0.021] | dense |
|
||||
| semeion | 2 | 1593 | 256 | [0.901, 0.099] | dense |
|
||||
| sonar | 2 | 208 | 60 | [0.534, 0.466] | dense |
|
||||
| spambase | 2 | 4601 | 57 | [0.606, 0.394] | dense |
|
||||
| spectf | 2 | 267 | 44 | [0.794, 0.206] | dense |
|
||||
| tictactoe | 2 | 958 | 9 | [0.653, 0.347] | dense |
|
||||
| transfusion | 2 | 748 | 4 | [0.762, 0.238] | dense |
|
||||
| wdbc | 2 | 569 | 30 | [0.627, 0.373] | dense |
|
||||
| wine.1 | 2 | 178 | 13 | [0.669, 0.331] | dense |
|
||||
| wine.2 | 2 | 178 | 13 | [0.601, 0.399] | dense |
|
||||
| wine.3 | 2 | 178 | 13 | [0.730, 0.270] | dense |
|
||||
| wine-q-red | 2 | 1599 | 11 | [0.465, 0.535] | dense |
|
||||
| wine-q-white | 2 | 4898 | 11 | [0.335, 0.665] | dense |
|
||||
| yeast | 2 | 1484 | 8 | [0.711, 0.289] | dense |
|
||||
|
||||
#### Notes:
|
||||
All datasets will be downloaded automatically the first time they are requested, and
|
||||
stored in the _quapy_data_ folder for faster further reuse.
|
||||
|
||||
However, notice that it is a good idea to ignore datasets:
|
||||
* _acute.a_ and _acute.b_: these are very easy and many classifiers would score 100% accuracy
|
||||
* _balance.2_: this is extremely difficult; probably there is some problem with this dataset,
|
||||
the errors it tends to produce are orders of magnitude greater than for other datasets,
|
||||
and this has a disproportionate impact in the average performance.
|
||||
|
||||
### Multiclass datasets
|
||||
|
||||
A collection of 24 multiclass datasets from the [UCI Machine Learning repository](https://archive.ics.uci.edu/ml/datasets.php).
|
||||
Some of the datasets were first used in [this paper](https://arxiv.org/abs/2401.00490) and can be instantiated as follows:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
data = qp.datasets.fetch_UCIMulticlassLabelledCollection('dry-bean', verbose=True)
|
||||
```
|
||||
|
||||
A dataset can be instantiated filtering classes with a minimum number of instances using the `min_class_support` parameter
|
||||
(default: `100`) as folows:
|
||||
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
data = qp.datasets.fetch_UCIMulticlassLabelledCollection('dry-bean', min_class_support=50, verbose=True)
|
||||
```
|
||||
|
||||
There are no pre-defined train-test partitions for these datasets, but you can easily create your own with the
|
||||
`split_stratified` method, e.g., `data.split_stratified()`. This can be also achieved using the method `fetch_UCIMulticlassDataset`
|
||||
as shown below:
|
||||
|
||||
```python
|
||||
data = qp.datasets.fetch_UCIMulticlassDataset('dry-bean', min_test_split=0.4, verbose=True)
|
||||
train, test = data.train_test
|
||||
```
|
||||
|
||||
This method tries to respect the `min_test_split` value while generating the train-test partition, but the resulting training set
|
||||
will not be bigger than `max_train_instances`, which defaults to `25000`. A bigger value can be passed as a parameter:
|
||||
|
||||
```python
|
||||
data = qp.datasets.fetch_UCIMulticlassDataset('dry-bean', min_test_split=0.4, max_train_instances=30000, verbose=True)
|
||||
train, test = data.train_test
|
||||
```
|
||||
|
||||
The datasets correspond to a part of the datasets that can be retrieved from the platform using the following filters:
|
||||
* datasets for classification
|
||||
* more than 2 classes
|
||||
* containing at least 1,000 instances
|
||||
* can be imported using the Python API.
|
||||
|
||||
Some statistics about these datasets are displayed below:
|
||||
|
||||
| **Dataset** | **classes** | **instances** | **features** | **prevs** | **type** |
|
||||
|:------------|:-----------:|:-------------:|:------------:|:----------|:--------:|
|
||||
| dry-bean | 7 | 13611 | 16 | [0.097, 0.038, 0.120, 0.261, 0.142, 0.149, 0.194] | dense |
|
||||
| wine-quality | 5 | 6462 | 11 | [0.033, 0.331, 0.439, 0.167, 0.030] | dense |
|
||||
| academic-success | 3 | 4424 | 36 | [0.321, 0.179, 0.499] | dense |
|
||||
| digits | 10 | 5620 | 64 | [0.099, 0.102, 0.099, 0.102, 0.101, 0.099, 0.099, 0.101, 0.099, 0.100] | dense |
|
||||
| letter | 26 | 20000 | 16 | [0.039, 0.038, 0.037, 0.040, 0.038, 0.039, 0.039, 0.037, 0.038, 0.037, 0.037, 0.038, 0.040, 0.039, 0.038, 0.040, 0.039, 0.038, 0.037, 0.040, 0.041, 0.038, 0.038, 0.039, 0.039, 0.037] | dense |
|
||||
| abalone | 11 | 3842 | 9 | [0.030, 0.067, 0.102, 0.148, 0.179, 0.165, 0.127, 0.069, 0.053, 0.033, 0.027] | dense |
|
||||
| obesity | 7 | 2111 | 23 | [0.129, 0.136, 0.166, 0.141, 0.153, 0.137, 0.137] | dense |
|
||||
| nursery | 4 | 12958 | 19 | [0.333, 0.329, 0.312, 0.025] | dense |
|
||||
| yeast | 4 | 1299 | 8 | [0.356, 0.125, 0.188, 0.330] | dense |
|
||||
| hand_digits | 10 | 10992 | 16 | [0.104, 0.104, 0.104, 0.096, 0.104, 0.096, 0.096, 0.104, 0.096, 0.096] | dense |
|
||||
| satellite | 6 | 6435 | 36 | [0.238, 0.109, 0.211, 0.097, 0.110, 0.234] | dense |
|
||||
| shuttle | 4 | 57927 | 7 | [0.787, 0.003, 0.154, 0.056] | dense |
|
||||
| cmc | 3 | 1473 | 9 | [0.427, 0.226, 0.347] | dense |
|
||||
| isolet | 26 | 7797 | 617 | [0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038, 0.038] | dense |
|
||||
| waveform-v1 | 3 | 5000 | 21 | [0.331, 0.329, 0.339] | dense |
|
||||
| molecular | 3 | 3190 | 227 | [0.240, 0.241, 0.519] | dense |
|
||||
| poker_hand | 8 | 1024985 | 10 | [0.501, 0.423, 0.048, 0.021, 0.004, 0.002, 0.001, 0.000] | dense |
|
||||
| connect-4 | 3 | 67557 | 84 | [0.095, 0.246, 0.658] | dense |
|
||||
| mhr | 3 | 1014 | 6 | [0.268, 0.400, 0.331] | dense |
|
||||
| chess | 15 | 27870 | 20 | [0.100, 0.051, 0.102, 0.078, 0.017, 0.007, 0.163, 0.061, 0.025, 0.021, 0.014, 0.071, 0.150, 0.129, 0.009] | dense |
|
||||
| page_block | 3 | 5357 | 10 | [0.917, 0.061, 0.021] | dense |
|
||||
| phishing | 3 | 1353 | 9 | [0.519, 0.076, 0.405] | dense |
|
||||
| image_seg | 7 | 2310 | 19 | [0.143, 0.143, 0.143, 0.143, 0.143, 0.143, 0.143] | dense |
|
||||
| hcv | 4 | 1385 | 28 | [0.243, 0.240, 0.256, 0.261] | dense |
|
||||
|
||||
Values shown above refer to datasets obtained through `fetch_UCIMulticlassLabelledCollection` using all default parameters.
|
||||
|
||||
## LeQua 2022 Datasets
|
||||
|
||||
QuaPy also provides the datasets used for the LeQua 2022 competition.
|
||||
In brief, there are 4 tasks (T1A, T1B, T2A, T2B) having to do with text quantification
|
||||
problems. Tasks T1A and T1B provide documents in vector form, while T2A and T2B provide
|
||||
raw documents instead.
|
||||
Tasks T1A and T2A are binary sentiment quantification problems, while T2A and T2B
|
||||
are multiclass quantification problems consisting of estimating the class prevalence
|
||||
values of 28 different merchandise products.
|
||||
|
||||
Every task consists of a training set, a set of validation samples (for model selection)
|
||||
and a set of test samples (for evaluation). QuaPy returns this data as a LabelledCollection
|
||||
(training) and two generation protocols (for validation and test samples), as follows:
|
||||
|
||||
```python
|
||||
training, val_generator, test_generator = fetch_lequa2022(task=task)
|
||||
```
|
||||
|
||||
See the `lequa2022_experiments.py` in the examples folder for further details on how to
|
||||
carry out experiments using these datasets.
|
||||
|
||||
The datasets are downloaded only once, and stored for fast reuse.
|
||||
|
||||
Some statistics are summarized below:
|
||||
|
||||
| Dataset | classes | train size | validation samples | test samples | docs by sample | type |
|
||||
|---------|:-------:|:----------:|:------------------:|:------------:|:----------------:|:--------:|
|
||||
| T1A | 2 | 5000 | 1000 | 5000 | 250 | vector |
|
||||
| T1B | 28 | 20000 | 1000 | 5000 | 1000 | vector |
|
||||
| T2A | 2 | 5000 | 1000 | 5000 | 250 | text |
|
||||
| T2B | 28 | 20000 | 1000 | 5000 | 1000 | text |
|
||||
|
||||
For further details on the datasets, we refer to the original
|
||||
[paper](https://ceur-ws.org/Vol-3180/paper-146.pdf):
|
||||
|
||||
```
|
||||
Esuli, A., Moreo, A., Sebastiani, F., & Sperduti, G. (2022).
|
||||
A Detailed Overview of LeQua@ CLEF 2022: Learning to Quantify.
|
||||
```
|
||||
|
||||
## IFCB Plankton dataset
|
||||
|
||||
IFCB is a dataset of plankton species in water samples hosted in `Zenodo <https://zenodo.org/records/10036244>`_.
|
||||
This dataset is based on the data available publicly at `WHOI-Plankton repo <https://github.com/hsosik/WHOI-Plankton>`_
|
||||
and in the scripts for the processing are available at `P. González's repo <https://github.com/pglez82/IFCB_Zenodo>`_.
|
||||
|
||||
This dataset comes with precomputed features for testing quantification algorithms.
|
||||
|
||||
Some statistics:
|
||||
|
||||
| | **Training** | **Validation** | **Test** |
|
||||
|-----------------|:------------:|:--------------:|:--------:|
|
||||
| samples | 200 | 86 | 678 |
|
||||
| total instances | 584474 | 246916 | 2626429 |
|
||||
| mean per sample | 2922.3 | 2871.1 | 3873.8 |
|
||||
| min per sample | 266 | 59 | 33 |
|
||||
| max per sample | 6645 | 7375 | 9112 |
|
||||
|
||||
The number of features is 512, while the number of classes is 50.
|
||||
In terms of prevalence, the mean is 0.020, the minimum is 0, and the maximum is 0.978.
|
||||
|
||||
The dataset can be loaded for model selection (`for_model_selection=True`, thus returning the training and validation)
|
||||
or for test (`for_model_selection=False`, thus returning the training+validation and the test).
|
||||
|
||||
Additionally, the training can be interpreted as a list (a generator) of samples (`single_sample_train=False`)
|
||||
or as a single training set (`single_sample_train=True`).
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
train, val_gen = qp.datasets.fetch_IFCB(for_model_selection=True, single_sample_train=True)
|
||||
# ... model selection
|
||||
|
||||
train, test_gen = qp.datasets.fetch_IFCB(for_model_selection=False, single_sample_train=True)
|
||||
# ... train and evaluation
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Adding Custom Datasets
|
||||
|
||||
QuaPy provides data loaders for simple formats dealing with
|
||||
text, following the format:
|
||||
|
||||
```
|
||||
class-id \t first document's pre-processed text \n
|
||||
class-id \t second document's pre-processed text \n
|
||||
...
|
||||
```
|
||||
|
||||
and sparse representations of the form:
|
||||
|
||||
```
|
||||
{-1, 0, or +1} col(int):val(float) col(int):val(float) ... \n
|
||||
...
|
||||
```
|
||||
|
||||
The code in charge in loading a LabelledCollection is:
|
||||
|
||||
```python
|
||||
@classmethod
|
||||
def load(cls, path:str, loader_func:callable):
|
||||
return LabelledCollection(*loader_func(path))
|
||||
```
|
||||
|
||||
indicating that any _loader_func_ (e.g., a user-defined one) which
|
||||
returns valid arguments for initializing a _LabelledCollection_ object will allow
|
||||
to load any collection. In particular, the _LabelledCollection_ receives as
|
||||
arguments the instances (as an iterable) and the labels (as an iterable) and,
|
||||
additionally, the number of classes can be specified (it would otherwise be
|
||||
inferred from the labels, but that requires at least one positive example for
|
||||
all classes to be present in the collection).
|
||||
|
||||
The same _loader_func_ can be passed to a Dataset, along with two
|
||||
paths, in order to create a training and test pair of _LabelledCollection_,
|
||||
e.g.:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
|
||||
train_path = '../my_data/train.dat'
|
||||
test_path = '../my_data/test.dat'
|
||||
|
||||
def my_custom_loader(path):
|
||||
with open(path, 'rb') as fin:
|
||||
...
|
||||
return instances, labels
|
||||
|
||||
data = qp.data.Dataset.load(train_path, test_path, my_custom_loader)
|
||||
```
|
||||
|
||||
### Data Processing
|
||||
|
||||
QuaPy implements a number of preprocessing functions in the package _qp.data.preprocessing_, including:
|
||||
|
||||
* _text2tfidf_: tfidf vectorization
|
||||
* _reduce_columns_: reducing the number of columns based on term frequency
|
||||
* _standardize_: transforms the column values into z-scores (i.e., subtract the mean and normalizes by the standard deviation, so
|
||||
that the column values have zero mean and unit variance).
|
||||
* _index_: transforms textual tokens into lists of numeric ids
|
|
@ -0,0 +1,159 @@
|
|||
# Evaluation
|
||||
|
||||
Quantification is an appealing tool in scenarios of dataset shift,
|
||||
and particularly in scenarios of prior-probability shift.
|
||||
That is, the interest in estimating the class prevalences arises
|
||||
under the belief that those class prevalences might have changed
|
||||
with respect to the ones observed during training.
|
||||
In other words, one could simply return the training prevalence
|
||||
as a predictor of the test prevalence if this change is assumed
|
||||
to be unlikely (as is the case in general scenarios of
|
||||
machine learning governed by the iid assumption).
|
||||
In brief, quantification requires dedicated evaluation protocols,
|
||||
which are implemented in QuaPy and explained here.
|
||||
|
||||
## Error Measures
|
||||
|
||||
The module quapy.error implements the most popular error measures for quantification, e.g., mean absolute error (_mae_), mean relative absolute error (_mrae_), among others. For each such measure (e.g., _mrae_) there are corresponding functions (e.g., _rae_) that do not average the results across samples.
|
||||
|
||||
Some errors of classification are also available, e.g., accuracy error (_acce_) or F-1 error (_f1e_).
|
||||
|
||||
The error functions implement the following interface, e.g.:
|
||||
|
||||
```python
|
||||
mae(true_prevs, prevs_hat)
|
||||
```
|
||||
|
||||
in which the first argument is a ndarray containing the true
|
||||
prevalences, and the second argument is another ndarray with
|
||||
the estimations produced by some method.
|
||||
|
||||
Some error functions, e.g., _mrae_, _mkld_, and _mnkld_, are
|
||||
smoothed for numerical stability. In those cases, there is a
|
||||
third argument, e.g.:
|
||||
|
||||
```python
|
||||
def mrae(true_prevs, prevs_hat, eps=None): ...
|
||||
```
|
||||
|
||||
indicating the value for the smoothing parameter epsilon.
|
||||
Traditionally, this value is set to 1/(2T) in past literature,
|
||||
with T the sampling size. One could either pass this value
|
||||
to the function each time, or to set a QuaPy's environment
|
||||
variable _SAMPLE_SIZE_ once, and omit this argument
|
||||
thereafter (recommended);
|
||||
e.g.:
|
||||
|
||||
```python
|
||||
qp.environ['SAMPLE_SIZE'] = 100 # once for all
|
||||
true_prev = np.asarray([0.5, 0.3, 0.2]) # let's assume 3 classes
|
||||
estim_prev = np.asarray([0.1, 0.3, 0.6])
|
||||
error = qp.error.mrae(true_prev, estim_prev)
|
||||
print(f'mrae({true_prev}, {estim_prev}) = {error:.3f}')
|
||||
```
|
||||
|
||||
will print:
|
||||
```
|
||||
mrae([0.500, 0.300, 0.200], [0.100, 0.300, 0.600]) = 0.914
|
||||
```
|
||||
|
||||
Finally, it is possible to instantiate QuaPy's quantification
|
||||
error functions from strings using, e.g.:
|
||||
|
||||
```python
|
||||
error_function = qp.error.from_name('mse')
|
||||
error = error_function(true_prev, estim_prev)
|
||||
```
|
||||
|
||||
## Evaluation Protocols
|
||||
|
||||
An _evaluation protocol_ is an evaluation procedure that uses
|
||||
one specific _sample generation protocol_ to generate many
|
||||
samples, typically characterized by widely varying amounts of
|
||||
_shift_ with respect to the original distribution, that are then
|
||||
used to evaluate the performance of a (trained) quantifier.
|
||||
These protocols are explained in more detail in a dedicated [manual](./protocols.md).
|
||||
For the moment being, let us assume we already have
|
||||
chosen and instantiated one specific such protocol, that we here
|
||||
simply call _prot_. Let us also assume our model is called
|
||||
_quantifier_ and that our evaluation measure of choice is
|
||||
_mae_. The evaluation comes down to:
|
||||
|
||||
```python
|
||||
mae = qp.evaluation.evaluate(quantifier, protocol=prot, error_metric='mae')
|
||||
print(f'MAE = {mae:.4f}')
|
||||
```
|
||||
|
||||
It is often desirable to evaluate our system using more than one
|
||||
single evaluation measure. In this case, it is convenient to generate
|
||||
a _report_. A report in QuaPy is a dataframe accounting for all the
|
||||
true prevalence values with their corresponding prevalence values
|
||||
as estimated by the quantifier, along with the error each has given
|
||||
rise.
|
||||
|
||||
```python
|
||||
report = qp.evaluation.evaluation_report(quantifier, protocol=prot, error_metrics=['mae', 'mrae', 'mkld'])
|
||||
```
|
||||
|
||||
From a pandas' dataframe, it is straightforward to visualize all the results,
|
||||
and compute the averaged values, e.g.:
|
||||
|
||||
```python
|
||||
pd.set_option('display.expand_frame_repr', False)
|
||||
report['estim-prev'] = report['estim-prev'].map(F.strprev)
|
||||
print(report)
|
||||
|
||||
print('Averaged values:')
|
||||
print(report.mean())
|
||||
```
|
||||
|
||||
This will produce an output like:
|
||||
|
||||
```
|
||||
true-prev estim-prev mae mrae mkld
|
||||
0 [0.308, 0.692] [0.314, 0.686] 0.005649 0.013182 0.000074
|
||||
1 [0.896, 0.104] [0.909, 0.091] 0.013145 0.069323 0.000985
|
||||
2 [0.848, 0.152] [0.809, 0.191] 0.039063 0.149806 0.005175
|
||||
3 [0.016, 0.984] [0.033, 0.967] 0.017236 0.487529 0.005298
|
||||
4 [0.728, 0.272] [0.751, 0.249] 0.022769 0.057146 0.001350
|
||||
... ... ... ... ... ...
|
||||
4995 [0.72, 0.28] [0.698, 0.302] 0.021752 0.053631 0.001133
|
||||
4996 [0.868, 0.132] [0.888, 0.112] 0.020490 0.088230 0.001985
|
||||
4997 [0.292, 0.708] [0.298, 0.702] 0.006149 0.014788 0.000090
|
||||
4998 [0.24, 0.76] [0.220, 0.780] 0.019950 0.054309 0.001127
|
||||
4999 [0.948, 0.052] [0.965, 0.035] 0.016941 0.165776 0.003538
|
||||
|
||||
[5000 rows x 5 columns]
|
||||
Averaged values:
|
||||
mae 0.023588
|
||||
mrae 0.108779
|
||||
mkld 0.003631
|
||||
dtype: float64
|
||||
|
||||
Process finished with exit code 0
|
||||
```
|
||||
|
||||
Alternatively, we can simply generate all the predictions by:
|
||||
|
||||
```python
|
||||
true_prevs, estim_prevs = qp.evaluation.prediction(quantifier, protocol=prot)
|
||||
```
|
||||
|
||||
All the evaluation functions implement specific optimizations for speeding-up
|
||||
the evaluation of aggregative quantifiers (i.e., of instances of _AggregativeQuantifier_).
|
||||
The optimization comes down to generating classification predictions (either crisp or soft)
|
||||
only once for the entire test set, and then applying the sampling procedure to the
|
||||
predictions, instead of generating samples of instances and then computing the
|
||||
classification predictions every time. This is only possible when the protocol
|
||||
is an instance of _OnLabelledCollectionProtocol_. The optimization is only
|
||||
carried out when the number of classification predictions thus generated would be
|
||||
smaller than the number of predictions required for the entire protocol; e.g.,
|
||||
if the original dataset contains 1M instances, but the protocol is such that it would
|
||||
at most generate 20 samples of 100 instances, then it would be preferable to postpone the
|
||||
classification for each sample. This behaviour is indicated by setting
|
||||
_aggr_speedup="auto"_. Conversely, when indicating _aggr_speedup="force"_ QuaPy will
|
||||
precompute all the predictions irrespectively of the number of instances and number of samples.
|
||||
Finally, this can be deactivated by setting _aggr_speedup=False_. Note that this optimization
|
||||
is not only applied for the final evaluation, but also for the internal evaluations carried
|
||||
out during _model selection_. Since these are typically many, the heuristic can help reduce the
|
||||
execution time a lot.
|
|
@ -0,0 +1,26 @@
|
|||
# Explicit Loss Minimization
|
||||
|
||||
QuaPy makes available several Explicit Loss Minimization (ELM) methods, including
|
||||
SVM(Q), SVM(KLD), SVM(NKLD), SVM(AE), or SVM(RAE).
|
||||
These methods require to first download the
|
||||
[svmperf](http://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html)
|
||||
package, apply the patch
|
||||
[svm-perf-quantification-ext.patch](https://github.com/HLT-ISTI/QuaPy/blob/master/svm-perf-quantification-ext.patch), and compile the sources.
|
||||
The script [prepare_svmperf.sh](https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh) does all the job. Simply run:
|
||||
|
||||
```
|
||||
./prepare_svmperf.sh
|
||||
```
|
||||
|
||||
The resulting directory `svm_perf_quantification/` contains the
|
||||
patched version of _svmperf_ with quantification-oriented losses.
|
||||
|
||||
The [svm-perf-quantification-ext.patch](https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh) is an extension of the patch made available by
|
||||
[Esuli et al. 2015](https://dl.acm.org/doi/abs/10.1145/2700406?casa_token=8D2fHsGCVn0AAAAA:ZfThYOvrzWxMGfZYlQW_y8Cagg-o_l6X_PcF09mdETQ4Tu7jK98mxFbGSXp9ZSO14JkUIYuDGFG0)
|
||||
that allows SVMperf to optimize for
|
||||
the _Q_ measure as proposed by [Barranquero et al. 2015](https://www.sciencedirect.com/science/article/abs/pii/S003132031400291X)
|
||||
and for the _KLD_ and _NKLD_ measures as proposed by [Esuli et al. 2015](https://dl.acm.org/doi/abs/10.1145/2700406?casa_token=8D2fHsGCVn0AAAAA:ZfThYOvrzWxMGfZYlQW_y8Cagg-o_l6X_PcF09mdETQ4Tu7jK98mxFbGSXp9ZSO14JkUIYuDGFG0).
|
||||
This patch extends the above one by also allowing SVMperf to optimize for
|
||||
_AE_ and _RAE_.
|
||||
See the [](./methods) manual for more details and code examples.
|
||||
|
|
@ -0,0 +1,584 @@
|
|||
# Quantification Methods
|
||||
|
||||
Quantification methods can be categorized as belonging to
|
||||
`aggregative` and `non-aggregative` groups.
|
||||
Most methods included in QuaPy at the moment are of type `aggregative`
|
||||
(though we plan to add many more methods in the near future), i.e.,
|
||||
are methods characterized by the fact that
|
||||
quantification is performed as an aggregation function of the individual
|
||||
products of classification.
|
||||
|
||||
Any quantifier in QuaPy should extend the class `BaseQuantifier`,
|
||||
and implement some abstract methods:
|
||||
```python
|
||||
@abstractmethod
|
||||
def fit(self, data: LabelledCollection): ...
|
||||
|
||||
@abstractmethod
|
||||
def quantify(self, instances): ...
|
||||
```
|
||||
The meaning of those functions should be familiar to those
|
||||
used to work with scikit-learn since the class structure of QuaPy
|
||||
is directly inspired by scikit-learn's _Estimators_. Functions
|
||||
`fit` and `quantify` are used to train the model and to provide
|
||||
class estimations (the reason why
|
||||
scikit-learn's structure has not been adopted _as is_ in QuaPy responds to
|
||||
the fact that scikit-learn's `predict` function is expected to return
|
||||
one output for each input element --e.g., a predicted label for each
|
||||
instance in a sample-- while in quantification the output for a sample
|
||||
is one single array of class prevalences).
|
||||
Quantifiers also extend from scikit-learn's `BaseEstimator`, in order
|
||||
to simplify the use of `set_params` and `get_params` used in
|
||||
[model selection](./model-selection).
|
||||
|
||||
## Aggregative Methods
|
||||
|
||||
All quantification methods are implemented as part of the
|
||||
`qp.method` package. In particular, `aggregative` methods are defined in
|
||||
`qp.method.aggregative`, and extend `AggregativeQuantifier(BaseQuantifier)`.
|
||||
The methods that any `aggregative` quantifier must implement are:
|
||||
|
||||
```python
|
||||
@abstractmethod
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
|
||||
@abstractmethod
|
||||
def aggregate(self, classif_predictions:np.ndarray): ...
|
||||
```
|
||||
|
||||
These two functions replace the `fit` and `quantify` methods, since those
|
||||
come with default implementations. The `fit` function is provided and amounts to:
|
||||
|
||||
```python
|
||||
def fit(self, data: LabelledCollection, fit_classifier=True, val_split=None):
|
||||
self._check_init_parameters()
|
||||
classif_predictions = self.classifier_fit_predict(data, fit_classifier, predict_on=val_split)
|
||||
self.aggregation_fit(classif_predictions, data)
|
||||
return self
|
||||
```
|
||||
|
||||
Note that this function fits the classifier, and generates the predictions. This is assumed
|
||||
to be a routine common to all aggregative quantifiers, and is provided by QuaPy. What remains
|
||||
ahead is to define the `aggregation_fit` function, that takes as input the classifier predictions
|
||||
and the original training data (this latter is typically unused). The classifier predictions
|
||||
can be:
|
||||
- confidence scores: quantifiers inheriting directly from `AggregativeQuantifier`
|
||||
- crisp predictions: quantifiers inheriting from `AggregativeCrispQuantifier`
|
||||
- posterior probabilities: quantifiers inheriting from `AggregativeSoftQuantifier`
|
||||
- _anything_: custom quantifiers overriding the `classify` method
|
||||
|
||||
Note also that the `fit` method also calls `_check_init_parameters`; this function is meant to be
|
||||
overriden (if needed) and allows the method to quickly raise any exception based on any inconsistency
|
||||
found in the `__init__` arguments, thus avoiding to break after training the classifier and generating
|
||||
predictions.
|
||||
|
||||
Similarly, the function `quantify` is provided, and amounts to:
|
||||
|
||||
```python
|
||||
def quantify(self, instances):
|
||||
classif_predictions = self.classify(instances)
|
||||
return self.aggregate(classif_predictions)
|
||||
```
|
||||
|
||||
in which only the function `aggregate` is required to be overriden in most cases.
|
||||
|
||||
Aggregative quantifiers are expected to maintain a classifier (which is
|
||||
accessed through the `@property` `classifier`). This classifier is
|
||||
given as input to the quantifier, and can be already fit
|
||||
on external data (in which case, the `fit_learner` argument should
|
||||
be set to False), or be fit by the quantifier's fit (default).
|
||||
|
||||
The above patterns (in training: fit the classifier, then fit the aggregation;
|
||||
in test: classify, then aggregate) allows QuaPy to optimize many internal procedures.
|
||||
In particular, the model selection routing takes advantage of this two-step process
|
||||
and generates classifiers only for the valid combinations of hyperparameters of the
|
||||
classifier, and then _clones_ these classifiers and explores the combinations
|
||||
of hyperparameters that are specific to the quantifier (this can result in huge
|
||||
time savings).
|
||||
Concerning the inference phase, this two-step process allows the evaluation of many
|
||||
standard protocols (e.g., the [artificial sampling protocol](./evaluation)) to be
|
||||
carried out very efficiently. The reason is that the entire set can be pre-classified
|
||||
once, and the quantification estimations for different samples can directly
|
||||
reuse these predictions, without requiring to classify each element every time.
|
||||
QuaPy leverages this property to speed-up any procedure having to do with
|
||||
quantification over samples, as is customarily done in model selection or
|
||||
in evaluation.
|
||||
|
||||
### The Classify & Count variants
|
||||
|
||||
QuaPy implements the four CC variants, i.e.:
|
||||
|
||||
* _CC_ (Classify & Count), the simplest aggregative quantifier; one that
|
||||
simply relies on the label predictions of a classifier to deliver class estimates.
|
||||
* _ACC_ (Adjusted Classify & Count), the adjusted variant of CC.
|
||||
* _PCC_ (Probabilistic Classify & Count), the probabilistic variant of CC that
|
||||
relies on the soft estimations (or posterior probabilities) returned by a (probabilistic) classifier.
|
||||
* _PACC_ (Probabilistic Adjusted Classify & Count), the adjusted variant of PCC.
|
||||
|
||||
The following code serves as a complete example using CC equipped
|
||||
with a SVM as the classifier:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
from sklearn.svm import LinearSVC
|
||||
|
||||
training, test = qp.datasets.fetch_twitter('hcr', pickle=True).train_test
|
||||
|
||||
# instantiate a classifier learner, in this case a SVM
|
||||
svm = LinearSVC()
|
||||
|
||||
# instantiate a Classify & Count with the SVM
|
||||
# (an alias is available in qp.method.aggregative.ClassifyAndCount)
|
||||
model = qp.method.aggregative.CC(svm)
|
||||
model.fit(training)
|
||||
estim_prevalence = model.quantify(test.instances)
|
||||
```
|
||||
|
||||
The same code could be used to instantiate an ACC, by simply replacing
|
||||
the instantiation of the model with:
|
||||
```python
|
||||
model = qp.method.aggregative.ACC(svm)
|
||||
```
|
||||
Note that the adjusted variants (ACC and PACC) need to estimate
|
||||
some parameters for performing the adjustment (e.g., the
|
||||
_true positive rate_ and the _false positive rate_ in case of
|
||||
binary classification) that are estimated on a validation split
|
||||
of the labelled set. In this case, the `__init__` method of
|
||||
ACC defines an additional parameter, `val_split`. If this parameter
|
||||
is set to a float in [0,1] representing a fraction (e.g., 0.4)
|
||||
then that fraction of labelled data (e.g., 40%)
|
||||
will be used for estimating the parameters for adjusting the
|
||||
predictions. This parameters can also be set with an integer,
|
||||
indicating that the parameters should be estimated by means of
|
||||
_k_-fold cross-validation, for which the integer indicates the
|
||||
number _k_ of folds (the default value is 5). Finally, `val_split` can be set to a
|
||||
specific held-out validation set (i.e., an instance of `LabelledCollection`).
|
||||
|
||||
The specification of `val_split` can be
|
||||
postponed to the invocation of the fit method (if `val_split` was also
|
||||
set in the constructor, the one specified at fit time would prevail),
|
||||
e.g.:
|
||||
|
||||
```python
|
||||
model = qp.method.aggregative.ACC(svm)
|
||||
# perform 5-fold cross validation for estimating ACC's parameters
|
||||
# (overrides the default val_split=0.4 in the constructor)
|
||||
model.fit(training, val_split=5)
|
||||
```
|
||||
|
||||
The following code illustrates the case in which PCC is used:
|
||||
|
||||
```python
|
||||
model = qp.method.aggregative.PCC(svm)
|
||||
model.fit(training)
|
||||
estim_prevalence = model.quantify(test.instances)
|
||||
print('classifier:', model.classifier)
|
||||
```
|
||||
In this case, QuaPy will print:
|
||||
```
|
||||
The learner LinearSVC does not seem to be probabilistic. The learner will be calibrated.
|
||||
classifier: CalibratedClassifierCV(base_estimator=LinearSVC(), cv=5)
|
||||
```
|
||||
The first output indicates that the learner (`LinearSVC` in this case)
|
||||
is not a probabilistic classifier (i.e., it does not implement the
|
||||
`predict_proba` method) and so, the classifier will be converted to
|
||||
a probabilistic one through [calibration](https://scikit-learn.org/stable/modules/calibration.html).
|
||||
As a result, the classifier that is printed in the second line points
|
||||
to a `CalibratedClassifier` instance. Note that calibration can only
|
||||
be applied to hard classifiers when `fit_learner=True`; an exception
|
||||
will be raised otherwise.
|
||||
|
||||
Lastly, everything we said about ACC and PCC
|
||||
applies to PACC as well.
|
||||
|
||||
_New in v0.1.9_: quantifiers ACC and PACC now have three additional arguments: `method`, `solver` and `norm`:
|
||||
|
||||
* Argument `method` specifies how to solve, for `p`, the linear system `q = Mp` (where `q` is the unadjusted counts for the
|
||||
test sample, `M` contains the class-conditional unadjusted counts --i.e., the missclassification rates-- and `p` is the
|
||||
sought prevalence vector):
|
||||
* option `"inversion"`: attempts to invert matrix `M`, thus solving `Minv q = p`. In degenerated cases, this
|
||||
inversion may not exist. In such cases, the method defaults to returning `q` (the unadjusted counts)
|
||||
* option `"invariant-ratio"` uses the invariant ratio estimator system proposed in Remark 5 of
|
||||
[Vaz, A.F., Izbicki F. and Stern, R.B. "Quantification Under Prior Probability Shift: the Ratio Estimator
|
||||
and its Extensions", in Journal of Machine Learning Research 20 (2019)](https://jmlr.csail.mit.edu/papers/volume20/18-456/18-456.pdf).
|
||||
|
||||
* Argument `solver` specifies how to solve the linear system.
|
||||
* `"exact-raise"` solves the system of linear equations and raises an exception if the system is not solvable
|
||||
* `"exact-cc"` returns the original unadjusted count if the system is not solvable
|
||||
* `"minimize"` minimizes the L2 norm of :math:`|Mp-q|`. This one generally works better, and is the
|
||||
default parameter. More details about this can be consulted in
|
||||
[Bunse, M. "On Multi-Class Extensions of Adjusted Classify and Count",
|
||||
on proceedings of the 2nd International Workshop on Learning to Quantify: Methods and Applications (LQ 2022),
|
||||
ECML/PKDD 2022, Grenoble (France)](https://lq-2022.github.io/proceedings/CompleteVolume.pdf).
|
||||
|
||||
* Argument `norm` specifies how to normalize the estimate `p` when the vector lies outside of the probability simplex.
|
||||
Options are:
|
||||
* `"clip"` which clips the values to range `[0, 1]` and then L1-normalizes the vector
|
||||
* `"mapsimplex"` which projects the results on the probability simplex, as proposed by Vaz et al. in
|
||||
[Remark 5 of Vaz et al. (2019)](https://jmlr.csail.mit.edu/papers/volume20/18-456/18-456.pdf). This implementation
|
||||
relies on [Mathieu Blondel's `projection_simplex_sort`](https://gist.github.com/mblondel/6f3b7aaad90606b98f71)
|
||||
* `"condsoftmax"` applies softmax normalization only if the prevalence vector lies outside of the probability simplex.
|
||||
|
||||
|
||||
#### BayesianCC (_New in v0.1.9_!)
|
||||
|
||||
The `BayesianCC` is a variant of ACC introduced in
|
||||
[Ziegler, A. and Czyż, P. "Bayesian quantification with black-box estimators", arXiv (2023)](https://arxiv.org/abs/2302.09159),
|
||||
which models the probabilities `q = Mp` using latent random variables with weak Bayesian priors, rather than
|
||||
plug-in probability estimates. In particular, it uses Markov Chain Monte Carlo sampling to find the values of
|
||||
`p` compatible with the observed quantities.
|
||||
The `aggregate` method returns the posterior mean and the `get_prevalence_samples` method can be used to find
|
||||
uncertainty around `p` estimates (conditional on the observed data and the trained classifier)
|
||||
and is suitable for problems in which the `q = Mp` matrix is nearly non-invertible.
|
||||
|
||||
Note that this quantification method requires `val_split` to be a `float` and installation of additional dependencies (`$ pip install quapy[bayes]`) needed to run Markov chain Monte Carlo sampling. Markov Chain Monte Carlo is slower than matrix inversion methods, but is guaranteed to sample proper probability vectors, so no clipping strategies are required.
|
||||
An example presenting how to run the method and use posterior samples is available in `examples/bayesian_quantification.py`.
|
||||
|
||||
### Expectation Maximization (EMQ)
|
||||
|
||||
The Expectation Maximization Quantifier (EMQ), also known as
|
||||
the SLD, is available at `qp.method.aggregative.EMQ` or via the
|
||||
alias `qp.method.aggregative.ExpectationMaximizationQuantifier`.
|
||||
The method is described in:
|
||||
|
||||
_Saerens, M., Latinne, P., and Decaestecker, C. (2002). Adjusting the outputs of a classifier
|
||||
to new a priori probabilities: A simple procedure. Neural Computation, 14(1):21–41._
|
||||
|
||||
EMQ works with a probabilistic classifier (if the classifier
|
||||
given as input is a hard one, a calibration will be attempted).
|
||||
Although this method was originally proposed for improving the
|
||||
posterior probabilities of a probabilistic classifier, and not
|
||||
for improving the estimation of prior probabilities, EMQ ranks
|
||||
almost always among the most effective quantifiers in the
|
||||
experiments we have carried out.
|
||||
|
||||
An example of use can be found below:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
dataset = qp.datasets.fetch_twitter('hcr', pickle=True)
|
||||
|
||||
model = qp.method.aggregative.EMQ(LogisticRegression())
|
||||
model.fit(dataset.training)
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
```
|
||||
|
||||
_New in v0.1.7_: EMQ now accepts two new parameters in the construction method, namely
|
||||
`exact_train_prev` which allows to use the true training prevalence as the departing
|
||||
prevalence estimation (default behaviour), or instead an approximation of it as
|
||||
suggested by [Alexandari et al. (2020)](http://proceedings.mlr.press/v119/alexandari20a.html)
|
||||
(by setting `exact_train_prev=False`).
|
||||
The other parameter is `recalib` which allows to indicate a calibration method, among those
|
||||
proposed by [Alexandari et al. (2020)](http://proceedings.mlr.press/v119/alexandari20a.html),
|
||||
including the Bias-Corrected Temperature Scaling, Vector Scaling, etc.
|
||||
See the API documentation for further details.
|
||||
|
||||
|
||||
### Hellinger Distance y (HDy)
|
||||
|
||||
Implementation of the method based on the Hellinger Distance y (HDy) proposed by
|
||||
[González-Castro, V., Alaiz-Rodrı́guez, R., and Alegre, E. (2013). Class distribution
|
||||
estimation based on the Hellinger distance. Information Sciences, 218:146–164.](https://www.sciencedirect.com/science/article/pii/S0020025512004069)
|
||||
|
||||
It is implemented in `qp.method.aggregative.HDy` (also accessible
|
||||
through the alias `qp.method.aggregative.HellingerDistanceY`).
|
||||
This method works with a probabilistic classifier (hard classifiers
|
||||
can be used as well and will be calibrated) and requires a validation
|
||||
set to estimate the parameters of the mixture model. Just like
|
||||
ACC and PACC, this quantifier receives a `val_split` argument
|
||||
in the constructor (or in the fit method, in which case the previous
|
||||
value is overridden) that can either be a float indicating the proportion
|
||||
of training data to be taken as the validation set (in a random
|
||||
stratified split), or a validation set (i.e., an instance of
|
||||
`LabelledCollection`) itself.
|
||||
|
||||
HDy was proposed as a binary classifier and the implementation
|
||||
provided in QuaPy accepts only binary datasets.
|
||||
|
||||
The following code shows an example of use:
|
||||
```python
|
||||
import quapy as qp
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
# load a binary dataset
|
||||
dataset = qp.datasets.fetch_reviews('hp', pickle=True)
|
||||
qp.data.preprocessing.text2tfidf(dataset, min_df=5, inplace=True)
|
||||
|
||||
model = qp.method.aggregative.HDy(LogisticRegression())
|
||||
model.fit(dataset.training)
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
```
|
||||
|
||||
_New in v0.1.7:_ QuaPy now provides an implementation of the generalized
|
||||
"Distribution Matching" approaches for multiclass, inspired by the framework
|
||||
of [Firat (2016)](https://arxiv.org/abs/1606.00868). One can instantiate
|
||||
a variant of HDy for multiclass quantification as follows:
|
||||
|
||||
```python
|
||||
mutliclassHDy = qp.method.aggregative.DMy(classifier=LogisticRegression(), divergence='HD', cdf=False)
|
||||
```
|
||||
|
||||
_New in v0.1.7:_ QuaPy now provides an implementation of the "DyS"
|
||||
framework proposed by [Maletzke et al (2020)](https://ojs.aaai.org/index.php/AAAI/article/view/4376)
|
||||
and the "SMM" method proposed by [Hassan et al (2019)](https://ieeexplore.ieee.org/document/9260028)
|
||||
(thanks to _Pablo González_ for the contributions!)
|
||||
|
||||
### Threshold Optimization methods
|
||||
|
||||
_New in v0.1.7:_ QuaPy now implements Forman's threshold optimization methods;
|
||||
see, e.g., [(Forman 2006)](https://dl.acm.org/doi/abs/10.1145/1150402.1150423)
|
||||
and [(Forman 2008)](https://link.springer.com/article/10.1007/s10618-008-0097-y).
|
||||
These include: T50, MAX, X, Median Sweep (MS), and its variant MS2.
|
||||
|
||||
### Explicit Loss Minimization
|
||||
|
||||
The Explicit Loss Minimization (ELM) represent a family of methods
|
||||
based on structured output learning, i.e., quantifiers relying on
|
||||
classifiers that have been optimized targeting a
|
||||
quantification-oriented evaluation measure.
|
||||
The original methods are implemented in QuaPy as classify & count (CC)
|
||||
quantifiers that use Joachim's [SVMperf](https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html)
|
||||
as the underlying classifier, properly set to optimize for the desired loss.
|
||||
|
||||
In QuaPy, this can be achieved by calling the functions:
|
||||
|
||||
* `newSVMQ`: returns the quantification method called SVM(Q) that optimizes for the metric _Q_ defined
|
||||
in [_Barranquero, J., Díez, J., and del Coz, J. J. (2015). Quantification-oriented learning based
|
||||
on reliable classifiers. Pattern Recognition, 48(2):591–604._](https://www.sciencedirect.com/science/article/pii/S003132031400291X)
|
||||
* `newSVMKLD` and `newSVMNKLD`: returns the quantification method called SVM(KLD) and SVM(nKLD), standing for
|
||||
Kullback-Leibler Divergence and Normalized Kullback-Leibler Divergence, as proposed in [_Esuli, A. and Sebastiani, F. (2015).
|
||||
Optimizing text quantifiers for multivariate loss functions.
|
||||
ACM Transactions on Knowledge Discovery and Data, 9(4):Article 27._](https://dl.acm.org/doi/abs/10.1145/2700406)
|
||||
* `newSVMAE` and `newSVMRAE`: returns a quantification method called SVM(AE) and SVM(RAE) that optimizes for the (Mean) Absolute Error and for the
|
||||
(Mean) Relative Absolute Error, as first used by
|
||||
[_Moreo, A. and Sebastiani, F. (2021). Tweet sentiment quantification: An experimental re-evaluation. PLOS ONE 17 (9), 1-23._](https://arxiv.org/abs/2011.02552)
|
||||
|
||||
the last two methods (SVM(AE) and SVM(RAE)) have been implemented in
|
||||
QuaPy in order to make available ELM variants for what nowadays
|
||||
are considered the most well-behaved evaluation metrics in quantification.
|
||||
|
||||
In order to make these models work, you would need to run the script
|
||||
`prepare_svmperf.sh` (distributed along with QuaPy) that
|
||||
downloads `SVMperf`'s source code, applies a patch that
|
||||
implements the quantification oriented losses, and compiles the
|
||||
sources.
|
||||
|
||||
If you want to add any custom loss, you would need to modify
|
||||
the source code of `SVMperf` in order to implement it, and
|
||||
assign a valid loss code to it. Then you must re-compile
|
||||
the whole thing and instantiate the quantifier in QuaPy
|
||||
as follows:
|
||||
|
||||
```python
|
||||
# you can either set the path to your custom svm_perf_quantification implementation
|
||||
# in the environment variable, or as an argument to the constructor of ELM
|
||||
qp.environ['SVMPERF_HOME'] = './path/to/svm_perf_quantification'
|
||||
|
||||
# assign an alias to your custom loss and the id you have assigned to it
|
||||
svmperf = qp.classification.svmperf.SVMperf
|
||||
svmperf.valid_losses['mycustomloss'] = 28
|
||||
|
||||
# instantiate the ELM method indicating the loss
|
||||
model = qp.method.aggregative.ELM(loss='mycustomloss')
|
||||
```
|
||||
|
||||
All ELM are binary quantifiers since they rely on `SVMperf`, that
|
||||
currently supports only binary classification.
|
||||
ELM variants (any binary quantifier in general) can be extended
|
||||
to operate in single-label scenarios trivially by adopting a
|
||||
"one-vs-all" strategy (as, e.g., in
|
||||
[_Gao, W. and Sebastiani, F. (2016). From classification to quantification in tweet sentiment
|
||||
analysis. Social Network Analysis and Mining, 6(19):1–22_](https://link.springer.com/article/10.1007/s13278-016-0327-z)).
|
||||
In QuaPy this is possible by using the `OneVsAll` class.
|
||||
|
||||
There are two ways for instantiating this class, `OneVsAllGeneric` that works for
|
||||
any quantifier, and `OneVsAllAggregative` that is optimized for aggregative quantifiers.
|
||||
In general, you can simply use the `newOneVsAll` function and QuaPy will choose
|
||||
the more convenient of the two.
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from quapy.method.aggregative import SVMQ
|
||||
|
||||
# load a single-label dataset (this one contains 3 classes)
|
||||
dataset = qp.datasets.fetch_twitter('hcr', pickle=True)
|
||||
|
||||
# let qp know where svmperf is
|
||||
qp.environ['SVMPERF_HOME'] = '../svm_perf_quantification'
|
||||
|
||||
model = newOneVsAll(SVMQ(), n_jobs=-1)  # run them in parallel
|
||||
model.fit(dataset.training)
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
```
|
||||
|
||||
Check the examples on [explicit_loss_minimization](https://github.com/HLT-ISTI/QuaPy/blob/devel/examples/5.explicit_loss_minimization.py)
|
||||
and on [one versus all quantification](https://github.com/HLT-ISTI/QuaPy/blob/devel/examples/10.one_vs_all.py) for more details.
|
||||
|
||||
### Kernel Density Estimation methods (KDEy)
|
||||
|
||||
_New in v0.1.8_: QuaPy now provides implementations for the three variants
|
||||
of KDE-based methods proposed in
|
||||
_[Moreo, A., González, P. and del Coz, J.J., 2023.
|
||||
Kernel Density Estimation for Multiclass Quantification.
|
||||
arXiv preprint arXiv:2401.00490.](https://arxiv.org/abs/2401.00490)_.
|
||||
The variants differ in the divergence metric to be minimized:
|
||||
|
||||
- KDEy-HD: minimizes the (squared) Hellinger Distance and solves the problem via a Monte Carlo approach
|
||||
- KDEy-CS: minimizes the Cauchy-Schwarz divergence and solves the problem via a closed-form solution
|
||||
- KDEy-ML: minimizes the Kullback-Leibler divergence and solves the problem via maximum-likelihood
|
||||
|
||||
These methods are specifically devised for multiclass problems (although they can tackle
|
||||
binary problems too).
|
||||
|
||||
All KDE-based methods depend on the hyperparameter `bandwidth` of the kernel. Typical values
|
||||
that can be explored in model selection range in [0.01, 0.25]. The methods' performance
|
||||
vary smoothly with smooth variations of this hyperparameter.
|
||||
|
||||
|
||||
## Composable Methods
|
||||
|
||||
The [](quapy.method.composable) module allows the composition of quantification methods from loss functions and feature transformations. Any composed method solves a linear system of equations by minimizing the loss after transforming the data. Methods of this kind include ACC, PACC, HDx, HDy, and many other well-known methods, as well as an unlimited number of re-combinations of their building blocks.
|
||||
|
||||
### Installation
|
||||
|
||||
```sh
|
||||
pip install --upgrade pip setuptools wheel
|
||||
pip install "jax[cpu]"
|
||||
pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
|
||||
```
|
||||
|
||||
### Basics
|
||||
|
||||
The composition of a method is implemented through the [](quapy.method.composable.ComposableQuantifier) class. Its documentation also features an example to get you started in composing your own methods.
|
||||
|
||||
```python
|
||||
ComposableQuantifier( # ordinal ACC, as proposed by Bunse et al., 2022
|
||||
TikhonovRegularized(LeastSquaresLoss(), 0.01),
|
||||
ClassTransformer(RandomForestClassifier(oob_score=True))
|
||||
)
|
||||
```
|
||||
|
||||
More exhaustive examples of method compositions, including hyper-parameter optimization, can be found in [the example directory](https://github.com/HLT-ISTI/QuaPy/tree/master/examples).
|
||||
|
||||
To implement your own loss functions and feature representations, follow the corresponding manual of the [qunfold package](https://github.com/mirkobunse/qunfold), which provides the back-end of QuaPy's composable module.
|
||||
|
||||
### Loss functions
|
||||
|
||||
- [](quapy.method.composable.LeastSquaresLoss)
|
||||
- [](quapy.method.composable.EnergyLoss)
|
||||
- [](quapy.method.composable.HellingerSurrogateLoss)
|
||||
- [](quapy.method.composable.BlobelLoss)
|
||||
- [](quapy.method.composable.CombinedLoss)
|
||||
|
||||
```{hint}
|
||||
You can use the [](quapy.method.composable.CombinedLoss) to create arbitrary, weighted sums of losses and regularizers.
|
||||
```
|
||||
|
||||
### Regularization functions
|
||||
|
||||
- [](quapy.method.composable.TikhonovRegularized)
|
||||
- [](quapy.method.composable.TikhonovRegularization)
|
||||
|
||||
### Feature transformations
|
||||
|
||||
- [](quapy.method.composable.ClassTransformer)
|
||||
- [](quapy.method.composable.DistanceTransformer)
|
||||
- [](quapy.method.composable.HistogramTransformer)
|
||||
- [](quapy.method.composable.EnergyKernelTransformer)
|
||||
- [](quapy.method.composable.GaussianKernelTransformer)
|
||||
- [](quapy.method.composable.LaplacianKernelTransformer)
|
||||
- [](quapy.method.composable.GaussianRFFKernelTransformer)
|
||||
|
||||
```{hint}
|
||||
The [](quapy.method.composable.ClassTransformer) requires the classifier to have a property `oob_score==True` and to produce a property `oob_decision_function` during fitting. In [scikit-learn](https://scikit-learn.org/), this requirement is fulfilled by any bagging classifier, such as random forests. Any other classifier needs to be cross-validated through the [](quapy.method.composable.CVClassifier).
|
||||
```
|
||||
|
||||
|
||||
## Meta Models
|
||||
|
||||
By _meta_ models we mean quantification methods that are defined on top of other
|
||||
quantification methods, and that thus do not squarely belong to the aggregative nor
|
||||
the non-aggregative group (indeed, _meta_ models could use quantifiers from any of those
|
||||
groups).
|
||||
_Meta_ models are implemented in the `qp.method.meta` module.
|
||||
|
||||
### Ensembles
|
||||
|
||||
QuaPy implements (some of) the variants proposed in:
|
||||
|
||||
* [_Pérez-Gállego, P., Quevedo, J. R., & del Coz, J. J. (2017).
|
||||
Using ensembles for problems with characterizable changes in data distribution: A case study on quantification.
|
||||
Information Fusion, 34, 87-100._](https://www.sciencedirect.com/science/article/pii/S1566253516300628)
|
||||
* [_Pérez-Gállego, P., Castano, A., Quevedo, J. R., & del Coz, J. J. (2019).
|
||||
Dynamic ensemble selection for quantification tasks.
|
||||
Information Fusion, 45, 1-15._](https://www.sciencedirect.com/science/article/pii/S1566253517303652)
|
||||
|
||||
The following code shows how to instantiate an Ensemble of 30 _Adjusted Classify & Count_ (ACC)
|
||||
quantifiers operating with a _Logistic Regressor_ (LR) as the base classifier, and using the
|
||||
_average_ as the aggregation policy (see the original article for further details).
|
||||
The last parameter indicates to use all processors for parallelization.
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from quapy.method.aggregative import ACC
|
||||
from quapy.method.meta import Ensemble
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
dataset = qp.datasets.fetch_UCIBinaryDataset('haberman')
|
||||
|
||||
model = Ensemble(quantifier=ACC(LogisticRegression()), size=30, policy='ave', n_jobs=-1)
|
||||
model.fit(dataset.training)
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
```
|
||||
|
||||
Other aggregation policies implemented in QuaPy include:
|
||||
* 'ptr' for applying a dynamic selection based on the training prevalence of the ensemble's members
|
||||
* 'ds' for applying a dynamic selection based on the Hellinger Distance
|
||||
* _any valid quantification measure_ (e.g., 'mse') for performing a static selection based on
|
||||
the performance estimated for each member of the ensemble in terms of that evaluation metric.
|
||||
|
||||
When using any of the above options, it is important to set the `red_size` parameter, which
|
||||
informs of the number of members to retain.
|
||||
|
||||
Please, check the [model selection manual](./model-selection) if you want to optimize the hyperparameters of ensemble for classification or quantification.
|
||||
|
||||
### The QuaNet neural network
|
||||
|
||||
QuaPy offers an implementation of QuaNet, a deep learning model presented in:
|
||||
|
||||
[_Esuli, A., Moreo, A., & Sebastiani, F. (2018, October).
|
||||
A recurrent neural network for sentiment quantification.
|
||||
In Proceedings of the 27th ACM International Conference on
|
||||
Information and Knowledge Management (pp. 1775-1778)._](https://dl.acm.org/doi/abs/10.1145/3269206.3269287)
|
||||
|
||||
This model requires `torch` to be installed.
|
||||
QuaNet also requires a classifier that can provide embedded representations
|
||||
of the inputs.
|
||||
In the original paper, QuaNet was tested using an LSTM as the base classifier.
|
||||
In the following example, we show an instantiation of QuaNet that instead uses CNN as a probabilistic classifier, taking its last layer representation as the document embedding:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from quapy.method.meta import QuaNet
|
||||
from quapy.classification.neural import NeuralClassifierTrainer, CNNnet
|
||||
|
||||
# use samples of 100 elements
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
|
||||
# load the kindle dataset as text, and convert words to numerical indexes
|
||||
dataset = qp.datasets.fetch_reviews('kindle', pickle=True)
|
||||
qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
|
||||
|
||||
# the text classifier is a CNN trained by NeuralClassifierTrainer
|
||||
cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes)
|
||||
learner = NeuralClassifierTrainer(cnn, device='cuda')
|
||||
|
||||
# train QuaNet
|
||||
model = QuaNet(learner, device='cuda')
|
||||
model.fit(dataset.training)
|
||||
estim_prevalence = model.quantify(dataset.test.instances)
|
||||
```
|
||||
|
|
@ -0,0 +1,145 @@
|
|||
# Model Selection
|
||||
|
||||
As a supervised machine learning task, quantification methods
|
||||
can strongly depend on a good choice of model hyper-parameters.
|
||||
The process whereby those hyper-parameters are chosen is
|
||||
typically known as _Model Selection_, and typically consists of
|
||||
testing different settings and picking the one that performed
|
||||
best in a held-out validation set in terms of any given
|
||||
evaluation measure.
|
||||
|
||||
## Targeting a Quantification-oriented loss
|
||||
|
||||
The task being optimized determines the evaluation protocol,
|
||||
i.e., the criteria according to which the performance of
|
||||
any given method for solving it is to be assessed.
|
||||
As a task in its own right, quantification should impose
|
||||
its own model selection strategies, i.e., strategies
|
||||
aimed at finding appropriate configurations
|
||||
specifically designed for the task of quantification.
|
||||
|
||||
Quantification has long been regarded as an add-on of
|
||||
classification, and thus the model selection strategies
|
||||
customarily adopted in classification have simply been
|
||||
applied to quantification (see the next section).
|
||||
It has been argued in [Moreo, Alejandro, and Fabrizio Sebastiani.
|
||||
Re-Assessing the "Classify and Count" Quantification Method.
|
||||
ECIR 2021: Advances in Information Retrieval pp 75–91.](https://link.springer.com/chapter/10.1007/978-3-030-72240-1_6)
|
||||
that specific model selection strategies should
|
||||
be adopted for quantification. That is, model selection
|
||||
strategies for quantification should target
|
||||
quantification-oriented losses and be tested in a variety
|
||||
of scenarios exhibiting different degrees of prior
|
||||
probability shift.
|
||||
|
||||
The class _qp.model_selection.GridSearchQ_ implements a grid-search exploration over the space of
|
||||
hyper-parameter combinations that [evaluates](./evaluation)
|
||||
each combination of hyper-parameters by means of a given quantification-oriented
|
||||
error metric (e.g., any of the error functions implemented
|
||||
in _qp.error_) and according to a
|
||||
[sampling generation protocol](./protocols).
|
||||
|
||||
The following is an example (also included in the examples folder) of model selection for quantification:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from quapy.protocol import APP
|
||||
from quapy.method.aggregative import DMy
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
import numpy as np
|
||||
|
||||
"""
|
||||
In this example, we show how to perform model selection on a DistributionMatching quantifier.
|
||||
"""
|
||||
|
||||
model = DMy(LogisticRegression())
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
qp.environ['N_JOBS'] = -1 # explore hyper-parameters in parallel
|
||||
|
||||
training, test = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=5).train_test
|
||||
|
||||
# The model will be returned by the fit method of GridSearchQ.
|
||||
# Every combination of hyper-parameters will be evaluated by confronting the
|
||||
# quantifier thus configured against a series of samples generated by means
|
||||
# of a sample generation protocol. For this example, we will use the
|
||||
# artificial-prevalence protocol (APP), that generates samples with prevalence
|
||||
# values in the entire range of values from a grid (e.g., [0, 0.1, 0.2, ..., 1]).
|
||||
# We devote 30% of the dataset for this exploration.
|
||||
training, validation = training.split_stratified(train_prop=0.7)
|
||||
protocol = APP(validation)
|
||||
|
||||
# We will explore a classification-dependent hyper-parameter (e.g., the 'C'
|
||||
# hyper-parameter of LogisticRegression) and a quantification-dependent hyper-parameter
|
||||
# (e.g., the number of bins in a DistributionMatching quantifier).
|
||||
# Classifier-dependent hyper-parameters have to be marked with a prefix "classifier__"
|
||||
# in order to let the quantifier know this hyper-parameter belongs to its underlying
|
||||
# classifier.
|
||||
param_grid = {
|
||||
'classifier__C': np.logspace(-3, 3, 7),
|
||||
'nbins': [8, 16, 32, 64],
|
||||
}
|
||||
|
||||
model = qp.model_selection.GridSearchQ(
|
||||
model=model,
|
||||
param_grid=param_grid,
|
||||
protocol=protocol,
|
||||
error='mae', # the error to optimize is the MAE (a quantification-oriented loss)
|
||||
refit=True, # retrain on the whole labelled set once done
|
||||
verbose=True # show information as the process goes on
|
||||
).fit(training)
|
||||
|
||||
print(f'model selection ended: best hyper-parameters={model.best_params_}')
|
||||
model = model.best_model_
|
||||
|
||||
# evaluation in terms of MAE
|
||||
# we use the same evaluation protocol (APP) on the test set
|
||||
mae_score = qp.evaluation.evaluate(model, protocol=APP(test), error_metric='mae')
|
||||
|
||||
print(f'MAE={mae_score:.5f}')
|
||||
```
|
||||
|
||||
In this example, the system outputs:
|
||||
```
|
||||
[GridSearchQ]: starting model selection with self.n_jobs =-1
|
||||
[GridSearchQ]: hyperparams={'classifier__C': 0.01, 'nbins': 64} got mae score 0.04021 [took 1.1356s]
|
||||
[GridSearchQ]: hyperparams={'classifier__C': 0.01, 'nbins': 32} got mae score 0.04286 [took 1.2139s]
|
||||
[GridSearchQ]: hyperparams={'classifier__C': 0.01, 'nbins': 16} got mae score 0.04888 [took 1.2491s]
|
||||
[GridSearchQ]: hyperparams={'classifier__C': 0.001, 'nbins': 8} got mae score 0.05163 [took 1.5372s]
|
||||
[...]
|
||||
[GridSearchQ]: hyperparams={'classifier__C': 1000.0, 'nbins': 32} got mae score 0.02445 [took 2.9056s]
|
||||
[GridSearchQ]: optimization finished: best params {'classifier__C': 100.0, 'nbins': 32} (score=0.02234) [took 7.3114s]
|
||||
[GridSearchQ]: refitting on the whole development set
|
||||
model selection ended: best hyper-parameters={'classifier__C': 100.0, 'nbins': 32}
|
||||
MAE=0.03102
|
||||
```
|
||||
|
||||
|
||||
## Targeting a Classification-oriented loss
|
||||
|
||||
Optimizing a model for quantification could rather be
|
||||
computationally costly.
|
||||
In aggregative methods, one could alternatively try to optimize
|
||||
the classifier's hyper-parameters for classification.
|
||||
Although this is theoretically suboptimal, many articles in
|
||||
quantification literature have opted for this strategy.
|
||||
|
||||
In QuaPy, this is achieved by simply instantiating the
|
||||
classifier learner as a GridSearchCV from scikit-learn.
|
||||
The following code illustrates how to do that:
|
||||
|
||||
```python
|
||||
learner = GridSearchCV(
|
||||
LogisticRegression(),
|
||||
param_grid={'C': np.logspace(-4, 5, 10), 'class_weight': ['balanced', None]},
|
||||
cv=5)
|
||||
model = DistributionMatching(learner).fit(dataset.train)
|
||||
```
|
||||
|
||||
However, this is conceptually flawed, since the model should be
|
||||
optimized for the task at hand (quantification), and not for a surrogate task (classification),
|
||||
i.e., the model should be requested to deliver low quantification errors, rather
|
||||
than low classification errors.
|
||||
|
||||
|
||||
|
After Width: | Height: | Size: 62 KiB |
After Width: | Height: | Size: 108 KiB |
After Width: | Height: | Size: 71 KiB |
After Width: | Height: | Size: 185 KiB |
After Width: | Height: | Size: 337 KiB |
After Width: | Height: | Size: 243 KiB |
|
@ -0,0 +1,250 @@
|
|||
# Plotting
|
||||
|
||||
The module _qp.plot_ implements some basic plotting functions
|
||||
that can help analyse the performance of a quantification method.
|
||||
|
||||
All plotting functions receive as inputs the outcomes of
|
||||
some experiments and include, for each experiment,
|
||||
the following three main arguments:
|
||||
|
||||
* _method_names_ a list containing the names of the quantification methods
|
||||
* _true_prevs_ a list containing matrices of true prevalences
|
||||
* _estim_prevs_ a list containing matrices of estimated prevalences
|
||||
(should be of the same shape as the corresponding matrix in _true_prevs_)
|
||||
|
||||
Note that a method (as indicated by a name in _method_names_) can
|
||||
appear more than once. This could occur when various datasets are
|
||||
involved in the experiments. In this case, all experiments for the
|
||||
method will be merged and the plot will represent the method's
|
||||
performance across various datasets.
|
||||
|
||||
This is a very simple example of a valid input for the plotting functions:
|
||||
```python
|
||||
method_names = ['classify & count', 'EMQ', 'classify & count']
|
||||
true_prevs = [
|
||||
np.array([[0.5, 0.5], [0.25, 0.75]]),
|
||||
np.array([[0.0, 1.0], [0.25, 0.75], [0.0, 0.1]]),
|
||||
np.array([[0.0, 1.0], [0.25, 0.75], [0.0, 0.1]]),
|
||||
]
|
||||
estim_prevs = [
|
||||
np.array([[0.45, 0.55], [0.6, 0.4]]),
|
||||
np.array([[0.0, 1.0], [0.5, 0.5], [0.2, 0.8]]),
|
||||
np.array([[0.1, 0.9], [0.3, 0.7], [0.0, 0.1]]),
|
||||
]
|
||||
```
|
||||
in which the _classify & count_ has been tested in two datasets and
|
||||
the _EMQ_ method has been tested only in one dataset. For the first
|
||||
experiment, only two (binary) quantifications have been tested,
|
||||
while for the second and third experiments three instances have
|
||||
been tested.
|
||||
|
||||
In general, we would like to test the performance of the
|
||||
quantification methods across different scenarios showcasing
|
||||
the accuracy of the quantifier in predicting class prevalences
|
||||
for a wide range of prior distributions. This can easily be
|
||||
achieved by means of the
|
||||
[artificial sampling protocol](./protocols)
|
||||
that is implemented in QuaPy.
|
||||
|
||||
The following code shows how to perform one simple experiment
|
||||
in which the 4 _CC-variants_, all equipped with a linear SVM, are
|
||||
applied to one binary dataset of reviews about _Kindle_ devices and
|
||||
tested across the entire spectrum of class priors (taking 21 splits
|
||||
of the interval [0,1], i.e., using prevalence steps of 0.05, and
|
||||
generating 100 random samples at each prevalence).
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from protocol import APP
|
||||
from quapy.method.aggregative import CC, ACC, PCC, PACC
|
||||
from sklearn.svm import LinearSVC
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 500
|
||||
|
||||
def gen_data():
|
||||
|
||||
def base_classifier():
|
||||
return LinearSVC(class_weight='balanced')
|
||||
|
||||
def models():
|
||||
yield 'CC', CC(base_classifier())
|
||||
yield 'ACC', ACC(base_classifier())
|
||||
yield 'PCC', PCC(base_classifier())
|
||||
yield 'PACC', PACC(base_classifier())
|
||||
|
||||
train, test = qp.datasets.fetch_reviews('kindle', tfidf=True, min_df=5).train_test
|
||||
|
||||
method_names, true_prevs, estim_prevs, tr_prevs = [], [], [], []
|
||||
|
||||
for method_name, model in models():
|
||||
model.fit(train)
|
||||
true_prev, estim_prev = qp.evaluation.prediction(model, APP(test, repeats=100, random_state=0))
|
||||
|
||||
method_names.append(method_name)
|
||||
true_prevs.append(true_prev)
|
||||
estim_prevs.append(estim_prev)
|
||||
tr_prevs.append(train.prevalence())
|
||||
|
||||
return method_names, true_prevs, estim_prevs, tr_prevs
|
||||
|
||||
method_names, true_prevs, estim_prevs, tr_prevs = gen_data()
|
||||
```
|
||||
the plots that can be generated are explained below.
|
||||
|
||||
## Diagonal Plot
|
||||
|
||||
The _diagonal_ plot shows a very insightful view of the
|
||||
quantifier's performance. It plots the predicted class
|
||||
prevalence (in the y-axis) against the true class prevalence
|
||||
(in the x-axis). Unfortunately, it is limited to binary quantification,
|
||||
although one can simply generate as many _diagonal_ plots as
|
||||
classes there are by indicating which class should be considered
|
||||
the target of the plot.
|
||||
|
||||
The following call will produce the plot:
|
||||
|
||||
```python
|
||||
qp.plot.binary_diagonal(method_names, true_prevs, estim_prevs, train_prev=tr_prevs[0], savepath='./plots/bin_diag.png')
|
||||
```
|
||||
|
||||
the last argument is optional, and indicates the path where to save
|
||||
the plot (the file extension will determine the format -- typical extensions
|
||||
are '.png' or '.pdf'). If this path is not provided, then the plot
|
||||
will be shown but not saved.
|
||||
The resulting plot should look like:
|
||||
|
||||

|
||||
|
||||
Note that in this case, we are also indicating the training
|
||||
prevalence, which is plotted in the diagonal as a cyan dot.
|
||||
The color bands indicate the standard deviations of the predictions,
|
||||
and can be hidden by setting the argument _show_std=False_ (see
|
||||
the complete list of arguments in the documentation).
|
||||
|
||||
Finally, note how most quantifiers, and specially the "unadjusted"
|
||||
variants CC and PCC, are strongly biased towards the
|
||||
prevalence seen during training.
|
||||
|
||||
## Quantification bias
|
||||
|
||||
This plot aims at evincing the bias that any quantifier
|
||||
displays with respect to the training prevalences by
|
||||
means of [box plots](https://en.wikipedia.org/wiki/Box_plot).
|
||||
This plot can be generated by:
|
||||
|
||||
```python
|
||||
qp.plot.binary_bias_global(method_names, true_prevs, estim_prevs, savepath='./plots/bin_bias.png')
|
||||
```
|
||||
|
||||
and should look like:
|
||||
|
||||

|
||||
|
||||
The box plots show some interesting facts:
|
||||
* all methods are biased towards the training prevalence but specially
|
||||
so CC and PCC (an unbiased quantifier would have a box centered at 0)
|
||||
* the bias is always positive, indicating that all methods tend to
|
||||
overestimate the positive class prevalence
|
||||
* CC and PCC have high variability while ACC and specially PACC exhibit
|
||||
lower variability.
|
||||
|
||||
Again, these plots could be generated for experiments ranging across
|
||||
different datasets, and the plot will merge all data accordingly.
|
||||
|
||||
Another illustrative example can be shown that consists of
|
||||
training different CC quantifiers trained at different
|
||||
(artificially sampled) training prevalences.
|
||||
For this example, we generate training samples of 5000
|
||||
documents containing 10%, 20%, ..., 90% of positives from the
|
||||
IMDb dataset, and generate the bias plot again.
|
||||
This example can be run by rewriting the _gen_data()_ function
|
||||
like this:
|
||||
|
||||
```python
|
||||
def gen_data():
|
||||
|
||||
train, test = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=5).train_test
|
||||
model = CC(LinearSVC())
|
||||
|
||||
method_data = []
|
||||
for training_prevalence in np.linspace(0.1, 0.9, 9):
|
||||
training_size = 5000
|
||||
# since the problem is binary, it suffices to specify the negative prevalence, since the positive is constrained
|
||||
train_sample = train.sampling(training_size, 1-training_prevalence)
|
||||
model.fit(train_sample)
|
||||
true_prev, estim_prev = qp.evaluation.prediction(model, APP(test, repeats=100, random_state=0))
|
||||
method_name = 'CC$_{'+f'{int(100*training_prevalence)}' + '\%}$'
|
||||
method_data.append((method_name, true_prev, estim_prev, train_sample.prevalence()))
|
||||
|
||||
return zip(*method_data)
|
||||
```
|
||||
|
||||
and the plot should now look like:
|
||||
|
||||

|
||||
|
||||
which clearly shows a negative bias for CC variants trained on
|
||||
data containing more negatives (i.e., < 50%) and positive biases
|
||||
in cases containing more positives (i.e., >50%). The CC trained
|
||||
at 50% behaves as an unbiased estimator of the positive class
|
||||
prevalence.
|
||||
|
||||
The function _qp.plot.binary_bias_bins_ allows the user to
|
||||
generate box plots broken down by bins of true test prevalence.
|
||||
To this aim, an argument _nbins_ is passed which indicates
|
||||
how many isometric subintervals to take. For example
|
||||
the following plot is produced for _nbins=3_:
|
||||
|
||||

|
||||
|
||||
Interestingly enough, the seemingly unbiased estimator (CC at 50%) happens to display
|
||||
a positive bias (or a tendency to overestimate) in cases of low prevalence
|
||||
(i.e., when the true prevalence of the positive class is below 33%),
|
||||
and a negative bias (or a tendency to underestimate) in cases of high prevalence
|
||||
(i.e., when the true prevalence is beyond 67%).
|
||||
|
||||
Out of curiosity, the diagonal plot for this experiment looks like:
|
||||
|
||||

|
||||
|
||||
showing pretty clearly the dependency of CC on the prior probabilities
|
||||
of the labeled set it was trained on.
|
||||
|
||||
|
||||
## Error by Drift
|
||||
|
||||
Above discussed plots are useful for analyzing and comparing
|
||||
the performance of different quantification methods, but are
|
||||
limited to the binary case. The "error by drift" is a plot
|
||||
that shows the error in predictions as a function of the
|
||||
(prior probability) drift between each test sample and the
|
||||
training set. Interestingly, the error and drift can both be measured
|
||||
in terms of any evaluation measure for quantification (like the
|
||||
ones available in _qp.error_) and can thus be computed
|
||||
irrespectively of the number of classes.
|
||||
|
||||
The following shows how to generate the plot for the 4 CC variants,
|
||||
using 10 bins for the drift
|
||||
and _absolute error_ as the measure of the error (the
|
||||
drift in the x-axis is always computed in terms of _absolute error_ since
|
||||
other errors are harder to interpret):
|
||||
|
||||
```python
|
||||
qp.plot.error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs,
|
||||
error_name='ae', n_bins=10, savepath='./plots/err_drift.png')
|
||||
```
|
||||
|
||||

|
||||
|
||||
Note that all methods work reasonably well in cases of low prevalence
|
||||
drift (i.e., any CC-variant is a good quantifier whenever the IID
|
||||
assumption is approximately preserved). The higher the drift, the worse
|
||||
those quantifiers tend to perform, although it is clear that PACC
|
||||
yields the lowest error for the most difficult cases.
|
||||
|
||||
Remember that any plot can be generated _across many datasets_, and
|
||||
that this would probably result in a more solid comparison.
|
||||
In those cases, however, it is likely that the variances of each
|
||||
method get higher, to the detriment of the visualization.
|
||||
We recommend to set _show_std=False_ in those cases
|
||||
in order to hide the color bands.
|
|
@ -0,0 +1,177 @@
|
|||
# Protocols
|
||||
|
||||
_New in v0.1.7!_
|
||||
|
||||
Quantification methods are expected to behave robustly in the presence of
|
||||
shift. For this reason, quantification methods need to be confronted with
|
||||
samples exhibiting widely varying amounts of shift.
|
||||
_Protocols_ implement specific ways for generating such samples.
|
||||
|
||||
In QuaPy, a protocol is an instance of _AbstractProtocol_ implementing a
|
||||
_call_ method that returns a generator yielding a tuple _(sample, prev)_
|
||||
every time. The protocol can also implement the function _total()_ informing
|
||||
of the number of total samples that the protocol generates.
|
||||
|
||||
Protocols can inherit from _AbstractStochasticSeededProtocol_, the class of
|
||||
protocols that generate samples stochastically, but that can be set with
|
||||
a seed in order to allow for replicating the exact same samples. This is important
|
||||
for evaluation purposes, since we typically require all our methods be evaluated
|
||||
on the exact same test samples in order to allow for a fair comparison.
|
||||
Indeed, the seed is set by default to 0, since this is the most commonly
|
||||
desired behaviour. Indicate _random_state=None_ for allowing different sequences of samples to be
|
||||
generated every time the protocol is invoked.
|
||||
|
||||
Protocols that also inherit from _OnLabelledCollectionProtocol_ are such that
|
||||
samples are generated from a _LabelledCollection_ object (e.g., a test collection,
|
||||
or a validation collection). These protocols also allow for generating sequences of
|
||||
_LabelledCollection_ instead of _(sample, prev)_ by indicating
|
||||
_return_type='labelled_collection'_ instead of the default value _return_type='sample_prev'_.
|
||||
|
||||
For a more technical explanation on _AbstractStochasticSeededProtocol_ and
|
||||
_OnLabelledCollectionProtocol_, see the "custom_protocol.py" provided in the
|
||||
example folder.
|
||||
|
||||
QuaPy provides implementations of most popular sample generation protocols
|
||||
used in literature. This is the subject of the following sections.
|
||||
|
||||
|
||||
## Artificial-Prevalence Protocol
|
||||
|
||||
The "artificial-sampling protocol" (APP) proposed by
|
||||
[Forman (2005)](https://link.springer.com/chapter/10.1007/11564096_55)
|
||||
is likely the most popular protocol used for quantification evaluation.
|
||||
In APP, a test set is used to generate samples at
|
||||
desired prevalence values covering the full spectrum.
|
||||
|
||||
In APP, the user specifies the number
|
||||
of (equally distant) points to be generated from the interval [0,1];
|
||||
in QuaPy this is achieved by setting _n_prevpoints_.
|
||||
For example, if _n_prevpoints=11_ then, for each class, the prevalence values
|
||||
[0., 0.1, 0.2, ..., 1.] will be used. This means that, for two classes,
|
||||
the number of different prevalence values will be 11 (since, once the prevalence
|
||||
of one class is determined, the other one is constrained). For 3 classes,
|
||||
the number of valid combinations can be obtained as 11 + 10 + ... + 1 = 66.
|
||||
In general, the number of valid combinations that will be produced for a given
|
||||
value of n_prevpoints can be consulted by invoking
|
||||
_num_prevalence_combinations_, e.g.:
|
||||
|
||||
```python
|
||||
import quapy.functional as F
|
||||
n_prevpoints = 21
|
||||
n_classes = 4
|
||||
n = F.num_prevalence_combinations(n_prevpoints, n_classes, n_repeats=1)
|
||||
```
|
||||
|
||||
in this example, _n=1771_. Note the last argument, _n_repeats_, that
|
||||
informs of the number of examples that will be generated for any
|
||||
valid combination (typical values are, e.g., 1 for a single sample,
|
||||
or 10 or higher for computing standard deviations or performing statistical
|
||||
significance tests).
|
||||
|
||||
One can instead work the other way around, i.e., one could decide for a
|
||||
maximum budget of evaluations and get the number of prevalence points that
|
||||
will give rise to a number of evaluations close, but not higher, than
|
||||
this budget. This can be achieved with the function
|
||||
_get_nprevpoints_approximation_, e.g.:
|
||||
|
||||
```python
|
||||
budget = 5000
|
||||
n_prevpoints = F.get_nprevpoints_approximation(budget, n_classes, n_repeats=1)
|
||||
n = F.num_prevalence_combinations(n_prevpoints, n_classes, n_repeats=1)
|
||||
print(f'by setting n_prevpoints={n_prevpoints} the number of evaluations for {n_classes} classes will be {n}')
|
||||
```
|
||||
this will produce the following output:
|
||||
```
|
||||
by setting n_prevpoints=30 the number of evaluations for 4 classes will be 4960
|
||||
```
|
||||
|
||||
The following code shows an example of usage of APP for model selection
|
||||
and evaluation:
|
||||
|
||||
```python
|
||||
import quapy as qp
|
||||
from quapy.method.aggregative import ACC
|
||||
from quapy.protocol import APP
|
||||
import numpy as np
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
qp.environ['N_JOBS'] = -1
|
||||
|
||||
# define an instance of our custom quantifier
|
||||
quantifier = ACC(LogisticRegression())
|
||||
|
||||
# load the IMDb dataset
|
||||
train, test = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=5).train_test
|
||||
|
||||
# model selection
|
||||
train, val = train.split_stratified(train_prop=0.75)
|
||||
quantifier = qp.model_selection.GridSearchQ(
|
||||
quantifier,
|
||||
param_grid={'classifier__C': np.logspace(-2, 2, 5)},
|
||||
protocol=APP(val) # <- this is the protocol we use for generating validation samples
|
||||
).fit(train)
|
||||
|
||||
# default values are n_prevalences=21, repeats=10, random_state=0; this is equivalent to:
|
||||
# val_app = APP(val, n_prevalences=21, repeats=10, random_state=0)
|
||||
# quantifier = GridSearchQ(quantifier, param_grid, protocol=val_app).fit(train)
|
||||
|
||||
# evaluation with APP
|
||||
mae = qp.evaluation.evaluate(quantifier, protocol=APP(test), error_metric='mae')
|
||||
print(f'MAE = {mae:.4f}')
|
||||
```
|
||||
|
||||
Note that APP is an instance of _AbstractStochasticSeededProtocol_ and that the
|
||||
_random_state_ is by default set to 0, meaning that all the generated validation
|
||||
samples will be consistent for all the combinations of hyperparameters being tested.
|
||||
Note also that the _sample_size_ is not indicated when instantiating the protocol;
|
||||
in such cases QuaPy takes the value of _qp.environ['SAMPLE_SIZE']_.
|
||||
|
||||
This protocol is useful for testing a quantifier under conditions of
|
||||
_prior probability shift_.
|
||||
|
||||
## Sampling from the unit-simplex, the Uniform-Prevalence Protocol (UPP)
|
||||
|
||||
Generating all possible combinations from a grid of prevalence values (APP) in
|
||||
multiclass is cumbersome, and when the number of classes increases it rapidly
|
||||
becomes impractical. In some cases, it is preferable to generate a fixed number
|
||||
of samples displaying prevalence values that are uniformly drawn from the unit-simplex,
|
||||
that is, so that every legitimate distribution is equally likely. The main drawback
|
||||
of this approach is that we are not guaranteed that all classes have been tested
|
||||
in the entire range of prevalence values. The main advantage is that every possible
|
||||
prevalence value is electable (this was not possible with standard APP, since values
|
||||
not included in the grid are never tested). Yet another advantage is that we can
|
||||
control the computational burden every evaluation incurs, by deciding in advance
|
||||
the number of samples to generate.
|
||||
|
||||
The UPP protocol implements this idea by relying on the Kraemer algorithm
|
||||
for sampling from the unit-simplex as many vectors of prevalence values as indicated
|
||||
in the _repeats_ parameter. UPP can be instantiated as:
|
||||
|
||||
```python
|
||||
protocol = qp.protocol.UPP(test, repeats=100)
|
||||
```
|
||||
|
||||
This is the most convenient protocol for datasets
|
||||
containing many classes; see, e.g.,
|
||||
[LeQua (2022)](https://ceur-ws.org/Vol-3180/paper-146.pdf),
|
||||
and is useful for testing a quantifier under conditions of
|
||||
_prior probability shift_.
|
||||
|
||||
|
||||
## Natural-Prevalence Protocol
|
||||
|
||||
The "natural-prevalence protocol" (NPP) comes down to generating samples drawn
|
||||
uniformly at random from the original labelled collection. This protocol has
|
||||
sometimes been used in literature, although it is now considered to be deprecated,
|
||||
due to its limited capability to generate interesting amounts of shift.
|
||||
All other things being equal, this protocol can be used just like APP or UPP,
|
||||
and is instantiated via:
|
||||
|
||||
```python
|
||||
protocol = qp.protocol.NPP(test, repeats=100)
|
||||
```
|
||||
|
||||
## Other protocols
|
||||
|
||||
Other protocols exist in QuaPy and will be added to the `qp.protocol.py` module.
|
|
@ -1,7 +0,0 @@
|
|||
quapy
|
||||
=====
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 4
|
||||
|
||||
quapy
|
|
@ -52,6 +52,14 @@ quapy.method.non\_aggregative module
|
|||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
quapy.method.composable module
|
||||
------------------------------
|
||||
|
||||
.. automodule:: quapy.method.composable
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Module contents
|
||||
---------------
|
||||
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
"""
|
||||
This is a basic example showcasing some of the important concepts behind quapy.
|
||||
First of all, import quapy. You would typically import quapy in the following way
|
||||
"""
|
||||
import numpy as np
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
import quapy as qp
|
||||
|
||||
# let's fetch some dataset to run one experiment
|
||||
# datasets are available in the "qp.data.datasets" module (there is a shortcut in qp.datasets)
|
||||
|
||||
data = qp.datasets.fetch_reviews('hp')
|
||||
|
||||
# The data are in plain text format. You can convert them into tfidf using some utilities available in the
|
||||
# qp.data.preprocessing module, e.g.:
|
||||
|
||||
data = qp.data.preprocessing.text2tfidf(data, min_df=5)
|
||||
|
||||
# you can obtain the same result by specifying tfidf=True it in the fetch function:
|
||||
# data = qp.datasets.fetch_reviews('hp', tfidf=True, min_df=5)
|
||||
|
||||
# data is an object of type Dataset, a very basic collection that contains a "training" and a "test" collection inside.
|
||||
train, test = data.train_test
|
||||
|
||||
# train and test are instances of LabelledCollection, a class that contains covariates (X) and true labels (y), along
|
||||
# with sampling functionality. Here are some examples of usage:
|
||||
X, y = train.Xy
|
||||
print(f'number of classes {train.n_classes}')
|
||||
print(f'class names {train.classes_}')
|
||||
|
||||
import quapy.functional as F # <- this module has some functional utilities, like a string formatter for prevalences
|
||||
print(f'training prevalence = {F.strprev(train.prevalence())}')
|
||||
|
||||
# let us train one quantifier, for example, PACC using a sklearn's Logistic Regressor as the underlying classifier
|
||||
# classifier = LogisticRegression()
|
||||
|
||||
# pacc = qp.method.aggregative.PACC(classifier)
|
||||
pacc = qp.method.aggregative.PACC()
|
||||
|
||||
print(f'training {pacc}')
|
||||
pacc.fit(train)
|
||||
|
||||
# let's now test our quantifier on the test data (of course, we should not use the test labels y at this point, only X)
|
||||
X_test = test.X
|
||||
estim_prevalence = pacc.quantify(X_test)
|
||||
|
||||
print(f'estimated test prevalence = {F.strprev(estim_prevalence)}')
|
||||
print(f'true test prevalence = {F.strprev(test.prevalence())}')
|
||||
|
||||
# let us use some evaluation metric to check how well our quantifier fared.
|
||||
# Error metrics are available in the qp.error module.
|
||||
|
||||
mae_error = qp.error.mae(test.prevalence(), estim_prevalence)
|
||||
print(f'MAE={mae_error:.4f}')
|
||||
|
||||
# In quantification, we typically use an evaluation protocol to test the performance of a quantification method.
|
||||
# The reason is that, even though the test set contains many instances, the whole counts as 1 single datapoint to
|
||||
# the quantifier, because quantification targets samples of instances as a whole (while classification, or regression,
|
||||
# targets instances individually).
|
||||
# Quapy provides some standard protocols in qp.protocol. We will use the artificial prevalence protocol (APP). APP
|
||||
# works by generating many test samples, out of our original test collection, characterized by different prevalence
|
||||
# values. To do so, a grid of prevalence values is explored, and different samples are generated conditioned on each
|
||||
# prevalence vector. This way, the quantifier is stress-tested on a wide range of prevalence values, i.e., under
|
||||
# prior probability shift conditions.
|
||||
|
||||
# In this case we use "test" and not only "test.X" since the protocol needs to know the class labels in order
|
||||
# to generate samples at different prevalences. We will generate samples of 100 instances, from a grid of 21 values,
|
||||
# i.e., from a grid = [0.0, 0.05, 0.10, ..., 1.00], and only one sample (repeats) for each combination.
|
||||
app = qp.protocol.APP(test, sample_size=100, n_prevalences=21, repeats=1)
|
||||
|
||||
# let's print some examples:
show = 5
for i, (sample, prev) in enumerate(app()):
    print(f'sample-{i}: {F.strprev(prev)}')
    # stop after `show` samples; the original compared against a hard-coded 5,
    # which silently ignored the `show` setting
    if i + 1 == show:
        break
|
||||
|
||||
# we can use the evaluation routine provided in quapy to test our method using a given protocol in terms of
|
||||
# one specific error metric
|
||||
absolute_errors = qp.evaluation.evaluate(model=pacc, protocol=app, error_metric='ae')
|
||||
print(f'MAE = {np.mean(absolute_errors):.4f}+-{np.std(absolute_errors):.4f}')
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -1,10 +1,7 @@
|
|||
import quapy as qp
|
||||
from method._kdey import KDEyML
|
||||
from quapy.method.non_aggregative import DMx
|
||||
from quapy.protocol import APP, UPP
|
||||
from quapy.protocol import UPP
|
||||
from quapy.method.aggregative import DMy
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
from examples.comparing_gridsearch import OLD_GridSearchQ
|
||||
import numpy as np
|
||||
from time import time
|
||||
|
||||
|
@ -12,12 +9,16 @@ from time import time
|
|||
In this example, we show how to perform model selection on a DistributionMatching quantifier.
|
||||
"""
|
||||
|
||||
model = KDEyML(LogisticRegression())
|
||||
model = DMy()
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
qp.environ['N_JOBS'] = -1
|
||||
|
||||
# training, test = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=5).train_test
|
||||
print(f'running model selection with N_JOBS={qp.environ["N_JOBS"]}; '
|
||||
f'to increase the number of jobs use:\n> N_JOBS=-1 python3 1.model_selection.py\n'
|
||||
f'alternatively, you can set this variable within the script as:\n'
|
||||
f'import quapy as qp\n'
|
||||
f'qp.environ["N_JOBS"]=-1')
|
||||
|
||||
training, test = qp.datasets.fetch_UCIMulticlassDataset('letter').train_test
|
||||
|
||||
with qp.util.temp_seed(0):
|
||||
|
@ -34,19 +35,21 @@ with qp.util.temp_seed(0):
|
|||
|
||||
# We will explore a classification-dependent hyper-parameter (e.g., the 'C'
|
||||
# hyper-parameter of LogisticRegression) and a quantification-dependent hyper-parameter
|
||||
# (e.g., the number of bins in a DistributionMatching quantifier.
|
||||
# (e.g., the number of bins in a DistributionMatching quantifier).
|
||||
# Classifier-dependent hyper-parameters have to be marked with a prefix "classifier__"
|
||||
# in order to let the quantifier know this hyper-parameter belongs to its underlying
|
||||
# classifier.
|
||||
# We consider 7 values for the classifier and 7 values for the quantifier.
|
||||
# QuaPy is optimized so that only 7 classifiers are trained, and then reused to test the
|
||||
# different configurations of the quantifier. In other words, quapy avoids to train
|
||||
# the classifier 7x7 times.
|
||||
param_grid = {
|
||||
'classifier__C': np.logspace(-3,3,7),
|
||||
'classifier__class_weight': ['balanced', None],
|
||||
'bandwidth': np.linspace(0.01, 0.2, 20),
|
||||
'classifier__C': np.logspace(-3, 3, 7),
|
||||
'nbins': [2, 3, 4, 5, 10, 15, 20]
|
||||
}
|
||||
|
||||
tinit = time()
|
||||
|
||||
# model = OLD_GridSearchQ(
|
||||
model = qp.model_selection.GridSearchQ(
|
||||
model=model,
|
||||
param_grid=param_grid,
|
|
@ -25,7 +25,9 @@ df = pd.DataFrame(columns=['method', 'dataset', 'MAE', 'MRAE', 'tr-time', 'te-ti
|
|||
|
||||
|
||||
for dataset_name in tqdm(qp.datasets.UCI_BINARY_DATASETS, total=len(qp.datasets.UCI_BINARY_DATASETS)):
|
||||
if dataset_name in ['acute.a', 'acute.b', 'balance.2', 'iris.1']: continue
|
||||
if dataset_name in ['acute.a', 'acute.b', 'balance.2', 'iris.1']:
|
||||
# these datasets tend to produce either too good or too bad results...
|
||||
continue
|
||||
|
||||
collection = qp.datasets.fetch_UCIBinaryLabelledCollection(dataset_name, verbose=False)
|
||||
train, test = collection.split_stratified()
|
|
@ -13,7 +13,7 @@ $ pip install quapy[bayesian]
|
|||
Running the script via:
|
||||
|
||||
```
|
||||
$ python examples/bayesian_quantification.py
|
||||
$ python examples/13.bayesian_quantification.py
|
||||
```
|
||||
|
||||
will produce a plot `bayesian_quantification.pdf`.
|
|
@ -0,0 +1,142 @@
|
|||
"""
|
||||
This example illustrates the composition of quantification methods from
|
||||
arbitrary loss functions and feature transformations. It will extend the basic
|
||||
example on the usage of quapy with this composition.
|
||||
|
||||
This example requires the installation of qunfold, the back-end of QuaPy's
|
||||
composition module:
|
||||
|
||||
pip install --upgrade pip setuptools wheel
|
||||
pip install "jax[cpu]"
|
||||
pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
|
||||
# First of all, we load the same data as in the basic example.
|
||||
|
||||
data = qp.data.preprocessing.text2tfidf(
|
||||
qp.datasets.fetch_reviews("hp"),
|
||||
min_df = 5,
|
||||
)
|
||||
training, testing = data.train_test
|
||||
|
||||
# We start by recovering PACC from its building blocks, a LeastSquaresLoss and
|
||||
# a probabilistic ClassTransformer. A 5-fold cross-validation is implemented
|
||||
# through a CVClassifier.
|
||||
|
||||
from quapy.method.composable import (
|
||||
ComposableQuantifier,
|
||||
LeastSquaresLoss,
|
||||
ClassTransformer,
|
||||
CVClassifier,
|
||||
)
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
pacc = ComposableQuantifier(
|
||||
LeastSquaresLoss(),
|
||||
ClassTransformer(
|
||||
CVClassifier(LogisticRegression(random_state=0), 5),
|
||||
is_probabilistic = True
|
||||
),
|
||||
)
|
||||
|
||||
# Let's evaluate this quantifier.
|
||||
|
||||
print(f"Evaluating PACC: {pacc}")
|
||||
pacc.fit(training)
|
||||
app = qp.protocol.APP(testing, sample_size=100, n_prevalences=21, repeats=1)
|
||||
absolute_errors = qp.evaluation.evaluate(
|
||||
model = pacc,
|
||||
protocol = app,
|
||||
error_metric = "ae",
|
||||
)
|
||||
print(f"MAE = {np.mean(absolute_errors):.4f}+-{np.std(absolute_errors):.4f}")
|
||||
|
||||
# We now turn to the composition of novel methods. As an example, we use the
|
||||
# (squared) Hellinger distance as a loss function but, unlike HDy, we do not
|
||||
# compute any histograms from the output of the classifier.
|
||||
|
||||
from quapy.method.composable import HellingerSurrogateLoss
|
||||
|
||||
model = ComposableQuantifier(
|
||||
HellingerSurrogateLoss(), # the loss is different from before
|
||||
ClassTransformer( # we use the same transformer
|
||||
CVClassifier(LogisticRegression(random_state=0), 5),
|
||||
is_probabilistic = True
|
||||
),
|
||||
)
|
||||
|
||||
print(f"Evaluating {model}")
|
||||
model.fit(training)
|
||||
absolute_errors = qp.evaluation.evaluate(
|
||||
model = model,
|
||||
protocol = app, # use the same protocol for evaluation
|
||||
error_metric = "ae",
|
||||
)
|
||||
print(f"MAE = {np.mean(absolute_errors):.4f}+-{np.std(absolute_errors):.4f}")
|
||||
|
||||
# In general, any composed method solves a linear system of equations by
|
||||
# minimizing the loss after transforming the data. Methods of this kind include
|
||||
# ACC, PACC, HDx, HDy, and many other well-known methods, as well as an
|
||||
# unlimited number of re-combinations of their building blocks.
|
||||
|
||||
# To illustrate hyper-parameter optimization, we now define a method that
|
||||
# employs a weighted sum of the LeastSquaresLoss and the
|
||||
# HellingerSurrogateLoss. We will consider both the weighting of these losses
|
||||
# and the C parameter of the LogisticRegression as hyper-parameters to be
|
||||
# optimized.
|
||||
|
||||
from quapy.method.composable import CombinedLoss
|
||||
|
||||
model = ComposableQuantifier(
|
||||
CombinedLoss(HellingerSurrogateLoss(), LeastSquaresLoss()),
|
||||
ClassTransformer(
|
||||
CVClassifier(LogisticRegression(random_state=0), 5),
|
||||
is_probabilistic = True
|
||||
),
|
||||
)
|
||||
|
||||
from qunfold.quapy import QuaPyWrapper
|
||||
from qunfold import GenericMethod
|
||||
|
||||
model = QuaPyWrapper(GenericMethod(
|
||||
CombinedLoss(HellingerSurrogateLoss(), LeastSquaresLoss()),
|
||||
ClassTransformer(
|
||||
CVClassifier(LogisticRegression(random_state=0), 5),
|
||||
is_probabilistic = True
|
||||
),
|
||||
))
|
||||
|
||||
# The names of the parameters stem from the comparably deep object hierarchy
|
||||
# that composable methods define.
|
||||
|
||||
param_grid = {
|
||||
"loss__weights": [ (w, 1-w) for w in [.1, .5, .9] ],
|
||||
"transformer__classifier__estimator__C": [1e-1, 1e1],
|
||||
}
|
||||
|
||||
grid_search = qp.model_selection.GridSearchQ(
|
||||
model = model,
|
||||
param_grid = param_grid,
|
||||
protocol = app, # use the protocol that we used for testing before
|
||||
error = "mae",
|
||||
refit = False,
|
||||
verbose = True,
|
||||
).fit(training)
|
||||
print(
|
||||
f"Best hyper-parameters = {grid_search.best_params_}",
|
||||
f"Best MAE = {grid_search.best_score_}",
|
||||
sep = "\n",
|
||||
)
|
||||
|
||||
# Note that a proper evaluation would still require the best model to be
|
||||
# evaluated on a separate test set.
|
||||
|
||||
# To implement your own loss functions and feature representations, please
|
||||
# follow the corresponding manual of the qunfold package. This package provides
|
||||
# the back-end of QuaPy’s composable module and is fully compatible with QuaPy.
|
||||
#
|
||||
# https://mirkobunse.github.io/qunfold/developer-guide.html#custom-implementations
|
|
@ -0,0 +1,52 @@
|
|||
import numpy as np
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
from quapy.data.datasets import LEQUA2024_SAMPLE_SIZE, fetch_lequa2024
|
||||
from quapy.evaluation import evaluation_report
|
||||
from quapy.method.aggregative import KDEyML
|
||||
from quapy.model_selection import GridSearchQ
|
||||
import pandas as pd
|
||||
|
||||
"""
|
||||
This example shows hoy to use the LeQua datasets (new in v0.1.9). For more information about the datasets, and the
|
||||
LeQua competition itself, check:
|
||||
https://lequa2024.github.io/index (the site of the competition)
|
||||
"""
|
||||
|
||||
# there are 4 tasks: T1 (binary), T2 (multiclass), T3 (ordinal), T4 (binary - covariate & prior shift)
|
||||
task = 'T2'
|
||||
|
||||
# set the sample size in the environment. The sample size is task-dependent and can be consulted by doing:
|
||||
qp.environ['SAMPLE_SIZE'] = LEQUA2024_SAMPLE_SIZE[task]
|
||||
qp.environ['N_JOBS'] = -1
|
||||
|
||||
# the fetch method returns a training set (an instance of LabelledCollection) and two generators: one for the
|
||||
# validation set and another for the test sets. These generators are both instances of classes that extend
|
||||
# AbstractProtocol (i.e., classes that implement sampling generation procedures) and, in particular, are instances
|
||||
# of SamplesFromDir, a protocol that simply iterates over pre-generated samples (those provided for the competition)
|
||||
# stored in a directory.
|
||||
training, val_generator, test_generator = fetch_lequa2024(task=task)
|
||||
|
||||
# define the quantifier
|
||||
quantifier = KDEyML(classifier=LogisticRegression())
|
||||
|
||||
# model selection
|
||||
param_grid = {
|
||||
'classifier__C': np.logspace(-3, 3, 7), # classifier-dependent: inverse of regularization strength
|
||||
'classifier__class_weight': ['balanced', None], # classifier-dependent: weights of each class
|
||||
'bandwidth': np.linspace(0.01, 0.2, 20) # quantifier-dependent: bandwidth of the kernel
|
||||
}
|
||||
model_selection = GridSearchQ(quantifier, param_grid, protocol=val_generator, error='mrae', refit=False, verbose=True)
|
||||
quantifier = model_selection.fit(training)
|
||||
|
||||
# evaluation
|
||||
report = evaluation_report(quantifier, protocol=test_generator, error_metrics=['mae', 'mrae'], verbose=True)
|
||||
|
||||
# printing results
|
||||
pd.set_option('display.expand_frame_repr', False)
|
||||
report['estim-prev'] = report['estim-prev'].map(F.strprev)
|
||||
print(report)
|
||||
|
||||
print('Averaged values:')
|
||||
print(report.mean())
|
|
@ -33,7 +33,7 @@ returns an instance of SVM(Q) (i.e., an instance of CC properly set to work with
|
|||
Since we want to explore the losses, we will instead use newELM. For this example we will create a quantifier for tweet
|
||||
sentiment analysis considering three classes: negative, neutral, and positive. Since SVMperf is a binary classifier,
|
||||
our quantifier will be binary as well. We will use a one-vs-all approach to work in multiclass model.
|
||||
For more details about how one-vs-all works, we refer to the example "one_vs_all.py" and to the API documentation.
|
||||
For more details about how one-vs-all works, we refer to the example "10.one_vs_all.py" and to the API documentation.
|
||||
"""
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
|
@ -29,12 +29,17 @@ def newLR():
|
|||
|
||||
|
||||
def calibratedLR():
|
||||
return CalibratedClassifierCV(LogisticRegression(max_iter=1000, solver='lbfgs', n_jobs=-1))
|
||||
return CalibratedClassifierCV(newLR())
|
||||
|
||||
|
||||
__C_range = np.logspace(-3, 3, 7)
|
||||
lr_params = {'classifier__C': __C_range, 'classifier__class_weight': [None, 'balanced']}
|
||||
svmperf_params = {'classifier__C': __C_range}
|
||||
lr_params = {
|
||||
'classifier__C': __C_range,
|
||||
'classifier__class_weight': [None, 'balanced']
|
||||
}
|
||||
svmperf_params = {
|
||||
'classifier__C': __C_range
|
||||
}
|
||||
|
||||
|
||||
def quantification_models():
|
|
@ -0,0 +1,125 @@
|
|||
import pickle
|
||||
import os
|
||||
from time import time
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
import quapy as qp
|
||||
from quapy.method.aggregative import PACC, EMQ
|
||||
from quapy.model_selection import GridSearchQ
|
||||
from quapy.protocol import UPP
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
SEED = 1
|
||||
|
||||
|
||||
def newLR():
    """Instantiate a fresh LogisticRegression, allowing up to 3000 solver iterations."""
    classifier = LogisticRegression(max_iter=3000)
    return classifier
|
||||
|
||||
# typical hyperparameters explored for Logistic Regression
|
||||
logreg_grid = {
|
||||
'C': np.logspace(-3, 3, 7),
|
||||
'class_weight': ['balanced', None]
|
||||
}
|
||||
|
||||
def wrap_hyper(classifier_hyper_grid:dict):
    """Prefix every key of a classifier hyper-parameter grid with 'classifier__'.

    This marks each hyper-parameter as belonging to the quantifier's underlying
    classifier, as expected by GridSearchQ.

    :param classifier_hyper_grid: dict mapping hyper-parameter names to candidate values
    :return: dict with the same values under 'classifier__'-prefixed keys
    """
    return {f'classifier__{name}': values for name, values in classifier_hyper_grid.items()}
|
||||
|
||||
METHODS = [
|
||||
('PACC', PACC(newLR()), wrap_hyper(logreg_grid)),
|
||||
('EMQ', EMQ(newLR()), wrap_hyper(logreg_grid)),
|
||||
# ('KDEy-ML', KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.linspace(0.01, 0.2, 20)}}),
|
||||
]
|
||||
|
||||
|
||||
def show_results(result_path):
|
||||
import pandas as pd
|
||||
df = pd.read_csv(result_path+'.csv', sep='\t')
|
||||
pd.set_option('display.max_columns', None)
|
||||
pd.set_option('display.max_rows', None)
|
||||
pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE", "MRAE", "t_train"], margins=True)
|
||||
print(pv)
|
||||
|
||||
def load_timings(result_path):
    """Recover previously stored training times from ``result_path`` + '.csv'.

    :param result_path: path (without the '.csv' extension) of the global results file
    :return: a mapping method-name -> {dataset-name -> training time}, backed by a
        defaultdict so that methods absent from the file yield an empty dict
    """
    import pandas as pd
    timings = defaultdict(lambda: {})
    results_file = Path(result_path + '.csv')
    if not results_file.exists():
        # nothing stored yet: return the empty (default) mapping
        return timings
    stored = pd.read_csv(result_path+'.csv', sep='\t')
    # merge the stored per-method timings into the defaultdict (dict union, PEP 584)
    return timings | stored.pivot_table(index='Dataset', columns='Method', values='t_train').to_dict()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 500
|
||||
qp.environ['N_JOBS'] = -1
|
||||
n_bags_val = 250
|
||||
n_bags_test = 1000
|
||||
result_dir = f'results/ucimulti'
|
||||
|
||||
os.makedirs(result_dir, exist_ok=True)
|
||||
|
||||
global_result_path = f'{result_dir}/allmethods'
|
||||
timings = load_timings(global_result_path)
|
||||
with open(global_result_path + '.csv', 'wt') as csv:
|
||||
csv.write(f'Method\tDataset\tMAE\tMRAE\tt_train\n')
|
||||
|
||||
for method_name, quantifier, param_grid in METHODS:
|
||||
|
||||
print('Init method', method_name)
|
||||
|
||||
with open(global_result_path + '.csv', 'at') as csv:
|
||||
|
||||
for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
|
||||
|
||||
print('init', dataset)
|
||||
|
||||
local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
|
||||
|
||||
if os.path.exists(local_result_path):
|
||||
print(f'result file {local_result_path} already exist; skipping')
|
||||
report = qp.util.load_report(local_result_path)
|
||||
|
||||
else:
|
||||
with qp.util.temp_seed(SEED):
|
||||
|
||||
data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
|
||||
|
||||
# model selection
|
||||
train, test = data.train_test
|
||||
train, val = train.split_stratified(random_state=SEED)
|
||||
|
||||
protocol = UPP(val, repeats=n_bags_val)
|
||||
modsel = GridSearchQ(
|
||||
quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
|
||||
)
|
||||
|
||||
t_init = time()
|
||||
try:
|
||||
modsel.fit(train)
|
||||
|
||||
print(f'best params {modsel.best_params_}')
|
||||
print(f'best score {modsel.best_score_}')
|
||||
|
||||
quantifier = modsel.best_model()
|
||||
except:
|
||||
print('something went wrong... trying to fit the default model')
|
||||
quantifier.fit(train)
|
||||
timings[method_name][dataset] = time() - t_init
|
||||
|
||||
|
||||
protocol = UPP(test, repeats=n_bags_test)
|
||||
report = qp.evaluation.evaluation_report(
|
||||
quantifier, protocol, error_metrics=['mae', 'mrae'], verbose=True
|
||||
)
|
||||
report.to_csv(local_result_path)
|
||||
|
||||
means = report.mean(numeric_only=True)
|
||||
csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{timings[method_name][dataset]:.3f}\n')
|
||||
csv.flush()
|
||||
|
||||
show_results(global_result_path)
|
|
@ -1,63 +0,0 @@
|
|||
import numpy as np
|
||||
from abstention.calibration import NoBiasVectorScaling, VectorScaling, TempScaling
|
||||
from sklearn.calibration import CalibratedClassifierCV
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
from classification.calibration import RecalibratedProbabilisticClassifierBase, NBVSCalibration, \
|
||||
BCTSCalibration
|
||||
from data.datasets import LEQUA2022_SAMPLE_SIZE, fetch_lequa2022
|
||||
from evaluation import evaluation_report
|
||||
from method.aggregative import EMQ
|
||||
from model_selection import GridSearchQ
|
||||
import pandas as pd
|
||||
|
||||
for task in ['T1A', 'T1B']:
|
||||
|
||||
# calibration = TempScaling(verbose=False, bias_positions='all')
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = LEQUA2022_SAMPLE_SIZE[task]
|
||||
training, val_generator, test_generator = fetch_lequa2022(task=task)
|
||||
|
||||
# define the quantifier
|
||||
# learner = BCTSCalibration(LogisticRegression(), n_jobs=-1)
|
||||
# learner = CalibratedClassifierCV(LogisticRegression())
|
||||
learner = LogisticRegression()
|
||||
quantifier = EMQ(classifier=learner)
|
||||
|
||||
# model selection
|
||||
param_grid = {
|
||||
'classifier__C': np.logspace(-3, 3, 7),
|
||||
'classifier__class_weight': ['balanced', None],
|
||||
'recalib': ['platt', 'ts', 'vs', 'nbvs', 'bcts', None],
|
||||
'exact_train_prev': [False, True]
|
||||
}
|
||||
model_selection = GridSearchQ(quantifier, param_grid, protocol=val_generator, error='mrae', n_jobs=-1, refit=False, verbose=True)
|
||||
quantifier = model_selection.fit(training)
|
||||
|
||||
# evaluation
|
||||
report = evaluation_report(quantifier, protocol=test_generator, error_metrics=['mae', 'mrae', 'mkld'], verbose=True)
|
||||
|
||||
# import os
|
||||
# os.makedirs(f'./out', exist_ok=True)
|
||||
# with open(f'./out/EMQ_{calib}_{task}.txt', 'wt') as foo:
|
||||
# estim_prev = report['estim-prev'].values
|
||||
# nclasses = len(estim_prev[0])
|
||||
# foo.write(f'id,'+','.join([str(x) for x in range(nclasses)])+'\n')
|
||||
# for id, prev in enumerate(estim_prev):
|
||||
# foo.write(f'{id},'+','.join([f'{p:.5f}' for p in prev])+'\n')
|
||||
#
|
||||
# #os.makedirs(f'./errors/{task}', exist_ok=True)
|
||||
# with open(f'./out/EMQ_{calib}_{task}_errors.txt', 'wt') as foo:
|
||||
# maes, mraes = report['mae'].values, report['mrae'].values
|
||||
# foo.write(f'id,AE,RAE\n')
|
||||
# for id, (ae_i, rae_i) in enumerate(zip(maes, mraes)):
|
||||
# foo.write(f'{id},{ae_i:.5f},{rae_i:.5f}\n')
|
||||
|
||||
# printing results
|
||||
pd.set_option('display.expand_frame_repr', False)
|
||||
report['estim-prev'] = report['estim-prev'].map(F.strprev)
|
||||
print(report)
|
||||
|
||||
print('Averaged values:')
|
||||
print(report.mean())
|
|
@ -1,15 +1,18 @@
|
|||
"""QuaPy module for quantification"""
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
from quapy.data import datasets
|
||||
from . import error
|
||||
from . import data
|
||||
from . import functional
|
||||
# from . import method
|
||||
from . import method
|
||||
from . import evaluation
|
||||
from . import protocol
|
||||
from . import plot
|
||||
from . import util
|
||||
from . import model_selection
|
||||
from . import classification
|
||||
import os
|
||||
|
||||
__version__ = '0.1.9'
|
||||
|
||||
|
@ -20,7 +23,8 @@ environ = {
|
|||
'PAD_TOKEN': '[PAD]',
|
||||
'PAD_INDEX': 1,
|
||||
'SVMPERF_HOME': './svm_perf_quantification',
|
||||
'N_JOBS': 1
|
||||
'N_JOBS': int(os.getenv('N_JOBS', 1)),
|
||||
'DEFAULT_CLS': LogisticRegression(max_iter=3000)
|
||||
}
|
||||
|
||||
|
||||
|
@ -48,3 +52,19 @@ def _get_sample_size(sample_size):
|
|||
if sample_size is None:
|
||||
raise ValueError('neither sample_size nor qp.environ["SAMPLE_SIZE"] have been specified')
|
||||
return sample_size
|
||||
|
||||
|
||||
def _get_classifier(classifier):
|
||||
"""
|
||||
If `classifier` is None, then it returns `environ['DEFAULT_CLS']`;
|
||||
if otherwise, returns `classifier`.
|
||||
|
||||
:param classifier: sklearn's estimator or None
|
||||
:return: sklearn's estimator
|
||||
"""
|
||||
if classifier is None:
|
||||
from sklearn.base import clone
|
||||
classifier = clone(environ['DEFAULT_CLS'])
|
||||
if classifier is None:
|
||||
raise ValueError('neither classifier nor qp.environ["DEFAULT_CLS"] have been specified')
|
||||
return classifier
|
||||
|
|
|
@ -1,20 +1,18 @@
|
|||
import os
|
||||
import pandas as pd
|
||||
import math
|
||||
|
||||
from typing import Optional
|
||||
from quapy.data import LabelledCollection
|
||||
from quapy.protocol import AbstractProtocol
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def get_sample_list(path_dir):
|
||||
"""Gets a sample list finding the csv files in a directory
|
||||
"""
|
||||
Gets a sample list finding the csv files in a directory
|
||||
|
||||
Args:
|
||||
path_dir (_type_): directory to look for samples
|
||||
|
||||
Returns:
|
||||
_type_: list of samples
|
||||
:param path_dir: directory to look for samples
|
||||
:return: list of samples
|
||||
"""
|
||||
samples = []
|
||||
for filename in sorted(os.listdir(path_dir)):
|
||||
|
@ -23,18 +21,15 @@ def get_sample_list(path_dir):
|
|||
return samples
|
||||
|
||||
|
||||
def generate_modelselection_split(samples, split=0.3):
|
||||
"""This function generates a train/test split for model selection
|
||||
def generate_modelselection_split(samples, test_prop=0.3):
|
||||
"""This function generates a train/test partition for model selection
|
||||
without the use of random numbers so the split is always the same
|
||||
|
||||
Args:
|
||||
samples (_type_): list of samples
|
||||
split (float, optional): percentage saved for test. Defaults to 0.3.
|
||||
|
||||
Returns:
|
||||
_type_: list of samples to use as train and list of samples to use as test
|
||||
:param samples: list of samples
|
||||
:param test_prop: float, percentage saved for test. Defaults to 0.3.
|
||||
:return: list of samples to use as train and list of samples to use as test
|
||||
"""
|
||||
num_items_to_pick = math.ceil(len(samples) * split)
|
||||
num_items_to_pick = math.ceil(len(samples) * test_prop)
|
||||
step_size = math.floor(len(samples) / num_items_to_pick)
|
||||
test_indices = [i * step_size for i in range(num_items_to_pick)]
|
||||
test = [samples[i] for i in test_indices]
|
||||
|
@ -72,7 +67,7 @@ class IFCBTrainSamplesFromDir(AbstractProtocol):
|
|||
|
||||
class IFCBTestSamples(AbstractProtocol):
|
||||
|
||||
def __init__(self, path_dir:str, test_prevalences: pd.DataFrame, samples: list = None, classes: list=None):
|
||||
def __init__(self, path_dir:str, test_prevalences: Optional[pd.DataFrame]=None, samples: list=None, classes: list=None):
|
||||
self.path_dir = path_dir
|
||||
self.test_prevalences = test_prevalences
|
||||
self.classes = classes
|
||||
|
|
|
@ -4,6 +4,8 @@ import numpy as np
|
|||
import os
|
||||
|
||||
from quapy.protocol import AbstractProtocol
|
||||
from quapy.data import LabelledCollection
|
||||
|
||||
|
||||
DEV_SAMPLES = 1000
|
||||
TEST_SAMPLES = 5000
|
||||
|
@ -12,6 +14,13 @@ ERROR_TOL = 1E-3
|
|||
|
||||
|
||||
def load_category_map(path):
|
||||
"""
|
||||
Loads the category map, i.e., a mapping of numerical ids of labels with a human readable name.
|
||||
|
||||
:param path: path to the label map file
|
||||
:return: a dictionary cat2code (i.e., cat2code[cat_name] gives access to the category id) and a list code2cat (i.e.,
|
||||
code2cat[cat_id] gives access to the category name)
|
||||
"""
|
||||
cat2code = {}
|
||||
with open(path, 'rt') as fin:
|
||||
for line in fin:
|
||||
|
@ -22,6 +31,16 @@ def load_category_map(path):
|
|||
|
||||
|
||||
def load_raw_documents(path):
|
||||
"""
|
||||
Loads raw documents. In case the sample is unlabelled,
|
||||
the labels returned are None
|
||||
|
||||
:param path: path to the data sample containing the raw documents
|
||||
:return: a tuple with the documents (np.ndarray of strings of shape `(n,)`) and
|
||||
the labels (a np.ndarray of shape `(n,)` if the sample is labelled,
|
||||
or None if the sample is unlabelled), with `n` the number of instances in the sample
|
||||
(250 for T1A, 1000 for T1B)
|
||||
"""
|
||||
df = pd.read_csv(path)
|
||||
documents = list(df["text"].values)
|
||||
labels = None
|
||||
|
@ -30,7 +49,16 @@ def load_raw_documents(path):
|
|||
return documents, labels
|
||||
|
||||
|
||||
def load_vector_documents(path):
|
||||
def load_vector_documents_2022(path):
|
||||
"""
|
||||
Loads vectorized documents. In case the sample is unlabelled,
|
||||
the labels returned are None
|
||||
|
||||
:param path: path to the data sample containing the raw documents
|
||||
:return: a tuple with the documents (np.ndarray of shape `(n,300)`) and the labels (a np.ndarray of shape `(n,)` if
|
||||
the sample is labelled, or None if the sample is unlabelled), with `n` the number of instances in the sample
|
||||
(250 for T1A, 1000 for T1B)
|
||||
"""
|
||||
D = pd.read_csv(path).to_numpy(dtype=float)
|
||||
labelled = D.shape[1] == 301
|
||||
if labelled:
|
||||
|
@ -40,6 +68,25 @@ def load_vector_documents(path):
|
|||
return X, y
|
||||
|
||||
|
||||
def load_vector_documents_2024(path):
|
||||
"""
|
||||
Loads vectorized documents. In case the sample is unlabelled,
|
||||
the labels returned are None
|
||||
|
||||
:param path: path to the data sample containing the raw documents
|
||||
:return: a tuple with the documents (np.ndarray of shape `(n,256)`) and the labels (a np.ndarray of shape `(n,)` if
|
||||
the sample is labelled, or None if the sample is unlabelled), with `n` the number of instances in the sample
|
||||
(250 for T1 and T4, 1000 for T2, and 200 for T3)
|
||||
"""
|
||||
D = pd.read_csv(path).to_numpy(dtype=float)
|
||||
labelled = D.shape[1] == 257
|
||||
if labelled:
|
||||
X, y = D[:,1:], D[:,0].astype(int).flatten()
|
||||
else:
|
||||
X, y = D, None
|
||||
return X, y
|
||||
|
||||
|
||||
class SamplesFromDir(AbstractProtocol):
|
||||
|
||||
def __init__(self, path_dir:str, ground_truth_path:str, load_fn):
|
||||
|
@ -53,6 +100,20 @@ class SamplesFromDir(AbstractProtocol):
|
|||
yield sample, prevalence
|
||||
|
||||
|
||||
class LabelledCollectionsFromDir(AbstractProtocol):
|
||||
|
||||
def __init__(self, path_dir:str, ground_truth_path:str, load_fn):
|
||||
self.path_dir = path_dir
|
||||
self.load_fn = load_fn
|
||||
self.true_prevs = pd.read_csv(ground_truth_path, index_col=0)
|
||||
|
||||
def __call__(self):
|
||||
for id, prevalence in self.true_prevs.iterrows():
|
||||
collection_path = os.path.join(self.path_dir, f'{id}.txt')
|
||||
lc = LabelledCollection.load(path=collection_path, loader_func=self.load_fn)
|
||||
yield lc
|
||||
|
||||
|
||||
class ResultSubmission:
|
||||
|
||||
def __init__(self):
|
|
@ -123,7 +123,7 @@ class LabelledCollection:
|
|||
if len(prevs) == self.n_classes - 1:
|
||||
prevs = prevs + (1 - sum(prevs),)
|
||||
assert len(prevs) == self.n_classes, 'unexpected number of prevalences'
|
||||
assert sum(prevs) == 1, f'prevalences ({prevs}) wrong range (sum={sum(prevs)})'
|
||||
assert np.isclose(sum(prevs), 1), f'prevalences ({prevs}) wrong range (sum={sum(prevs)})'
|
||||
|
||||
# Decide how many instances should be taken for each class in order to satisfy the requested prevalence
|
||||
# accurately, and the number of instances in the sample (exactly). If int(size * prevs[i]) (which is
|
||||
|
@ -549,7 +549,7 @@ class Dataset:
|
|||
yield Dataset(train, test, name=f'fold {(i % nfolds) + 1}/{nfolds} (round={(i // nfolds) + 1})')
|
||||
|
||||
|
||||
def reduce(self, n_train=100, n_test=100):
|
||||
def reduce(self, n_train=100, n_test=100, random_state=None):
|
||||
"""
|
||||
Reduce the number of instances in place for quick experiments. Preserves the prevalence of each set.
|
||||
|
||||
|
@ -557,6 +557,14 @@ class Dataset:
|
|||
:param n_test: number of test documents to keep (default 100)
|
||||
:return: self
|
||||
"""
|
||||
self.training = self.training.sampling(n_train, *self.training.prevalence())
|
||||
self.test = self.test.sampling(n_test, *self.test.prevalence())
|
||||
self.training = self.training.sampling(
|
||||
n_train,
|
||||
*self.training.prevalence(),
|
||||
random_state = random_state
|
||||
)
|
||||
self.test = self.test.sampling(
|
||||
n_test,
|
||||
*self.test.prevalence(),
|
||||
random_state = random_state
|
||||
)
|
||||
return self
|
|
@ -90,12 +90,17 @@ def standardize(dataset: Dataset, inplace=False):
|
|||
:class:`quapy.data.base.Dataset` is to be returned
|
||||
:return: an instance of :class:`quapy.data.base.Dataset`
|
||||
"""
|
||||
s = StandardScaler(copy=not inplace)
|
||||
training = s.fit_transform(dataset.training.instances)
|
||||
test = s.transform(dataset.test.instances)
|
||||
s = StandardScaler()
|
||||
train, test = dataset.train_test
|
||||
std_train_X = s.fit_transform(train.X)
|
||||
std_test_X = s.transform(test.X)
|
||||
if inplace:
|
||||
dataset.training.instances = std_train_X
|
||||
dataset.test.instances = std_test_X
|
||||
return dataset
|
||||
else:
|
||||
training = LabelledCollection(std_train_X, train.labels, classes=train.classes_)
|
||||
test = LabelledCollection(std_test_X, test.labels, classes=test.classes_)
|
||||
return Dataset(training, test, dataset.vocabulary, dataset.name)
|
||||
|
||||
|
||||
|
|
|
@ -158,8 +158,8 @@ def kld(prevs, prevs_hat, eps=None):
|
|||
:return: Kullback-Leibler divergence between the two distributions
|
||||
"""
|
||||
eps = __check_eps(eps)
|
||||
smooth_prevs = prevs + eps
|
||||
smooth_prevs_hat = prevs_hat + eps
|
||||
smooth_prevs = smooth(prevs, eps)
|
||||
smooth_prevs_hat = smooth(prevs_hat, eps)
|
||||
return (smooth_prevs*np.log(smooth_prevs/smooth_prevs_hat)).sum(axis=-1)
|
||||
|
||||
|
||||
|
@ -285,6 +285,36 @@ def mnrae(prevs, prevs_hat, eps=None):
|
|||
return nrae(prevs, prevs_hat, eps).mean()
|
||||
|
||||
|
||||
def nmd(prevs, prevs_hat):
|
||||
"""
|
||||
Computes the Normalized Match Distance; which is the Normalized Distance multiplied by the factor
|
||||
`1/(n-1)` to guarantee the measure ranges between 0 (best prediction) and 1 (worst prediction).
|
||||
|
||||
:param prevs: array-like of shape `(n_classes,)` or `(n_instances, n_classes)` with the true prevalence values
|
||||
:param prevs_hat: array-like of shape `(n_classes,)` or `(n_instances, n_classes)` with the predicted prevalence values
|
||||
:return: float in [0,1]
|
||||
"""
|
||||
n = prevs.shape[-1]
|
||||
return (1./(n-1))*np.mean(match_distance(prevs, prevs_hat))
|
||||
|
||||
|
||||
def md(prevs, prevs_hat, ERROR_TOL=1E-3):
|
||||
"""
|
||||
Computes the Match Distance, under the assumption that the cost in mistaking class i with class i+1 is 1 in
|
||||
all cases.
|
||||
|
||||
:param prevs: array-like of shape `(n_classes,)` or `(n_instances, n_classes)` with the true prevalence values
|
||||
:param prevs_hat: array-like of shape `(n_classes,)` or `(n_instances, n_classes)` with the predicted prevalence values
|
||||
:return: float
|
||||
"""
|
||||
P = np.cumsum(prevs, axis=-1)
|
||||
P_hat = np.cumsum(prevs_hat, axis=-1)
|
||||
assert np.all(np.isclose(P_hat[..., -1], 1.0, rtol=ERROR_TOL)), \
|
||||
'arg error in match_distance: the array does not represent a valid distribution'
|
||||
distances = np.abs(P-P_hat)
|
||||
return distances[..., :-1].sum(axis=-1)
|
||||
|
||||
|
||||
def smooth(prevs, eps):
|
||||
""" Smooths a prevalence distribution with :math:`\\epsilon` (`eps`) as:
|
||||
:math:`\\underline{p}(y)=\\frac{\\epsilon+p(y)}{\\epsilon|\\mathcal{Y}|+
|
||||
|
@ -328,3 +358,5 @@ normalized_absolute_error = nae
|
|||
normalized_relative_absolute_error = nrae
|
||||
mean_normalized_absolute_error = mnae
|
||||
mean_normalized_relative_absolute_error = mnrae
|
||||
normalized_match_distance = nmd
|
||||
match_distance = md
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
import itertools
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from typing import Literal, Union, Callable
|
||||
|
@ -189,6 +188,19 @@ def check_prevalence_vector(prevalences: ArrayLike, raise_exception: bool=False,
|
|||
return valid
|
||||
|
||||
|
||||
def uniform_prevalence(n_classes):
|
||||
"""
|
||||
Returns a vector representing the uniform distribution for `n_classes`
|
||||
|
||||
:param n_classes: number of classes
|
||||
:return: np.ndarray with all values 1/n_classes
|
||||
"""
|
||||
assert isinstance(n_classes, int) and n_classes>0, \
|
||||
(f'param {n_classes} not understood; must be a positive integer representing the '
|
||||
f'number of classes ')
|
||||
return np.full(shape=n_classes, fill_value=1./n_classes)
|
||||
|
||||
|
||||
def normalize_prevalence(prevalences: ArrayLike, method='l1'):
|
||||
"""
|
||||
Normalizes a vector or matrix of prevalence values. The normalization consists of applying a L1 normalization in
|
||||
|
@ -606,3 +618,5 @@ def solve_adjustment(
|
|||
raise ValueError(f"Solver {solver} not known.")
|
||||
else:
|
||||
raise ValueError(f'unknown {solver=}')
|
||||
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@ from . import aggregative
|
|||
from . import non_aggregative
|
||||
from . import meta
|
||||
|
||||
|
||||
AGGREGATIVE_METHODS = {
|
||||
aggregative.CC,
|
||||
aggregative.ACC,
|
||||
|
|
|
@ -24,12 +24,14 @@ class KDEBase:
|
|||
Checks that the bandwidth parameter is correct
|
||||
|
||||
:param bandwidth: either a string (see BANDWIDTH_METHOD) or a float
|
||||
:return: nothing, but raises an exception for invalid values
|
||||
:return: the bandwidth if the check is passed, or raises an exception for invalid values
|
||||
"""
|
||||
assert bandwidth in KDEBase.BANDWIDTH_METHOD or isinstance(bandwidth, float), \
|
||||
f'invalid bandwidth, valid ones are {KDEBase.BANDWIDTH_METHOD} or float values'
|
||||
if isinstance(bandwidth, float):
|
||||
assert 0 < bandwidth < 1, "the bandwith for KDEy should be in (0,1), since this method models the unit simplex"
|
||||
assert 0 < bandwidth < 1, \
|
||||
"the bandwith for KDEy should be in (0,1), since this method models the unit simplex"
|
||||
return bandwidth
|
||||
|
||||
def get_kde_function(self, X, bandwidth):
|
||||
"""
|
||||
|
@ -62,8 +64,13 @@ class KDEBase:
|
|||
:param bandwidth: float, the bandwidth of the kernel
|
||||
:return: a list of KernelDensity objects, each fitted with the corresponding class-specific covariates
|
||||
"""
|
||||
return [self.get_kde_function(X[y == cat], bandwidth) for cat in classes]
|
||||
|
||||
class_cond_X = []
|
||||
for cat in classes:
|
||||
selX = X[y==cat]
|
||||
if selX.size==0:
|
||||
selX = [F.uniform_prevalence(len(classes))]
|
||||
class_cond_X.append(selX)
|
||||
return [self.get_kde_function(X_cond_yi, bandwidth) for X_cond_yi in class_cond_X]
|
||||
|
||||
|
||||
class KDEyML(AggregativeSoftQuantifier, KDEBase):
|
||||
|
@ -101,16 +108,13 @@ class KDEyML(AggregativeSoftQuantifier, KDEBase):
|
|||
Alternatively, this set can be specified at fit time by indicating the exact set of data
|
||||
on which the predictions are to be generated.
|
||||
:param bandwidth: float, the bandwidth of the Kernel
|
||||
:param n_jobs: number of parallel workers
|
||||
:param random_state: a seed to be set before fitting any base quantifier (default None)
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=10, bandwidth=0.1, n_jobs=None, random_state=None):
|
||||
self._check_bandwidth(bandwidth)
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1, random_state=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.bandwidth = bandwidth
|
||||
self.n_jobs = n_jobs
|
||||
self.bandwidth = KDEBase._check_bandwidth(bandwidth)
|
||||
self.random_state=random_state
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
|
@ -125,17 +129,17 @@ class KDEyML(AggregativeSoftQuantifier, KDEBase):
|
|||
:param posteriors: instances in the sample converted into posterior probabilities
|
||||
:return: a vector of class prevalence estimates
|
||||
"""
|
||||
np.random.RandomState(self.random_state)
|
||||
epsilon = 1e-10
|
||||
n_classes = len(self.mix_densities)
|
||||
test_densities = [self.pdf(kde_i, posteriors) for kde_i in self.mix_densities]
|
||||
with qp.util.temp_seed(self.random_state):
|
||||
epsilon = 1e-10
|
||||
n_classes = len(self.mix_densities)
|
||||
test_densities = [self.pdf(kde_i, posteriors) for kde_i in self.mix_densities]
|
||||
|
||||
def neg_loglikelihood(prev):
|
||||
test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip (prev, test_densities))
|
||||
test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
|
||||
return -np.sum(test_loglikelihood)
|
||||
def neg_loglikelihood(prev):
|
||||
test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip (prev, test_densities))
|
||||
test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
|
||||
return -np.sum(test_loglikelihood)
|
||||
|
||||
return F.optim_minimize(neg_loglikelihood, n_classes)
|
||||
return F.optim_minimize(neg_loglikelihood, n_classes)
|
||||
|
||||
|
||||
class KDEyHD(AggregativeSoftQuantifier, KDEBase):
|
||||
|
@ -178,20 +182,17 @@ class KDEyHD(AggregativeSoftQuantifier, KDEBase):
|
|||
Alternatively, this set can be specified at fit time by indicating the exact set of data
|
||||
on which the predictions are to be generated.
|
||||
:param bandwidth: float, the bandwidth of the Kernel
|
||||
:param n_jobs: number of parallel workers
|
||||
:param random_state: a seed to be set before fitting any base quantifier (default None)
|
||||
:param montecarlo_trials: number of Monte Carlo trials (default 10000)
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=10, divergence: str='HD',
|
||||
bandwidth=0.1, n_jobs=None, random_state=None, montecarlo_trials=10000):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5, divergence: str='HD',
|
||||
bandwidth=0.1, random_state=None, montecarlo_trials=10000):
|
||||
|
||||
self._check_bandwidth(bandwidth)
|
||||
self.classifier = classifier
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.divergence = divergence
|
||||
self.bandwidth = bandwidth
|
||||
self.n_jobs = n_jobs
|
||||
self.bandwidth = KDEBase._check_bandwidth(bandwidth)
|
||||
self.random_state=random_state
|
||||
self.montecarlo_trials = montecarlo_trials
|
||||
|
||||
|
@ -273,15 +274,12 @@ class KDEyCS(AggregativeSoftQuantifier):
|
|||
Alternatively, this set can be specified at fit time by indicating the exact set of data
|
||||
on which the predictions are to be generated.
|
||||
:param bandwidth: float, the bandwidth of the Kernel
|
||||
:param n_jobs: number of parallel workers
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=10, bandwidth=0.1, n_jobs=None):
|
||||
KDEBase._check_bandwidth(bandwidth)
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.bandwidth = bandwidth
|
||||
self.n_jobs = n_jobs
|
||||
self.bandwidth = KDEBase._check_bandwidth(bandwidth)
|
||||
|
||||
def gram_matrix_mix_sum(self, X, Y=None):
|
||||
# this adapts the output of the rbf_kernel function (pairwise evaluations of Gaussian kernels k(x,y))
|
||||
|
@ -350,7 +348,7 @@ class KDEyCS(AggregativeSoftQuantifier):
|
|||
# called \overline{r} in the paper
|
||||
alpha_ratio = alpha * self.counts_inv
|
||||
|
||||
# recal that tr_te_sums already accounts for the constant terms (1/Li)*(1/M)
|
||||
# recall that tr_te_sums already accounts for the constant terms (1/Li)*(1/M)
|
||||
partA = -np.log((alpha_ratio @ tr_te_sums) * Minv)
|
||||
partB = 0.5 * np.log(alpha_ratio @ tr_tr_sums @ alpha_ratio)
|
||||
return partA + partB #+ partC
|
||||
|
|
|
@ -21,7 +21,7 @@ class QuaNetTrainer(BaseQuantifier):
|
|||
Example:
|
||||
|
||||
>>> import quapy as qp
|
||||
>>> from quapy.method.meta import QuaNet
|
||||
>>> from quapy.method_name.meta import QuaNet
|
||||
>>> from quapy.classification.neural import NeuralClassifierTrainer, CNNnet
|
||||
>>>
|
||||
>>> # use samples of 100 elements
|
||||
|
|
|
@ -27,8 +27,8 @@ class ThresholdOptimization(BinaryAggregativeQuantifier):
|
|||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5, n_jobs=None):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=None, n_jobs=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.n_jobs = qp._get_njobs(n_jobs)
|
||||
|
||||
|
@ -143,7 +143,7 @@ class T50(ThresholdOptimization):
|
|||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
super().__init__(classifier, val_split)
|
||||
|
||||
def condition(self, tpr, fpr) -> float:
|
||||
|
@ -167,7 +167,7 @@ class MAX(ThresholdOptimization):
|
|||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
super().__init__(classifier, val_split)
|
||||
|
||||
def condition(self, tpr, fpr) -> float:
|
||||
|
@ -192,7 +192,7 @@ class X(ThresholdOptimization):
|
|||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
super().__init__(classifier, val_split)
|
||||
|
||||
def condition(self, tpr, fpr) -> float:
|
||||
|
@ -215,7 +215,7 @@ class MS(ThresholdOptimization):
|
|||
`k`-fold cross validation (this integer stands for the number of folds `k`, defaults 5), or as a
|
||||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
super().__init__(classifier, val_split)
|
||||
|
||||
def condition(self, tpr, fpr) -> float:
|
||||
|
@ -254,7 +254,7 @@ class MS2(MS):
|
|||
`k`-fold cross validation (this integer stands for the number of folds `k`, defaults 5), or as a
|
||||
:class:`quapy.data.base.LabelledCollection` (the split itself).
|
||||
"""
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
super().__init__(classifier, val_split)
|
||||
|
||||
def discard(self, tpr, fpr) -> bool:
|
||||
|
|
|
@ -3,7 +3,6 @@ from copy import deepcopy
|
|||
from typing import Callable, Literal, Union
|
||||
import numpy as np
|
||||
from abstention.calibration import NoBiasVectorScaling, TempScaling, VectorScaling
|
||||
from scipy import optimize
|
||||
from sklearn.base import BaseEstimator
|
||||
from sklearn.calibration import CalibratedClassifierCV
|
||||
from sklearn.metrics import confusion_matrix
|
||||
|
@ -12,7 +11,6 @@ from sklearn.model_selection import cross_val_predict
|
|||
import quapy as qp
|
||||
import quapy.functional as F
|
||||
from quapy.functional import get_divergence
|
||||
from quapy.classification.calibration import NBVSCalibration, BCTSCalibration, TSCalibration, VSCalibration
|
||||
from quapy.classification.svmperf import SVMperf
|
||||
from quapy.data import LabelledCollection
|
||||
from quapy.method.base import BaseQuantifier, BinaryQuantifier, OneVsAllGeneric
|
||||
|
@ -82,6 +80,13 @@ class AggregativeQuantifier(BaseQuantifier, ABC):
|
|||
:param data: a :class:`quapy.data.base.LabelledCollection` consisting of the training data
|
||||
:param fit_classifier: whether to train the learner (default is True). Set to False if the
|
||||
learner has been trained outside the quantifier.
|
||||
:param val_split: specifies the data used for generating classifier predictions. This specification
|
||||
can be made as float in (0, 1) indicating the proportion of stratified held-out validation set to
|
||||
be extracted from the training set; or as an integer (default 5), indicating that the predictions
|
||||
are to be generated in a `k`-fold cross-validation manner (with this integer indicating the value
|
||||
for `k`); or as a collection defining the specific set of data to use for validation.
|
||||
Alternatively, this set can be specified at fit time by indicating the exact set of data
|
||||
on which the predictions are to be generated.
|
||||
:return: self
|
||||
"""
|
||||
self._check_init_parameters()
|
||||
|
@ -111,6 +116,12 @@ class AggregativeQuantifier(BaseQuantifier, ABC):
|
|||
if fit_classifier:
|
||||
self._check_non_empty_classes(data)
|
||||
|
||||
if predict_on is None:
|
||||
if not fit_classifier:
|
||||
predict_on = data
|
||||
if isinstance(self.val_split, LabelledCollection) and self.val_split!=predict_on:
|
||||
raise ValueError(f'{fit_classifier=} but a LabelledCollection was provided as val_split '
|
||||
f'in __init__ that is not the same as the LabelledCollection provided in fit.')
|
||||
if predict_on is None:
|
||||
predict_on = self.val_split
|
||||
|
||||
|
@ -330,8 +341,8 @@ class CC(AggregativeCrispQuantifier):
|
|||
:param classifier: a sklearn's Estimator that generates a classifier
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
"""
|
||||
|
@ -360,8 +371,8 @@ class PCC(AggregativeSoftQuantifier):
|
|||
:param classifier: a sklearn's Estimator that generates a classifier
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
"""
|
||||
|
@ -423,14 +434,14 @@ class ACC(AggregativeCrispQuantifier):
|
|||
"""
|
||||
def __init__(
|
||||
self,
|
||||
classifier: BaseEstimator,
|
||||
classifier: BaseEstimator=None,
|
||||
val_split=5,
|
||||
solver: Literal['minimize', 'exact', 'exact-raise', 'exact-cc'] = 'minimize',
|
||||
method: Literal['inversion', 'invariant-ratio'] = 'inversion',
|
||||
norm: Literal['clip', 'mapsimplex', 'condsoftmax'] = 'clip',
|
||||
n_jobs=None,
|
||||
):
|
||||
self.classifier = classifier
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.n_jobs = qp._get_njobs(n_jobs)
|
||||
self.solver = solver
|
||||
|
@ -467,7 +478,7 @@ class ACC(AggregativeCrispQuantifier):
|
|||
if self.method not in ACC.METHODS:
|
||||
raise ValueError(f"unknown method; valid ones are {ACC.METHODS}")
|
||||
if self.norm not in ACC.NORMALIZATIONS:
|
||||
raise ValueError(f"unknown clipping; valid ones are {ACC.NORMALIZATIONS}")
|
||||
raise ValueError(f"unknown normalization; valid ones are {ACC.NORMALIZATIONS}")
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
"""
|
||||
|
@ -558,14 +569,14 @@ class PACC(AggregativeSoftQuantifier):
|
|||
"""
|
||||
def __init__(
|
||||
self,
|
||||
classifier: BaseEstimator,
|
||||
classifier: BaseEstimator=None,
|
||||
val_split=5,
|
||||
solver: Literal['minimize', 'exact', 'exact-raise', 'exact-cc'] = 'minimize',
|
||||
method: Literal['inversion', 'invariant-ratio'] = 'inversion',
|
||||
norm: Literal['clip', 'mapsimplex', 'condsoftmax'] = 'clip',
|
||||
n_jobs=None
|
||||
):
|
||||
self.classifier = classifier
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.n_jobs = qp._get_njobs(n_jobs)
|
||||
self.solver = solver
|
||||
|
@ -577,8 +588,9 @@ class PACC(AggregativeSoftQuantifier):
|
|||
raise ValueError(f"unknown solver; valid ones are {ACC.SOLVERS}")
|
||||
if self.method not in ACC.METHODS:
|
||||
raise ValueError(f"unknown method; valid ones are {ACC.METHODS}")
|
||||
if self.clipping not in ACC.NORMALIZATIONS:
|
||||
raise ValueError(f"unknown clipping; valid ones are {ACC.NORMALIZATIONS}")
|
||||
if self.norm not in ACC.NORMALIZATIONS:
|
||||
raise ValueError(f"unknown normalization; valid ones are {ACC.NORMALIZATIONS}")
|
||||
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
"""
|
||||
|
@ -654,8 +666,8 @@ class EMQ(AggregativeSoftQuantifier):
|
|||
MAX_ITER = 1000
|
||||
EPSILON = 1e-4
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=None, exact_train_prev=True, recalib=None, n_jobs=None):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=None, exact_train_prev=True, recalib=None, n_jobs=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.exact_train_prev = exact_train_prev
|
||||
self.recalib = recalib
|
||||
|
@ -769,7 +781,7 @@ class EMQ(AggregativeSoftQuantifier):
|
|||
Px = posterior_probabilities
|
||||
Ptr = np.copy(tr_prev)
|
||||
|
||||
if np.product(Ptr) == 0: # some entry is 0; we should smooth the values to avoid 0 division
|
||||
if np.prod(Ptr) == 0: # some entry is 0; we should smooth the values to avoid 0 division
|
||||
Ptr += epsilon
|
||||
Ptr /= Ptr.sum()
|
||||
|
||||
|
@ -818,7 +830,7 @@ class BayesianCC(AggregativeCrispQuantifier):
|
|||
:param mcmc_seed: random seed for the MCMC sampler (default 0)
|
||||
"""
|
||||
def __init__(self,
|
||||
classifier: BaseEstimator,
|
||||
classifier: BaseEstimator=None,
|
||||
val_split: float = 0.75,
|
||||
num_warmup: int = 500,
|
||||
num_samples: int = 1_000,
|
||||
|
@ -835,7 +847,7 @@ class BayesianCC(AggregativeCrispQuantifier):
|
|||
if _bayesian.DEPENDENCIES_INSTALLED is False:
|
||||
raise ImportError("Auxiliary dependencies are required. Run `$ pip install quapy[bayes]` to install them.")
|
||||
|
||||
self.classifier = classifier
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.num_warmup = num_warmup
|
||||
self.num_samples = num_samples
|
||||
|
@ -905,8 +917,8 @@ class HDy(AggregativeSoftQuantifier, BinaryAggregativeQuantifier):
|
|||
validation distribution, or a :class:`quapy.data.base.LabelledCollection` (the split itself), or an integer indicating the number of folds (default 5)..
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
|
@ -981,8 +993,8 @@ class DyS(AggregativeSoftQuantifier, BinaryAggregativeQuantifier):
|
|||
:param n_jobs: number of parallel workers.
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5, n_bins=8, divergence: Union[str, Callable]= 'HD', tol=1e-05, n_jobs=None):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5, n_bins=8, divergence: Union[str, Callable]= 'HD', tol=1e-05, n_jobs=None):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.tol = tol
|
||||
self.divergence = divergence
|
||||
|
@ -1046,8 +1058,8 @@ class SMM(AggregativeSoftQuantifier, BinaryAggregativeQuantifier):
|
|||
validation distribution, or a :class:`quapy.data.base.LabelledCollection` (the split itself), or an integer indicating the number of folds (default 5)..
|
||||
"""
|
||||
|
||||
def __init__(self, classifier: BaseEstimator, val_split=5):
|
||||
self.classifier = classifier
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5):
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
|
||||
def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
|
||||
|
@ -1095,9 +1107,9 @@ class DMy(AggregativeSoftQuantifier):
|
|||
:param n_jobs: number of parallel workers (default None)
|
||||
"""
|
||||
|
||||
def __init__(self, classifier, val_split=5, nbins=8, divergence: Union[str, Callable]='HD',
|
||||
def __init__(self, classifier: BaseEstimator=None, val_split=5, nbins=8, divergence: Union[str, Callable]='HD',
|
||||
cdf=False, search='optim_minimize', n_jobs=None):
|
||||
self.classifier = classifier
|
||||
self.classifier = qp._get_classifier(classifier)
|
||||
self.val_split = val_split
|
||||
self.nbins = nbins
|
||||
self.divergence = divergence
|
||||
|
@ -1420,12 +1432,10 @@ class AggregativeMedianEstimator(BinaryQuantifier):
|
|||
|
||||
def _delayed_fit_classifier(self, args):
|
||||
with qp.util.temp_seed(self.random_state):
|
||||
print('enter job')
|
||||
cls_params, training, kwargs = args
|
||||
model = deepcopy(self.base_quantifier)
|
||||
model.set_params(**cls_params)
|
||||
predictions = model.classifier_fit_predict(training, **kwargs)
|
||||
print('exit job')
|
||||
return (model, predictions)
|
||||
|
||||
def _delayed_fit_aggregation(self, args):
|
||||
|
@ -1455,7 +1465,6 @@ class AggregativeMedianEstimator(BinaryQuantifier):
|
|||
backend='threading'
|
||||
)
|
||||
else:
|
||||
print('only 1')
|
||||
model = self.base_quantifier
|
||||
model.set_params(**cls_configs[0])
|
||||
predictions = model.classifier_fit_predict(training, **kwargs)
|
||||
|
|
|
@ -54,7 +54,7 @@ class OneVsAll:
|
|||
pass
|
||||
|
||||
|
||||
def newOneVsAll(binary_quantifier, n_jobs=None):
|
||||
def newOneVsAll(binary_quantifier: BaseQuantifier, n_jobs=None):
|
||||
assert isinstance(binary_quantifier, BaseQuantifier), \
|
||||
f'{binary_quantifier} does not seem to be a Quantifier'
|
||||
if isinstance(binary_quantifier, qp.method.aggregative.AggregativeQuantifier):
|
||||
|
@ -69,7 +69,7 @@ class OneVsAllGeneric(OneVsAll, BaseQuantifier):
|
|||
quantifier for each class, and then l1-normalizes the outputs so that the class prevelence values sum up to 1.
|
||||
"""
|
||||
|
||||
def __init__(self, binary_quantifier, n_jobs=None):
|
||||
def __init__(self, binary_quantifier: BaseQuantifier, n_jobs=None):
|
||||
assert isinstance(binary_quantifier, BaseQuantifier), \
|
||||
f'{binary_quantifier} does not seem to be a Quantifier'
|
||||
if isinstance(binary_quantifier, qp.method.aggregative.AggregativeQuantifier):
|
||||
|
|
|
@ -0,0 +1,102 @@
|
|||
"""This module allows the composition of quantification methods from loss functions and feature transformations. This functionality is realized through an integration of the qunfold package: https://github.com/mirkobunse/qunfold."""
|
||||
|
||||
_import_error_message = """qunfold, the back-end of quapy.method.composable, is not properly installed.
|
||||
|
||||
To fix this error, call:
|
||||
|
||||
pip install --upgrade pip setuptools wheel
|
||||
pip install "jax[cpu]"
|
||||
pip install "qunfold @ git+https://github.com/mirkobunse/qunfold@v0.1.4"
|
||||
"""
|
||||
|
||||
try:
|
||||
import qunfold
|
||||
from qunfold.quapy import QuaPyWrapper
|
||||
from qunfold.sklearn import CVClassifier
|
||||
from qunfold import (
|
||||
LeastSquaresLoss, # losses
|
||||
BlobelLoss,
|
||||
EnergyLoss,
|
||||
HellingerSurrogateLoss,
|
||||
CombinedLoss,
|
||||
TikhonovRegularization,
|
||||
TikhonovRegularized,
|
||||
ClassTransformer, # transformers
|
||||
HistogramTransformer,
|
||||
DistanceTransformer,
|
||||
KernelTransformer,
|
||||
EnergyKernelTransformer,
|
||||
LaplacianKernelTransformer,
|
||||
GaussianKernelTransformer,
|
||||
GaussianRFFKernelTransformer,
|
||||
)
|
||||
|
||||
__all__ = [ # control public members, e.g., for auto-documentation in sphinx; omit QuaPyWrapper
|
||||
"ComposableQuantifier",
|
||||
"CVClassifier",
|
||||
"LeastSquaresLoss",
|
||||
"BlobelLoss",
|
||||
"EnergyLoss",
|
||||
"HellingerSurrogateLoss",
|
||||
"CombinedLoss",
|
||||
"TikhonovRegularization",
|
||||
"TikhonovRegularized",
|
||||
"ClassTransformer",
|
||||
"HistogramTransformer",
|
||||
"DistanceTransformer",
|
||||
"KernelTransformer",
|
||||
"EnergyKernelTransformer",
|
||||
"LaplacianKernelTransformer",
|
||||
"GaussianKernelTransformer",
|
||||
"GaussianRFFKernelTransformer",
|
||||
]
|
||||
except ImportError as e:
|
||||
raise ImportError(_import_error_message) from e
|
||||
|
||||
def ComposableQuantifier(loss, transformer, **kwargs):
|
||||
"""A generic quantification / unfolding method that solves a linear system of equations.
|
||||
|
||||
This class represents any quantifier that can be described in terms of a loss function, a feature transformation, and a regularization term. In this implementation, the loss is minimized through unconstrained second-order minimization. Valid probability estimates are ensured through a soft-max trick by Bunse (2022).
|
||||
|
||||
Args:
|
||||
loss: An instance of a loss class from `quapy.methods.composable`.
|
||||
transformer: An instance of a transformer class from `quapy.methods.composable`.
|
||||
solver (optional): The `method` argument in `scipy.optimize.minimize`. Defaults to `"trust-ncg"`.
|
||||
solver_options (optional): The `options` argument in `scipy.optimize.minimize`. Defaults to `{"gtol": 1e-8, "maxiter": 1000}`.
|
||||
seed (optional): A random number generator seed from which a numpy RandomState is created. Defaults to `None`.
|
||||
|
||||
Examples:
|
||||
Here, we create the ordinal variant of ACC (Bunse et al., 2023). This variant consists of the original feature transformation of ACC and of the original loss of ACC, the latter of which is regularized towards smooth solutions.
|
||||
|
||||
>>> from quapy.method.composable import (
|
||||
>>> ComposableQuantifier,
|
||||
>>> TikhonovRegularized,
|
||||
>>> LeastSquaresLoss,
|
||||
>>> ClassTransformer,
|
||||
>>> )
|
||||
>>> from sklearn.ensemble import RandomForestClassifier
|
||||
>>> o_acc = ComposableQuantifier(
|
||||
>>> TikhonovRegularized(LeastSquaresLoss(), 0.01),
|
||||
>>> ClassTransformer(RandomForestClassifier(oob_score=True))
|
||||
>>> )
|
||||
|
||||
Here, we perform hyper-parameter optimization with the ordinal ACC.
|
||||
|
||||
>>> quapy.model_selection.GridSearchQ(
|
||||
>>> model = o_acc,
|
||||
>>> param_grid = { # try both splitting criteria
|
||||
>>> "transformer__classifier__estimator__criterion": ["gini", "entropy"],
|
||||
>>> },
|
||||
>>> # ...
|
||||
>>> )
|
||||
|
||||
To use a classifier that does not provide the `oob_score` argument, such as logistic regression, you have to configure a cross validation of this classifier. Here, we employ 10 cross validation folds. 5 folds are the default.
|
||||
|
||||
>>> from quapy.method.composable import CVClassifier
|
||||
>>> from sklearn.linear_model import LogisticRegression
|
||||
>>> acc_lr = ComposableQuantifier(
|
||||
>>> LeastSquaresLoss(),
|
||||
>>> ClassTransformer(CVClassifier(LogisticRegression(), 10))
|
||||
>>> )
|
||||
"""
|
||||
return QuaPyWrapper(qunfold.GenericMethod(loss, transformer, **kwargs))
|
|
@ -328,7 +328,7 @@ class GridSearchQ(BaseQuantifier):
|
|||
if self.raise_errors:
|
||||
raise exception
|
||||
else:
|
||||
return ConfigStatus(params, status)
|
||||
return ConfigStatus(params, status, msg=str(exception))
|
||||
|
||||
try:
|
||||
with timeout(self.timeout):
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from collections import defaultdict
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.cm import get_cmap
|
||||
from matplotlib.pyplot import get_cmap
|
||||
import numpy as np
|
||||
from matplotlib import cm
|
||||
from scipy.stats import ttest_ind_from_stats
|
||||
|
|
|
@ -1,5 +1,11 @@
|
|||
import pytest
|
||||
import unittest
|
||||
|
||||
def test_import():
|
||||
import quapy as qp
|
||||
assert qp.__version__ is not None
|
||||
|
||||
class ImportTest(unittest.TestCase):
|
||||
def test_import(self):
|
||||
import quapy as qp
|
||||
self.assertIsNotNone(qp.__version__)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
|
@ -1,61 +1,127 @@
|
|||
import pytest
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from quapy.data.datasets import REVIEWS_SENTIMENT_DATASETS, TWITTER_SENTIMENT_DATASETS_TEST, \
|
||||
TWITTER_SENTIMENT_DATASETS_TRAIN, UCI_BINARY_DATASETS, LEQUA2022_TASKS, UCI_MULTICLASS_DATASETS,\
|
||||
fetch_reviews, fetch_twitter, fetch_UCIBinaryDataset, fetch_lequa2022, fetch_UCIMulticlassLabelledCollection
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
|
||||
import quapy.functional as F
|
||||
from quapy.method.aggregative import PCC
|
||||
from quapy.data.datasets import *
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dataset_name', REVIEWS_SENTIMENT_DATASETS)
|
||||
def test_fetch_reviews(dataset_name):
|
||||
dataset = fetch_reviews(dataset_name)
|
||||
print(f'Dataset {dataset_name}')
|
||||
print('Training set stats')
|
||||
dataset.training.stats()
|
||||
print('Test set stats')
|
||||
dataset.test.stats()
|
||||
class TestDatasets(unittest.TestCase):
|
||||
|
||||
def new_quantifier(self):
|
||||
return PCC(LogisticRegression(C=0.001, max_iter=100))
|
||||
|
||||
@pytest.mark.parametrize('dataset_name', TWITTER_SENTIMENT_DATASETS_TEST + TWITTER_SENTIMENT_DATASETS_TRAIN)
|
||||
def test_fetch_twitter(dataset_name):
|
||||
try:
|
||||
dataset = fetch_twitter(dataset_name)
|
||||
except ValueError as ve:
|
||||
if dataset_name == 'semeval' and ve.args[0].startswith(
|
||||
'dataset "semeval" can only be used for model selection.'):
|
||||
dataset = fetch_twitter(dataset_name, for_model_selection=True)
|
||||
print(f'Dataset {dataset_name}')
|
||||
print('Training set stats')
|
||||
dataset.training.stats()
|
||||
print('Test set stats')
|
||||
def _check_dataset(self, dataset):
|
||||
q = self.new_quantifier()
|
||||
print(f'testing method {q} in {dataset.name}...', end='')
|
||||
q.fit(dataset.training)
|
||||
estim_prevalences = q.quantify(dataset.test.instances)
|
||||
self.assertTrue(F.check_prevalence_vector(estim_prevalences))
|
||||
print(f'[done]')
|
||||
|
||||
def _check_samples(self, gen, q, max_samples_test=5, vectorizer=None):
|
||||
for X, p in gen():
|
||||
if vectorizer is not None:
|
||||
X = vectorizer.transform(X)
|
||||
estim_prevalences = q.quantify(X)
|
||||
self.assertTrue(F.check_prevalence_vector(estim_prevalences))
|
||||
max_samples_test -= 1
|
||||
if max_samples_test == 0:
|
||||
break
|
||||
|
||||
@pytest.mark.parametrize('dataset_name', UCI_BINARY_DATASETS)
|
||||
def test_fetch_UCIDataset(dataset_name):
|
||||
try:
|
||||
dataset = fetch_UCIBinaryDataset(dataset_name)
|
||||
except FileNotFoundError as fnfe:
|
||||
if dataset_name == 'pageblocks.5' and fnfe.args[0].find(
|
||||
'If this is the first time you attempt to load this dataset') > 0:
|
||||
print('The pageblocks.5 dataset requires some hand processing to be usable, skipping this test.')
|
||||
def test_reviews(self):
|
||||
for dataset_name in REVIEWS_SENTIMENT_DATASETS:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
dataset = fetch_reviews(dataset_name, tfidf=True, min_df=10)
|
||||
dataset.stats()
|
||||
dataset.reduce()
|
||||
print(f'[done]')
|
||||
self._check_dataset(dataset)
|
||||
|
||||
def test_twitter(self):
|
||||
for dataset_name in TWITTER_SENTIMENT_DATASETS_TEST:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
dataset = fetch_twitter(dataset_name, min_df=10)
|
||||
dataset.stats()
|
||||
dataset.reduce()
|
||||
print(f'[done]')
|
||||
self._check_dataset(dataset)
|
||||
|
||||
def test_UCIBinaryDataset(self):
|
||||
for dataset_name in UCI_BINARY_DATASETS:
|
||||
try:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
dataset = fetch_UCIBinaryDataset(dataset_name)
|
||||
dataset.stats()
|
||||
dataset.reduce()
|
||||
print(f'[done]')
|
||||
self._check_dataset(dataset)
|
||||
except FileNotFoundError as fnfe:
|
||||
if dataset_name == 'pageblocks.5' and fnfe.args[0].find(
|
||||
'If this is the first time you attempt to load this dataset') > 0:
|
||||
print('The pageblocks.5 dataset requires some hand processing to be usable; skipping this test.')
|
||||
continue
|
||||
|
||||
def test_UCIMultiDataset(self):
|
||||
for dataset_name in UCI_MULTICLASS_DATASETS:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
dataset = fetch_UCIMulticlassDataset(dataset_name)
|
||||
dataset.stats()
|
||||
n_classes = dataset.n_classes
|
||||
uniform_prev = F.uniform_prevalence(n_classes)
|
||||
dataset.training = dataset.training.sampling(100, *uniform_prev)
|
||||
dataset.test = dataset.test.sampling(100, *uniform_prev)
|
||||
print(f'[done]')
|
||||
self._check_dataset(dataset)
|
||||
|
||||
def test_lequa2022(self):
|
||||
if os.environ.get('QUAPY_TESTS_OMIT_LARGE_DATASETS'):
|
||||
print("omitting test_lequa2022 because QUAPY_TESTS_OMIT_LARGE_DATASETS is set")
|
||||
return
|
||||
print(f'Dataset {dataset_name}')
|
||||
print('Training set stats')
|
||||
dataset.training.stats()
|
||||
print('Test set stats')
|
||||
|
||||
for dataset_name in LEQUA2022_VECTOR_TASKS:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
train, gen_val, gen_test = fetch_lequa2022(dataset_name)
|
||||
train.stats()
|
||||
n_classes = train.n_classes
|
||||
train = train.sampling(100, *F.uniform_prevalence(n_classes))
|
||||
q = self.new_quantifier()
|
||||
q.fit(train)
|
||||
self._check_samples(gen_val, q, max_samples_test=5)
|
||||
self._check_samples(gen_test, q, max_samples_test=5)
|
||||
|
||||
for dataset_name in LEQUA2022_TEXT_TASKS:
|
||||
print(f'loading dataset {dataset_name}...', end='')
|
||||
train, gen_val, gen_test = fetch_lequa2022(dataset_name)
|
||||
train.stats()
|
||||
n_classes = train.n_classes
|
||||
train = train.sampling(100, *F.uniform_prevalence(n_classes))
|
||||
tfidf = TfidfVectorizer()
|
||||
train.instances = tfidf.fit_transform(train.instances)
|
||||
q = self.new_quantifier()
|
||||
q.fit(train)
|
||||
self._check_samples(gen_val, q, max_samples_test=5, vectorizer=tfidf)
|
||||
self._check_samples(gen_test, q, max_samples_test=5, vectorizer=tfidf)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dataset_name', UCI_MULTICLASS_DATASETS)
|
||||
def test_fetch_UCIMultiDataset(dataset_name):
|
||||
dataset = fetch_UCIMulticlassLabelledCollection(dataset_name)
|
||||
print(f'Dataset {dataset_name}')
|
||||
print('Training set stats')
|
||||
dataset.stats()
|
||||
print('Test set stats')
|
||||
def test_IFCB(self):
|
||||
if os.environ.get('QUAPY_TESTS_OMIT_LARGE_DATASETS'):
|
||||
print("omitting test_IFCB because QUAPY_TESTS_OMIT_LARGE_DATASETS is set")
|
||||
return
|
||||
|
||||
print(f'loading dataset IFCB.')
|
||||
for mod_sel in [False, True]:
|
||||
train, gen = fetch_IFCB(single_sample_train=True, for_model_selection=mod_sel)
|
||||
train.stats()
|
||||
n_classes = train.n_classes
|
||||
train = train.sampling(100, *F.uniform_prevalence(n_classes))
|
||||
q = self.new_quantifier()
|
||||
q.fit(train)
|
||||
self._check_samples(gen, q, max_samples_test=5)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dataset_name', LEQUA2022_TASKS)
|
||||
def test_fetch_lequa2022(dataset_name):
|
||||
train, gen_val, gen_test = fetch_lequa2022(dataset_name)
|
||||
print(train.stats())
|
||||
print('Val:', gen_val.total())
|
||||
print('Test:', gen_test.total())
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
|
@ -15,9 +15,9 @@ class HierarchyTestCase(unittest.TestCase):
|
|||
|
||||
def test_inspect_aggregative(self):
|
||||
|
||||
import quapy.method.aggregative as aggregative
|
||||
import quapy.method.aggregative as methods
|
||||
|
||||
members = inspect.getmembers(aggregative)
|
||||
members = inspect.getmembers(methods)
|
||||
classes = set([cls for name, cls in members if inspect.isclass(cls)])
|
||||
quantifiers = [cls for cls in classes if issubclass(cls, BaseQuantifier)]
|
||||
quantifiers = [cls for cls in quantifiers if issubclass(cls, AggregativeQuantifier)]
|
||||
|
@ -31,25 +31,6 @@ class HierarchyTestCase(unittest.TestCase):
|
|||
for m in BINARY_METHODS:
|
||||
self.assertEqual(isinstance(m(lr), BinaryQuantifier), True)
|
||||
|
||||
def test_inspect_binary(self):
|
||||
|
||||
import quapy.method.base as base
|
||||
import quapy.method.aggregative as aggregative
|
||||
import quapy.method.non_aggregative as non_aggregative
|
||||
import quapy.method.meta as meta
|
||||
|
||||
members = inspect.getmembers(base)
|
||||
members+= inspect.getmembers(aggregative)
|
||||
members += inspect.getmembers(non_aggregative)
|
||||
members += inspect.getmembers(meta)
|
||||
classes = set([cls for name, cls in members if inspect.isclass(cls)])
|
||||
quantifiers = [cls for cls in classes if issubclass(cls, BaseQuantifier)]
|
||||
quantifiers = [cls for cls in quantifiers if issubclass(cls, BinaryQuantifier)]
|
||||
quantifiers = [cls for cls in quantifiers if not inspect.isabstract(cls) ]
|
||||
|
||||
for cls in quantifiers:
|
||||
self.assertIn(cls, BINARY_METHODS)
|
||||
|
||||
def test_probabilistic(self):
|
||||
lr = LogisticRegression()
|
||||
for m in [CC(lr), ACC(lr)]:
|
||||
|
|
|
@ -1,234 +1,123 @@
|
|||
import numpy as np
|
||||
import pytest
|
||||
import itertools
|
||||
import unittest
|
||||
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
from sklearn.svm import LinearSVC
|
||||
|
||||
import method.aggregative
|
||||
import quapy as qp
|
||||
from quapy.model_selection import GridSearchQ
|
||||
from quapy.method.base import BinaryQuantifier
|
||||
from quapy.data import Dataset, LabelledCollection
|
||||
from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS
|
||||
from quapy.method.aggregative import ACC
|
||||
from quapy.method.meta import Ensemble
|
||||
from quapy.protocol import APP
|
||||
from quapy.method.aggregative import DMy
|
||||
from quapy.method.meta import MedianEstimator
|
||||
|
||||
# datasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True), id='hcr'),
|
||||
# pytest.param(qp.datasets.fetch_UCIDataset('ionosphere'), id='ionosphere')]
|
||||
|
||||
tinydatasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True).reduce(), id='tiny_hcr'),
|
||||
pytest.param(qp.datasets.fetch_UCIBinaryDataset('ionosphere').reduce(), id='tiny_ionosphere')]
|
||||
|
||||
learners = [LogisticRegression, LinearSVC]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dataset', tinydatasets)
|
||||
@pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS)
|
||||
@pytest.mark.parametrize('learner', learners)
|
||||
def test_aggregative_methods(dataset: Dataset, aggregative_method, learner):
|
||||
model = aggregative_method(learner())
|
||||
|
||||
if isinstance(model, BinaryQuantifier) and not dataset.binary:
|
||||
print(f'skipping the test of binary model {type(model)} on non-binary dataset {dataset}')
|
||||
return
|
||||
|
||||
model.fit(dataset.training)
|
||||
|
||||
estim_prevalences = model.quantify(dataset.test.instances)
|
||||
|
||||
true_prevalences = dataset.test.prevalence()
|
||||
error = qp.error.mae(true_prevalences, estim_prevalences)
|
||||
|
||||
assert type(error) == np.float64
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dataset', tinydatasets)
|
||||
@pytest.mark.parametrize('non_aggregative_method', NON_AGGREGATIVE_METHODS)
|
||||
def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method):
|
||||
model = non_aggregative_method()
|
||||
|
||||
if isinstance(model, BinaryQuantifier) and not dataset.binary:
|
||||
print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
|
||||
return
|
||||
|
||||
model.fit(dataset.training)
|
||||
|
||||
estim_prevalences = model.quantify(dataset.test.instances)
|
||||
|
||||
true_prevalences = dataset.test.prevalence()
|
||||
error = qp.error.mae(true_prevalences, estim_prevalences)
|
||||
|
||||
assert type(error) == np.float64
|
||||
|
||||
|
||||
@pytest.mark.parametrize('base_method', [method.aggregative.ACC, method.aggregative.PACC])
|
||||
@pytest.mark.parametrize('learner', [LogisticRegression])
|
||||
@pytest.mark.parametrize('dataset', tinydatasets)
|
||||
@pytest.mark.parametrize('policy', Ensemble.VALID_POLICIES)
|
||||
def test_ensemble_method(base_method, learner, dataset: Dataset, policy):
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 20
|
||||
|
||||
base_quantifier=base_method(learner())
|
||||
|
||||
if not dataset.binary and policy=='ds':
|
||||
print(f'skipping the test of binary policy ds on non-binary dataset {dataset}')
|
||||
return
|
||||
|
||||
model = Ensemble(quantifier=base_quantifier, size=3, policy=policy, n_jobs=-1)
|
||||
|
||||
model.fit(dataset.training)
|
||||
|
||||
estim_prevalences = model.quantify(dataset.test.instances)
|
||||
|
||||
true_prevalences = dataset.test.prevalence()
|
||||
error = qp.error.mae(true_prevalences, estim_prevalences)
|
||||
|
||||
assert type(error) == np.float64
|
||||
|
||||
|
||||
def test_quanet_method():
|
||||
try:
|
||||
import quapy.classification.neural
|
||||
except ModuleNotFoundError:
|
||||
print('skipping QuaNet test due to missing torch package')
|
||||
return
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
|
||||
# load the kindle dataset as text, and convert words to numerical indexes
|
||||
dataset = qp.datasets.fetch_reviews('kindle', pickle=True).reduce(200, 200)
|
||||
qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
|
||||
|
||||
from quapy.classification.neural import CNNnet
|
||||
cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes)
|
||||
|
||||
from quapy.classification.neural import NeuralClassifierTrainer
|
||||
learner = NeuralClassifierTrainer(cnn, device='cuda')
|
||||
|
||||
from quapy.method.meta import QuaNet
|
||||
model = QuaNet(learner, device='cuda')
|
||||
|
||||
if isinstance(model, BinaryQuantifier) and not dataset.binary:
|
||||
print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
|
||||
return
|
||||
|
||||
model.fit(dataset.training)
|
||||
|
||||
estim_prevalences = model.quantify(dataset.test.instances)
|
||||
|
||||
true_prevalences = dataset.test.prevalence()
|
||||
error = qp.error.mae(true_prevalences, estim_prevalences)
|
||||
|
||||
assert type(error) == np.float64
|
||||
|
||||
|
||||
def test_str_label_names():
|
||||
model = qp.method.aggregative.CC(LogisticRegression())
|
||||
|
||||
dataset = qp.datasets.fetch_reviews('imdb', pickle=True)
|
||||
dataset = Dataset(dataset.training.sampling(1000, *dataset.training.prevalence()),
|
||||
dataset.test.sampling(1000, 0.25, 0.75))
|
||||
qp.data.preprocessing.text2tfidf(dataset, min_df=5, inplace=True)
|
||||
|
||||
np.random.seed(0)
|
||||
model.fit(dataset.training)
|
||||
|
||||
int_estim_prevalences = model.quantify(dataset.test.instances)
|
||||
true_prevalences = dataset.test.prevalence()
|
||||
|
||||
error = qp.error.mae(true_prevalences, int_estim_prevalences)
|
||||
assert type(error) == np.float64
|
||||
|
||||
dataset_str = Dataset(LabelledCollection(dataset.training.instances,
|
||||
['one' if label == 1 else 'zero' for label in dataset.training.labels]),
|
||||
LabelledCollection(dataset.test.instances,
|
||||
['one' if label == 1 else 'zero' for label in dataset.test.labels]))
|
||||
assert all(dataset_str.training.classes_ == dataset_str.test.classes_), 'wrong indexation'
|
||||
np.random.seed(0)
|
||||
model.fit(dataset_str.training)
|
||||
|
||||
str_estim_prevalences = model.quantify(dataset_str.test.instances)
|
||||
true_prevalences = dataset_str.test.prevalence()
|
||||
|
||||
error = qp.error.mae(true_prevalences, str_estim_prevalences)
|
||||
assert type(error) == np.float64
|
||||
|
||||
print(true_prevalences)
|
||||
print(int_estim_prevalences)
|
||||
print(str_estim_prevalences)
|
||||
|
||||
np.testing.assert_almost_equal(int_estim_prevalences[1],
|
||||
str_estim_prevalences[list(model.classes_).index('one')])
|
||||
|
||||
# helper
|
||||
def __fit_test(quantifier, train, test):
|
||||
quantifier.fit(train)
|
||||
test_samples = APP(test)
|
||||
true_prevs, estim_prevs = qp.evaluation.prediction(quantifier, test_samples)
|
||||
return qp.error.mae(true_prevs, estim_prevs), estim_prevs
|
||||
|
||||
|
||||
def test_median_meta():
|
||||
"""
|
||||
This test compares the performance of the MedianQuantifier with respect to computing the median of the predictions
|
||||
of a differently parameterized quantifier. We use the DistributionMatching base quantifier and the median is
|
||||
computed across different values of nbins
|
||||
"""
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
|
||||
# grid of values
|
||||
nbins_grid = list(range(2, 11))
|
||||
|
||||
dataset = 'kindle'
|
||||
train, test = qp.datasets.fetch_reviews(dataset, tfidf=True, min_df=10).train_test
|
||||
prevs = []
|
||||
errors = []
|
||||
for nbins in nbins_grid:
|
||||
with qp.util.temp_seed(0):
|
||||
q = DMy(LogisticRegression(), nbins=nbins)
|
||||
mae, estim_prevs = __fit_test(q, train, test)
|
||||
prevs.append(estim_prevs)
|
||||
errors.append(mae)
|
||||
print(f'{dataset} DistributionMatching(nbins={nbins}) got MAE {mae:.4f}')
|
||||
prevs = np.asarray(prevs)
|
||||
mae = np.mean(errors)
|
||||
print(f'\tMAE={mae:.4f}')
|
||||
|
||||
q = DMy(LogisticRegression())
|
||||
q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
|
||||
median_mae, prev = __fit_test(q, train, test)
|
||||
print(f'\tMAE={median_mae:.4f}')
|
||||
|
||||
np.testing.assert_almost_equal(np.median(prevs, axis=0), prev)
|
||||
assert median_mae < mae, 'the median-based quantifier provided a higher error...'
|
||||
|
||||
|
||||
def test_median_meta_modsel():
|
||||
"""
|
||||
This test checks the median-meta quantifier with model selection
|
||||
"""
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 100
|
||||
|
||||
dataset = 'kindle'
|
||||
train, test = qp.datasets.fetch_reviews(dataset, tfidf=True, min_df=10).train_test
|
||||
train, val = train.split_stratified(random_state=0)
|
||||
|
||||
nbins_grid = [2, 4, 5, 10, 15]
|
||||
|
||||
q = DMy(LogisticRegression())
|
||||
q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
|
||||
median_mae, _ = __fit_test(q, train, test)
|
||||
print(f'\tMAE={median_mae:.4f}')
|
||||
|
||||
q = DMy(LogisticRegression())
|
||||
lr_params = {'classifier__C': np.logspace(-1, 1, 3)}
|
||||
q = MedianEstimator(q, param_grid={'nbins': nbins_grid}, random_state=0, n_jobs=-1)
|
||||
q = GridSearchQ(q, param_grid=lr_params, protocol=APP(val), n_jobs=-1)
|
||||
optimized_median_ave, _ = __fit_test(q, train, test)
|
||||
print(f'\tMAE={optimized_median_ave:.4f}')
|
||||
|
||||
assert optimized_median_ave < median_mae, "the optimized method yielded worse performance..."
|
||||
from quapy.method import AGGREGATIVE_METHODS, BINARY_METHODS, NON_AGGREGATIVE_METHODS
|
||||
from quapy.functional import check_prevalence_vector
|
||||
|
||||
# a random selection of composed methods to test the qunfold integration
|
||||
from quapy.method.composable import (
|
||||
ComposableQuantifier,
|
||||
LeastSquaresLoss,
|
||||
HellingerSurrogateLoss,
|
||||
ClassTransformer,
|
||||
HistogramTransformer,
|
||||
CVClassifier,
|
||||
)
|
||||
COMPOSABLE_METHODS = [
|
||||
ComposableQuantifier( # ACC
|
||||
LeastSquaresLoss(),
|
||||
ClassTransformer(CVClassifier(LogisticRegression()))
|
||||
),
|
||||
ComposableQuantifier( # HDy
|
||||
HellingerSurrogateLoss(),
|
||||
HistogramTransformer(
|
||||
3, # 3 bins per class
|
||||
preprocessor = ClassTransformer(CVClassifier(LogisticRegression()))
|
||||
)
|
||||
),
|
||||
]
|
||||
|
||||
class TestMethods(unittest.TestCase):
|
||||
|
||||
tiny_dataset_multiclass = qp.datasets.fetch_UCIMulticlassDataset('academic-success').reduce(n_test=10)
|
||||
tiny_dataset_binary = qp.datasets.fetch_UCIBinaryDataset('ionosphere').reduce(n_test=10)
|
||||
datasets = [tiny_dataset_binary, tiny_dataset_multiclass]
|
||||
|
||||
def test_aggregative(self):
|
||||
for dataset in TestMethods.datasets:
|
||||
learner = LogisticRegression()
|
||||
learner.fit(*dataset.training.Xy)
|
||||
|
||||
for model in AGGREGATIVE_METHODS:
|
||||
if not dataset.binary and model in BINARY_METHODS:
|
||||
print(f'skipping the test of binary model {model.__name__} on multiclass dataset {dataset.name}')
|
||||
continue
|
||||
|
||||
q = model(learner)
|
||||
print('testing', q)
|
||||
q.fit(dataset.training, fit_classifier=False)
|
||||
estim_prevalences = q.quantify(dataset.test.X)
|
||||
self.assertTrue(check_prevalence_vector(estim_prevalences))
|
||||
|
||||
def test_non_aggregative(self):
|
||||
for dataset in TestMethods.datasets:
|
||||
|
||||
for model in NON_AGGREGATIVE_METHODS:
|
||||
if not dataset.binary and model in BINARY_METHODS:
|
||||
print(f'skipping the test of binary model {model.__name__} on multiclass dataset {dataset.name}')
|
||||
continue
|
||||
|
||||
q = model()
|
||||
print(f'testing {q} on dataset {dataset.name}')
|
||||
q.fit(dataset.training)
|
||||
estim_prevalences = q.quantify(dataset.test.X)
|
||||
self.assertTrue(check_prevalence_vector(estim_prevalences))
|
||||
|
||||
def test_ensembles(self):
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 10
|
||||
|
||||
base_quantifier = ACC(LogisticRegression())
|
||||
for dataset, policy in itertools.product(TestMethods.datasets, Ensemble.VALID_POLICIES):
|
||||
if not dataset.binary and policy == 'ds':
|
||||
print(f'skipping the test of binary policy ds on non-binary dataset {dataset}')
|
||||
continue
|
||||
|
||||
print(f'testing {base_quantifier} on dataset {dataset.name} with {policy=}')
|
||||
ensemble = Ensemble(quantifier=base_quantifier, size=3, policy=policy, n_jobs=-1)
|
||||
ensemble.fit(dataset.training)
|
||||
estim_prevalences = ensemble.quantify(dataset.test.instances)
|
||||
self.assertTrue(check_prevalence_vector(estim_prevalences))
|
||||
|
||||
def test_quanet(self):
|
||||
try:
|
||||
import quapy.classification.neural
|
||||
except ModuleNotFoundError:
|
||||
print('the torch package is not installed; skipping unit test for QuaNet')
|
||||
return
|
||||
|
||||
qp.environ['SAMPLE_SIZE'] = 10
|
||||
|
||||
# load the kindle dataset as text, and convert words to numerical indexes
|
||||
dataset = qp.datasets.fetch_reviews('kindle', pickle=True).reduce()
|
||||
qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
|
||||
|
||||
from quapy.classification.neural import CNNnet
|
||||
cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes)
|
||||
|
||||
from quapy.classification.neural import NeuralClassifierTrainer
|
||||
learner = NeuralClassifierTrainer(cnn, device='cpu')
|
||||
|
||||
from quapy.method.meta import QuaNet
|
||||
model = QuaNet(learner, device='cpu', n_epochs=2, tr_iter_per_poch=10, va_iter_per_poch=10, patience=2)
|
||||
|
||||
model.fit(dataset.training)
|
||||
estim_prevalences = model.quantify(dataset.test.instances)
|
||||
self.assertTrue(check_prevalence_vector(estim_prevalences))
|
||||
|
||||
def test_composable(self):
|
||||
for dataset in TestMethods.datasets:
|
||||
for q in COMPOSABLE_METHODS:
|
||||
print('testing', q)
|
||||
q.fit(dataset.training)
|
||||
estim_prevalences = q.quantify(dataset.test.X)
|
||||
self.assertTrue(check_prevalence_vector(estim_prevalences))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
|
@ -2,7 +2,6 @@ import unittest
|
|||
|
||||
import numpy as np
|
||||
from sklearn.linear_model import LogisticRegression
|
||||
from sklearn.svm import SVC
|
||||
|
||||
import quapy as qp
|
||||
from quapy.method.aggregative import PACC
|
||||
|
@ -14,13 +13,16 @@ import time
|
|||
class ModselTestCase(unittest.TestCase):
|
||||
|
||||
def test_modsel(self):
|
||||
"""
|
||||
Checks whether a model selection exploration takes a good hyperparameter
|
||||
"""
|
||||
|
||||
q = PACC(LogisticRegression(random_state=1, max_iter=5000))
|
||||
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10)
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10).reduce(random_state=1)
|
||||
training, validation = data.training.split_stratified(0.7, random_state=1)
|
||||
|
||||
param_grid = {'classifier__C': np.logspace(-3,3,7)}
|
||||
param_grid = {'classifier__C': [0.000001, 10.]}
|
||||
app = APP(validation, sample_size=100, random_state=1)
|
||||
q = GridSearchQ(
|
||||
q, param_grid, protocol=app, error='mae', refit=True, timeout=-1, verbose=True
|
||||
|
@ -32,54 +34,40 @@ class ModselTestCase(unittest.TestCase):
|
|||
self.assertEqual(q.best_model().get_params()['classifier__C'], 10.0)
|
||||
|
||||
def test_modsel_parallel(self):
|
||||
"""
|
||||
Checks whether a parallelized model selection actually is faster than a sequential exploration but
|
||||
obtains the same optimal parameters
|
||||
"""
|
||||
|
||||
q = PACC(LogisticRegression(random_state=1, max_iter=5000))
|
||||
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10)
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10).reduce(n_train=500, random_state=1)
|
||||
training, validation = data.training.split_stratified(0.7, random_state=1)
|
||||
# test = data.test
|
||||
|
||||
param_grid = {'classifier__C': np.logspace(-3,3,7)}
|
||||
app = APP(validation, sample_size=100, random_state=1)
|
||||
q = GridSearchQ(
|
||||
|
||||
print('starting model selection in sequential exploration')
|
||||
tinit = time.time()
|
||||
modsel = GridSearchQ(
|
||||
q, param_grid, protocol=app, error='mae', refit=True, timeout=-1, n_jobs=1, verbose=True
|
||||
).fit(training)
|
||||
tend_seq = time.time()-tinit
|
||||
best_c_seq = modsel.best_params_['classifier__C']
|
||||
print(f'[done] took {tend_seq:.2f}s best C = {best_c_seq}')
|
||||
|
||||
print('starting model selection in parallel exploration')
|
||||
tinit = time.time()
|
||||
modsel = GridSearchQ(
|
||||
q, param_grid, protocol=app, error='mae', refit=True, timeout=-1, n_jobs=-1, verbose=True
|
||||
).fit(training)
|
||||
print('best params', q.best_params_)
|
||||
print('best score', q.best_score_)
|
||||
tend_par = time.time() - tinit
|
||||
best_c_par = modsel.best_params_['classifier__C']
|
||||
print(f'[done] took {tend_par:.2f}s best C = {best_c_par}')
|
||||
|
||||
self.assertEqual(q.best_params_['classifier__C'], 10.0)
|
||||
self.assertEqual(q.best_model().get_params()['classifier__C'], 10.0)
|
||||
self.assertEqual(best_c_seq, best_c_par)
|
||||
self.assertLess(tend_par, tend_seq)
|
||||
|
||||
def test_modsel_parallel_speedup(self):
|
||||
class SlowLR(LogisticRegression):
|
||||
def fit(self, X, y, sample_weight=None):
|
||||
time.sleep(1)
|
||||
return super(SlowLR, self).fit(X, y, sample_weight)
|
||||
|
||||
q = PACC(SlowLR(random_state=1, max_iter=5000))
|
||||
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10)
|
||||
training, validation = data.training.split_stratified(0.7, random_state=1)
|
||||
|
||||
param_grid = {'classifier__C': np.logspace(-3, 3, 7)}
|
||||
app = APP(validation, sample_size=100, random_state=1)
|
||||
|
||||
tinit = time.time()
|
||||
GridSearchQ(
|
||||
q, param_grid, protocol=app, error='mae', refit=False, timeout=-1, n_jobs=1, verbose=True
|
||||
).fit(training)
|
||||
tend_nooptim = time.time()-tinit
|
||||
|
||||
tinit = time.time()
|
||||
GridSearchQ(
|
||||
q, param_grid, protocol=app, error='mae', refit=False, timeout=-1, n_jobs=-1, verbose=True
|
||||
).fit(training)
|
||||
tend_optim = time.time() - tinit
|
||||
|
||||
print(f'parallel training took {tend_optim:.4f}s')
|
||||
print(f'sequential training took {tend_nooptim:.4f}s')
|
||||
|
||||
self.assertEqual(tend_optim < (0.5*tend_nooptim), True)
|
||||
|
||||
def test_modsel_timeout(self):
|
||||
|
||||
|
@ -91,11 +79,10 @@ class ModselTestCase(unittest.TestCase):
|
|||
|
||||
q = PACC(SlowLR())
|
||||
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10)
|
||||
data = qp.datasets.fetch_reviews('imdb', tfidf=True, min_df=10).reduce(random_state=1)
|
||||
training, validation = data.training.split_stratified(0.7, random_state=1)
|
||||
# test = data.test
|
||||
|
||||
param_grid = {'classifier__C': np.logspace(-3,3,7)}
|
||||
param_grid = {'classifier__C': np.logspace(-1,1,3)}
|
||||
app = APP(validation, sample_size=100, random_state=1)
|
||||
|
||||
print('Expecting TimeoutError to be raised')
|
||||
|
|
|
@ -8,7 +8,7 @@ from quapy.method.aggregative import PACC
|
|||
import quapy.functional as F
|
||||
|
||||
|
||||
class MyTestCase(unittest.TestCase):
|
||||
class TestReplicability(unittest.TestCase):
|
||||
|
||||
def test_prediction_replicability(self):
|
||||
|
||||
|
@ -26,7 +26,7 @@ class MyTestCase(unittest.TestCase):
|
|||
prev2 = pacc.fit(dataset.training).quantify(dataset.test.X)
|
||||
str_prev2 = strprev(prev2, prec=5)
|
||||
|
||||
self.assertEqual(str_prev1, str_prev2) # add assertion here
|
||||
self.assertEqual(str_prev1, str_prev2)
|
||||
|
||||
|
||||
def test_samping_replicability(self):
|
||||
|
@ -78,7 +78,7 @@ class MyTestCase(unittest.TestCase):
|
|||
|
||||
def test_parallel_replicability(self):
|
||||
|
||||
train, test = qp.datasets.fetch_UCIMulticlassDataset('dry-bean').train_test
|
||||
train, test = qp.datasets.fetch_UCIMulticlassDataset('dry-bean').reduce().train_test
|
||||
|
||||
test = test.sampling(500, *[0.1, 0.0, 0.1, 0.1, 0.2, 0.5, 0.0])
|
||||
|
||||
|
|
|
@ -6,6 +6,9 @@ import pickle
|
|||
import urllib
|
||||
from pathlib import Path
|
||||
from contextlib import ExitStack
|
||||
|
||||
import pandas as pd
|
||||
|
||||
import quapy as qp
|
||||
|
||||
import numpy as np
|
||||
|
@ -228,12 +231,14 @@ def pickled_resource(pickle_path:str, generation_func:callable, *args):
|
|||
return generation_func(*args)
|
||||
else:
|
||||
if os.path.exists(pickle_path):
|
||||
return pickle.load(open(pickle_path, 'rb'))
|
||||
with open(pickle_path, 'rb') as fin:
|
||||
instance = pickle.load(fin)
|
||||
else:
|
||||
instance = generation_func(*args)
|
||||
os.makedirs(str(Path(pickle_path).parent), exist_ok=True)
|
||||
pickle.dump(instance, open(pickle_path, 'wb'), pickle.HIGHEST_PROTOCOL)
|
||||
return instance
|
||||
with open(pickle_path, 'wb') as foo:
|
||||
pickle.dump(instance, foo, pickle.HIGHEST_PROTOCOL)
|
||||
return instance
|
||||
|
||||
|
||||
def _check_sample_size(sample_size):
|
||||
|
@ -246,6 +251,28 @@ def _check_sample_size(sample_size):
|
|||
return sample_size
|
||||
|
||||
|
||||
def load_report(path, as_dict=False):
|
||||
def str2prev_arr(strprev):
|
||||
within = strprev.strip('[]').split()
|
||||
float_list = [float(p) for p in within]
|
||||
float_list[-1] = 1. - sum(float_list[:-1])
|
||||
return np.asarray(float_list)
|
||||
|
||||
df = pd.read_csv(path, index_col=0)
|
||||
df['true-prev'] = df['true-prev'].apply(str2prev_arr)
|
||||
df['estim-prev'] = df['estim-prev'].apply(str2prev_arr)
|
||||
if as_dict:
|
||||
d = {}
|
||||
for col in df.columns.values:
|
||||
vals = df[col].values
|
||||
if col in ['true-prev', 'estim-prev']:
|
||||
vals = np.vstack(vals)
|
||||
d[col] = vals
|
||||
return d
|
||||
else:
|
||||
return df
|
||||
|
||||
|
||||
class EarlyStop:
|
||||
"""
|
||||
A class implementing the early-stopping condition typically used for training neural networks.
|
||||
|
|
5
setup.py
|
@ -113,7 +113,7 @@ setup(
|
|||
|
||||
python_requires='>=3.8, <4',
|
||||
|
||||
install_requires=['scikit-learn', 'pandas', 'tqdm', 'matplotlib', 'joblib', 'xlrd', 'abstention', 'ucimlrepo'],
|
||||
install_requires=['scikit-learn', 'pandas', 'tqdm', 'matplotlib', 'joblib', 'xlrd', 'abstention', 'ucimlrepo', 'certifi'],
|
||||
|
||||
# List additional groups of dependencies here (e.g. development
|
||||
# dependencies). Users will be able to install these using the "extras"
|
||||
|
@ -125,6 +125,9 @@ setup(
|
|||
# projects.
|
||||
extras_require={ # Optional
|
||||
'bayes': ['jax', 'jaxlib', 'numpyro'],
|
||||
'neural': ['torch'],
|
||||
'tests': ['certifi'],
|
||||
'docs' : ['sphinx-rtd-theme', 'myst-parser'],
|
||||
},
|
||||
|
||||
# If there are data files included in your packages that need to be
|
||||
|
|