
some bug fixes here and there

Alejandro Moreo Fernandez 2023-02-13 19:27:48 +01:00
parent 505d2de823
commit c608647475
19 changed files with 408 additions and 338 deletions


@@ -106,8 +106,6 @@
<li><a href="quapy.method.html#quapy.method.aggregative.DistributionMatching.aggregate">(quapy.method.aggregative.DistributionMatching method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.DistributionMatching.aggregate">(quapy.method.aggregative.DistributionMatching method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.DyS.aggregate">(quapy.method.aggregative.DyS method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.DyS.aggregate">(quapy.method.aggregative.DyS method)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.ELM.aggregate">(quapy.method.aggregative.ELM method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.EMQ.aggregate">(quapy.method.aggregative.EMQ method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.EMQ.aggregate">(quapy.method.aggregative.EMQ method)</a>
</li> </li>
@@ -198,8 +196,6 @@
<li><a href="quapy.method.html#quapy.method.aggregative.AggregativeProbabilisticQuantifier.classify">(quapy.method.aggregative.AggregativeProbabilisticQuantifier method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.AggregativeProbabilisticQuantifier.classify">(quapy.method.aggregative.AggregativeProbabilisticQuantifier method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classify">(quapy.method.aggregative.AggregativeQuantifier method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.AggregativeQuantifier.classify">(quapy.method.aggregative.AggregativeQuantifier method)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.ELM.classify">(quapy.method.aggregative.ELM method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.OneVsAllAggregative.classify">(quapy.method.aggregative.OneVsAllAggregative method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.OneVsAllAggregative.classify">(quapy.method.aggregative.OneVsAllAggregative method)</a>
</li> </li>
@@ -283,8 +279,6 @@
<li><a href="quapy.method.html#quapy.method.meta.EEMQ">EEMQ() (in module quapy.method.meta)</a> <li><a href="quapy.method.html#quapy.method.meta.EEMQ">EEMQ() (in module quapy.method.meta)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.meta.EHDy">EHDy() (in module quapy.method.meta)</a> <li><a href="quapy.method.html#quapy.method.meta.EHDy">EHDy() (in module quapy.method.meta)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.ELM">ELM (class in quapy.method.aggregative)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.EMQ.EM">EM() (quapy.method.aggregative.EMQ class method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.EMQ.EM">EM() (quapy.method.aggregative.EMQ class method)</a>
</li> </li>
@@ -307,8 +301,6 @@
<li><a href="quapy.html#quapy.evaluation.evaluation_report">evaluation_report() (in module quapy.evaluation)</a> <li><a href="quapy.html#quapy.evaluation.evaluation_report">evaluation_report() (in module quapy.evaluation)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.ExpectationMaximizationQuantifier">ExpectationMaximizationQuantifier (in module quapy.method.aggregative)</a> <li><a href="quapy.method.html#quapy.method.aggregative.ExpectationMaximizationQuantifier">ExpectationMaximizationQuantifier (in module quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.ExplicitLossMinimisation">ExplicitLossMinimisation (in module quapy.method.aggregative)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@@ -350,8 +342,6 @@
<li><a href="quapy.method.html#quapy.method.aggregative.DistributionMatching.fit">(quapy.method.aggregative.DistributionMatching method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.DistributionMatching.fit">(quapy.method.aggregative.DistributionMatching method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.DyS.fit">(quapy.method.aggregative.DyS method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.DyS.fit">(quapy.method.aggregative.DyS method)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.ELM.fit">(quapy.method.aggregative.ELM method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.EMQ.fit">(quapy.method.aggregative.EMQ method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.EMQ.fit">(quapy.method.aggregative.EMQ method)</a>
</li> </li>
@@ -435,8 +425,6 @@
<li><a href="quapy.method.html#quapy.method.meta.get_probability_distribution">get_probability_distribution() (in module quapy.method.meta)</a> <li><a href="quapy.method.html#quapy.method.meta.get_probability_distribution">get_probability_distribution() (in module quapy.method.meta)</a>
</li> </li>
<li><a href="quapy.html#quapy.util.get_quapy_home">get_quapy_home() (in module quapy.util)</a> <li><a href="quapy.html#quapy.util.get_quapy_home">get_quapy_home() (in module quapy.util)</a>
</li>
<li><a href="quapy.method.html#quapy.method.base.getOneVsAll">getOneVsAll() (in module quapy.method.base)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.aggregative.ACC.getPteCondEstim">getPteCondEstim() (quapy.method.aggregative.ACC class method)</a> <li><a href="quapy.method.html#quapy.method.aggregative.ACC.getPteCondEstim">getPteCondEstim() (quapy.method.aggregative.ACC class method)</a>
@@ -618,9 +606,21 @@
<li><a href="quapy.classification.html#quapy.classification.calibration.NBVSCalibration">NBVSCalibration (class in quapy.classification.calibration)</a> <li><a href="quapy.classification.html#quapy.classification.calibration.NBVSCalibration">NBVSCalibration (class in quapy.classification.calibration)</a>
</li> </li>
<li><a href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer">NeuralClassifierTrainer (class in quapy.classification.neural)</a> <li><a href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer">NeuralClassifierTrainer (class in quapy.classification.neural)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.newELM">newELM() (in module quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.base.newOneVsAll">newOneVsAll() (in module quapy.method.base)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.newSVMAE">newSVMAE() (in module quapy.method.aggregative)</a>
</li> </li>
</ul></td> </ul></td>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="quapy.method.html#quapy.method.aggregative.newSVMKLD">newSVMKLD() (in module quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.newSVMQ">newSVMQ() (in module quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.newSVMRAE">newSVMRAE() (in module quapy.method.aggregative)</a>
</li>
<li><a href="quapy.html#quapy.error.nkld">nkld() (in module quapy.error)</a> <li><a href="quapy.html#quapy.error.nkld">nkld() (in module quapy.error)</a>
</li> </li>
<li><a href="quapy.html#quapy.functional.normalize_prevalence">normalize_prevalence() (in module quapy.functional)</a> <li><a href="quapy.html#quapy.functional.normalize_prevalence">normalize_prevalence() (in module quapy.functional)</a>
@@ -971,8 +971,6 @@
<ul> <ul>
<li><a href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.set_params">(quapy.classification.neural.NeuralClassifierTrainer method)</a> <li><a href="quapy.classification.html#quapy.classification.neural.NeuralClassifierTrainer.set_params">(quapy.classification.neural.NeuralClassifierTrainer method)</a>
</li>
<li><a href="quapy.classification.html#quapy.classification.svmperf.SVMperf.set_params">(quapy.classification.svmperf.SVMperf method)</a>
</li> </li>
<li><a href="quapy.method.html#quapy.method.meta.Ensemble.set_params">(quapy.method.meta.Ensemble method)</a> <li><a href="quapy.method.html#quapy.method.meta.Ensemble.set_params">(quapy.method.meta.Ensemble method)</a>
</li> </li>
@@ -1006,18 +1004,8 @@
</li> </li>
</ul></li> </ul></li>
<li><a href="quapy.html#quapy.functional.strprev">strprev() (in module quapy.functional)</a> <li><a href="quapy.html#quapy.functional.strprev">strprev() (in module quapy.functional)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.SVMAE">SVMAE (class in quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.SVMKLD">SVMKLD (class in quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.SVMNKLD">SVMNKLD (class in quapy.method.aggregative)</a>
</li> </li>
<li><a href="quapy.classification.html#quapy.classification.svmperf.SVMperf">SVMperf (class in quapy.classification.svmperf)</a> <li><a href="quapy.classification.html#quapy.classification.svmperf.SVMperf">SVMperf (class in quapy.classification.svmperf)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.SVMQ">SVMQ (class in quapy.method.aggregative)</a>
</li>
<li><a href="quapy.method.html#quapy.method.aggregative.SVMRAE">SVMRAE (class in quapy.method.aggregative)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>

Binary file not shown.


@@ -801,7 +801,7 @@ applied, meaning that if the longest document in the batch is shorter than
<span id="quapy-classification-svmperf"></span><h2>quapy.classification.svmperf<a class="headerlink" href="#module-quapy.classification.svmperf" title="Permalink to this heading"></a></h2> <span id="quapy-classification-svmperf"></span><h2>quapy.classification.svmperf<a class="headerlink" href="#module-quapy.classification.svmperf" title="Permalink to this heading"></a></h2>
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf"> <dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.svmperf.</span></span><span class="sig-name descname"><span class="pre">SVMperf</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.01</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loss</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'01'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.classification.svmperf.SVMperf" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.classification.svmperf.</span></span><span class="sig-name descname"><span class="pre">SVMperf</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.01</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">verbose</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loss</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'01'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_folder</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.classification.svmperf.SVMperf" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">BaseEstimator</span></code>, <code class="xref py py-class docutils literal notranslate"><span class="pre">ClassifierMixin</span></code></p> <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">BaseEstimator</span></code>, <code class="xref py py-class docutils literal notranslate"><span class="pre">ClassifierMixin</span></code></p>
<p>A wrapper for the <a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">SVM-perf package</a> by Thorsten Joachims. <p>A wrapper for the <a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">SVM-perf package</a> by Thorsten Joachims.
When using losses for quantification, the source code has to be patched. See When using losses for quantification, the source code has to be patched. See
@@ -821,6 +821,8 @@ for further details.</p>
<li><p><strong>C</strong> trade-off between training error and margin (default 0.01)</p></li> <li><p><strong>C</strong> trade-off between training error and margin (default 0.01)</p></li>
<li><p><strong>verbose</strong> set to True to print svm-perf std outputs</p></li> <li><p><strong>verbose</strong> set to True to print svm-perf std outputs</p></li>
<li><p><strong>loss</strong> the loss to optimize for. Available losses are “01”, “f1”, “kld”, “nkld”, “q”, “qacc”, “qf1”, “qgm”, “mae”, “mrae”.</p></li> <li><p><strong>loss</strong> the loss to optimize for. Available losses are “01”, “f1”, “kld”, “nkld”, “q”, “qacc”, “qf1”, “qgm”, “mae”, “mrae”.</p></li>
<li><p><strong>host_folder</strong> directory where to store the trained model; set to None (default) for using a tmp directory
(temporal directories are automatically deleted)</p></li>
</ul> </ul>
</dd> </dd>
</dl> </dl>
@@ -873,17 +875,6 @@ instances in <cite>X</cite></p>
</dl> </dl>
</dd></dl> </dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.set_params">
<span class="sig-name descname"><span class="pre">set_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">parameters</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.set_params" title="Permalink to this definition"></a></dt>
<dd><p>Set the hyper-parameters for svm-perf. Currently, only the <cite>C</cite> parameter is supported</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>parameters</strong> a <cite>**kwargs</cite> dictionary <cite>{C: &lt;float&gt;}</cite></p>
</dd>
</dl>
</dd></dl>
<dl class="py attribute"> <dl class="py attribute">
<dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.valid_losses"> <dt class="sig sig-object py" id="quapy.classification.svmperf.SVMperf.valid_losses">
<span class="sig-name descname"><span class="pre">valid_losses</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">{'01':</span> <span class="pre">0,</span> <span class="pre">'f1':</span> <span class="pre">1,</span> <span class="pre">'kld':</span> <span class="pre">12,</span> <span class="pre">'mae':</span> <span class="pre">26,</span> <span class="pre">'mrae':</span> <span class="pre">27,</span> <span class="pre">'nkld':</span> <span class="pre">13,</span> <span class="pre">'q':</span> <span class="pre">22,</span> <span class="pre">'qacc':</span> <span class="pre">23,</span> <span class="pre">'qf1':</span> <span class="pre">24,</span> <span class="pre">'qgm':</span> <span class="pre">25}</span></em><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.valid_losses" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">valid_losses</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">{'01':</span> <span class="pre">0,</span> <span class="pre">'f1':</span> <span class="pre">1,</span> <span class="pre">'kld':</span> <span class="pre">12,</span> <span class="pre">'mae':</span> <span class="pre">26,</span> <span class="pre">'mrae':</span> <span class="pre">27,</span> <span class="pre">'nkld':</span> <span class="pre">13,</span> <span class="pre">'q':</span> <span class="pre">22,</span> <span class="pre">'qacc':</span> <span class="pre">23,</span> <span class="pre">'qf1':</span> <span class="pre">24,</span> <span class="pre">'qgm':</span> <span class="pre">25}</span></em><a class="headerlink" href="#quapy.classification.svmperf.SVMperf.valid_losses" title="Permalink to this definition"></a></dt>


@@ -550,13 +550,13 @@ in the grid multiplied by <cite>repeat</cite></p>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.APP.sample"> <dt class="sig sig-object py" id="quapy.protocol.APP.sample">
<span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.sample" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.sample" title="Permalink to this definition"></a></dt>
<dd><p>Extract one sample determined by the given parameters</p> <dd><p>Realizes the sample given the index of the instances.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt> <dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>params</strong> all the necessary parameters to generate a sample</p> <dd class="field-odd"><p><strong>index</strong> indexes of the instances to select</p>
</dd> </dd>
<dt class="field-even">Returns<span class="colon">:</span></dt> <dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>one sample (the same sample has to be generated for the same parameters)</p> <dd class="field-even"><p>an instance of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -564,10 +564,10 @@ in the grid multiplied by <cite>repeat</cite></p>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.APP.samples_parameters"> <dt class="sig sig-object py" id="quapy.protocol.APP.samples_parameters">
<span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.samples_parameters" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.samples_parameters" title="Permalink to this definition"></a></dt>
<dd><p>This function has to return all the necessary parameters to replicate the samples</p> <dd><p>Return all the necessary parameters to replicate the samples as according to the APP protocol.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>a list of parameters, each of which serves to deterministically generate a sample</p> <dd class="field-odd"><p>a list of indexes that realize the APP sampling</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -575,10 +575,10 @@ in the grid multiplied by <cite>repeat</cite></p>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.APP.total"> <dt class="sig sig-object py" id="quapy.protocol.APP.total">
<span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.total" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.APP.total" title="Permalink to this definition"></a></dt>
<dd><p>Indicates the total number of samples that the protocol generates.</p> <dd><p>Returns the number of samples that will be generated</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>The number of samples to generate if known, or <cite>None</cite> otherwise.</p> <dd class="field-odd"><p>int</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
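
The sample/samples_parameters/total triplet documented above is what realizes the APP protocol; in typical use one simply iterates the protocol object. A short sketch on toy data (keyword names such as n_prevalences, repeats and random_state reflect the API as recalled here, not this diff):

import numpy as np
from quapy.data.base import LabelledCollection
from quapy.protocol import APP

# toy binary labelled collection, just to drive the protocol
test = LabelledCollection(np.random.rand(1000, 5), np.random.randint(0, 2, size=1000))

prot = APP(test, sample_size=100, n_prevalences=11, repeats=1, random_state=0)
print(prot.total())                # number of samples the protocol will generate
for instances, prev in prot():     # default 'sample_prev' collation: (instances, prevalence) pairs
    pass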
@@ -666,10 +666,11 @@ the sequence will be consistent every time the protocol is called.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt> <dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple"> <dd class="field-odd"><ul class="simple">
<li><p><strong>domainA</strong> </p></li> <li><p><strong>domainA</strong> one domain, an object of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p></li>
<li><p><strong>domainB</strong> </p></li> <li><p><strong>domainB</strong> another domain, an object of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p></li>
<li><p><strong>sample_size</strong> </p></li> <li><p><strong>sample_size</strong> integer, the number of instances in each sample; if None (default) then it is taken from
<li><p><strong>repeats</strong> </p></li> qp.environ[“SAMPLE_SIZE”]. If this is not set, a ValueError exception is raised.</p></li>
<li><p><strong>repeats</strong> int, number of samples to draw for every mixture rate</p></li>
<li><p><strong>prevalence</strong> the prevalence to preserv along the mixtures. If specified, should be an array containing <li><p><strong>prevalence</strong> the prevalence to preserv along the mixtures. If specified, should be an array containing
one prevalence value (positive float) for each class and summing up to one. If not specified, the prevalence one prevalence value (positive float) for each class and summing up to one. If not specified, the prevalence
will be taken from the domain A (default).</p></li> will be taken from the domain A (default).</p></li>
@@ -684,13 +685,13 @@ will be the same every time the protocol is called)</p></li>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.DomainMixer.sample"> <dt class="sig sig-object py" id="quapy.protocol.DomainMixer.sample">
<span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">indexes</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.sample" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">indexes</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.sample" title="Permalink to this definition"></a></dt>
<dd><p>Extract one sample determined by the given parameters</p> <dd><p>Realizes the sample given a pair of indexes of the instances from A and B.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt> <dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>params</strong> all the necessary parameters to generate a sample</p> <dd class="field-odd"><p><strong>indexes</strong> indexes of the instances to select from A and B</p>
</dd> </dd>
<dt class="field-even">Returns<span class="colon">:</span></dt> <dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>one sample (the same sample has to be generated for the same parameters)</p> <dd class="field-even"><p>an instance of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -698,10 +699,10 @@ will be the same every time the protocol is called)</p></li>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.DomainMixer.samples_parameters"> <dt class="sig sig-object py" id="quapy.protocol.DomainMixer.samples_parameters">
<span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.samples_parameters" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.samples_parameters" title="Permalink to this definition"></a></dt>
<dd><p>This function has to return all the necessary parameters to replicate the samples</p> <dd><p>Return all the necessary parameters to replicate the samples as according to the this protocol.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>a list of parameters, each of which serves to deterministically generate a sample</p> <dd class="field-odd"><p>a list of zipped indexes (from A and B) that realize the sampling</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -709,10 +710,10 @@ will be the same every time the protocol is called)</p></li>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.DomainMixer.total"> <dt class="sig sig-object py" id="quapy.protocol.DomainMixer.total">
<span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.total" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.DomainMixer.total" title="Permalink to this definition"></a></dt>
<dd><p>Indicates the total number of samples that the protocol generates.</p> <dd><p>Returns the number of samples that will be generated (equals to “repeats * mixture_points”)</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>The number of samples to generate if known, or <cite>None</cite> otherwise.</p> <dd class="field-odd"><p>int</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
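
A hedged sketch of DomainMixer usage, following the parameter descriptions above (domainA, domainB, sample_size, repeats, prevalence); the mixture_points keyword is an assumption inferred from the total() description ("repeats * mixture_points"), and the argument names may differ:

import numpy as np
from quapy.data.base import LabelledCollection
from quapy.protocol import DomainMixer

# two toy domains sharing the same feature space
domainA = LabelledCollection(np.random.rand(500, 5), np.random.randint(0, 2, size=500))
domainB = LabelledCollection(np.random.rand(500, 5), np.random.randint(0, 2, size=500))

mixer = DomainMixer(domainA, domainB, sample_size=100, repeats=2, prevalence=None, mixture_points=11)
print(mixer.total())               # repeats * mixture_points samples
for instances, prev in mixer():    # each sample mixes instances drawn from A and B
    pass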
@@ -742,13 +743,13 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.NPP.sample"> <dt class="sig sig-object py" id="quapy.protocol.NPP.sample">
<span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.sample" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.sample" title="Permalink to this definition"></a></dt>
<dd><p>Extract one sample determined by the given parameters</p> <dd><p>Realizes the sample given the index of the instances.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt> <dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>params</strong> all the necessary parameters to generate a sample</p> <dd class="field-odd"><p><strong>index</strong> indexes of the instances to select</p>
</dd> </dd>
<dt class="field-even">Returns<span class="colon">:</span></dt> <dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>one sample (the same sample has to be generated for the same parameters)</p> <dd class="field-even"><p>an instance of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -756,10 +757,10 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.NPP.samples_parameters"> <dt class="sig sig-object py" id="quapy.protocol.NPP.samples_parameters">
<span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.samples_parameters" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.samples_parameters" title="Permalink to this definition"></a></dt>
<dd><p>This function has to return all the necessary parameters to replicate the samples</p> <dd><p>Return all the necessary parameters to replicate the samples as according to the NPP protocol.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>a list of parameters, each of which serves to deterministically generate a sample</p> <dd class="field-odd"><p>a list of indexes that realize the NPP sampling</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -767,10 +768,10 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.NPP.total"> <dt class="sig sig-object py" id="quapy.protocol.NPP.total">
<span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.total" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.NPP.total" title="Permalink to this definition"></a></dt>
<dd><p>Indicates the total number of samples that the protocol generates.</p> <dd><p>Returns the number of samples that will be generated (equals to “repeats”)</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>The number of samples to generate if known, or <cite>None</cite> otherwise.</p> <dd class="field-odd"><p>int</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -781,6 +782,7 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol"> <dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.protocol.</span></span><span class="sig-name descname"><span class="pre">OnLabelledCollectionProtocol</span></span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.protocol.</span></span><span class="sig-name descname"><span class="pre">OnLabelledCollectionProtocol</span></span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p> <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
<p>Protocols that generate samples from a <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code> object.</p>
<dl class="py attribute"> <dl class="py attribute">
<dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES"> <dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES">
<span class="sig-name descname"><span class="pre">RETURN_TYPES</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['sample_prev',</span> <span class="pre">'labelled_collection']</span></em><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">RETURN_TYPES</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">['sample_prev',</span> <span class="pre">'labelled_collection']</span></em><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES" title="Permalink to this definition"></a></dt>
@@ -789,17 +791,52 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.get_collator"> <dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.get_collator">
<em class="property"><span class="pre">classmethod</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">get_collator</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">return_type</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'sample_prev'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.get_collator" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">classmethod</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">get_collator</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">return_type</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'sample_prev'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.get_collator" title="Permalink to this definition"></a></dt>
<dd></dd></dl> <dd><p>Returns a collator function, i.e., a function that prepares the yielded data</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>return_type</strong> either sample_prev (default) if the collator is requested to yield tuples of
<cite>(sample, prevalence)</cite>, or labelled_collection when it is requested to yield instances of
<code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>the collator function (a callable function that takes as input an instance of
<code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code>)</p>
</dd>
</dl>
</dd></dl>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection"> <dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection">
<span class="sig-name descname"><span class="pre">get_labelled_collection</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">get_labelled_collection</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection" title="Permalink to this definition"></a></dt>
<dd></dd></dl> <dd><p>Returns the labelled collection on which this protocol acts.</p>
<dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>an object of type <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd>
</dl>
</dd></dl>
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances"> <dt class="sig sig-object py" id="quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances">
<span class="sig-name descname"><span class="pre">on_preclassified_instances</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">pre_classifications</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">in_place</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">on_preclassified_instances</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">pre_classifications</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">in_place</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances" title="Permalink to this definition"></a></dt>
<dd></dd></dl> <dd><p>Returns a copy of this protocol that acts on a modified version of the original
<code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code> in which the original instances have been replaced
with the outputs of a classifier for each instance. (This is convenient for speeding-up
the evaluation procedures for many samples, by pre-classifying the instances in advance.)</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>pre_classifications</strong> the predictions issued by a classifier, typically an array-like
with shape <cite>(n_instances,)</cite> when the classifier is a hard one, or with shape
<cite>(n_instances, n_classes)</cite> when the classifier is a probabilistic one.</p></li>
<li><p><strong>in_place</strong> whether or not to apply the modification in-place or in a new copy (default).</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>a copy of this protocol</p>
</dd>
</dl>
</dd></dl>
</dd></dl> </dd></dl>
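
The on_preclassified_instances mechanism documented above is what lets evaluation classify each test instance only once. A sketch on toy data (the APP keyword names are recalled, not taken from this diff; in practice the classifier would be trained on a separate training set):

import numpy as np
from sklearn.linear_model import LogisticRegression
from quapy.data.base import LabelledCollection
from quapy.protocol import APP

X = np.random.rand(1000, 5)
y = np.random.randint(0, 2, size=1000)
test = LabelledCollection(X, y)
classifier = LogisticRegression().fit(X, y)   # toy: trained on the same data for brevity

prot = APP(test, sample_size=100, n_prevalences=11, repeats=1, random_state=0)

# classify every instance once; the returned protocol yields samples built from the
# posterior probabilities instead of the raw instances, so no sample triggers re-classification
posteriors = classifier.predict_proba(test.instances)
prot_pre = prot.on_preclassified_instances(posteriors, in_place=False)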
@@ -830,13 +867,13 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.USimplexPP.sample"> <dt class="sig sig-object py" id="quapy.protocol.USimplexPP.sample">
<span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.sample" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">sample</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">index</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.sample" title="Permalink to this definition"></a></dt>
<dd><p>Extract one sample determined by the given parameters</p> <dd><p>Realizes the sample given the index of the instances.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt> <dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>params</strong> all the necessary parameters to generate a sample</p> <dd class="field-odd"><p><strong>index</strong> indexes of the instances to select</p>
</dd> </dd>
<dt class="field-even">Returns<span class="colon">:</span></dt> <dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>one sample (the same sample has to be generated for the same parameters)</p> <dd class="field-even"><p>an instance of <code class="xref py py-class docutils literal notranslate"><span class="pre">qp.data.LabelledCollection</span></code></p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -844,10 +881,10 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.USimplexPP.samples_parameters"> <dt class="sig sig-object py" id="quapy.protocol.USimplexPP.samples_parameters">
<span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.samples_parameters" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">samples_parameters</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.samples_parameters" title="Permalink to this definition"></a></dt>
<dd><p>This function has to return all the necessary parameters to replicate the samples</p> <dd><p>Return all the necessary parameters to replicate the samples as according to the USimplexPP protocol.</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>a list of parameters, each of which serves to deterministically generate a sample</p> <dd class="field-odd"><p>a list of indexes that realize the USimplexPP sampling</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
@@ -855,10 +892,10 @@ to “labelled_collection” to get instead instances of LabelledCollection</p><
<dl class="py method"> <dl class="py method">
<dt class="sig sig-object py" id="quapy.protocol.USimplexPP.total"> <dt class="sig sig-object py" id="quapy.protocol.USimplexPP.total">
<span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.total" title="Permalink to this definition"></a></dt> <span class="sig-name descname"><span class="pre">total</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#quapy.protocol.USimplexPP.total" title="Permalink to this definition"></a></dt>
<dd><p>Indicates the total number of samples that the protocol generates.</p> <dd><p>Returns the number of samples that will be generated (equals to “repeats”)</p>
<dl class="field-list simple"> <dl class="field-list simple">
<dt class="field-odd">Returns<span class="colon">:</span></dt> <dt class="field-odd">Returns<span class="colon">:</span></dt>
<dd class="field-odd"><p>The number of samples to generate if known, or <cite>None</cite> otherwise.</p> <dd class="field-odd"><p>int</p>
</dd> </dd>
</dl> </dl>
</dd></dl> </dd></dl>
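
USimplexPP follows the same sample/samples_parameters/total contract as APP, but draws its prevalence vectors uniformly at random from the probability simplex. A brief sketch (repeats and random_state are recalled keyword names, not taken from this diff):

import numpy as np
from quapy.data.base import LabelledCollection
from quapy.protocol import USimplexPP

# toy 3-class collection
test = LabelledCollection(np.random.rand(1000, 5), np.random.randint(0, 3, size=1000))

prot = USimplexPP(test, sample_size=100, repeats=50, random_state=0)
print(prot.total())                # documented above to equal `repeats`
for instances, prev in prot():
    pass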


@@ -458,76 +458,6 @@ learner has been trained outside the quantifier.</p></li>
</dd></dl> </dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.ELM">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">ELM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loss</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'01'</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.ELM" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.AggregativeQuantifier" title="quapy.method.aggregative.AggregativeQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">AggregativeQuantifier</span></code></a>, <a class="reference internal" href="#quapy.method.base.BinaryQuantifier" title="quapy.method.base.BinaryQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">BinaryQuantifier</span></code></a></p>
<p>Class of Explicit Loss Minimization (ELM) quantifiers.
Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>loss</strong> the loss to optimize (see <a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.valid_losses" title="quapy.classification.svmperf.SVMperf.valid_losses"><code class="xref py py-attr docutils literal notranslate"><span class="pre">quapy.classification.svmperf.SVMperf.valid_losses</span></code></a>)</p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
<dl class="py method">
<dt class="sig sig-object py" id="quapy.method.aggregative.ELM.aggregate">
<span class="sig-name descname"><span class="pre">aggregate</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classif_predictions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.ELM.aggregate" title="Permalink to this definition"></a></dt>
<dd><p>Implements the aggregation of label predictions.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>classif_predictions</strong> <cite>np.ndarray</cite> of label predictions</p>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p><cite>np.ndarray</cite> of shape <cite>(n_classes,)</cite> with class prevalence estimates.</p>
</dd>
</dl>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="quapy.method.aggregative.ELM.classify">
<span class="sig-name descname"><span class="pre">classify</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">X</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.ELM.classify" title="Permalink to this definition"></a></dt>
<dd><p>Provides the label predictions for the given instances. The predictions should respect the format expected by
<a class="reference internal" href="#quapy.method.aggregative.ELM.aggregate" title="quapy.method.aggregative.ELM.aggregate"><code class="xref py py-meth docutils literal notranslate"><span class="pre">aggregate()</span></code></a>, i.e., posterior probabilities for probabilistic quantifiers, or crisp predictions for
non-probabilistic quantifiers</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>instances</strong> array-like</p>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>np.ndarray of shape <cite>(n_instances,)</cite> with label predictions</p>
</dd>
</dl>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="quapy.method.aggregative.ELM.fit">
<span class="sig-name descname"><span class="pre">fit</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection" title="quapy.data.base.LabelledCollection"><span class="pre">LabelledCollection</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">fit_classifier</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.ELM.fit" title="Permalink to this definition"></a></dt>
<dd><p>Trains the aggregative quantifier</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> a <a class="reference internal" href="quapy.data.html#quapy.data.base.LabelledCollection" title="quapy.data.base.LabelledCollection"><code class="xref py py-class docutils literal notranslate"><span class="pre">quapy.data.base.LabelledCollection</span></code></a> consisting of the training data</p></li>
<li><p><strong>fit_classifier</strong> whether or not to train the learner (default is True). Set to False if the
learner has been trained outside the quantifier.</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>self</p>
</dd>
</dl>
</dd></dl>
</dd></dl>
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.EMQ"> <dt class="sig sig-object py" id="quapy.method.aggregative.EMQ">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">EMQ</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">exact_train_prev</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">recalib</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.EMQ" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">EMQ</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">exact_train_prev</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">recalib</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.EMQ" title="Permalink to this definition"></a></dt>
@@ -627,12 +557,6 @@ learner has been trained outside the quantifier.</p></li>
<dd><p>alias of <a class="reference internal" href="#quapy.method.aggregative.EMQ" title="quapy.method.aggregative.EMQ"><code class="xref py py-class docutils literal notranslate"><span class="pre">EMQ</span></code></a></p> <dd><p>alias of <a class="reference internal" href="#quapy.method.aggregative.EMQ" title="quapy.method.aggregative.EMQ"><code class="xref py py-class docutils literal notranslate"><span class="pre">EMQ</span></code></a></p>
</dd></dl> </dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py" id="quapy.method.aggregative.ExplicitLossMinimisation">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">ExplicitLossMinimisation</span></span><a class="headerlink" href="#quapy.method.aggregative.ExplicitLossMinimisation" title="Permalink to this definition"></a></dt>
<dd><p>alias of <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
</dd></dl>
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.HDy"> <dt class="sig sig-object py" id="quapy.method.aggregative.HDy">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">HDy</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.4</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.HDy" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">HDy</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.4</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.HDy" title="Permalink to this definition"></a></dt>
@ -782,7 +706,7 @@ validation data, or as an integer, indicating that the misclassification rates s
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.OneVsAllAggregative"> <dt class="sig sig-object py" id="quapy.method.aggregative.OneVsAllAggregative">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">OneVsAllAggregative</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_backend</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'loky'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.OneVsAllAggregative" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">OneVsAllAggregative</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_backend</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'multiprocessing'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.OneVsAllAggregative" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.base.OneVsAllGeneric" title="quapy.method.base.OneVsAllGeneric"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneVsAllGeneric</span></code></a>, <a class="reference internal" href="#quapy.method.aggregative.AggregativeQuantifier" title="quapy.method.aggregative.AggregativeQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">AggregativeQuantifier</span></code></a></p> <dd><p>Bases: <a class="reference internal" href="#quapy.method.base.OneVsAllGeneric" title="quapy.method.base.OneVsAllGeneric"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneVsAllGeneric</span></code></a>, <a class="reference internal" href="#quapy.method.aggregative.AggregativeQuantifier" title="quapy.method.aggregative.AggregativeQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">AggregativeQuantifier</span></code></a></p>
<p>Allows any binary quantifier to perform quantification on single-label datasets. <p>Allows any binary quantifier to perform quantification on single-label datasets.
The method maintains one binary quantifier for each class, and then l1-normalizes the outputs so that the The method maintains one binary quantifier for each class, and then l1-normalizes the outputs so that the
@ -1029,108 +953,6 @@ learner has been trained outside the quantifier.</p></li>
</dd></dl> </dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.SVMAE">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">SVMAE</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.SVMAE" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
<p>SVM(AE), which attempts to minimize Absolute Error as first used by
<a class="reference external" href="https://arxiv.org/abs/2011.02552">Moreo and Sebastiani, 2021</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ELM</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;mae&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.SVMKLD">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">SVMKLD</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.SVMKLD" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
<p>SVM(KLD), which attempts to minimize the Kullback-Leibler Divergence as proposed by
<a class="reference external" href="https://dl.acm.org/doi/abs/10.1145/2700406">Esuli et al. 2015</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ELM</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;kld&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.SVMNKLD">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">SVMNKLD</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.SVMNKLD" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
<p>SVM(NKLD), which attempts to minimize a version of the Kullback-Leibler Divergence normalized
via the logistic function, as proposed by
<a class="reference external" href="https://dl.acm.org/doi/abs/10.1145/2700406">Esuli et al. 2015</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ELM</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;nkld&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.SVMQ">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">SVMQ</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.SVMQ" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
<p>SVM(Q), which attempts to minimize the <cite>Q</cite> loss combining a classification-oriented loss and a
quantification-oriented loss, as proposed by
<a class="reference external" href="https://www.sciencedirect.com/science/article/pii/S003132031400291X">Barranquero et al. 2015</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ELM</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;q&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.SVMRAE">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">SVMRAE</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.SVMRAE" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.aggregative.ELM" title="quapy.method.aggregative.ELM"><code class="xref py py-class docutils literal notranslate"><span class="pre">ELM</span></code></a></p>
<p>SVM(RAE), which attempts to minimize Relative Absolute Error as first used by
<a class="reference external" href="https://arxiv.org/abs/2011.02552">Moreo and Sebastiani, 2021</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ELM</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;mrae&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> path to the folder containing the binary files of <cite>SVM perf</cite></p></li>
<li><p><strong>kwargs</strong> rest of SVM perfs parameters</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.method.aggregative.T50"> <dt class="sig sig-object py" id="quapy.method.aggregative.T50">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">T50</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.4</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.T50" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">T50</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">classifier</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">BaseEstimator</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.4</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.T50" title="Permalink to this definition"></a></dt>
@ -1247,6 +1069,162 @@ validation data, or as an integer, indicating that the misclassification rates s
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">cross_generate_predictions_depr</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">probabilistic</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">fit_classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">method_name</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.cross_generate_predictions_depr" title="Permalink to this definition"></a></dt> <span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">cross_generate_predictions_depr</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">val_split</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">probabilistic</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">fit_classifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">method_name</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.cross_generate_predictions_depr" title="Permalink to this definition"></a></dt>
<dd></dd></dl> <dd></dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="quapy.method.aggregative.newELM">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">newELM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loss</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'01'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.newELM" title="Permalink to this definition"></a></dt>
<dd><p>Explicit Loss Minimization (ELM) quantifiers.
Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).
This function is equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">CC</span><span class="p">(</span><span class="n">SVMperf</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="p">,</span> <span class="n">C</span><span class="p">))</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> – path to the folder containing the binary files of <cite>SVM perf</cite>; if set to None (default),
this path will be obtained from qp.environ['SVMPERF_HOME']</p></li>
<li><p><strong>loss</strong> – the loss to optimize (see <a class="reference internal" href="quapy.classification.html#quapy.classification.svmperf.SVMperf.valid_losses" title="quapy.classification.svmperf.SVMperf.valid_losses"><code class="xref py py-attr docutils literal notranslate"><span class="pre">quapy.classification.svmperf.SVMperf.valid_losses</span></code></a>)</p></li>
<li><p><strong>C</strong> – trade-off between training error and margin (default 1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>returns an instance of CC set to work with SVMperf (with loss and C set properly) as the
underlying classifier</p>
</dd>
</dl>
</dd></dl>
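A hedged usage sketch of newELM, assuming SVMperf has been installed and patched via the script linked above; the path, loss and C values below are placeholders:

import quapy as qp
from quapy.method.aggregative import newELM

dataset = qp.datasets.fetch_UCIDataset('ionosphere')                         # a binary dataset; SVMperf-based quantifiers are binary
quantifier = newELM(svmperf_base='./svm_perf_quantification', loss='q', C=1)
quantifier.fit(dataset.training)                                             # trains SVMperf under the chosen quantification loss
estim_prev = quantifier.quantify(dataset.test.instances)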
<dl class="py function">
<dt class="sig sig-object py" id="quapy.method.aggregative.newSVMAE">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">newSVMAE</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.newSVMAE" title="Permalink to this definition"></a></dt>
<dd><p>SVM(AE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Absolute Error, as first used by
<a class="reference external" href="https://arxiv.org/abs/2011.02552">Moreo and Sebastiani, 2021</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">CC</span><span class="p">(</span><span class="n">SVMperf</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;mae&#39;</span><span class="p">,</span> <span class="n">C</span><span class="o">=</span><span class="n">C</span><span class="p">))</span>
</pre></div>
</div>
<p>Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).
This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> – path to the folder containing the binary files of <cite>SVM perf</cite>; if set to None (default),
this path will be obtained from qp.environ['SVMPERF_HOME']</p></li>
<li><p><strong>C</strong> – trade-off between training error and margin (default 1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>returns an instance of CC set to work with SVMperf (with loss and C set properly) as the
underlying classifier</p>
</dd>
</dl>
</dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="quapy.method.aggregative.newSVMKLD">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">newSVMKLD</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.newSVMKLD" title="Permalink to this definition"></a></dt>
<dd><p>SVM(KLD) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Kullback-Leibler Divergence,
as proposed by
<a class="reference external" href="https://dl.acm.org/doi/abs/10.1145/2700406">Esuli et al. 2015</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">CC</span><span class="p">(</span><span class="n">SVMperf</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;kld&#39;</span><span class="p">,</span> <span class="n">C</span><span class="o">=</span><span class="n">C</span><span class="p">))</span>
</pre></div>
</div>
<p>Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).
This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> – path to the folder containing the binary files of <cite>SVM perf</cite>; if set to None (default),
this path will be obtained from qp.environ['SVMPERF_HOME']</p></li>
<li><p><strong>C</strong> – trade-off between training error and margin (default 1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>returns an instance of CC set to work with SVMperf (with loss and C set properly) as the
underlying classifier</p>
</dd>
</dl>
</dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="quapy.method.aggregative.newSVMQ">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">newSVMQ</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.newSVMQ" title="Permalink to this definition"></a></dt>
<dd><p>SVM(Q) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the <cite>Q</cite> loss combining a
classification-oriented loss and a quantification-oriented loss, as proposed by
<a class="reference external" href="https://www.sciencedirect.com/science/article/pii/S003132031400291X">Barranquero et al. 2015</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">CC</span><span class="p">(</span><span class="n">SVMperf</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;q&#39;</span><span class="p">,</span> <span class="n">C</span><span class="o">=</span><span class="n">C</span><span class="p">))</span>
</pre></div>
</div>
<p>Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).
This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> – path to the folder containing the binary files of <cite>SVM perf</cite>; if set to None (default),
this path will be obtained from qp.environ['SVMPERF_HOME']</p></li>
<li><p><strong>C</strong> – trade-off between training error and margin (default 1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>returns an instance of CC set to work with SVMperf (with loss and C set properly) as the
underlying classifier</p>
</dd>
</dl>
</dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="quapy.method.aggregative.newSVMRAE">
<span class="sig-prename descclassname"><span class="pre">quapy.method.aggregative.</span></span><span class="sig-name descname"><span class="pre">newSVMRAE</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">svmperf_base</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">C</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.aggregative.newSVMRAE" title="Permalink to this definition"></a></dt>
<dd><p>SVM(RAE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Relative Absolute Error as first
used by <a class="reference external" href="https://arxiv.org/abs/2011.02552">Moreo and Sebastiani, 2021</a>.
Equivalent to:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">CC</span><span class="p">(</span><span class="n">SVMperf</span><span class="p">(</span><span class="n">svmperf_base</span><span class="p">,</span> <span class="n">loss</span><span class="o">=</span><span class="s1">&#39;mrae&#39;</span><span class="p">,</span> <span class="n">C</span><span class="o">=</span><span class="n">C</span><span class="p">))</span>
</pre></div>
</div>
<p>Quantifiers based on ELM represent a family of methods based on structured output learning;
these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss
measure. This implementation relies on
<a class="reference external" href="https://www.cs.cornell.edu/people/tj/svm_light/svm_perf.html">Joachims SVM perf</a> structured output
learning algorithm, which has to be installed and patched for the purpose (see this
<a class="reference external" href="https://github.com/HLT-ISTI/QuaPy/blob/master/prepare_svmperf.sh">script</a>).
This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>svmperf_base</strong> – path to the folder containing the binary files of <cite>SVM perf</cite>; if set to None (default),
this path will be obtained from qp.environ['SVMPERF_HOME']</p></li>
<li><p><strong>C</strong> – trade-off between training error and margin (default 1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns<span class="colon">:</span></dt>
<dd class="field-even"><p>returns an instance of CC set to work with SVMperf (with loss and C set properly) as the
underlying classifier</p>
</dd>
</dl>
</dd></dl>
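The convenience wrappers above differ only in the loss they pass down to newELM; a compact sketch of the correspondence (the path is a placeholder):

from quapy.method.aggregative import newSVMQ, newSVMKLD, newSVMAE, newSVMRAE

base = './svm_perf_quantification'    # placeholder path to the patched SVMperf binaries
q_q   = newSVMQ(base, C=1)            # same as newELM(base, loss='q', C=1)
q_kld = newSVMKLD(base, C=1)          # newELM with the Kullback-Leibler-based loss
q_ae  = newSVMAE(base, C=1)           # same as newELM(base, loss='mae', C=1)
q_rae = newSVMRAE(base, C=1)          # same as newELM(base, loss='mrae', C=1)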
</section> </section>
<section id="module-quapy.method.base"> <section id="module-quapy.method.base">
<span id="quapy-method-base"></span><h2>quapy.method.base<a class="headerlink" href="#module-quapy.method.base" title="Permalink to this heading"></a></h2> <span id="quapy-method-base"></span><h2>quapy.method.base<a class="headerlink" href="#module-quapy.method.base" title="Permalink to this heading"></a></h2>
@ -1303,7 +1281,7 @@ validation data, or as an integer, indicating that the misclassification rates s
<dl class="py class"> <dl class="py class">
<dt class="sig sig-object py" id="quapy.method.base.OneVsAllGeneric"> <dt class="sig sig-object py" id="quapy.method.base.OneVsAllGeneric">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.base.</span></span><span class="sig-name descname"><span class="pre">OneVsAllGeneric</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_backend</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'loky'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.base.OneVsAllGeneric" title="Permalink to this definition"></a></dt> <em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">quapy.method.base.</span></span><span class="sig-name descname"><span class="pre">OneVsAllGeneric</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.base.OneVsAllGeneric" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#quapy.method.base.OneVsAll" title="quapy.method.base.OneVsAll"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneVsAll</span></code></a>, <a class="reference internal" href="#quapy.method.base.BaseQuantifier" title="quapy.method.base.BaseQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">BaseQuantifier</span></code></a></p> <dd><p>Bases: <a class="reference internal" href="#quapy.method.base.OneVsAll" title="quapy.method.base.OneVsAll"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneVsAll</span></code></a>, <a class="reference internal" href="#quapy.method.base.BaseQuantifier" title="quapy.method.base.BaseQuantifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">BaseQuantifier</span></code></a></p>
<p>Allows any binary quantifier to perform quantification on single-label datasets. The method maintains one binary <p>Allows any binary quantifier to perform quantification on single-label datasets. The method maintains one binary
quantifier for each class, and then l1-normalizes the outputs so that the class prevelence values sum up to 1.</p> quantifier for each class, and then l1-normalizes the outputs so that the class prevelence values sum up to 1.</p>
@ -1343,8 +1321,8 @@ quantifier for each class, and then l1-normalizes the outputs so that the class
</dd></dl> </dd></dl>
<dl class="py function"> <dl class="py function">
<dt class="sig sig-object py" id="quapy.method.base.getOneVsAll"> <dt class="sig sig-object py" id="quapy.method.base.newOneVsAll">
<span class="sig-prename descclassname"><span class="pre">quapy.method.base.</span></span><span class="sig-name descname"><span class="pre">getOneVsAll</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_backend</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'loky'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.base.getOneVsAll" title="Permalink to this definition"></a></dt> <span class="sig-prename descclassname"><span class="pre">quapy.method.base.</span></span><span class="sig-name descname"><span class="pre">newOneVsAll</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">binary_quantifier</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">n_jobs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#quapy.method.base.newOneVsAll" title="Permalink to this definition"></a></dt>
<dd></dd></dl> <dd></dd></dl>
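A brief sketch of wrapping a binary quantifier for a multiclass dataset with newOneVsAll; the dataset and learner below are illustrative:

import quapy as qp
from sklearn.linear_model import LogisticRegression
from quapy.method.aggregative import HDy
from quapy.method.base import newOneVsAll

data = qp.datasets.fetch_twitter('hcr', pickle=True)       # a three-class sentiment dataset
ova = newOneVsAll(HDy(LogisticRegression()), n_jobs=-1)     # one binary HDy per class
ova.fit(data.training)
estim_prev = ova.quantify(data.test.instances)              # l1-normalized prevalence vector over the classes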
</section> </section>

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,35 @@
import quapy as qp
from quapy.classification.neural import CNNnet
from quapy.classification.neural import NeuralClassifierTrainer
from quapy.method.meta import QuaNet
import quapy.functional as F
"""
This example shows how to train QuaNet. The internal classifier is a word-based CNN.
"""
# set the sample size in the environment
qp.environ["SAMPLE_SIZE"] = 100
# the dataset is textual (Kindle reviews from Amazon), so we need to index terms, i.e.,
# we need to convert distinct terms into numerical ids
dataset = qp.datasets.fetch_reviews('kindle', pickle=True)
qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
train, test = dataset.train_test
# train the text classifier:
cnn_module = CNNnet(dataset.vocabulary_size, dataset.training.n_classes)
cnn_classifier = NeuralClassifierTrainer(cnn_module, device='cuda')
cnn_classifier.fit(*dataset.training.Xy)
# train QuaNet (alternatively, we can set fit_classifier=True and let QuaNet train the classifier)
quantifier = QuaNet(cnn_classifier, device='cuda')
quantifier.fit(train, fit_classifier=False)
# prediction and evaluation
estim_prevalence = quantifier.quantify(test.instances)
mae = qp.error.mae(test.prevalence(), estim_prevalence)
print(f'true prevalence: {F.strprev(test.prevalence())}')
print(f'estim prevalence: {F.strprev(estim_prevalence)}')
print(f'MAE = {mae:.4f}')

View File

@ -229,11 +229,11 @@ class NeuralClassifierTrainer:
self.net.eval() self.net.eval()
opt = self.trainer_hyperparams opt = self.trainer_hyperparams
with torch.no_grad(): with torch.no_grad():
positive_probs = [] posteriors = []
for xi in TorchDataset(instances).asDataloader( for xi in TorchDataset(instances).asDataloader(
opt['batch_size_test'], shuffle=False, pad_length=opt['padding_length'], device=opt['device']): opt['batch_size_test'], shuffle=False, pad_length=opt['padding_length'], device=opt['device']):
positive_probs.append(self.net.predict_proba(xi)) posteriors.append(self.net.predict_proba(xi))
return np.concatenate(positive_probs) return np.concatenate(posteriors)
def transform(self, instances): def transform(self, instances):
""" """

View File

@ -523,3 +523,14 @@ class Dataset:
yield Dataset(train, test, name=f'fold {(i % nfolds) + 1}/{nfolds} (round={(i // nfolds) + 1})') yield Dataset(train, test, name=f'fold {(i % nfolds) + 1}/{nfolds} (round={(i // nfolds) + 1})')
def reduce(self, n_train=100, n_test=100):
"""
Reduce the number of instances in place for quick experiments. Preserves the prevalence of each set.
:param n_train: number of training documents to keep (default 100)
:param n_test: number of test documents to keep (default 100)
:return: self
"""
self.training = self.training.sampling(n_train, *self.training.prevalence())
self.test = self.test.sampling(n_test, *self.test.prevalence())
return self
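A quick sketch of the new reduce() helper in use (the dataset is illustrative; the same call also appears in the updated tests further down):

import quapy as qp

tiny = qp.datasets.fetch_UCIDataset('ionosphere').reduce(n_train=100, n_test=100)
print(len(tiny.training), len(tiny.test))   # 100 100, each split keeping (approximately) its original prevalence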

View File

@ -121,6 +121,9 @@ def index(dataset: Dataset, min_df=5, inplace=False, **kwargs):
training_index = indexer.fit_transform(dataset.training.instances) training_index = indexer.fit_transform(dataset.training.instances)
test_index = indexer.transform(dataset.test.instances) test_index = indexer.transform(dataset.test.instances)
training_index = np.asarray(training_index, dtype=object)
test_index = np.asarray(test_index, dtype=object)
if inplace: if inplace:
dataset.training = LabelledCollection(training_index, dataset.training.labels, dataset.classes_) dataset.training = LabelledCollection(training_index, dataset.training.labels, dataset.classes_)
dataset.test = LabelledCollection(test_index, dataset.test.labels, dataset.classes_) dataset.test = LabelledCollection(test_index, dataset.test.labels, dataset.classes_)
@ -181,12 +184,12 @@ class IndexTransformer:
# given the number of tasks and the number of jobs, generates the slices for the parallel processes # given the number of tasks and the number of jobs, generates the slices for the parallel processes
assert self.unk != -1, 'transform called before fit' assert self.unk != -1, 'transform called before fit'
n_jobs = qp._get_njobs(n_jobs) n_jobs = qp._get_njobs(n_jobs)
indexed = map_parallel(func=self._index, args=X, n_jobs=n_jobs) return map_parallel(func=self._index, args=X, n_jobs=n_jobs)
return np.asarray(indexed)
def _index(self, documents): def _index(self, documents):
vocab = self.vocabulary_.copy() vocab = self.vocabulary_.copy()
return [[vocab.prevalence(word, self.unk) for word in self.analyzer(doc)] for doc in tqdm(documents, 'indexing')] return [[vocab.get(word, self.unk) for word in self.analyzer(doc)] for doc in tqdm(documents, 'indexing')]
def fit_transform(self, X, n_jobs=None): def fit_transform(self, X, n_jobs=None):
""" """

View File

@ -2,7 +2,7 @@ from typing import Union, Callable, Iterable
import numpy as np import numpy as np
from tqdm import tqdm from tqdm import tqdm
import quapy as qp import quapy as qp
from quapy.protocol import AbstractProtocol, OnLabelledCollectionProtocol from quapy.protocol import AbstractProtocol, OnLabelledCollectionProtocol, IterateProtocol
from quapy.method.base import BaseQuantifier from quapy.method.base import BaseQuantifier
import pandas as pd import pandas as pd
@ -94,5 +94,15 @@ def evaluate(
return error_metric(true_prevs, estim_prevs) return error_metric(true_prevs, estim_prevs)
def evaluate_on_samples(
model: BaseQuantifier,
samples: [qp.data.LabelledCollection],
error_metric:Union[str, Callable],
verbose=False):
return evaluate(model, IterateProtocol(samples), error_metric, aggr_speedup=False, verbose=verbose)
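A hedged sketch of calling the new evaluate_on_samples helper; the quantifier (PCC), the sample sizes and the prevalence values are assumptions for illustration only:

import quapy as qp
from sklearn.linear_model import LogisticRegression
from quapy.method.aggregative import PCC
from quapy.evaluation import evaluate_on_samples

data = qp.datasets.fetch_UCIDataset('ionosphere')
quantifier = PCC(LogisticRegression()).fit(data.training)

# draw a few fixed-prevalence test samples and score the quantifier on them
samples = [data.test.sampling(50, 0.3, 0.7),
           data.test.sampling(50, 0.5, 0.5),
           data.test.sampling(50, 0.7, 0.3)]
mae = evaluate_on_samples(quantifier, samples, error_metric='mae')
print(f'MAE over {len(samples)} samples = {mae:.4f}')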

View File

@ -338,7 +338,7 @@ class ACC(AggregativeQuantifier):
) )
self.cc = CC(self.classifier) self.cc = CC(self.classifier)
self.Pte_cond_estim_ = self.getPteCondEstim(data.classes_, y, y_) self.Pte_cond_estim_ = self.getPteCondEstim(self.classifier.classes_, y, y_)
return self return self
@ -996,7 +996,7 @@ def newSVMAE(svmperf_base=None, C=1):
""" """
return newELM(svmperf_base, loss='mae', C=C) return newELM(svmperf_base, loss='mae', C=C)
def newSVMAE(svmperf_base=None, C=1): def newSVMRAE(svmperf_base=None, C=1):
""" """
SVM(RAE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Relative Absolute Error as first SVM(RAE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Relative Absolute Error as first
used by `Moreo and Sebastiani, 2021 <https://arxiv.org/abs/2011.02552>`_. used by `Moreo and Sebastiani, 2021 <https://arxiv.org/abs/2011.02552>`_.

View File

@ -7,6 +7,7 @@ from sklearn.model_selection import GridSearchCV, cross_val_predict
from tqdm import tqdm from tqdm import tqdm
import quapy as qp import quapy as qp
from quapy.evaluation import evaluate_on_samples
from quapy import functional as F from quapy import functional as F
from quapy.data import LabelledCollection from quapy.data import LabelledCollection
from quapy.model_selection import GridSearchQ from quapy.model_selection import GridSearchQ
@ -182,7 +183,7 @@ class Ensemble(BaseQuantifier):
tests = [m[3] for m in self.ensemble] tests = [m[3] for m in self.ensemble]
scores = [] scores = []
for i, model in enumerate(self.ensemble): for i, model in enumerate(self.ensemble):
scores.append(evaluate(model[0], tests[:i] + tests[i + 1:], error, self.n_jobs)) scores.append(evaluate_on_samples(model[0], tests[:i] + tests[i + 1:], error))
order = np.argsort(scores) order = np.argsort(scores)
self.ensemble = _select_k(self.ensemble, order, k=self.red_size) self.ensemble = _select_k(self.ensemble, order, k=self.red_size)

View File

@ -6,6 +6,7 @@ import torch
from torch.nn import MSELoss from torch.nn import MSELoss
from torch.nn.functional import relu from torch.nn.functional import relu
from quapy.protocol import USimplexPP
from quapy.method.aggregative import * from quapy.method.aggregative import *
from quapy.util import EarlyStop from quapy.util import EarlyStop
@ -41,7 +42,8 @@ class QuaNetTrainer(BaseQuantifier):
:param classifier: an object implementing `fit` (i.e., that can be trained on labelled data), :param classifier: an object implementing `fit` (i.e., that can be trained on labelled data),
`predict_proba` (i.e., that can generate posterior probabilities of unlabelled examples) and `predict_proba` (i.e., that can generate posterior probabilities of unlabelled examples) and
`transform` (i.e., that can generate embedded representations of the unlabelled instances). `transform` (i.e., that can generate embedded representations of the unlabelled instances).
:param sample_size: integer, the sample size :param sample_size: integer, the sample size; default is None, meaning that the sample size should be
taken from qp.environ["SAMPLE_SIZE"]
:param n_epochs: integer, maximum number of training epochs :param n_epochs: integer, maximum number of training epochs
:param tr_iter_per_poch: integer, number of training iterations before considering an epoch complete :param tr_iter_per_poch: integer, number of training iterations before considering an epoch complete
:param va_iter_per_poch: integer, number of validation iterations to perform after each epoch :param va_iter_per_poch: integer, number of validation iterations to perform after each epoch
@ -61,7 +63,7 @@ class QuaNetTrainer(BaseQuantifier):
def __init__(self, def __init__(self,
classifier, classifier,
sample_size, sample_size=None,
n_epochs=100, n_epochs=100,
tr_iter_per_poch=500, tr_iter_per_poch=500,
va_iter_per_poch=100, va_iter_per_poch=100,
@ -83,7 +85,7 @@ class QuaNetTrainer(BaseQuantifier):
f'the classifier {classifier.__class__.__name__} does not seem to be able to produce posterior probabilities ' \ f'the classifier {classifier.__class__.__name__} does not seem to be able to produce posterior probabilities ' \
f'since it does not implement the method "predict_proba"' f'since it does not implement the method "predict_proba"'
self.classifier = classifier self.classifier = classifier
self.sample_size = sample_size self.sample_size = qp._get_sample_size(sample_size)
self.n_epochs = n_epochs self.n_epochs = n_epochs
self.tr_iter = tr_iter_per_poch self.tr_iter = tr_iter_per_poch
self.va_iter = va_iter_per_poch self.va_iter = va_iter_per_poch
@ -216,16 +218,13 @@ class QuaNetTrainer(BaseQuantifier):
self.quanet.train(mode=train) self.quanet.train(mode=train)
losses = [] losses = []
mae_errors = [] mae_errors = []
if train==False: sampler = USimplexPP(
prevpoints = F.get_nprevpoints_approximation(iterations, self.quanet.n_classes) data,
iterations = F.num_prevalence_combinations(prevpoints, self.quanet.n_classes) sample_size=self.sample_size,
with qp.util.temp_seed(0): repeats=iterations,
sampling_index_gen = data.artificial_sampling_index_generator(self.sample_size, prevpoints) random_state=None if train else 0 # different samples during train, same samples during validation
else: )
sampling_index_gen = [data.sampling_index(self.sample_size, *prev) for prev in pbar = tqdm(sampler.samples_parameters(), total=sampler.total())
F.uniform_simplex_sampling(data.n_classes, iterations)]
pbar = tqdm(sampling_index_gen, total=iterations) if train else sampling_index_gen
for it, index in enumerate(pbar): for it, index in enumerate(pbar):
sample_data = data.sampling_from_index(index) sample_data = data.sampling_from_index(index)
sample_posteriors = posteriors[index] sample_posteriors = posteriors[index]
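The new sampling logic above delegates to the USimplexPP protocol; a hedged sketch of that protocol in isolation (sample size, repeats and seed are illustrative):

import quapy as qp
from quapy.protocol import USimplexPP

data = qp.datasets.fetch_UCIDataset('ionosphere').training
sampler = USimplexPP(data, sample_size=50, repeats=10, random_state=0)  # 10 samples with prevalences drawn uniformly from the simplex

for instances, prev in sampler():   # each iteration yields a sample and its true prevalence
    print(prev)
print(sampler.total())              # number of samples the protocol generates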

View File

@ -34,6 +34,34 @@ class AbstractProtocol(metaclass=ABCMeta):
return None return None
class IterateProtocol(AbstractProtocol):
"""
A very simple protocol which simply iterates over a list of previously generated samples
:param samples: a list of :class:`quapy.data.base.LabelledCollection`
"""
def __init__(self, samples: [LabelledCollection]):
self.samples = samples
def __call__(self):
"""
Yields one sample from the initial list at a time
:return: yields one tuple `(sample, prev)` at a time, where `sample` is a set of instances
and `prev` is an `np.ndarray` with the class prevalence values
"""
for sample in self.samples:
yield sample.Xp
def total(self):
"""
Returns the number of samples in this protocol
:return: int
"""
return len(self.samples)
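A short sketch of the new IterateProtocol in use; the samples below are generated manually and are purely illustrative:

import quapy as qp
from quapy.protocol import IterateProtocol

data = qp.datasets.fetch_UCIDataset('ionosphere')
samples = [data.test.sampling(50, 0.2, 0.8),
           data.test.sampling(50, 0.5, 0.5),
           data.test.sampling(50, 0.8, 0.2)]

protocol = IterateProtocol(samples)
for instances, prev in protocol():   # yields each pre-generated sample together with its prevalence (sample.Xp)
    print(prev)
print(protocol.total())              # -> 3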
class AbstractStochasticSeededProtocol(AbstractProtocol): class AbstractStochasticSeededProtocol(AbstractProtocol):
""" """
An `AbstractStochasticSeededProtocol` is a protocol that generates, via any random procedure (e.g., An `AbstractStochasticSeededProtocol` is a protocol that generates, via any random procedure (e.g.,
@ -107,7 +135,7 @@ class OnLabelledCollectionProtocol:
Protocols that generate samples from a :class:`qp.data.LabelledCollection` object. Protocols that generate samples from a :class:`qp.data.LabelledCollection` object.
""" """
RETURN_TYPES = ['sample_prev', 'labelled_collection'] RETURN_TYPES = ['sample_prev', 'labelled_collection', 'index']
def get_labelled_collection(self): def get_labelled_collection(self):
""" """

View File

@ -39,10 +39,6 @@ class EvalTestCase(unittest.TestCase):
self.emq.fit(data) self.emq.fit(data)
return self return self
def set_params(self, **parameters): pass
def get_params(self, deep=True): pass
emq = NonAggregativeEMQ(SlowLR()).fit(train) emq = NonAggregativeEMQ(SlowLR()).fit(train)
tinit = time() tinit = time()

View File

@ -27,6 +27,5 @@ class HierarchyTestCase(unittest.TestCase):
self.assertEqual(isinstance(m, AggregativeProbabilisticQuantifier), True) self.assertEqual(isinstance(m, AggregativeProbabilisticQuantifier), True)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -0,0 +1,21 @@
import unittest
import numpy as np
import quapy as qp
class LabelCollectionTestCase(unittest.TestCase):
def test_split(self):
x = np.arange(100)
y = np.random.randint(0,5,100)
data = qp.data.LabelledCollection(x,y)
tr, te = data.split_random(0.7)
check_prev = tr.prevalence()*0.7 + te.prevalence()*0.3
self.assertEqual(len(tr), 70)
self.assertEqual(len(te), 30)
self.assertEqual(np.allclose(check_prev, data.prevalence()), True)
self.assertEqual(len(tr+te), len(data))
if __name__ == '__main__':
unittest.main()

View File

@ -6,18 +6,21 @@ from sklearn.svm import LinearSVC
import quapy as qp import quapy as qp
from quapy.method.base import BinaryQuantifier from quapy.method.base import BinaryQuantifier
from quapy.data import Dataset, LabelledCollection from quapy.data import Dataset, LabelledCollection
from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS, EXPLICIT_LOSS_MINIMIZATION_METHODS from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS
from quapy.method.aggregative import ACC, PACC, HDy from quapy.method.aggregative import ACC, PACC, HDy
from quapy.method.meta import Ensemble from quapy.method.meta import Ensemble
datasets = [pytest.param(qp.datasets.fetch_twitter('hcr'), id='hcr'), datasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True), id='hcr'),
pytest.param(qp.datasets.fetch_UCIDataset('ionosphere'), id='ionosphere')] pytest.param(qp.datasets.fetch_UCIDataset('ionosphere'), id='ionosphere')]
tinydatasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True).reduce(), id='tiny_hcr'),
pytest.param(qp.datasets.fetch_UCIDataset('ionosphere').reduce(), id='tiny_ionosphere')]
learners = [LogisticRegression, LinearSVC] learners = [LogisticRegression, LinearSVC]
@pytest.mark.parametrize('dataset', datasets) @pytest.mark.parametrize('dataset', datasets)
@pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS)) @pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS)
@pytest.mark.parametrize('learner', learners) @pytest.mark.parametrize('learner', learners)
def test_aggregative_methods(dataset: Dataset, aggregative_method, learner): def test_aggregative_methods(dataset: Dataset, aggregative_method, learner):
model = aggregative_method(learner()) model = aggregative_method(learner())
@ -36,30 +39,6 @@ def test_aggregative_methods(dataset: Dataset, aggregative_method, learner):
assert type(error) == numpy.float64 assert type(error) == numpy.float64
@pytest.mark.parametrize('dataset', datasets)
@pytest.mark.parametrize('elm_method', EXPLICIT_LOSS_MINIMIZATION_METHODS)
def test_elm_methods(dataset: Dataset, elm_method):
    try:
        model = elm_method()
    except AssertionError as ae:
        if ae.args[0].find('does not seem to point to a valid path') > 0:
            print('Missing SVMperf binary program, skipping test')
            return
    if isinstance(model, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
        return
    model.fit(dataset.training)
    estim_prevalences = model.quantify(dataset.test.instances)
    true_prevalences = dataset.test.prevalence()
    error = qp.error.mae(true_prevalences, estim_prevalences)
    assert type(error) == numpy.float64
@pytest.mark.parametrize('dataset', datasets)
@pytest.mark.parametrize('non_aggregative_method', NON_AGGREGATIVE_METHODS)
def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method):
@@ -79,16 +58,20 @@ def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method):
    assert type(error) == numpy.float64
@pytest.mark.parametrize('base_method', AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS))
@pytest.mark.parametrize('base_method', AGGREGATIVE_METHODS)
@pytest.mark.parametrize('learner', learners)
@pytest.mark.parametrize('learner', [LogisticRegression])
@pytest.mark.parametrize('dataset', datasets)
@pytest.mark.parametrize('dataset', tinydatasets)
@pytest.mark.parametrize('policy', Ensemble.VALID_POLICIES)
def test_ensemble_method(base_method, learner, dataset: Dataset, policy):
    qp.environ['SAMPLE_SIZE'] = len(dataset.training)
    qp.environ['SAMPLE_SIZE'] = 20
    model = Ensemble(quantifier=base_method(learner()), size=5, policy=policy, n_jobs=-1)
    base_quantifier = base_method(learner())
    if isinstance(model, BinaryQuantifier) and not dataset.binary:
    if isinstance(base_quantifier, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
        print(f'skipping the test of binary model {base_quantifier} on non-binary dataset {dataset}')
        return
    if not dataset.binary and policy=='ds':
        print(f'skipping the test of binary policy ds on non-binary dataset {dataset}')
        return
    model = Ensemble(quantifier=base_quantifier, size=5, policy=policy, n_jobs=-1)
    model.fit(dataset.training)
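
As a reference for how the revised fixture is exercised, a hedged sketch of building an Ensemble over a reduced dataset, mirroring the two-step construction above; the 'ave' policy name is an assumption (one averaging member of Ensemble.VALID_POLICIES), not taken from this diff.

import quapy as qp
from sklearn.linear_model import LogisticRegression
from quapy.method.aggregative import PACC
from quapy.method.meta import Ensemble

qp.environ['SAMPLE_SIZE'] = 20                                   # small samples, as in the updated test

dataset = qp.datasets.fetch_UCIDataset('ionosphere').reduce()    # reduced dataset, like tinydatasets
base_quantifier = PACC(LogisticRegression())                     # built first so binary-only checks can inspect it
model = Ensemble(quantifier=base_quantifier, size=5, policy='ave', n_jobs=-1)
model.fit(dataset.training)
estim_prevalences = model.quantify(dataset.test.instances)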
@@ -107,19 +90,23 @@ def test_quanet_method():
        print('skipping QuaNet test due to missing torch package')
        return
    qp.environ['SAMPLE_SIZE'] = 100
    # load the kindle dataset as text, and convert words to numerical indexes
    dataset = qp.datasets.fetch_reviews('kindle', pickle=True)
    dataset = Dataset(dataset.training.sampling(100, *dataset.training.prevalence()),
    dataset = Dataset(dataset.training.sampling(200, *dataset.training.prevalence()),
                      dataset.test.sampling(100, *dataset.test.prevalence()))
                      dataset.test.sampling(200, *dataset.test.prevalence()))
    qp.data.preprocessing.index(dataset, min_df=5, inplace=True)
    from quapy.classification.neural import CNNnet
    cnn = CNNnet(dataset.vocabulary_size, dataset.training.n_classes)
    cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes)
    from quapy.classification.neural import NeuralClassifierTrainer
    learner = NeuralClassifierTrainer(cnn, device='cuda')
    from quapy.method.meta import QuaNet
    model = QuaNet(learner, sample_size=len(dataset.training), device='cuda')
    model = QuaNet(learner, device='cuda')
    if isinstance(model, BinaryQuantifier) and not dataset.binary:
        print(f'skipping the test of binary model {model} on non-binary dataset {dataset}')
@@ -135,26 +122,12 @@ def test_quanet_method():
    assert type(error) == numpy.float64
def models_to_test_for_str_label_names():
def test_str_label_names():
    models = list()
    model = qp.method.aggregative.CC(LogisticRegression())
    learner = LogisticRegression
    for method in AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS):
        models.append(method(learner(random_state=0)))
    for method in NON_AGGREGATIVE_METHODS:
        models.append(method())
    return models
@pytest.mark.parametrize('model', models_to_test_for_str_label_names())
def test_str_label_names(model):
    if type(model) in {ACC, PACC, HDy}:
        print(
            f'skipping the test of binary model {type(model)} because it currently does not support random seed control.')
        return
    dataset = qp.datasets.fetch_reviews('imdb', pickle=True)
    dataset = Dataset(dataset.training.sampling(1000, *dataset.training.prevalence()),
                      dataset.test.sampling(1000, *dataset.test.prevalence()))
                      dataset.test.sampling(1000, 0.25, 0.75))
    qp.data.preprocessing.text2tfidf(dataset, min_df=5, inplace=True)
    numpy.random.seed(0)