diff --git a/docs/build/html/genindex.html b/docs/build/html/genindex.html index fc438e0..bc41b0c 100644 --- a/docs/build/html/genindex.html +++ b/docs/build/html/genindex.html @@ -106,8 +106,6 @@
  • (quapy.method.aggregative.DistributionMatching method)
  • (quapy.method.aggregative.DyS method) -
  • -
  • (quapy.method.aggregative.ELM method)
  • (quapy.method.aggregative.EMQ method)
  • @@ -198,8 +196,6 @@
  • (quapy.method.aggregative.AggregativeProbabilisticQuantifier method)
  • (quapy.method.aggregative.AggregativeQuantifier method) -
  • -
  • (quapy.method.aggregative.ELM method)
  • (quapy.method.aggregative.OneVsAllAggregative method)
  • @@ -283,8 +279,6 @@
  • EEMQ() (in module quapy.method.meta)
  • EHDy() (in module quapy.method.meta) -
  • -
  • ELM (class in quapy.method.aggregative)
  • EM() (quapy.method.aggregative.EMQ class method)
  • @@ -307,8 +301,6 @@
  • evaluation_report() (in module quapy.evaluation)
  • ExpectationMaximizationQuantifier (in module quapy.method.aggregative) -
  • -
  • ExplicitLossMinimisation (in module quapy.method.aggregative)
  • @@ -350,8 +342,6 @@
  • (quapy.method.aggregative.DistributionMatching method)
  • (quapy.method.aggregative.DyS method) -
  • -
  • (quapy.method.aggregative.ELM method)
  • (quapy.method.aggregative.EMQ method)
  • @@ -435,8 +425,6 @@
  • get_probability_distribution() (in module quapy.method.meta)
  • get_quapy_home() (in module quapy.util) -
  • -
  • getOneVsAll() (in module quapy.method.base)
  • getPteCondEstim() (quapy.method.aggregative.ACC class method) @@ -618,9 +606,21 @@
  • NBVSCalibration (class in quapy.classification.calibration)
  • NeuralClassifierTrainer (class in quapy.classification.neural) +
  • +
  • newELM() (in module quapy.method.aggregative) +
  • +
  • newOneVsAll() (in module quapy.method.base) +
  • +
  • newSVMAE() (in module quapy.method.aggregative)
  • diff --git a/docs/build/html/objects.inv b/docs/build/html/objects.inv index 3c30cfb..e143639 100644 Binary files a/docs/build/html/objects.inv and b/docs/build/html/objects.inv differ diff --git a/docs/build/html/quapy.classification.html b/docs/build/html/quapy.classification.html index 8e2a6b9..f5684c6 100644 --- a/docs/build/html/quapy.classification.html +++ b/docs/build/html/quapy.classification.html @@ -801,7 +801,7 @@ applied, meaning that if the longest document in the batch is shorter than

    quapy.classification.svmperf

    -class quapy.classification.svmperf.SVMperf(svmperf_base, C=0.01, verbose=False, loss='01')
    +class quapy.classification.svmperf.SVMperf(svmperf_base, C=0.01, verbose=False, loss='01', host_folder=None)

    Bases: BaseEstimator, ClassifierMixin

    A wrapper for the SVM-perf package by Thorsten Joachims. When using losses for quantification, the source code has to be patched. See @@ -821,6 +821,8 @@ for further details.

  • C – trade-off between training error and margin (default 0.01)

  • verbose – set to True to print svm-perf std outputs

  • loss – the loss to optimize for. Available losses are “01”, “f1”, “kld”, “nkld”, “q”, “qacc”, “qf1”, “qgm”, “mae”, “mrae”.

  • +
  • host_folder – directory in which to store the trained model; set to None (default) to use a tmp directory +(temporary directories are deleted automatically)

  • @@ -873,17 +875,6 @@ instances in X

    -
    -
    -set_params(**parameters)
    -

    Set the hyper-parameters for svm-perf. Currently, only the C parameter is supported

    -
    -
    Parameters:
    -

    parameters – a **kwargs dictionary {‘C’: <float>}

    -
    -
    -
    -
    valid_losses = {'01': 0, 'f1': 1, 'kld': 12, 'mae': 26, 'mrae': 27, 'nkld': 13, 'q': 22, 'qacc': 23, 'qf1': 24, 'qgm': 25}
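    A minimal usage sketch for the wrapper above (hedged: the data arrays, the svmperf_base path, and the host_folder value are illustrative; as a BaseEstimator/ClassifierMixin, SVMperf is assumed to expose the usual scikit-learn fit/predict interface):
    >>> from quapy.classification.svmperf import SVMperf
    >>> # './svm_perf_quantification' is assumed to contain a compiled, patched SVM-perf build
    >>> svm = SVMperf(svmperf_base='./svm_perf_quantification', C=0.01, loss='kld', host_folder='./models')
    >>> svm.fit(X_train, y_train)        # X_train, y_train: training instances and labels (placeholders)
    >>> y_pred = svm.predict(X_test)     # X_test: test instances (placeholder)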
    diff --git a/docs/build/html/quapy.html b/docs/build/html/quapy.html index d72b33d..b522f38 100644 --- a/docs/build/html/quapy.html +++ b/docs/build/html/quapy.html @@ -550,13 +550,13 @@ in the grid multiplied by repeat

    sample(index)
    -

    Extract one sample determined by the given parameters

    +

    Realizes the sample given the index of the instances.

    Parameters:
    -

    params – all the necessary parameters to generate a sample

    +

    index – indexes of the instances to select

    Returns:
    -

    one sample (the same sample has to be generated for the same parameters)

    +

    an instance of qp.data.LabelledCollection

    @@ -564,10 +564,10 @@ in the grid multiplied by repeat

    samples_parameters()
    -

    This function has to return all the necessary parameters to replicate the samples

    +

    Return all the necessary parameters to replicate the samples according to the APP protocol.

    Returns:
    -

    a list of parameters, each of which serves to deterministically generate a sample

    +

    a list of indexes that realize the APP sampling

    @@ -575,10 +575,10 @@ in the grid multiplied by repeat

    total()
    -

    Indicates the total number of samples that the protocol generates.

    +

    Returns the number of samples that will be generated

    Returns:
    -

    The number of samples to generate if known, or None otherwise.

    +

    int
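    The methods above belong to the APP protocol; the following hypothetical sketch shows how such a protocol is typically instantiated and iterated (the constructor arguments n_prevalences and repeats, and iteration by calling the protocol object, are assumptions not shown in this excerpt):
    >>> import quapy as qp
    >>> from quapy.protocol import APP
    >>> prot = APP(test, sample_size=100, n_prevalences=21, repeats=10)   # test: a qp.data.LabelledCollection
    >>> for sample, prev in prot():
    ...     estim_prev = quantifier.quantify(sample)   # quantifier: any trained quantifier (placeholder)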

    @@ -666,10 +666,11 @@ the sequence will be consistent every time the protocol is called.

    Parameters:
      -
    • domainA

    • -
    • domainB

    • -
    • sample_size

    • -
    • repeats

    • +
    • domainA – one domain, an object of qp.data.LabelledCollection

    • +
    • domainB – another domain, an object of qp.data.LabelledCollection

    • +
    • sample_size – integer, the number of instances in each sample; if None (default) then it is taken from +qp.environ[“SAMPLE_SIZE”]. If this is not set, a ValueError exception is raised.

    • +
    • repeats – int, number of samples to draw for every mixture rate

    • prevalence – the prevalence to preserve along the mixtures. If specified, should be an array containing one prevalence value (positive float) for each class and summing up to one. If not specified, the prevalence will be taken from domain A (default).

    • @@ -684,13 +685,13 @@ will be the same every time the protocol is called)

      sample(indexes)
      -

      Extract one sample determined by the given parameters

      +

      Realizes the sample given a pair of indexes of the instances from A and B.

      Parameters:
      -

      params – all the necessary parameters to generate a sample

      +

      indexes – indexes of the instances to select from A and B

      Returns:
      -

      one sample (the same sample has to be generated for the same parameters)

      +

      an instance of qp.data.LabelledCollection

      @@ -698,10 +699,10 @@ will be the same every time the protocol is called)

      samples_parameters()
      -

      This function has to return all the necessary parameters to replicate the samples

      +

      Return all the necessary parameters to replicate the samples according to this protocol.

      Returns:
      -

      a list of parameters, each of which serves to deterministically generate a sample

      +

      a list of zipped indexes (from A and B) that realize the sampling

      @@ -709,10 +710,10 @@ will be the same every time the protocol is called)

      total()
      -

      Indicates the total number of samples that the protocol generates.

      +

      Returns the number of samples that will be generated (equal to “repeats * mixture_points”)

      Returns:
      -

      The number of samples to generate if known, or None otherwise.

      +

      int
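      A hypothetical sketch of the domain-mixing protocol documented above (the class name DomainMixer is an assumption, since the class signature is not visible in this excerpt):
      >>> from quapy.protocol import DomainMixer
      >>> prot = DomainMixer(domainA, domainB, sample_size=200, repeats=5)   # domainA, domainB: qp.data.LabelledCollection objects
      >>> for sample, prev in prot():
      ...     pass   # each sample mixes instances of A and B at one of the mixture rates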

      @@ -742,13 +743,13 @@ to “labelled_collection” to get instead instances of LabelledCollection

      <
      sample(index)
      -

      Extract one sample determined by the given parameters

      +

      Realizes the sample given the index of the instances.

      Parameters:
      -

      params – all the necessary parameters to generate a sample

      +

      index – indexes of the instances to select

      Returns:
      -

      one sample (the same sample has to be generated for the same parameters)

      +

      an instance of qp.data.LabelledCollection

      @@ -756,10 +757,10 @@ to “labelled_collection” to get instead instances of LabelledCollection

      <
      samples_parameters()
      -

      This function has to return all the necessary parameters to replicate the samples

      +

      Return all the necessary parameters to replicate the samples according to the NPP protocol.

      Returns:
      -

      a list of parameters, each of which serves to deterministically generate a sample

      +

      a list of indexes that realize the NPP sampling

      @@ -767,10 +768,10 @@ to “labelled_collection” to get instead instances of LabelledCollection

      <
      total()
      -

      Indicates the total number of samples that the protocol generates.

      +

      Returns the number of samples that will be generated (equal to “repeats”)

      Returns:
      -

      The number of samples to generate if known, or None otherwise.

      +

      int

      @@ -781,6 +782,7 @@ to “labelled_collection” to get instead instances of LabelledCollection

      <
      class quapy.protocol.OnLabelledCollectionProtocol

      Bases: object

      +

      Protocols that generate samples from a qp.data.LabelledCollection object.

      RETURN_TYPES = ['sample_prev', 'labelled_collection']
      @@ -789,17 +791,52 @@ to “labelled_collection” to get instead instances of LabelledCollection

      <
      classmethod get_collator(return_type='sample_prev')
      -
      +

      Returns a collator function, i.e., a function that prepares the yielded data

      +
      +
      Parameters:
      +

      return_type – either ‘sample_prev’ (default) if the collator is requested to yield tuples of +(sample, prevalence), or ‘labelled_collection’ when it is requested to yield instances of +qp.data.LabelledCollection

      +
      +
      Returns:
      +

      the collator function (a callable function that takes as input an instance of +qp.data.LabelledCollection)

      +
      +
      +
      get_labelled_collection()
      -
      +

      Returns the labelled collection on which this protocol acts.

      +
      +
      Returns:
      +

      an object of type qp.data.LabelledCollection

      +
      +
      +
    on_preclassified_instances(pre_classifications, in_place=False)
    -
    +

    Returns a copy of this protocol that acts on a modified version of the original +qp.data.LabelledCollection in which the original instances have been replaced +with the outputs of a classifier for each instance. (This is convenient for speeding up +the evaluation procedures for many samples, by pre-classifying the instances in advance.)

    +
    +
    Parameters:
    +
      +
    • pre_classifications – the predictions issued by a classifier, typically an array-like +with shape (n_instances,) when the classifier is a hard one, or with shape +(n_instances, n_classes) when the classifier is a probabilistic one.

    • +
    • in_place – whether to apply the modification in place (True) or return a new copy (False, default).

    • +
    +
    +
    Returns:
    +

    a copy of this protocol

    +
    +
    +
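    A hedged sketch of the pre-classification speed-up described above (the names classifier and prot, and the .instances attribute, are assumptions made for illustration):
    >>> # pre-classify the collection once, then reuse the outputs for every generated sample
    >>> lc = prot.get_labelled_collection()
    >>> posteriors = classifier.predict_proba(lc.instances)
    >>> fast_prot = prot.on_preclassified_instances(posteriors, in_place=False)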
    @@ -830,13 +867,13 @@ to “labelled_collection” to get instead instances of LabelledCollection

    <
    sample(index)
    -

    Extract one sample determined by the given parameters

    +

    Realizes the sample given the index of the instances.

    Parameters:
    -

    params – all the necessary parameters to generate a sample

    +

    index – indexes of the instances to select

    Returns:
    -

    one sample (the same sample has to be generated for the same parameters)

    +

    an instance of qp.data.LabelledCollection

    @@ -844,10 +881,10 @@ to “labelled_collection” to get instead instances of LabelledCollection

    <
    samples_parameters()
    -

    This function has to return all the necessary parameters to replicate the samples

    +

    Return all the necessary parameters to replicate the samples according to the USimplexPP protocol.

    Returns:
    -

    a list of parameters, each of which serves to deterministically generate a sample

    +

    a list of indexes that realize the USimplexPP sampling

    @@ -855,10 +892,10 @@ to “labelled_collection” to get instead instances of LabelledCollection

    <
    total()
    -

    Indicates the total number of samples that the protocol generates.

    +

    Returns the number of samples that will be generated (equal to “repeats”)

    Returns:
    -

    The number of samples to generate if known, or None otherwise.

    +

    int

    diff --git a/docs/build/html/quapy.method.html b/docs/build/html/quapy.method.html index 19a1e0b..4525456 100644 --- a/docs/build/html/quapy.method.html +++ b/docs/build/html/quapy.method.html @@ -458,76 +458,6 @@ learner has been trained outside the quantifier.

    -
    -
    -class quapy.method.aggregative.ELM(svmperf_base=None, loss='01', **kwargs)
    -

    Bases: AggregativeQuantifier, BinaryQuantifier

    -

    Class of Explicit Loss Minimization (ELM) quantifiers. -Quantifiers based on ELM represent a family of methods based on structured output learning; -these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss -measure. This implementation relies on -Joachims’ SVM perf structured output -learning algorithm, which has to be installed and patched for the purpose (see this -script).

    -
    -
    Parameters:
    -
    -
    -
    -
    -
    -aggregate(classif_predictions: ndarray)
    -

    Implements the aggregation of label predictions.

    -
    -
    Parameters:
    -

    classif_predictionsnp.ndarray of label predictions

    -
    -
    Returns:
    -

    np.ndarray of shape (n_classes,) with class prevalence estimates.

    -
    -
    -
    - -
    -
    -classify(X, y=None)
    -

    Provides the label predictions for the given instances. The predictions should respect the format expected by -aggregate(), i.e., posterior probabilities for probabilistic quantifiers, or crisp predictions for -non-probabilistic quantifiers

    -
    -
    Parameters:
    -

    instances – array-like

    -
    -
    Returns:
    -

    np.ndarray of shape (n_instances,) with label predictions

    -
    -
    -
    - -
    -
    -fit(data: LabelledCollection, fit_classifier=True)
    -

    Trains the aggregative quantifier

    -
    -
    Parameters:
    -
      -
    • data – a quapy.data.base.LabelledCollection consisting of the training data

    • -
    • fit_classifier – whether or not to train the learner (default is True). Set to False if the -learner has been trained outside the quantifier.

    • -
    -
    -
    Returns:
    -

    self

    -
    -
    -
    - -
    -
    class quapy.method.aggregative.EMQ(classifier: BaseEstimator, exact_train_prev=True, recalib=None)
    @@ -627,12 +557,6 @@ learner has been trained outside the quantifier.

    alias of EMQ

    -
    -
    -quapy.method.aggregative.ExplicitLossMinimisation
    -

    alias of ELM

    -
    -
    class quapy.method.aggregative.HDy(classifier: BaseEstimator, val_split=0.4)
    @@ -782,7 +706,7 @@ validation data, or as an integer, indicating that the misclassification rates s
    -class quapy.method.aggregative.OneVsAllAggregative(binary_quantifier, n_jobs=None, parallel_backend='loky')
    +class quapy.method.aggregative.OneVsAllAggregative(binary_quantifier, n_jobs=None, parallel_backend='multiprocessing')

    Bases: OneVsAllGeneric, AggregativeQuantifier

    Allows any binary quantifier to perform quantification on single-label datasets. The method maintains one binary quantifier for each class, and then l1-normalizes the outputs so that the @@ -1029,108 +953,6 @@ learner has been trained outside the quantifier.

    -
    -
    -class quapy.method.aggregative.SVMAE(svmperf_base=None, **kwargs)
    -

    Bases: ELM

    -

    SVM(AE), which attempts to minimize Absolute Error as first used by -Moreo and Sebastiani, 2021. -Equivalent to:

    -
    >>> ELM(svmperf_base, loss='mae', **kwargs)
    -
    -
    -
    -
    Parameters:
    -
      -
    • svmperf_base – path to the folder containing the binary files of SVM perf

    • -
    • kwargs – rest of SVM perf’s parameters

    • -
    -
    -
    -
    - -
    -
    -class quapy.method.aggregative.SVMKLD(svmperf_base=None, **kwargs)
    -

    Bases: ELM

    -

    SVM(KLD), which attempts to minimize the Kullback-Leibler Divergence as proposed by -Esuli et al. 2015. -Equivalent to:

    -
    >>> ELM(svmperf_base, loss='kld', **kwargs)
    -
    -
    -
    -
    Parameters:
    -
      -
    • svmperf_base – path to the folder containing the binary files of SVM perf

    • -
    • kwargs – rest of SVM perf’s parameters

    • -
    -
    -
    -
    - -
    -
    -class quapy.method.aggregative.SVMNKLD(svmperf_base=None, **kwargs)
    -

    Bases: ELM

    -

    SVM(NKLD), which attempts to minimize a version of the the Kullback-Leibler Divergence normalized -via the logistic function, as proposed by -Esuli et al. 2015. -Equivalent to:

    -
    >>> ELM(svmperf_base, loss='nkld', **kwargs)
    -
    -
    -
    -
    Parameters:
    -
      -
    • svmperf_base – path to the folder containing the binary files of SVM perf

    • -
    • kwargs – rest of SVM perf’s parameters

    • -
    -
    -
    -
    - -
    -
    -class quapy.method.aggregative.SVMQ(svmperf_base=None, **kwargs)
    -

    Bases: ELM

    -

    SVM(Q), which attempts to minimize the Q loss combining a classification-oriented loss and a -quantification-oriented loss, as proposed by -Barranquero et al. 2015. -Equivalent to:

    -
    >>> ELM(svmperf_base, loss='q', **kwargs)
    -
    -
    -
    -
    Parameters:
    -
      -
    • svmperf_base – path to the folder containing the binary files of SVM perf

    • -
    • kwargs – rest of SVM perf’s parameters

    • -
    -
    -
    -
    - -
    -
    -class quapy.method.aggregative.SVMRAE(svmperf_base=None, **kwargs)
    -

    Bases: ELM

    -

    SVM(RAE), which attempts to minimize Relative Absolute Error as first used by -Moreo and Sebastiani, 2021. -Equivalent to:

    -
    >>> ELM(svmperf_base, loss='mrae', **kwargs)
    -
    -
    -
    -
    Parameters:
    -
      -
    • svmperf_base – path to the folder containing the binary files of SVM perf

    • -
    • kwargs – rest of SVM perf’s parameters

    • -
    -
    -
    -
    -
    class quapy.method.aggregative.T50(classifier: BaseEstimator, val_split=0.4)
    @@ -1247,6 +1069,162 @@ validation data, or as an integer, indicating that the misclassification rates s quapy.method.aggregative.cross_generate_predictions_depr(data, classifier, val_split, probabilistic, fit_classifier, method_name='')
    +
    +
    +quapy.method.aggregative.newELM(svmperf_base=None, loss='01', C=1)
    +

    Explicit Loss Minimization (ELM) quantifiers. +Quantifiers based on ELM represent a family of methods based on structured output learning; +these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss +measure. This implementation relies on +Joachims’ SVM perf structured output +learning algorithm, which has to be installed and patched for the purpose (see this +script). +This function is equivalent to:

    +
    >>> CC(SVMperf(svmperf_base, loss, C))
    +
    +
    +
    +
    Parameters:
    +
      +
    • svmperf_base – path to the folder containing the binary files of SVM perf; if set to None (default) +this path will be obtained from qp.environ[‘SVMPERF_HOME’]

    • +
    • loss – the loss to optimize (see quapy.classification.svmperf.SVMperf.valid_losses)

    • +
    • C – trade-off between training error and margin (default 1)

    • +
    +
    +
    Returns:
    +

    returns an instance of CC set to work with SVMperf (with loss and C set properly) as the +underlying classifier

    +
    +
    +
    + +
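    A minimal usage sketch for newELM (hedged: it assumes qp.environ['SVMPERF_HOME'] points to a patched SVM-perf build, and that the returned CC quantifier follows the usual fit/quantify interface):
    >>> import quapy as qp
    >>> from quapy.method.aggregative import newELM
    >>> qp.environ['SVMPERF_HOME'] = './svm_perf_quantification'
    >>> quantifier = newELM(loss='q', C=1)
    >>> quantifier.fit(train)                             # train: a qp.data.LabelledCollection
    >>> estim_prev = quantifier.quantify(test.instances)  # test: a qp.data.LabelledCollection (placeholder)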
    +
    +quapy.method.aggregative.newSVMAE(svmperf_base=None, C=1)
    +

    SVM(AE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Absolute Error as first used by +Moreo and Sebastiani, 2021. +Equivalent to:

    +
    >>> CC(SVMperf(svmperf_base, loss='mae', C=C))
    +
    +
    +

    Quantifiers based on ELM represent a family of methods based on structured output learning; +these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss +measure. This implementation relies on +Joachims’ SVM perf structured output +learning algorithm, which has to be installed and patched for the purpose (see this +script). +This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))

    +
    +
    Parameters:
    +
      +
    • svmperf_base – path to the folder containing the binary files of SVM perf; if set to None (default) +this path will be obtained from qp.environ[‘SVMPERF_HOME’]

    • +
    • C – trade-off between training error and margin (default 1)

    • +
    +
    +
    Returns:
    +

    returns an instance of CC set to work with SVMperf (with loss and C set properly) as the +underlying classifier

    +
    +
    +
    + +
    +
    +quapy.method.aggregative.newSVMKLD(svmperf_base=None, C=1)
    +

    SVM(KLD) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Kullback-Leibler Divergence +normalized via the logistic function, as proposed by +Esuli et al. 2015. +Equivalent to:

    +
    >>> CC(SVMperf(svmperf_base, loss='nkld', C=C))
    +
    +
    +

    Quantifiers based on ELM represent a family of methods based on structured output learning; +these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss +measure. This implementation relies on +Joachims’ SVM perf structured output +learning algorithm, which has to be installed and patched for the purpose (see this +script). +This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))

    +
    +
    Parameters:
    +
      +
    • svmperf_base – path to the folder containing the binary files of SVM perf; if set to None (default) +this path will be obtained from qp.environ[‘SVMPERF_HOME’]

    • +
    • C – trade-off between training error and margin (default 1)

    • +
    +
    +
    Returns:
    +

    returns an instance of CC set to work with SVMperf (with loss and C set properly) as the +underlying classifier

    +
    +
    +
    + +
    +
    +quapy.method.aggregative.newSVMQ(svmperf_base=None, C=1)
    +

    SVM(Q) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Q loss combining a +classification-oriented loss and a quantification-oriented loss, as proposed by +Barranquero et al. 2015. +Equivalent to:

    +
    >>> CC(SVMperf(svmperf_base, loss='q', C=C))
    +
    +
    +

    Quantifiers based on ELM represent a family of methods based on structured output learning; +these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss +measure. This implementation relies on +Joachims’ SVM perf structured output +learning algorithm, which has to be installed and patched for the purpose (see this +script). +This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))

    +
    +
    Parameters:
    +
      +
    • svmperf_base – path to the folder containing the binary files of SVM perf; if set to None (default) +this path will be obtained from qp.environ[‘SVMPERF_HOME’]

    • +
    • C – trade-off between training error and margin (default 1)

    • +
    +
    +
    Returns:
    +

    returns an instance of CC set to work with SVMperf (with loss and C set properly) as the +underlying classifier

    +
    +
    +
    + +
    +
    +quapy.method.aggregative.newSVMRAE(svmperf_base=None, C=1)
    +

    SVM(RAE) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Relative Absolute Error as first +used by Moreo and Sebastiani, 2021. +Equivalent to:

    +
    >>> CC(SVMperf(svmperf_base, loss='mrae', C=C))
    +
    +
    +

    Quantifiers based on ELM represent a family of methods based on structured output learning; +these quantifiers rely on classifiers that have been optimized using a quantification-oriented loss +measure. This implementation relies on +Joachims’ SVM perf structured output +learning algorithm, which has to be installed and patched for the purpose (see this +script). +This function is a wrapper around CC(SVMperf(svmperf_base, loss, C))

    +
    +
    Parameters:
    +
      +
    • svmperf_base – path to the folder containing the binary files of SVM perf; if set to None (default) +this path will be obtained from qp.environ[‘SVMPERF_HOME’]

    • +
    • C – trade-off between training error and margin (default 1)

    • +
    +
    +
    Returns:
    +

    returns an instance of CC set to work with SVMperf (with loss and C set properly) as the +underlying classifier

    +
    +
    +
    +
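    As a quick illustration of how the shortcuts above relate to newELM (a sketch based on the “Equivalent to” notes in each entry; the svmperf_base path is illustrative):
    >>> from quapy.method.aggregative import newELM, newSVMAE, newSVMQ
    >>> q1 = newSVMAE(svmperf_base='./svm_perf_quantification', C=1)
    >>> q2 = newELM(svmperf_base='./svm_perf_quantification', loss='mae', C=1)   # same configuration as q1
    >>> q3 = newSVMQ(svmperf_base='./svm_perf_quantification', C=1)              # ELM optimizing the Q loss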

    quapy.method.base

    @@ -1303,7 +1281,7 @@ validation data, or as an integer, indicating that the misclassification rates s
    -class quapy.method.base.OneVsAllGeneric(binary_quantifier, n_jobs=None, parallel_backend='loky')
    +class quapy.method.base.OneVsAllGeneric(binary_quantifier, n_jobs=None)

    Bases: OneVsAll, BaseQuantifier

    Allows any binary quantifier to perform quantification on single-label datasets. The method maintains one binary quantifier for each class, and then l1-normalizes the outputs so that the class prevalence values sum up to 1.

    @@ -1343,8 +1321,8 @@ quantifier for each class, and then l1-normalizes the outputs so that the class
    -
    -quapy.method.base.getOneVsAll(binary_quantifier, n_jobs=None, parallel_backend='loky')
    +
    +quapy.method.base.newOneVsAll(binary_quantifier, n_jobs=None)
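    A hypothetical sketch of the factory function above (the binary quantifier and the data objects are placeholders; the resulting one-vs-all quantifier is assumed to expose the usual fit/quantify interface):
    >>> from quapy.method.base import newOneVsAll
    >>> ova = newOneVsAll(binary_quantifier, n_jobs=2)   # binary_quantifier: any quantifier for binary data
    >>> ova.fit(train)                                   # train: a multiclass qp.data.LabelledCollection
    >>> prevalences = ova.quantify(test.instances)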
    diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js index 99c18d6..27108e2 100644 --- a/docs/build/html/searchindex.js +++ b/docs/build/html/searchindex.js @@ -1 +1 @@ (regenerated Sphinx search index; minified contents omitted)
"resp": 11, "subobject": 11, "nest": 11, "pipelin": 11, "__": 11, "simplif": 11, "2021": 11, "equival": 11, "cosest": 11, "heurist": 11, "choos": 11, "ground": 11, "complement": 11, "param_mod_sel": 11, "param_model_sel": 11, "min_po": 11, "max_sample_s": 11, "closest": 11, "preliminari": 11, "recomput": 11, "compat": 11, "l": 11, "base_quantifier_class": 11, "factori": 11, "common": 11, "doc_embedding_s": 11, "stats_siz": 11, "lstm_hidden_s": 11, "lstm_nlayer": 11, "ff_layer": 11, "1024": 11, "bidirect": 11, "qdrop_p": 11, "order_bi": 11, "cell": 11, "connect": 11, "ff": 11, "sort": 11, "doc_embed": 11, "doc_posterior": 11, "recip": 11, "care": 11, "regist": 11, "hook": 11, "n_epoch": 11, "tr_iter_per_poch": 11, "va_iter_per_poch": 11, "checkpointdir": 11, "checkpointnam": 11, "phase": 11, "anyth": 11, "truth": 11, "mlpe": 11, "lazi": 11, "put": 11, "assumpion": 11, "beat": [9, 11], "estimant": 11, "kundaj": 9, "shrikumar": 9, "novemb": 9, "232": 9, "pmlr": 9, "outpu": [], "partit": 9, "ight": [], "valueerror": 8, "attach": 10, "mix": 10, "onevsallaggreg": 11, "parallel_backend": 11, "loki": 11, "backend": 11, "cannot": 11, "temp": 11, "getonevsal": 11}, "objects": {"": [[8, 0, 0, "-", "quapy"]], "quapy": [[9, 0, 0, "-", "classification"], [10, 0, 0, "-", "data"], [8, 0, 0, "-", "error"], [8, 0, 0, "-", "evaluation"], [8, 0, 0, "-", "functional"], [11, 0, 0, "-", "method"], [8, 0, 0, "-", "model_selection"], [8, 0, 0, "-", "plot"], [8, 0, 0, "-", "protocol"], [8, 0, 0, "-", "util"]], "quapy.classification": [[9, 0, 0, "-", "calibration"], [9, 0, 0, "-", "methods"], [9, 0, 0, "-", "neural"], [9, 0, 0, "-", "svmperf"]], "quapy.classification.calibration": [[9, 1, 1, "", "BCTSCalibration"], [9, 1, 1, "", "NBVSCalibration"], [9, 1, 1, "", "RecalibratedProbabilisticClassifier"], [9, 1, 1, "", "RecalibratedProbabilisticClassifierBase"], [9, 1, 1, "", "TSCalibration"], [9, 1, 1, "", "VSCalibration"]], "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase": [[9, 2, 1, "", "classes_"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "fit_cv"], [9, 3, 1, "", "fit_tr_val"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"]], "quapy.classification.methods": [[9, 1, 1, "", "LowRankLogisticRegression"]], "quapy.classification.methods.LowRankLogisticRegression": [[9, 3, 1, "", "fit"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"], [9, 3, 1, "", "set_params"], [9, 3, 1, "", "transform"]], "quapy.classification.neural": [[9, 1, 1, "", "CNNnet"], [9, 1, 1, "", "LSTMnet"], [9, 1, 1, "", "NeuralClassifierTrainer"], [9, 1, 1, "", "TextClassifierNet"], [9, 1, 1, "", "TorchDataset"]], "quapy.classification.neural.CNNnet": [[9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "get_params"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"]], "quapy.classification.neural.LSTMnet": [[9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "get_params"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"]], "quapy.classification.neural.NeuralClassifierTrainer": [[9, 2, 1, "", "device"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"], [9, 3, 1, "", "reset_net_params"], [9, 3, 1, "", "set_params"], [9, 3, 1, "", "transform"]], "quapy.classification.neural.TextClassifierNet": [[9, 3, 1, "", "dimensions"], [9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "forward"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict_proba"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"], [9, 3, 
1, "", "xavier_uniform"]], "quapy.classification.neural.TorchDataset": [[9, 3, 1, "", "asDataloader"]], "quapy.classification.svmperf": [[9, 1, 1, "", "SVMperf"]], "quapy.classification.svmperf.SVMperf": [[9, 3, 1, "", "decision_function"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "set_params"], [9, 4, 1, "", "valid_losses"]], "quapy.data": [[10, 0, 0, "-", "base"], [10, 0, 0, "-", "datasets"], [10, 0, 0, "-", "preprocessing"], [10, 0, 0, "-", "reader"]], "quapy.data.base": [[10, 1, 1, "", "Dataset"], [10, 1, 1, "", "LabelledCollection"]], "quapy.data.base.Dataset": [[10, 3, 1, "", "SplitStratified"], [10, 2, 1, "", "binary"], [10, 2, 1, "", "classes_"], [10, 3, 1, "", "kFCV"], [10, 3, 1, "", "load"], [10, 2, 1, "", "n_classes"], [10, 3, 1, "", "stats"], [10, 2, 1, "", "train_test"], [10, 2, 1, "", "vocabulary_size"]], "quapy.data.base.LabelledCollection": [[10, 2, 1, "", "X"], [10, 2, 1, "", "Xp"], [10, 2, 1, "", "Xy"], [10, 2, 1, "", "binary"], [10, 3, 1, "", "counts"], [10, 3, 1, "", "kFCV"], [10, 3, 1, "", "load"], [10, 3, 1, "", "mix"], [10, 2, 1, "", "n_classes"], [10, 2, 1, "", "p"], [10, 3, 1, "", "prevalence"], [10, 3, 1, "", "sampling"], [10, 3, 1, "", "sampling_from_index"], [10, 3, 1, "", "sampling_index"], [10, 3, 1, "", "split_random"], [10, 3, 1, "", "split_stratified"], [10, 3, 1, "", "stats"], [10, 3, 1, "", "uniform_sampling"], [10, 3, 1, "", "uniform_sampling_index"], [10, 2, 1, "", "y"]], "quapy.data.datasets": [[10, 5, 1, "", "fetch_UCIDataset"], [10, 5, 1, "", "fetch_UCILabelledCollection"], [10, 5, 1, "", "fetch_lequa2022"], [10, 5, 1, "", "fetch_reviews"], [10, 5, 1, "", "fetch_twitter"], [10, 5, 1, "", "warn"]], "quapy.data.preprocessing": [[10, 1, 1, "", "IndexTransformer"], [10, 5, 1, "", "index"], [10, 5, 1, "", "reduce_columns"], [10, 5, 1, "", "standardize"], [10, 5, 1, "", "text2tfidf"]], "quapy.data.preprocessing.IndexTransformer": [[10, 3, 1, "", "add_word"], [10, 3, 1, "", "fit"], [10, 3, 1, "", "fit_transform"], [10, 3, 1, "", "transform"], [10, 3, 1, "", "vocabulary_size"]], "quapy.data.reader": [[10, 5, 1, "", "binarize"], [10, 5, 1, "", "from_csv"], [10, 5, 1, "", "from_sparse"], [10, 5, 1, "", "from_text"], [10, 5, 1, "", "reindex_labels"]], "quapy.error": [[8, 5, 1, "", "absolute_error"], [8, 5, 1, "", "acc_error"], [8, 5, 1, "", "acce"], [8, 5, 1, "", "ae"], [8, 5, 1, "", "f1_error"], [8, 5, 1, "", "f1e"], [8, 5, 1, "", "from_name"], [8, 5, 1, "", "kld"], [8, 5, 1, "", "mae"], [8, 5, 1, "", "mean_absolute_error"], [8, 5, 1, "", "mean_relative_absolute_error"], [8, 5, 1, "", "mkld"], [8, 5, 1, "", "mnkld"], [8, 5, 1, "", "mrae"], [8, 5, 1, "", "mse"], [8, 5, 1, "", "nkld"], [8, 5, 1, "", "rae"], [8, 5, 1, "", "relative_absolute_error"], [8, 5, 1, "", "se"], [8, 5, 1, "", "smooth"]], "quapy.evaluation": [[8, 5, 1, "", "evaluate"], [8, 5, 1, "", "evaluation_report"], [8, 5, 1, "", "prediction"]], "quapy.functional": [[8, 5, 1, "", "HellingerDistance"], [8, 5, 1, "", "TopsoeDistance"], [8, 5, 1, "", "adjusted_quantification"], [8, 5, 1, "", "check_prevalence_vector"], [8, 5, 1, "", "get_nprevpoints_approximation"], [8, 5, 1, "", "normalize_prevalence"], [8, 5, 1, "", "num_prevalence_combinations"], [8, 5, 1, "", "prevalence_from_labels"], [8, 5, 1, "", "prevalence_from_probabilities"], [8, 5, 1, "", "prevalence_linspace"], [8, 5, 1, "", "strprev"], [8, 5, 1, "", "uniform_prevalence_sampling"], [8, 5, 1, "", "uniform_simplex_sampling"]], "quapy.method": [[11, 0, 0, "-", "aggregative"], [11, 0, 0, "-", "base"], [11, 0, 0, "-", 
"meta"], [11, 0, 0, "-", "neural"], [11, 0, 0, "-", "non_aggregative"]], "quapy.method.aggregative": [[11, 1, 1, "", "ACC"], [11, 4, 1, "", "AdjustedClassifyAndCount"], [11, 1, 1, "", "AggregativeProbabilisticQuantifier"], [11, 1, 1, "", "AggregativeQuantifier"], [11, 1, 1, "", "CC"], [11, 4, 1, "", "ClassifyAndCount"], [11, 1, 1, "", "DistributionMatching"], [11, 1, 1, "", "DyS"], [11, 1, 1, "", "ELM"], [11, 1, 1, "", "EMQ"], [11, 4, 1, "", "ExpectationMaximizationQuantifier"], [11, 4, 1, "", "ExplicitLossMinimisation"], [11, 1, 1, "", "HDy"], [11, 4, 1, "", "HellingerDistanceY"], [11, 1, 1, "", "MAX"], [11, 1, 1, "", "MS"], [11, 1, 1, "", "MS2"], [11, 4, 1, "", "MedianSweep"], [11, 4, 1, "", "MedianSweep2"], [11, 1, 1, "", "OneVsAllAggregative"], [11, 1, 1, "", "PACC"], [11, 1, 1, "", "PCC"], [11, 4, 1, "", "ProbabilisticAdjustedClassifyAndCount"], [11, 4, 1, "", "ProbabilisticClassifyAndCount"], [11, 4, 1, "", "SLD"], [11, 1, 1, "", "SMM"], [11, 1, 1, "", "SVMAE"], [11, 1, 1, "", "SVMKLD"], [11, 1, 1, "", "SVMNKLD"], [11, 1, 1, "", "SVMQ"], [11, 1, 1, "", "SVMRAE"], [11, 1, 1, "", "T50"], [11, 1, 1, "", "ThresholdOptimization"], [11, 1, 1, "", "X"], [11, 5, 1, "", "cross_generate_predictions"], [11, 5, 1, "", "cross_generate_predictions_depr"]], "quapy.method.aggregative.ACC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "getPteCondEstim"], [11, 3, 1, "", "solve_adjustment"]], "quapy.method.aggregative.AggregativeProbabilisticQuantifier": [[11, 3, 1, "", "classify"]], "quapy.method.aggregative.AggregativeQuantifier": [[11, 3, 1, "", "aggregate"], [11, 2, 1, "", "classes_"], [11, 2, 1, "", "classifier"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.aggregative.CC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.DistributionMatching": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.DyS": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.ELM": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.EMQ": [[11, 3, 1, "", "EM"], [11, 4, 1, "", "EPSILON"], [11, 4, 1, "", "MAX_ITER"], [11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "predict_proba"]], "quapy.method.aggregative.HDy": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.OneVsAllAggregative": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"]], "quapy.method.aggregative.PACC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "getPteCondEstim"]], "quapy.method.aggregative.PCC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.SMM": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.ThresholdOptimization": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.base": [[11, 1, 1, "", "BaseQuantifier"], [11, 1, 1, "", "BinaryQuantifier"], [11, 1, 1, "", "OneVsAll"], [11, 1, 1, "", "OneVsAllGeneric"], [11, 5, 1, "", "getOneVsAll"]], "quapy.method.base.BaseQuantifier": [[11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.base.OneVsAllGeneric": [[11, 2, 1, "", "classes_"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.meta": [[11, 5, 1, "", "EACC"], [11, 5, 1, "", "ECC"], [11, 5, 1, "", "EEMQ"], [11, 5, 1, "", "EHDy"], [11, 5, 1, "", "EPACC"], [11, 1, 1, "", "Ensemble"], [11, 5, 1, "", 
"ensembleFactory"], [11, 5, 1, "", "get_probability_distribution"]], "quapy.method.meta.Ensemble": [[11, 4, 1, "", "VALID_POLICIES"], [11, 2, 1, "", "aggregative"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "get_params"], [11, 2, 1, "", "probabilistic"], [11, 3, 1, "", "quantify"], [11, 3, 1, "", "set_params"]], "quapy.method.neural": [[11, 1, 1, "", "QuaNetModule"], [11, 1, 1, "", "QuaNetTrainer"], [11, 5, 1, "", "mae_loss"]], "quapy.method.neural.QuaNetModule": [[11, 2, 1, "", "device"], [11, 3, 1, "", "forward"], [11, 4, 1, "", "training"]], "quapy.method.neural.QuaNetTrainer": [[11, 2, 1, "", "classes_"], [11, 3, 1, "", "clean_checkpoint"], [11, 3, 1, "", "clean_checkpoint_dir"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "get_params"], [11, 3, 1, "", "quantify"], [11, 3, 1, "", "set_params"]], "quapy.method.non_aggregative": [[11, 1, 1, "", "MaximumLikelihoodPrevalenceEstimation"]], "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation": [[11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.model_selection": [[8, 1, 1, "", "GridSearchQ"], [8, 5, 1, "", "cross_val_predict"]], "quapy.model_selection.GridSearchQ": [[8, 3, 1, "", "best_model"], [8, 3, 1, "", "fit"], [8, 3, 1, "", "get_params"], [8, 3, 1, "", "quantify"], [8, 3, 1, "", "set_params"]], "quapy.plot": [[8, 5, 1, "", "binary_bias_bins"], [8, 5, 1, "", "binary_bias_global"], [8, 5, 1, "", "binary_diagonal"], [8, 5, 1, "", "brokenbar_supremacy_by_drift"], [8, 5, 1, "", "error_by_drift"]], "quapy.protocol": [[8, 1, 1, "", "APP"], [8, 1, 1, "", "AbstractProtocol"], [8, 1, 1, "", "AbstractStochasticSeededProtocol"], [8, 1, 1, "", "DomainMixer"], [8, 1, 1, "", "NPP"], [8, 1, 1, "", "OnLabelledCollectionProtocol"], [8, 1, 1, "", "USimplexPP"]], "quapy.protocol.APP": [[8, 3, 1, "", "prevalence_grid"], [8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.AbstractProtocol": [[8, 3, 1, "", "total"]], "quapy.protocol.AbstractStochasticSeededProtocol": [[8, 3, 1, "", "collator"], [8, 2, 1, "", "random_state"], [8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"]], "quapy.protocol.DomainMixer": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.NPP": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.OnLabelledCollectionProtocol": [[8, 4, 1, "", "RETURN_TYPES"], [8, 3, 1, "", "get_collator"], [8, 3, 1, "", "get_labelled_collection"], [8, 3, 1, "", "on_preclassified_instances"]], "quapy.protocol.USimplexPP": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.util": [[8, 1, 1, "", "EarlyStop"], [8, 5, 1, "", "create_if_not_exist"], [8, 5, 1, "", "create_parent_dir"], [8, 5, 1, "", "download_file"], [8, 5, 1, "", "download_file_if_not_exists"], [8, 5, 1, "", "get_quapy_home"], [8, 5, 1, "", "map_parallel"], [8, 5, 1, "", "parallel"], [8, 5, 1, "", "pickled_resource"], [8, 5, 1, "", "save_text_file"], [8, 5, 1, "", "temp_seed"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:property", "3": "py:method", "4": "py:attribute", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "property", "Python property"], "3": ["py", "method", "Python method"], "4": ["py", "attribute", "Python attribute"], "5": ["py", "function", "Python function"]}, "titleterms": {"dataset": [0, 10], "review": 0, "twitter": 0, "sentiment": 0, "uci": 0, "machin": 0, "learn": 0, 
"issu": 0, "ad": 0, "custom": 0, "data": [0, 10], "process": 0, "evalu": [1, 8], "error": [1, 5, 8], "measur": 1, "protocol": [1, 8], "instal": 2, "requir": 2, "svm": 2, "perf": 2, "quantif": [2, 3, 4, 5], "orient": [2, 4], "loss": [2, 3, 4], "method": [3, 9, 11], "aggreg": [3, 11], "The": 3, "classifi": 3, "count": 3, "variant": 3, "expect": 3, "maxim": 3, "emq": 3, "helling": 3, "distanc": 3, "y": 3, "hdy": 3, "explicit": 3, "minim": 3, "meta": [3, 11], "model": [3, 4], "ensembl": 3, "quanet": 3, "neural": [3, 9, 11], "network": 3, "select": 4, "target": 4, "classif": [4, 9], "plot": [5, 8], "diagon": 5, "bia": 5, "drift": 5, "welcom": 6, "quapi": [6, 7, 8, 9, 10, 11], "": 6, "document": 6, "introduct": 6, "A": 6, "quick": 6, "exampl": 6, "featur": 6, "content": [6, 8, 9, 10, 11], "indic": 6, "tabl": 6, "packag": [8, 9, 10, 11], "subpackag": 8, "submodul": [8, 9, 10, 11], "function": 8, "model_select": 8, "util": 8, "modul": [8, 9, 10, 11], "calibr": 9, "svmperf": 9, "base": [10, 11], "preprocess": 10, "reader": 10, "non_aggreg": 11}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Datasets": [[0, "datasets"]], "Reviews Datasets": [[0, "reviews-datasets"]], "Twitter Sentiment Datasets": [[0, "twitter-sentiment-datasets"]], "UCI Machine Learning": [[0, "uci-machine-learning"]], "Issues:": [[0, "issues"]], "Adding Custom Datasets": [[0, "adding-custom-datasets"]], "Data Processing": [[0, "data-processing"]], "Evaluation": [[1, "evaluation"]], "Error Measures": [[1, "error-measures"]], "Evaluation Protocols": [[1, "evaluation-protocols"]], "Installation": [[2, "installation"]], "Requirements": [[2, "requirements"]], "SVM-perf with quantification-oriented losses": [[2, "svm-perf-with-quantification-oriented-losses"]], "Quantification Methods": [[3, "quantification-methods"]], "Aggregative Methods": [[3, "aggregative-methods"]], "The Classify & Count variants": [[3, "the-classify-count-variants"]], "Expectation Maximization (EMQ)": [[3, "expectation-maximization-emq"]], "Hellinger Distance y (HDy)": [[3, "hellinger-distance-y-hdy"]], "Explicit Loss Minimization": [[3, "explicit-loss-minimization"]], "Meta Models": [[3, "meta-models"]], "Ensembles": [[3, "ensembles"]], "The QuaNet neural network": [[3, "the-quanet-neural-network"]], "Model Selection": [[4, "model-selection"]], "Targeting a Quantification-oriented loss": [[4, "targeting-a-quantification-oriented-loss"]], "Targeting a Classification-oriented loss": [[4, "targeting-a-classification-oriented-loss"]], "Plotting": [[5, "plotting"]], "Diagonal Plot": [[5, "diagonal-plot"]], "Quantification bias": [[5, "quantification-bias"]], "Error by Drift": [[5, "error-by-drift"]], "Welcome to QuaPy\u2019s documentation!": [[6, "welcome-to-quapy-s-documentation"]], "Introduction": [[6, "introduction"]], "A quick example:": [[6, "a-quick-example"]], "Features": [[6, "features"]], "Contents:": [[6, null]], "Indices and tables": [[6, "indices-and-tables"]], "quapy": [[7, "quapy"]], "quapy package": [[8, "quapy-package"]], "Submodules": [[8, "submodules"], [9, "submodules"], [10, "submodules"], [11, "submodules"]], "quapy.error": [[8, "module-quapy.error"]], "quapy.evaluation": [[8, "module-quapy.evaluation"]], "quapy.protocol": [[8, "quapy-protocol"]], "quapy.functional": [[8, 
"module-quapy.functional"]], "quapy.model_selection": [[8, "module-quapy.model_selection"]], "quapy.plot": [[8, "module-quapy.plot"]], "quapy.util": [[8, "module-quapy.util"]], "Subpackages": [[8, "subpackages"]], "Module contents": [[8, "module-quapy"], [9, "module-quapy.classification"], [10, "module-quapy.data"], [11, "module-quapy.method"]], "quapy.classification package": [[9, "quapy-classification-package"]], "quapy.classification.calibration": [[9, "quapy-classification-calibration"]], "quapy.classification.methods": [[9, "module-quapy.classification.methods"]], "quapy.classification.neural": [[9, "module-quapy.classification.neural"]], "quapy.classification.svmperf": [[9, "module-quapy.classification.svmperf"]], "quapy.data package": [[10, "quapy-data-package"]], "quapy.data.base": [[10, "module-quapy.data.base"]], "quapy.data.datasets": [[10, "module-quapy.data.datasets"]], "quapy.data.preprocessing": [[10, "module-quapy.data.preprocessing"]], "quapy.data.reader": [[10, "module-quapy.data.reader"]], "quapy.method package": [[11, "quapy-method-package"]], "quapy.method.aggregative": [[11, "module-quapy.method.aggregative"]], "quapy.method.base": [[11, "module-quapy.method.base"]], "quapy.method.meta": [[11, "module-quapy.method.meta"]], "quapy.method.neural": [[11, "module-quapy.method.neural"]], "quapy.method.non_aggregative": [[11, "module-quapy.method.non_aggregative"]]}, "indexentries": {"app (class in quapy.protocol)": [[8, "quapy.protocol.APP"]], "abstractprotocol (class in quapy.protocol)": [[8, "quapy.protocol.AbstractProtocol"]], "abstractstochasticseededprotocol (class in quapy.protocol)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol"]], "domainmixer (class in quapy.protocol)": [[8, "quapy.protocol.DomainMixer"]], "earlystop (class in quapy.util)": [[8, "quapy.util.EarlyStop"]], "gridsearchq (class in quapy.model_selection)": [[8, "quapy.model_selection.GridSearchQ"]], "hellingerdistance() (in module quapy.functional)": [[8, "quapy.functional.HellingerDistance"]], "npp (class in quapy.protocol)": [[8, "quapy.protocol.NPP"]], "onlabelledcollectionprotocol (class in quapy.protocol)": [[8, "quapy.protocol.OnLabelledCollectionProtocol"]], "return_types (quapy.protocol.onlabelledcollectionprotocol attribute)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES"]], "topsoedistance() (in module quapy.functional)": [[8, "quapy.functional.TopsoeDistance"]], "usimplexpp (class in quapy.protocol)": [[8, "quapy.protocol.USimplexPP"]], "absolute_error() (in module quapy.error)": [[8, "quapy.error.absolute_error"]], "acc_error() (in module quapy.error)": [[8, "quapy.error.acc_error"]], "acce() (in module quapy.error)": [[8, "quapy.error.acce"]], "adjusted_quantification() (in module quapy.functional)": [[8, "quapy.functional.adjusted_quantification"]], "ae() (in module quapy.error)": [[8, "quapy.error.ae"]], "best_model() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.best_model"]], "binary_bias_bins() (in module quapy.plot)": [[8, "quapy.plot.binary_bias_bins"]], "binary_bias_global() (in module quapy.plot)": [[8, "quapy.plot.binary_bias_global"]], "binary_diagonal() (in module quapy.plot)": [[8, "quapy.plot.binary_diagonal"]], "brokenbar_supremacy_by_drift() (in module quapy.plot)": [[8, "quapy.plot.brokenbar_supremacy_by_drift"]], "check_prevalence_vector() (in module quapy.functional)": [[8, "quapy.functional.check_prevalence_vector"]], "collator() (quapy.protocol.abstractstochasticseededprotocol method)": [[8, 
"quapy.protocol.AbstractStochasticSeededProtocol.collator"]], "create_if_not_exist() (in module quapy.util)": [[8, "quapy.util.create_if_not_exist"]], "create_parent_dir() (in module quapy.util)": [[8, "quapy.util.create_parent_dir"]], "cross_val_predict() (in module quapy.model_selection)": [[8, "quapy.model_selection.cross_val_predict"]], "download_file() (in module quapy.util)": [[8, "quapy.util.download_file"]], "download_file_if_not_exists() (in module quapy.util)": [[8, "quapy.util.download_file_if_not_exists"]], "error_by_drift() (in module quapy.plot)": [[8, "quapy.plot.error_by_drift"]], "evaluate() (in module quapy.evaluation)": [[8, "quapy.evaluation.evaluate"]], "evaluation_report() (in module quapy.evaluation)": [[8, "quapy.evaluation.evaluation_report"]], "f1_error() (in module quapy.error)": [[8, "quapy.error.f1_error"]], "f1e() (in module quapy.error)": [[8, "quapy.error.f1e"]], "fit() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.fit"]], "from_name() (in module quapy.error)": [[8, "quapy.error.from_name"]], "get_collator() (quapy.protocol.onlabelledcollectionprotocol class method)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.get_collator"]], "get_labelled_collection() (quapy.protocol.onlabelledcollectionprotocol method)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection"]], "get_nprevpoints_approximation() (in module quapy.functional)": [[8, "quapy.functional.get_nprevpoints_approximation"]], "get_params() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.get_params"]], "get_quapy_home() (in module quapy.util)": [[8, "quapy.util.get_quapy_home"]], "kld() (in module quapy.error)": [[8, "quapy.error.kld"]], "mae() (in module quapy.error)": [[8, "quapy.error.mae"]], "map_parallel() (in module quapy.util)": [[8, "quapy.util.map_parallel"]], "mean_absolute_error() (in module quapy.error)": [[8, "quapy.error.mean_absolute_error"]], "mean_relative_absolute_error() (in module quapy.error)": [[8, "quapy.error.mean_relative_absolute_error"]], "mkld() (in module quapy.error)": [[8, "quapy.error.mkld"]], "mnkld() (in module quapy.error)": [[8, "quapy.error.mnkld"]], "module": [[8, "module-quapy"], [8, "module-quapy.error"], [8, "module-quapy.evaluation"], [8, "module-quapy.functional"], [8, "module-quapy.model_selection"], [8, "module-quapy.plot"], [8, "module-quapy.protocol"], [8, "module-quapy.util"], [9, "module-quapy.classification"], [9, "module-quapy.classification.calibration"], [9, "module-quapy.classification.methods"], [9, "module-quapy.classification.neural"], [9, "module-quapy.classification.svmperf"], [10, "module-quapy.data"], [10, "module-quapy.data.base"], [10, "module-quapy.data.datasets"], [10, "module-quapy.data.preprocessing"], [10, "module-quapy.data.reader"], [11, "module-quapy.method"], [11, "module-quapy.method.aggregative"], [11, "module-quapy.method.base"], [11, "module-quapy.method.meta"], [11, "module-quapy.method.neural"], [11, "module-quapy.method.non_aggregative"]], "mrae() (in module quapy.error)": [[8, "quapy.error.mrae"]], "mse() (in module quapy.error)": [[8, "quapy.error.mse"]], "nkld() (in module quapy.error)": [[8, "quapy.error.nkld"]], "normalize_prevalence() (in module quapy.functional)": [[8, "quapy.functional.normalize_prevalence"]], "num_prevalence_combinations() (in module quapy.functional)": [[8, "quapy.functional.num_prevalence_combinations"]], "on_preclassified_instances() (quapy.protocol.onlabelledcollectionprotocol method)": [[8, 
"quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances"]], "parallel() (in module quapy.util)": [[8, "quapy.util.parallel"]], "pickled_resource() (in module quapy.util)": [[8, "quapy.util.pickled_resource"]], "prediction() (in module quapy.evaluation)": [[8, "quapy.evaluation.prediction"]], "prevalence_from_labels() (in module quapy.functional)": [[8, "quapy.functional.prevalence_from_labels"]], "prevalence_from_probabilities() (in module quapy.functional)": [[8, "quapy.functional.prevalence_from_probabilities"]], "prevalence_grid() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.prevalence_grid"]], "prevalence_linspace() (in module quapy.functional)": [[8, "quapy.functional.prevalence_linspace"]], "quantify() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.quantify"]], "quapy": [[8, "module-quapy"]], "quapy.error": [[8, "module-quapy.error"]], "quapy.evaluation": [[8, "module-quapy.evaluation"]], "quapy.functional": [[8, "module-quapy.functional"]], "quapy.model_selection": [[8, "module-quapy.model_selection"]], "quapy.plot": [[8, "module-quapy.plot"]], "quapy.protocol": [[8, "module-quapy.protocol"]], "quapy.util": [[8, "module-quapy.util"]], "rae() (in module quapy.error)": [[8, "quapy.error.rae"]], "random_state (quapy.protocol.abstractstochasticseededprotocol property)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.random_state"]], "relative_absolute_error() (in module quapy.error)": [[8, "quapy.error.relative_absolute_error"]], "sample() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.sample"]], "sample() (quapy.protocol.abstractstochasticseededprotocol method)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.sample"]], "sample() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.sample"]], "sample() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.sample"]], "sample() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.sample"]], "samples_parameters() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.samples_parameters"]], "samples_parameters() (quapy.protocol.abstractstochasticseededprotocol method)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.samples_parameters"]], "samples_parameters() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.samples_parameters"]], "samples_parameters() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.samples_parameters"]], "samples_parameters() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.samples_parameters"]], "save_text_file() (in module quapy.util)": [[8, "quapy.util.save_text_file"]], "se() (in module quapy.error)": [[8, "quapy.error.se"]], "set_params() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.set_params"]], "smooth() (in module quapy.error)": [[8, "quapy.error.smooth"]], "strprev() (in module quapy.functional)": [[8, "quapy.functional.strprev"]], "temp_seed() (in module quapy.util)": [[8, "quapy.util.temp_seed"]], "total() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.total"]], "total() (quapy.protocol.abstractprotocol method)": [[8, "quapy.protocol.AbstractProtocol.total"]], "total() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.total"]], "total() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.total"]], "total() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.total"]], "uniform_prevalence_sampling() (in module quapy.functional)": [[8, 
"quapy.functional.uniform_prevalence_sampling"]], "uniform_simplex_sampling() (in module quapy.functional)": [[8, "quapy.functional.uniform_simplex_sampling"]], "bctscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.BCTSCalibration"]], "cnnnet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.CNNnet"]], "lstmnet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.LSTMnet"]], "lowranklogisticregression (class in quapy.classification.methods)": [[9, "quapy.classification.methods.LowRankLogisticRegression"]], "nbvscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.NBVSCalibration"]], "neuralclassifiertrainer (class in quapy.classification.neural)": [[9, "quapy.classification.neural.NeuralClassifierTrainer"]], "recalibratedprobabilisticclassifier (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifier"]], "recalibratedprobabilisticclassifierbase (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"]], "svmperf (class in quapy.classification.svmperf)": [[9, "quapy.classification.svmperf.SVMperf"]], "tscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.TSCalibration"]], "textclassifiernet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.TextClassifierNet"]], "torchdataset (class in quapy.classification.neural)": [[9, "quapy.classification.neural.TorchDataset"]], "vscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.VSCalibration"]], "asdataloader() (quapy.classification.neural.torchdataset method)": [[9, "quapy.classification.neural.TorchDataset.asDataloader"]], "classes_ (quapy.classification.calibration.recalibratedprobabilisticclassifierbase property)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.classes_"]], "decision_function() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.decision_function"]], "device (quapy.classification.neural.neuralclassifiertrainer property)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.device"]], "dimensions() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.dimensions"]], "document_embedding() (quapy.classification.neural.cnnnet method)": [[9, "quapy.classification.neural.CNNnet.document_embedding"]], "document_embedding() (quapy.classification.neural.lstmnet method)": [[9, "quapy.classification.neural.LSTMnet.document_embedding"]], "document_embedding() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.document_embedding"]], "fit() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit"]], "fit() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.fit"]], "fit() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.fit"]], "fit() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.fit"]], "fit_cv() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": 
[[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_cv"]], "fit_tr_val() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_tr_val"]], "forward() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.forward"]], "get_params() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.get_params"]], "get_params() (quapy.classification.neural.cnnnet method)": [[9, "quapy.classification.neural.CNNnet.get_params"]], "get_params() (quapy.classification.neural.lstmnet method)": [[9, "quapy.classification.neural.LSTMnet.get_params"]], "get_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.get_params"]], "get_params() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.get_params"]], "predict() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict"]], "predict() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.predict"]], "predict() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.predict"]], "predict() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.predict"]], "predict_proba() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict_proba"]], "predict_proba() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.predict_proba"]], "predict_proba() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.predict_proba"]], "predict_proba() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.predict_proba"]], "quapy.classification": [[9, "module-quapy.classification"]], "quapy.classification.calibration": [[9, "module-quapy.classification.calibration"]], "quapy.classification.methods": [[9, "module-quapy.classification.methods"]], "quapy.classification.neural": [[9, "module-quapy.classification.neural"]], "quapy.classification.svmperf": [[9, "module-quapy.classification.svmperf"]], "reset_net_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.reset_net_params"]], "set_params() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.set_params"]], "set_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.set_params"]], "set_params() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.set_params"]], "training (quapy.classification.neural.cnnnet attribute)": [[9, "quapy.classification.neural.CNNnet.training"]], "training (quapy.classification.neural.lstmnet attribute)": [[9, "quapy.classification.neural.LSTMnet.training"]], "training 
(quapy.classification.neural.textclassifiernet attribute)": [[9, "quapy.classification.neural.TextClassifierNet.training"]], "transform() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.transform"]], "transform() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.transform"]], "valid_losses (quapy.classification.svmperf.svmperf attribute)": [[9, "quapy.classification.svmperf.SVMperf.valid_losses"]], "vocabulary_size (quapy.classification.neural.cnnnet property)": [[9, "quapy.classification.neural.CNNnet.vocabulary_size"]], "vocabulary_size (quapy.classification.neural.lstmnet property)": [[9, "quapy.classification.neural.LSTMnet.vocabulary_size"]], "vocabulary_size (quapy.classification.neural.textclassifiernet property)": [[9, "quapy.classification.neural.TextClassifierNet.vocabulary_size"]], "xavier_uniform() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.xavier_uniform"]], "dataset (class in quapy.data.base)": [[10, "quapy.data.base.Dataset"]], "indextransformer (class in quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.IndexTransformer"]], "labelledcollection (class in quapy.data.base)": [[10, "quapy.data.base.LabelledCollection"]], "splitstratified() (quapy.data.base.dataset class method)": [[10, "quapy.data.base.Dataset.SplitStratified"]], "x (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.X"]], "xp (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.Xp"]], "xy (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.Xy"]], "add_word() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.add_word"]], "binarize() (in module quapy.data.reader)": [[10, "quapy.data.reader.binarize"]], "binary (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.binary"]], "binary (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.binary"]], "classes_ (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.classes_"]], "counts() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.counts"]], "fetch_ucidataset() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_UCIDataset"]], "fetch_ucilabelledcollection() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_UCILabelledCollection"]], "fetch_lequa2022() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_lequa2022"]], "fetch_reviews() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_reviews"]], "fetch_twitter() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_twitter"]], "fit() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.fit"]], "fit_transform() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.fit_transform"]], "from_csv() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_csv"]], "from_sparse() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_sparse"]], "from_text() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_text"]], "index() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.index"]], "kfcv() (quapy.data.base.dataset class 
method)": [[10, "quapy.data.base.Dataset.kFCV"]], "kfcv() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.kFCV"]], "load() (quapy.data.base.dataset class method)": [[10, "quapy.data.base.Dataset.load"]], "load() (quapy.data.base.labelledcollection class method)": [[10, "quapy.data.base.LabelledCollection.load"]], "mix() (quapy.data.base.labelledcollection class method)": [[10, "quapy.data.base.LabelledCollection.mix"]], "n_classes (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.n_classes"]], "n_classes (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.n_classes"]], "p (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.p"]], "prevalence() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.prevalence"]], "quapy.data": [[10, "module-quapy.data"]], "quapy.data.base": [[10, "module-quapy.data.base"]], "quapy.data.datasets": [[10, "module-quapy.data.datasets"]], "quapy.data.preprocessing": [[10, "module-quapy.data.preprocessing"]], "quapy.data.reader": [[10, "module-quapy.data.reader"]], "reduce_columns() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.reduce_columns"]], "reindex_labels() (in module quapy.data.reader)": [[10, "quapy.data.reader.reindex_labels"]], "sampling() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.sampling"]], "sampling_from_index() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.sampling_from_index"]], "sampling_index() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.sampling_index"]], "split_random() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.split_random"]], "split_stratified() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.split_stratified"]], "standardize() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.standardize"]], "stats() (quapy.data.base.dataset method)": [[10, "quapy.data.base.Dataset.stats"]], "stats() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.stats"]], "text2tfidf() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.text2tfidf"]], "train_test (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.train_test"]], "transform() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.transform"]], "uniform_sampling() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.uniform_sampling"]], "uniform_sampling_index() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.uniform_sampling_index"]], "vocabulary_size (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.vocabulary_size"]], "vocabulary_size() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.vocabulary_size"]], "warn() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.warn"]], "y (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.y"]], "acc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.ACC"]], "adjustedclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.AdjustedClassifyAndCount"]], "aggregativeprobabilisticquantifier 
(class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.AggregativeProbabilisticQuantifier"]], "aggregativequantifier (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.AggregativeQuantifier"]], "basequantifier (class in quapy.method.base)": [[11, "quapy.method.base.BaseQuantifier"]], "binaryquantifier (class in quapy.method.base)": [[11, "quapy.method.base.BinaryQuantifier"]], "cc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.CC"]], "classifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ClassifyAndCount"]], "distributionmatching (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.DistributionMatching"]], "dys (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.DyS"]], "eacc() (in module quapy.method.meta)": [[11, "quapy.method.meta.EACC"]], "ecc() (in module quapy.method.meta)": [[11, "quapy.method.meta.ECC"]], "eemq() (in module quapy.method.meta)": [[11, "quapy.method.meta.EEMQ"]], "ehdy() (in module quapy.method.meta)": [[11, "quapy.method.meta.EHDy"]], "elm (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.ELM"]], "em() (quapy.method.aggregative.emq class method)": [[11, "quapy.method.aggregative.EMQ.EM"]], "emq (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.EMQ"]], "epacc() (in module quapy.method.meta)": [[11, "quapy.method.meta.EPACC"]], "epsilon (quapy.method.aggregative.emq attribute)": [[11, "quapy.method.aggregative.EMQ.EPSILON"]], "ensemble (class in quapy.method.meta)": [[11, "quapy.method.meta.Ensemble"]], "expectationmaximizationquantifier (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ExpectationMaximizationQuantifier"]], "explicitlossminimisation (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ExplicitLossMinimisation"]], "hdy (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.HDy"]], "hellingerdistancey (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.HellingerDistanceY"]], "max (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MAX"]], "max_iter (quapy.method.aggregative.emq attribute)": [[11, "quapy.method.aggregative.EMQ.MAX_ITER"]], "ms (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MS"]], "ms2 (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MS2"]], "maximumlikelihoodprevalenceestimation (class in quapy.method.non_aggregative)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation"]], "mediansweep (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.MedianSweep"]], "mediansweep2 (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.MedianSweep2"]], "onevsall (class in quapy.method.base)": [[11, "quapy.method.base.OneVsAll"]], "onevsallaggregative (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.OneVsAllAggregative"]], "onevsallgeneric (class in quapy.method.base)": [[11, "quapy.method.base.OneVsAllGeneric"]], "pacc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.PACC"]], "pcc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.PCC"]], "probabilisticadjustedclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ProbabilisticAdjustedClassifyAndCount"]], "probabilisticclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ProbabilisticClassifyAndCount"]], "quanetmodule 
(class in quapy.method.neural)": [[11, "quapy.method.neural.QuaNetModule"]], "quanettrainer (class in quapy.method.neural)": [[11, "quapy.method.neural.QuaNetTrainer"]], "sld (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.SLD"]], "smm (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SMM"]], "svmae (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SVMAE"]], "svmkld (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SVMKLD"]], "svmnkld (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SVMNKLD"]], "svmq (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SVMQ"]], "svmrae (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SVMRAE"]], "t50 (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.T50"]], "thresholdoptimization (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.ThresholdOptimization"]], "valid_policies (quapy.method.meta.ensemble attribute)": [[11, "quapy.method.meta.Ensemble.VALID_POLICIES"]], "x (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.X"]], "aggregate() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.aggregate"]], "aggregate() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.aggregate"]], "aggregate() (quapy.method.aggregative.cc method)": [[11, "quapy.method.aggregative.CC.aggregate"]], "aggregate() (quapy.method.aggregative.distributionmatching method)": [[11, "quapy.method.aggregative.DistributionMatching.aggregate"]], "aggregate() (quapy.method.aggregative.dys method)": [[11, "quapy.method.aggregative.DyS.aggregate"]], "aggregate() (quapy.method.aggregative.elm method)": [[11, "quapy.method.aggregative.ELM.aggregate"]], "aggregate() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.aggregate"]], "aggregate() (quapy.method.aggregative.hdy method)": [[11, "quapy.method.aggregative.HDy.aggregate"]], "aggregate() (quapy.method.aggregative.onevsallaggregative method)": [[11, "quapy.method.aggregative.OneVsAllAggregative.aggregate"]], "aggregate() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.aggregate"]], "aggregate() (quapy.method.aggregative.pcc method)": [[11, "quapy.method.aggregative.PCC.aggregate"]], "aggregate() (quapy.method.aggregative.smm method)": [[11, "quapy.method.aggregative.SMM.aggregate"]], "aggregate() (quapy.method.aggregative.thresholdoptimization method)": [[11, "quapy.method.aggregative.ThresholdOptimization.aggregate"]], "aggregative (quapy.method.meta.ensemble property)": [[11, "quapy.method.meta.Ensemble.aggregative"]], "classes_ (quapy.method.aggregative.aggregativequantifier property)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classes_"]], "classes_ (quapy.method.base.onevsallgeneric property)": [[11, "quapy.method.base.OneVsAllGeneric.classes_"]], "classes_ (quapy.method.neural.quanettrainer property)": [[11, "quapy.method.neural.QuaNetTrainer.classes_"]], "classifier (quapy.method.aggregative.aggregativequantifier property)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classifier"]], "classify() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.classify"]], "classify() (quapy.method.aggregative.aggregativeprobabilisticquantifier method)": [[11, "quapy.method.aggregative.AggregativeProbabilisticQuantifier.classify"]], "classify() 
(quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classify"]], "classify() (quapy.method.aggregative.elm method)": [[11, "quapy.method.aggregative.ELM.classify"]], "classify() (quapy.method.aggregative.onevsallaggregative method)": [[11, "quapy.method.aggregative.OneVsAllAggregative.classify"]], "classify() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.classify"]], "clean_checkpoint() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.clean_checkpoint"]], "clean_checkpoint_dir() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.clean_checkpoint_dir"]], "cross_generate_predictions() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.cross_generate_predictions"]], "cross_generate_predictions_depr() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.cross_generate_predictions_depr"]], "device (quapy.method.neural.quanetmodule property)": [[11, "quapy.method.neural.QuaNetModule.device"]], "ensemblefactory() (in module quapy.method.meta)": [[11, "quapy.method.meta.ensembleFactory"]], "fit() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.fit"]], "fit() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.fit"]], "fit() (quapy.method.aggregative.cc method)": [[11, "quapy.method.aggregative.CC.fit"]], "fit() (quapy.method.aggregative.distributionmatching method)": [[11, "quapy.method.aggregative.DistributionMatching.fit"]], "fit() (quapy.method.aggregative.dys method)": [[11, "quapy.method.aggregative.DyS.fit"]], "fit() (quapy.method.aggregative.elm method)": [[11, "quapy.method.aggregative.ELM.fit"]], "fit() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.fit"]], "fit() (quapy.method.aggregative.hdy method)": [[11, "quapy.method.aggregative.HDy.fit"]], "fit() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.fit"]], "fit() (quapy.method.aggregative.pcc method)": [[11, "quapy.method.aggregative.PCC.fit"]], "fit() (quapy.method.aggregative.smm method)": [[11, "quapy.method.aggregative.SMM.fit"]], "fit() (quapy.method.aggregative.thresholdoptimization method)": [[11, "quapy.method.aggregative.ThresholdOptimization.fit"]], "fit() (quapy.method.base.basequantifier method)": [[11, "quapy.method.base.BaseQuantifier.fit"]], "fit() (quapy.method.base.onevsallgeneric method)": [[11, "quapy.method.base.OneVsAllGeneric.fit"]], "fit() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.fit"]], "fit() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.fit"]], "fit() (quapy.method.non_aggregative.maximumlikelihoodprevalenceestimation method)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.fit"]], "forward() (quapy.method.neural.quanetmodule method)": [[11, "quapy.method.neural.QuaNetModule.forward"]], "getonevsall() (in module quapy.method.base)": [[11, "quapy.method.base.getOneVsAll"]], "getptecondestim() (quapy.method.aggregative.acc class method)": [[11, "quapy.method.aggregative.ACC.getPteCondEstim"]], "getptecondestim() (quapy.method.aggregative.pacc class method)": [[11, "quapy.method.aggregative.PACC.getPteCondEstim"]], "get_params() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.get_params"]], "get_params() (quapy.method.neural.quanettrainer method)": 
[[11, "quapy.method.neural.QuaNetTrainer.get_params"]], "get_probability_distribution() (in module quapy.method.meta)": [[11, "quapy.method.meta.get_probability_distribution"]], "mae_loss() (in module quapy.method.neural)": [[11, "quapy.method.neural.mae_loss"]], "predict_proba() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.predict_proba"]], "probabilistic (quapy.method.meta.ensemble property)": [[11, "quapy.method.meta.Ensemble.probabilistic"]], "quantify() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.quantify"]], "quantify() (quapy.method.base.basequantifier method)": [[11, "quapy.method.base.BaseQuantifier.quantify"]], "quantify() (quapy.method.base.onevsallgeneric method)": [[11, "quapy.method.base.OneVsAllGeneric.quantify"]], "quantify() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.quantify"]], "quantify() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.quantify"]], "quantify() (quapy.method.non_aggregative.maximumlikelihoodprevalenceestimation method)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.quantify"]], "quapy.method": [[11, "module-quapy.method"]], "quapy.method.aggregative": [[11, "module-quapy.method.aggregative"]], "quapy.method.base": [[11, "module-quapy.method.base"]], "quapy.method.meta": [[11, "module-quapy.method.meta"]], "quapy.method.neural": [[11, "module-quapy.method.neural"]], "quapy.method.non_aggregative": [[11, "module-quapy.method.non_aggregative"]], "set_params() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.set_params"]], "set_params() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.set_params"]], "solve_adjustment() (quapy.method.aggregative.acc class method)": [[11, "quapy.method.aggregative.ACC.solve_adjustment"]], "training (quapy.method.neural.quanetmodule attribute)": [[11, "quapy.method.neural.QuaNetModule.training"]]}}) \ No newline at end of file +Search.setIndex({"docnames": ["Datasets", "Evaluation", "Installation", "Methods", "Model-Selection", "Plotting", "index", "modules", "quapy", "quapy.classification", "quapy.data", "quapy.method"], "filenames": ["Datasets.md", "Evaluation.md", "Installation.rst", "Methods.md", "Model-Selection.md", "Plotting.md", "index.rst", "modules.rst", "quapy.rst", "quapy.classification.rst", "quapy.data.rst", "quapy.method.rst"], "titles": ["Datasets", "Evaluation", "Installation", "Quantification Methods", "Model Selection", "Plotting", "Welcome to QuaPy\u2019s documentation!", "quapy", "quapy package", "quapy.classification package", "quapy.data package", "quapy.method package"], "terms": {"quapi": [0, 1, 2, 3, 4, 5], "make": [0, 1, 3, 8, 11], "avail": [0, 1, 2, 3, 5, 6, 9, 11], "sever": [0, 10], "have": [0, 1, 2, 3, 4, 5, 8, 10, 11], "been": [0, 3, 4, 5, 8, 9, 10, 11], "us": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "quantif": [0, 1, 6, 8, 9, 10, 11], "literatur": [0, 1, 4, 6], "well": [0, 3, 4, 5, 11], "an": [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11], "interfac": [0, 1, 11], "allow": [0, 1, 2, 3, 5, 8, 9, 10, 11], "anyon": 0, "import": [0, 1, 3, 4, 5, 6, 10, 11], "A": [0, 3, 8, 9, 10, 11], "object": [0, 8, 9, 10, 11], "i": [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11], "roughli": 0, "pair": [0, 8], "labelledcollect": [0, 3, 4, 8, 10, 11], "one": [0, 1, 3, 4, 5, 8, 10, 11], "plai": 0, "role": 0, "train": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "set": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "anoth": 
[0, 1, 3, 5, 8, 10], "test": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "class": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "consist": [0, 4, 5, 8, 9, 10, 11], "iter": [0, 8, 11], "instanc": [0, 3, 4, 5, 6, 8, 9, 10, 11], "label": [0, 3, 4, 5, 6, 8, 9, 10, 11], "thi": [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11], "handl": 0, "most": [0, 3, 5, 6, 8, 10, 11], "sampl": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "function": [0, 1, 3, 4, 5, 6, 7, 9, 10, 11], "take": [0, 3, 5, 8, 10, 11], "look": [0, 1, 3, 5, 11], "follow": [0, 1, 3, 4, 5, 6, 8, 11], "code": [0, 3, 4, 5, 9], "qp": [0, 1, 3, 4, 5, 6, 8, 10, 11], "f": [0, 1, 3, 4, 5, 6, 10], "1st": 0, "posit": [0, 3, 5, 8, 10, 11], "document": [0, 1, 3, 5, 9, 10, 11], "2nd": 0, "onli": [0, 3, 5, 8, 9, 10, 11], "neg": [0, 5, 8, 11], "neutral": 0, "3rd": 0, "2": [0, 1, 3, 5, 8, 10, 11], "0": [0, 1, 3, 4, 5, 8, 9, 10, 11], "1": [0, 1, 3, 4, 5, 8, 9, 10, 11], "print": [0, 1, 3, 4, 6, 9, 10], "strprev": [0, 1, 8], "preval": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "prec": [0, 8], "output": [0, 1, 3, 4, 8, 9, 10, 11], "show": [0, 1, 3, 4, 5, 8, 9, 10, 11], "digit": 0, "precis": [0, 1, 8], "17": 0, "50": [0, 5, 8, 11], "33": [0, 5, 8], "One": [0, 1, 3, 11], "can": [0, 1, 2, 3, 4, 5, 8, 10, 11], "easili": [0, 2, 5, 9], "produc": [0, 1, 5, 8], "new": [0, 3, 8, 9, 10], "desir": [0, 1, 10], "sample_s": [0, 1, 3, 4, 5, 8, 11], "10": [0, 1, 4, 5, 8, 9, 11], "prev": [0, 1, 8, 10], "4": [0, 1, 3, 4, 5, 10, 11], "5": [0, 1, 3, 4, 5, 8, 9, 10, 11], "which": [0, 1, 3, 4, 5, 8, 9, 10, 11], "40": [0, 3, 4, 11], "made": [0, 2, 8, 10, 11], "across": [0, 1, 4, 5, 6, 8, 11], "differ": [0, 1, 3, 4, 5, 6, 8, 10, 11], "run": [0, 1, 2, 3, 4, 5, 8, 10, 11], "e": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "g": [0, 1, 3, 4, 6, 8, 10, 11], "method": [0, 1, 4, 5, 6, 8], "same": [0, 3, 5, 8, 10, 11], "exact": [0, 10], "retain": [0, 3, 9, 11], "index": [0, 3, 6, 8, 9, 10, 11], "gener": [0, 1, 3, 4, 5, 8, 9, 10, 11], "sampling_index": [0, 10], "sampling_from_index": [0, 10], "also": [0, 1, 2, 3, 5, 6, 8, 9], "implement": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "artifici": [0, 1, 3, 4, 5, 6, 8], "protocol": [0, 3, 4, 5, 6, 7, 10, 11], "via": [0, 2, 3, 8, 9, 11], "python": [0, 6], "": [0, 1, 3, 4, 5, 8, 9, 10, 11], "seri": [0, 10], "equidist": [0, 8], "rang": [0, 5, 8, 11], "entir": [0, 3, 4, 5, 8], "spectrum": [0, 1, 4, 5, 8], "simplex": [0, 8], "space": [0, 4, 8, 9], "artificial_sampling_gener": 0, "100": [0, 1, 3, 4, 5, 8, 9, 10, 11], "n_preval": [0, 8], "each": [0, 1, 3, 4, 5, 8, 9, 10, 11], "valid": [0, 1, 3, 4, 5, 8, 9, 10, 11], "combin": [0, 1, 4, 8, 11], "origin": [0, 3, 8, 10], "from": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "split": [0, 3, 4, 5, 8, 9, 10, 11], "point": [0, 1, 3, 8, 10], "25": [0, 5, 8, 9, 11], "75": [0, 5, 8], "00": [0, 1, 4], "see": [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11], "evalu": [0, 3, 4, 5, 6, 7, 9, 10, 11], "wiki": [0, 3], "further": [0, 1, 3, 9, 10, 11], "detail": [0, 1, 3, 6, 9, 10, 11], "how": [0, 1, 3, 4, 5, 8, 10, 11], "properli": [0, 11], "three": [0, 5], "about": [0, 5, 8, 10], "kindl": [0, 1, 3, 5, 10, 11], "devic": [0, 3, 5, 9, 11], "harri": 0, "potter": 0, "known": [0, 3, 4, 8, 11], "imdb": [0, 5, 10], "movi": 0, "fetch": [0, 6], "unifi": [0, 11], "For": [0, 1, 5, 6, 8, 10], "exampl": [0, 1, 3, 4, 5, 8, 9, 10, 11], "fetch_review": [0, 1, 3, 4, 5, 10, 11], "These": [0, 9], "esuli": [0, 2, 3, 9, 10, 11], "moreo": [0, 3, 4, 10, 11], "sebastiani": [0, 3, 4, 10, 11], "2018": [0, 3, 10], "octob": [0, 3], "recurr": [0, 3, 10], "neural": [0, 8, 10], "network": [0, 8, 9, 10, 11], "In": [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 
11], "proceed": [0, 3, 10], "27th": [0, 3, 10], "acm": [0, 3, 10, 11], "intern": [0, 3, 9, 10], "confer": [0, 3, 9, 10], "inform": [0, 1, 3, 4, 8, 9, 10, 11], "knowledg": [0, 3, 10], "manag": [0, 3, 10], "pp": [0, 3, 9], "1775": [0, 3], "1778": [0, 3], "The": [0, 1, 2, 4, 5, 6, 8, 9, 10, 11], "list": [0, 5, 8, 9, 10, 11], "id": [0, 3, 10], "reviews_sentiment_dataset": [0, 10], "some": [0, 1, 3, 5, 8, 10, 11], "statist": [0, 1, 8, 11], "fhe": 0, "ar": [0, 1, 3, 4, 5, 8, 9, 10, 11], "summar": 0, "below": [0, 2, 3, 5, 8, 10], "size": [0, 1, 3, 8, 9, 10, 11], "type": [0, 3, 8, 10, 11], "hp": [0, 3, 4, 10], "9533": 0, "18399": 0, "018": 0, "982": 0, "065": 0, "935": 0, "text": [0, 3, 8, 9, 10, 11], "3821": [0, 10], "21591": [0, 10], "081": [0, 10], "919": [0, 10], "063": [0, 10], "937": [0, 10], "25000": 0, "500": [0, 1, 4, 5, 11], "11": [0, 1, 6, 8], "analysi": [0, 3, 6, 10], "access": [0, 3, 10, 11], "were": 0, "tf": [0, 10], "idf": 0, "format": [0, 5, 10, 11], "present": [0, 3, 10], "two": [0, 1, 3, 4, 5, 8, 10, 11], "val": [0, 9, 10], "model": [0, 1, 5, 6, 8, 9, 11], "select": [0, 3, 6, 8, 10, 11], "purpos": [0, 11], "exemplifi": 0, "load": [0, 3, 8, 10, 11], "fetch_twitt": [0, 3, 6, 10], "gasp": [0, 10], "for_model_select": [0, 10], "true": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "gao": [0, 3, 10, 11], "w": [0, 3, 10], "2015": [0, 2, 3, 9, 11], "august": 0, "tweet": [0, 3, 10], "classif": [0, 1, 3, 6, 8, 10, 11], "ieee": 0, "advanc": [0, 6, 8], "social": [0, 3, 10], "mine": [0, 3], "asonam": 0, "97": 0, "104": 0, "semeval13": [0, 10], "semeval14": [0, 10], "semeval15": [0, 10], "share": [0, 10], "semev": 0, "mean": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "would": [0, 1, 3, 5, 6, 10, 11], "get": [0, 1, 5, 8, 9, 10, 11], "when": [0, 1, 3, 4, 5, 8, 9, 10], "request": [0, 8, 10, 11], "ani": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "them": [0, 3, 10, 11], "consult": [0, 1], "twitter_sentiment_datasets_test": [0, 10], "9": [0, 1, 3, 5, 8], "replac": [0, 3, 8, 10], "twitter_sentiment_datasets_train": [0, 10], "found": [0, 3, 4, 8, 9, 10], "featur": [0, 10], "3": [0, 1, 3, 5, 6, 8, 9, 10, 11], "8788": 0, "3765": 0, "694582": 0, "421": 0, "496": 0, "082": [0, 1], "407": 0, "507": 0, "086": 0, "spars": [0, 10], "hcr": [0, 3, 10], "1594": 0, "798": 0, "222046": 0, "546": 0, "211": 0, "243": 0, "640": 0, "167": 0, "193": 0, "omd": [0, 10], "1839": 0, "787": 0, "199151": 0, "463": 0, "271": 0, "266": 0, "437": 0, "283": [0, 1], "280": 0, "sander": [0, 10], "2155": 0, "923": 0, "229399": 0, "161": 0, "691": 0, "148": 0, "164": [0, 3], "688": 0, "11338": 0, "3813": 0, "1215742": 0, "159": 0, "470": 0, "372": 0, "158": 0, "430": 0, "412": 0, "1853": 0, "109": 0, "361": 0, "530": 0, "2390": 0, "153": 0, "413": 0, "434": 0, "semeval16": [0, 6, 10], "8000": 0, "2000": 0, "889504": 0, "157": 0, "351": 0, "492": 0, "163": [0, 1], "341": 0, "497": 0, "sst": [0, 10], "2971": 0, "1271": 0, "376132": 0, "261": 0, "452": 0, "288": 0, "207": 0, "481": 0, "312": 0, "wa": [0, 3, 5, 8, 10, 11], "2184": 0, "936": 0, "248563": 0, "305": 0, "414": 0, "281": 0, "282": 0, "446": 0, "272": 0, "wb": [0, 10], "4259": 0, "1823": 0, "404333": 0, "270": 0, "392": 0, "337": 0, "274": 0, "335": 0, "32": [0, 6], "repositori": [0, 10], "p\u00e9rez": [0, 3, 10, 11], "g\u00e1llego": [0, 3, 10, 11], "p": [0, 3, 8, 9, 10, 11], "quevedo": [0, 3, 10], "j": [0, 3, 10, 11], "r": [0, 3, 8, 10], "del": [0, 3, 10], "coz": [0, 3, 10], "2017": [0, 3, 10, 11], "ensembl": [0, 6, 10, 11], "problem": [0, 3, 5, 8, 10, 11], "characteriz": [0, 3, 10], "chang": [0, 1, 3, 
10], "distribut": [0, 3, 5, 8, 10, 11], "case": [0, 1, 3, 4, 5, 8, 9, 10, 11], "studi": [0, 3, 10], "fusion": [0, 3, 10], "34": [0, 3, 10, 11], "87": [0, 3, 10], "doe": [0, 2, 3, 8, 11], "exactli": 0, "coincid": [0, 6], "et": [0, 2, 9, 10, 11], "al": [0, 2, 9, 10, 11], "sinc": [0, 1, 3, 5, 10, 11], "we": [0, 1, 3, 4, 5, 6, 10], "unabl": 0, "find": [0, 4, 11], "diabet": 0, "phonem": 0, "call": [0, 1, 5, 8, 10, 11], "fetch_ucidataset": [0, 3, 10], "yeast": [0, 10], "verbos": [0, 1, 4, 8, 9, 10, 11], "return": [0, 1, 3, 4, 5, 8, 9, 10, 11], "randomli": [0, 10], "drawn": [0, 1, 4, 8, 10], "stratifi": [0, 3, 9, 10, 11], "manner": [0, 9, 11], "whole": [0, 1, 3, 4, 8, 9], "collect": [0, 8, 9, 10], "70": 0, "30": [0, 1, 3, 11], "respect": [0, 1, 5, 8, 11], "option": [0, 1, 3, 5, 10, 11], "indic": [0, 1, 3, 4, 5, 8, 9, 10, 11], "descript": [0, 10], "should": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "standard": [0, 1, 5, 8, 9, 10, 11], "paper": [0, 3, 9, 11], "submit": 0, "kfcv": [0, 9, 10, 11], "order": [0, 2, 3, 5, 8, 10, 11], "accommod": 0, "practic": [0, 4], "could": [0, 1, 3, 4, 5, 6], "first": [0, 1, 2, 3, 5, 8, 10, 11], "instanti": [0, 1, 3, 4, 8, 9, 11], "creat": [0, 6, 8, 11], "time": [0, 1, 3, 8, 10, 11], "fetch_ucilabelledcollect": [0, 10], "nfold": [0, 8, 10], "nrepeat": [0, 10], "abov": [0, 3, 5, 8], "conduct": [0, 8], "2x5fcv": 0, "all": [0, 1, 2, 3, 5, 8, 9, 11], "come": [0, 8, 10, 11], "numer": [0, 1, 3, 6, 10, 11], "form": [0, 8, 10, 11], "dens": [0, 11], "matric": [0, 5, 10], "acut": 0, "120": 0, "6": [0, 1, 3, 5, 10], "508": 0, "b": [0, 8, 10, 11], "583": 0, "417": 0, "balanc": [0, 4, 11], "625": 0, "539": 0, "461": 0, "922": 0, "078": 0, "breast": 0, "cancer": 0, "683": 0, "350": 0, "650": 0, "cmc": 0, "1473": 0, "573": 0, "427": 0, "774": 0, "226": 0, "653": 0, "347": 0, "ctg": 0, "2126": 0, "22": [0, 3, 9, 10], "222": [0, 9], "778": 0, "861": 0, "139": 0, "917": 0, "083": 0, "german": 0, "1000": [0, 4, 11], "24": [0, 9], "300": [0, 1, 9], "700": 0, "haberman": [0, 3], "306": 0, "735": 0, "265": 0, "ionospher": 0, "641": 0, "359": 0, "iri": 0, "150": 0, "667": 0, "333": 0, "mammograph": 0, "830": 0, "514": 0, "486": 0, "pageblock": 0, "5473": 0, "979": 0, "021": 0, "semeion": 0, "1593": 0, "256": [0, 9], "901": 0, "099": 0, "sonar": 0, "208": 0, "60": 0, "534": 0, "466": 0, "spambas": 0, "4601": 0, "57": 0, "606": 0, "394": 0, "spectf": 0, "267": 0, "44": 0, "794": 0, "206": 0, "tictacto": 0, "958": 0, "transfus": 0, "748": 0, "762": 0, "238": 0, "wdbc": 0, "569": 0, "627": 0, "373": 0, "wine": 0, "178": 0, "13": [0, 9], "669": 0, "331": 0, "601": 0, "399": 0, "730": 0, "q": [0, 2, 3, 8, 9, 11], "red": 0, "1599": 0, "465": 0, "535": 0, "white": 0, "4898": 0, "665": 0, "1484": 0, "8": [0, 1, 5, 10, 11], "711": 0, "289": 0, "download": [0, 2, 3, 8, 10], "automat": [0, 1, 9], "thei": [0, 3, 11], "store": [0, 9, 10, 11], "quapy_data": [0, 8], "folder": [0, 10, 11], "faster": [0, 10], "reus": [0, 3, 8, 10], "howev": [0, 4, 5], "requir": [0, 1, 3, 6, 9], "special": [0, 5, 10], "action": 0, "moment": [0, 3], "fulli": [0, 8], "autom": [0, 3, 6], "cardiotocographi": 0, "excel": 0, "file": [0, 5, 8, 9, 10, 11], "user": [0, 1, 5], "instal": [0, 3, 6, 9, 11], "xlrd": [0, 2], "modul": [0, 1, 3, 5, 6, 7], "open": [0, 6, 10], "page": [0, 2, 6], "block": [0, 8], "need": [0, 3, 8, 10, 11], "unix": 0, "compress": 0, "extens": [0, 2, 5], "z": [0, 10], "directli": [0, 1, 3], "doabl": 0, "packag": [0, 2, 3, 6, 7], "like": [0, 1, 3, 5, 8, 9, 10, 11], "gzip": 0, "zip": [0, 5, 8], "uncompress": 0, "o": [0, 
8], "depend": [0, 1, 4, 5, 8, 11], "softwar": 0, "manual": 0, "do": [0, 1, 3, 4, 8, 9, 10, 11], "invok": [0, 1, 3, 8, 10], "provid": [0, 3, 5, 6, 10, 11], "loader": [0, 10], "simpl": [0, 3, 5, 11], "deal": 0, "t": [0, 1, 3, 8, 9, 11], "pre": [0, 3, 8], "n": [0, 1, 8, 9, 11], "second": [0, 1, 3, 5, 8, 10], "represent": [0, 3, 8, 9, 11], "col": [0, 10], "int": [0, 5, 8, 10, 11], "float": [0, 3, 8, 9, 10, 11], "charg": [0, 10], "classmethod": [0, 8, 10, 11], "def": [0, 1, 3, 5, 8], "cl": 0, "path": [0, 3, 5, 8, 9, 10, 11], "str": [0, 8, 10, 11], "loader_func": [0, 10], "callabl": [0, 8, 10, 11], "defin": [0, 3, 8, 9, 10, 11], "argument": [0, 1, 3, 5, 8, 10, 11], "initi": [0, 9, 11], "particular": [0, 1, 3, 11], "receiv": [0, 3, 5], "addition": 0, "number": [0, 1, 3, 5, 8, 9, 10, 11], "specifi": [0, 1, 3, 5, 8, 9, 10], "otherwis": [0, 3, 8, 10], "infer": [0, 10], "least": [0, 10], "pass": [0, 1, 5, 8, 9, 11], "along": [0, 3, 8, 11], "train_path": [0, 10], "my_data": 0, "dat": [0, 9], "test_path": [0, 10], "my_custom_load": 0, "rb": 0, "fin": 0, "preprocess": [0, 1, 3, 8, 11], "includ": [0, 1, 3, 5, 6, 10, 11], "text2tfidf": [0, 1, 3, 10], "tfidf": [0, 4, 5, 10], "vector": [0, 8, 9, 10, 11], "reduce_column": [0, 10], "reduc": [0, 10], "column": [0, 10], "base": [0, 3, 6, 8, 9], "term": [0, 1, 3, 4, 5, 6, 8, 9, 10, 11], "frequenc": [0, 10, 11], "transform": [0, 9, 10, 11], "valu": [0, 1, 3, 8, 9, 10, 11], "score": [0, 1, 4, 8, 9, 10], "subtract": [0, 8, 10], "normal": [0, 1, 3, 8, 10, 11], "deviat": [0, 1, 5, 8, 10], "so": [0, 1, 3, 5, 8, 9, 10, 11], "zero": [0, 8], "unit": [0, 8], "varianc": [0, 5], "textual": [0, 6, 10], "token": [0, 9, 10], "appeal": 1, "tool": [1, 6], "scenario": [1, 3, 4, 5, 6], "dataset": [1, 3, 4, 5, 6, 8, 9, 11], "shift": [1, 4, 6, 8, 9, 11], "particularli": 1, "prior": [1, 3, 4, 5, 6, 8, 11], "probabl": [1, 3, 4, 5, 6, 8, 9, 11], "That": [1, 4], "interest": [1, 5, 6, 8], "estim": [1, 3, 5, 6, 8, 9, 10, 11], "aris": 1, "under": 1, "belief": 1, "those": [1, 3, 4, 5, 8, 9, 11], "might": [1, 8, 10], "ones": [1, 3, 5, 8, 10, 11], "observ": [1, 11], "dure": [1, 5, 11], "other": [1, 3, 5, 6, 8, 10, 11], "word": [1, 3, 6, 9, 10, 11], "simpli": [1, 2, 3, 4, 5, 6, 8, 11], "predictor": 1, "assum": [1, 6, 11], "unlik": [1, 4, 8], "machin": [1, 4, 6, 9], "learn": [1, 2, 3, 4, 6, 8, 9, 10, 11], "govern": 1, "iid": [1, 5, 6], "assumpt": [1, 5, 6], "brief": [1, 10], "dedic": [1, 10], "explain": [1, 5], "here": [1, 11], "mae": [1, 4, 6, 8, 9, 11], "absolut": [1, 3, 5, 6, 8, 11], "mrae": [1, 6, 8, 9, 11], "rel": [1, 3, 8, 10, 11], "mse": [1, 3, 6, 8, 11], "squar": [1, 3, 8], "mkld": [1, 8, 11], "kullback": [1, 3, 8, 11], "leibler": [1, 3, 8, 11], "diverg": [1, 3, 8, 11], "mnkld": [1, 8, 11], "ae": [1, 2, 5, 8], "rae": [1, 2, 8], "se": [1, 8], "kld": [1, 2, 8, 9, 11], "nkld": [1, 2, 6, 8, 9, 11], "individu": [1, 3], "without": [1, 3, 8, 10], "averag": [1, 3, 8, 10, 11], "acc": [1, 3, 5, 6, 8, 11], "accuraci": [1, 5, 8, 11], "f1e": [1, 8], "f1": [1, 8, 9], "true_prev": [1, 5, 8], "prevs_hat": [1, 8], "ndarrai": [1, 3, 8, 10, 11], "contain": [1, 2, 3, 5, 8, 9, 10, 11], "smooth": [1, 8], "stabil": [1, 11], "third": [1, 5], "ep": [1, 8], "none": [1, 4, 8, 9, 10, 11], "paramet": [1, 3, 4, 8, 9, 10, 11], "epsilon": [1, 8, 11], "tradition": 1, "2t": [1, 8], "past": 1, "either": [1, 3, 8, 11], "environ": [1, 3, 4, 5, 8, 11], "variabl": [1, 3, 5, 8, 10], "onc": [1, 3, 5, 8, 10], "ommit": 1, "thereaft": 1, "recommend": [1, 5, 11], "np": [1, 3, 4, 5, 8, 10, 11], "asarrai": 1, "let": [1, 3, 11], 
"estim_prev": [1, 5, 8], "ae_": 1, "3f": [1, 6], "200": [1, 9], "600": 1, "914": 1, "final": [1, 3, 5, 11], "possibl": [1, 3, 8, 11], "string": [1, 8, 10, 11], "error_funct": 1, "from_nam": [1, 8], "accord": [1, 3, 4, 8, 9, 10, 11], "fix": [1, 4], "cover": [1, 4, 8, 9], "full": [1, 8], "contrast": 1, "natur": [1, 8], "despit": 1, "introduc": 1, "approxim": [1, 5, 8, 9], "preserv": [1, 5, 8], "procol": 1, "equal": [1, 8, 11], "distant": [1, 8], "interv": [1, 5, 8], "n_prevpoint": [1, 4, 5, 8], "determin": [1, 4, 5, 8], "constrain": [1, 5, 8, 10], "obtain": [1, 4, 8, 9, 11], "66": [1, 11], "given": [1, 3, 4, 8, 9, 10, 11], "num_prevalence_combin": [1, 8], "21": [1, 3, 5, 8], "n_class": [1, 3, 8, 9, 10, 11], "n_repeat": [1, 8], "1771": 1, "note": [1, 3, 4, 5, 8, 10], "last": [1, 3, 5, 8, 9, 10], "typic": [1, 4, 5, 8, 9, 10, 11], "singl": [1, 3, 6, 11], "higher": [1, 5], "comput": [1, 3, 5, 8, 11], "perform": [1, 3, 4, 5, 6, 8, 9, 11], "signific": 1, "instead": [1, 3, 4, 8, 10, 11], "work": [1, 3, 4, 5, 8, 10, 11], "wai": [1, 11], "around": [1, 10, 11], "maximum": [1, 8, 9, 11], "budg": 1, "close": [1, 10], "than": [1, 4, 5, 8, 9, 10], "budget": [1, 4], "achiev": [1, 3, 4, 5], "get_nprevpoints_approxim": [1, 8], "5000": [1, 5], "4960": 1, "cost": 1, "sometim": 1, "cumbersom": 1, "control": [1, 4, 8], "overal": 1, "experi": [1, 2, 3, 4, 5, 8], "rather": [1, 4], "By": [1, 3, 8], "avoid": [1, 8], "lead": [1, 10], "closer": 1, "surpass": 1, "script": [1, 2, 3, 6, 11], "pacc": [1, 3, 5, 8, 11], "reli": [1, 3, 8, 11], "logist": [1, 3, 9, 11], "regressor": [1, 3], "classifi": [1, 4, 5, 6, 8, 9, 11], "variou": [1, 5], "metric": [1, 3, 4, 6, 8, 11], "sklearn": [1, 3, 4, 5, 6, 9, 10, 11], "linear_model": [1, 3, 4, 6, 9], "logisticregress": [1, 3, 4, 6, 9, 11], "data": [1, 3, 4, 5, 6, 8, 9, 11], "min_df": [1, 3, 4, 5, 10, 11], "inplac": [1, 3, 10, 11], "lr": [1, 3, 9, 11], "aggreg": [1, 4, 5, 6, 8], "fit": [1, 3, 4, 5, 6, 8, 9, 10, 11], "df": 1, "artificial_sampling_report": 1, "mani": [1, 3, 4, 5, 6, 8, 11], "extract": [1, 8, 10], "categori": [1, 8], "n_repetit": [1, 4, 5], "n_job": [1, 3, 4, 8, 9, 10, 11], "parallel": [1, 3, 8, 9, 10, 11], "worker": [1, 8, 9, 10, 11], "cpu": [1, 9, 11], "random_se": [1, 8], "42": 1, "random": [1, 3, 4, 5, 8, 10], "seed": [1, 4, 8, 10], "replic": [1, 4, 8], "error_metr": [1, 4, 8], "line": [1, 3, 8], "result": [1, 2, 3, 4, 5, 6, 11], "report": 1, "panda": [1, 2], "datafram": 1, "displai": [1, 5, 8, 9], "just": [1, 3], "clearer": 1, "shown": [1, 5, 8], "convert": [1, 3, 8, 9, 10, 11], "repres": [1, 3, 5, 8, 10, 11], "decim": 1, "default": [1, 3, 8, 9, 10, 11], "pd": 1, "set_opt": 1, "expand_frame_repr": 1, "fals": [1, 3, 5, 8, 9, 10, 11], "map": [1, 9, 11], "000": 1, "000e": 1, "091": 1, "909": 1, "009": 1, "048": 1, "426e": 1, "04": 1, "837": 1, "037": 1, "114": 1, "633e": 1, "03": 1, "7": [1, 5, 8, 9, 11], "717": 1, "017": 1, "041": 1, "383e": 1, "366": 1, "634": 1, "034": 1, "070": 1, "412e": 1, "459": 1, "541": 1, "387e": 1, "565": 1, "435": 1, "035": 1, "073": 1, "535e": 1, "654": 1, "346": 1, "046": 1, "108": 1, "701e": 1, "725": 1, "275": 1, "075": 1, "235": 1, "515e": 1, "02": 1, "858": 1, "142": 1, "042": 1, "229": 1, "740e": 1, "945": 1, "055": 1, "27": [1, 3, 9], "357": 1, "219e": 1, "578": 1, "dtype": [1, 10], "float64": 1, "artificial_sampling_ev": [1, 4], "artificial_sampling_predict": [1, 5], "arrai": [1, 3, 5, 8, 9, 10, 11], "pip": 2, "older": 2, "version": [2, 8, 9], "scikit": [2, 3, 4, 8, 9, 10, 11], "numpi": [2, 4, 8, 9], "scipi": [2, 10], "pytorch": 
[2, 11], "quanet": [2, 6, 9, 11], "svmperf": [2, 3, 8, 11], "patch": [2, 3, 9, 11], "joblib": [2, 11], "tqdm": 2, "matplotlib": [2, 8], "involv": [2, 5, 8], "you": [2, 3], "appli": [2, 3, 4, 5, 8, 9, 10, 11], "ext": 2, "compil": [2, 3], "sourc": [2, 3, 6, 9], "prepare_svmperf": [2, 3], "sh": [2, 3], "job": 2, "directori": [2, 8, 9, 10, 11], "svm_perf_quantif": [2, 3], "optim": [2, 3, 4, 8, 9, 11], "measur": [2, 3, 4, 5, 6, 8, 11], "propos": [2, 3, 11], "barranquero": [2, 3, 9, 11], "extend": [2, 3, 8, 11], "former": [2, 11], "categor": [3, 10], "belong": [3, 11], "non": [3, 11], "group": 3, "though": [3, 8], "plan": 3, "add": [3, 4, 8, 10], "more": [3, 5, 11], "futur": 3, "character": [3, 6], "fact": [3, 5], "product": [3, 10], "quantifi": [3, 4, 5, 6, 8, 10, 11], "shoud": 3, "basequantifi": [3, 8, 11], "abstract": [3, 8, 9, 10, 11], "abstractmethod": 3, "self": [3, 8, 9, 10, 11], "set_param": [3, 8, 9, 11], "get_param": [3, 8, 9, 11], "deep": [3, 8, 11], "familiar": 3, "structur": [3, 11], "inspir": 3, "reason": [3, 5, 6], "why": 3, "ha": [3, 4, 5, 8, 9, 10, 11], "adopt": [3, 4, 10], "respond": 3, "predict": [3, 4, 5, 8, 9, 11], "input": [3, 5, 8, 9, 11], "element": [3, 10, 11], "while": [3, 5, 9, 10, 11], "selector": 3, "process": [3, 4, 8], "hyperparamet": [3, 8, 11], "search": [3, 4, 6, 8, 11], "part": [3, 10], "aggregativequantifi": [3, 11], "must": [3, 10, 11], "fit_learn": 3, "classif_predict": [3, 11], "mention": 3, "befor": [3, 8, 9, 10, 11], "inde": [3, 4], "alreadi": [3, 8, 11], "preclassifi": 3, "maintain": [3, 11], "through": [3, 8], "properti": [3, 8, 9, 10, 11], "learner": [3, 4, 9, 11], "extern": 3, "probabilist": [3, 8, 9, 11], "inherit": 3, "aggregativeprobabilisticquantifi": [3, 11], "posterior": [3, 8, 9, 11], "crisp": [3, 8, 11], "decis": [3, 8, 9, 11], "hard": [3, 8, 9], "classif_posterior": [3, 11], "posterior_prob": [3, 11], "advantag": [3, 11], "procedur": [3, 6, 8], "veri": [3, 5], "effici": 3, "everi": [3, 8, 11], "leverag": 3, "speed": [3, 8, 11], "up": [3, 4, 8, 9, 11], "over": [3, 4, 8], "customarili": [3, 4], "done": 3, "four": 3, "cc": [3, 5, 11], "simplest": 3, "deliv": [3, 11], "adjust": [3, 6, 8, 11], "pcc": [3, 4, 5, 11], "soft": 3, "serv": [3, 8, 10], "complet": [3, 5, 11], "equip": [3, 5], "svm": [3, 5, 6, 9, 10, 11], "linearsvc": [3, 5, 10], "pickl": [3, 8, 10, 11], "alia": [3, 8, 10, 11], "classifyandcount": [3, 11], "estim_preval": [3, 6, 11], "rate": [3, 8, 9, 11], "binari": [3, 5, 6, 8, 9, 10, 11], "init": 3, "addit": 3, "val_split": [3, 4, 9, 11], "integ": [3, 8, 9, 10, 11], "k": [3, 6, 8, 9, 10, 11], "fold": [3, 8, 10, 11], "cross": [3, 8, 9, 10, 11], "specif": [3, 4, 8], "held": [3, 4, 8, 9, 11], "out": [3, 4, 5, 8, 9, 10, 11], "postpon": 3, "constructor": 3, "prevail": 3, "overrid": 3, "illustr": [3, 4, 5], "seem": 3, "calibr": [3, 8], "calibratedclassifiercv": 3, "base_estim": 3, "cv": [3, 4], "predict_proba": [3, 9, 11], "As": [3, 4], "calibratedclassifi": 3, "except": [3, 8, 11], "rais": [3, 8, 11], "lastli": 3, "everyth": 3, "said": 3, "aboud": 3, "sld": [3, 11], "expectationmaximizationquantifi": [3, 11], "describ": [3, 8, 11], "saeren": [3, 11], "m": [3, 8, 11], "latinn": [3, 11], "decaesteck": [3, 11], "c": [3, 4, 8, 9, 10, 11], "2002": 3, "priori": 3, "14": 3, "41": 3, "attempt": 3, "although": [3, 4, 5, 11], "improv": [3, 8, 9, 11], "rank": [3, 9], "almost": 3, "alwai": [3, 4, 5, 11], "among": 3, "effect": 3, "carri": [3, 10, 11], "gonz\u00e1lez": 3, "castro": 3, "v": [3, 8, 9, 11], "alaiz": 3, "rodr\u0131": 3, "guez": 3, "alegr": 3, 
"2013": 3, "scienc": 3, "218": 3, "146": 3, "It": [3, 4, 5, 8], "allia": 3, "hellingerdistancei": [3, 11], "mixtur": [3, 8, 11], "previou": 3, "overridden": [3, 11], "proport": [3, 4, 9, 10, 11], "taken": [3, 8, 9, 10], "itself": [3, 8, 11], "accept": 3, "elm": [3, 11], "famili": [3, 11], "target": [3, 5, 6, 8, 9, 11], "orient": [3, 6, 8, 11], "joachim": [3, 9, 11], "svmq": 3, "d\u00edez": 3, "reliabl": 3, "pattern": 3, "recognit": 3, "48": 3, "591": 3, "604": 3, "svmkld": 3, "multivari": [3, 9], "transact": 3, "discoveri": 3, "articl": [3, 4], "svmnkld": 3, "svmae": 3, "error": [3, 4, 6, 7, 9, 11], "svmrae": 3, "what": 3, "nowadai": 3, "consid": [3, 5, 8, 9, 10, 11], "behav": [3, 5], "If": [3, 5, 8, 10, 11], "want": [3, 4], "custom": [3, 6, 10], "modifi": [3, 8], "assign": [3, 10], "Then": 3, "re": [3, 4, 9, 10], "thing": 3, "your": 3, "svmperf_hom": [3, 11], "valid_loss": [3, 9, 11], "mycustomloss": 3, "28": [3, 10], "current": [3, 8, 9, 10, 11], "support": [3, 6, 10, 11], "oper": 3, "trivial": 3, "strategi": [3, 4], "2016": [3, 10, 11], "sentiment": [3, 6, 10], "19": [3, 10], "onevsal": [3, 11], "know": 3, "where": [3, 5, 8, 9, 10, 11], "top": [3, 8, 11], "thu": [3, 4, 5, 8, 9, 11], "nor": 3, "castano": [3, 10], "2019": [3, 10, 11], "dynam": [3, 9, 10, 11], "task": [3, 4, 10], "45": [3, 5, 10], "15": [3, 8, 10], "polici": [3, 11], "processor": 3, "av": [3, 11], "ptr": [3, 11], "member": [3, 11], "d": [3, 11], "static": [3, 11], "red_siz": [3, 11], "pleas": 3, "check": [3, 4, 8], "offer": [3, 6], "torch": [3, 9, 11], "embed": [3, 9, 11], "lstm": [3, 9, 11], "cnn": [3, 11], "its": [3, 4, 8, 9, 11], "layer": [3, 9, 11], "neuralclassifiertrain": [3, 9, 11], "cnnnet": [3, 9, 11], "vocabulary_s": [3, 9, 10, 11], "cuda": [3, 9, 11], "supervis": [4, 6], "strongli": [4, 5], "good": [4, 5], "choic": [4, 11], "hyper": [4, 8, 9], "wherebi": 4, "chosen": [4, 8], "pick": 4, "best": [4, 8, 9, 11], "being": [4, 8, 11], "criteria": 4, "solv": [4, 11], "assess": 4, "own": 4, "right": [4, 8, 10], "impos": [4, 8], "aim": [4, 5], "appropri": 4, "configur": [4, 8], "design": 4, "long": [4, 9], "regard": 4, "next": [4, 8, 9, 10], "section": 4, "argu": 4, "alejandro": 4, "fabrizio": 4, "count": [4, 5, 6, 8, 10, 11], "arxiv": 4, "preprint": 4, "2011": 4, "02552": 4, "2020": [4, 9], "varieti": 4, "exhibit": [4, 5], "degre": 4, "model_select": [4, 7, 11], "gridsearchq": [4, 8, 11], "grid": [4, 8, 11], "explor": [4, 8], "portion": 4, "param_grid": [4, 8, 11], "logspac": [4, 11], "class_weight": [4, 11], "eval_budget": 4, "refit": [4, 8], "retrain": [4, 9], "goe": 4, "end": [4, 8, 11], "best_params_": 4, "best_model_": 4, "101": 4, "5f": 4, "system": [4, 11], "start": 4, "hyperparam": 4, "0001": [4, 11], "got": [4, 11], "24987": 4, "48135": 4, "001": [4, 9, 11], "24866": 4, "100000": 4, "43676": 4, "finish": 4, "param": [4, 8, 9, 11], "19982": 4, "develop": [4, 6], "1010": 4, "5005": 4, "54it": 4, "20342": 4, "altern": 4, "computation": 4, "costli": 4, "try": 4, "theoret": 4, "suboptim": 4, "opt": 4, "gridsearchcv": [4, 11], "10000": 4, "5379": 4, "55it": 4, "41734": 4, "wors": [4, 5, 8], "larg": 4, "between": [4, 5, 6, 8, 9, 11], "modal": 4, "turn": 4, "better": 4, "nonetheless": 4, "happen": [4, 5], "basic": [5, 11], "help": [5, 11], "analys": [5, 6], "outcom": 5, "main": 5, "method_nam": [5, 8, 11], "name": [5, 8, 9, 10, 11], "shape": [5, 8, 9, 10, 11], "correspond": [5, 10], "matrix": [5, 8, 11], "appear": 5, "occur": [5, 10], "merg": 5, "emq": [5, 11], "55": 5, "showcas": 5, "wide": 5, "variant": [5, 6, 8, 
11], "linear": [5, 8, 11], "review": [5, 6, 10], "step": [5, 8], "05": [5, 8, 11], "gen_data": 5, "base_classifi": 5, "yield": [5, 8, 10, 11], "tr_prev": [5, 8, 11], "append": 5, "__class__": 5, "__name__": 5, "insight": 5, "view": 5, "y": [5, 8, 9, 10, 11], "axi": [5, 8], "against": 5, "x": [5, 8, 9, 10, 11], "unfortun": 5, "limit": [5, 8, 11], "binary_diagon": [5, 8], "train_prev": [5, 8], "savepath": [5, 8], "bin_diag": 5, "png": 5, "save": [5, 8], "pdf": [5, 11], "cyan": 5, "dot": [5, 8], "color": [5, 8], "band": [5, 8], "hidden": [5, 9, 11], "show_std": [5, 8], "unadjust": 5, "bias": 5, "toward": [5, 10], "seen": [5, 8, 11], "evinc": 5, "box": [5, 8], "binary_bias_glob": [5, 8], "bin_bia": 5, "unbias": 5, "center": 5, "tend": 5, "overestim": 5, "high": [5, 8], "lower": [5, 11], "again": 5, "accordingli": 5, "20": [5, 8, 11], "90": [5, 8], "rewrit": 5, "method_data": 5, "training_preval": 5, "linspac": 5, "training_s": 5, "suffic": 5, "latex": 5, "syntax": 5, "_": [5, 8, 10], "now": 5, "clearli": 5, "binary_bias_bin": [5, 8], "broken": [5, 8], "down": [5, 8, 10], "bin": [5, 8, 11], "To": [5, 10], "nbin": [5, 8, 11], "isometr": [5, 8], "subinterv": 5, "interestingli": 5, "enough": 5, "seemingli": 5, "tendenc": 5, "low": [5, 8, 9], "underestim": 5, "beyond": 5, "67": [5, 8], "curios": 5, "pretti": 5, "discuss": 5, "analyz": 5, "compar": [5, 8], "both": [5, 10], "irrespect": [5, 11], "harder": 5, "interpret": [5, 6, 11], "error_by_drift": [5, 8], "error_nam": [5, 8], "n_bin": [5, 8, 11], "err_drift": 5, "whenev": [5, 8], "clear": 5, "lowest": 5, "difficult": 5, "rememb": 5, "solid": 5, "comparison": 5, "detriment": 5, "visual": [5, 6], "hide": 5, "framework": [6, 11], "written": 6, "root": 6, "concept": 6, "baselin": 6, "integr": 6, "commonli": 6, "facilit": 6, "twitter": [6, 10], "true_preval": 6, "hold": [6, 8, 11], "endeavour": [6, 8], "popular": 6, "expect": [6, 11], "maxim": [6, 11], "hdy": [6, 11], "versatil": 6, "etc": 6, "uci": [6, 10], "nativ": 6, "loss": [6, 9, 11], "perf": [6, 9, 11], "ad": 6, "meta": [6, 8], "plot": [6, 7], "diagon": [6, 8], "bia": [6, 8, 9, 11], "drift": 6, "api": 6, "subpackag": 7, "submodul": 7, "util": [7, 9, 10], "content": 7, "bctscalibr": 9, "nbvscalibr": 9, "recalibratedprobabilisticclassifi": 9, "recalibratedprobabilisticclassifierbas": 9, "classes_": [9, 10, 11], "fit_cv": 9, "fit_tr_val": 9, "tscalibr": 9, "vscalibr": 9, "lowranklogisticregress": 9, "document_embed": 9, "lstmnet": 9, "reset_net_param": 9, "textclassifiernet": 9, "dimens": [8, 9, 10, 11], "forward": [9, 11], "xavier_uniform": 9, "torchdataset": 9, "asdataload": 9, "decision_funct": 9, "splitstratifi": 10, "stat": 10, "train_test": 10, "xp": 10, "xy": 10, "split_random": 10, "split_stratifi": 10, "uniform_sampl": 10, "uniform_sampling_index": 10, "fetch_lequa2022": 10, "warn": 10, "indextransform": 10, "add_word": 10, "fit_transform": 10, "reader": 8, "binar": [8, 10], "from_csv": 10, "from_spars": 10, "from_text": 10, "reindex_label": 10, "getptecondestim": 11, "solve_adjust": 11, "adjustedclassifyandcount": 11, "distributionmatch": 11, "dy": 11, "em": 11, "max_it": 11, "explicitlossminimis": [], "max": 11, "ms2": 11, "mediansweep": 11, "mediansweep2": 11, "probabilisticadjustedclassifyandcount": 11, "probabilisticclassifyandcount": 11, "smm": 11, "t50": 11, "thresholdoptim": 11, "cross_generate_predict": 11, "cross_generate_predictions_depr": 11, "binaryquantifi": 11, "onevsallgener": 11, "eacc": 11, "ecc": 11, "eemq": 11, "ehdi": 11, "epacc": 11, "valid_polici": 11, 
"ensemblefactori": 11, "get_probability_distribut": 11, "quanetmodul": 11, "quanettrain": 11, "clean_checkpoint": 11, "clean_checkpoint_dir": 11, "mae_loss": 11, "non_aggreg": 8, "maximumlikelihoodprevalenceestim": 11, "absolute_error": 8, "hat": 8, "frac": 8, "mathcal": 8, "sum_": 8, "acc_error": 8, "y_true": 8, "y_pred": 8, "tp": 8, "tn": 8, "fp": 8, "fn": 8, "stand": [8, 11], "f1_error": 8, "macro": 8, "f_1": 8, "harmon": 8, "recal": 8, "2tp": 8, "independ": [8, 11], "err_nam": 8, "p_hat": 8, "d_": 8, "kl": 8, "log": [8, 10], "factor": 8, "beforehand": 8, "n_sampl": [8, 9], "mean_absolute_error": 8, "mean_relative_absolute_error": 8, "relative_absolute_error": 8, "underlin": 8, "displaystyl": 8, "abstractprotocol": 8, "union": [8, 10, 11], "aggr_speedup": 8, "auto": 8, "evaluation_report": 8, "app": [8, 11], "repeat": 8, "smooth_limits_epsilon": 8, "random_st": [8, 10], "return_typ": 8, "sample_prev": 8, "abstractstochasticseededprotocol": 8, "onlabelledcollectionprotocol": 8, "95": 8, "copi": [8, 10], "quantiti": 8, "labelled_collect": 8, "prevalence_grid": 8, "exhaust": 8, "sum": [8, 11], "implicit": 8, "return_constrained_dim": 8, "rest": [8, 9, 10], "quit": 8, "obvious": 8, "determinist": 8, "anywher": 8, "multipli": 8, "necessari": 8, "samples_paramet": 8, "total": 8, "parent": 8, "sequenc": 8, "enforc": 8, "collat": 8, "arg": [8, 10], "domainmix": 8, "domaina": 8, "domainb": 8, "mixture_point": 8, "domain": 8, "scale": [8, 9, 11], "npp": 8, "draw": 8, "uniformli": 8, "therefor": 8, "get_col": 8, "get_labelled_collect": 8, "on_preclassified_inst": 8, "pre_classif": 8, "in_plac": 8, "usimplexpp": 8, "kraemer": 8, "algorithm": [8, 11], "sens": 8, "guarante": [8, 10], "prefer": 8, "intract": 8, "hellingerdist": 8, "hellingh": 8, "distanc": [8, 11], "hd": [8, 11], "discret": [8, 11], "sqrt": 8, "p_i": 8, "q_i": 8, "real": [8, 9, 10, 11], "topsoedist": 8, "1e": [8, 9, 11], "topso": [8, 11], "adjusted_quantif": 8, "prevalence_estim": 8, "tpr": [8, 11], "fpr": [8, 11], "clip": 8, "exce": 8, "check_prevalence_vector": 8, "raise_except": 8, "toleranz": 8, "08": 8, "combinations_budget": 8, "largest": 8, "dimension": [8, 9, 10, 11], "repetit": 8, "less": [8, 10], "normalize_preval": 8, "l1": [8, 11], "calcul": 8, "binom": 8, "mass": 8, "alloc": [8, 9], "solut": 8, "star": 8, "bar": 8, "prevalence_from_label": 8, "n_instanc": [8, 9, 11], "correctli": 8, "even": 8, "len": 8, "prevalence_from_prob": 8, "bool": [8, 9, 11], "argmax": 8, "prevalence_linspac": 8, "01": [8, 9, 11], "separ": [8, 10], "99": 8, "uniform_prevalence_sampl": 8, "adapt": [8, 9], "post": 8, "http": [8, 10, 11], "stackexchang": 8, "com": 8, "question": 8, "3227": 8, "uniform": [8, 10], "uniform_simplex_sampl": 8, "dict": [8, 10, 11], "timeout": 8, "dictionari": [8, 9, 10, 11], "kei": [8, 10], "quantification_error": 8, "whether": [8, 9, 10, 11], "ignor": [8, 10, 11], "gen": 8, "establish": 8, "timer": 8, "longer": [8, 11], "timeouterror": 8, "bound": [8, 11], "stdout": 8, "best_model": 8, "after": [8, 11], "minim": [8, 11], "routin": [8, 10, 11], "unus": [8, 9], "contanin": 8, "cross_val_predict": 8, "akin": [8, 11], "issu": 8, "reproduc": [8, 10], "pos_class": [8, 10], "titl": 8, "colormap": 8, "listedcolormap": 8, "vertical_xtick": 8, "legend": 8, "local": 8, "sign": 8, "minu": 8, "classs": 8, "compon": [8, 9, 11], "cm": 8, "tab10": 8, "secondari": 8, "global": 8, "method_ord": 8, "henc": [8, 10], "conveni": 8, "multiclass": [8, 10, 11], "inconveni": 8, "leyend": 8, "hightlight": 8, "associ": 8, 
"brokenbar_supremacy_by_drift": 8, "isomer": 8, "x_error": 8, "y_error": 8, "ttest_alpha": 8, "005": 8, "tail_density_threshold": 8, "region": 8, "chart": 8, "condit": [8, 11], "ii": 8, "significantli": 8, "side": 8, "confid": 8, "percentil": 8, "divid": 8, "amount": 8, "similar": [8, 11], "threshold": [8, 11], "densiti": 8, "tail": 8, "discard": 8, "outlier": 8, "show_dens": 8, "show_legend": 8, "logscal": 8, "vline": 8, "especi": 8, "mai": 8, "cumberson": 8, "gain": 8, "understand": 8, "fare": 8, "regim": 8, "highlight": 8, "vertic": 8, "earlystop": 8, "patienc": [8, 9, 11], "lower_is_bett": 8, "earli": [8, 9, 11], "stop": [8, 9, 11], "epoch": [8, 9, 11], "best_epoch": 8, "best_scor": 8, "consecut": [8, 9, 11], "monitor": 8, "obtaind": 8, "far": [8, 9, 10], "flag": 8, "keep": 8, "track": 8, "boolean": [8, 10, 11], "create_if_not_exist": 8, "makedir": 8, "exist_ok": 8, "join": 8, "dir": [8, 11], "subdir": 8, "anotherdir": 8, "create_parent_dir": 8, "exist": 8, "txt": 8, "download_fil": 8, "url": 8, "archive_filenam": 8, "destin": 8, "filenam": 8, "download_file_if_not_exist": 8, "dowload": 8, "get_quapy_hom": 8, "home": [8, 10], "perman": 8, "map_parallel": 8, "func": 8, "slice": 8, "item": 8, "wrapper": [8, 9, 10, 11], "multiprocess": [8, 11], "delai": 8, "args_i": 8, "silent": [8, 11], "child": 8, "ensur": 8, "pickled_resourc": 8, "pickle_path": 8, "generation_func": 8, "fast": [8, 10], "resourc": 8, "some_arrai": 8, "mock": [8, 9], "rand": 8, "my_arrai": 8, "pkl": 8, "save_text_fil": 8, "disk": 8, "miss": 8, "temp_se": 8, "context": 8, "tempor": [8, 9], "outer": 8, "state": 8, "within": [8, 11], "get_njob": [], "correct": [9, 11], "temperatur": [9, 11], "bct": [9, 11], "abstent": 9, "alexandari": [9, 11], "afterward": [9, 11], "No": [9, 11], "nbv": [9, 11], "baseestim": [9, 11], "calibratorfactori": 9, "n_compon": 9, "kwarg": [9, 10, 11], "decomposit": 9, "truncatedsvd": 9, "princip": 9, "regress": 9, "n_featur": 9, "length": [9, 10], "eventu": [9, 10], "unalt": 9, "emb": 9, "embedding_s": 9, "hidden_s": 9, "repr_siz": 9, "kernel_height": 9, "stride": 9, "pad": [9, 10], "drop_p": 9, "convolut": 9, "vocabulari": [9, 10], "kernel": 9, "drop": 9, "dropout": [9, 11], "batch": 9, "dataload": 9, "tensor": 9, "n_dimens": 9, "lstm_class_nlay": 9, "short": 9, "memori": 9, "net": 9, "weight_decai": 9, "batch_siz": 9, "64": [9, 11], "batch_size_test": 9, "512": [9, 11], "padding_length": 9, "checkpointpath": 9, "checkpoint": [9, 11], "classifier_net": 9, "weight": [9, 10], "decai": 9, "wait": 9, "enabl": 9, "gpu": [9, 11], "vocab_s": 9, "reiniti": 9, "trainer": 9, "disjoint": 9, "embed_s": 9, "nn": 9, "pad_length": 9, "xavier": 9, "shuffl": [9, 10], "longest": 9, "shorter": 9, "svmperf_bas": [9, 11], "classifiermixin": 9, "thorsten": 9, "refer": [9, 10], "svm_perf_learn": 9, "svm_perf_classifi": 9, "trade": [9, 11], "off": [9, 11], "margin": [9, 11], "std": 9, "qacc": 9, "qf1": 9, "qgm": 9, "12": 9, "26": 9, "23": 9, "train_siz": 10, "conform": 10, "round": 10, "loader_kwarg": 10, "read": 10, "tupl": [8, 10, 11], "tr": 10, "te": 10, "csr": 10, "csr_matrix": 10, "4403": 10, "my_collect": 10, "codefram": 10, "larger": [10, 11], "actual": [10, 11], "empti": 10, "met": 10, "whose": [10, 11], "train_prop": 10, "left": [8, 10], "stratif": 10, "greater": 10, "dataset_nam": 10, "data_hom": 10, "test_split": 10, "predefin": 10, "uci_dataset": 10, "dump": 10, "leav": 10, "quay_data": 10, "ml": 10, "5fcvx2": 10, "x2": 10, "offici": 10, "lequa": 10, "competit": 10, "t1a": 10, "t1b": 10, "t2a": 10, "t2b": 10, 
"raw": 10, "merchandis": 10, "sperduti": 10, "2022": 10, "overview": 10, "clef": 10, "lequa2022_experi": 10, "py": 10, "guid": 10, "val_gen": 10, "test_gen": 10, "samplesfromdir": 10, "minimun": 10, "kept": 10, "subsequ": 10, "mining6": 10, "devel": 10, "style": 10, "countvector": 10, "keyword": [10, 11], "nogap": 10, "regardless": 10, "codifi": 10, "unknown": 10, "surfac": 10, "assert": 10, "gap": 10, "preced": 10, "decid": 10, "uniqu": 10, "rare": 10, "unk": 10, "minimum": [10, 11], "occurr": 10, "org": [10, 11], "stabl": 10, "feature_extract": 10, "html": 10, "subtyp": 10, "spmatrix": 10, "remov": [10, 11], "infrequ": 10, "aka": [10, 11], "sublinear_tf": 10, "scall": 10, "counter": 10, "tfidfvector": 10, "whcih": 10, "had": 10, "encod": 10, "utf": 10, "csv": 10, "feat1": 10, "feat2": 10, "featn": 10, "covari": 10, "express": 10, "row": 10, "class2int": 10, "collet": 10, "fomart": 10, "progress": 10, "sentenc": 10, "classnam": 10, "u1": 10, "misclassif": 11, "n_classes_": [], "fit_classifi": 11, "bypass": 11, "y_": 11, "ptecondestim": 11, "prevs_estim": 11, "ax": 11, "entri": 11, "y_i": 11, "y_j": 11, "_posterior_probabilities_": 11, "attribut": 11, "subclass": 11, "give": 11, "outsid": 11, "unless": 11, "noth": 11, "els": 11, "cdf": 11, "match": 11, "helling": 11, "sought": 11, "channel": 11, "proper": 11, "ch": 11, "di": 11, "dij": 11, "fraction": 11, "th": 11, "tol": 11, "ternari": 11, "dl": 11, "doi": 11, "1145": 11, "3219819": 11, "3220059": 11, "histogram": 11, "toler": 11, "explicit": 11, "exact_train_prev": 11, "recalib": 11, "updat": 11, "likelihood": [9, 11], "mutual": 11, "recurs": 11, "until": 11, "converg": 11, "suggest": 11, "recalibr": 11, "reach": 11, "loop": 11, "cumul": 11, "unlabel": 11, "latter": 11, "forman": 11, "2006": 11, "2008": 11, "goal": 11, "bring": 11, "denomin": 11, "median": 11, "sweep": 11, "binary_quantifi": 11, "prevel": 11, "emploi": 11, "resp": 11, "subobject": 11, "nest": 11, "pipelin": 11, "__": 11, "simplif": 11, "2021": 11, "equival": 11, "cosest": 11, "heurist": 11, "choos": 11, "ground": 11, "complement": 11, "param_mod_sel": 11, "param_model_sel": 11, "min_po": 11, "max_sample_s": 11, "closest": 11, "preliminari": 11, "recomput": 11, "compat": 11, "l": 11, "base_quantifier_class": 11, "factori": 11, "common": 11, "doc_embedding_s": 11, "stats_siz": 11, "lstm_hidden_s": 11, "lstm_nlayer": 11, "ff_layer": 11, "1024": 11, "bidirect": 11, "qdrop_p": 11, "order_bi": 11, "cell": 11, "connect": 11, "ff": 11, "sort": 11, "doc_embed": 11, "doc_posterior": 11, "recip": 11, "care": 11, "regist": 11, "hook": 11, "n_epoch": 11, "tr_iter_per_poch": 11, "va_iter_per_poch": 11, "checkpointdir": 11, "checkpointnam": 11, "phase": 11, "anyth": 11, "truth": 11, "mlpe": 11, "lazi": 11, "put": 11, "assumpion": 11, "beat": [9, 11], "estimant": 11, "kundaj": 9, "shrikumar": 9, "novemb": 9, "232": 9, "pmlr": 9, "outpu": [], "partit": 9, "ight": [], "valueerror": 8, "attach": 10, "mix": 10, "onevsallaggreg": 11, "parallel_backend": 11, "loki": 11, "backend": 11, "cannot": 11, "temp": 11, "getonevsal": [], "realiz": 8, "prepar": 8, "act": 8, "modif": 8, "place": 8, "host_fold": 9, "tmp": 9, "delet": 9, "newelm": 11, "underli": 11, "newsvma": 11, "newsvmkld": 11, "newsvmq": 11, "newsvmra": 11, "newonevsal": 11}, "objects": {"": [[8, 0, 0, "-", "quapy"]], "quapy": [[9, 0, 0, "-", "classification"], [10, 0, 0, "-", "data"], [8, 0, 0, "-", "error"], [8, 0, 0, "-", "evaluation"], [8, 0, 0, "-", "functional"], [11, 0, 0, "-", "method"], [8, 0, 0, "-", "model_selection"], [8, 
0, 0, "-", "plot"], [8, 0, 0, "-", "protocol"], [8, 0, 0, "-", "util"]], "quapy.classification": [[9, 0, 0, "-", "calibration"], [9, 0, 0, "-", "methods"], [9, 0, 0, "-", "neural"], [9, 0, 0, "-", "svmperf"]], "quapy.classification.calibration": [[9, 1, 1, "", "BCTSCalibration"], [9, 1, 1, "", "NBVSCalibration"], [9, 1, 1, "", "RecalibratedProbabilisticClassifier"], [9, 1, 1, "", "RecalibratedProbabilisticClassifierBase"], [9, 1, 1, "", "TSCalibration"], [9, 1, 1, "", "VSCalibration"]], "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase": [[9, 2, 1, "", "classes_"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "fit_cv"], [9, 3, 1, "", "fit_tr_val"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"]], "quapy.classification.methods": [[9, 1, 1, "", "LowRankLogisticRegression"]], "quapy.classification.methods.LowRankLogisticRegression": [[9, 3, 1, "", "fit"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"], [9, 3, 1, "", "set_params"], [9, 3, 1, "", "transform"]], "quapy.classification.neural": [[9, 1, 1, "", "CNNnet"], [9, 1, 1, "", "LSTMnet"], [9, 1, 1, "", "NeuralClassifierTrainer"], [9, 1, 1, "", "TextClassifierNet"], [9, 1, 1, "", "TorchDataset"]], "quapy.classification.neural.CNNnet": [[9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "get_params"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"]], "quapy.classification.neural.LSTMnet": [[9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "get_params"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"]], "quapy.classification.neural.NeuralClassifierTrainer": [[9, 2, 1, "", "device"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict"], [9, 3, 1, "", "predict_proba"], [9, 3, 1, "", "reset_net_params"], [9, 3, 1, "", "set_params"], [9, 3, 1, "", "transform"]], "quapy.classification.neural.TextClassifierNet": [[9, 3, 1, "", "dimensions"], [9, 3, 1, "", "document_embedding"], [9, 3, 1, "", "forward"], [9, 3, 1, "", "get_params"], [9, 3, 1, "", "predict_proba"], [9, 4, 1, "", "training"], [9, 2, 1, "", "vocabulary_size"], [9, 3, 1, "", "xavier_uniform"]], "quapy.classification.neural.TorchDataset": [[9, 3, 1, "", "asDataloader"]], "quapy.classification.svmperf": [[9, 1, 1, "", "SVMperf"]], "quapy.classification.svmperf.SVMperf": [[9, 3, 1, "", "decision_function"], [9, 3, 1, "", "fit"], [9, 3, 1, "", "predict"], [9, 4, 1, "", "valid_losses"]], "quapy.data": [[10, 0, 0, "-", "base"], [10, 0, 0, "-", "datasets"], [10, 0, 0, "-", "preprocessing"], [10, 0, 0, "-", "reader"]], "quapy.data.base": [[10, 1, 1, "", "Dataset"], [10, 1, 1, "", "LabelledCollection"]], "quapy.data.base.Dataset": [[10, 3, 1, "", "SplitStratified"], [10, 2, 1, "", "binary"], [10, 2, 1, "", "classes_"], [10, 3, 1, "", "kFCV"], [10, 3, 1, "", "load"], [10, 2, 1, "", "n_classes"], [10, 3, 1, "", "stats"], [10, 2, 1, "", "train_test"], [10, 2, 1, "", "vocabulary_size"]], "quapy.data.base.LabelledCollection": [[10, 2, 1, "", "X"], [10, 2, 1, "", "Xp"], [10, 2, 1, "", "Xy"], [10, 2, 1, "", "binary"], [10, 3, 1, "", "counts"], [10, 3, 1, "", "kFCV"], [10, 3, 1, "", "load"], [10, 3, 1, "", "mix"], [10, 2, 1, "", "n_classes"], [10, 2, 1, "", "p"], [10, 3, 1, "", "prevalence"], [10, 3, 1, "", "sampling"], [10, 3, 1, "", "sampling_from_index"], [10, 3, 1, "", "sampling_index"], [10, 3, 1, "", "split_random"], [10, 3, 1, "", "split_stratified"], [10, 3, 1, "", "stats"], [10, 3, 1, "", "uniform_sampling"], [10, 3, 1, "", "uniform_sampling_index"], [10, 2, 1, "", "y"]], 
"quapy.data.datasets": [[10, 5, 1, "", "fetch_UCIDataset"], [10, 5, 1, "", "fetch_UCILabelledCollection"], [10, 5, 1, "", "fetch_lequa2022"], [10, 5, 1, "", "fetch_reviews"], [10, 5, 1, "", "fetch_twitter"], [10, 5, 1, "", "warn"]], "quapy.data.preprocessing": [[10, 1, 1, "", "IndexTransformer"], [10, 5, 1, "", "index"], [10, 5, 1, "", "reduce_columns"], [10, 5, 1, "", "standardize"], [10, 5, 1, "", "text2tfidf"]], "quapy.data.preprocessing.IndexTransformer": [[10, 3, 1, "", "add_word"], [10, 3, 1, "", "fit"], [10, 3, 1, "", "fit_transform"], [10, 3, 1, "", "transform"], [10, 3, 1, "", "vocabulary_size"]], "quapy.data.reader": [[10, 5, 1, "", "binarize"], [10, 5, 1, "", "from_csv"], [10, 5, 1, "", "from_sparse"], [10, 5, 1, "", "from_text"], [10, 5, 1, "", "reindex_labels"]], "quapy.error": [[8, 5, 1, "", "absolute_error"], [8, 5, 1, "", "acc_error"], [8, 5, 1, "", "acce"], [8, 5, 1, "", "ae"], [8, 5, 1, "", "f1_error"], [8, 5, 1, "", "f1e"], [8, 5, 1, "", "from_name"], [8, 5, 1, "", "kld"], [8, 5, 1, "", "mae"], [8, 5, 1, "", "mean_absolute_error"], [8, 5, 1, "", "mean_relative_absolute_error"], [8, 5, 1, "", "mkld"], [8, 5, 1, "", "mnkld"], [8, 5, 1, "", "mrae"], [8, 5, 1, "", "mse"], [8, 5, 1, "", "nkld"], [8, 5, 1, "", "rae"], [8, 5, 1, "", "relative_absolute_error"], [8, 5, 1, "", "se"], [8, 5, 1, "", "smooth"]], "quapy.evaluation": [[8, 5, 1, "", "evaluate"], [8, 5, 1, "", "evaluation_report"], [8, 5, 1, "", "prediction"]], "quapy.functional": [[8, 5, 1, "", "HellingerDistance"], [8, 5, 1, "", "TopsoeDistance"], [8, 5, 1, "", "adjusted_quantification"], [8, 5, 1, "", "check_prevalence_vector"], [8, 5, 1, "", "get_nprevpoints_approximation"], [8, 5, 1, "", "normalize_prevalence"], [8, 5, 1, "", "num_prevalence_combinations"], [8, 5, 1, "", "prevalence_from_labels"], [8, 5, 1, "", "prevalence_from_probabilities"], [8, 5, 1, "", "prevalence_linspace"], [8, 5, 1, "", "strprev"], [8, 5, 1, "", "uniform_prevalence_sampling"], [8, 5, 1, "", "uniform_simplex_sampling"]], "quapy.method": [[11, 0, 0, "-", "aggregative"], [11, 0, 0, "-", "base"], [11, 0, 0, "-", "meta"], [11, 0, 0, "-", "neural"], [11, 0, 0, "-", "non_aggregative"]], "quapy.method.aggregative": [[11, 1, 1, "", "ACC"], [11, 4, 1, "", "AdjustedClassifyAndCount"], [11, 1, 1, "", "AggregativeProbabilisticQuantifier"], [11, 1, 1, "", "AggregativeQuantifier"], [11, 1, 1, "", "CC"], [11, 4, 1, "", "ClassifyAndCount"], [11, 1, 1, "", "DistributionMatching"], [11, 1, 1, "", "DyS"], [11, 1, 1, "", "EMQ"], [11, 4, 1, "", "ExpectationMaximizationQuantifier"], [11, 1, 1, "", "HDy"], [11, 4, 1, "", "HellingerDistanceY"], [11, 1, 1, "", "MAX"], [11, 1, 1, "", "MS"], [11, 1, 1, "", "MS2"], [11, 4, 1, "", "MedianSweep"], [11, 4, 1, "", "MedianSweep2"], [11, 1, 1, "", "OneVsAllAggregative"], [11, 1, 1, "", "PACC"], [11, 1, 1, "", "PCC"], [11, 4, 1, "", "ProbabilisticAdjustedClassifyAndCount"], [11, 4, 1, "", "ProbabilisticClassifyAndCount"], [11, 4, 1, "", "SLD"], [11, 1, 1, "", "SMM"], [11, 1, 1, "", "T50"], [11, 1, 1, "", "ThresholdOptimization"], [11, 1, 1, "", "X"], [11, 5, 1, "", "cross_generate_predictions"], [11, 5, 1, "", "cross_generate_predictions_depr"], [11, 5, 1, "", "newELM"], [11, 5, 1, "", "newSVMAE"], [11, 5, 1, "", "newSVMKLD"], [11, 5, 1, "", "newSVMQ"], [11, 5, 1, "", "newSVMRAE"]], "quapy.method.aggregative.ACC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "getPteCondEstim"], [11, 3, 1, "", "solve_adjustment"]], "quapy.method.aggregative.AggregativeProbabilisticQuantifier": 
[[11, 3, 1, "", "classify"]], "quapy.method.aggregative.AggregativeQuantifier": [[11, 3, 1, "", "aggregate"], [11, 2, 1, "", "classes_"], [11, 2, 1, "", "classifier"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.aggregative.CC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.DistributionMatching": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.DyS": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.EMQ": [[11, 3, 1, "", "EM"], [11, 4, 1, "", "EPSILON"], [11, 4, 1, "", "MAX_ITER"], [11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "predict_proba"]], "quapy.method.aggregative.HDy": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.OneVsAllAggregative": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"]], "quapy.method.aggregative.PACC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "classify"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "getPteCondEstim"]], "quapy.method.aggregative.PCC": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.SMM": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.aggregative.ThresholdOptimization": [[11, 3, 1, "", "aggregate"], [11, 3, 1, "", "fit"]], "quapy.method.base": [[11, 1, 1, "", "BaseQuantifier"], [11, 1, 1, "", "BinaryQuantifier"], [11, 1, 1, "", "OneVsAll"], [11, 1, 1, "", "OneVsAllGeneric"], [11, 5, 1, "", "newOneVsAll"]], "quapy.method.base.BaseQuantifier": [[11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.base.OneVsAllGeneric": [[11, 2, 1, "", "classes_"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.method.meta": [[11, 5, 1, "", "EACC"], [11, 5, 1, "", "ECC"], [11, 5, 1, "", "EEMQ"], [11, 5, 1, "", "EHDy"], [11, 5, 1, "", "EPACC"], [11, 1, 1, "", "Ensemble"], [11, 5, 1, "", "ensembleFactory"], [11, 5, 1, "", "get_probability_distribution"]], "quapy.method.meta.Ensemble": [[11, 4, 1, "", "VALID_POLICIES"], [11, 2, 1, "", "aggregative"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "get_params"], [11, 2, 1, "", "probabilistic"], [11, 3, 1, "", "quantify"], [11, 3, 1, "", "set_params"]], "quapy.method.neural": [[11, 1, 1, "", "QuaNetModule"], [11, 1, 1, "", "QuaNetTrainer"], [11, 5, 1, "", "mae_loss"]], "quapy.method.neural.QuaNetModule": [[11, 2, 1, "", "device"], [11, 3, 1, "", "forward"], [11, 4, 1, "", "training"]], "quapy.method.neural.QuaNetTrainer": [[11, 2, 1, "", "classes_"], [11, 3, 1, "", "clean_checkpoint"], [11, 3, 1, "", "clean_checkpoint_dir"], [11, 3, 1, "", "fit"], [11, 3, 1, "", "get_params"], [11, 3, 1, "", "quantify"], [11, 3, 1, "", "set_params"]], "quapy.method.non_aggregative": [[11, 1, 1, "", "MaximumLikelihoodPrevalenceEstimation"]], "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation": [[11, 3, 1, "", "fit"], [11, 3, 1, "", "quantify"]], "quapy.model_selection": [[8, 1, 1, "", "GridSearchQ"], [8, 5, 1, "", "cross_val_predict"]], "quapy.model_selection.GridSearchQ": [[8, 3, 1, "", "best_model"], [8, 3, 1, "", "fit"], [8, 3, 1, "", "get_params"], [8, 3, 1, "", "quantify"], [8, 3, 1, "", "set_params"]], "quapy.plot": [[8, 5, 1, "", "binary_bias_bins"], [8, 5, 1, "", "binary_bias_global"], [8, 5, 1, "", "binary_diagonal"], [8, 5, 1, "", "brokenbar_supremacy_by_drift"], [8, 5, 1, "", "error_by_drift"]], "quapy.protocol": [[8, 1, 1, "", "APP"], [8, 1, 1, "", "AbstractProtocol"], [8, 1, 1, "", "AbstractStochasticSeededProtocol"], [8, 1, 1, "", 
"DomainMixer"], [8, 1, 1, "", "NPP"], [8, 1, 1, "", "OnLabelledCollectionProtocol"], [8, 1, 1, "", "USimplexPP"]], "quapy.protocol.APP": [[8, 3, 1, "", "prevalence_grid"], [8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.AbstractProtocol": [[8, 3, 1, "", "total"]], "quapy.protocol.AbstractStochasticSeededProtocol": [[8, 3, 1, "", "collator"], [8, 2, 1, "", "random_state"], [8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"]], "quapy.protocol.DomainMixer": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.NPP": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.protocol.OnLabelledCollectionProtocol": [[8, 4, 1, "", "RETURN_TYPES"], [8, 3, 1, "", "get_collator"], [8, 3, 1, "", "get_labelled_collection"], [8, 3, 1, "", "on_preclassified_instances"]], "quapy.protocol.USimplexPP": [[8, 3, 1, "", "sample"], [8, 3, 1, "", "samples_parameters"], [8, 3, 1, "", "total"]], "quapy.util": [[8, 1, 1, "", "EarlyStop"], [8, 5, 1, "", "create_if_not_exist"], [8, 5, 1, "", "create_parent_dir"], [8, 5, 1, "", "download_file"], [8, 5, 1, "", "download_file_if_not_exists"], [8, 5, 1, "", "get_quapy_home"], [8, 5, 1, "", "map_parallel"], [8, 5, 1, "", "parallel"], [8, 5, 1, "", "pickled_resource"], [8, 5, 1, "", "save_text_file"], [8, 5, 1, "", "temp_seed"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:property", "3": "py:method", "4": "py:attribute", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "property", "Python property"], "3": ["py", "method", "Python method"], "4": ["py", "attribute", "Python attribute"], "5": ["py", "function", "Python function"]}, "titleterms": {"dataset": [0, 10], "review": 0, "twitter": 0, "sentiment": 0, "uci": 0, "machin": 0, "learn": 0, "issu": 0, "ad": 0, "custom": 0, "data": [0, 10], "process": 0, "evalu": [1, 8], "error": [1, 5, 8], "measur": 1, "protocol": [1, 8], "instal": 2, "requir": 2, "svm": 2, "perf": 2, "quantif": [2, 3, 4, 5], "orient": [2, 4], "loss": [2, 3, 4], "method": [3, 9, 11], "aggreg": [3, 11], "The": 3, "classifi": 3, "count": 3, "variant": 3, "expect": 3, "maxim": 3, "emq": 3, "helling": 3, "distanc": 3, "y": 3, "hdy": 3, "explicit": 3, "minim": 3, "meta": [3, 11], "model": [3, 4], "ensembl": 3, "quanet": 3, "neural": [3, 9, 11], "network": 3, "select": 4, "target": 4, "classif": [4, 9], "plot": [5, 8], "diagon": 5, "bia": 5, "drift": 5, "welcom": 6, "quapi": [6, 7, 8, 9, 10, 11], "": 6, "document": 6, "introduct": 6, "A": 6, "quick": 6, "exampl": 6, "featur": 6, "content": [6, 8, 9, 10, 11], "indic": 6, "tabl": 6, "packag": [8, 9, 10, 11], "subpackag": 8, "submodul": [8, 9, 10, 11], "function": 8, "model_select": 8, "util": 8, "modul": [8, 9, 10, 11], "calibr": 9, "svmperf": 9, "base": [10, 11], "preprocess": 10, "reader": 10, "non_aggreg": 11}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Datasets": [[0, "datasets"]], "Reviews Datasets": [[0, "reviews-datasets"]], "Twitter Sentiment Datasets": [[0, "twitter-sentiment-datasets"]], "UCI Machine Learning": [[0, "uci-machine-learning"]], "Issues:": [[0, "issues"]], "Adding Custom Datasets": [[0, 
"adding-custom-datasets"]], "Data Processing": [[0, "data-processing"]], "Evaluation": [[1, "evaluation"]], "Error Measures": [[1, "error-measures"]], "Evaluation Protocols": [[1, "evaluation-protocols"]], "Installation": [[2, "installation"]], "Requirements": [[2, "requirements"]], "SVM-perf with quantification-oriented losses": [[2, "svm-perf-with-quantification-oriented-losses"]], "Quantification Methods": [[3, "quantification-methods"]], "Aggregative Methods": [[3, "aggregative-methods"]], "The Classify & Count variants": [[3, "the-classify-count-variants"]], "Expectation Maximization (EMQ)": [[3, "expectation-maximization-emq"]], "Hellinger Distance y (HDy)": [[3, "hellinger-distance-y-hdy"]], "Explicit Loss Minimization": [[3, "explicit-loss-minimization"]], "Meta Models": [[3, "meta-models"]], "Ensembles": [[3, "ensembles"]], "The QuaNet neural network": [[3, "the-quanet-neural-network"]], "Model Selection": [[4, "model-selection"]], "Targeting a Quantification-oriented loss": [[4, "targeting-a-quantification-oriented-loss"]], "Targeting a Classification-oriented loss": [[4, "targeting-a-classification-oriented-loss"]], "Plotting": [[5, "plotting"]], "Diagonal Plot": [[5, "diagonal-plot"]], "Quantification bias": [[5, "quantification-bias"]], "Error by Drift": [[5, "error-by-drift"]], "Welcome to QuaPy\u2019s documentation!": [[6, "welcome-to-quapy-s-documentation"]], "Introduction": [[6, "introduction"]], "A quick example:": [[6, "a-quick-example"]], "Features": [[6, "features"]], "Contents:": [[6, null]], "Indices and tables": [[6, "indices-and-tables"]], "quapy": [[7, "quapy"]], "quapy package": [[8, "quapy-package"]], "Submodules": [[8, "submodules"], [9, "submodules"], [10, "submodules"], [11, "submodules"]], "quapy.error": [[8, "module-quapy.error"]], "quapy.evaluation": [[8, "module-quapy.evaluation"]], "quapy.protocol": [[8, "quapy-protocol"]], "quapy.functional": [[8, "module-quapy.functional"]], "quapy.model_selection": [[8, "module-quapy.model_selection"]], "quapy.plot": [[8, "module-quapy.plot"]], "quapy.util": [[8, "module-quapy.util"]], "Subpackages": [[8, "subpackages"]], "Module contents": [[8, "module-quapy"], [9, "module-quapy.classification"], [10, "module-quapy.data"], [11, "module-quapy.method"]], "quapy.classification package": [[9, "quapy-classification-package"]], "quapy.classification.calibration": [[9, "quapy-classification-calibration"]], "quapy.classification.methods": [[9, "module-quapy.classification.methods"]], "quapy.classification.neural": [[9, "module-quapy.classification.neural"]], "quapy.classification.svmperf": [[9, "module-quapy.classification.svmperf"]], "quapy.data package": [[10, "quapy-data-package"]], "quapy.data.base": [[10, "module-quapy.data.base"]], "quapy.data.datasets": [[10, "module-quapy.data.datasets"]], "quapy.data.preprocessing": [[10, "module-quapy.data.preprocessing"]], "quapy.data.reader": [[10, "module-quapy.data.reader"]], "quapy.method package": [[11, "quapy-method-package"]], "quapy.method.aggregative": [[11, "module-quapy.method.aggregative"]], "quapy.method.base": [[11, "module-quapy.method.base"]], "quapy.method.meta": [[11, "module-quapy.method.meta"]], "quapy.method.neural": [[11, "module-quapy.method.neural"]], "quapy.method.non_aggregative": [[11, "module-quapy.method.non_aggregative"]]}, "indexentries": {"app (class in quapy.protocol)": [[8, "quapy.protocol.APP"]], "abstractprotocol (class in quapy.protocol)": [[8, "quapy.protocol.AbstractProtocol"]], "abstractstochasticseededprotocol (class in quapy.protocol)": 
[[8, "quapy.protocol.AbstractStochasticSeededProtocol"]], "domainmixer (class in quapy.protocol)": [[8, "quapy.protocol.DomainMixer"]], "earlystop (class in quapy.util)": [[8, "quapy.util.EarlyStop"]], "gridsearchq (class in quapy.model_selection)": [[8, "quapy.model_selection.GridSearchQ"]], "hellingerdistance() (in module quapy.functional)": [[8, "quapy.functional.HellingerDistance"]], "npp (class in quapy.protocol)": [[8, "quapy.protocol.NPP"]], "onlabelledcollectionprotocol (class in quapy.protocol)": [[8, "quapy.protocol.OnLabelledCollectionProtocol"]], "return_types (quapy.protocol.onlabelledcollectionprotocol attribute)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.RETURN_TYPES"]], "topsoedistance() (in module quapy.functional)": [[8, "quapy.functional.TopsoeDistance"]], "usimplexpp (class in quapy.protocol)": [[8, "quapy.protocol.USimplexPP"]], "absolute_error() (in module quapy.error)": [[8, "quapy.error.absolute_error"]], "acc_error() (in module quapy.error)": [[8, "quapy.error.acc_error"]], "acce() (in module quapy.error)": [[8, "quapy.error.acce"]], "adjusted_quantification() (in module quapy.functional)": [[8, "quapy.functional.adjusted_quantification"]], "ae() (in module quapy.error)": [[8, "quapy.error.ae"]], "best_model() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.best_model"]], "binary_bias_bins() (in module quapy.plot)": [[8, "quapy.plot.binary_bias_bins"]], "binary_bias_global() (in module quapy.plot)": [[8, "quapy.plot.binary_bias_global"]], "binary_diagonal() (in module quapy.plot)": [[8, "quapy.plot.binary_diagonal"]], "brokenbar_supremacy_by_drift() (in module quapy.plot)": [[8, "quapy.plot.brokenbar_supremacy_by_drift"]], "check_prevalence_vector() (in module quapy.functional)": [[8, "quapy.functional.check_prevalence_vector"]], "collator() (quapy.protocol.abstractstochasticseededprotocol method)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.collator"]], "create_if_not_exist() (in module quapy.util)": [[8, "quapy.util.create_if_not_exist"]], "create_parent_dir() (in module quapy.util)": [[8, "quapy.util.create_parent_dir"]], "cross_val_predict() (in module quapy.model_selection)": [[8, "quapy.model_selection.cross_val_predict"]], "download_file() (in module quapy.util)": [[8, "quapy.util.download_file"]], "download_file_if_not_exists() (in module quapy.util)": [[8, "quapy.util.download_file_if_not_exists"]], "error_by_drift() (in module quapy.plot)": [[8, "quapy.plot.error_by_drift"]], "evaluate() (in module quapy.evaluation)": [[8, "quapy.evaluation.evaluate"]], "evaluation_report() (in module quapy.evaluation)": [[8, "quapy.evaluation.evaluation_report"]], "f1_error() (in module quapy.error)": [[8, "quapy.error.f1_error"]], "f1e() (in module quapy.error)": [[8, "quapy.error.f1e"]], "fit() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.fit"]], "from_name() (in module quapy.error)": [[8, "quapy.error.from_name"]], "get_collator() (quapy.protocol.onlabelledcollectionprotocol class method)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.get_collator"]], "get_labelled_collection() (quapy.protocol.onlabelledcollectionprotocol method)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.get_labelled_collection"]], "get_nprevpoints_approximation() (in module quapy.functional)": [[8, "quapy.functional.get_nprevpoints_approximation"]], "get_params() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.get_params"]], "get_quapy_home() (in module 
quapy.util)": [[8, "quapy.util.get_quapy_home"]], "kld() (in module quapy.error)": [[8, "quapy.error.kld"]], "mae() (in module quapy.error)": [[8, "quapy.error.mae"]], "map_parallel() (in module quapy.util)": [[8, "quapy.util.map_parallel"]], "mean_absolute_error() (in module quapy.error)": [[8, "quapy.error.mean_absolute_error"]], "mean_relative_absolute_error() (in module quapy.error)": [[8, "quapy.error.mean_relative_absolute_error"]], "mkld() (in module quapy.error)": [[8, "quapy.error.mkld"]], "mnkld() (in module quapy.error)": [[8, "quapy.error.mnkld"]], "module": [[8, "module-quapy"], [8, "module-quapy.error"], [8, "module-quapy.evaluation"], [8, "module-quapy.functional"], [8, "module-quapy.model_selection"], [8, "module-quapy.plot"], [8, "module-quapy.protocol"], [8, "module-quapy.util"], [9, "module-quapy.classification"], [9, "module-quapy.classification.calibration"], [9, "module-quapy.classification.methods"], [9, "module-quapy.classification.neural"], [9, "module-quapy.classification.svmperf"], [10, "module-quapy.data"], [10, "module-quapy.data.base"], [10, "module-quapy.data.datasets"], [10, "module-quapy.data.preprocessing"], [10, "module-quapy.data.reader"], [11, "module-quapy.method"], [11, "module-quapy.method.aggregative"], [11, "module-quapy.method.base"], [11, "module-quapy.method.meta"], [11, "module-quapy.method.neural"], [11, "module-quapy.method.non_aggregative"]], "mrae() (in module quapy.error)": [[8, "quapy.error.mrae"]], "mse() (in module quapy.error)": [[8, "quapy.error.mse"]], "nkld() (in module quapy.error)": [[8, "quapy.error.nkld"]], "normalize_prevalence() (in module quapy.functional)": [[8, "quapy.functional.normalize_prevalence"]], "num_prevalence_combinations() (in module quapy.functional)": [[8, "quapy.functional.num_prevalence_combinations"]], "on_preclassified_instances() (quapy.protocol.onlabelledcollectionprotocol method)": [[8, "quapy.protocol.OnLabelledCollectionProtocol.on_preclassified_instances"]], "parallel() (in module quapy.util)": [[8, "quapy.util.parallel"]], "pickled_resource() (in module quapy.util)": [[8, "quapy.util.pickled_resource"]], "prediction() (in module quapy.evaluation)": [[8, "quapy.evaluation.prediction"]], "prevalence_from_labels() (in module quapy.functional)": [[8, "quapy.functional.prevalence_from_labels"]], "prevalence_from_probabilities() (in module quapy.functional)": [[8, "quapy.functional.prevalence_from_probabilities"]], "prevalence_grid() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.prevalence_grid"]], "prevalence_linspace() (in module quapy.functional)": [[8, "quapy.functional.prevalence_linspace"]], "quantify() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.quantify"]], "quapy": [[8, "module-quapy"]], "quapy.error": [[8, "module-quapy.error"]], "quapy.evaluation": [[8, "module-quapy.evaluation"]], "quapy.functional": [[8, "module-quapy.functional"]], "quapy.model_selection": [[8, "module-quapy.model_selection"]], "quapy.plot": [[8, "module-quapy.plot"]], "quapy.protocol": [[8, "module-quapy.protocol"]], "quapy.util": [[8, "module-quapy.util"]], "rae() (in module quapy.error)": [[8, "quapy.error.rae"]], "random_state (quapy.protocol.abstractstochasticseededprotocol property)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.random_state"]], "relative_absolute_error() (in module quapy.error)": [[8, "quapy.error.relative_absolute_error"]], "sample() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.sample"]], "sample() 
(quapy.protocol.abstractstochasticseededprotocol method)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.sample"]], "sample() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.sample"]], "sample() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.sample"]], "sample() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.sample"]], "samples_parameters() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.samples_parameters"]], "samples_parameters() (quapy.protocol.abstractstochasticseededprotocol method)": [[8, "quapy.protocol.AbstractStochasticSeededProtocol.samples_parameters"]], "samples_parameters() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.samples_parameters"]], "samples_parameters() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.samples_parameters"]], "samples_parameters() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.samples_parameters"]], "save_text_file() (in module quapy.util)": [[8, "quapy.util.save_text_file"]], "se() (in module quapy.error)": [[8, "quapy.error.se"]], "set_params() (quapy.model_selection.gridsearchq method)": [[8, "quapy.model_selection.GridSearchQ.set_params"]], "smooth() (in module quapy.error)": [[8, "quapy.error.smooth"]], "strprev() (in module quapy.functional)": [[8, "quapy.functional.strprev"]], "temp_seed() (in module quapy.util)": [[8, "quapy.util.temp_seed"]], "total() (quapy.protocol.app method)": [[8, "quapy.protocol.APP.total"]], "total() (quapy.protocol.abstractprotocol method)": [[8, "quapy.protocol.AbstractProtocol.total"]], "total() (quapy.protocol.domainmixer method)": [[8, "quapy.protocol.DomainMixer.total"]], "total() (quapy.protocol.npp method)": [[8, "quapy.protocol.NPP.total"]], "total() (quapy.protocol.usimplexpp method)": [[8, "quapy.protocol.USimplexPP.total"]], "uniform_prevalence_sampling() (in module quapy.functional)": [[8, "quapy.functional.uniform_prevalence_sampling"]], "uniform_simplex_sampling() (in module quapy.functional)": [[8, "quapy.functional.uniform_simplex_sampling"]], "bctscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.BCTSCalibration"]], "cnnnet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.CNNnet"]], "lstmnet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.LSTMnet"]], "lowranklogisticregression (class in quapy.classification.methods)": [[9, "quapy.classification.methods.LowRankLogisticRegression"]], "nbvscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.NBVSCalibration"]], "neuralclassifiertrainer (class in quapy.classification.neural)": [[9, "quapy.classification.neural.NeuralClassifierTrainer"]], "recalibratedprobabilisticclassifier (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifier"]], "recalibratedprobabilisticclassifierbase (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase"]], "svmperf (class in quapy.classification.svmperf)": [[9, "quapy.classification.svmperf.SVMperf"]], "tscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.TSCalibration"]], "textclassifiernet (class in quapy.classification.neural)": [[9, "quapy.classification.neural.TextClassifierNet"]], "torchdataset (class in quapy.classification.neural)": [[9, "quapy.classification.neural.TorchDataset"]], 
"vscalibration (class in quapy.classification.calibration)": [[9, "quapy.classification.calibration.VSCalibration"]], "asdataloader() (quapy.classification.neural.torchdataset method)": [[9, "quapy.classification.neural.TorchDataset.asDataloader"]], "classes_ (quapy.classification.calibration.recalibratedprobabilisticclassifierbase property)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.classes_"]], "decision_function() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.decision_function"]], "device (quapy.classification.neural.neuralclassifiertrainer property)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.device"]], "dimensions() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.dimensions"]], "document_embedding() (quapy.classification.neural.cnnnet method)": [[9, "quapy.classification.neural.CNNnet.document_embedding"]], "document_embedding() (quapy.classification.neural.lstmnet method)": [[9, "quapy.classification.neural.LSTMnet.document_embedding"]], "document_embedding() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.document_embedding"]], "fit() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit"]], "fit() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.fit"]], "fit() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.fit"]], "fit() (quapy.classification.svmperf.svmperf method)": [[9, "quapy.classification.svmperf.SVMperf.fit"]], "fit_cv() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_cv"]], "fit_tr_val() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.fit_tr_val"]], "forward() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.forward"]], "get_params() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.get_params"]], "get_params() (quapy.classification.neural.cnnnet method)": [[9, "quapy.classification.neural.CNNnet.get_params"]], "get_params() (quapy.classification.neural.lstmnet method)": [[9, "quapy.classification.neural.LSTMnet.get_params"]], "get_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.get_params"]], "get_params() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.get_params"]], "predict() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict"]], "predict() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.predict"]], "predict() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.predict"]], "predict() (quapy.classification.svmperf.svmperf 
method)": [[9, "quapy.classification.svmperf.SVMperf.predict"]], "predict_proba() (quapy.classification.calibration.recalibratedprobabilisticclassifierbase method)": [[9, "quapy.classification.calibration.RecalibratedProbabilisticClassifierBase.predict_proba"]], "predict_proba() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.predict_proba"]], "predict_proba() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.predict_proba"]], "predict_proba() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.predict_proba"]], "quapy.classification": [[9, "module-quapy.classification"]], "quapy.classification.calibration": [[9, "module-quapy.classification.calibration"]], "quapy.classification.methods": [[9, "module-quapy.classification.methods"]], "quapy.classification.neural": [[9, "module-quapy.classification.neural"]], "quapy.classification.svmperf": [[9, "module-quapy.classification.svmperf"]], "reset_net_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.reset_net_params"]], "set_params() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.set_params"]], "set_params() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.set_params"]], "training (quapy.classification.neural.cnnnet attribute)": [[9, "quapy.classification.neural.CNNnet.training"]], "training (quapy.classification.neural.lstmnet attribute)": [[9, "quapy.classification.neural.LSTMnet.training"]], "training (quapy.classification.neural.textclassifiernet attribute)": [[9, "quapy.classification.neural.TextClassifierNet.training"]], "transform() (quapy.classification.methods.lowranklogisticregression method)": [[9, "quapy.classification.methods.LowRankLogisticRegression.transform"]], "transform() (quapy.classification.neural.neuralclassifiertrainer method)": [[9, "quapy.classification.neural.NeuralClassifierTrainer.transform"]], "valid_losses (quapy.classification.svmperf.svmperf attribute)": [[9, "quapy.classification.svmperf.SVMperf.valid_losses"]], "vocabulary_size (quapy.classification.neural.cnnnet property)": [[9, "quapy.classification.neural.CNNnet.vocabulary_size"]], "vocabulary_size (quapy.classification.neural.lstmnet property)": [[9, "quapy.classification.neural.LSTMnet.vocabulary_size"]], "vocabulary_size (quapy.classification.neural.textclassifiernet property)": [[9, "quapy.classification.neural.TextClassifierNet.vocabulary_size"]], "xavier_uniform() (quapy.classification.neural.textclassifiernet method)": [[9, "quapy.classification.neural.TextClassifierNet.xavier_uniform"]], "dataset (class in quapy.data.base)": [[10, "quapy.data.base.Dataset"]], "indextransformer (class in quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.IndexTransformer"]], "labelledcollection (class in quapy.data.base)": [[10, "quapy.data.base.LabelledCollection"]], "splitstratified() (quapy.data.base.dataset class method)": [[10, "quapy.data.base.Dataset.SplitStratified"]], "x (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.X"]], "xp (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.Xp"]], "xy (quapy.data.base.labelledcollection property)": 
[[10, "quapy.data.base.LabelledCollection.Xy"]], "add_word() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.add_word"]], "binarize() (in module quapy.data.reader)": [[10, "quapy.data.reader.binarize"]], "binary (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.binary"]], "binary (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.binary"]], "classes_ (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.classes_"]], "counts() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.counts"]], "fetch_ucidataset() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_UCIDataset"]], "fetch_ucilabelledcollection() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_UCILabelledCollection"]], "fetch_lequa2022() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_lequa2022"]], "fetch_reviews() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_reviews"]], "fetch_twitter() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.fetch_twitter"]], "fit() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.fit"]], "fit_transform() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.fit_transform"]], "from_csv() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_csv"]], "from_sparse() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_sparse"]], "from_text() (in module quapy.data.reader)": [[10, "quapy.data.reader.from_text"]], "index() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.index"]], "kfcv() (quapy.data.base.dataset class method)": [[10, "quapy.data.base.Dataset.kFCV"]], "kfcv() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.kFCV"]], "load() (quapy.data.base.dataset class method)": [[10, "quapy.data.base.Dataset.load"]], "load() (quapy.data.base.labelledcollection class method)": [[10, "quapy.data.base.LabelledCollection.load"]], "mix() (quapy.data.base.labelledcollection class method)": [[10, "quapy.data.base.LabelledCollection.mix"]], "n_classes (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.n_classes"]], "n_classes (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.n_classes"]], "p (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.p"]], "prevalence() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.prevalence"]], "quapy.data": [[10, "module-quapy.data"]], "quapy.data.base": [[10, "module-quapy.data.base"]], "quapy.data.datasets": [[10, "module-quapy.data.datasets"]], "quapy.data.preprocessing": [[10, "module-quapy.data.preprocessing"]], "quapy.data.reader": [[10, "module-quapy.data.reader"]], "reduce_columns() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.reduce_columns"]], "reindex_labels() (in module quapy.data.reader)": [[10, "quapy.data.reader.reindex_labels"]], "sampling() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.sampling"]], "sampling_from_index() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.sampling_from_index"]], "sampling_index() (quapy.data.base.labelledcollection method)": [[10, 
"quapy.data.base.LabelledCollection.sampling_index"]], "split_random() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.split_random"]], "split_stratified() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.split_stratified"]], "standardize() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.standardize"]], "stats() (quapy.data.base.dataset method)": [[10, "quapy.data.base.Dataset.stats"]], "stats() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.stats"]], "text2tfidf() (in module quapy.data.preprocessing)": [[10, "quapy.data.preprocessing.text2tfidf"]], "train_test (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.train_test"]], "transform() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.transform"]], "uniform_sampling() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.uniform_sampling"]], "uniform_sampling_index() (quapy.data.base.labelledcollection method)": [[10, "quapy.data.base.LabelledCollection.uniform_sampling_index"]], "vocabulary_size (quapy.data.base.dataset property)": [[10, "quapy.data.base.Dataset.vocabulary_size"]], "vocabulary_size() (quapy.data.preprocessing.indextransformer method)": [[10, "quapy.data.preprocessing.IndexTransformer.vocabulary_size"]], "warn() (in module quapy.data.datasets)": [[10, "quapy.data.datasets.warn"]], "y (quapy.data.base.labelledcollection property)": [[10, "quapy.data.base.LabelledCollection.y"]], "acc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.ACC"]], "adjustedclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.AdjustedClassifyAndCount"]], "aggregativeprobabilisticquantifier (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.AggregativeProbabilisticQuantifier"]], "aggregativequantifier (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.AggregativeQuantifier"]], "basequantifier (class in quapy.method.base)": [[11, "quapy.method.base.BaseQuantifier"]], "binaryquantifier (class in quapy.method.base)": [[11, "quapy.method.base.BinaryQuantifier"]], "cc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.CC"]], "classifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ClassifyAndCount"]], "distributionmatching (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.DistributionMatching"]], "dys (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.DyS"]], "eacc() (in module quapy.method.meta)": [[11, "quapy.method.meta.EACC"]], "ecc() (in module quapy.method.meta)": [[11, "quapy.method.meta.ECC"]], "eemq() (in module quapy.method.meta)": [[11, "quapy.method.meta.EEMQ"]], "ehdy() (in module quapy.method.meta)": [[11, "quapy.method.meta.EHDy"]], "em() (quapy.method.aggregative.emq class method)": [[11, "quapy.method.aggregative.EMQ.EM"]], "emq (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.EMQ"]], "epacc() (in module quapy.method.meta)": [[11, "quapy.method.meta.EPACC"]], "epsilon (quapy.method.aggregative.emq attribute)": [[11, "quapy.method.aggregative.EMQ.EPSILON"]], "ensemble (class in quapy.method.meta)": [[11, "quapy.method.meta.Ensemble"]], "expectationmaximizationquantifier (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ExpectationMaximizationQuantifier"]], "hdy (class in 
quapy.method.aggregative)": [[11, "quapy.method.aggregative.HDy"]], "hellingerdistancey (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.HellingerDistanceY"]], "max (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MAX"]], "max_iter (quapy.method.aggregative.emq attribute)": [[11, "quapy.method.aggregative.EMQ.MAX_ITER"]], "ms (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MS"]], "ms2 (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.MS2"]], "maximumlikelihoodprevalenceestimation (class in quapy.method.non_aggregative)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation"]], "mediansweep (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.MedianSweep"]], "mediansweep2 (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.MedianSweep2"]], "onevsall (class in quapy.method.base)": [[11, "quapy.method.base.OneVsAll"]], "onevsallaggregative (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.OneVsAllAggregative"]], "onevsallgeneric (class in quapy.method.base)": [[11, "quapy.method.base.OneVsAllGeneric"]], "pacc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.PACC"]], "pcc (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.PCC"]], "probabilisticadjustedclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ProbabilisticAdjustedClassifyAndCount"]], "probabilisticclassifyandcount (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.ProbabilisticClassifyAndCount"]], "quanetmodule (class in quapy.method.neural)": [[11, "quapy.method.neural.QuaNetModule"]], "quanettrainer (class in quapy.method.neural)": [[11, "quapy.method.neural.QuaNetTrainer"]], "sld (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.SLD"]], "smm (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.SMM"]], "t50 (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.T50"]], "thresholdoptimization (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.ThresholdOptimization"]], "valid_policies (quapy.method.meta.ensemble attribute)": [[11, "quapy.method.meta.Ensemble.VALID_POLICIES"]], "x (class in quapy.method.aggregative)": [[11, "quapy.method.aggregative.X"]], "aggregate() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.aggregate"]], "aggregate() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.aggregate"]], "aggregate() (quapy.method.aggregative.cc method)": [[11, "quapy.method.aggregative.CC.aggregate"]], "aggregate() (quapy.method.aggregative.distributionmatching method)": [[11, "quapy.method.aggregative.DistributionMatching.aggregate"]], "aggregate() (quapy.method.aggregative.dys method)": [[11, "quapy.method.aggregative.DyS.aggregate"]], "aggregate() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.aggregate"]], "aggregate() (quapy.method.aggregative.hdy method)": [[11, "quapy.method.aggregative.HDy.aggregate"]], "aggregate() (quapy.method.aggregative.onevsallaggregative method)": [[11, "quapy.method.aggregative.OneVsAllAggregative.aggregate"]], "aggregate() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.aggregate"]], "aggregate() (quapy.method.aggregative.pcc method)": [[11, "quapy.method.aggregative.PCC.aggregate"]], "aggregate() (quapy.method.aggregative.smm 
method)": [[11, "quapy.method.aggregative.SMM.aggregate"]], "aggregate() (quapy.method.aggregative.thresholdoptimization method)": [[11, "quapy.method.aggregative.ThresholdOptimization.aggregate"]], "aggregative (quapy.method.meta.ensemble property)": [[11, "quapy.method.meta.Ensemble.aggregative"]], "classes_ (quapy.method.aggregative.aggregativequantifier property)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classes_"]], "classes_ (quapy.method.base.onevsallgeneric property)": [[11, "quapy.method.base.OneVsAllGeneric.classes_"]], "classes_ (quapy.method.neural.quanettrainer property)": [[11, "quapy.method.neural.QuaNetTrainer.classes_"]], "classifier (quapy.method.aggregative.aggregativequantifier property)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classifier"]], "classify() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.classify"]], "classify() (quapy.method.aggregative.aggregativeprobabilisticquantifier method)": [[11, "quapy.method.aggregative.AggregativeProbabilisticQuantifier.classify"]], "classify() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.classify"]], "classify() (quapy.method.aggregative.onevsallaggregative method)": [[11, "quapy.method.aggregative.OneVsAllAggregative.classify"]], "classify() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.classify"]], "clean_checkpoint() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.clean_checkpoint"]], "clean_checkpoint_dir() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.clean_checkpoint_dir"]], "cross_generate_predictions() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.cross_generate_predictions"]], "cross_generate_predictions_depr() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.cross_generate_predictions_depr"]], "device (quapy.method.neural.quanetmodule property)": [[11, "quapy.method.neural.QuaNetModule.device"]], "ensemblefactory() (in module quapy.method.meta)": [[11, "quapy.method.meta.ensembleFactory"]], "fit() (quapy.method.aggregative.acc method)": [[11, "quapy.method.aggregative.ACC.fit"]], "fit() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.fit"]], "fit() (quapy.method.aggregative.cc method)": [[11, "quapy.method.aggregative.CC.fit"]], "fit() (quapy.method.aggregative.distributionmatching method)": [[11, "quapy.method.aggregative.DistributionMatching.fit"]], "fit() (quapy.method.aggregative.dys method)": [[11, "quapy.method.aggregative.DyS.fit"]], "fit() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.fit"]], "fit() (quapy.method.aggregative.hdy method)": [[11, "quapy.method.aggregative.HDy.fit"]], "fit() (quapy.method.aggregative.pacc method)": [[11, "quapy.method.aggregative.PACC.fit"]], "fit() (quapy.method.aggregative.pcc method)": [[11, "quapy.method.aggregative.PCC.fit"]], "fit() (quapy.method.aggregative.smm method)": [[11, "quapy.method.aggregative.SMM.fit"]], "fit() (quapy.method.aggregative.thresholdoptimization method)": [[11, "quapy.method.aggregative.ThresholdOptimization.fit"]], "fit() (quapy.method.base.basequantifier method)": [[11, "quapy.method.base.BaseQuantifier.fit"]], "fit() (quapy.method.base.onevsallgeneric method)": [[11, "quapy.method.base.OneVsAllGeneric.fit"]], "fit() (quapy.method.meta.ensemble method)": [[11, 
"quapy.method.meta.Ensemble.fit"]], "fit() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.fit"]], "fit() (quapy.method.non_aggregative.maximumlikelihoodprevalenceestimation method)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.fit"]], "forward() (quapy.method.neural.quanetmodule method)": [[11, "quapy.method.neural.QuaNetModule.forward"]], "getptecondestim() (quapy.method.aggregative.acc class method)": [[11, "quapy.method.aggregative.ACC.getPteCondEstim"]], "getptecondestim() (quapy.method.aggregative.pacc class method)": [[11, "quapy.method.aggregative.PACC.getPteCondEstim"]], "get_params() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.get_params"]], "get_params() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.get_params"]], "get_probability_distribution() (in module quapy.method.meta)": [[11, "quapy.method.meta.get_probability_distribution"]], "mae_loss() (in module quapy.method.neural)": [[11, "quapy.method.neural.mae_loss"]], "newelm() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.newELM"]], "newonevsall() (in module quapy.method.base)": [[11, "quapy.method.base.newOneVsAll"]], "newsvmae() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.newSVMAE"]], "newsvmkld() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.newSVMKLD"]], "newsvmq() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.newSVMQ"]], "newsvmrae() (in module quapy.method.aggregative)": [[11, "quapy.method.aggregative.newSVMRAE"]], "predict_proba() (quapy.method.aggregative.emq method)": [[11, "quapy.method.aggregative.EMQ.predict_proba"]], "probabilistic (quapy.method.meta.ensemble property)": [[11, "quapy.method.meta.Ensemble.probabilistic"]], "quantify() (quapy.method.aggregative.aggregativequantifier method)": [[11, "quapy.method.aggregative.AggregativeQuantifier.quantify"]], "quantify() (quapy.method.base.basequantifier method)": [[11, "quapy.method.base.BaseQuantifier.quantify"]], "quantify() (quapy.method.base.onevsallgeneric method)": [[11, "quapy.method.base.OneVsAllGeneric.quantify"]], "quantify() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.quantify"]], "quantify() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.quantify"]], "quantify() (quapy.method.non_aggregative.maximumlikelihoodprevalenceestimation method)": [[11, "quapy.method.non_aggregative.MaximumLikelihoodPrevalenceEstimation.quantify"]], "quapy.method": [[11, "module-quapy.method"]], "quapy.method.aggregative": [[11, "module-quapy.method.aggregative"]], "quapy.method.base": [[11, "module-quapy.method.base"]], "quapy.method.meta": [[11, "module-quapy.method.meta"]], "quapy.method.neural": [[11, "module-quapy.method.neural"]], "quapy.method.non_aggregative": [[11, "module-quapy.method.non_aggregative"]], "set_params() (quapy.method.meta.ensemble method)": [[11, "quapy.method.meta.Ensemble.set_params"]], "set_params() (quapy.method.neural.quanettrainer method)": [[11, "quapy.method.neural.QuaNetTrainer.set_params"]], "solve_adjustment() (quapy.method.aggregative.acc class method)": [[11, "quapy.method.aggregative.ACC.solve_adjustment"]], "training (quapy.method.neural.quanetmodule attribute)": [[11, "quapy.method.neural.QuaNetModule.training"]]}}) \ No newline at end of file diff --git a/examples/quanet_example.py b/examples/quanet_example.py new file mode 100644 index 
0000000..4be3132 --- /dev/null +++ b/examples/quanet_example.py @@ -0,0 +1,35 @@ +import quapy as qp +from quapy.classification.neural import CNNnet +from quapy.classification.neural import NeuralClassifierTrainer +from quapy.method.meta import QuaNet +import quapy.functional as F + +""" +This example shows how to train QuaNet. The internal classifier is a word-based CNN. +""" + +# set the sample size in the environment +qp.environ["SAMPLE_SIZE"] = 100 + +# the dataset is textual (Kindle reviews from Amazon), so we need to index terms, i.e., +# we need to convert distinct terms into numerical ids +dataset = qp.datasets.fetch_reviews('kindle', pickle=True) +qp.data.preprocessing.index(dataset, min_df=5, inplace=True) +train, test = dataset.train_test + +# train the text classifier: +cnn_module = CNNnet(dataset.vocabulary_size, dataset.training.n_classes) +cnn_classifier = NeuralClassifierTrainer(cnn_module, device='cuda') +cnn_classifier.fit(*dataset.training.Xy) + +# train QuaNet (alternatively, we can set fit_classifier=True and let QuaNet train the classifier) +quantifier = QuaNet(cnn_classifier, device='cuda') +quantifier.fit(train, fit_classifier=False) + +# prediction and evaluation +estim_prevalence = quantifier.quantify(test.instances) +mae = qp.error.mae(test.prevalence(), estim_prevalence) + +print(f'true prevalence: {F.strprev(test.prevalence())}') +print(f'estim prevalence: {F.strprev(estim_prevalence)}') +print(f'MAE = {mae:.4f}') \ No newline at end of file diff --git a/quapy/classification/neural.py b/quapy/classification/neural.py index 0d576c5..dc8de5b 100644 --- a/quapy/classification/neural.py +++ b/quapy/classification/neural.py @@ -229,11 +229,11 @@ class NeuralClassifierTrainer: self.net.eval() opt = self.trainer_hyperparams with torch.no_grad(): - positive_probs = [] + posteriors = [] for xi in TorchDataset(instances).asDataloader( opt['batch_size_test'], shuffle=False, pad_length=opt['padding_length'], device=opt['device']): - positive_probs.append(self.net.predict_proba(xi)) - return np.concatenate(positive_probs) + posteriors.append(self.net.predict_proba(xi)) + return np.concatenate(posteriors) def transform(self, instances): """ diff --git a/quapy/data/base.py b/quapy/data/base.py index 7093821..ef3b7f2 100644 --- a/quapy/data/base.py +++ b/quapy/data/base.py @@ -523,3 +523,14 @@ class Dataset: yield Dataset(train, test, name=f'fold {(i % nfolds) + 1}/{nfolds} (round={(i // nfolds) + 1})') + def reduce(self, n_train=100, n_test=100): + """ + Reduce the number of instances in place for quick experiments. Preserves the prevalence of each set. 
+ + :param n_train: number of training documents to keep (default 100) + :param n_test: number of test documents to keep (default 100) + :return: self + """ + self.training = self.training.sampling(n_train, *self.training.prevalence()) + self.test = self.test.sampling(n_test, *self.test.prevalence()) + return self \ No newline at end of file diff --git a/quapy/data/preprocessing.py b/quapy/data/preprocessing.py index e65ccf7..9aa8f8b 100644 --- a/quapy/data/preprocessing.py +++ b/quapy/data/preprocessing.py @@ -121,6 +121,9 @@ def index(dataset: Dataset, min_df=5, inplace=False, **kwargs): training_index = indexer.fit_transform(dataset.training.instances) test_index = indexer.transform(dataset.test.instances) + training_index = np.asarray(training_index, dtype=object) + test_index = np.asarray(test_index, dtype=object) + if inplace: dataset.training = LabelledCollection(training_index, dataset.training.labels, dataset.classes_) dataset.test = LabelledCollection(test_index, dataset.test.labels, dataset.classes_) @@ -181,12 +184,12 @@ class IndexTransformer: # given the number of tasks and the number of jobs, generates the slices for the parallel processes assert self.unk != -1, 'transform called before fit' n_jobs = qp._get_njobs(n_jobs) - indexed = map_parallel(func=self._index, args=X, n_jobs=n_jobs) - return np.asarray(indexed) + return map_parallel(func=self._index, args=X, n_jobs=n_jobs) + def _index(self, documents): vocab = self.vocabulary_.copy() - return [[vocab.prevalence(word, self.unk) for word in self.analyzer(doc)] for doc in tqdm(documents, 'indexing')] + return [[vocab.get(word, self.unk) for word in self.analyzer(doc)] for doc in tqdm(documents, 'indexing')] def fit_transform(self, X, n_jobs=None): """ diff --git a/quapy/evaluation.py b/quapy/evaluation.py index 95193aa..4f5de10 100644 --- a/quapy/evaluation.py +++ b/quapy/evaluation.py @@ -2,7 +2,7 @@ from typing import Union, Callable, Iterable import numpy as np from tqdm import tqdm import quapy as qp -from quapy.protocol import AbstractProtocol, OnLabelledCollectionProtocol +from quapy.protocol import AbstractProtocol, OnLabelledCollectionProtocol, IterateProtocol from quapy.method.base import BaseQuantifier import pandas as pd @@ -94,5 +94,15 @@ def evaluate( return error_metric(true_prevs, estim_prevs) +def evaluate_on_samples( + model: BaseQuantifier, + samples: [qp.data.LabelledCollection], + error_metric:Union[str, Callable], + verbose=False): + + return evaluate(model, IterateProtocol(samples), error_metric, aggr_speedup=False, verbose=verbose) + + + diff --git a/quapy/method/aggregative.py b/quapy/method/aggregative.py index b872ba3..bb63c64 100644 --- a/quapy/method/aggregative.py +++ b/quapy/method/aggregative.py @@ -338,7 +338,7 @@ class ACC(AggregativeQuantifier): ) self.cc = CC(self.classifier) - self.Pte_cond_estim_ = self.getPteCondEstim(data.classes_, y, y_) + self.Pte_cond_estim_ = self.getPteCondEstim(self.classifier.classes_, y, y_) return self @@ -996,7 +996,7 @@ def newSVMAE(svmperf_base=None, C=1): """ return newELM(svmperf_base, loss='mae', C=C) -def newSVMAE(svmperf_base=None, C=1): +def newSVMRAE(svmperf_base=None, C=1): """ SVM(KLD) is an Explicit Loss Minimization (ELM) quantifier set to optimize for the Relative Absolute Error as first used by `Moreo and Sebastiani, 2021 `_. 
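The hunks above add Dataset.reduce(), which trims a dataset in place while preserving the prevalence of the training and test sets, and evaluation.evaluate_on_samples(), which scores a quantifier over a list of pre-generated samples by wrapping them in the new IterateProtocol (added to quapy/protocol.py later in this patch) with the aggregative speed-up disabled. A minimal usage sketch, assuming the API exactly as introduced in these hunks; the dataset loader, the quantifier, and the prevalence values are arbitrary choices for illustration only:

import quapy as qp
from quapy.method.aggregative import PACC
from sklearn.linear_model import LogisticRegression

# load a small binary dataset and trim it for a quick experiment (new Dataset.reduce)
dataset = qp.datasets.fetch_UCIDataset('ionosphere').reduce(n_train=100, n_test=100)
train, test = dataset.train_test

# train an aggregative quantifier as usual
quantifier = PACC(LogisticRegression())
quantifier.fit(train)

# pre-generate a few test samples at chosen prevalence values and evaluate on them;
# evaluate_on_samples iterates over the list via IterateProtocol and returns the mean error
samples = [test.sampling(50, p, 1 - p) for p in (0.2, 0.5, 0.8)]
mae = qp.evaluation.evaluate_on_samples(quantifier, samples, error_metric='mae')
print(f'MAE over {len(samples)} samples = {mae:.4f}')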
diff --git a/quapy/method/meta.py b/quapy/method/meta.py index ba682ee..6db6861 100644 --- a/quapy/method/meta.py +++ b/quapy/method/meta.py @@ -7,6 +7,7 @@ from sklearn.model_selection import GridSearchCV, cross_val_predict from tqdm import tqdm import quapy as qp +from quapy.evaluation import evaluate_on_samples from quapy import functional as F from quapy.data import LabelledCollection from quapy.model_selection import GridSearchQ @@ -182,7 +183,7 @@ class Ensemble(BaseQuantifier): tests = [m[3] for m in self.ensemble] scores = [] for i, model in enumerate(self.ensemble): - scores.append(evaluate(model[0], tests[:i] + tests[i + 1:], error, self.n_jobs)) + scores.append(evaluate_on_samples(model[0], tests[:i] + tests[i + 1:], error)) order = np.argsort(scores) self.ensemble = _select_k(self.ensemble, order, k=self.red_size) diff --git a/quapy/method/neural.py b/quapy/method/neural.py index 1871ff0..e348930 100644 --- a/quapy/method/neural.py +++ b/quapy/method/neural.py @@ -6,6 +6,7 @@ import torch from torch.nn import MSELoss from torch.nn.functional import relu +from quapy.protocol import USimplexPP from quapy.method.aggregative import * from quapy.util import EarlyStop @@ -41,7 +42,8 @@ class QuaNetTrainer(BaseQuantifier): :param classifier: an object implementing `fit` (i.e., that can be trained on labelled data), `predict_proba` (i.e., that can generate posterior probabilities of unlabelled examples) and `transform` (i.e., that can generate embedded representations of the unlabelled instances). - :param sample_size: integer, the sample size + :param sample_size: integer, the sample size; default is None, meaning that the sample size should be + taken from qp.environ["SAMPLE_SIZE"] :param n_epochs: integer, maximum number of training epochs :param tr_iter_per_poch: integer, number of training iterations before considering an epoch complete :param va_iter_per_poch: integer, number of validation iterations to perform after each epoch @@ -61,7 +63,7 @@ class QuaNetTrainer(BaseQuantifier): def __init__(self, classifier, - sample_size, + sample_size=None, n_epochs=100, tr_iter_per_poch=500, va_iter_per_poch=100, @@ -83,7 +85,7 @@ class QuaNetTrainer(BaseQuantifier): f'the classifier {classifier.__class__.__name__} does not seem to be able to produce posterior probabilities ' \ f'since it does not implement the method "predict_proba"' self.classifier = classifier - self.sample_size = sample_size + self.sample_size = qp._get_sample_size(sample_size) self.n_epochs = n_epochs self.tr_iter = tr_iter_per_poch self.va_iter = va_iter_per_poch @@ -216,16 +218,13 @@ class QuaNetTrainer(BaseQuantifier): self.quanet.train(mode=train) losses = [] mae_errors = [] - if train==False: - prevpoints = F.get_nprevpoints_approximation(iterations, self.quanet.n_classes) - iterations = F.num_prevalence_combinations(prevpoints, self.quanet.n_classes) - with qp.util.temp_seed(0): - sampling_index_gen = data.artificial_sampling_index_generator(self.sample_size, prevpoints) - else: - sampling_index_gen = [data.sampling_index(self.sample_size, *prev) for prev in - F.uniform_simplex_sampling(data.n_classes, iterations)] - pbar = tqdm(sampling_index_gen, total=iterations) if train else sampling_index_gen - + sampler = USimplexPP( + data, + sample_size=self.sample_size, + repeats=iterations, + random_state=None if train else 0 # different samples during train, same samples during validation + ) + pbar = tqdm(sampler.samples_parameters(), total=sampler.total()) for it, index in enumerate(pbar): sample_data =
data.sampling_from_index(index) sample_posteriors = posteriors[index] diff --git a/quapy/protocol.py b/quapy/protocol.py index 70f4a48..60df09c 100644 --- a/quapy/protocol.py +++ b/quapy/protocol.py @@ -34,6 +34,34 @@ class AbstractProtocol(metaclass=ABCMeta): return None +class IterateProtocol(AbstractProtocol): + """ + A simple protocol that iterates over a list of previously generated samples + + :param samples: a list of :class:`quapy.data.base.LabelledCollection` + """ + def __init__(self, samples: [LabelledCollection]): + self.samples = samples + + def __call__(self): + """ + Yields one sample from the initial list at a time + + :return: yields a tuple `(sample, prev)` at a time, where `sample` is a set of instances + and `prev` is an `np.ndarray` with the class prevalence values + """ + for sample in self.samples: + yield sample.Xp + + def total(self): + """ + Returns the number of samples in this protocol + + :return: int + """ + return len(self.samples) + + class AbstractStochasticSeededProtocol(AbstractProtocol): """ An `AbstractStochasticSeededProtocol` is a protocol that generates, via any random procedure (e.g., @@ -107,7 +135,7 @@ class OnLabelledCollectionProtocol: Protocols that generate samples from a :class:`qp.data.LabelledCollection` object. """ - RETURN_TYPES = ['sample_prev', 'labelled_collection'] + RETURN_TYPES = ['sample_prev', 'labelled_collection', 'index'] def get_labelled_collection(self): """ diff --git a/quapy/tests/test_evaluation.py b/quapy/tests/test_evaluation.py index 9a77867..db1ddc6 100644 --- a/quapy/tests/test_evaluation.py +++ b/quapy/tests/test_evaluation.py @@ -39,10 +39,6 @@ class EvalTestCase(unittest.TestCase): self.emq.fit(data) return self - def set_params(self, **parameters): pass - def get_params(self, deep=True): pass - - emq = NonAggregativeEMQ(SlowLR()).fit(train) tinit = time() diff --git a/quapy/tests/test_hierarchy.py b/quapy/tests/test_hierarchy.py index 21af4b6..2ea3af5 100644 --- a/quapy/tests/test_hierarchy.py +++ b/quapy/tests/test_hierarchy.py @@ -27,6 +27,5 @@ class HierarchyTestCase(unittest.TestCase): self.assertEqual(isinstance(m, AggregativeProbabilisticQuantifier), True) - if __name__ == '__main__': unittest.main() diff --git a/quapy/tests/test_labelcollection.py b/quapy/tests/test_labelcollection.py new file mode 100644 index 0000000..845f763 --- /dev/null +++ b/quapy/tests/test_labelcollection.py @@ -0,0 +1,21 @@ +import unittest +import numpy as np +import quapy as qp + + +class LabelCollectionTestCase(unittest.TestCase): + def test_split(self): + x = np.arange(100) + y = np.random.randint(0,5,100) + data = qp.data.LabelledCollection(x,y) + tr, te = data.split_random(0.7) + check_prev = tr.prevalence()*0.7 + te.prevalence()*0.3 + + self.assertEqual(len(tr), 70) + self.assertEqual(len(te), 30) + self.assertEqual(np.allclose(check_prev, data.prevalence()), True) + self.assertEqual(len(tr+te), len(data)) + + +if __name__ == '__main__': + unittest.main() diff --git a/quapy/tests/test_methods.py b/quapy/tests/test_methods.py index f13907c..4da5617 100644 --- a/quapy/tests/test_methods.py +++ b/quapy/tests/test_methods.py @@ -6,18 +6,21 @@ from sklearn.svm import LinearSVC import quapy as qp from quapy.method.base import BinaryQuantifier from quapy.data import Dataset, LabelledCollection -from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS, EXPLICIT_LOSS_MINIMIZATION_METHODS +from quapy.method import AGGREGATIVE_METHODS, NON_AGGREGATIVE_METHODS from quapy.method.aggregative import ACC,
PACC, HDy from quapy.method.meta import Ensemble -datasets = [pytest.param(qp.datasets.fetch_twitter('hcr'), id='hcr'), +datasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True), id='hcr'), pytest.param(qp.datasets.fetch_UCIDataset('ionosphere'), id='ionosphere')] +tinydatasets = [pytest.param(qp.datasets.fetch_twitter('hcr', pickle=True).reduce(), id='tiny_hcr'), + pytest.param(qp.datasets.fetch_UCIDataset('ionosphere').reduce(), id='tiny_ionosphere')] + learners = [LogisticRegression, LinearSVC] @pytest.mark.parametrize('dataset', datasets) -@pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS)) +@pytest.mark.parametrize('aggregative_method', AGGREGATIVE_METHODS) @pytest.mark.parametrize('learner', learners) def test_aggregative_methods(dataset: Dataset, aggregative_method, learner): model = aggregative_method(learner()) @@ -36,30 +39,6 @@ def test_aggregative_methods(dataset: Dataset, aggregative_method, learner): assert type(error) == numpy.float64 -@pytest.mark.parametrize('dataset', datasets) -@pytest.mark.parametrize('elm_method', EXPLICIT_LOSS_MINIMIZATION_METHODS) -def test_elm_methods(dataset: Dataset, elm_method): - try: - model = elm_method() - except AssertionError as ae: - if ae.args[0].find('does not seem to point to a valid path') > 0: - print('Missing SVMperf binary program, skipping test') - return - - if isinstance(model, BinaryQuantifier) and not dataset.binary: - print(f'skipping the test of binary model {model} on non-binary dataset {dataset}') - return - - model.fit(dataset.training) - - estim_prevalences = model.quantify(dataset.test.instances) - - true_prevalences = dataset.test.prevalence() - error = qp.error.mae(true_prevalences, estim_prevalences) - - assert type(error) == numpy.float64 - - @pytest.mark.parametrize('dataset', datasets) @pytest.mark.parametrize('non_aggregative_method', NON_AGGREGATIVE_METHODS) def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method): @@ -79,16 +58,20 @@ def test_non_aggregative_methods(dataset: Dataset, non_aggregative_method): assert type(error) == numpy.float64 -@pytest.mark.parametrize('base_method', AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS)) -@pytest.mark.parametrize('learner', learners) -@pytest.mark.parametrize('dataset', datasets) +@pytest.mark.parametrize('base_method', AGGREGATIVE_METHODS) +@pytest.mark.parametrize('learner', [LogisticRegression]) +@pytest.mark.parametrize('dataset', tinydatasets) @pytest.mark.parametrize('policy', Ensemble.VALID_POLICIES) def test_ensemble_method(base_method, learner, dataset: Dataset, policy): - qp.environ['SAMPLE_SIZE'] = len(dataset.training) - model = Ensemble(quantifier=base_method(learner()), size=5, policy=policy, n_jobs=-1) - if isinstance(model, BinaryQuantifier) and not dataset.binary: - print(f'skipping the test of binary model {model} on non-binary dataset {dataset}') + qp.environ['SAMPLE_SIZE'] = 20 + base_quantifier=base_method(learner()) + if isinstance(base_quantifier, BinaryQuantifier) and not dataset.binary: + print(f'skipping the test of binary model {base_quantifier} on non-binary dataset {dataset}') return + if not dataset.binary and policy=='ds': + print(f'skipping the test of binary policy ds on non-binary dataset {dataset}') + return + model = Ensemble(quantifier=base_quantifier, size=5, policy=policy, n_jobs=-1) model.fit(dataset.training) @@ -107,19 +90,23 @@ def test_quanet_method(): print('skipping QuaNet test due to missing torch package') 
return + + qp.environ['SAMPLE_SIZE'] = 100 + + # load the kindle dataset as text, and convert words to numerical indexes dataset = qp.datasets.fetch_reviews('kindle', pickle=True) - dataset = Dataset(dataset.training.sampling(100, *dataset.training.prevalence()), - dataset.test.sampling(100, *dataset.test.prevalence())) + dataset = Dataset(dataset.training.sampling(200, *dataset.training.prevalence()), + dataset.test.sampling(200, *dataset.test.prevalence())) qp.data.preprocessing.index(dataset, min_df=5, inplace=True) from quapy.classification.neural import CNNnet - cnn = CNNnet(dataset.vocabulary_size, dataset.training.n_classes) + cnn = CNNnet(dataset.vocabulary_size, dataset.n_classes) from quapy.classification.neural import NeuralClassifierTrainer learner = NeuralClassifierTrainer(cnn, device='cuda') from quapy.method.meta import QuaNet - model = QuaNet(learner, sample_size=len(dataset.training), device='cuda') + model = QuaNet(learner, device='cuda') if isinstance(model, BinaryQuantifier) and not dataset.binary: print(f'skipping the test of binary model {model} on non-binary dataset {dataset}') @@ -135,26 +122,12 @@ def test_quanet_method(): assert type(error) == numpy.float64 -def models_to_test_for_str_label_names(): - models = list() - learner = LogisticRegression - for method in AGGREGATIVE_METHODS.difference(EXPLICIT_LOSS_MINIMIZATION_METHODS): - models.append(method(learner(random_state=0))) - for method in NON_AGGREGATIVE_METHODS: - models.append(method()) - return models - - -@pytest.mark.parametrize('model', models_to_test_for_str_label_names()) -def test_str_label_names(model): - if type(model) in {ACC, PACC, HDy}: - print( - f'skipping the test of binary model {type(model)} because it currently does not support random seed control.') - return +def test_str_label_names(): + model = qp.method.aggregative.CC(LogisticRegression()) dataset = qp.datasets.fetch_reviews('imdb', pickle=True) dataset = Dataset(dataset.training.sampling(1000, *dataset.training.prevalence()), - dataset.test.sampling(1000, *dataset.test.prevalence())) + dataset.test.sampling(1000, 0.25, 0.75)) qp.data.preprocessing.text2tfidf(dataset, min_df=5, inplace=True) numpy.random.seed(0)