tables generation for Tweet experiments

commit 43ed808945
parent c5ae2f8b1f
@@ -1,11 +1,9 @@
 import quapy as qp
 import numpy as np
 from os import makedirs
-# from evaluate import evaluate_directory, statistical_significance, get_ranks_from_Gao_Sebastiani
 import sys, os
 import pickle
 from experiments import result_path
-from result_manager import ResultSet
 from tabular import Table

 tables_path = './tables'
@@ -42,7 +40,6 @@ nice = {
 }

-

 def nicerm(key):
     return '\mathrm{'+nice[key]+'}'

@@ -98,13 +95,13 @@ def save_table(path, table):
         foo.write(table)



 datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
 evaluation_measures = [qp.error.ae, qp.error.rae]
 gao_seb_methods = ['cc', 'acc', 'pcc', 'pacc', 'sld', 'svmq', 'svmkld', 'svmnkld']
 new_methods = []

-def addfunc(dataset, method, loss):
+def experiment_errors(dataset, method, loss):
     path = result_path(dataset, method, 'm'+loss if not loss.startswith('m') else loss)
     if os.path.exists(path):
         true_prevs, estim_prevs, _, _, _, _ = pickle.load(open(path, 'rb'))
@@ -127,49 +124,41 @@ for i, eval_func in enumerate(evaluation_measures):
     nold_methods = len(gao_seb_methods)
     nnew_methods = len(added_methods)

-    table = Table(rows=datasets, cols=methods, addfunc=addfunc)
-    # fill table
+    # fill data table
+    table = Table(rows=datasets, cols=methods)
     for dataset in datasets:
         for method in methods:
-            table.add(dataset, method, eval_name)
+            table.add(dataset, method, experiment_errors(dataset, method, eval_name))

+    # write the latex table
     tabular = """
-    \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|'*len(gao_seb_methods))+ '|' + ('Y|'*len(added_methods)) + """} \hline
-      & \multicolumn{"""+str(nold_methods)+"""}{c||}{Methods tested in~\cite{Gao:2016uq}} & \multicolumn{"""+str(nnew_methods)+"""}{c|}{} \\\\ \hline
+    \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|'*nold_methods)+ '|' + ('Y|'*nnew_methods) + """} \hline
+      & \multicolumn{"""+str(nold_methods)+"""}{c||}{Methods tested in~\cite{Gao:2016uq}} &
+      \multicolumn{"""+str(nnew_methods)+"""}{c|}{} \\\\ \hline
     """

     rowreplace={dataset: nice.get(dataset, dataset.upper()) for dataset in datasets}
     colreplace={method:'\side{' + nice.get(method, method.upper()) +'$^{' + nicerm(eval_name) + '}$} ' for method in methods}

-    tabular += table.latextabular(rowreplace=rowreplace, colreplace=colreplace)
+    tabular += table.latexTabular(rowreplace=rowreplace, colreplace=colreplace)
     tabular += "\n\end{tabularx}"

-    save_table(f'./tables/tab_results_{eval_name}.new2.tex', tabular)
+    save_table(f'./tables/tab_results_{eval_name}.new.tex', tabular)


     # Tables ranks for AE and RAE (two tables)
     # ----------------------------------------------------
-    def addfuncRank(dataset, method):
-        rank = table.get(dataset, method, 'rank')
-        if rank is None:
-            return None
-        return [rank]

     methods = gao_seb_methods
-    nold_methods = len(gao_seb_methods)

-    ranktable = Table(rows=datasets, cols=methods, addfunc=addfuncRank)
-    # fill table
+    # fill the data table
+    ranktable = Table(rows=datasets, cols=methods, missing='--')
     for dataset in datasets:
         for method in methods:
-            ranktable.add(dataset, method)
+            ranktable.add(dataset, method, values=table.get(dataset, method, 'rank'))


+    # write the latex table
     tabular = """
     \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|' * len(gao_seb_methods)) + """} \hline
-      & \multicolumn{""" + str(nold_methods) + """}{c||}{Methods tested in~\cite{Gao:2016uq}} \\\\ \hline
+      & \multicolumn{""" + str(nold_methods) + """}{c|}{Methods tested in~\cite{Gao:2016uq}} \\\\ \hline
     """
     for method in methods:
         tabular += ' & \side{' + nice.get(method, method.upper()) +'$^{' + nicerm(eval_name) + '}$} '
@@ -180,28 +169,29 @@ for i, eval_func in enumerate(evaluation_measures):
         for method in methods:
             newrank = ranktable.get(dataset, method)
             oldrank = gao_seb_ranks[f'{dataset}-{method}-{eval_name}']
-            if newrank is None:
-                newrank = '--'
-            else:
-                newrank = f'{int(newrank)}'
-            tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + ranktable.get_color(dataset, method)
+            if newrank != '--':
+                newrank = f'{int(newrank)}'
+            color = ranktable.get_color(dataset, method)
+            if color == '--':
+                color = ''
+            tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + color
         tabular += '\\\\\hline\n'
+    tabular += '\hline\n'

-    tabular += 'Average & '
+    tabular += 'Average '
     for method in methods:
-        newrank = ranktable.get_col_average(method)
+        newrank = ranktable.get_average(method)
         oldrank = gao_seb_ranks[f'Average-{method}-{eval_name}']
-        if newrank is None or np.isnan(newrank):
-            newrank = '--'
-        else:
+        if newrank != '--':
             newrank = f'{newrank:.1f}'
             oldrank = f'{oldrank:.1f}'
-        tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + ranktable.get_color(dataset, method)
+        color = ranktable.get_average(method, 'color')
+        if color == '--':
+            color = ''
+        tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + color
     tabular += '\\\\\hline\n'

     tabular += "\end{tabularx}"

-    save_table(f'./tables/tab_rank_{eval_name}.new2.tex', tabular)
+    save_table(f'./tables/tab_rank_{eval_name}.new.tex', tabular)


 print("[Done]")
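The hunks above complete the move from the ResultSet-driven flow to the new Table API: build an empty Table, push per-cell error arrays with add(), and render with latexTabular(). A minimal sketch of that flow, not part of the commit (row/column labels and values are placeholders; tabular is assumed importable from this repo):

    import numpy as np
    from tabular import Table

    datasets = ['semeval13', 'semeval14']   # placeholder labels
    methods = ['cc', 'acc']

    table = Table(rows=datasets, cols=methods)
    for d in datasets:
        for m in methods:
            # the real script feeds experiment_errors(d, m, loss) here
            table.add(d, m, np.random.rand(100))

    print(table.latexTabular())      # body rows plus the Average row
    print(table.get_average('cc'))   # column mean, or the missing marker

The next hunk deletes the module that provided ResultSet (presumably result_manager.py, given the import removed in the first hunk).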
@@ -1,204 +0,0 @@
-from scipy.stats import wilcoxon, ttest_ind_from_stats
-import numpy as np
-
-
-
-class ResultSet:
-    VALID_TESTS = [None, "wilcoxon", "ttest_ind_from_stats"]
-    TTEST_DIFF = 'different'
-    TTEST_SIM = 'similar'
-    TTEST_SAME = 'same'
-
-    def __init__(self, name, addfunc, compare='mean', lower_is_better=True, show_std=True, test="wilcoxon",
-                 remove_mean='', prec_mean=3, remove_std='', prec_std=3, maxtone=50, minval=None, maxval=None):
-        """
-
-        :param name: name of the result set (e.g., a Dataset)
-        :param addfunc: a function which is called to process the result input in the "add" method. This function should
-        return a dictionary containing any key-value (e.g., 'mean':0.89) of interest
-        :param compare: the key (as generated by addfunc) that is to be compared in order to rank results
-        :param lower_is_better: if True, lower values of the "compare" key will result in higher ranks
-        :param show_std: whether or not to show the 'std' value (if True, the addfunc is expected to generate it)
-        :param test: which test of statistical significance to use. If "wilcoxon" then scipy.stats.wilcoxon(x,y) will
-        be computed where x,y are the values of the key "values" as computed by addfunc. If "ttest_ind_from_stats", then
-        scipy.stats.ttest_ind_from_stats will be called on "mean", "std", "nobs" values (as computed by addfunc) for
-        both samples being compared.
-        :param remove_mean: if specified, removes the string from the mean (e.g., useful to remove the '0.')
-        :param remove_std: if specified, removes the string from the std (e.g., useful to remove the '0.')
-        """
-        self.name = name
-        self.addfunc = addfunc
-        self.compare = compare
-        self.lower_is_better = lower_is_better
-        self.show_std = show_std
-        assert test in self.VALID_TESTS, f'unknown test, valid are {self.VALID_TESTS}'
-        self.test = test
-        self.remove_mean = remove_mean
-        self.prec_mean = prec_mean
-        self.remove_std = remove_std
-        self.prec_std = prec_std
-        self.maxtone = maxtone
-        self.minval = minval
-        self.maxval = maxval
-
-        self.r = dict()
-        self.computed = False
-
-    def add(self, key, *args):
-        result = self.addfunc(*args)
-        if result is None:
-            return
-        assert 'values' in result, f'the add function {self.addfunc.__name__} does not fill the "values" attribute'
-        self.r[key] = result
-        vals = self.r[key]['values']
-        if isinstance(vals, np.ndarray):
-            self.r[key]['mean'] = vals.mean()
-            self.r[key]['std'] = vals.std()
-            self.r[key]['nobs'] = len(vals)
-        self.computed = False
-
-    def update(self):
-        if not self.computed:
-            self.compute()
-
-    def compute(self):
-        keylist = np.asarray(list(self.r.keys()))
-        vallist = [self.r[key][self.compare] for key in keylist]
-        keylist = keylist[np.argsort(vallist)]
-
-        print(vallist)
-        self.range_minval = min(vallist) if self.minval is None else self.minval
-        self.range_maxval = max(vallist) if self.maxval is None else self.maxval
-        if not self.lower_is_better:
-            keylist = keylist[::-1]
-
-        # keep track of statistical significance tests; if all are different, then the "phantom dags" will not be shown
-        self.some_similar = False
-
-        for i, key in enumerate(keylist):
-            rank = i + 1
-            isbest = rank == 1
-            if isbest:
-                best = self.r[key]
-            self.r[key]['best'] = isbest
-            self.r[key]['rank'] = rank
-
-            #color
-            val = self.r[key][self.compare]
-            self.r[key]['color'] = self.get_value_color(val, minval=self.range_minval, maxval=self.range_maxval)
-
-            if self.test is not None:
-                if isbest:
-                    p_val = 0
-                elif self.test == 'wilcoxon':
-                    _, p_val = wilcoxon(best['values'], self.r[key]['values'])
-                elif self.test == 'ttest_ind_from_stats':
-                    mean1, std1, nobs1 = best['mean'], best['std'], best['nobs']
-                    mean2, std2, nobs2 = self.r[key]['mean'], self.r[key]['std'], self.r[key]['nobs']
-                    _, p_val = ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
-
-                if 0.005 >= p_val:
-                    self.r[key]['test'] = ResultSet.TTEST_DIFF
-                elif 0.05 >= p_val > 0.005:
-                    self.r[key]['test'] = ResultSet.TTEST_SIM
-                    self.some_similar = True
-                elif p_val > 0.05:
-                    self.r[key]['test'] = ResultSet.TTEST_SAME
-                    self.some_similar = True
-
-        self.computed = True
-
-    def latex(self, key, missing='--', color=True):
-        if key not in self.r:
-            return missing
-
-        self.update()
-
-        rd = self.r[key]
-        s = f"{rd['mean']:.{self.prec_mean}f}"
-        if self.remove_mean:
-            s = s.replace(self.remove_mean, '.')
-        if rd['best']:
-            s = "\\textbf{"+s+"}"
-        else:
-            if self.test is not None and self.some_similar:
-                if rd['test'] == ResultSet.TTEST_SIM:
-                    s += '^{\dag\phantom{\dag}}'
-                elif rd['test'] == ResultSet.TTEST_SAME:
-                    s += '^{\ddag}'
-                elif rd['test'] == ResultSet.TTEST_DIFF:
-                    s += '^{\phantom{\ddag}}'
-
-        if self.show_std:
-            std = f"{rd['std']:.{self.prec_std}f}"
-            if self.remove_std:
-                std = std.replace(self.remove_std, '.')
-            s += f" \pm {std}"
-
-        s = f'$ {s} $'
-        if color:
-            s += ' ' + self.r[key]['color']
-
-        return s
-
-    def mean(self, attr='mean', required:int=None, missing=np.nan):
-        """
-        returns the mean value for the "attr" attribute
-        :param attr: the attribute to average across results
-        :param required: if specified, indicates the number of values that should be part of the mean; if this number
-        is different, then the mean is not computed
-        :param missing: the value to return in case the required condition is not satisfied
-        :return: the mean of the "key" attribute
-        """
-        keylist = list(self.r.keys())
-        vallist = [self.r[key].get(attr, None) for key in keylist]
-        if None in vallist:
-            return missing
-        if required is not None:
-            if len(vallist) != required:
-                return missing
-        return np.mean(vallist)
-
-    def get(self, key, attr, missing='--'):
-        if key in self.r:
-            self.update()
-            if attr in self.r[key]:
-                return self.r[key][attr]
-        return missing
-
-    def get_color(self, key):
-        if key not in self.r:
-            return ''
-        self.update()
-        return self.r[key]['color']
-
-    def get_value_color(self, val, minval=None, maxval=None):
-        if minval is None or maxval is None:
-            self.update()
-            minval=self.range_minval
-            maxval=self.range_maxval
-        val = (val - minval) / (maxval - minval)
-        if self.lower_is_better:
-            val = 1 - val
-        return color_red2green_01(val, self.maxtone)
-
-    def change_compare(self, attr):
-        self.compare = attr
-        self.computed = False
-
-
-
-
-def color_red2green_01(val, maxtone=100):
-    assert 0 <= val <= 1, f'val {val} out of range [0,1]'
-
-    # rescale to [-1,1]
-    val = val * 2 - 1
-    if val < 0:
-        color = 'red'
-        tone = maxtone * (-val)
-    else:
-        color = 'green'
-        tone = maxtone * val
-    return '\cellcolor{' + color + f'!{int(tone)}' + '}'
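The deleted ResultSet class bundled ranking, cell coloring, and the best-vs-rest significance tests; tabular.py's _addttest and pval_interpretation now carry that logic. A self-contained sketch of the p-value bucketing both implementations share (the sample arrays are made up):

    import numpy as np
    from scipy.stats import wilcoxon

    best = np.random.rand(100) * 0.1            # errors of the top-ranked method
    other = best + np.random.rand(100) * 0.05   # errors of a competitor

    _, p_val = wilcoxon(best, other)
    if p_val <= 0.005:
        outcome = 'different'   # typeset as ^{\phantom{\ddag}}
    elif p_val <= 0.05:
        outcome = 'similar'     # typeset as ^{\dag\phantom{\dag}}
    else:
        outcome = 'same'        # typeset as ^{\ddag}
    print(outcome, p_val)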
@@ -1,247 +0,0 @@
-import quapy as qp
-import numpy as np
-from os import makedirs
-# from evaluate import evaluate_directory, statistical_significance, get_ranks_from_Gao_Sebastiani
-import sys, os
-import pickle
-from experiments import result_path
-from result_manager import ResultSet
-
-
-tables_path = './tables'
-MAXTONE = 50  # sets the intensity of the maximum color reached by the worst (red) and best (green) results
-
-makedirs(tables_path, exist_ok=True)
-
-sample_size = 100
-qp.environ['SAMPLE_SIZE'] = sample_size
-
-
-nice = {
-    'mae':'AE',
-    'mrae':'RAE',
-    'ae':'AE',
-    'rae':'RAE',
-    'svmkld': 'SVM(KLD)',
-    'svmnkld': 'SVM(NKLD)',
-    'svmq': 'SVM(Q)',
-    'svmae': 'SVM(AE)',
-    'svmnae': 'SVM(NAE)',
-    'svmmae': 'SVM(AE)',
-    'svmmrae': 'SVM(RAE)',
-    'quanet': 'QuaNet',
-    'hdy': 'HDy',
-    'dys': 'DyS',
-    'svmperf':'',
-    'sanders': 'Sanders',
-    'semeval13': 'SemEval13',
-    'semeval14': 'SemEval14',
-    'semeval15': 'SemEval15',
-    'semeval16': 'SemEval16',
-    'Average': 'Average'
-}
-
-
-def nicerm(key):
-    return '\mathrm{'+nice[key]+'}'
-
-def color_from_rel_rank(rel_rank, maxtone=100):
-    rel_rank = rel_rank*2-1
-    if rel_rank < 0:
-        color = 'red'
-        tone = maxtone*(-rel_rank)
-    else:
-        color = 'green'
-        tone = maxtone*rel_rank
-    return '\cellcolor{' + color + f'!{int(tone)}' + '}'
-
-def color_from_abs_rank(abs_rank, n_methods, maxtone=100):
-    rel_rank = 1.-(abs_rank-1.)/(n_methods-1)
-    return color_from_rel_rank(rel_rank, maxtone)
-
-
-def load_Gao_Sebastiani_previous_results():
-    def rename(method):
-        old2new = {
-            'kld': 'svmkld',
-            'nkld': 'svmnkld',
-            'qbeta2': 'svmq',
-            'em': 'sld'
-        }
-        return old2new.get(method, method)
-
-    gao_seb_results = {}
-    with open('./Gao_Sebastiani_results.txt', 'rt') as fin:
-        lines = fin.readlines()
-        for line in lines[1:]:
-            line = line.strip()
-            parts = line.lower().split()
-            if len(parts) == 4:
-                dataset, method, ae, rae = parts
-            else:
-                method, ae, rae = parts
-            learner, method = method.split('-')
-            method = rename(method)
-            gao_seb_results[f'{dataset}-{method}-ae'] = float(ae)
-            gao_seb_results[f'{dataset}-{method}-rae'] = float(rae)
-    return gao_seb_results
-
-
-def get_ranks_from_Gao_Sebastiani():
-    gao_seb_results = load_Gao_Sebastiani_previous_results()
-    datasets = set([key.split('-')[0] for key in gao_seb_results.keys()])
-    methods = np.sort(np.unique([key.split('-')[1] for key in gao_seb_results.keys()]))
-    ranks = {}
-    for metric in ['ae', 'rae']:
-        for dataset in datasets:
-            scores = [gao_seb_results[f'{dataset}-{method}-{metric}'] for method in methods]
-            order = np.argsort(scores)
-            sorted_methods = methods[order]
-            for i, method in enumerate(sorted_methods):
-                ranks[f'{dataset}-{method}-{metric}'] = i+1
-        for method in methods:
-            rankave = np.mean([ranks[f'{dataset}-{method}-{metric}'] for dataset in datasets])
-            ranks[f'Average-{method}-{metric}'] = rankave
-    return ranks, gao_seb_results
-
-
-def save_table(path, table):
-    print(f'saving results in {path}')
-    with open(path, 'wt') as foo:
-        foo.write(table)
-
-
-# Tables evaluation scores for AE and RAE (two tables)
-# ----------------------------------------------------
-
-datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
-evaluation_measures = [qp.error.ae, qp.error.rae]
-gao_seb_methods = ['cc', 'acc', 'pcc', 'pacc', 'sld', 'svmq', 'svmkld', 'svmnkld']
-new_methods = []
-
-
-def addfunc(dataset, method, loss):
-    path = result_path(dataset, method, 'm'+loss if not loss.startswith('m') else loss)
-    if os.path.exists(path):
-        true_prevs, estim_prevs, _, _, _, _ = pickle.load(open(path, 'rb'))
-        err_fn = getattr(qp.error, loss)
-        errors = err_fn(true_prevs, estim_prevs)
-        return {
-            'values': errors,
-        }
-    return None
-
-def addave(method, tables):
-    values = []
-    for table in tables:
-        mean = table.get(method, 'values', missing=None)
-        if mean is None:
-            return None
-        values.append(mean)
-    values = np.concatenate(values)
-    return {
-        'values': values
-    }
-
-def addrankave(method, tables):
-    values = []
-    for table in tables:
-        rank = table.get(method, 'rank', missing=None)
-        if rank is None:
-            return None
-        values.append(rank)
-    return {
-        'values': np.asarray(values)
-    }
-
-
-TABLES = {eval_func.__name__:{} for eval_func in evaluation_measures}
-
-for i, eval_func in enumerate(evaluation_measures):
-    eval_name = eval_func.__name__
-    added_methods = ['svm' + eval_name] + new_methods
-    methods = gao_seb_methods + added_methods
-    nold_methods = len(gao_seb_methods)
-    nnew_methods = len(added_methods)
-
-    # fill table
-    TABLE = TABLES[eval_name]
-    for dataset in datasets:
-        TABLE[dataset] = ResultSet(dataset, addfunc, show_std=False, test="ttest_ind_from_stats")
-        for method in methods:
-            TABLE[dataset].add(method, dataset, method, eval_name)
-
-    TABLE['Average'] = ResultSet('ave', addave, show_std=False, test="ttest_ind_from_stats")
-    for method in methods:
-        TABLE['Average'].add(method, method, [TABLE[dataset] for dataset in datasets])
-
-    tabular = """
-    \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|'*len(gao_seb_methods))+ '|' + ('Y|'*len(added_methods)) + """} \hline
-      & \multicolumn{"""+str(nold_methods)+"""}{c||}{Methods tested in~\cite{Gao:2016uq}} & \multicolumn{"""+str(nnew_methods)+"""}{c|}{} \\\\ \hline
-    """
-
-    for method in methods:
-        tabular += ' & \side{' + nice.get(method, method.upper()) +'$^{' + nicerm(eval_name) + '}$} '
-    tabular += '\\\\\hline\n'
-
-    for dataset in datasets + ['Average']:
-        if dataset == 'Average': tabular+= '\line\n'
-        tabular += nice.get(dataset, dataset.upper()) + ' '
-        for method in methods:
-            tabular += ' & ' + TABLE[dataset].latex(method)
-        tabular += '\\\\\hline\n'
-
-    tabular += "\end{tabularx}"
-
-    save_table(f'./tables/tab_results_{eval_name}.new.tex', tabular)
-
-
-gao_seb_ranks, gao_seb_results = get_ranks_from_Gao_Sebastiani()
-
-# Tables ranks for AE and RAE (two tables)
-# ----------------------------------------------------
-for i, eval_func in enumerate(evaluation_measures):
-    eval_name = eval_func.__name__
-    methods = gao_seb_methods
-    nold_methods = len(gao_seb_methods)
-
-    TABLE = TABLES[eval_name]
-    TABLE['Average'] = ResultSet('ave', addrankave, show_std=False, test="ttest_ind_from_stats")
-    for method in methods:
-        TABLE['Average'].add(method, method, [TABLE[dataset] for dataset in datasets])
-
-
-    tabular = """
-    \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|' * len(gao_seb_methods)) + """} \hline
-      & \multicolumn{""" + str(nold_methods) + """}{c||}{Methods tested in~\cite{Gao:2016uq}} \\\\ \hline
-    """
-
-    for method in methods:
-        tabular += ' & \side{' + nice.get(method, method.upper()) +'$^{' + nicerm(eval_name) + '}$} '
-    tabular += '\\\\\hline\n'
-
-    for dataset in datasets + ['Average']:
-        if dataset == 'Average':
-            tabular += '\line\n'
-        else:
-            TABLE[dataset].change_compare('rank')
-        tabular += nice.get(dataset, dataset.upper()) + ' '
-        for method in gao_seb_methods:
-            if dataset == 'Average':
-                method_rank = TABLE[dataset].get(method, 'mean')
-            else:
-                method_rank = TABLE[dataset].get(method, 'rank')
-            gao_seb_rank = gao_seb_ranks[f'{dataset}-{method}-{eval_name}']
-            if dataset == 'Average':
-                if method_rank != '--':
-                    method_rank = f'{method_rank:.1f}'
-                gao_seb_rank = f'{gao_seb_rank:.1f}'
-            tabular += ' & ' + f'{method_rank}' + f' ({gao_seb_rank}) ' + TABLE[dataset].get_color(method)
-        tabular += '\\\\\hline\n'
-    tabular += "\end{tabularx}"
-
-    save_table(f'./tables/tab_rank_{eval_name}.new.tex', tabular)
-
-
-print("[Done]")
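This second deleted file is the previous incarnation of the table-generation script, written against ResultSet. The rank bookkeeping it did by hand in get_ranks_from_Gao_Sebastiani (argsort the scores, position becomes rank) is the same computation Table._addrank now performs per row; a self-contained illustration with made-up scores:

    import numpy as np

    methods = np.asarray(['cc', 'acc', 'pcc'])
    scores = np.asarray([0.21, 0.08, 0.15])   # lower error is better

    order = np.argsort(scores)                # indices, best to worst
    ranks = {m: i + 1 for i, m in enumerate(methods[order])}
    print(ranks)                              # {'acc': 1, 'pcc': 2, 'cc': 3}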
@@ -6,41 +6,28 @@ from scipy.stats import ttest_ind_from_stats, wilcoxon
 class Table:
     VALID_TESTS = [None, "wilcoxon", "ttest"]

-    def __init__(self, rows, cols, addfunc, lower_is_better=True, ttest='ttest', prec_mean=3, clean_zero=False,
-                 show_std=False, prec_std=3):
+    def __init__(self, rows, cols, lower_is_better=True, ttest='ttest', prec_mean=3,
+                 clean_zero=False, show_std=False, prec_std=3, average=True, missing=None, missing_str='--', color=True):
         assert ttest in self.VALID_TESTS, f'unknown test, valid are {self.VALID_TESTS}'

         self.rows = np.asarray(rows)
-        self.row_index = {row:i for i,row in enumerate(rows)}
+        self.row_index = {row:i for i, row in enumerate(rows)}

         self.cols = np.asarray(cols)
-        self.col_index = {col:j for j,col in enumerate(cols)}
-        self.map = {}
-        self.mfunc = {}
-        self.rarr = {}
-        self.carr = {}
+        self.col_index = {col:j for j, col in enumerate(cols)}
+        self.map = {}  # keyed (#rows,#cols)-ndarrays holding computations from self.map['values']
         self._addmap('values', dtype=object)
-        self._addmap('fill', dtype=bool, func=lambda x: x is not None)
-        self._addmap('mean', dtype=float, func=np.mean)
-        self._addmap('std', dtype=float, func=np.std)
-        self._addmap('nobs', dtype=float, func=len)
-        self._addmap('rank', dtype=int, func=None)
-        self._addmap('color', dtype=object, func=None)
-        self._addmap('ttest', dtype=object, func=None)
-        self._addrarr('mean', dtype=float, func=np.mean, argmap='mean')
-        self._addrarr('min', dtype=float, func=np.min, argmap='mean')
-        self._addrarr('max', dtype=float, func=np.max, argmap='mean')
-        self._addcarr('mean', dtype=float, func=np.mean, argmap='mean')
-        self._addcarr('rank-mean', dtype=float, func=np.mean, argmap='rank')
-        if self.nrows>1:
-            self._col_ttest = Table(['ttest'], cols, _merge, lower_is_better, ttest)
-        else:
-            self._col_ttest = None
-        self.addfunc = addfunc
         self.lower_is_better = lower_is_better
         self.ttest = ttest
         self.prec_mean = prec_mean
         self.clean_zero = clean_zero
         self.show_std = show_std
         self.prec_std = prec_std
+        self.add_average = average
+        self.missing = missing
+        self.missing_str = missing_str
+        self.color = color
         self.touch()

     @property
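The constructor no longer receives an addfunc; callers hand raw value arrays to add() instead, and the new keyword arguments are presentation knobs. A construction sketch spelling out the defaults (labels are placeholders):

    from tabular import Table

    t = Table(rows=['d1', 'd2'], cols=['m1', 'm2'],
              lower_is_better=True, ttest='ttest', prec_mean=3,
              clean_zero=False, show_std=False, prec_std=3,
              average=True, missing=None, missing_str='--', color=True)
    t.add('d1', 'm1', [0.1, 0.2, 0.3])   # any array-like of per-sample results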
@@ -58,27 +45,6 @@ class Table:
         if self.modif:
             self.compute()

-    def _addmap(self, map, dtype, func=None):
-        self.map[map] = np.empty((self.nrows, self.ncols), dtype=dtype)
-        self.mfunc[map] = func
-        self.touch()
-
-    def _addrarr(self, rarr, dtype, func=np.mean, argmap='mean'):
-        self.rarr[rarr] = {
-            'arr': np.empty(self.ncols, dtype=dtype),
-            'func': func,
-            'argmap': argmap
-        }
-        self.touch()
-
-    def _addcarr(self, carr, dtype, func=np.mean, argmap='mean'):
-        self.carr[carr] = {
-            'arr': np.empty(self.nrows, dtype=dtype),
-            'func': func,
-            'argmap': argmap
-        }
-        self.touch()
-
     def _getfilled(self):
         return np.argwhere(self.map['fill'])
@@ -89,34 +55,19 @@ class Table:
     def _indexes(self):
         return itertools.product(range(self.nrows), range(self.ncols))

-    def _runmap(self, map):
+    def _addmap(self, map, dtype, func=None):
+        self.map[map] = np.empty((self.nrows, self.ncols), dtype=dtype)
+        if func is None:
+            return
         m = self.map[map]
-        f = self.mfunc[map]
+        f = func
         if f is None:
             return
         indexes = self._indexes() if map == 'fill' else self._getfilled()
-        for i,j in indexes:
-            m[i,j] = f(self.values[i,j])
+        for i, j in indexes:
+            m[i, j] = f(self.values[i, j])

-    def _runrarr(self, rarr):
-        dic = self.rarr[rarr]
-        arr, f, map = dic['arr'], dic['func'], dic['argmap']
-        for col, cid in self.col_index.items():
-            if all(self.map['fill'][:, cid]):
-                arr[cid] = f(self.map[map][:, cid])
-            else:
-                arr[cid] = None
-
-    def _runcarr(self, carr):
-        dic = self.carr[carr]
-        arr, f, map = dic['arr'], dic['func'], dic['argmap']
-        for row, rid in self.row_index.items():
-            if all(self.map['fill'][rid, :]):
-                arr[rid] = f(self.map[map][rid, :])
-            else:
-                arr[rid] = None
-
-    def _runrank(self):
+    def _addrank(self):
         for i in range(self.nrows):
             filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
             col_means = [self.map['mean'][i,j] for j in filled_cols_idx]
@@ -125,7 +76,7 @@ class Table:
                 ranked_cols_idx = ranked_cols_idx[::-1]
             self.map['rank'][i, ranked_cols_idx] = np.arange(1, len(filled_cols_idx)+1)

-    def _runcolor(self):
+    def _addcolor(self):
         for i in range(self.nrows):
             filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
             if filled_cols_idx.size==0:
@@ -144,6 +95,12 @@ class Table:
                 normval = 1 - normval
             self.map['color'][i, col_idx] = color_red2green_01(normval)

+    def _addlatex(self):
+        return
+        for i,j in self._getfilled():
+            self.map['latex'][i,j] = self.latex(self.rows[i], self.cols[j])
+
+
     def _run_ttest(self, row, col1, col2):
         mean1 = self.map['mean'][row, col1]
         std1 = self.map['std'][row, col1]
@@ -160,10 +117,10 @@ class Table:
             _, p_val = wilcoxon(values1, values2)
         return p_val

-    def _runttest(self):
+    def _addttest(self):
         if self.ttest is None:
             return
-        self.some_similar = False
+        self.some_similar = [False]*self.ncols
         for i in range(self.nrows):
             filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
             if len(filled_cols_idx) <= 1:
@@ -182,62 +139,74 @@ class Table:
                 pval_outcome = pval_interpretation(p_val)
                 self.map['ttest'][i, j] = pval_outcome
                 if pval_outcome != 'Diff':
-                    self.some_similar = True
+                    self.some_similar[j] = True

-    def get_col_average(self, col, arr='mean'):
-        self.update()
-        cid = self.col_index[col]
-        return self.rarr[arr]['arr'][cid]
-
-    def _map_list(self):
-        maps = list(self.map.keys())
-        maps.remove('fill')
-        maps.remove('values')
-        maps.remove('color')
-        maps.remove('ttest')
-        return ['fill'] + maps
-
     def compute(self):
-        for map in self._map_list():
-            self._runmap(map)
-        self._runrank()
-        self._runcolor()
-        self._runttest()
-        for arr in self.rarr.keys():
-            self._runrarr(arr)
-        for arr in self.carr.keys():
-            self._runcarr(arr)
-        if self._col_ttest != None:
-            for col in self.cols:
-                self._col_ttest.add('ttest', col, self.col_index[col], self.map['fill'], self.values, self.map['mean'], self.ttest)
-            self._col_ttest.compute()
+        self._addmap('fill', dtype=bool, func=lambda x: x is not None)
+        self._addmap('mean', dtype=float, func=np.mean)
+        self._addmap('std', dtype=float, func=np.std)
+        self._addmap('nobs', dtype=float, func=len)
+        self._addmap('rank', dtype=int, func=None)
+        self._addmap('color', dtype=object, func=None)
+        self._addmap('ttest', dtype=object, func=None)
+        self._addmap('latex', dtype=object, func=None)
+        self._addrank()
+        self._addcolor()
+        self._addttest()
+        self._addlatex()
+        if self.add_average:
+            self._addave()
         self.modif = False

-    def add(self, row, col, *args, **kwargs):
-        print(row, col, args, kwargs)
-        values = self.addfunc(row, col, *args, **kwargs)
-        # if values is None:
-        #     raise ValueError(f'addfunc returned None for row={row} col={col}')
-        rid, cid = self.coord(row, col)
+    def _is_column_full(self, col):
+        return all(self.map['fill'][:, self.col_index[col]])
+
+    def _addave(self):
+        ave = Table(['ave'], self.cols, lower_is_better=self.lower_is_better, ttest=self.ttest, average=False,
+                    missing=self.missing, missing_str=self.missing_str)
+        for col in self.cols:
+            values = None
+            if self._is_column_full(col):
+                if self.ttest == 'ttest':
+                    values = np.asarray(self.map['mean'][:, self.col_index[col]])
+                else: # wilcoxon
+                    values = np.concatenate(self.values[:, self.col_index[col]])
+            ave.add('ave', col, values)
+        self.average = ave
+
+    def add(self, row, col, values):
+        if values is not None:
+            values = np.asarray(values)
+            if values.ndim==0:
+                values = values.flatten()
+        rid, cid = self._coordinates(row, col)
         self.map['values'][rid, cid] = values
         self.touch()

     def get(self, row, col, attr='mean'):
-        assert attr in self.map, f'unknwon attribute {attr}'
         self.update()
-        rid, cid = self.coord(row, col)
+        assert attr in self.map, f'unknwon attribute {attr}'
+        rid, cid = self._coordinates(row, col)
         if self.map['fill'][rid, cid]:
-            return self.map[attr][rid, cid]
+            v = self.map[attr][rid, cid]
+            if v is None or (isinstance(v,float) and np.isnan(v)):
+                return self.missing
+            return v
+        else:
+            return self.missing

-    def coord(self, row, col):
+    def _coordinates(self, row, col):
         assert row in self.row_index, f'row {row} out of range'
         assert col in self.col_index, f'col {col} out of range'
         rid = self.row_index[row]
         cid = self.col_index[col]
         return rid, cid

-    def get_col_table(self):
-        return self._col_ttest
+    def get_average(self, col, attr='mean'):
+        self.update()
+        if self.add_average:
+            return self.average.get('ave', col, attr=attr)
+        return None

     def get_color(self, row, col):
         color = self.get(row, col, attr='color')
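_addave models the Average row as a nested one-row Table: a column contributes its per-row means under the t-test, or the concatenation of all its raw cell values under wilcoxon, and is skipped unless every cell is filled. A numpy sketch of the two aggregations with toy cell arrays:

    import numpy as np

    cells = [np.random.rand(100) for _ in range(4)]      # one array per dataset row

    ttest_input = np.asarray([c.mean() for c in cells])  # 4 per-row means
    wilcoxon_input = np.concatenate(cells)               # 400 raw values
    print(ttest_input.shape, wilcoxon_input.shape)       # (4,) (400,)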
@@ -245,11 +214,11 @@ class Table:
             return ''
         return color

-    def latex(self, row, col, missing='--', color=True):
+    def latex(self, row, col):
         self.update()
-        i,j = self.coord(row, col)
+        i,j = self._coordinates(row, col)
         if self.map['fill'][i,j] == False:
-            return missing
+            return self.missing_str

         mean = self.map['mean'][i,j]
         l = f" {mean:.{self.prec_mean}f}"
@@ -257,78 +226,69 @@ class Table:
             l = l.replace(' 0.', '.')

         isbest = self.map['rank'][i,j] == 1

         if isbest:
-            l = "\\textbf{"+l+"}"
-        else:
-            if self.ttest is not None and self.some_similar:
-                test_label = self.map['ttest'][i,j]
-                if test_label == 'Sim':
-                    l += '^{\dag\phantom{\dag}}'
-                elif test_label == 'Same':
-                    l += '^{\ddag}'
-                elif test_label == 'Diff':
-                    l += '^{\phantom{\ddag}}'
-
+            l = "\\textbf{"+l.strip()+"}"
+
+        stat = ''
+        if self.ttest is not None and self.some_similar[j]:
+            test_label = self.map['ttest'][i,j]
+            if test_label == 'Sim':
+                stat = '^{\dag\phantom{\dag}}'
+            elif test_label == 'Same':
+                stat = '^{\ddag}'
+            elif isbest or test_label == 'Diff':
+                stat = '^{\phantom{\ddag}}'
+
+        std = ''
         if self.show_std:
             std = self.map['std'][i,j]
             std = f" {std:.{self.prec_std}f}"
             if self.clean_zero:
                 std = std.replace(' 0.', '.')
-            l += f" \pm {std}"
+            std = f" \pm {std:{self.prec_std}}"

-        l = f'$ {l} $'
-        if color:
+        if stat!='' or std!='':
+            l = f'{l}${stat}{std}$'
+
+        if self.color:
             l += ' ' + self.map['color'][i,j]

         return l

-    def latextabular(self, missing='--', color=True, rowreplace={}, colreplace={}, average=True):
+    def latexTabular(self, rowreplace={}, colreplace={}, average=True):
         tab = ' & '
         tab += ' & '.join([colreplace.get(col, col) for col in self.cols])
         tab += ' \\\\\hline\n'
         for row in self.rows:
             rowname = rowreplace.get(row, row)
             tab += rowname + ' & '
-            tab += self.latexrow(row, missing, color)
-            tab += ' \\\\\hline\n'
+            tab += self.latexRow(row)

         if average:
+            tab += '\hline\n'
             tab += 'Average & '
-            tab += self.latexave(missing, color)
-            tab += ' \\\\\hline\n'
+            tab += self.latexAverage()
         return tab

-    def latexrow(self, row, missing='--', color=True):
-        s = [self.latex(row, col, missing=missing, color=color) for col in self.cols]
+    def latexRow(self, row, endl='\\\\\hline\n'):
+        s = [self.latex(row, col) for col in self.cols]
         s = ' & '.join(s)
+        s += ' ' + endl
         return s

-    def latexave(self, missing='--', color=True):
-        return self._col_ttest.latexrow('ttest')
+    def latexAverage(self, endl='\\\\\hline\n'):
+        if self.add_average:
+            return self.average.latexRow('ave', endl=endl)

-    def get_rank_table(self):
-        t = Table(rows=self.rows, cols=self.cols, addfunc=_getrank, ttest=None, prec_mean=0)
-        for row, col in self._getfilled():
-            t.add(self.rows[row], self.cols[col], row, col, self.map['rank'])
+    def getRankTable(self):
+        t = Table(rows=self.rows, cols=self.cols, prec_mean=0, average=True)
+        for rid, cid in self._getfilled():
+            row = self.rows[rid]
+            col = self.cols[cid]
+            t.add(row, col, self.get(row, col, 'rank'))
+        t.compute()
         return t

-def _getrank(row, col, rowid, colid, rank):
-    return [rank[rowid, colid]]
-
-def _merge(unused, col, colidx, fill, values, means, ttest):
-    if all(fill[:,colidx]):
-        nrows = values.shape[0]
-        if ttest=='ttest':
-            values = np.asarray(means[:, colidx])
-        else: # wilcoxon
-            values = [values[i, colidx] for i in range(nrows)]
-            values = np.concatenate(values)
-        return values
-    else:
-        return None
-
 def pval_interpretation(p_val):
     if 0.005 >= p_val:
         return 'Diff'
@@ -352,21 +312,3 @@ def color_red2green_01(val, maxtone=50):
         tone = maxtone * val
     return '\cellcolor{' + color + f'!{int(tone)}' + '}'

-#
-# def addfunc(m,d, mean, size):
-#     return np.random.rand(size)+mean
-#
-# t = Table(rows = ['M1', 'M2', 'M3'], cols=['D1', 'D2', 'D3', 'D4'], addfunc=addfunc, ttest='wilcoxon')
-# t.add('M1','D1', mean=0.5, size=100)
-# t.add('M1','D2', mean=0.5, size=100)
-# t.add('M2','D1', mean=0.2, size=100)
-# t.add('M2','D2', mean=0.1, size=100)
-# t.add('M2','D3', mean=0.7, size=100)
-# t.add('M2','D4', mean=0.3, size=100)
-# t.add('M3','D1', mean=0.9, size=100)
-# t.add('M3','D2', mean=0, size=100)
-#
-# print(t.latextabular())
-#
-# print('rank')
-# print(t.get_rank_table().latextabular())
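The commented-out demo removed by this final hunk exercised the old addfunc-based API; an equivalent sketch against the new add(row, col, values) / latexTabular() / getRankTable() surface might look as follows (toy labels and offsets carried over from the removed demo):

    import numpy as np
    from tabular import Table

    t = Table(rows=['M1', 'M2', 'M3'], cols=['D1', 'D2', 'D3', 'D4'], ttest='wilcoxon')
    t.add('M1', 'D1', np.random.rand(100) + 0.5)
    t.add('M1', 'D2', np.random.rand(100) + 0.5)
    t.add('M2', 'D1', np.random.rand(100) + 0.2)
    t.add('M2', 'D2', np.random.rand(100) + 0.1)
    t.add('M2', 'D3', np.random.rand(100) + 0.7)
    t.add('M2', 'D4', np.random.rand(100) + 0.3)
    t.add('M3', 'D1', np.random.rand(100) + 0.9)
    t.add('M3', 'D2', np.random.rand(100))

    print(t.latexTabular())   # unfilled cells render as the missing marker '--'
    print('rank')
    print(t.getRankTable().latexTabular())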