gfun_multimodal/gfun/vgfs/transformerGen.py

from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader


class TransformerGen:
    """Base class for all transformers. It implements the basic methods for
    the creation of the datasets, the dataloaders, and the train-validation
    split. It is designed to be used with MultilingualDataset in the form of
    dictionaries {lang: data}.
    """
    def __init__(
        self,
        model_name,
        dataset_name,
        epochs=10,
        lr=1e-5,
        batch_size=4,
        batch_size_eval=32,
        max_length=512,
        print_steps=50,
        device="cpu",
        probabilistic=False,
        n_jobs=-1,
        evaluate_step=10,
        verbose=False,
        patience=5,
    ):
        self.model_name = model_name
        self.dataset_name = dataset_name
        self.device = device
        self.model = None
        self.lr = lr
        self.epochs = epochs
        self.tokenizer = None
        self.max_length = max_length
        self.batch_size = batch_size
        self.batch_size_eval = batch_size_eval
        self.print_steps = print_steps
        self.probabilistic = probabilistic
        self.n_jobs = n_jobs
        self.fitted = False
        self.datasets = {}
        self.evaluate_step = evaluate_step
        self.verbose = verbose
        self.patience = patience

    def make_probabilistic(self):
        raise NotImplementedError

    def build_dataloader(
        self,
        lX,
        lY,
        torchDataset,
        processor_fn,
        batch_size,
        split="train",
        shuffle=False,
    ):
        # Process (e.g. tokenize) each language's data, wrap the result in the
        # given torch Dataset class, and return a DataLoader over it.
        l_tokenized = {lang: processor_fn(data) for lang, data in lX.items()}
        self.datasets[split] = torchDataset(l_tokenized, lY, split=split)
        return DataLoader(self.datasets[split], batch_size=batch_size, shuffle=shuffle)

    def get_train_val_data(self, lX, lY, split=0.2, seed=42):
        # Per-language train/validation split. Note that with shuffle=False the
        # split is sequential, so random_state has no effect here.
        tr_lX, tr_lY, val_lX, val_lY = {}, {}, {}, {}
        for lang in lX.keys():
            tr_X, val_X, tr_Y, val_Y = train_test_split(
                lX[lang], lY[lang], test_size=split, random_state=seed, shuffle=False
            )
            tr_lX[lang] = tr_X
            tr_lY[lang] = tr_Y
            val_lX[lang] = val_X
            val_lY[lang] = val_Y
        return tr_lX, tr_lY, val_lX, val_lY
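

# --- usage sketch (illustration only, not part of transformerGen.py) ---
# A minimal sketch of how a concrete subclass or caller might combine
# get_train_val_data and build_dataloader. The MultilingualTorchDataset class,
# the identity processor_fn, and the toy {lang: data} dictionaries below are
# hypothetical stand-ins; the project's real Dataset class and tokenizer call
# may differ.
from torch.utils.data import Dataset


class MultilingualTorchDataset(Dataset):
    # Hypothetical Dataset that flattens a {lang: processed_data} dict into a
    # single list of (x, y) samples.
    def __init__(self, l_tokenized, lY, split="train"):
        self.samples = [
            (x, y)
            for lang in l_tokenized
            for x, y in zip(l_tokenized[lang], lY[lang])
        ]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]


gen = TransformerGen(model_name="bert-base-multilingual-cased", dataset_name="toy")

# Toy multilingual data in the expected {lang: data} form.
lX = {"en": ["a text", "another text"], "it": ["un testo", "un altro testo"]}
lY = {"en": [0, 1], "it": [1, 0]}

tr_lX, tr_lY, val_lX, val_lY = gen.get_train_val_data(lX, lY, split=0.5)
train_dl = gen.build_dataloader(
    tr_lX,
    tr_lY,
    torchDataset=MultilingualTorchDataset,
    processor_fn=lambda texts: texts,  # stand-in for a real tokenizer call
    batch_size=gen.batch_size,
    split="train",
    shuffle=True,
)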