import transformers
from datasets import load_dataset, load_metric
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer

model_checkpoint = "t5-small"
raw_datasets = load_dataset("xsum")
metric = load_metric("rouge")
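
# Optional sanity check (not part of the original script): each XSum example has
# "document", "summary" and "id" fields, so one record can be inspected directly
# before any preprocessing.
example = raw_datasets["train"][0]
print(example["document"][:200])
print(example["summary"])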

def show_random_elements(dataset, num_examples=5):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset) - 1)
        while pick in picks:
            pick = random.randint(0, len(dataset) - 1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, datasets.ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
    display(HTML(df.to_html()))
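
# Example usage of the helper above (assumed call, mirroring how such helpers are
# typically used in notebooks): render a few random training examples as an HTML table.
show_random_elements(raw_datasets["train"])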

tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
print(transformers.__version__)
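
# Optional illustration (not in the original script): the tokenizer turns raw text
# into input_ids and an attention_mask, which is what preprocess_function below relies on.
print(tokenizer("Hello, this one sentence!"))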

if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
    prefix = "summarize: "
else:
    prefix = ""

max_input_length = 1024
max_target_length = 128

def preprocess_function(examples):
    inputs = [prefix + doc for doc in examples["document"]]
    model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
    # Set up the tokenizer for the targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(examples["summary"], max_length=max_target_length, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
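
# Optional illustration (assumed usage): preprocess_function works on batches, so it
# can be tried on the first two training examples before mapping it over the dataset.
print(preprocess_function(raw_datasets["train"][:2]))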

model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
batch_size = 16
model_name = model_checkpoint.split("/")[-1]
args = Seq2SeqTrainingArguments(
    f"{model_name}-finetuned-xsum",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=1,
    predict_with_generate=True,
    fp16=True,
    push_to_hub=True,
)

import nltk
import numpy as np

# nltk.sent_tokenize below needs the "punkt" sentence tokenizer data.
nltk.download("punkt", quiet=True)

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    # Replace -100 in the labels as we can't decode them.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    # ROUGE expects a newline after each sentence
    decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
    decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
    result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    # Extract the mid f-measure of each ROUGE score
    result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
    # Add mean generated length
    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions]
    result["gen_len"] = np.mean(prediction_lens)
    return {k: round(v, 4) for k, v in result.items()}
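
# The Trainer below references tokenized_datasets and data_collator, which are never
# defined in this snippet. This is a sketch of the standard way to fill that gap,
# assuming the usual map/collator setup from the preprocess_function above.
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)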

trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
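
# Presumable next steps (not shown in the original snippet): fine-tune, evaluate,
# and, since push_to_hub=True above, upload the resulting model to the Hub.
trainer.train()
trainer.evaluate()
trainer.push_to_hub()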