from datasets import load_dataset
import random
dataset = load_dataset("Jean-Baptiste/wikiner_fr")
# Remove duplicated rows in the dataset #####
# Remove duplicates in each set
def remove_duplicates(examples: dict[str, list]) -> list[bool]:
    """Return a boolean mask keeping only the first occurrence of each sentence."""
    seen_sentences = set()
    res = []
    for example_tokens in examples['tokens']:
        sentence = tuple(example_tokens)
        if sentence not in seen_sentences:
            res.append(True)
            seen_sentences.add(sentence)
        else:
            res.append(False)
    print(f"Removed {len(examples['tokens']) - sum(res)} duplicates")
    return res
dataset = dataset.filter(remove_duplicates, batched=True, batch_size=None)
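# Optional sanity check (sketch): each split should now contain no duplicate sentences.
for split_name, split in dataset.items():
    sentences = [tuple(tokens) for tokens in split['tokens']]
    assert len(sentences) == len(set(sentences)), f"Duplicates remain in {split_name}"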
# Remove train sentences that also appear in the test set (data leakage)
test_sentences = set(tuple(w) for w in dataset['test']['tokens'])
dataset['train'] = dataset['train'].filter(
    lambda examples: [tuple(tokens) not in test_sentences for tokens in examples['tokens']],
    batched=True,
    batch_size=None
)
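# Optional sanity check (sketch): no train sentence should remain in the test set.
train_sentences = set(tuple(tokens) for tokens in dataset['train']['tokens'])
assert not (train_sentences & test_sentences), "Train/test leakage still present"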
# Decapitalize words randomly #####
def decapitalize_tokens(example, probability=0.2):
    """Lowercase capitalized, non-sentence-initial entity tokens with the given probability."""
    for i, token in enumerate(example['tokens']):
        if token.istitle() and \
           i != 0 and \
           random.random() < probability and \
           example['ner_tags'][i] != 0:
            example['tokens'][i] = token.lower()
    return example
dataset_with_mixed_caps = dataset.map(decapitalize_tokens)
dataset_with_mixed_caps.push_to_hub("wikiner_fr_mixed_caps")
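# Example usage (sketch): reload the pushed dataset. The "<username>" namespace is an
# assumption; replace it with the account or organization the dataset was pushed to.
# mixed_caps = load_dataset("<username>/wikiner_fr_mixed_caps")
# print(mixed_caps)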