from datasets import load_dataset
import random

dataset = load_dataset("Jean-Baptiste/wikiner_fr")


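# Keep only the first occurrence of each tokenised sentence in a split;
# return one boolean per example, as expected by Dataset.filter(batched=True).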
def remove_duplicates(examples: dict[str, list]) -> list[bool]:
    seen_sentences = set()
    res = []
    for example_tokens in examples['tokens']:
        sentence = tuple(example_tokens)
        if sentence not in seen_sentences:
            res.append(True)
            seen_sentences.add(sentence)
        else:
            res.append(False)
    print(f"Removed {len(examples['tokens']) - sum(res)} duplicates")
    return res


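# batch_size=None passes the whole split to remove_duplicates as a single batch,
# so duplicates are detected across the entire split at once.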
dataset = dataset.filter(remove_duplicates, batched=True, batch_size=None)

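# Drop every training sentence that also appears in the test split,
# so the deduplicated training set does not leak test data.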
test_sentences = set(tuple(w) for w in dataset['test']['tokens'])
dataset['train'] = dataset['train'].filter(
    lambda examples: [s not in test_sentences for s in [tuple(w) for w in examples['tokens']]],
    batched=True,
    batch_size=None
)


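# Lowercase a random ~20% of the title-cased entity tokens (ner_tags != 0)
# that are not sentence-initial, so entities also appear without capitalisation cues.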
def decapitalize_tokens(example, probability=0.2):
    for i, token in enumerate(example['tokens']):
        if token.istitle() and \
                i != 0 and \
                random.random() < probability and \
                example['ner_tags'][i] != 0:
            example['tokens'][i] = token.lower()
    return example


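# Seed before mapping so the randomly decapitalised tokens are reproducible.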
random.seed(42)
dataset_with_mixed_caps = dataset.map(decapitalize_tokens)

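# Pushing to the Hub requires being authenticated (e.g. via `huggingface-cli login`).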
dataset_with_mixed_caps.push_to_hub("wikiner_fr_mixed_caps")