|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The SUC 3.0 corpus (Stockholm-Umeå Corpus) as a Hugging Face dataset."""
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
# BibTeX citation for the corpus. Must be a raw string: in a normal
# string the TeX escape "{\aa}" contains "\a", which Python turns into
# the BEL control character (0x07), and "\'" is collapsed to "'",
# corrupting the BibTeX entry.
_CITATION = r"""@article{gustafson2006documentation,

title={Documentation of the Stockholm-Ume{\aa} Corpus},

author={Gustafson-Capkov{\'a}, Sofia and Hartmann, Britt},

journal={Stockholm University: Department of Linguistics},

year={2006}

}

"""
|
|
|
|
|
# Long-form dataset description; each builder config appends its own
# short description to this string in SUC3._info.
_DESCRIPTION = """\

The dataset is a conversion of the venerable SUC 3.0 dataset into the

huggingface ecosystem. The original dataset does not contain an official

train-dev-test split, which is introduced here; the tag distribution for the

NER tags between the three splits is mostly the same.



The dataset has three different types of tagsets: manually annotated POS,

manually annotated NER, and automatically annotated NER. For the

automatically annotated NER tags, only sentences were chosen, where the

automatic and manual annotations would match (with their respective

categories).



Additionally we provide remixes of the same data with some or all sentences

being lowercased.

"""

_HOMEPAGE = "https://spraakbanken.gu.se/en/resources/suc3"

_LICENSE = "CC-BY-4.0"

# Base URL of the hosted data files; each config's relative data_url is
# appended to this in SUC3._split_generators.
_URL = "https://huggingface.co/datasets/KBLab/suc3_1/resolve/main/data/"
|
|
|
# Relative tarball paths, keyed by tagset variant ("original_tags" =
# manual annotation, "simple_tags" = automatic annotation) and casing
# variant (cased / fully lowercased / mixed-case remix).
_URLS = {

    "original_tags": {

        "cased": "original_tags/cased.tar.gz",

        "lower": "original_tags/lower.tar.gz",

        "lower_mix": "original_tags/lower_mix.tar.gz"},

    "simple_tags": {

        "cased": "simple_tags/cased.tar.gz",

        "lower": "simple_tags/lower.tar.gz",

        "lower_mix": "simple_tags/lower_mix.tar.gz"}

}

# SUC part-of-speech tagset. Note these are unordered sets (membership
# only); the dataset features store tags as plain strings, not ClassLabel.
# NOTE(review): _POS_LABEL_NAMES is not referenced anywhere in the visible
# code — presumably kept for downstream consumers; confirm before removing.
_POS_LABEL_NAMES = {

    'AB', 'DT', 'HA', 'HD', 'HP', 'HS', 'IE', 'IN', 'JJ', 'KN', 'MAD', 'MID',

    'NN', 'PAD', 'PC', 'PL', 'PM', 'PN', 'PP', 'PS', 'RG', 'RO', 'SN', 'UO',

    'VB'

}
# IOB2 NER tags of the original (manually annotated) tagset.
_NER_LABEL_NAMES_ORIGINAL = {

    'B-animal', 'B-event', 'B-inst', 'B-myth', 'B-other', 'B-person',

    'B-place', 'B-product', 'B-work', 'I-animal', 'I-event', 'I-inst',

    'I-myth', 'I-other', 'I-person', 'I-place', 'I-product', 'I-work', 'O'

}

# IOB2 NER tags of the simplified (automatically annotated) tagset.
_NER_LABEL_NAMES_SIMPLE = {

    'B-EVN', 'B-LOC', 'B-MSR', 'B-OBJ', 'B-ORG', 'B-PRS', 'B-TME', 'B-WRK',

    'I-EVN', 'I-LOC', 'I-MSR', 'I-OBJ', 'I-ORG', 'I-PRS', 'I-TME', 'I-WRK', 'O'

}
|
|
|
|
|
class SUC3Config(datasets.BuilderConfig):
    """BuilderConfig for SUC 3.0.

    Args:
        ner_label_names: Set of NER tag strings valid for this config.
        description: Short human-readable summary of the config; appended
            to the dataset description in ``SUC3._info``.
        data_url: Tarball path relative to ``_URL``.
        **kwargs: Forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, ner_label_names, description, data_url, **kwargs):
        # All configs share the same dataset version, pinned here.
        super().__init__(version=datasets.Version("1.0.2"), **kwargs)
        self.ner_label_names = ner_label_names
        self.description = description
        self.data_url = data_url
|
|
|
|
|
|
|
class SUC3(datasets.GeneratorBasedBuilder):
    """The SUC 3.0 dataset (Stockholm-Umeå Corpus).

    Six configurations: the original (manually annotated) or simplified
    (automatically annotated) NER tagset, each in cased, fully lowercased,
    or mixed-case variants.
    """

    BUILDER_CONFIGS = [
        SUC3Config(
            name="original_cased",
            ner_label_names=_NER_LABEL_NAMES_ORIGINAL,
            data_url=_URLS["original_tags"]["cased"],
            description="manually annotated & cased",
        ),
        SUC3Config(
            name="original_lower",
            ner_label_names=_NER_LABEL_NAMES_ORIGINAL,
            data_url=_URLS["original_tags"]["lower"],
            description="manually annotated & lower",
        ),
        SUC3Config(
            name="original_lower_mix",
            ner_label_names=_NER_LABEL_NAMES_ORIGINAL,
            data_url=_URLS["original_tags"]["lower_mix"],
            description="manually annotated & lower_mix",
        ),
        SUC3Config(
            name="simple_cased",
            ner_label_names=_NER_LABEL_NAMES_SIMPLE,
            data_url=_URLS["simple_tags"]["cased"],
            description="automatically annotated & cased",
        ),
        SUC3Config(
            name="simple_lower",
            ner_label_names=_NER_LABEL_NAMES_SIMPLE,
            data_url=_URLS["simple_tags"]["lower"],
            description="automatically annotated & lower",
        ),
        SUC3Config(
            name="simple_lower_mix",
            ner_label_names=_NER_LABEL_NAMES_SIMPLE,
            data_url=_URLS["simple_tags"]["lower_mix"],
            # Fixed typo: "autimatically" -> "automatically".
            description="automatically annotated & lower_mix",
        ),
    ]

    def _info(self):
        """Return dataset metadata.

        Tags are exposed as plain strings (not ``ClassLabel``); the
        per-config description is appended to the shared one.
        """
        features = {
            "id": datasets.Value("string"),
            "tokens": datasets.features.Sequence(datasets.Value("string")),
            "pos_tags": datasets.features.Sequence(datasets.Value("string")),
            "ner_tags": datasets.features.Sequence(datasets.Value("string")),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download and extract this config's tarball; define the splits."""
        dl_dir = dl_manager.download_and_extract(_URL + self.config.data_url)
        # The archive extracts into a directory named after the tarball
        # stem, e.g. "cased" from "original_tags/cased.tar.gz".
        subdir = self.config.data_url.split("/")[-1].split(".")[0]
        dl_dir = os.path.join(dl_dir, subdir)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"data_file": os.path.join(dl_dir, filename)},
            )
            for split, filename in (
                (datasets.Split.TRAIN, "train.jsonl"),
                (datasets.Split.VALIDATION, "dev.jsonl"),
                (datasets.Split.TEST, "test.jsonl"),
            )
        ]

    def _generate_examples(self, data_file):
        """Yield (key, example) pairs, one per line of a JSON Lines file."""
        with open(data_file, encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield str(i), json.loads(line)
|
|