"""ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""
import os
import re
import xml.etree.ElementTree as ET

import datasets


_CITATION = """\ |
|
@InProceedings{krek2020ssj500k, |
|
title = {The ssj500k Training Corpus for Slovene Language Processing}, |
|
author={Krek, Simon and Erjavec, Tomaž and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and Čibej, Jaka and Brank, Janez}, |
|
booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities}, |
|
year={2020}, |
|
pages={24-33} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation, |
|
sentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated |
|
with syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also |
|
annotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the |
|
JOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies. |
|
""" |
|
|
|
_HOMEPAGE = "http://hdl.handle.net/11356/1434"

_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

_URLS = {
    "ssj500k-en.tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip"
}


XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
# Head index used for the root token of a dependency tree.
IDX_ROOT_WORD = -1
# Head index used for tokens of sentences without dependency annotations.
IDX_NA_HEAD = -2
# Tag used for sentences without annotations of the requested type.
NA_TAG = "N/A"


def namespace(element):
    """Return the namespace part of an element's tag, e.g. '{http://www.tei-c.org/ns/1.0}'."""
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def word_information(w_or_pc_el):
    """Extract the ID, surface form, lemma, and MSD tag of a <w> (word) or <pc> (punctuation) element."""
    if w_or_pc_el.tag.endswith("pc"):
        id_word = w_or_pc_el.attrib[f"{XML_NAMESPACE}id"]
        form = w_or_pc_el.text.strip()
        # Punctuation carries no lemma attribute, so the surface form is used instead.
        lemma = w_or_pc_el.text.strip()
        msd = w_or_pc_el.attrib["msd"]
    else:
        id_word = w_or_pc_el.attrib[f"{XML_NAMESPACE}id"]
        form = w_or_pc_el.text.strip()
        lemma = w_or_pc_el.attrib["lemma"]
        msd = w_or_pc_el.attrib["msd"]

    return id_word, form, lemma, msd

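
# Illustrative shapes of the elements handled by word_information. The attribute
# values below are invented for the example; the real IDs and MSD tags come from
# the corpus itself:
#
#     <w xml:id="ssj1.1.1.t1" lemma="pes" msd="...">Pes</w>
#     <pc xml:id="ssj1.1.1.t5" msd="...">.</pc>
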
class Ssj500k(datasets.GeneratorBasedBuilder):
    """ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""

    VERSION = datasets.Version("2.3.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all_data", version=VERSION,
                               description="The entire dataset with all annotations (partially missing in some documents)."),
        datasets.BuilderConfig(name="named_entity_recognition", version=VERSION,
                               description="The data subset with annotated named entities."),
        datasets.BuilderConfig(name="dependency_parsing_ud", version=VERSION,
                               description="The data subset with annotated dependencies (UD schema)."),
        datasets.BuilderConfig(name="dependency_parsing_jos", version=VERSION,
                               description="The data subset with annotated dependencies (JOS schema)."),
        datasets.BuilderConfig(name="semantic_role_labeling", version=VERSION,
                               description="The data subset with annotated semantic roles."),
        datasets.BuilderConfig(name="multiword_expressions", version=VERSION,
                               description="The data subset with annotated multiword expressions.")
    ]

    DEFAULT_CONFIG_NAME = "all_data"
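
    # A minimal usage sketch (assuming this script is saved locally as "ssj500k.py";
    # the configuration names are the ones defined in BUILDER_CONFIGS above):
    #
    #     import datasets
    #     data = datasets.load_dataset("ssj500k.py", "named_entity_recognition")
    #     print(data["train"][0]["ne_tags"])
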
    def _info(self):
        # Features shared by all configurations: document/paragraph/sentence
        # indices plus the tokenised text with lemmas and MSD tags.
        features_dict = {
            "id_doc": datasets.Value("string"),
            "idx_par": datasets.Value("int32"),
            "idx_sent": datasets.Value("int32"),
            "id_words": datasets.Sequence(datasets.Value("string")),
            "words": datasets.Sequence(datasets.Value("string")),
            "lemmas": datasets.Sequence(datasets.Value("string")),
            "msds": datasets.Sequence(datasets.Value("string"))
        }

        ret_all_data = self.config.name == "all_data"
        if ret_all_data:
            # Flags marking which annotation layers are present in an instance.
            features_dict.update({
                "has_ne_ann": datasets.Value("bool"), "has_ud_dep_ann": datasets.Value("bool"),
                "has_jos_dep_ann": datasets.Value("bool"), "has_srl_ann": datasets.Value("bool"),
                "has_mwe_ann": datasets.Value("bool")
            })

        if ret_all_data or self.config.name == "named_entity_recognition":
            features_dict["ne_tags"] = datasets.Sequence(datasets.Value("string"))

        if ret_all_data or self.config.name == "dependency_parsing_ud":
            features_dict.update({
                "ud_dep_head": datasets.Sequence(datasets.Value("int32")),
                "ud_dep_rel": datasets.Sequence(datasets.Value("string"))
            })

        if ret_all_data or self.config.name == "dependency_parsing_jos":
            features_dict.update({
                "jos_dep_head": datasets.Sequence(datasets.Value("int32")),
                "jos_dep_rel": datasets.Sequence(datasets.Value("string"))
            })

        if ret_all_data or self.config.name == "semantic_role_labeling":
            features_dict.update({
                "srl_info": [{
                    "idx_arg": datasets.Value("uint32"),
                    "idx_head": datasets.Value("uint32"),
                    "role": datasets.Value("string")
                }]
            })

        if ret_all_data or self.config.name == "multiword_expressions":
            features_dict["mwe_info"] = [{
                "type": datasets.Value("string"),
                "word_indices": datasets.Sequence(datasets.Value("uint32"))
            }]

        features = datasets.Features(features_dict)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
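
    # An illustrative instance under the "all_data" configuration (values are
    # hypothetical and abbreviated; see the feature definitions above):
    #
    #     {"id_doc": "ssj1", "idx_par": 0, "idx_sent": 0,
    #      "words": ["Pes", "laja", "."], "ne_tags": ["O", "O", "O"],
    #      "ud_dep_head": [1, -1, 1], "ud_dep_rel": ["nsubj", "root", "punct"], ...}
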
    def _split_generators(self, dl_manager):
        # The corpus ships as a single TEI body file; everything goes into the train split.
        urls = _URLS["ssj500k-en.tei"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "ssj500k-en.TEI", "ssj500k-en.body.xml")}
            )
        ]

    def _generate_examples(self, file_path):
        ret_all_data = self.config.name == "all_data"
        ret_ne_only = self.config.name == "named_entity_recognition"
        ret_ud_dep_only = self.config.name == "dependency_parsing_ud"
        ret_jos_dep_only = self.config.name == "dependency_parsing_jos"
        ret_srl_only = self.config.name == "semantic_role_labeling"
        ret_mwe_only = self.config.name == "multiword_expressions"

        # `tree` holds the whole parsed file; the loop below iterates over the
        # individual <div> (document) elements.
        tree = ET.parse(file_path)
        root = tree.getroot()
        NAMESPACE = namespace(root)

        idx_example = 0
        for idx_doc, curr_doc in enumerate(root.iterfind(f"{NAMESPACE}div")):
            id_doc = curr_doc.attrib[f"{XML_NAMESPACE}id"]
            # Parse the document's bibliographical metadata; it is collected here
            # but not included in the generated instances.
            doc_metadata = {}
            metadata_el = curr_doc.find(f"{NAMESPACE}bibl")
            if metadata_el is not None:
                for child in metadata_el:
                    if child.tag.endswith("term"):
                        if child.attrib[f"{XML_NAMESPACE}lang"] != "en":
                            continue

                        parts = child.text.strip().split(" / ")
                        attr_name = parts[0]
                        attr_value = " / ".join(parts[1:])
                    elif child.tag.endswith("note"):
                        attr_name = child.attrib["type"]
                        attr_value = child.text.strip()
                    else:
                        attr_name = child.tag[len(NAMESPACE):]
                        attr_value = child.text.strip()

                    doc_metadata[attr_name] = attr_value

            # The documents are ordered so that annotated ones come first: only the
            # leading documents carry named entity, multiword expression, and
            # semantic role annotations. Sentence-level elements may override these
            # flags below.
            has_ne = idx_doc < 498
            has_mwe = idx_doc < 754
            has_srl = idx_doc < 228

            for idx_par, curr_par in enumerate(curr_doc.iterfind(f"{NAMESPACE}p")):
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    # Maps token IDs to their 0-based position in the sentence.
                    id2position = {}
                    id_words, words, lemmas, msds = [], [], [], []

                    named_ents = []
                    has_ud_dep, ud_dep_heads, ud_dep_rels = False, [], []
                    has_jos_dep, jos_dep_heads, jos_dep_rels = False, [], []
                    srl_info = []
                    mwe_info = []

                    for curr_el in curr_sent:
                        # Plain tokens: words (<w>) and punctuation (<pc>).
                        if curr_el.tag.endswith(("w", "pc")):
                            id_word, word, lemma, msd = word_information(curr_el)

                            id2position[id_word] = len(id2position)
                            id_words.append(id_word)
                            words.append(word)
                            lemmas.append(lemma)
                            msds.append(msd)
                            named_ents.append("O")

                        # Named entities: a <seg> element wraps the tokens of one entity mention.
                        elif curr_el.tag.endswith("seg"):
                            has_ne = True
                            ne_type = curr_el.attrib["subtype"]
                            if ne_type.startswith("deriv-"):
                                ne_type = ne_type[len("deriv-"):]
                            ne_type = ne_type.upper()

                            num_ne_tokens = 0
                            for curr_child in curr_el:
                                num_ne_tokens += 1
                                id_word, word, lemma, msd = word_information(curr_child)

                                id2position[id_word] = len(id2position)
                                id_words.append(id_word)
                                words.append(word)
                                lemmas.append(lemma)
                                msds.append(msd)

                            assert num_ne_tokens > 0
                            # Encode the entity span with IOB2 tags (ne_type is already uppercased).
                            nes = [f"B-{ne_type}"] + [f"I-{ne_type}"] * (num_ne_tokens - 1)
                            named_ents.extend(nes)

                        # Dependencies, semantic roles, and MWEs are encoded as <linkGrp> elements.
                        elif curr_el.tag.endswith("linkGrp"):
                            if curr_el.attrib["type"] == "UD-SYN":
                                has_ud_dep = True
                                ud_dep_heads = [None for _ in range(len(words))]
                                ud_dep_rels = [None for _ in range(len(words))]

                                for link in curr_el:
                                    dep_rel = link.attrib["ana"].split(":")[-1]
                                    # `target` holds the head and dependant token IDs, each prefixed with "#".
                                    id_head_word, id_dependant = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))

                                    idx_head_word = id2position[id_head_word] if dep_rel != "root" else IDX_ROOT_WORD
                                    idx_dep_word = id2position[id_dependant]

                                    ud_dep_heads[idx_dep_word] = idx_head_word
                                    ud_dep_rels[idx_dep_word] = dep_rel

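                            # Illustrative <link> shape parsed above; the token IDs
                            # are hypothetical (head first, dependant second):
                            #
                            #     <link ana="ud-syn:nsubj" target="#ssj1.1.1.t2 #ssj1.1.1.t1"/>
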
                            elif curr_el.attrib["type"] == "JOS-SYN":
                                has_jos_dep = True
                                jos_dep_heads = [None for _ in range(len(words))]
                                jos_dep_rels = [None for _ in range(len(words))]

                                for link in curr_el:
                                    dep_rel = link.attrib["ana"].split(":")[-1]
                                    id_head_word, id_dependant = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))

                                    # Unlike UD, the JOS annotations capitalize the root relation ("Root").
                                    idx_head_word = id2position[id_head_word] if dep_rel != "Root" else IDX_ROOT_WORD
                                    idx_dep_word = id2position[id_dependant]

                                    jos_dep_heads[idx_dep_word] = idx_head_word
                                    jos_dep_rels[idx_dep_word] = dep_rel

                            elif curr_el.attrib["type"] == "SRL":
                                for link in curr_el:
                                    sem_role = link.attrib["ana"].split(":")[-1]
                                    id_head_word, id_arg_word = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))
                                    idx_head_word = id2position[id_head_word]
                                    idx_arg_word = id2position[id_arg_word]

                                    srl_info.append({
                                        "idx_arg": idx_arg_word,
                                        "idx_head": idx_head_word,
                                        "role": sem_role
                                    })

                            elif curr_el.attrib["type"] == "MWE":
                                has_mwe = True

                                for link in curr_el:
                                    mwe_type = link.attrib["ana"].split(":")[-1]
                                    # A MWE may span any number of tokens, all listed in `target`.
                                    involved_words = list(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))
                                    word_indices = [id2position[_curr_tok] for _curr_tok in involved_words]
                                    mwe_info.append({"type": mwe_type, "word_indices": word_indices})

                    # Skip sentences that lack the annotation layer requested by the config.
                    if (ret_ne_only and not has_ne) or (ret_ud_dep_only and not has_ud_dep) or \
                            (ret_jos_dep_only and not has_jos_dep) or (ret_srl_only and not has_srl) or \
                            (ret_mwe_only and not has_mwe):
                        continue

                    instance_dict = {
                        "id_doc": id_doc,
                        "idx_par": idx_par,
                        "idx_sent": idx_sent,
                        "id_words": id_words,
                        "words": words,
                        "lemmas": lemmas,
                        "msds": msds
                    }

                    if ret_ne_only or ret_all_data:
                        # Mark unannotated sentences explicitly instead of dropping the column.
                        if not has_ne:
                            named_ents = [NA_TAG for _ in range(len(words))]

                        instance_dict["ne_tags"] = named_ents

                    if ret_ud_dep_only or ret_all_data:
                        if not has_ud_dep:
                            ud_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
                            ud_dep_rels = [NA_TAG for _ in range(len(words))]

                        instance_dict["ud_dep_head"] = ud_dep_heads
                        instance_dict["ud_dep_rel"] = ud_dep_rels

                    if ret_jos_dep_only or ret_all_data:
                        if not has_jos_dep:
                            jos_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
                            jos_dep_rels = [NA_TAG for _ in range(len(words))]

                        instance_dict["jos_dep_head"] = jos_dep_heads
                        instance_dict["jos_dep_rel"] = jos_dep_rels

                    if ret_srl_only or ret_all_data:
                        instance_dict["srl_info"] = srl_info

                    if ret_mwe_only or ret_all_data:
                        instance_dict["mwe_info"] = mwe_info

                    if ret_all_data:
                        instance_dict.update({
                            "has_ne_ann": has_ne, "has_ud_dep_ann": has_ud_dep, "has_jos_dep_ann": has_jos_dep,
                            "has_srl_ann": has_srl, "has_mwe_ann": has_mwe
                        })

                    yield idx_example, instance_dict
                    idx_example += 1