Matej Klemen committed on
Commit
bf8cc42
1 Parent(s): a55a3e6

Add first version of the ssj500k script

Browse files
Files changed (2) hide show
  1. dataset_infos.json +1 -0
  2. ssj500k.py +365 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"all_data": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "has_ne_ann": {"dtype": "bool", "id": null, "_type": "Value"}, "has_ud_dep_ann": {"dtype": "bool", "id": null, "_type": "Value"}, "has_jos_dep_ann": {"dtype": "bool", "id": null, "_type": 
"Value"}, "has_srl_ann": {"dtype": "bool", "id": null, "_type": "Value"}, "has_mwe_ann": {"dtype": "bool", "id": null, "_type": "Value"}, "ne_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ud_dep_head": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ud_dep_rel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "jos_dep_head": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "jos_dep_rel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "srl_info": [{"idx_arg": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_head": {"dtype": "uint32", "id": null, "_type": "Value"}, "role": {"dtype": "string", "id": null, "_type": "Value"}}], "mwe_info": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "all_data", "version": {"version_str": "2.3.0", "description": null, "major": 2, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69372407, "num_examples": 27829, "dataset_name": "ssj500k"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 69372407, "size_in_bytes": 82394243}, "named_entity_recognition": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence 
segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ne_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "named_entity_recognition", "version": {"version_str": "2.3.0", "description": null, "major": 2, 
"minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17651305, "num_examples": 9489, "dataset_name": "ssj500k"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 17651305, "size_in_bytes": 30673141}, "dependency_parsing_ud": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, 
"length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ud_dep_head": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ud_dep_rel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "dependency_parsing_ud", "version": {"version_str": "2.3.0", "description": null, "major": 2, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 14048597, "num_examples": 8000, "dataset_name": "ssj500k"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 14048597, "size_in_bytes": 27070433}, "dependency_parsing_jos": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. 
The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "jos_dep_head": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "jos_dep_rel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "dependency_parsing_jos", "version": {"version_str": "2.3.0", "description": null, "major": 2, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23027788, "num_examples": 11411, "dataset_name": "ssj500k"}}, 
"download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 23027788, "size_in_bytes": 36049624}, "semantic_role_labeling": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, 
"id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "srl_info": [{"idx_arg": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_head": {"dtype": "uint32", "id": null, "_type": "Value"}, "role": {"dtype": "string", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "semantic_role_labeling", "version": {"version_str": "2.3.0", "description": null, "major": 2, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9901320, "num_examples": 5523, "dataset_name": "ssj500k"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 9901320, "size_in_bytes": 22923156}, "multiword_expressions": {"description": "The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,\nsentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated \nwith syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also \nannotated with semantic role labels. 
The morphosyntactic tags and syntactic dependencies are included both in the \nJOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.\n", "citation": "@InProceedings{krek2020ssj500k,\ntitle = {The ssj500k Training Corpus for Slovene Language Processing},\nauthor={Krek, Simon and Erjavec, Toma\u017e and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and \u010cibej, Jaka and Brank, Janez},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},\nyear={2020},\npages={24-33}\n}\n", "homepage": "http://hdl.handle.net/11356/1434", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "idx_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "mwe_info": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ssj500k", "config_name": "multiword_expressions", "version": {"version_str": "2.3.0", "description": null, "major": 2, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 24215008, "num_examples": 13516, "dataset_name": "ssj500k"}}, "download_checksums": 
{"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip": {"num_bytes": 13021836, "checksum": "08ac4d6cf74a45bc81f6e9ca53e7406c96c906c218cbb8ff2f7365e96655c460"}}, "download_size": 13021836, "post_processing_size": null, "dataset_size": 24215008, "size_in_bytes": 37236844}}
ssj500k.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""
2
+ import re
3
+ import xml.etree.ElementTree as ET
4
+ import os
5
+
6
+ import datasets
7
+
8
+
9
+ _CITATION = """\
10
+ @InProceedings{krek2020ssj500k,
11
+ title = {The ssj500k Training Corpus for Slovene Language Processing},
12
+ author={Krek, Simon and Erjavec, Tomaž and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Spela and Čibej, Jaka and Brank, Janez},
13
+ booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},
14
+ year={2020},
15
+ pages={24-33}
16
+ }
17
+ """
18
+
19
+ _DESCRIPTION = """\
20
+ The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,
21
+ sentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated
22
+ with syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also
23
+ annotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the
24
+ JOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.
25
+ """
26
+
27
+ _HOMEPAGE = "http://hdl.handle.net/11356/1434"
28
+
29
+ _LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
30
+
31
# Download location of the English-annotation TEI release of ssj500k
_URLS = {
    "ssj500k-en.tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip"
}


# Curly-braced prefix of the reserved XML namespace, used for attributes such as xml:id and xml:lang
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
# Sentinel head index assigned to the root token of a dependency tree
IDX_ROOT_WORD = -1
# Sentinel head index used when a token has no dependency annotation
IDX_NA_HEAD = -2
# Placeholder tag used when an annotation layer is missing for a sentence
NA_TAG = "N/A"
40
+
41
+
42
def namespace(element):
    """Return the namespace of ``element``'s tag, braces included, or '' if the tag has none.

    ElementTree encodes namespaces Clark-style, e.g. ``{http://www.tei-c.org/ns/1.0}div``.
    Adapted from https://stackoverflow.com/a/12946675
    """
    ns_match = re.match(r'\{.*\}', element.tag)
    if ns_match is None:
        return ''
    return ns_match.group(0)
46
+
47
+
48
def word_information(w_or_pc_el):
    """Extract ``(id, form, lemma, msd)`` from a token element (``<w>`` or ``<pc>``).

    Punctuation tokens (``<pc>``) carry no ``lemma`` attribute, so the stripped
    surface form doubles as the lemma; regular words (``<w>``) take the lemma
    from their ``lemma`` attribute.
    """
    xml_ns = "{http://www.w3.org/XML/1998/namespace}"
    token_id = w_or_pc_el.attrib[xml_ns + "id"]
    surface_form = w_or_pc_el.text.strip()
    morpho_tag = w_or_pc_el.attrib["msd"]

    if w_or_pc_el.tag.endswith("pc"):
        base_form = w_or_pc_el.text.strip()
    else:  # word - w
        base_form = w_or_pc_el.attrib["lemma"]

    return token_id, surface_form, base_form, morpho_tag
61
+
62
+
63
class Ssj500k(datasets.GeneratorBasedBuilder):
    """ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""

    VERSION = datasets.Version("2.3.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all_data", version=VERSION,
                               description="The entire dataset with all annotations, in some cases partially missing."),
        datasets.BuilderConfig(name="named_entity_recognition", version=VERSION,
                               description="The data subset with annotated named entities."),
        datasets.BuilderConfig(name="dependency_parsing_ud", version=VERSION,
                               description="The data subset with annotated dependencies (UD schema)."),
        datasets.BuilderConfig(name="dependency_parsing_jos", version=VERSION,
                               description="The data subset with annotated dependencies (JOS schema)."),
        datasets.BuilderConfig(name="semantic_role_labeling", version=VERSION,
                               description="The data subset with annotated semantic roles."),
        # FIX: was a copy-paste of the NER description ("... annotated named entities.")
        datasets.BuilderConfig(name="multiword_expressions", version=VERSION,
                               description="The data subset with annotated multiword expressions.")
    ]

    DEFAULT_CONFIG_NAME = "all_data"

    def _info(self):
        """Build the feature schema for the selected config.

        All configs share the token-level core (ids, words, lemmas, MSD tags);
        task-specific columns are added per config, and ``all_data``
        additionally carries ``has_*_ann`` flags marking which annotation
        layers are actually present for each instance.
        """
        # Core features shared by every config
        features_dict = {
            "id_doc": datasets.Value("string"),
            "idx_par": datasets.Value("int32"),
            "idx_sent": datasets.Value("int32"),
            "id_words": datasets.Sequence(datasets.Value("string")),
            "words": datasets.Sequence(datasets.Value("string")),
            "lemmas": datasets.Sequence(datasets.Value("string")),
            "msds": datasets.Sequence(datasets.Value("string"))
        }

        ret_all_data = self.config.name == "all_data"
        if ret_all_data:
            # Flags marking which (partial) annotation layers exist for an instance
            features_dict.update({
                "has_ne_ann": datasets.Value("bool"), "has_ud_dep_ann": datasets.Value("bool"),
                "has_jos_dep_ann": datasets.Value("bool"), "has_srl_ann": datasets.Value("bool"),
                "has_mwe_ann": datasets.Value("bool")
            })

        if ret_all_data or self.config.name == "named_entity_recognition":
            features_dict["ne_tags"] = datasets.Sequence(datasets.Value("string"))

        if ret_all_data or self.config.name == "dependency_parsing_ud":
            features_dict.update({
                "ud_dep_head": datasets.Sequence(datasets.Value("int32")),
                "ud_dep_rel": datasets.Sequence(datasets.Value("string"))
            })

        if ret_all_data or self.config.name == "dependency_parsing_jos":
            features_dict.update({
                "jos_dep_head": datasets.Sequence(datasets.Value("int32")),
                "jos_dep_rel": datasets.Sequence(datasets.Value("string"))
            })

        if ret_all_data or self.config.name == "semantic_role_labeling":
            # One dict per (argument, head, role) triple in the sentence
            features_dict.update({
                "srl_info": [{
                    "idx_arg": datasets.Value("uint32"),
                    "idx_head": datasets.Value("uint32"),
                    "role": datasets.Value("string")
                }]
            })

        if ret_all_data or self.config.name == "multiword_expressions":
            # One dict per multiword expression: its type and the involved word positions
            features_dict["mwe_info"] = [{
                "type": datasets.Value("string"),
                "word_indices": datasets.Sequence(datasets.Value("uint32"))
            }]

        features = datasets.Features(features_dict)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the TEI archive and point the TRAIN split at its body XML."""
        urls = _URLS["ssj500k-en.tei"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "ssj500k-en.TEI", "ssj500k-en.body.xml")}
            )
        ]

    def _generate_examples(self, file_path):
        """Stream sentence-level examples from the TEI body file.

        Iterates documents (``<div>``), paragraphs (``<p>``) and sentences
        (``<s>``); within a sentence, token elements (``<w>``/``<pc>``, possibly
        wrapped in a named-entity ``<seg>``) are read first, then the optional
        ``<linkGrp>`` annotation layers (UD-SYN, JOS-SYN, SRL, MWE) that refer
        back to token ids. Sentences without the annotation required by a
        task-specific config are skipped; for ``all_data``, missing layers are
        filled with N/A sentinels and flagged via ``has_*_ann``.
        """
        ret_all_data = self.config.name == "all_data"
        ret_ne_only = self.config.name == "named_entity_recognition"
        ret_ud_dep_only = self.config.name == "dependency_parsing_ud"
        ret_jos_dep_only = self.config.name == "dependency_parsing_jos"
        ret_srl_only = self.config.name == "semantic_role_labeling"
        ret_mwe_only = self.config.name == "multiword_expressions"

        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()
        NAMESPACE = namespace(root)

        idx_example = 0
        for idx_doc, curr_doc in enumerate(root.iterfind(f"{NAMESPACE}div")):
            id_doc = curr_doc.attrib[f"{XML_NAMESPACE}id"]
            # NOTE(review): doc_metadata is collected but not emitted anywhere below —
            # presumably kept for future use; confirm before removing.
            doc_metadata = {}
            metadata_el = curr_doc.find(f"{NAMESPACE}bibl")
            if metadata_el is not None:
                for child in metadata_el:
                    if child.tag.endswith("term"):
                        if child.attrib[f"{XML_NAMESPACE}lang"] != "en":
                            continue

                        # <term> text has the shape "name / value [/ more value parts]"
                        parts = child.text.strip().split(" / ")
                        attr_name = parts[0]
                        attr_value = " / ".join(parts[1:])

                    elif child.tag.endswith("note"):
                        attr_name = child.attrib["type"]
                        attr_value = child.text.strip()
                    else:
                        attr_name = child.tag[len(NAMESPACE):]
                        attr_value = child.text.strip()

                    doc_metadata[attr_name] = attr_value

            # IMPORTANT: This is a hack, because it is not clear which documents are annotated with NEs
            # The numbers of annotated docs are obtained from the paper provided in `_CITATION` (Table 1)
            has_ne = idx_doc < 498
            has_mwe = idx_doc < 754
            has_srl = idx_doc < 228

            for idx_par, curr_par in enumerate(curr_doc.iterfind(f"{NAMESPACE}p")):
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    # Maps token xml:id -> 0-based position in the sentence
                    id2position = {}
                    id_words, words, lemmas, msds = [], [], [], []

                    # Optional (partial) annotations
                    named_ents = []
                    has_ud_dep, ud_dep_heads, ud_dep_rels = False, [], []
                    has_jos_dep, jos_dep_heads, jos_dep_rels = False, [], []
                    srl_info = []
                    mwe_info = []

                    # Note: assuming that all words of a sentence are observed before processing the optional annotations
                    # i.e., that <w> and <pc> elements come first, then the optional <linkGroup> annotations
                    for curr_el in curr_sent:
                        # Words
                        if curr_el.tag.endswith(("w", "pc")):
                            id_word, word, lemma, msd = word_information(curr_el)

                            id2position[id_word] = len(id2position)
                            id_words.append(id_word)
                            words.append(word)
                            lemmas.append(lemma)
                            msds.append(msd)
                            # Tokens outside a <seg> are outside any named entity
                            named_ents.append("O")

                        # Named entities: a <seg> wraps the tokens of one entity mention
                        elif curr_el.tag.endswith("seg"):
                            has_ne = True
                            ne_type = curr_el.attrib["subtype"]  # {"per", "loc", "org", "misc", "deriv-per"}
                            if ne_type.startswith("deriv-"):
                                ne_type = ne_type[len("deriv-"):]
                            ne_type = ne_type.upper()

                            num_ne_tokens = 0
                            for curr_child in curr_el:
                                num_ne_tokens += 1
                                id_word, word, lemma, msd = word_information(curr_child)

                                id2position[id_word] = len(id2position)
                                id_words.append(id_word)
                                words.append(word)
                                lemmas.append(lemma)
                                msds.append(msd)

                            assert num_ne_tokens > 0
                            # IOB2 encoding: first entity token gets B-, the rest I-
                            nes = [f"B-{ne_type.upper()}"] + [f"I-{ne_type.upper()}" for _ in range(num_ne_tokens - 1)]
                            named_ents.extend(nes)

                        elif curr_el.tag.endswith("linkGrp"):
                            # UD dependencies
                            if curr_el.attrib["type"] == "UD-SYN":
                                has_ud_dep = True
                                ud_dep_heads = [None for _ in range(len(words))]
                                ud_dep_rels = [None for _ in range(len(words))]

                                for link in curr_el:
                                    dep_rel = link.attrib["ana"].split(":")[-1]
                                    # "target" holds "#<head_id> #<dependant_id>"; strip the leading '#'
                                    id_head_word, id_dependant = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))

                                    idx_head_word = id2position[id_head_word] if dep_rel != "root" else IDX_ROOT_WORD
                                    idx_dep_word = id2position[id_dependant]

                                    ud_dep_heads[idx_dep_word] = idx_head_word
                                    ud_dep_rels[idx_dep_word] = dep_rel

                            # JOS dependencies (root relation is capitalized: "Root")
                            elif curr_el.attrib["type"] == "JOS-SYN":
                                has_jos_dep = True
                                jos_dep_heads = [None for _ in range(len(words))]
                                jos_dep_rels = [None for _ in range(len(words))]

                                for link in curr_el:
                                    dep_rel = link.attrib["ana"].split(":")[-1]
                                    id_head_word, id_dependant = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))

                                    idx_head_word = id2position[id_head_word] if dep_rel != "Root" else IDX_ROOT_WORD
                                    idx_dep_word = id2position[id_dependant]

                                    jos_dep_heads[idx_dep_word] = idx_head_word
                                    jos_dep_rels[idx_dep_word] = dep_rel

                            # Semantic role labels
                            elif curr_el.attrib["type"] == "SRL":
                                for link in curr_el:
                                    sem_role = link.attrib["ana"].split(":")[-1]
                                    id_head_word, id_arg_word = tuple(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" ")
                                    ))
                                    idx_head_word = id2position[id_head_word]
                                    idx_arg_word = id2position[id_arg_word]

                                    srl_info.append({
                                        "idx_arg": idx_arg_word,
                                        "idx_head": idx_head_word,
                                        "role": sem_role
                                    })

                            # Multi-word expressions
                            elif curr_el.attrib["type"] == "MWE":
                                has_mwe = True
                                # Follow the KOMET/G-KOMET format, i.e. list of {"type": ..., "word_indices": ...}
                                for link in curr_el:
                                    mwe_type = link.attrib["ana"].split(":")[-1]
                                    involved_words = list(map(
                                        lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
                                        link.attrib["target"].split(" "))
                                    )
                                    word_indices = [id2position[_curr_tok] for _curr_tok in involved_words]
                                    mwe_info.append({"type": mwe_type, "word_indices": word_indices})

                    # Specified config expects only annotated instances, but there are none for the current instance
                    if (ret_ne_only and not has_ne) or (ret_ud_dep_only and not has_ud_dep) or \
                            (ret_jos_dep_only and not has_jos_dep) or (ret_srl_only and not has_srl) or \
                            (ret_mwe_only and not has_mwe):
                        continue

                    instance_dict = {
                        "id_doc": id_doc,
                        "idx_par": idx_par,
                        "idx_sent": idx_sent,
                        "id_words": id_words,
                        "words": words,
                        "lemmas": lemmas,
                        "msds": msds
                    }

                    if ret_ne_only or ret_all_data:
                        if not has_ne:
                            named_ents = [NA_TAG for _ in range(len(words))]

                        instance_dict["ne_tags"] = named_ents

                    if ret_ud_dep_only or ret_all_data:
                        if not has_ud_dep:
                            ud_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
                            ud_dep_rels = [NA_TAG for _ in range(len(words))]

                        instance_dict["ud_dep_head"] = ud_dep_heads
                        instance_dict["ud_dep_rel"] = ud_dep_rels

                    if ret_jos_dep_only or ret_all_data:
                        if not has_jos_dep:
                            jos_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
                            jos_dep_rels = [NA_TAG for _ in range(len(words))]

                        instance_dict["jos_dep_head"] = jos_dep_heads
                        instance_dict["jos_dep_rel"] = jos_dep_rels

                    if ret_srl_only or ret_all_data:
                        instance_dict["srl_info"] = srl_info

                    if ret_mwe_only or ret_all_data:
                        instance_dict["mwe_info"] = mwe_info

                    # When all data is returned, some instances are unannotated or partially annotated, mark instances with flags
                    if ret_all_data:
                        instance_dict.update({
                            "has_ne_ann": has_ne, "has_ud_dep_ann": has_ud_dep, "has_jos_dep_ann": has_jos_dep,
                            "has_srl_ann": has_srl, "has_mwe_ann": has_mwe
                        })

                    yield idx_example, instance_dict
                    idx_example += 1