holylovenia committed
Commit 966b02e
1 Parent(s): 9780411

Upload mkqa.py with huggingface_hub
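
For context, uploads like this one are typically made with the huggingface_hub client rather than through the web UI. A minimal sketch of the kind of call that produces such a commit (the repo ID below is a hypothetical placeholder, not taken from this commit):

    # Hypothetical sketch: uploading a dataset script with huggingface_hub.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_file(
        path_or_fileobj="mkqa.py",   # local script to upload
        path_in_repo="mkqa.py",      # destination path inside the repo
        repo_id="your-org/mkqa",     # placeholder dataset repo ID
        repo_type="dataset",
        commit_message="Upload mkqa.py with huggingface_hub",
    )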

Files changed (1)
  1. mkqa.py +227 -0
mkqa.py ADDED
@@ -0,0 +1,227 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{longpre-etal-2021-mkqa,
    title = "{MKQA}: A Linguistically Diverse Benchmark for Multilingual Open Domain Question Answering",
    author = "Longpre, Shayne and
      Lu, Yi and
      Daiber, Joachim",
    editor = "Roark, Brian and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "9",
    year = "2021",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2021.tacl-1.82",
    doi = "10.1162/tacl_a_00433",
    pages = "1389--1406",
}
"""

_DATASETNAME = "mkqa"

_DESCRIPTION = """\
Multilingual Knowledge Questions and Answers (MKQA), an open-domain question answering evaluation set comprising 10k question-answer pairs aligned across 26 typologically diverse languages (260k question-answer pairs in total).
"""

_HOMEPAGE = "https://github.com/apple/ml-mkqa"

_LICENSE = Licenses.CC_BY_SA_3_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://github.com/apple/ml-mkqa/raw/main/dataset/mkqa.jsonl.gz",
}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

_LANGUAGES = [
    "khm",
    "zsm",
    "tha",
    "vie",
]  # follows the convention of 3-letter codes as suggested since NusaCrowd.


class MKQADataset(datasets.GeneratorBasedBuilder):
    """
    MKQA, an open-domain question answering evaluation set comprising 10k question-answer pairs
    aligned across 26 typologically diverse languages (260k question-answer pairs in total).
    The goal of this dataset is to provide a challenging benchmark for question answering quality
    across a wide set of languages.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    _ANS_TYPES = [
        "binary",
        "date",
        "entity",
        "long_answer",
        "number",
        "number_with_unit",
        "short_phrase",
        "unanswerable",
    ]

    _SOURCE_LANGUAGES = [
        "km",
        "ms",
        "th",
        "vi",
        # Filtered out:
        # "ar", "da", "de", "en", "es", "fi", "fr", "he", "hu", "it", "ja", "ko",
        # "nl", "no", "pl", "pt", "ru", "sv", "tr", "zh_cn", "zh_hk", "zh_tw",
    ]

    _LANG_3TO2 = {
        "khm": "km",
        "zsm": "ms",
        "tha": "th",
        "vie": "vi",
    }

    BUILDER_CONFIGS = [
        *[
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset_lang}{'_' if subset_lang else ''}source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"{_DATASETNAME} source schema",
                schema="source",
                subset_id=f"{_DATASETNAME}_{subset_lang}",
            )
            for subset_lang in ["", *_LANGUAGES]
        ],
        *[
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset_lang}{'_' if subset_lang else ''}seacrowd_qa",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} SEACrowd schema",
                schema="seacrowd_qa",
                subset_id=f"{_DATASETNAME}_{subset_lang}",
            )
            for subset_lang in ["", *_LANGUAGES]
        ],
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        # An empty language suffix (subset_id "mkqa_") selects all supported languages.
        lang = self.config.subset_id.rsplit("_", 1)[-1]
        lang = self._LANG_3TO2.get(lang, lang)

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "query": datasets.Value("string"),
                    "answers": {
                        cur_lang: [
                            {
                                "type": datasets.ClassLabel(names=self._ANS_TYPES),
                                "entity": datasets.Value("string"),
                                "text": datasets.Value("string"),
                                "aliases": [datasets.Value("string")],
                            }
                        ]
                        for cur_lang in ([lang] if lang else self._SOURCE_LANGUAGES)
                    },
                    "queries": {cur_lang: datasets.Value("string") for cur_lang in ([lang] if lang else self._SOURCE_LANGUAGES)},
                    "example_id": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_qa":
            features = schemas.qa_features
            features["meta"]["answer_entity"] = datasets.Sequence(datasets.Value("string"))
            features["meta"]["answer_aliases"] = datasets.Sequence(datasets.Sequence(datasets.Value("string")))
            features["meta"]["answer_type"] = datasets.Sequence(datasets.ClassLabel(names=self._ANS_TYPES))

        else:  # unknown schema; should not be reachable
            raise NotImplementedError()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_path = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path},
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        lang = self.config.subset_id.rsplit("_", 1)[-1]
        lang = self._LANG_3TO2.get(lang, lang)

        # Read all rows, keeping only the answer/query entries for the requested language(s).
        datas = []
        with open(filepath, "r", encoding="utf8") as ipt:
            for cur in map(json.loads, ipt):
                cur["example_id"] = str(cur["example_id"])
                for key in ["answers", "queries"]:
                    cur[key] = {k: v for k, v in cur[key].items() if k in ([lang] if lang else self._SOURCE_LANGUAGES)}
                datas.append(cur)

        if self.config.schema == "source":
            for cur in datas:
                # Fill in optional answer fields so every row matches the declared features.
                for anslist in cur["answers"].values():
                    for ans in anslist:
                        ans.setdefault("entity", "")
                        ans.setdefault("aliases", [])
                yield int(cur["example_id"]), cur

        elif self.config.schema == "seacrowd_qa":
            for cur in datas:
                # Emit one QA example per language, keyed as "<example_id>_<lang>".
                for cur_lang in [lang] if lang else map(lambda k: self._LANG_3TO2.get(k, k), _LANGUAGES):
                    ret = {
                        "id": f'{cur["example_id"]}_{cur_lang}',
                        "question_id": cur["example_id"],
                        "document_id": "",
                        "question": cur["queries"][cur_lang],
                        "type": "open_domain",
                        "choices": [],
                        "context": "",
                        "answer": [ans.get("text", None) for ans in cur["answers"][cur_lang]],
                        "meta": {f"answer_{k}": [ans.get(k, None) for ans in cur["answers"][cur_lang]] for k in ["entity", "aliases", "type"]},
                    }
                    ret["meta"]["answer_aliases"] = list(map(lambda a: [] if a is None else a, ret["meta"]["answer_aliases"]))
                    yield ret["id"], ret
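
Once this script is hosted in a dataset repository, any of the configs defined in BUILDER_CONFIGS can be loaded by name with the datasets library. A usage sketch (the repo path below is a hypothetical placeholder; the config names follow the naming scheme in the script above):

    # Usage sketch: the repo path is a placeholder.
    import datasets

    # Source schema covering all four languages (this is also DEFAULT_CONFIG_NAME):
    ds_all = datasets.load_dataset("your-org/mkqa", name="mkqa_source", trust_remote_code=True)

    # SEACrowd QA schema restricted to Thai ("tha" maps to source language code "th"):
    ds_tha = datasets.load_dataset("your-org/mkqa", name="mkqa_tha_seacrowd_qa", trust_remote_code=True)

    print(ds_tha["train"][0]["question"])

Both schemas expose a single "train" split, as defined in _split_generators.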