saattrupdan committed on
Commit 0c3c2dd
1 Parent(s): ff76791

feat: Add data, script and readme

Files changed (10)
  1. .gitattributes +6 -54
  2. .gitignore +1 -0
  3. README.md +147 -1
  4. data/da.jsonl +3 -0
  5. data/fo.jsonl +3 -0
  6. data/is.jsonl +3 -0
  7. data/nb.jsonl +3 -0
  8. data/nn.jsonl +3 -0
  9. data/sv.jsonl +3 -0
  10. scandi_wiki.py +126 -0
.gitattributes CHANGED
@@ -1,54 +1,6 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
+ data/nn.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/sv.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/da.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/fo.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/is.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/nb.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .venv
README.md CHANGED
@@ -1,3 +1,149 @@
  ---
- license: cc-by-sa-4.0
+ pretty_name: ScandiQA
+ language:
+ - da
+ - sv
+ - no
+ license:
+ - cc-by-sa-4.0
+ multilinguality:
+ - multilingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - mkqa
+ - natural_questions
+ task_categories:
+ - question-answering
+ task_ids:
+ - extractive-qa
  ---
+
+ # Dataset Card for ScandiQA
+
+ ## Dataset Description
+
+ - **Repository:** <https://github.com/alexandrainst/scandi-qa>
+ - **Point of Contact:** [Dan Saattrup Nielsen](mailto:[email protected])
+ - **Size of downloaded dataset files:** 69 MB
+ - **Size of the generated dataset:** 67 MB
+ - **Total amount of disk used:** 136 MB
+
+ ### Dataset Summary
+
+ ScandiQA is a dataset of questions and answers in the Danish, Norwegian, and Swedish
+ languages. All samples come from the Natural Questions (NQ) dataset, which is a large
+ question answering dataset from Google searches. The Scandinavian questions and answers
+ come from the MKQA dataset, where 10,000 NQ samples were manually translated into,
+ among others, Danish, Norwegian, and Swedish. However, MKQA does not include translated
+ contexts, which hinders the training of extractive question answering models.
+
+ We merged the NQ dataset with the MKQA dataset and extracted contexts either as "long
+ answers" from the NQ dataset, being the paragraph in which the answer was found, or,
+ failing that, by locating the paragraph with the largest cosine similarity to the
+ question among the paragraphs that contain the desired answer.
+
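As an illustration, here is a minimal sketch of that fallback context extraction, assuming TF-IDF vectors as the text representation; the actual pipeline lives in the linked repository and may use different embeddings, and all names here are hypothetical.

```python
# Hypothetical sketch of the fallback context extraction described above.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def extract_context(question: str, paragraphs: list[str], answer: str) -> str | None:
    """Pick the paragraph most similar to the question that contains the answer."""
    candidates = [p for p in paragraphs if answer in p]
    if not candidates:
        return None  # such samples get filtered out at the "merging stage"
    # Vectorise the question together with the candidate paragraphs.
    vectors = TfidfVectorizer().fit_transform([question] + candidates)
    similarities = cosine_similarity(vectors[0], vectors[1:])[0]
    return candidates[similarities.argmax()]
```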
+ Further, many answers in the MKQA dataset were "language normalised": for instance, all
+ date answers were converted to the format "YYYY-MM-DD", meaning that in most cases
+ these answers do not appear verbatim in any paragraph. We solve this by extending the
+ MKQA answers with plausible "answer candidates", being slight perturbations or
+ translations of the answer.
+
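For example, a date answer normalised to "YYYY-MM-DD" could be expanded into surface forms such as the ones below. This is purely illustrative; the actual perturbation rules are defined in the linked repository.

```python
# Illustrative only: expand a normalised date answer into plausible surface forms.
from datetime import date

MONTHS_DA = [
    "januar", "februar", "marts", "april", "maj", "juni",
    "juli", "august", "september", "oktober", "november", "december",
]


def date_candidates(normalised: str) -> list[str]:
    """Answer candidates for a "YYYY-MM-DD" answer, here with Danish month names."""
    d = date.fromisoformat(normalised)
    return [
        normalised,                                     # "1987-06-05"
        f"{d.day}. {MONTHS_DA[d.month - 1]} {d.year}",  # "5. juni 1987"
        f"{d.day}/{d.month}-{d.year}",                  # "5/6-1987"
    ]
```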
+ With the contexts extracted, we translated them into Danish, Swedish and Norwegian
+ using the [DeepL translation service](https://www.deepl.com/pro-api?cta=header-pro-api)
+ for Danish and Swedish, and the [Google Translation
+ service](https://cloud.google.com/translate/docs/reference/rest/) for Norwegian. After
+ translation we ensured that the Scandinavian answers do indeed occur in the translated
+ contexts.
+
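For Danish and Swedish, the translation step might look roughly like the sketch below, using the official `deepl` Python client. This is an assumption about tooling, not the project's actual code, and the API key is a placeholder.

```python
# Sketch of context translation via DeepL; assumes the `deepl` package is installed.
import deepl

translator = deepl.Translator("YOUR_DEEPL_API_KEY")  # placeholder key
result = translator.translate_text(
    "This is a test context.",
    target_lang="DA",  # "SV" for Swedish
)
print(result.text)
```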
+ As we filter the MKQA samples at both the "merging stage" and the "translation stage",
+ we cannot convert all of the 10,000 samples to the Scandinavian languages, and instead
+ get roughly 8,000 samples per language. These have further been split into a training,
+ validation and test split, with the latter two containing roughly 750 samples each.
+ The splits have been created in such a way that the proportion of samples without an
+ answer is roughly the same in each split.
+
+
+ ### Supported Tasks and Leaderboards
+
+ Training machine learning models for extractive question answering is the intended
+ task for this dataset. No leaderboard is active at this point.
+
+
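Once a model has been trained on the dataset, it could be applied with the `transformers` question-answering pipeline along these lines; the model name is a placeholder for whatever checkpoint the training produces.

```python
# Sketch of the intended downstream use: extractive QA over (question, context) pairs.
from transformers import pipeline

qa = pipeline("question-answering", model="path/to/your-scandiqa-model")  # placeholder
prediction = qa(question="Er dette en test?", context="Dette er en testkontekst.")
print(prediction["answer"], prediction["score"])
```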
+ ### Languages
+
+ The dataset is available in Danish (`da`), Swedish (`sv`) and Norwegian (`no`).
+
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ - **Size of downloaded dataset files:** 69 MB
+ - **Size of the generated dataset:** 67 MB
+ - **Total amount of disk used:** 136 MB
+
+ An example from the `train` split of the `da` subset looks as follows.
+ ```
+ {
+     'example_id': 123,
+     'question': 'Er dette en test?',
+     'answer': 'Dette er en test',
+     'answer_start': 0,
+     'context': 'Dette er en testkontekst.',
+     'answer_en': 'This is a test',
+     'answer_start_en': 0,
+     'context_en': 'This is a test',
+     'title_en': 'Train test'
+ }
+ ```
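The subsets can be loaded with the `datasets` library; a minimal sketch, where the dataset identifier is an assumption and may differ from the actual repository id.

```python
# Sketch of loading the Danish subset of ScandiQA.
from datasets import load_dataset

scandiqa_da = load_dataset("alexandrainst/scandiqa", "da")  # hypothetical id
print(scandiqa_da["train"][0]["question"])
```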
+
+ ### Data Fields
+
+ The data fields are the same among all splits.
+
+ - `example_id`: an `int64` feature.
+ - `question`: a `string` feature.
+ - `answer`: a `string` feature.
+ - `answer_start`: an `int64` feature.
+ - `context`: a `string` feature.
+ - `answer_en`: a `string` feature.
+ - `answer_start_en`: an `int64` feature.
+ - `context_en`: a `string` feature.
+ - `title_en`: a `string` feature.
+
+ ### Data Splits
+
+ | name | train | validation | test |
+ |------|------:|-----------:|-----:|
+ | da   |  6311 |        749 |  750 |
+ | sv   |  6299 |        750 |  749 |
+ | no   |  6314 |        749 |  750 |
+
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ The Scandinavian languages do not have any gold standard question answering dataset.
+ This dataset is not quite gold standard either, but since both the questions and the
+ answers have been manually translated, it is a solid silver standard dataset.
+
+ ### Source Data
+
+ The original data was collected from the [MKQA](https://github.com/apple/ml-mkqa/) and
+ [Natural Questions](https://ai.google.com/research/NaturalQuestions) datasets from
+ Apple and Google, respectively.
+
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [Alexandra
+ Institute](https://alexandra.dk/) curated this dataset.
+
+ ### Licensing Information
+
+ The dataset is licensed under the [CC BY-SA 4.0
+ license](https://creativecommons.org/licenses/by-sa/4.0/).
data/da.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e524f30d6b2528b2fad64c8cb5800fbc4860c93731288893a645c8731784b4a5
+ size 586791170
data/fo.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d199629c089a9a3b5df9cc64427737c4496897b6717bde7b3eea7b588ee0c8ed
+ size 18919558
data/is.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:693254d4d0248648f6c985781e98f3a4d3f3cd0a1f513987a309cc65eccc6b50
+ size 114716852
data/nb.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9b73ea2f7f06682c6b2a631f0d3b2f3dc609ebe8b60352bd4e935f5ef15312c
+ size 1094829731
data/nn.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65040a3d175e5717bae504441fbad6fe1870119c5cc93a1ab12727b07f5eb02b
+ size 255058352
data/sv.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a57244428c760f9b07d5ba2560d4d2f0d65f8c3a517d879e3f6c81c3550ba5b
+ size 2424831491
scandi_wiki.py ADDED
@@ -0,0 +1,126 @@
+ # Copyright 2022 The HuggingFace Datasets Authors and Dan Saattrup Nielsen.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Python build script for the ScandiWiki dataset."""
+
+
+ import json
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+ from datasets import Version
+ from datasets.builder import BuilderConfig, GeneratorBasedBuilder
+ from datasets.download import DownloadManager
+ from datasets.features import Features, Value
+ from datasets.info import DatasetInfo
+ from datasets.splits import SplitGenerator
+
+ _DESCRIPTION = """
+ ScandiWiki is a parsed and deduplicated version of the Danish, Norwegian Bokmål,
+ Norwegian Nynorsk, Swedish, Icelandic and Faroese Wikipedia corpora, as of January
+ 2023.
+ """
+
+ _LICENSE = "CC BY-SA 4.0"
+ _URLS = {
+     "da": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/da.jsonl",
+     "sv": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/sv.jsonl",
+     "nb": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/nb.jsonl",
+     "nn": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/nn.jsonl",
+     "is": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/is.jsonl",
+     "fo": "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data/fo.jsonl",
+ }
+
+ # _CITATION = """
+ # @InProceedings{huggingface:dataset,
+ # title = {ScandiWiki: A Scandinavian Wikipedia Dump},
+ # author={Dan Saattrup Nielsen},
+ # year={2022}
+ # }
+ # """
+
+
+ class ScandiWiki(GeneratorBasedBuilder):
+     """Scandinavian part of Wikipedia."""
+
+     VERSION = Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         BuilderConfig(
+             name="da",
+             version=VERSION,
+             description="The deduplicated Danish part of Wikipedia.",
+         ),
+         BuilderConfig(
+             name="sv",
+             version=VERSION,
+             description="The deduplicated Swedish part of Wikipedia.",
+         ),
+         BuilderConfig(
+             name="nb",
+             version=VERSION,
+             description="The deduplicated Norwegian Bokmål part of Wikipedia.",
+         ),
+         BuilderConfig(
+             name="nn",
+             version=VERSION,
+             description="The deduplicated Norwegian Nynorsk part of Wikipedia.",
+         ),
+         BuilderConfig(
+             name="is",
+             version=VERSION,
+             description="The deduplicated Icelandic part of Wikipedia.",
+         ),
+         BuilderConfig(
+             name="fo",
+             version=VERSION,
+             description="The deduplicated Faroese part of Wikipedia.",
+         ),
+     ]
+
+     def _info(self) -> DatasetInfo:
+         features = Features(
+             {
+                 "id": Value("string"),
+                 "url": Value("string"),
+                 "title": Value("string"),
+                 "text": Value("string"),
+             }
+         )
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             # homepage=_HOMEPAGE,
+             license=_LICENSE,
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
+         url = _URLS[self.config.name]
+         downloaded_file = dl_manager.download(url)
+         return [
+             SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs=dict(
+                     filepath=downloaded_file,
+                     split="train",
+                 ),
+             ),
+         ]
+
+     def _generate_examples(self, filepath: str, split):
+         with Path(filepath).open(encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 yield key, data
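With this script in place, the dataset should be loadable via the `datasets` library. A minimal usage sketch; the repository id matches the `_URLS` above, and streaming is optional but avoids downloading the multi-GB files up front.

```python
# Minimal usage sketch for the ScandiWiki builder defined above.
from datasets import load_dataset

wiki = load_dataset("alexandrainst/scandi-wiki", "da", streaming=True)
first_article = next(iter(wiki["train"]))
print(first_article["title"], first_article["url"])
```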