albertvillanova committed on
Commit
f12a081
1 Parent(s): 27439d7

Delete loading script

Files changed (1)
  1. turk.py +0 -117
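
With the loading script deleted, `load_dataset` no longer executes `turk.py`; the dataset is presumably served straight from data files hosted in the repository (the usual follow-up to the Hub's automatic Parquet conversion). A minimal usage sketch, assuming the repo id `turk` and the `simplification` config from the deleted script carry over unchanged:

    from datasets import load_dataset

    # Repo id and config name are assumptions carried over from the old script;
    # the "validation"/"test" splits and the two features match its _info().
    ds = load_dataset("turk", "simplification")

    sample = ds["validation"][0]
    print(sample["original"])         # one source sentence
    print(sample["simplifications"])  # its 8 crowd-sourced simplifications
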
turk.py DELETED
@@ -1,117 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TURKCorpus: a dataset for sentence simplification evaluation."""
-
-
-import datasets
-
-
-_CITATION = """\
-@article{Xu-EtAl:2016:TACL,
-  author = {Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch},
-  title = {Optimizing Statistical Machine Translation for Text Simplification},
-  journal = {Transactions of the Association for Computational Linguistics},
-  volume = {4},
-  year = {2016},
-  url = {https://cocoxu.github.io/publications/tacl2016-smt-simplification.pdf},
-  pages = {401--415}
-}
-"""
-
-_DESCRIPTION = """\
-TURKCorpus is a dataset for evaluating sentence simplification systems that focus on lexical paraphrasing,
-as described in "Optimizing Statistical Machine Translation for Text Simplification". The corpus is composed of 2000 validation and 359 test original sentences that were each simplified 8 times by different annotators.
-"""
-
-_HOMEPAGE = "https://github.com/cocoxu/simplification"
-
-_LICENSE = "GNU General Public License v3.0"
-
-_URL_LIST = [
-    (
-        "test.8turkers.tok.norm",
-        "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/test.8turkers.tok.norm",
-    ),
-    (
-        "tune.8turkers.tok.norm",
-        "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/tune.8turkers.tok.norm",
-    ),
-]
-_URL_LIST += [
-    (
-        f"{spl}.8turkers.tok.turk.{i}",
-        f"https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/{spl}.8turkers.tok.turk.{i}",
-    )
-    for spl in ["tune", "test"]
-    for i in range(8)
-]
-
-_URLs = dict(_URL_LIST)
-
-
-class Turk(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="simplification",
-            version=VERSION,
-            description="A set of original sentences aligned with 8 possible simplifications for each.",
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "original": datasets.Value("string"),
-                "simplifications": datasets.Sequence(datasets.Value("string")),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepaths": data_dir,
-                    "split": "valid",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepaths": data_dir, "split": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, filepaths, split):
-        """Yields examples."""
-        if split == "valid":
-            split = "tune"
-        files = [open(filepaths[f"{split}.8turkers.tok.norm"], encoding="utf-8")] + [
-            open(filepaths[f"{split}.8turkers.tok.turk.{i}"], encoding="utf-8") for i in range(8)
-        ]
-        for id_, lines in enumerate(zip(*files)):
-            yield id_, {"original": lines[0].strip(), "simplifications": [line.strip() for line in lines[1:]]}
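
For reference, the logic just removed is small enough to reproduce without the builder machinery: each split is nine line-aligned files, one `.norm` file of original sentences plus eight `.turk.{i}` files of crowd-sourced simplifications, zipped row by row. A standalone sketch of the same loop (`iter_turk_examples` and the `filepaths` dict of local paths are hypothetical stand-ins for what `download_and_extract` returned), which also closes the file handles the original left open:

    import contextlib

    def iter_turk_examples(filepaths, split):
        """Yield (id, example) pairs by zipping the nine line-aligned files."""
        if split == "valid":  # the upstream files name the validation split "tune"
            split = "tune"
        names = [f"{split}.8turkers.tok.norm"] + [
            f"{split}.8turkers.tok.turk.{i}" for i in range(8)
        ]
        # ExitStack closes every handle when the generator finishes, unlike
        # the deleted script, which opened nine files and never closed them.
        with contextlib.ExitStack() as stack:
            files = [
                stack.enter_context(open(filepaths[name], encoding="utf-8"))
                for name in names
            ]
            for id_, lines in enumerate(zip(*files)):
                yield id_, {
                    "original": lines[0].strip(),
                    "simplifications": [line.strip() for line in lines[1:]],
                }
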