Datasets:

dibyaaaaax committed on
Commit
4845864
1 Parent(s): 9889a3c

Upload openkp.py

Browse files
Files changed (1) hide show
  1. openkp.py +154 -0
openkp.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['train', 'test', 'valid']
5
+ _CITATION = """\
6
+ @inproceedings{Xiong2019OpenDW,
7
+ title={Open Domain Web Keyphrase Extraction Beyond Language Modeling},
8
+ author={Lee Xiong and Chuan Hu and Chenyan Xiong and Daniel Fernando Campos and Arnold Overwijk},
9
+ booktitle={EMNLP},
10
+ year={2019}
11
+ }
12
+ """
13
+
14
+ _DESCRIPTION = """\
15
+
16
+ """
17
+
18
+ _HOMEPAGE = "https://github.com/microsoft/OpenKP"
19
+
20
+ # TODO: Add the licence for the dataset here if you can find it
21
+ _LICENSE = "MIT License"
22
+
23
+ # TODO: Add link to the official dataset URLs here
24
+
25
+ _URLS = {
26
+ "test": "test.jsonl",
27
+ "train": "train.jsonl",
28
+ "valid": "valid.jsonl"
29
+ }
30
+
31
+
32
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
33
class OpenKP(datasets.GeneratorBasedBuilder):
    """OpenKP: open-domain web keyphrase extraction (Xiong et al., EMNLP 2019).

    Three configurations are exposed:

    * ``extraction`` -- documents with token-level BIO tags for present
      keyphrases.
    * ``generation`` -- documents with extractive and abstractive
      keyphrase lists.
    * ``raw``        -- all of the above plus per-record ``other_metadata``.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` for the active config.

        The feature schema depends on ``self.config.name``; every config
        shares the ``id`` and ``document`` columns.
        """
        if self.config.name == "extraction":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        else:
            # "raw": full schema including the nested metadata sequence.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string"))
                        }
                    )
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL files and map them to the standard splits."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir['valid'],
                    "split": "valid",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one JSON-Lines file.

        The example's columns are selected to match the feature schema of
        the active configuration (see :meth:`_info`).
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                # Columns shared by every configuration.
                # NOTE(review): the "id" feature is declared int64 but is
                # read from "paper_id" -- confirm the JSONL stores integers.
                example = {
                    "id": data['paper_id'],
                    "document": data["document"],
                }
                # Optional columns use .get() so a missing key yields None
                # rather than raising, matching the original behavior.
                if self.config.name == "extraction":
                    example["doc_bio_tags"] = data.get("doc_bio_tags")
                elif self.config.name == "generation":
                    example["extractive_keyphrases"] = data.get("extractive_keyphrases")
                    example["abstractive_keyphrases"] = data.get("abstractive_keyphrases")
                else:
                    example["doc_bio_tags"] = data.get("doc_bio_tags")
                    example["extractive_keyphrases"] = data.get("extractive_keyphrases")
                    example["abstractive_keyphrases"] = data.get("abstractive_keyphrases")
                    example["other_metadata"] = data["other_metadata"]
                yield key, example