Commit 7d57d43 (1 parent: 806621d), committed by ArneBinder

from https://github.com/ArneBinder/pie-datasets/pull/152

Files changed (1):
  1. scifact.py +245 -0
scifact.py ADDED
import json
import logging
import os
from collections import defaultdict
from copy import copy
from typing import Any, Dict, Iterable, List

import datasets
from datasets import GeneratorBasedBuilder

logger = logging.getLogger(__name__)

_DESCRIPTION = """\
SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts and annotated \
with labels and rationales. This version differs from `allenai/scifact` on HF in that there are no separate splits \
for claims and a corpus; instead, each document is combined with the claims it supports or refutes. Note that some \
documents have no claims associated with them, and some claims have no evidence. In the latter case, all such claims \
are assigned to a DUMMY document with ID -1 and without any text (i.e. abstract sentences).
"""

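# Illustrative sketch (not part of the original commit): one example of the
# "as_documents" variant, matching the features defined in `_info()` below.
# All values are made up; in SciFact, evidence labels are "SUPPORT" or "CONTRADICT".
# {
#     "doc_id": 4983,
#     "title": "...",
#     "abstract": ["Sentence 0 ...", "Sentence 1 ..."],
#     "structured": False,
#     "claims": [
#         {"id": 13, "claim": "...", "evidence": [{"label": "SUPPORT", "sentences": [0, 1]}]}
#     ],
# }
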
DATA_URL = "https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz"
SUBDIR = "data"

VARIANT_DOCUMENTS = "as_documents"
VARIANT_CLAIMS = "as_claims"

class ScifactConfig(datasets.BuilderConfig):
    """BuilderConfig for Scifact."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

class SciFact(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        ScifactConfig(
            name=VARIANT_DOCUMENTS,
            description="Documents combined with the claims they support or refute; split into train, dev, test",
        ),
        ScifactConfig(
            name=VARIANT_CLAIMS,
            description="Claims combined with the documents they cite as evidence; split into train, dev, test",
        ),
    ]

    def _info(self):
        # Specifies the datasets.DatasetInfo object
        if self.config.name == VARIANT_DOCUMENTS:
            features = {
                "doc_id": datasets.Value("int32"),  # document ID
                "title": datasets.Value("string"),  # document title
                "abstract": datasets.features.Sequence(
                    datasets.Value("string")
                ),  # document sentences
                "structured": datasets.Value(
                    "bool"
                ),  # whether the abstract is structured, i.e. has OBJECTIVE, CONCLUSION, METHODS marked in the text
                "claims": datasets.features.Sequence(
                    feature={
                        "id": datasets.Value(dtype="int32", id=None),
                        "claim": datasets.Value(dtype="string", id=None),
                        "evidence": datasets.features.Sequence(
                            feature={
                                "label": datasets.Value(dtype="string", id=None),
                                "sentences": datasets.features.Sequence(
                                    datasets.Value(dtype="int32", id=None)
                                ),
                            }
                        ),
                    }
                ),  # list of claims associated with the document
            }
        elif self.config.name == VARIANT_CLAIMS:
            features = {
                "id": datasets.Value("int32"),  # claim ID
                "claim": datasets.Value(dtype="string", id=None),  # claim text
                "cited_docs": datasets.features.Sequence(
                    feature={
                        "doc_id": datasets.Value(dtype="int32", id=None),
                        "title": datasets.Value("string"),  # document title
                        "abstract": datasets.features.Sequence(
                            datasets.Value("string")
                        ),  # document sentences
                        "structured": datasets.Value(
                            "bool"
                        ),  # whether the abstract is structured, i.e. has OBJECTIVE, CONCLUSION, METHODS marked in the text
                        "evidence": datasets.features.Sequence(
                            feature={
                                "label": datasets.Value(dtype="string", id=None),
                                "sentences": datasets.features.Sequence(
                                    datasets.Value(dtype="int32", id=None)
                                ),
                            }
                        ),
                    }
                ),  # list of documents cited by the claim
            }
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://scifact.apps.allenai.org/",
        )

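    # Illustrative sketch (not part of the original commit): one example of the
    # "as_claims" variant, mirroring the features above. Values are made up.
    # {
    #     "id": 13,
    #     "claim": "...",
    #     "cited_docs": [
    #         {"doc_id": 4983, "title": "...", "abstract": ["..."], "structured": False,
    #          "evidence": [{"label": "SUPPORT", "sentences": [0, 1]}]}
    #     ],
    # }
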
    def _generate_examples(self, claims_filepath: str, corpus_filepath: str):
        """Yields examples."""
        with open(claims_filepath) as f:
            claim_data = [json.loads(line) for line in f]

        with open(corpus_filepath) as f:
            corpus_docs = [json.loads(line) for line in f]

        if self.config.name == VARIANT_DOCUMENTS:
            doc_id2claims = defaultdict(list)
            for claim in claim_data:
                cited_doc_ids = claim.pop("cited_doc_ids", [-1])
                evidence = claim.pop("evidence", dict())
                for cited_doc_id in cited_doc_ids:
                    current_claim = claim.copy()
                    current_claim["evidence"] = evidence.get(str(cited_doc_id), [])
                    doc_id2claims[cited_doc_id].append(current_claim)
            dummy_doc = {"doc_id": -1, "title": "", "abstract": [], "structured": False}
            corpus_docs = [dummy_doc] + corpus_docs

            for id_, doc in enumerate(corpus_docs):
                doc = doc.copy()
                doc["claims"] = doc_id2claims.get(doc["doc_id"], [])
                yield id_, doc
        elif self.config.name == VARIANT_CLAIMS:
            doc_id2doc = {doc["doc_id"]: doc for doc in corpus_docs}
            for _id, claim in enumerate(claim_data):
                evidence = claim.pop("evidence", {})
                cited_doc_ids = claim.pop("cited_doc_ids", [])
                claim["cited_docs"] = []
                for cited_doc_id in cited_doc_ids:
                    doc = copy(doc_id2doc[cited_doc_id])
                    doc["evidence"] = evidence.get(str(cited_doc_id), [])
                    claim["cited_docs"].append(doc)
                yield _id, claim
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")

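    # For reference (inferred from the field accesses above; not part of the
    # original commit): a line of claims_*.jsonl roughly looks like
    #   {"id": 13, "claim": "...", "cited_doc_ids": [4983],
    #    "evidence": {"4983": [{"sentences": [0, 1], "label": "SUPPORT"}]}}
    # and a line of corpus.jsonl like
    #   {"doc_id": 4983, "title": "...", "abstract": ["..."], "structured": false}
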
    def _split_generators(self, dl_manager):
        """Downloads and extracts the data, unless a manual data directory is provided."""
        if dl_manager.manual_dir is None:
            data_dir = os.path.join(dl_manager.download_and_extract(DATA_URL), SUBDIR)
        else:
            # Absolute path of the manual_dir
            data_dir = os.path.abspath(dl_manager.manual_dir)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "claims_filepath": os.path.join(data_dir, "claims_train.jsonl"),
                    "corpus_filepath": os.path.join(data_dir, "corpus.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "claims_filepath": os.path.join(data_dir, "claims_dev.jsonl"),
                    "corpus_filepath": os.path.join(data_dir, "corpus.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "claims_filepath": os.path.join(data_dir, "claims_test.jsonl"),
                    "corpus_filepath": os.path.join(data_dir, "corpus.jsonl"),
                },
            ),
        ]

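    # Note (assumption about the `datasets` library, not part of the original
    # commit): `dl_manager.manual_dir` is typically populated from the
    # `data_dir` argument of `load_dataset`, e.g.
    #   load_dataset("scifact.py", name="as_documents", data_dir="/path/to/data")
    # skips the download and reads the jsonl files from that directory instead.
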
    def _convert_to_output_eval_format(
        self, data: Iterable[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Output should have the format as specified here:
        https://github.com/allenai/scifact/blob/68b98a56d93e0f9da0d2aab4e6c3294699a0f72e/doc/evaluation.md#submission-format

        Each claim is represented as a Dict with:
            "id": int - An integer claim ID.
            "evidence": Dict[str, Dict] - The evidence for the claim:
                "doc_id": Dict[str, Any] - The sentences and label for a single document:
                    "sentences": List[int]
                    "label": str
        """
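        # Illustrative example of one output entry (values made up; not part of
        # the original commit):
        #   {"id": 84, "evidence": {"5956380": {"sentences": [0, 1], "label": "SUPPORT"}}}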
        if self.config.name == VARIANT_DOCUMENTS:
            # Collect all claim-level annotations from all documents
            claim2doc2sent_with_label = dict()
            for document in data:
                doc_id = document["doc_id"]
                # Skip if document does not have any related claims
                if len(document["claims"]["claim"]) == 0:
                    continue
                for idx in range(len(document["claims"]["claim"])):
                    claim_id = document["claims"]["id"][idx]
                    claim_text = document["claims"]["claim"][idx]
                    claim_evidence = document["claims"]["evidence"][idx]
                    if claim_id not in claim2doc2sent_with_label:
                        claim2doc2sent_with_label[claim_id] = dict()
                    if doc_id not in claim2doc2sent_with_label[claim_id]:
                        if len(claim_evidence["label"]) > 0:
                            ev_label = claim_evidence["label"][0]
                            claim2doc2sent_with_label[claim_id][doc_id] = {
                                "label": ev_label,
                                "sentences": [],
                            }
                            for ev_sentences in claim_evidence["sentences"]:
                                claim2doc2sent_with_label[claim_id][doc_id]["sentences"].extend(
                                    ev_sentences
                                )

            outputs = []
            for claim_id in claim2doc2sent_with_label:
                claim_dict = {"id": claim_id, "evidence": dict()}
                for doc_id in claim2doc2sent_with_label[claim_id]:
                    claim_dict["evidence"][doc_id] = {
                        "sentences": claim2doc2sent_with_label[claim_id][doc_id]["sentences"],
                        "label": claim2doc2sent_with_label[claim_id][doc_id]["label"],
                    }
                outputs.append((int(claim_id), claim_dict.copy()))

            outputs_sorted_by_claim_ids = [
                claim for claim_id, claim in sorted(outputs, key=lambda x: x[0])
            ]

            return outputs_sorted_by_claim_ids

        elif self.config.name == VARIANT_CLAIMS:
            raise NotImplementedError(
                f"_convert_to_output_eval_format is not yet implemented for dataset variant {self.config.name}"
            )
        else:
            raise ValueError(f"unknown dataset variant: {self.config.name}")
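
Usage sketch (not part of the commit): assumes the script above is saved locally as scifact.py; depending on the datasets version, trust_remote_code=True may be required for script-based datasets.

    from datasets import load_dataset

    # Document-centric variant; use name="as_claims" for the claim-centric one.
    ds = load_dataset("scifact.py", name="as_documents")
    doc = ds["train"][0]
    # Sequence-of-dict features are materialized as dicts of lists,
    # hence the ["claims"]["claim"] access pattern in _convert_to_output_eval_format.
    print(doc["doc_id"], len(doc["claims"]["claim"]))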