gabrielaltay committed
Commit 16c72c2
1 Parent(s): ba0efe8

upload bigbiohub.py to hub from bigbio repo

Files changed (1):
  1. bigbiohub.py +403 -1
bigbiohub.py CHANGED
@@ -1,7 +1,16 @@
+from collections import defaultdict
 from dataclasses import dataclass
 from enum import Enum
-import datasets
+import logging
+from pathlib import Path
 from types import SimpleNamespace
+from typing import Dict, Iterable, List, Tuple
+
+import bioc
+import datasets
+
+
+logger = logging.getLogger(__name__)
 
 
 BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
@@ -151,3 +160,396 @@ kb_features = datasets.Features(
         ],
     }
 )
+
+
+def get_texts_and_offsets_from_bioc_ann(ann: bioc.BioCAnnotation) -> Tuple:
+
+    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+    text = ann.text
+
+    if len(offsets) > 1:
+        i = 0
+        texts = []
+        for start, end in offsets:
+            chunk_len = end - start
+            texts.append(text[i : chunk_len + i])
+            i += chunk_len
+            while i < len(text) and text[i] == " ":
+                i += 1
+    else:
+        texts = [text]
+
+    return offsets, texts
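
For orientation, a minimal sketch of the splitting heuristic above, applied to a hypothetical discontiguous BioC annotation (the annotation values are illustrative, not part of the commit):

    import bioc

    ann = bioc.BioCAnnotation()
    ann.text = "heart failure"                  # concatenated surface text
    ann.add_location(bioc.BioCLocation(10, 5))  # "heart"
    ann.add_location(bioc.BioCLocation(16, 7))  # "failure"

    offsets, texts = get_texts_and_offsets_from_bioc_ann(ann)
    # offsets == [(10, 15), (16, 23)]
    # texts   == ["heart", "failure"]  (the space between chunks is skipped)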
+
+
+def remove_prefix(a: str, prefix: str) -> str:
+    if a.startswith(prefix):
+        a = a[len(prefix) :]
+    return a
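
Side note: this helper mirrors `str.removeprefix`, which only became available in Python 3.9, so a local copy presumably keeps the loader compatible with older interpreters. For example, remove_prefix("Disease 10 23", "Disease ") returns "10 23", which is how the T-line parser below strips the entity type from the span string.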
+
+
+def parse_brat_file(
+    txt_file: Path,
+    annotation_file_suffixes: List[str] = None,
+    parse_notes: bool = False,
+) -> Dict:
+    """
+    Parse a brat file into the schema defined below.
+    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+    Will include annotator notes, when `parse_notes == True`.
+    brat_features = datasets.Features(
+        {
+            "id": datasets.Value("string"),
+            "document_id": datasets.Value("string"),
+            "text": datasets.Value("string"),
+            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                {
+                    "offsets": datasets.Sequence([datasets.Value("int32")]),
+                    "text": datasets.Sequence(datasets.Value("string")),
+                    "type": datasets.Value("string"),
+                    "id": datasets.Value("string"),
+                }
+            ],
+            "events": [  # E line in brat
+                {
+                    "trigger": datasets.Value(
+                        "string"
+                    ),  # refers to the text_bound_annotation of the trigger,
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "arguments": datasets.Sequence(
+                        {
+                            "role": datasets.Value("string"),
+                            "ref_id": datasets.Value("string"),
+                        }
+                    ),
+                }
+            ],
+            "relations": [  # R line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "head": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "tail": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "type": datasets.Value("string"),
+                }
+            ],
+            "equivalences": [  # Equiv line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "ref_ids": datasets.Sequence(datasets.Value("string")),
+                }
+            ],
+            "attributes": [  # M or A lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "value": datasets.Value("string"),
+                }
+            ],
+            "normalizations": [  # N lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "resource_name": datasets.Value(
+                        "string"
+                    ),  # Name of the resource, e.g. "Wikipedia"
+                    "cuid": datasets.Value(
+                        "string"
+                    ),  # ID in the resource, e.g. 534366
+                    "text": datasets.Value(
+                        "string"
+                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                }
+            ],
+            ### OPTIONAL: Only included when `parse_notes == True`
+            "notes": [  # # lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            ],
+        },
+    )
+    """
+
+    example = {}
+    example["document_id"] = txt_file.with_suffix("").name
+    with txt_file.open() as f:
+        example["text"] = f.read()
+
+    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
+    # for event extraction
+    if annotation_file_suffixes is None:
+        annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+    if len(annotation_file_suffixes) == 0:
+        raise AssertionError(
+            "At least one suffix for the to-be-read annotation files should be given!"
+        )
+
+    ann_lines = []
+    for suffix in annotation_file_suffixes:
+        annotation_file = txt_file.with_suffix(suffix)
+        if annotation_file.exists():
+            with annotation_file.open() as f:
+                ann_lines.extend(f.readlines())
+
+    example["text_bound_annotations"] = []
+    example["events"] = []
+    example["relations"] = []
+    example["equivalences"] = []
+    example["attributes"] = []
+    example["normalizations"] = []
+
+    if parse_notes:
+        example["notes"] = []
+
+    for line in ann_lines:
+        line = line.strip()
+        if not line:
+            continue
+
+        if line.startswith("T"):  # Text bound
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+            ann["offsets"] = []
+            span_str = remove_prefix(fields[1], (ann["type"] + " "))
+            text = fields[2]
+            for span in span_str.split(";"):
+                start, end = span.split()
+                ann["offsets"].append([int(start), int(end)])
+
+            # Heuristically split text of discontiguous entities into chunks
+            ann["text"] = []
+            if len(ann["offsets"]) > 1:
+                i = 0
+                for start, end in ann["offsets"]:
+                    chunk_len = end - start
+                    ann["text"].append(text[i : chunk_len + i])
+                    i += chunk_len
+                    while i < len(text) and text[i] == " ":
+                        i += 1
+            else:
+                ann["text"] = [text]
+
+            example["text_bound_annotations"].append(ann)
+
+        elif line.startswith("E"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+            ann["arguments"] = []
+            for role_ref_id in fields[1].split()[1:]:
+                argument = {
+                    "role": (role_ref_id.split(":"))[0],
+                    "ref_id": (role_ref_id.split(":"))[1],
+                }
+                ann["arguments"].append(argument)
+
+            example["events"].append(ann)
+
+        elif line.startswith("R"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+
+            ann["head"] = {
+                "role": fields[1].split()[1].split(":")[0],
+                "ref_id": fields[1].split()[1].split(":")[1],
+            }
+            ann["tail"] = {
+                "role": fields[1].split()[2].split(":")[0],
+                "ref_id": fields[1].split()[2].split(":")[1],
+            }
+
+            example["relations"].append(ann)
+
+        # '*' seems to be the legacy way to mark equivalences,
+        # but I couldn't find any info on the current way
+        # this might have to be adapted dependent on the brat version
+        # of the annotation
+        elif line.startswith("*"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["ref_ids"] = fields[1].split()[1:]
+
+            example["equivalences"].append(ann)
+
+        elif line.startswith("A") or line.startswith("M"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            info = fields[1].split()
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+
+            if len(info) > 2:
+                ann["value"] = info[2]
+            else:
+                ann["value"] = ""
+
+            example["attributes"].append(ann)
+
+        elif line.startswith("N"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2]
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            ann["resource_name"] = info[2].split(":")[0]
+            ann["cuid"] = info[2].split(":")[1]
+            example["normalizations"].append(ann)
+
+        elif parse_notes and line.startswith("#"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            example["notes"].append(ann)
+
+    return example
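
To make the line formats concrete, here is a hypothetical three-line annotation file (fields are tab-separated; ids, offsets, and CUIs are illustrative only) and the entries the parser above would produce for it:

    T1	Disease 10 23	heart failure
    N1	Reference T1 MESH:D006333	Heart Failure
    #1	AnnotatorNotes T1	double-checked span

    # With parse_notes=True, parse_brat_file would collect:
    #   "text_bound_annotations": [{"id": "T1", "type": "Disease",
    #                               "offsets": [[10, 23]], "text": ["heart failure"]}]
    #   "normalizations": [{"id": "N1", "type": "Reference", "ref_id": "T1",
    #                       "resource_name": "MESH", "cuid": "D006333",
    #                       "text": "Heart Failure"}]
    #   "notes": [{"id": "#1", "type": "AnnotatorNotes", "ref_id": "T1",
    #              "text": "double-checked span"}]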
+
+
+def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+    """
+    Transform a brat parse (conforming to the standard brat schema) obtained with
+    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+    :param brat_parse:
+    """
+
+    unified_example = {}
+
+    # Prefix all ids with document id to ensure global uniqueness,
+    # because brat ids are only unique within their document
+    id_prefix = brat_parse["document_id"] + "_"
+
+    # identical
+    unified_example["document_id"] = brat_parse["document_id"]
+    unified_example["passages"] = [
+        {
+            "id": id_prefix + "_text",
+            "type": "abstract",
+            "text": [brat_parse["text"]],
+            "offsets": [[0, len(brat_parse["text"])]],
+        }
+    ]
+
+    # get normalizations
+    ref_id_to_normalizations = defaultdict(list)
+    for normalization in brat_parse["normalizations"]:
+        ref_id_to_normalizations[normalization["ref_id"]].append(
+            {
+                "db_name": normalization["resource_name"],
+                "db_id": normalization["cuid"],
+            }
+        )
+
+    # separate entities and event triggers
+    unified_example["events"] = []
+    non_event_ann = brat_parse["text_bound_annotations"].copy()
+    for event in brat_parse["events"]:
+        event = event.copy()
+        event["id"] = id_prefix + event["id"]
+        trigger = next(
+            tr
+            for tr in brat_parse["text_bound_annotations"]
+            if tr["id"] == event["trigger"]
+        )
+        if trigger in non_event_ann:
+            non_event_ann.remove(trigger)
+        event["trigger"] = {
+            "text": trigger["text"].copy(),
+            "offsets": trigger["offsets"].copy(),
+        }
+        for argument in event["arguments"]:
+            argument["ref_id"] = id_prefix + argument["ref_id"]
+
+        unified_example["events"].append(event)
+
+    unified_example["entities"] = []
+    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+    for ann in non_event_ann:
+        entity_ann = ann.copy()
+        entity_ann["id"] = id_prefix + entity_ann["id"]
+        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+        unified_example["entities"].append(entity_ann)
+
+    # massage relations
+    unified_example["relations"] = []
+    skipped_relations = set()
+    for ann in brat_parse["relations"]:
+        if (
+            ann["head"]["ref_id"] not in anno_ids
+            or ann["tail"]["ref_id"] not in anno_ids
+        ):
+            skipped_relations.add(ann["id"])
+            continue
+        unified_example["relations"].append(
+            {
+                "arg1_id": id_prefix + ann["head"]["ref_id"],
+                "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                "id": id_prefix + ann["id"],
+                "type": ann["type"],
+                "normalized": [],
+            }
+        )
+    if len(skipped_relations) > 0:
+        example_id = brat_parse["document_id"]
+        logger.info(
+            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+            f" Skip (for now): "
+            f"{list(skipped_relations)}"
+        )
+
+    # get coreferences
+    unified_example["coreferences"] = []
+    for i, ann in enumerate(brat_parse["equivalences"], start=1):
+        is_entity_cluster = True
+        for ref_id in ann["ref_ids"]:
+            if not ref_id.startswith("T"):  # not textbound -> no entity
+                is_entity_cluster = False
+            elif ref_id not in anno_ids:  # event trigger -> no entity
+                is_entity_cluster = False
+        if is_entity_cluster:
+            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+            unified_example["coreferences"].append(
+                {"id": id_prefix + str(i), "entity_ids": entity_ids}
+            )
+    return unified_example
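
Finally, a minimal sketch of how the two helpers compose end to end (the file path is hypothetical; this usage example is not part of the commit):

    from pathlib import Path

    brat_parse = parse_brat_file(Path("data/1234.txt"), parse_notes=False)
    kb_example = brat_parse_to_bigbio_kb(brat_parse)
    # kb_example now holds document_id, passages, entities (with their
    # normalizations attached under "normalized"), events, relations, and
    # coreferences in the bigbio_kb layout, with every brat id prefixed by
    # the document id for global uniqueness.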