# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LegalGLUE: A Benchmark Dataset for Legal NLP models."""
import os
import textwrap

import datasets
_DESCRIPTION = """\
The Legal General Language Understanding Evaluation (LegalGLUE) benchmark is
a collection of datasets for evaluating model performance across a diverse set
of legal NLP tasks.
"""
GERMAN_LER = [
    "B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT",
    "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN",
    "B-VO", "B-VS", "B-VT",
    "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT",
    "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN",
    "I-VO", "I-VS", "I-VT",
    "O",
]
class LegalGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for LegalGLUE."""

    def __init__(
        self,
        label_classes,  # the list of label classes
        multi_label,  # boolean, True if the task is multi-label
        homepage,  # homepage of the original dataset
        citation,  # citation for the dataset
        data_url,  # URL of the archive containing the data files
        data_files,  # names of the data files inside the archive
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.1.0", ""), **kwargs)
        self.label_classes = label_classes
        self.multi_label = multi_label
        self.homepage = homepage
        self.citation = citation
        self.data_url = data_url
        self.data_files = data_files
class LegalGLUE(datasets.GeneratorBasedBuilder):
    """LegalGLUE: A Benchmark Dataset for Legal Language Understanding"""

    BUILDER_CONFIGS = [
        LegalGlueConfig(
            name="german_ler",
            description=textwrap.dedent(
                """\
                German Legal Entity Recognition (LER): fine-grained named
                entity recognition over decisions of German federal courts
                (Leitner et al., 2019)."""
            ),
            label_classes=GERMAN_LER,
            multi_label=False,
            data_url="https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip",
            data_files=["bag.conll", "bfh.conll", "bgh.conll", "bpatg.conll", "bsg.conll", "bverfg.conll", "bverwg.conll"],
            homepage="https://github.com/elenanereiss/Legal-Entity-Recognition",
            citation=textwrap.dedent("""\
                @inproceedings{leitner2019fine,
                  author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
                  title = {{Fine-grained Named Entity Recognition in Legal Documents}},
                  booktitle = {Semantic Systems. The Power of AI and Knowledge
                    Graphs. Proceedings of the 15th International Conference
                    (SEMANTiCS 2019)},
                  year = 2019,
                  editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
                    Maleshkova and Tassilo Pellegrini and Harald Sack and York
                    Sure-Vetter},
                  keywords = {aip},
                  publisher = {Springer},
                  series = {Lecture Notes in Computer Science},
                  number = {11702},
                  address = {Karlsruhe, Germany},
                  month = 9,
                  note = {10/11 September 2019},
                  pages = {272--287},
                  pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
                """)
        )
    ]
    def _info(self):
        if self.config.name == "german_ler":
            features = {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=self.config.label_classes
                    )
                ),
            }
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.homepage,
            citation=self.config.citation,
        )
    def _split_generators(self, dl_manager):
        if self.config.name == "german_ler":
            # The German LER corpus ships as one archive with a .conll file
            # per court; together they form a single train split.
            archive = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "train",
                        "archive": archive,
                    },
                )
            ]
        else:
            # Placeholder for future configurations that ship explicit
            # train/test/validation splits.
            archive = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "train",
                        "archive": archive,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "test",
                        "archive": archive,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "validation",
                        "archive": archive,
                    },
                ),
            ]
    def _generate_examples(self, filepath, split, archive):
        if self.config.name == "german_ler":
            texts, labels = [], []
            for file in filepath:
                path = os.path.join(archive, file)
                with open(path, encoding="utf-8") as f:
                    tokens = []
                    tags = []
                    for line in f:
                        # Blank lines separate sentences in the CoNLL files.
                        if line == "" or line == "\n":
                            if tokens:
                                texts.append(tokens)
                                labels.append(tags)
                                tokens = []
                                tags = []
                        else:
                            # Each non-blank line holds one "token tag" pair.
                            token, tag = line.split()
                            tokens.append(token)
                            tags.append(tag.rstrip())
                    # Flush the last sentence if the file does not end with
                    # a blank line.
                    if tokens:
                        texts.append(tokens)
                        labels.append(tags)
            for i, tokens in enumerate(texts):
                yield i, {
                    "id": str(i),
                    "tokens": tokens,
                    "ner_tags": labels[i],
                }
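# Minimal usage sketch. The local file name `legal_glue.py` is an assumption
# for illustration, not part of the source:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("legal_glue.py", "german_ler")
#     sample = dataset["train"][0]
#     print(sample["tokens"][:5], sample["ner_tags"][:5])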