# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LegalGLUE: A Benchmark Dataset for Legal NLP models."""
import os
import textwrap

import datasets

_DESCRIPTION = """\
The Legal General Language Understanding Evaluation (LegalGLUE) benchmark is
a collection of datasets for evaluating model performance across a diverse
set of legal NLP tasks.
"""
GERMAN_LER = [
"B-AN",
"B-EUN",
"B-GRT",
"B-GS",
"B-INN",
"B-LD",
"B-LDS",
"B-LIT",
"B-MRK",
"B-ORG",
"B-PER",
"B-RR",
"B-RS",
"B-ST",
"B-STR",
"B-UN",
"B-VO",
"B-VS",
"B-VT",
"I-AN",
"I-EUN",
"I-GRT",
"I-GS",
"I-INN",
"I-LD",
"I-LDS",
"I-LIT",
"I-MRK",
"I-ORG",
"I-PER",
"I-RR",
"I-RS",
"I-ST",
"I-STR",
"I-UN",
"I-VO",
"I-VS",
"I-VT",
"O"]
LENER_BR = [
"O",
"B-ORGANIZACAO",
"I-ORGANIZACAO",
"B-PESSOA",
"I-PESSOA",
"B-TEMPO",
"I-TEMPO",
"B-LOCAL",
"I-LOCAL",
"B-LEGISLACAO",
"I-LEGISLACAO",
"B-JURISPRUDENCIA",
"I-JURISPRUDENCIA",
]


class LegalGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for LegalGLUE."""

    def __init__(
        self,
        label_classes,  # the list of classes of the labels
        multi_label,  # True if the task is multi-label
        homepage,  # homepage of the original dataset
        citation,  # citation for the dataset
        data_url,
        data_files,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.1.0", ""), **kwargs)
        self.label_classes = label_classes
        self.multi_label = multi_label
        self.homepage = homepage
        self.citation = citation
        self.data_url = data_url
        self.data_files = data_files


class LegalGLUE(datasets.GeneratorBasedBuilder):
    """LegalGLUE: A Benchmark Dataset for Legal Language Understanding."""

BUILDER_CONFIGS = [
LegalGlueConfig(
name="german_ler",
            description=textwrap.dedent(
                """\
                German LER: fine-grained named entity recognition in German
                legal documents, built from decisions of seven German federal
                courts and annotated with 19 entity classes in IOB2 format."""
),
label_classes=GERMAN_LER,
multi_label=False,
data_url="https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip",
data_files=["bag.conll", "bfh.conll", "bgh.conll", "bpatg.conll", "bsg.conll", "bverfg.conll", "bverwg.conll"],
homepage="https://github.com/elenanereiss/Legal-Entity-Recognition",
citation=textwrap.dedent("""\
@inproceedings{leitner2019fine,
author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
title = {{Fine-grained Named Entity Recognition in Legal Documents}},
booktitle = {Semantic Systems. The Power of AI and Knowledge
Graphs. Proceedings of the 15th International Conference
(SEMANTiCS 2019)},
year = 2019,
editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
Maleshkova and Tassilo Pellegrini and Harald Sack and York
Sure-Vetter},
keywords = {aip},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
number = {11702},
address = {Karlsruhe, Germany},
month = 9,
note = {10/11 September 2019},
pages = {272--287},
pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
""")
),
# LegalGlueConfig(
# name="lener_br",
# description=textwrap.dedent(
# """\
# LeNER-Br is a Portuguese language dataset for named entity recognition
# applied to legal documents. LeNER-Br consists entirely of manually annotated
# legislation and legal cases texts and contains tags for persons, locations,
# time entities, organizations, legislation and legal cases.
# To compose the dataset, 66 legal documents from several Brazilian Courts were
# collected. Courts of superior and state levels were considered, such as Supremo
# Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas
# Gerais and Tribunal de Contas da União. In addition, four legislation documents
# were collected, such as "Lei Maria da Penha", giving a total of 70 documents
# """
# ),
# label_classes=LENER_BR,
# multi_label=False,
# data_url="https://github.com/peluz/lener-br/raw/master/leNER-Br/",
# data_files=["train/train.conll", "dev/dev.conll", "test/test.conll"],
# homepage="https://cic.unb.br/~teodecampos/LeNER-Br/",
# citation=textwrap.dedent("""\
# @inproceedings{luz_etal_propor2018,
# author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
# Renato R. R. {de Oliveira} and Matheus Stauffer and
# Samuel Couto and Paulo Bermejo},
# title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
# booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
# publisher = {Springer},
# series = {Lecture Notes on Computer Science ({LNCS})},
# pages = {313--323},
# year = {2018},
# month = {September 24-26},
# address = {Canela, RS, Brazil},
# doi = {10.1007/978-3-319-99722-3_32},
# url = {https://cic.unb.br/~teodecampos/LeNER-Br/},
# }
# """)
# )
]

    def _info(self):
        if self.config.name == "german_ler":
            features = {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(names=self.config.label_classes)
                ),
            }
        else:
            raise ValueError(f"Unsupported config name: {self.config.name}")
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.homepage,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "german_ler":
            archive = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "train",
                        "files": [os.path.join(archive, file) for file in self.config.data_files],
                    },
                )
            ]
#elif self.config_name == "lener_br":
# else:
# return [
# datasets.SplitGenerator(
# name=datasets.Split.TRAIN,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": self.config.data_files,
# "split": "train",
# "files": dl_manager.iter_archive(archive),
# },
# ),
# datasets.SplitGenerator(
# name=datasets.Split.TEST,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": self.config.data_files,
# "split": "test",
# "files": dl_manager.iter_archive(archive),
# },
# ),
# datasets.SplitGenerator(
# name=datasets.Split.VALIDATION,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": self.config.data_files,
# "split": "validation",
# "files": dl_manager.iter_archive(archive),
# },
# ),
# ]

    def _generate_examples(self, filepath, split, files):
        if self.config.name == "german_ler":
            texts, labels = [], []
            for file in files:
                with open(file, encoding="utf-8") as f:
                    tokens = []
                    tags = []
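                    # Each .conll file holds one whitespace-separated
                    # "token tag" pair per line, with blank lines marking
                    # sentence boundaries. Illustrative layout (not actual
                    # corpus data):
                    #     Bundesgerichtshof B-GRT
                    #     hat O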
                    for line in f:
                        if not line.strip():
                            # Blank line: the current sentence is complete.
                            if tokens:
                                texts.append(tokens)
                                labels.append(tags)
                                tokens = []
                                tags = []
                        else:
                            token, tag = line.split()
                            tokens.append(token)
                            tags.append(tag)
                    # Flush a final sentence not followed by a blank line.
                    if tokens:
                        texts.append(tokens)
                        labels.append(tags)
            for i, (tokens, ner_tags) in enumerate(zip(texts, labels)):
                yield i, {
                    "id": str(i),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
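

# Minimal usage sketch (illustrative only, assuming this script is saved
# locally as "legalglue.py" and the data URL is reachable):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("legalglue.py", "german_ler")
#     example = dataset["train"][0]
#     print(example["tokens"], example["ner_tags"])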