jfrenz commited on
Commit
41eed00
1 Parent(s): 5ea2c6b

Upload legalglue.py

Browse files
Files changed (1) hide show
  1. legalglue.py +219 -0
legalglue.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """LegalGLUE: A Benchmark Dataset for Legal NLP models."""
16
+
17
import csv
import json
import re
import textwrap

import datasets
22
+
23
+
24
+ _DESCRIPTION = """\
25
+ Legal General Language Understanding Evaluation (LegalGLUE) benchmark is
26
+ a collection of datasets for evaluating model performance across a diverse set of legal NLP tasks
27
+ """
28
+
29
+ GERMAN_LER = [
30
+ "B-AN",
31
+ "B-EUN",
32
+ "B-GRT",
33
+ "B-GS",
34
+ "B-INN",
35
+ "B-LD",
36
+ "B-LDS",
37
+ "B-LIT",
38
+ "B-MRK",
39
+ "B-ORG",
40
+ "B-PER",
41
+ "B-RR",
42
+ "B-RS",
43
+ "B-ST",
44
+ "B-STR",
45
+ "B-UN",
46
+ "B-VO",
47
+ "B-VS",
48
+ "B-VT",
49
+ "I-AN",
50
+ "I-EUN",
51
+ "I-GRT",
52
+ "I-GS",
53
+ "I-INN",
54
+ "I-LD",
55
+ "I-LDS",
56
+ "I-LIT",
57
+ "I-MRK",
58
+ "I-ORG",
59
+ "I-PER",
60
+ "I-RR",
61
+ "I-RS",
62
+ "I-ST",
63
+ "I-STR",
64
+ "I-UN",
65
+ "I-VO",
66
+ "I-VS",
67
+ "I-VT",
68
+ "O"]
69
+
70
+
71
class LegalGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for LegalGLUE.

    Args:
        label_classes: the list of classes of the labels.
        multi_label: boolean, whether the task is multi-label.
        homepage: homepage of the original dataset.
        citation: citation for the dataset.
        data_url: URL of the archive holding the data files.
        data_files: file names inside the archive to read.
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(
        self,
        label_classes,
        multi_label,
        homepage,
        citation,
        data_url,
        data_files,
        **kwargs,
    ):
        # Pin the config version; remaining kwargs (name, description, ...)
        # are handled by the base class.
        super().__init__(version=datasets.Version("1.1.0", ""), **kwargs)
        self.label_classes = label_classes
        self.multi_label = multi_label
        self.homepage = homepage
        self.citation = citation
        self.data_url = data_url
        self.data_files = data_files
91
+
92
+
93
+
94
class LexGLUE(datasets.GeneratorBasedBuilder):
    """LegalGLUE: A Benchmark Dataset for Legal Language Understanding"""

    BUILDER_CONFIGS = [
        # Fixed: the original instantiated the undefined name ``LexGlueConfig``
        # (NameError at import time); the config class defined in this file is
        # ``LegalGlueConfig``.
        LegalGlueConfig(
            name="german_ler",
            description=textwrap.dedent(
                """\
                description"""
            ),
            label_classes=GERMAN_LER,
            multi_label=False,
            data_url="https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip",
            data_files=["bag.conll", "bfh.conll", "bgh.conll", "bpatg.conll", "bsg.conll", "bverfg.conll", "bverwg.conll"],
            homepage="https://github.com/elenanereiss/Legal-Entity-Recognition",
            citation=textwrap.dedent("""\
            @inproceedings{leitner2019fine,
            author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
            title = {{Fine-grained Named Entity Recognition in Legal Documents}},
            booktitle = {Semantic Systems. The Power of AI and Knowledge
            Graphs. Proceedings of the 15th International Conference
            (SEMANTiCS 2019)},
            year = 2019,
            editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
            Maleshkova and Tassilo Pellegrini and Harald Sack and York
            Sure-Vetter},
            keywords = {aip},
            publisher = {Springer},
            series = {Lecture Notes in Computer Science},
            number = {11702},
            address = {Karlsruhe, Germany},
            month = 9,
            note = {10/11 September 2019},
            pages = {272--287},
            pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
            """),
        )
    ]

    def _info(self):
        """Build the ``DatasetInfo`` (features, homepage, citation) for the active config."""
        if self.config.name == "german_ler":
            features = {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=self.config.label_classes
                    )
                ),
            }

        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.homepage,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Download the data archive and declare the splits.

        Fixed: the original tested ``self.config_name`` (no such attribute on
        the builder — it is ``self.config.name``) and returned a bare
        ``SplitGenerator`` instead of a list in the ``german_ler`` branch.
        """
        archive = dl_manager.download(self.config.data_url)
        if self.config.name == "german_ler":
            # German LER ships as a single (train-only) collection of files.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "train",
                        "files": dl_manager.iter_archive(archive),
                    },
                )
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "train",
                        "files": dl_manager.iter_archive(archive),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "test",
                        "files": dl_manager.iter_archive(archive),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": self.config.data_files,
                        "split": "validation",
                        "files": dl_manager.iter_archive(archive),
                    },
                ),
            ]

    def _generate_examples(self, filepath, split, files):
        """Yield ``(key, example)`` pairs parsed from the CoNLL files in the archive.

        Each sentence is a block of ``token<whitespace>tag`` lines separated by
        blank lines; one example is emitted per sentence.
        """
        # Fixed: the original tested the nonexistent ``self.config_name``.
        if self.config.name == "german_ler":
            texts, labels = [], []
            for path, file in files:
                if path in filepath:
                    # Fixed: ``dl_manager.iter_archive`` yields binary file
                    # objects, which have no ``read_text`` method — read the
                    # bytes and decode explicitly.
                    raw_text = file.read().decode("utf-8").strip()
                    # Sentences are separated by a blank (possibly tab-only) line.
                    raw_sentences = re.split(r"\n\t?\n", raw_text)

                    for sentence in raw_sentences:
                        tokens = []
                        tags = []
                        for line in sentence.split("\n"):
                            token, tag = line.split()
                            tokens.append(token)
                            tags.append(tag)
                        texts.append(tokens)
                        labels.append(tags)
            # Fixed: the original looped ``for i in enumerate(texts)`` (binding
            # ``i`` to an (index, item) tuple) and then indexed the undefined
            # name ``text``.
            for idx, (tokens, ner_tags) in enumerate(zip(texts, labels)):
                yield idx, {
                    "id": str(idx),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }