jfrenz committed on
Commit
4d385c3
1 Parent(s): 75b9028

added lener_br dataset

Browse files
Files changed (2) hide show
  1. dataset_infos.json +1 -1
  2. legalglue.py +110 -41
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"german_ler": {"description": "description", "citation": "@inproceedings{leitner2019fine,\nauthor = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\ntitle = {{Fine-grained Named Entity Recognition in Legal Documents}},\nbooktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\nyear = 2019,\neditor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\nkeywords = {aip},\npublisher = {Springer},\nseries = {Lecture Notes in Computer Science},\nnumber = {11702},\naddress = {Karlsruhe, Germany},\nmonth = 9,\nnote = {10/11 September 2019},\npages = {272--287},\npdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT", "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN", "B-VO", "B-VS", "B-VT", "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT", "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN", "I-VO", "I-VS", "I-VT", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "german_ler", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38853928, "num_examples": 66723, "dataset_name": "legal_glue"}}, "download_checksums": 
{"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip": {"num_bytes": 4392913, "checksum": "f0427df5fb8bfdefe5228bc0fa0e75e9cfa782d1a78e32582cce096473c88567"}}, "download_size": 4392913, "post_processing_size": null, "dataset_size": 38853928, "size_in_bytes": 43246841}}
 
1
+ {"german_ler": {"description": "description", "citation": "@inproceedings{leitner2019fine,\nauthor = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\ntitle = {{Fine-grained Named Entity Recognition in Legal Documents}},\nbooktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\nyear = 2019,\neditor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\nkeywords = {aip},\npublisher = {Springer},\nseries = {Lecture Notes in Computer Science},\nnumber = {11702},\naddress = {Karlsruhe, Germany},\nmonth = 9,\nnote = {10/11 September 2019},\npages = {272--287},\npdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT", "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN", "B-VO", "B-VS", "B-VT", "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT", "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN", "I-VO", "I-VS", "I-VT", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "german_ler", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38853928, "num_examples": 66723, "dataset_name": "legal_glue"}}, "download_checksums": 
{"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip": {"num_bytes": 4392913, "checksum": "f0427df5fb8bfdefe5228bc0fa0e75e9cfa782d1a78e32582cce096473c88567"}}, "download_size": 4392913, "post_processing_size": null, "dataset_size": 38853928, "size_in_bytes": 43246841}, "lener_br": {"description": "LeNER-Br is a Portuguese language dataset for named entity recognition\napplied to legal documents. LeNER-Br consists entirely of manually annotated\nlegislation and legal cases texts and contains tags for persons, locations,\ntime entities, organizations, legislation and legal cases.\nTo compose the dataset, 66 legal documents from several Brazilian Courts were\ncollected. Courts of superior and state levels were considered, such as Supremo\nTribunal Federal, Superior Tribunal de Justi\u00e7a, Tribunal de Justi\u00e7a de Minas\nGerais and Tribunal de Contas da Uni\u00e3o. In addition, four legislation documents\nwere collected, such as \"Lei Maria da Penha\", giving a total of 70 documents\n", "citation": "@inproceedings{luz_etal_propor2018,\nauthor = {Pedro H. {Luz de Araujo} and Te'{o}filo E. {de Campos} and\nRenato R. R. 
{de Oliveira} and Matheus Stauffer and\nSamuel Couto and Paulo Bermejo},\ntitle = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},\nbooktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},\npublisher = {Springer},\nseries = {Lecture Notes on Computer Science ({LNCS})},\npages = {313--323},\nyear = {2018},\nmonth = {September 24-26},\naddress = {Canela, RS, Brazil},\ndoi = {10.1007/978-3-319-99722-3_32},\nurl = {https://cic.unb.br/~teodecampos/LeNER-Br/},\n}\n", "homepage": "https://cic.unb.br/~teodecampos/LeNER-Br/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 13, "names": ["O", "B-ORGANIZACAO", "I-ORGANIZACAO", "B-PESSOA", "I-PESSOA", "B-TEMPO", "I-TEMPO", "B-LOCAL", "I-LOCAL", "B-LEGISLACAO", "I-LEGISLACAO", "B-JURISPRUDENCIA", "I-JURISPRUDENCIA"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "lener_br", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7153474101, "num_examples": 7828, "dataset_name": "legal_glue"}, "test": {"name": "test", "num_bytes": 195782089, "num_examples": 1177, "dataset_name": "legal_glue"}, "validation": {"name": "validation", "num_bytes": 280965196, "num_examples": 1390, "dataset_name": "legal_glue"}}, "download_checksums": {"https://github.com/peluz/lener-br/raw/master/leNER-Br/train/train.conll": {"num_bytes": 2142199, "checksum": "6fdf9066333c84565f9e3d28ee8f0f519336bece69b63f8d78b8de0fe96dcd47"}, "https://github.com/peluz/lener-br/raw/master/leNER-Br/test/test.conll": {"num_bytes": 438441, 
"checksum": "f90cd26a31afc2d1f132c4473d40c26d2283a98b374025fa5b5985b723dce825"}, "https://github.com/peluz/lener-br/raw/master/leNER-Br/dev/dev.conll": {"num_bytes": 402497, "checksum": "7e350feb828198031e57c21d6aadbf8dac92b19a684e45d7081c6cb491e2063b"}}, "download_size": 2983137, "post_processing_size": null, "dataset_size": 7630221386, "size_in_bytes": 7633204523}}
legalglue.py CHANGED
@@ -144,44 +144,44 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
144
  pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
145
  """)
146
  ),
147
- # LegalGlueConfig(
148
- # name="lener_br",
149
- # description=textwrap.dedent(
150
- # """\
151
- # LeNER-Br is a Portuguese language dataset for named entity recognition
152
- # applied to legal documents. LeNER-Br consists entirely of manually annotated
153
- # legislation and legal cases texts and contains tags for persons, locations,
154
- # time entities, organizations, legislation and legal cases.
155
- # To compose the dataset, 66 legal documents from several Brazilian Courts were
156
- # collected. Courts of superior and state levels were considered, such as Supremo
157
- # Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas
158
- # Gerais and Tribunal de Contas da União. In addition, four legislation documents
159
- # were collected, such as "Lei Maria da Penha", giving a total of 70 documents
160
- # """
161
- # ),
162
- # label_classes=LENER_BR,
163
- # multi_label=False,
164
- # data_url="https://github.com/peluz/lener-br/raw/master/leNER-Br/",
165
- # data_files=["train/train.conll", "dev/dev.conll", "test/test.conll"],
166
- # homepage="https://cic.unb.br/~teodecampos/LeNER-Br/",
167
- # citation=textwrap.dedent("""\
168
- # @inproceedings{luz_etal_propor2018,
169
- # author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
170
- # Renato R. R. {de Oliveira} and Matheus Stauffer and
171
- # Samuel Couto and Paulo Bermejo},
172
- # title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
173
- # booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
174
- # publisher = {Springer},
175
- # series = {Lecture Notes on Computer Science ({LNCS})},
176
- # pages = {313--323},
177
- # year = {2018},
178
- # month = {September 24-26},
179
- # address = {Canela, RS, Brazil},
180
- # doi = {10.1007/978-3-319-99722-3_32},
181
- # url = {https://cic.unb.br/~teodecampos/LeNER-Br/},
182
- # }
183
- # """)
184
- # )
185
  ]
186
 
187
  def _info(self):
@@ -195,7 +195,16 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
195
  )
196
  )
197
  }
198
-
 
 
 
 
 
 
 
 
 
199
  return datasets.DatasetInfo(
200
  description=self.config.description,
201
  features=datasets.Features(features),
@@ -216,8 +225,42 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
216
  "files": [os.path.join(archive,file) for file in self.config.data_files]#dl_manager.iter_archive(archive),
217
  },
218
  )]
219
- #elif self.config_name == "lener_br":
220
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
 
222
  # else:
223
  # return [
@@ -281,3 +324,29 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
281
  "tokens": tokens,
282
  "ner_tags": ner_tags,
283
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
145
  """)
146
  ),
147
+ LegalGlueConfig(
148
+ name="lener_br",
149
+ description=textwrap.dedent(
150
+ """\
151
+ LeNER-Br is a Portuguese language dataset for named entity recognition
152
+ applied to legal documents. LeNER-Br consists entirely of manually annotated
153
+ legislation and legal cases texts and contains tags for persons, locations,
154
+ time entities, organizations, legislation and legal cases.
155
+ To compose the dataset, 66 legal documents from several Brazilian Courts were
156
+ collected. Courts of superior and state levels were considered, such as Supremo
157
+ Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas
158
+ Gerais and Tribunal de Contas da União. In addition, four legislation documents
159
+ were collected, such as "Lei Maria da Penha", giving a total of 70 documents
160
+ """
161
+ ),
162
+ label_classes=LENER_BR,
163
+ multi_label=False,
164
+ data_url="https://github.com/peluz/lener-br/raw/master/leNER-Br/",
165
+ data_files=["train/train.conll", "dev/dev.conll", "test/test.conll"],
166
+ homepage="https://cic.unb.br/~teodecampos/LeNER-Br/",
167
+ citation=textwrap.dedent("""\
168
+ @inproceedings{luz_etal_propor2018,
169
+ author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
170
+ Renato R. R. {de Oliveira} and Matheus Stauffer and
171
+ Samuel Couto and Paulo Bermejo},
172
+ title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
173
+ booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
174
+ publisher = {Springer},
175
+ series = {Lecture Notes on Computer Science ({LNCS})},
176
+ pages = {313--323},
177
+ year = {2018},
178
+ month = {September 24-26},
179
+ address = {Canela, RS, Brazil},
180
+ doi = {10.1007/978-3-319-99722-3_32},
181
+ url = {https://cic.unb.br/~teodecampos/LeNER-Br/},
182
+ }
183
+ """)
184
+ )
185
  ]
186
 
187
  def _info(self):
 
195
  )
196
  )
197
  }
198
+ elif self.config.name == "lener_br":
199
+ features = {
200
+ "id": datasets.Value("string"),
201
+ "tokens": datasets.Sequence(datasets.Value("string")),
202
+ "ner_tags": datasets.Sequence(
203
+ datasets.features.ClassLabel(
204
+ names=self.config.label_classes
205
+ )
206
+ )
207
+ }
208
  return datasets.DatasetInfo(
209
  description=self.config.description,
210
  features=datasets.Features(features),
 
225
  "files": [os.path.join(archive,file) for file in self.config.data_files]#dl_manager.iter_archive(archive),
226
  },
227
  )]
228
+ elif self.config.name == "lener_br":
229
+ urls_to_download = {
230
+ "train": self.config.data_url + self.config.data_files[0],
231
+ "dev": self.config.data_url + self.config.data_files[2],
232
+ "test": self.config.data_url + self.config.data_files[1],
233
+ }
234
+ archive = dl_manager.download_and_extract(urls_to_download)
235
+ return[
236
+ datasets.SplitGenerator(
237
+ name=datasets.Split.TRAIN,
238
+ # These kwargs will be passed to _generate_examples
239
+ gen_kwargs={
240
+ "filepath": self.config.data_files,
241
+ "split": "train",
242
+ "files": archive["train"],
243
+ },
244
+ ),
245
+ datasets.SplitGenerator(
246
+ name=datasets.Split.TEST,
247
+ # These kwargs will be passed to _generate_examples
248
+ gen_kwargs={
249
+ "filepath": self.config.data_files,
250
+ "split": "test",
251
+ "files": archive["test"],
252
+ },
253
+ ),
254
+ datasets.SplitGenerator(
255
+ name=datasets.Split.VALIDATION,
256
+ # These kwargs will be passed to _generate_examples
257
+ gen_kwargs={
258
+ "filepath": self.config.data_files,
259
+ "split": "validation",
260
+ "files": archive["dev"],
261
+ },
262
+ ),
263
+ ]
264
 
265
  # else:
266
  # return [
 
324
  "tokens": tokens,
325
  "ner_tags": ner_tags,
326
  }
327
+ elif self.config.name == "lener_br":
328
+ with open(files, encoding="utf-8") as f:
329
+ id = 0
330
+ tokens = []
331
+ tags = []
332
+ for line in f:
333
+ if line == "" or line == "\n":
334
+ if tokens:
335
+ yield id, {
336
+ "id": str(id),
337
+ "tokens": tokens,
338
+ "ner_tags": tags,
339
+ }
340
+ id += 1
341
+ tokens = []
342
+ ner_tags = []
343
+ else:
344
+ token, tag = line.split()
345
+ tokens.append(token)
346
+ tags.append(tag.rstrip())
347
+
348
+ yield id, {
349
+ "id": str(id),
350
+ "tokens": tokens,
351
+ "ner_tags": ner_tags,
352
+ }