phpaiola committed on
Commit 9c57dce
1 Parent(s): dcf838a

Delete recognasumm.py

Files changed (1)
  1. recognasumm.py +0 -93
recognasumm.py DELETED
@@ -1,93 +0,0 @@
- import csv
- import json
- import os
-
- import datasets
-
- _CITATION = """\
- Coming soon
- }
- """
-
- _DESCRIPTION = """\
- RecognaSumm is a novel and comprehensive database specifically designed for the task of automatic text summarization in Portuguese. RecognaSumm stands out due to its diverse origin, composed of news collected from a variety of information sources, including agencies and online news portals. The database was constructed using web scraping techniques and careful curation, resulting in a rich and representative collection of documents covering various topics and journalistic styles. The creation of RecognaSumm aims to fill a significant void in Portuguese language summarization research, providing a training and evaluation foundation that can be used for the development and enhancement of automated summarization models.
- """
-
- _HOMEPAGE = ""
-
- _LICENSE = "mit"
-
- class RecognaSumm(datasets.GeneratorBasedBuilder):
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="default", version=VERSION, description="Default setup of dataset"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "default"
-
-     def _info(self):
-
-         features = datasets.Features(
-             {
-                 "index": datasets.Value("int64"),
-                 "Titulo": datasets.Value("string"),
-                 "Subtitulo": datasets.Value("string"),
-                 "Noticia": datasets.Value("string"),
-                 "Categoria": datasets.Value("string"),
-                 "Autor": datasets.Value("string"),
-                 "Data": datasets.Value("string"),
-                 "URL": datasets.Value("string"),
-                 "Autor_corrigido": datasets.Value("string"),
-                 "Sumario": datasets.Value("string"),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": "train.jsonl",
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": "validation.jsonl",
-                     "split": "validation",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": "test.jsonl",
-                     "split": "test"
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 # emit every feature declared in _info so each example matches the dataset schema
-                 yield key, {name: data.get(name) for name in self.info.features}
-
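
With the loading script removed, a minimal sketch of reading the splits directly with the generic "json" builder, assuming the files the script pointed at (train.jsonl, validation.jsonl, test.jsonl) are still available in the dataset repository (their presence and paths are assumptions, not confirmed by this commit):

from datasets import load_dataset

# Read the JSONL splits directly, without a custom loading script.
# Paths are taken from the deleted script and assumed to still exist in the repo.
data_files = {
    "train": "train.jsonl",
    "validation": "validation.jsonl",
    "test": "test.jsonl",
}
recognasumm = load_dataset("json", data_files=data_files)

print(recognasumm)                         # split sizes and column names
print(recognasumm["train"][0]["Sumario"])  # inspect one reference summary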