AnnaSallesRius committed on
Commit
4fd2d88
1 Parent(s): 183f101

Upload 4 files

Browse files
Files changed (4) hide show
  1. OLD/Parafraseja.py +100 -0
  2. OLD/dev.jsonl +0 -0
  3. OLD/test.jsonl +0 -0
  4. OLD/train.jsonl +0 -0
OLD/Parafraseja.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Loading script for the Parafraseja dataset.
2
+
3
+
4
+ import json
5
+ import csv
6
+
7
+ import datasets
8
+
9
+
10
+ logger = datasets.logging.get_logger(__name__)
11
+
12
+
13
+ _CITATION = """ """
14
+
15
+
16
+ _DESCRIPTION = """ Parafraseja is a dataset of 16,584 pairs of sentences with a label that indicates if they are paraphrases or not. The original sentences were collected from TE-ca and STS-ca. For each sentence, an annotator wrote a sentence that was a paraphrase and another that was not. The guidelines of this annotation are available. """
17
+
18
+
19
+ _HOMEPAGE = """ https://huggingface.co/datasets/projecte-aina/Parafraseja/ """
20
+
21
+
22
+
23
+ _URL = "https://huggingface.co/datasets/projecte-aina/Parafraseja/resolve/main/"
24
+ _TRAINING_FILE = "train.jsonl"
25
+ _DEV_FILE = "dev.jsonl"
26
+ _TEST_FILE = "test.jsonl"
27
+
28
+
29
class ParafrasejaConfig(datasets.BuilderConfig):
    """Configuration class for the Parafraseja dataset builder."""

    def __init__(self, **kwargs):
        """Create a builder config for Parafraseja.

        Args:
            **kwargs: keyword arguments passed through unchanged to
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
38
+
39
+
40
class Parafraseja(datasets.GeneratorBasedBuilder):
    """Dataset builder for Parafraseja, a paraphrase-identification corpus.

    Each example pairs an original sentence with a candidate rewrite and a
    binary label saying whether the rewrite is a paraphrase.
    """

    BUILDER_CONFIGS = [
        ParafrasejaConfig(
            name="Parafraseja",
            version=datasets.Version("1.0.0"),
            description="Parafraseja dataset",
        ),
    ]

    def _info(self):
        """Return the dataset metadata (features, description, homepage, citation)."""
        features = datasets.Features(
            {
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                # Index 0 = not a paraphrase, index 1 = paraphrase.
                "label": datasets.features.ClassLabel(
                    names=[
                        "No Parafrasis",
                        "Parafrasis",
                    ]
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL files and map them onto the standard splits."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded = dl_manager.download_and_extract(urls_to_download)

        split_to_key = {
            datasets.Split.TRAIN: "train",
            datasets.Split.VALIDATION: "dev",
            datasets.Split.TEST: "test",
        }
        return [
            datasets.SplitGenerator(
                name=split, gen_kwargs={"filepath": downloaded[key]}
            )
            for split, key in split_to_key.items()
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs parsed from one JSON-lines split file."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            # Parse lazily, one JSON object per line; ids are 0-based line order.
            for id_, line in enumerate(f):
                record = json.loads(line)
                yield id_, {
                    "sentence1": record["original"],
                    "sentence2": record["new"],
                    "label": record["label"],
                }
OLD/dev.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
OLD/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
OLD/train.jsonl ADDED
The diff for this file is too large to render. See raw diff