Kamtera committed on
Commit
06140ff
1 Parent(s): d0381c5

Create persian-conversational-dataset.py

Browse files
Files changed (1) hide show
  1. persian-conversational-dataset.py +75 -0
persian-conversational-dataset.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dadrah_dataset.json
2
+ """TODO(empathetic_dialogues): Add a description here."""
3
+
4
+
5
import csv
import json

import datasets
8
+
9
+
10
+ _DESCRIPTION = """\
11
+ persian-conversational-dataset
12
+ """
13
+ _URL = "https://dl.fbaipublicfiles.com/parlai/empatheticdialogues/empatheticdialogues.tar.gz"
14
+
15
+
16
class persianDialogues(datasets.GeneratorBasedBuilder):
    """Builder for a Persian conversational Q&A dataset.

    Each record in ``dadrah_dataset.json`` is assumed to be a 4-item row:
    ``[title, question, answers, keywords]`` — TODO confirm against the
    actual data file.
    """

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing each example."""
        return datasets.DatasetInfo(
            # Description shown on the datasets hub page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # BUG FIX: "list" is not a valid `datasets` dtype and
                    # raises at load time. `answers`/`keywords` are
                    # variable-length lists of strings, which `datasets`
                    # models as Sequence(Value("string")).
                    "answers": datasets.Sequence(datasets.Value("string")),
                    "keywords": datasets.Sequence(datasets.Value("string")),
                }
            ),
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Return the split generators (a single TEST split).

        The data file is expected to sit next to the script; ``dl_manager``
        is unused because nothing is downloaded.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs are passed verbatim to _generate_examples.
                gen_kwargs={
                    "files": ["dadrah_dataset.json"],
                    "split_file": "dadrah_dataset.json",
                },
            ),
        ]

    def _generate_examples(self, files, split_file):
        """Yield ``(id, example)`` pairs read from ``split_file``.

        ``files`` is a list of plain path strings; only the entry equal to
        ``split_file`` is read.
        """
        for path in files:
            # BUG FIX: the original iterated `for path, f in files`, which
            # tries to unpack each path *string* into two values and raises
            # ValueError — gen_kwargs supplies plain strings, not pairs.
            if path != split_file:
                continue
            with open(split_file, "r", encoding="utf-8") as fp:
                data = json.load(fp)
            # BUG FIX: the original stopped after 20 rows (`if id_ == 20:
            # break`) — a debugging leftover that silently truncated the
            # dataset; all rows are now yielded.
            for id_, row in enumerate(data):
                yield id_, {
                    "title": row[0],
                    "question": row[1],
                    "answers": row[2],
                    "keywords": row[3],
                }
            # Only one split file is expected; stop after processing it.
            break