Kamtera committed on
Commit
522b005
1 Parent(s): 49118ca

Update Persian-conversational-dataset.py

Browse files
Files changed (1) hide show
  1. Persian-conversational-dataset.py +33 -6
Persian-conversational-dataset.py CHANGED
@@ -1,4 +1,4 @@
1
- # dadrah_dataset.json
2
  # persian-conversational-dataset
3
  """TODO(empathetic_dialogues): Add a description here."""
4
 
@@ -6,13 +6,23 @@
6
  import csv,json
7
 
8
  import datasets
 
9
 
10
 
 
 
 
11
  _DESCRIPTION = """\
12
  persian-conversational-dataset
13
  """
14
- _URL = "https://dl.fbaipublicfiles.com/parlai/empatheticdialogues/empatheticdialogues.tar.gz"
15
 
 
 
 
 
 
 
 
16
 
17
  class persianConversation(datasets.GeneratorBasedBuilder):
18
 
@@ -28,8 +38,8 @@ class persianConversation(datasets.GeneratorBasedBuilder):
28
  {
29
  "title": datasets.Value("string"),
30
  "question": datasets.Value("string"),
31
- "answers": datasets.Value("list"),
32
- "keywords": datasets.Value("list"),
33
  # These are the features of your dataset like images, labels ...
34
  }
35
  ),
@@ -37,7 +47,8 @@ class persianConversation(datasets.GeneratorBasedBuilder):
37
  # specify them here. They'll be used if as_supervised=True in
38
  # builder.as_dataset.
39
  supervised_keys=None,
40
- # Homepage of the dataset for documentation
 
41
  )
42
 
43
  def _split_generators(self, dl_manager):
@@ -45,16 +56,32 @@ class persianConversation(datasets.GeneratorBasedBuilder):
45
  # TODO(empathetic_dialogues): Downloads the data and defines the splits
46
  # dl_manager is a datasets.download.DownloadManager that can be used to
47
  # download and extract URLs
 
 
 
48
  return [
 
 
 
 
 
 
 
 
49
  datasets.SplitGenerator(
50
  name=datasets.Split.TEST,
51
  # These kwargs will be passed to _generate_examples
52
- gen_kwargs={"files": ["dadrah_dataset.json"], "split_file": "dadrah_dataset.json"},
 
 
 
53
  ),
54
  ]
55
 
56
  def _generate_examples(self, files, split_file):
57
  """Yields examples."""
 
 
58
  for path, f in files:
59
  if split_file == path:
60
  with open(split_file, 'r', encoding='utf-8') as fmm:
 
1
+ # coding=utf-8
2
  # persian-conversational-dataset
3
  """TODO(empathetic_dialogues): Add a description here."""
4
 
 
6
  import csv,json
7
 
8
  import datasets
9
+ from datasets.tasks import QuestionAnsweringExtractive
10
 
11
 
12
+
13
+ logger = datasets.logging.get_logger(__name__)
14
+
15
  _DESCRIPTION = """\
16
  persian-conversational-dataset
17
  """
 
18
 
19
+ _URL = "https://huggingface.co/datasets/Kamtera/Persian-conversational-dataset/blob/main/"
20
+ _URLS = [
21
+ "dadrah_dataset.json",
22
+ "dadrah_dataset1-1000_10000.json",
23
+ "dadrah_dataset1-10000_100000.json",
24
+ "dadrah_dataset1-100000_276342.json",
25
+ ]
26
 
27
  class persianConversation(datasets.GeneratorBasedBuilder):
28
 
 
38
  {
39
  "title": datasets.Value("string"),
40
  "question": datasets.Value("string"),
41
+ "answers": datasets.Sequence(datasets.Value("string")),
42
+ "keywords": datasets.Sequence(datasets.Value("string")),
43
  # These are the features of your dataset like images, labels ...
44
  }
45
  ),
 
47
  # specify them here. They'll be used if as_supervised=True in
48
  # builder.as_dataset.
49
  supervised_keys=None,
50
+
51
+
52
  )
53
 
54
  def _split_generators(self, dl_manager):
 
56
  # TODO(empathetic_dialogues): Downloads the data and defines the splits
57
  # dl_manager is a datasets.download.DownloadManager that can be used to
58
  # download and extract URLs
59
+ downloaded_files = dl_manager.download(_URLS)
60
+ logger.info("| > downloaded files")
61
+ logger.info(downloaded_files)
62
  return [
63
+ datasets.SplitGenerator(
64
+ name=datasets.Split.TRAIN,
65
+ # These kwargs will be passed to _generate_examples
66
+ gen_kwargs={
67
+ "files": downloaded_files[1:3],
68
+ "split_file": "train",
69
+ },
70
+ ),
71
  datasets.SplitGenerator(
72
  name=datasets.Split.TEST,
73
  # These kwargs will be passed to _generate_examples
74
+ gen_kwargs={
75
+ "files": downloaded_files[0],
76
+ "split_file": "test"
77
+ },
78
  ),
79
  ]
80
 
81
  def _generate_examples(self, files, split_file):
82
  """Yields examples."""
83
+ logger.info("| > generate examples for "+split_file)
84
+ logger.info(files)
85
  for path, f in files:
86
  if split_file == path:
87
  with open(split_file, 'r', encoding='utf-8') as fmm: