"""persian-conversational-dataset: a Persian conversational question-answering dataset loaded from a local `dadrah_dataset.json` file."""

import json

import datasets


_DESCRIPTION = """\
persian-conversational-dataset
"""
_URL = "https://dl.fbaipublicfiles.com/parlai/empatheticdialogues/empatheticdialogues.tar.gz" |


class persianConversation(datasets.GeneratorBasedBuilder):
    """Dataset builder for the persian-conversational-dataset."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # "answers" and "keywords" are lists; string elements are assumed.
                    "answers": datasets.Sequence(datasets.Value("string")),
                    "keywords": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The data file is not downloaded via dl_manager; "dadrah_dataset.json"
        # is expected to be available locally (relative to the working directory).
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": ["dadrah_dataset.json"], "split_file": "dadrah_dataset.json"},
            ),
        ]
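
    # Assumed layout of dadrah_dataset.json (inferred from the positional
    # indexing in _generate_examples below; not documented in this script):
    # a JSON array whose records are 4-element lists, e.g.
    #
    #   [
    #     ["<title>", "<question>", ["<answer 1>", "<answer 2>"], ["<keyword 1>", "<keyword 2>"]],
    #     ...
    #   ]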
    def _generate_examples(self, files, split_file):
        """Yields examples."""
        # "files" is a list of plain path strings (see gen_kwargs above).
        for path in files:
            if split_file == path:
                with open(split_file, "r", encoding="utf-8") as fmm:
                    data = json.load(fmm)
                for id_, row in enumerate(data):
                    # Each record is assumed to be [title, question, answers, keywords].
                    title = row[0]
                    question = row[1]
                    answers = row[2]
                    keywords = row[3]
                    if id_ == 20:
                        # Kept from the original script: only the first 20 records
                        # are emitted (likely a debugging limit).
                        break
                    yield id_, {
                        "title": title,
                        "question": question,
                        "answers": answers,
                        "keywords": keywords,
                    }
            break
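

# Minimal usage sketch, assuming this script is saved locally (the file name
# "persian_conversation.py" below is hypothetical) with "dadrah_dataset.json"
# in the working directory:
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("path/to/persian_conversation.py", split="test")
#   print(dataset[0]["question"])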