# coding=utf-8
# persian-conversational-dataset
"""Loading script for the Persian conversational dataset."""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
persian-conversational-dataset
"""

# NOTE: _URL is currently unused; dl_manager.download() resolves the relative
# file names in _URLS against this dataset repository. If absolute URLs were
# needed, "resolve/main/" (raw file) would have to replace "blob/main/"
# (HTML page view).
_URL = "https://huggingface.co/datasets/Kamtera/Persian-conversational-dataset/blob/main/"
_URLS = [
    "dadrah_dataset.json",
    "dadrah_dataset1-1000_10000.json",
    "dadrah_dataset1-10000_100000.json",
    "dadrah_dataset1-100000_276342.json",
]


class persianConversation(datasets.GeneratorBasedBuilder):
    """Builder for the Persian conversational (question/answer) dataset."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This description appears on the dataset page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(datasets.Value("string")),
                    "keywords": datasets.Sequence(datasets.Value("string")),
                }
            ),
            # No canonical (input, target) pair is defined, so loading with
            # as_supervised=True is not supported.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators: the first file is held out as TEST, the
        remaining three files form TRAIN."""
        # dl_manager is a datasets.download.DownloadManager used to download
        # and extract URLs (here: file names relative to this repository).
        downloaded_files = dl_manager.download(_URLS)
        logger.info("| > downloaded files")
        logger.info(downloaded_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "files": downloaded_files[1:],
                    "split_file": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": downloaded_files[:1],
                    "split_file": "test",
                },
            ),
        ]
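
    # Data layout (inferred from the indexing in _generate_examples; the field
    # semantics are an assumption): each JSON file holds a list of 4-element
    # rows,
    #   [title, question, answers, keywords]
    # where `answers` and `keywords` are lists of strings, matching the
    # Sequence features declared in _info.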

    def _generate_examples(self, files, split_file):
        """Yields (key, example) pairs from the given JSON files."""
        logger.info("| > generate examples for " + split_file)
        logger.info(files)
        key = 0
        for path in files:
            with open(path, "r", encoding="utf-8") as fmm:
                data = json.load(fmm)
            for row in data:
                title, question, answers, keywords = row[0], row[1], row[2], row[3]
                # Keys must be unique across *all* files in a split, so a
                # running counter is used rather than the per-file index.
                yield key, {
                    "title": title,
                    "question": question,
                    "answers": answers,
                    "keywords": keywords,
                }
                key += 1
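

# Minimal usage sketch (an assumption, not part of the original script): load
# the dataset through the Hub repo this script lives in. Newer versions of the
# `datasets` library may require trust_remote_code=True for script-based
# datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("Kamtera/Persian-conversational-dataset")
    print(ds)  # DatasetDict with "train" and "test" splits
    print(ds["train"][0])  # {"title": ..., "question": ..., "answers": [...], "keywords": [...]}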