# coding=utf-8
# persian-conversational-dataset
"""Loading script for persian-conversational-dataset, a Persian (Farsi)
conversational question-answering dataset."""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
persian-conversational-dataset: Persian (Farsi) conversational question-answering
data with title, question, answers, and keywords fields.
"""

# `resolve/main/` serves the raw files; `blob/main/` would return the HTML page.
_URL = "https://huggingface.co/datasets/Kamtera/Persian-conversational-dataset/resolve/main/"
_FILES = [
    "dadrah_dataset.json",
    "dadrah_dataset1-1000_10000.json",
    "dadrah_dataset1-10000_100000.json",
    "dadrah_dataset1-100000_276342.json",
]
_URLS = [_URL + name for name in _FILES]
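
# Each JSON file is assumed (judging from the indexing in `_generate_examples`
# below) to contain a list of records of the form:
#   [title, question, [answer, ...], [keyword, ...]]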


class PersianConversation(datasets.GeneratorBasedBuilder):
    """Builder for persian-conversational-dataset."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(datasets.Value("string")),
                    "keywords": datasets.Sequence(datasets.Value("string")),
                }
            ),
            # There is no canonical (input, target) pair, so there is nothing
            # to use for as_supervised=True in builder.as_dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager downloads (and caches) the data files.
        downloaded_files = dl_manager.download(_URLS)
        logger.info("| > downloaded files")
        logger.info(downloaded_files)
        # The first file is held out as the test split; the rest form train.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "files": downloaded_files[1:],
                    "split_name": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": downloaded_files[0:1],
                    "split_name": "test",
                },
            ),
        ]

    def _generate_examples(self, files, split_name):
        """Yields (key, example) pairs from the given JSON files."""
        logger.info("| > generating examples for " + split_name)
        logger.info(files)
        key = 0  # keys must be unique across every file in the split
        for path in files:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            for row in data:
                title, question, answers, keywords = row[0], row[1], row[2], row[3]
                yield key, {
                    "title": title,
                    "question": question,
                    "answers": answers,
                    "keywords": keywords,
                }
                key += 1
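

# A minimal usage sketch. It assumes this script is published in the
# Kamtera/Persian-conversational-dataset repo on the Hugging Face Hub and
# that the `datasets` library is installed; for a local checkout, pass the
# path to this file to `load_dataset` instead of the repo id.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("Kamtera/Persian-conversational-dataset")
    print(dataset)              # DatasetDict with "train" and "test" splits
    print(dataset["train"][0])  # one record: title, question, answers, keywords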