# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Covid Dialogue dataset in English and Chinese"""

import textwrap

import datasets

# BibTeX citation
_CITATION = """\
@article{ju2020CovidDialog,
  title={CovidDialog: Medical Dialogue Datasets about COVID-19},
  author={Ju, Zeqian and Chakravorty, Subrato and He, Xuehai and Chen, Shu and Yang, Xingyi and Xie, Pengtao},
  journal={https://github.com/UCSD-AI4H/COVID-Dialogue},
  year={2020}
}
"""

# Official description of the dataset
_DESCRIPTION = textwrap.dedent(
    """
    COVID-Dialogue-Dataset is a medical dialogue dataset about COVID-19 and other types of pneumonia.
    Patients who are concerned that they may be infected by COVID-19 or other pneumonia consult doctors,
    and doctors provide advice. There are 603 consultations in English and 1393 consultations in Chinese.
    """
)

# Link to the official homepage for the dataset
_HOMEPAGE = "https://github.com/UCSD-AI4H/COVID-Dialogue"

_LICENSE = ""


class CovidDialogueHelm(datasets.GeneratorBasedBuilder):
    """Covid Dialogue dataset, as preprocessed and shuffled in HELM."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=datasets.Version("1.0.0"), description=_DESCRIPTION)
    ]

    def _info(self):
        features = datasets.Features(
            {
                "query": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description="Covid Dialogue dataset, as preprocessed and shuffled in HELM",
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each split is stored as a pair of line-aligned files: `.source` holds the
        # patient query and `.target` holds the doctor's answer.
        test_source = dl_manager.download("test.source")
        test_target = dl_manager.download("test.target")
        train_source = dl_manager.download("train.source")
        train_target = dl_manager.download("train.target")
        val_source = dl_manager.download("val.source")
        val_target = dl_manager.download("val.target")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"target": train_target, "source": train_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"target": val_target, "source": val_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"target": test_target, "source": test_source},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, source, target):
        # The source and target files are line-aligned: line i of `source` is the query
        # whose answer is line i of `target`.
        with open(source, encoding="utf-8") as f_source, open(target, encoding="utf-8") as f_target:
            for idx, (s, t) in enumerate(zip(f_source, f_target)):
                yield idx, {"query": s, "answer": t}
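
# A minimal usage sketch (not part of the original script): assuming this file and the six
# split files (train/val/test `.source` and `.target`) sit together in one directory, and a
# `datasets` version that still supports script-based builders, the dataset can be loaded
# with `datasets.load_dataset`. The directory path below is a placeholder.
#
#     dialogue = datasets.load_dataset("path/to/this/directory")
#     print(dialogue["train"][0]["query"])
#     print(dialogue["train"][0]["answer"])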