# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BilingualChildrenEmo dataset: a multilingual emotion dataset of Wilde's children's literature."""

import datasets


_DESCRIPTION = """\
The BilingualChildrenEmo dataset is a multilingual emotion dataset annotated by language experts. \
It can be used for tasks such as multilingual (Chinese and English) emotion classification and identification.
"""

_HOMEPAGE = "https://github.com/nana-lyj/BilingualChildrenEmo"

_URLS = {
    "train": "https://raw.githubusercontent.com/nana-lyj/BilingualChildrenEmo/main/data/train.tsv",
    "dev": "https://raw.githubusercontent.com/nana-lyj/BilingualChildrenEmo/main/data/dev.tsv",
    "test": "https://raw.githubusercontent.com/nana-lyj/BilingualChildrenEmo/main/data/test.tsv",
}

# Raw integer labels in the TSV files map one-to-one onto the ClassLabel indices defined in _info().
_LABEL_MAPPING = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}


class BilingualChildrenEmo(datasets.GeneratorBasedBuilder):
    """BilingualChildrenEmo dataset: a multilingual emotion dataset of Wilde's children's literature."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["joy", "sadness", "anger", "fear", "love"]),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        # Each line of the TSV file is a tab-separated record: id, sentence, label.
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # skip blank lines (e.g. a trailing newline at end of file)
                idx, sentence, label = line.split("\t")
                label = _LABEL_MAPPING[int(label)]
                yield int(idx), {"id": int(idx), "sentence": sentence, "label": label}
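

# A minimal usage sketch, not part of the loading script itself: it assumes this file is saved
# locally as `bilingual_children_emo.py` (a hypothetical filename) and that the `datasets`
# library is installed; recent versions of `datasets` may additionally require
# `trust_remote_code=True` when loading from a script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Build all three splits through this builder and inspect one training example.
    dataset = load_dataset("bilingual_children_emo.py")
    print(dataset["train"][0])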