sentence_pair/sentence_pair.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path

import datasets

# Every config maps to one JSON Lines file; each row carries a "split"
# field ("train", "validation" or "test") that _generate_examples filters on.
_URLS = {
    "afqmc": "data/afqmc.jsonl",
    "bustm": "data/bustm.jsonl",
    "ccks2018_task3": "data/ccks2018_task3.jsonl",
    "chinese_mnli": "data/chinese_mnli.jsonl",
    "chinese_snli": "data/chinese_snli.jsonl",
    "chinese_sts": "data/chinese_sts.jsonl",
    "chip2019": "data/chip2019.jsonl",
    "covid_19": "data/covid_19.jsonl",
    "diac2019": "data/diac2019.jsonl",
    "lcqmc": "data/lcqmc.jsonl",
    "ocnli": "data/ocnli.jsonl",
    "pawsx_zh": "data/pawsx_zh.jsonl",
    "sts_b": "data/sts_b.jsonl",
}

_CITATION = """\
@dataset{sentence_pair,
  author    = {Xing Tian},
  title     = {sentence_pair},
  month     = sep,
  year      = 2023,
  publisher = {Xing Tian},
  version   = {1.0},
}
"""


class SentencePair(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # Build one BuilderConfig per subset in _URLS.  A plain for loop is used
    # instead of a list comprehension because VERSION lives in the class
    # namespace, which a comprehension's scope cannot see.
    configs = list()
    for name in _URLS.keys():
        config = datasets.BuilderConfig(name=name, version=VERSION, description=name)
        configs.append(config)

    BUILDER_CONFIGS = [*configs]

    def _info(self):
        features = datasets.Features(
            {
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                "label": datasets.Value("string"),
                "category": datasets.Value("string"),
                "data_source": datasets.Value("string"),
                "split": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[self.config.name]
        archive_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Yields examples."""
        archive_path = Path(archive_path)
        # Each line is a JSON object with sentence1, sentence2, label,
        # category, data_source and split; rows from other splits are skipped.
        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)
                if sample["split"] != split:
                    continue
                yield idx, {
                    "sentence1": sample["sentence1"],
                    "sentence2": sample["sentence2"],
                    "label": sample["label"],
                    "category": sample["category"],
                    "data_source": sample["data_source"],
                    "split": sample["split"],
                }
                idx += 1


if __name__ == "__main__":
    pass
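    # A minimal usage sketch (an illustrative addition, not part of the
    # original script): load one subset through this loading script.  It
    # assumes the data/*.jsonl files listed in _URLS sit next to this file;
    # depending on the installed `datasets` version, script-based loading may
    # require trust_remote_code=True or an older release of the library.
    dataset = datasets.load_dataset(__file__, name="afqmc", split="train")
    print(dataset[0])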