from __future__ import annotations

import json
import random
from typing import Any, Generator

from datasets import (
    BuilderConfig,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Sequence,
    Split,
    SplitGenerator,
    Value,
    Version,
)
from datasets.data_files import DataFilesDict

_CITATION = """
@inproceedings{omi-2021-wikipedia,
    title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
    author = "近江 崇宏",
    booktitle = "言語処理学会第27回年次大会",
    year = "2021",
    url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
}
"""

_DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."

_HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"

_LICENSE = "CC-BY-SA 3.0"

_URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"
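
# For reference, ner.json is a JSON array whose records are shaped as sketched
# below (placeholder values, not taken from the file; the layout mirrors the
# Features declared in _info):
#
#     {
#         "curid": "...",
#         "text": "...",
#         "entities": [{"name": "...", "span": [0, 2], "type": "..."}],
#     }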


class NerWikipediaDatasetConfig(BuilderConfig):
    """BuilderConfig for the NER Wikipedia dataset.

    Beyond the standard BuilderConfig fields, this controls whether the
    records are shuffled before splitting, the RNG seed, and the train and
    validation ratios; the remainder becomes the test split.
    """

    def __init__(
        self,
        name: str = "default",
        version: Version | str | None = Version("0.0.0"),
        data_dir: str | None = None,
        data_files: DataFilesDict | None = None,
        description: str | None = None,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.8,
        validation_ratio: float = 0.1,
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio
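
# Usage sketch (not part of the loader; the script path is hypothetical and a
# `datasets` version that still executes local loading scripts is assumed):
# `load_dataset` forwards unknown keyword arguments to the selected
# BuilderConfig, so the split behaviour can be overridden at load time.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "ner_wikipedia_dataset.py",
#         shuffle=False,
#         train_ratio=0.9,
#         validation_ratio=0.05,
#     )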


class NerWikipediaDataset(GeneratorBasedBuilder):
    """Builds train/validation/test splits of the Stockmark NER Wikipedia data."""

    BUILDER_CONFIG_CLASS = NerWikipediaDatasetConfig
    BUILDER_CONFIGS = [
        NerWikipediaDatasetConfig(
            name="ner-wikipedia-dataset",
            version=Version("2.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self) -> DatasetInfo:
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "curid": Value("string"),
                    "text": Value("string"),
                    "entities": [
                        {
                            "name": Value("string"),
                            "span": Sequence(Value("int64"), length=2),
                            "type": Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
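
    # Note on the "entities" feature above: to the best of our knowledge of
    # the `datasets` library, declaring it as Sequence({...}) instead of a
    # plain list of structs would transpose each example's entities into a
    # dict of lists ({"name": [...], "span": [...], "type": [...]}); the list
    # notation keeps them as a list of dicts, matching the raw JSON.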

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> list[SplitGenerator]:
        # download_and_extract returns the local path of the downloaded
        # ner.json file; there is no archive to extract.
        file_path = str(dl_manager.download_and_extract(_URL))
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        if self.config.shuffle:
            # Shuffle with a local RNG so the module-level random state is
            # left untouched.
            random.Random(self.config.seed).shuffle(data)

        # Slice the records into train/validation/test; the test split takes
        # whatever remains after the first two.
        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[
            num_train_data : num_train_data + num_validation_data
        ]
        test_data = data[num_train_data + num_validation_data :]
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data": train_data},
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={"data": validation_data},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"data": test_data},
            ),
        ]
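
    # Illustrative arithmetic (hypothetical count): with 1,000 records and the
    # default ratios, train gets data[:800], validation data[800:900], and
    # test data[900:], so every record lands in exactly one split and rounding
    # shortfalls accumulate in the test split.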

    def _generate_examples(
        self, data: list[dict[str, Any]]
    ) -> Generator[tuple[int, dict[str, Any]], None, None]:
        # Keys only need to be unique within the split, so the list index
        # suffices.
        for i, d in enumerate(data):
            yield i, {
                "curid": d["curid"],
                "text": d["text"],
                "entities": d["entities"],
            }
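

if __name__ == "__main__":
    # Minimal manual smoke test (a sketch, not part of the loader): build the
    # dataset through the standard builder API and report split sizes.
    # Assumes network access to the raw GitHub URL above.
    builder = NerWikipediaDataset()
    builder.download_and_prepare()
    dataset = builder.as_dataset()
    print({split: len(ds) for split, ds in dataset.items()})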