add gen file
README.md CHANGED
@@ -1,13 +1,24 @@
 ---
+language:
+- ja
 license: cc0-1.0
 task_categories:
 - text-generation
-language:
-- ja
 dataset_info:
   features:
-
-
+  - name: text
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 177901962
+    num_examples: 161401
+  download_size: 107585837
+  dataset_size: 177901962
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 oscar 2023をfilterしたもの (a filtered version of OSCAR 2023)
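The configs section above wires the default config to the Parquet shards under data/train-*, so the filtered corpus can be pulled directly with the datasets library. A minimal sketch, assuming a hypothetical repository id (substitute the real one):

from datasets import load_dataset

# "<namespace>/oscar-2023-ja-filtered" is a placeholder for the actual repo id.
ds = load_dataset("<namespace>/oscar-2023-ja-filtered", split="train")
print(ds)              # per the card metadata: one "text" feature, 161,401 rows
print(ds[0]["text"])   # a single filtered OSCAR 2023 document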
gen.py ADDED
@@ -0,0 +1,105 @@
import collections  # unused in this script; kept from the original import block
import io
import json
from dataclasses import dataclass

import datasets
import zstandard

logger = datasets.logging.get_logger(__name__)


@dataclass
class Identification:
    # Not used below; carried over from the upstream OSCAR loading script.
    label: str
    prob: float


_DESCRIPTION = """
"""

_URL = ""

_LICENSE = """
"""

_CITATION = """
"""

_BASE_DATA_PAT_FORMAT_STR = ""  # unused
_BASE_CHECKSUM_FILE_NAME = "checksum.sha256"


class Oscar2301JAConfig(datasets.BuilderConfig):
    # base_data_path is assumed to be supplied by the caller; it is read back in
    # _split_generators, and the stock BuilderConfig does not accept it as a kwarg.
    def __init__(self, base_data_path="", **kwargs):
        """BuilderConfig for the filtered Japanese OSCAR 2023 dataset.

        Args:
            base_data_path (str): Base URL or path of the data files.
            **kwargs: Keyword arguments forwarded to super.
        """
        description = "filtered OSCAR 2023 dataset (ja)"
        super(Oscar2301JAConfig, self).__init__(
            name="oscar_2023_ja_filtered", description=description, **kwargs
        )
        self.base_data_path = base_data_path


class Oscar2301(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Oscar2301JAConfig(
            version=datasets.Version("2023.1.0"),
        )
    ]
    BUILDER_CONFIG_CLASS = Oscar2301JAConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # checksum.sha256 lists "<sha256>  <filename>" pairs; the second field on
        # each line names a data file relative to base_data_path.
        checksum_path = self.config.base_data_path + _BASE_CHECKSUM_FILE_NAME
        checksum_file = dl_manager.download(checksum_path)
        with open(checksum_file, encoding="utf-8") as f:
            data_filenames = [line.split()[1] for line in f if line.strip()]
            data_urls = [
                self.config.base_data_path + data_filename
                for data_filename in data_filenames
            ]
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".jsonl.zst")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]

    def _generate_examples(self, doc_files):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for doc_path in doc_files:
            logger.info("generating examples from = %s", doc_path)

            # Each shard is a zstd-compressed JSONL file: decompress as a stream
            # and emit one example per JSON line.
            with open(doc_path, "rb") as fh:
                dctx = zstandard.ZstdDecompressor()
                stream_reader = dctx.stream_reader(fh)
                buffered_reader = io.BufferedReader(stream_reader)
                text_stream = io.TextIOWrapper(buffered_reader, encoding="utf-8")
                for line in text_stream:
                    doc = json.loads(line)
                    yield id_, {"id": id_, "text": doc["text"]}
                    id_ += 1