initial commit
- .DS_Store +0 -0
- .gitattributes +10 -0
- data/.DS_Store +0 -0
- data/20210320/shard-00000-of-00010.parquet +3 -0
- data/20210320/shard-00001-of-00010.parquet +3 -0
- data/20210320/shard-00002-of-00010.parquet +3 -0
- data/20210320/shard-00003-of-00010.parquet +3 -0
- data/20210320/shard-00004-of-00010.parquet +3 -0
- data/20210320/shard-00005-of-00010.parquet +3 -0
- data/20210320/shard-00006-of-00010.parquet +3 -0
- data/20210320/shard-00007-of-00010.parquet +3 -0
- data/20210320/shard-00008-of-00010.parquet +3 -0
- data/20210320/shard-00009-of-00010.parquet +3 -0
- wikipedia_bn.py +77 -0
.DS_Store
ADDED
Binary file (6.15 kB)
.gitattributes
CHANGED
@@ -14,3 +14,13 @@
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00000-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00001-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00002-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00005-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00007-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00003-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00004-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00006-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00008-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
+data/20210320/shard-00009-of-00010.parquet filter=lfs diff=lfs merge=lfs -text
data/.DS_Store
ADDED
Binary file (6.15 kB)
data/20210320/shard-00000-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02b62bc2f29a4150fc1474d5e9c33195f63f508803a9774ecee5e313cdf7a92f
+size 23919434
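Each shard is committed as a Git LFS pointer rather than the Parquet bytes themselves: the three added lines record the pointer spec version, the SHA-256 of the real file, and its size in bytes. Below is a minimal sketch of reading those fields back in Python, assuming the file on disk still contains the pointer text; the read_lfs_pointer helper is illustrative and not part of this repository.

def read_lfs_pointer(path):
    # Parse the "key value" lines of a Git LFS pointer file into a dict.
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = read_lfs_pointer("data/20210320/shard-00000-of-00010.parquet")
# Expected keys before `git lfs pull`: "version", "oid", "size".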
data/20210320/shard-00001-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3844db81f7451659e392797e88347bac3f6c7dd3d73aa03415b98099fd3c11d4
+size 24042340
data/20210320/shard-00002-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61f957f325c0eaa45e7ddac2e196a7430ac31a0583eecbedc3c70259bb00a82a
+size 24069936
data/20210320/shard-00003-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5b5580cf21553471c2827619741cd4e2e26bf3140ace0793ed70c3fb78fb376
+size 23947567
data/20210320/shard-00004-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd2e76eccdf5a96e71dcfa43d93120d4a7ea4581ca7580cfc8e0f819d3537fdc
+size 24138283
data/20210320/shard-00005-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f39a06578f0af9c3d022d0c8c2b32fb244305af5bda16c8981a9a819083cf591
+size 23757563
data/20210320/shard-00006-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90107258601a7451dc3b2112a05c9c3061fc5d0ec1e172f5236f0e2764d3d79e
+size 23063798
data/20210320/shard-00007-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dd9df639379eb68d72fb5fd7a1a06fbe5e015ec388718086c2b0301180c73a5
+size 23049898
data/20210320/shard-00008-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91db9734c43b89392d08506e3909792862e79485c8d0eae1035fbbd40b2774f3
+size 23913087
data/20210320/shard-00009-of-00010.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3f34b8de4c8ec19384b8e8139693e50dcd4a0a1ad7899e1aabaf0a5c8dfb03b
+size 23780181
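Once `git lfs pull` has replaced the pointers above with the actual Parquet shards, each one can be inspected directly with pyarrow. A minimal sketch, assuming the shards have been materialized locally; the expected columns follow the features declared in wikipedia_bn.py below.

import pyarrow.parquet as pq

# Read one materialized shard and check its schema and row count.
table = pq.read_table("data/20210320/shard-00000-of-00010.parquet")
print(table.schema)    # expected fields: title (string), text (string)
print(table.num_rows)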
wikipedia_bn.py
ADDED
@@ -0,0 +1,77 @@
+"""Bengali Wikipedia from 03/20/2021"""
+
+import os
+import pyarrow.parquet as pq
+
+import datasets
+
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_CITATION = """\
+@ONLINE {wikidump,
+    author = {Wikimedia Foundation},
+    title = {Wikimedia Downloads},
+    url = {https://dumps.wikimedia.org}
+}
+"""
+
+_DESCRIPTION = """\
+Bengali Wikipedia from the dump of 03/20/2021.
+The data was processed using the Hugging Face datasets wikipedia script in early April 2021.
+The dataset was built from the Wikipedia dump (https://dumps.wikimedia.org/).
+Each example contains the content of one full Wikipedia article with cleaning to strip
+markdown and unwanted sections (references, etc.).
+"""
+
+_LICENSE = (
+    "This work is licensed under the Creative Commons Attribution-ShareAlike "
+    "3.0 Unported License. To view a copy of this license, visit "
+    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
+    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
+)
+
+_N_SHARDS = 10
+
+_URLS = {
+    "train": [f"data/20210320/shard-{i:05d}-of-{_N_SHARDS:05d}.parquet" for i in range(_N_SHARDS)],
+}
+
+
+class WikipediaBn(datasets.ArrowBasedBuilder):
+    """Bengali Wikipedia dataset built from the pre-processed parquet shards."""
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "title": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            ),
+            # No default supervised_keys: each example is a single article's
+            # title and text, with no input/target pairing.
+            supervised_keys=None,
+            homepage="https://dumps.wikimedia.org",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
+        ]
+
+    def _generate_tables(self, filepaths):
+        """This function returns the examples in the raw (text) form."""
+        for filepath in filepaths:
+            logger.info("generating examples from = %s", filepath)
+            filepath_id = os.path.basename(filepath)
+            with open(filepath, "rb") as f:
+                pf = pq.ParquetFile(f)
+                for i in range(pf.num_row_groups):
+                    id_ = f"{filepath_id}_{i}"
+                    yield id_, pf.read_row_group(i)
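A minimal usage sketch for the loading script above, assuming it is run from the repository root (so the relative paths in _URLS resolve) and a datasets version that still supports script-based loading.

import datasets

# Build the TRAIN split defined in _split_generators from the local shards.
ds = datasets.load_dataset("wikipedia_bn.py", split="train")
print(ds[0]["title"])
print(ds[0]["text"][:200])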