import datasets
import json
from glob import glob
import os
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{ c6a3fe684227415a9db8e21bac4a15ab,
  author       = {Zhao Xue and Hanyu Zhao and Sha Yuan and Yequan Wang},
  title        = {{WuDaoCorpora Text}},
  year         = 2022,
  month        = dec,
  publisher    = {Science Data Bank},
  version      = {V1},
  doi          = {10.57760/sciencedb.o00126.00004},
  url          = {https://doi.org/10.57760/sciencedb.o00126.00004}
}
"""

_DESCRIPTION = """\
WuDaoCorpora Text is a large Chinese pretraining corpus constructed by the Beijing Academy of Artificial Intelligence (BAAI). The total data volume of the dataset exceeds 5TB, including 200GB of open data.


Compared with other pretraining corpora, WuDaoCorpora Text has the following advantages.

1) In the process of data collection, we classify web pages by quality according to the proportion of text on each page and the integrity of its DOM tree, and collect only high-quality pages to ensure the quality of the corpus.

2) Through data cooperation with other institutions and web page crawling, the dataset covers a wide range of Chinese text types, including news, comments, encyclopedias, forums, blogs, academic papers, etc.

3) The dataset applies more than 20 cleaning rules to distill the final corpus from 100TB of raw web page data. During cleaning, special attention is paid to removing private information to avoid the risk of privacy disclosure.

4) The dataset carries 50+ data tags, such as education and law, which makes it convenient to extract domain-specific data for training models in a particular field.


Please comply with the following agreement if you use this dataset.

https://data.baai.ac.cn/resources/agreement/BAAIDataAgreement.pdf
"""

_URL = "https://china.scidb.cn/download?fileId=63a30383fed6a8a9e8454302&traceId=a505523f-775b-4261-ad0c-406126824b4d"


class WuDaoConfig(datasets.BuilderConfig):
    """BuilderConfig for WuDao."""

    def __init__(self, **kwargs):
        """BuilderConfig for WuDao.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(WuDaoConfig, self).__init__(**kwargs)

class WuDao(datasets.GeneratorBasedBuilder):
    """WuDaoCorpora Text dataset builder."""

    BUILDER_CONFIGS = [
        WuDaoConfig(
            name="default",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

        
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "uniqueKey": datasets.Value("string"),
                    "titleUkey": datasets.Value("string"),
                    "dataType": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string")
                }
            ),
            homepage="https://www.scidb.cn/en/detail?dataSetId=c6a3fe684227415a9db8e21bac4a15ab",
            citation=_CITATION,
        )
    
    def _split_generators(self, dl_manager):

        # the `datasets` download manager cannot extract .rar archives,
        # so we implement a simple download-and-extract pipeline manually
        import wget
        from pathlib import Path
        import patoolib
        
        
        # os.name is "posix" on both Linux and macOS (it is never "Darwin")
        assert os.name == "posix", "unsupported OS (Linux and macOS only)"

        _cache_dir = os.path.join(Path.home(), ".cache", "wudao_dataset")
        logger.info("dataset will be saved at: %s", _cache_dir)

        _file_path = os.path.join(_cache_dir, "data.rar")
        os.makedirs(_cache_dir, exist_ok=True)
        
        # download and extract the archive only if it is not already present
        if not os.path.isfile(_file_path):
            wget.download(_URL, _file_path)
            patoolib.extract_archive(_file_path, outdir=_cache_dir)
        
        # rename the extracted files into sequentially numbered shards
        files = glob(os.path.join(_cache_dir, "WuDaoCorpus2.0_base_200G", "*.json"))
        os.makedirs(os.path.join(_cache_dir, "data_zhs"), exist_ok=True)

        for f_idx, file_name in enumerate(files):
            os.rename(file_name, os.path.join(_cache_dir, "data_zhs", f"shard_{f_idx}.json"))
            
        # clean up: os.removedirs only removes the directory once it is empty
        try:
            os.removedirs(os.path.join(_cache_dir, "WuDaoCorpus2.0_base_200G"))
            # os.remove(_file_path)
        except OSError:
            pass
        
        return [
            datasets.SplitGenerator(name="zhs", gen_kwargs={"data_dir": _cache_dir, "lng": "zhs"}),
            datasets.SplitGenerator(name="zht", gen_kwargs={"data_dir": _cache_dir, "lng": "zht"}),
        ]
    
    def _generate_examples(self, data_dir, lng="zhs"):
        """This function returns the examples in the raw (text) form."""
        if lng == "zht":
            # lazily import opencc; it is only needed for the Traditional split
            import opencc
            s2t = opencc.OpenCC("s2t.json")
        filepaths = glob(os.path.join(data_dir, "data_zhs", "*.json"))

        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                data = json.load(f)
                for x in data:
                    if lng == "zhs":
                        yield x["id"], x
                    elif lng == "zht":
                        # convert every text field from Simplified to Traditional Chinese
                        yield x["id"], {
                            "id": x["id"],
                            "uniqueKey": x["uniqueKey"],
                            "titleUkey": x["titleUkey"],
                            "dataType": s2t.convert(x["dataType"]),
                            "title": s2t.convert(x["title"]),
                            "content": s2t.convert(x["content"]),
                        }