system HF staff committed on
Commit
b519b31
0 Parent(s):

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +140 -0
  3. dataset_infos.json +1 -0
  4. dummy/1.0.0/dummy_data.zip +3 -0
  5. weibo_ner.py +136 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
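
These patterns route binary and archive artifacts (model weights, Arrow files, zips) through Git LFS. As a rough illustration of how such patterns match file paths, here is a minimal Python sketch using `fnmatch` as an approximation; real gitattributes glob semantics differ in details such as `**` handling:

```python
from fnmatch import fnmatch

# Illustrative subset of the LFS patterns declared above.
LFS_PATTERNS = ["*.zip", "*.parquet", "*.tar.*", "*tfevents*"]

def is_lfs_tracked(path: str) -> bool:
    """Rough check of whether a path would be routed through LFS."""
    name = path.rsplit("/", 1)[-1]  # match against the basename
    return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("dummy/1.0.0/dummy_data.zip"))  # True
print(is_lfs_tracked("weibo_ner.py"))                # False
```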
README.md ADDED
@@ -0,0 +1,140 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - found
+ languages:
+ - zh
+ licenses:
+ - unknown
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ task_categories:
+ - structure-prediction
+ task_ids:
+ - named-entity-recognition
+ ---
+
+ # Dataset Card for Weibo NER
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+ - [Dataset Summary](#dataset-summary)
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
+ - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+ - [Data Instances](#data-instances)
+ - [Data Fields](#data-fields)
+ - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+ - [Curation Rationale](#curation-rationale)
+ - [Source Data](#source-data)
+ - [Annotations](#annotations)
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+ - [Social Impact of Dataset](#social-impact-of-dataset)
+ - [Discussion of Biases](#discussion-of-biases)
+ - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+ - [Dataset Curators](#dataset-curators)
+ - [Licensing Information](#licensing-information)
+ - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** None
+ - **Repository:** https://github.com/OYE93/Chinese-NLP-Corpus/tree/master/NER/Weibo
+ - **Paper:** [More Information Needed]
+ - **Leaderboard:** [More Information Needed]
+ - **Point of Contact:** [More Information Needed]
+
+ ### Dataset Summary
+
+ Weibo NER is a Chinese named entity recognition corpus built from Sina Weibo posts. Entities carry one of four types: PER (person), LOC (location), GPE (geo-political entity), or ORG (organization), each subdivided into named mentions (`NAM`, e.g. 张三) and nominal mentions (`NOM`, e.g. 穷人).
+
+ ### Supported Tasks and Leaderboards
+
+ The dataset supports named entity recognition on Chinese social-media text.
+
+ ### Languages
+
+ Chinese (`zh`), as written on the Sina Weibo microblogging platform.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each instance is a sentence represented as a string `id`, a list of `tokens`, and an aligned list of `ner_tags`.
+
+ ### Data Fields
+
+ - `id`: a `string` identifier for the sentence
+ - `tokens`: a list of `string` tokens
+ - `ner_tags`: a list of classification labels over 17 classes: `O` plus `B-`/`I-` prefixed variants of `GPE.NAM`, `GPE.NOM`, `LOC.NAM`, `LOC.NOM`, `ORG.NAM`, `ORG.NOM`, `PER.NAM`, `PER.NOM`
+
+ ### Data Splits
+
+ | split      | examples |
+ |------------|---------:|
+ | train      |     1350 |
+ | validation |      270 |
+ | test       |      270 |
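+
+ The split sizes above can be checked after loading. A minimal sketch; it assumes the dataset resolves under the Hub ID `weibo_ner` (otherwise pass the path to `weibo_ner.py` instead):
+
+ ```python
+ from datasets import load_dataset
+
+ ds = load_dataset("weibo_ner")
+ for name, split in ds.items():
+     print(name, split.num_rows)  # train 1350, validation 270, test 270
+
+ # ner_tags come back as ClassLabel ids; decode a few to tag strings:
+ tags = ds["train"].features["ner_tags"].feature
+ print(tags.int2str(ds["train"][0]["ner_tags"][:5]))
+ ```
+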
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ [More Information Needed]
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ [More Information Needed]
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+
+ [More Information Needed]
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "Tags: PER(\u4eba\u540d), LOC(\u5730\u70b9\u540d), GPE(\u884c\u653f\u533a\u540d), ORG(\u673a\u6784\u540d)\nLabel\tTag\tMeaning\nPER\tPER.NAM\t\u540d\u5b57\uff08\u5f20\u4e09\uff09\nPER.NOM\t\u4ee3\u79f0\u3001\u7c7b\u522b\u540d\uff08\u7a77\u4eba\uff09\nLOC\tLOC.NAM\t\u7279\u6307\u540d\u79f0\uff08\u7d2b\u7389\u5c71\u5e84\uff09\nLOC.NOM\t\u6cdb\u79f0\uff08\u5927\u5ce1\u8c37\u3001\u5bbe\u9986\uff09\nGPE\tGPE.NAM\t\u884c\u653f\u533a\u7684\u540d\u79f0\uff08\u5317\u4eac\uff09\nORG\tORG.NAM\t\u7279\u5b9a\u673a\u6784\u540d\u79f0\uff08\u901a\u60e0\u533b\u9662\uff09\nORG.NOM\t\u6cdb\u6307\u540d\u79f0\u3001\u7edf\u79f0\uff08\u6587\u827a\u516c\u53f8\uff09\n", "citation": "", "homepage": "https://github.com/OYE93/Chinese-NLP-Corpus/tree/master/NER/Weibo", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 17, "names": ["B-GPE.NAM", "B-GPE.NOM", "B-LOC.NAM", "B-LOC.NOM", "B-ORG.NAM", "B-ORG.NOM", "B-PER.NAM", "B-PER.NOM", "I-GPE.NAM", "I-GPE.NOM", "I-LOC.NAM", "I-LOC.NOM", "I-ORG.NAM", "I-ORG.NOM", "I-PER.NAM", "I-PER.NOM", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "weibo_ner_corpus", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1179589, "num_examples": 1350, "dataset_name": "weibo_ner_corpus"}, "validation": {"name": "validation", "num_bytes": 232380, "num_examples": 270, "dataset_name": "weibo_ner_corpus"}, "test": {"name": "test", "num_bytes": 237407, "num_examples": 270, "dataset_name": "weibo_ner_corpus"}}, "download_checksums": {"https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.train": {"num_bytes": 536039, "checksum": "4e8954c54ab33b0afc9a8abb85715c47422645cc6d1b49fdb24540f4a427e997"}, "https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.dev": {"num_bytes": 105881, "checksum": "e72730d83aac680505280b6301c879b3ba470d29ac308979d48cdc58ee4907cc"}, "https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.test": {"num_bytes": 108767, "checksum": "faaf6c0ebde5348b935cfd5c89f0f22c9e0779d60667d8b5efbc647b6707d50e"}}, "download_size": 750687, "post_processing_size": null, "dataset_size": 1649376, "size_in_bytes": 2400063}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2757f75e09ee7da04552eb35c2d79c5e08d5b24002a9804c8df08600487c538e
+ size 644
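
These three lines are a Git LFS pointer, not the archive itself: the actual 644-byte zip lives in LFS storage, addressed by its SHA-256 object id. A minimal sketch parsing a pointer file into its key/value fields:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file; each line is 'key value'."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:2757f75e09ee7da04552eb35c2d79c5e08d5b24002a9804c8df08600487c538e
size 644"""

fields = parse_lfs_pointer(pointer)
print(fields["oid"], fields["size"])  # sha256:2757... 644
```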
weibo_ner.py ADDED
@@ -0,0 +1,136 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import datasets
+
+
+ # Tag scheme (from the upstream repository): PER = person, LOC = location,
+ # GPE = geo-political entity, ORG = organization; .NAM marks named mentions,
+ # .NOM marks nominal mentions.
+ _DESCRIPTION = """\
+ Tags: PER(人名), LOC(地点名), GPE(行政区名), ORG(机构名)
+ Label Tag Meaning
+ PER PER.NAM 名字(张三)
+ PER.NOM 代称、类别名(穷人)
+ LOC LOC.NAM 特指名称(紫玉山庄)
+ LOC.NOM 泛称(大峡谷、宾馆)
+ GPE GPE.NAM 行政区的名称(北京)
+ ORG ORG.NAM 特定机构名称(通惠医院)
+ ORG.NOM 泛指名称、统称(文艺公司)
+ """
+ _HOMEPAGE_URL = "https://github.com/OYE93/Chinese-NLP-Corpus/tree/master/NER/Weibo"
+ _CITATION = None
+ _TRAIN_URL = "https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.train"
+ _TEST_URL = "https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.test"
+ _VALID_URL = "https://raw.githubusercontent.com/OYE93/Chinese-NLP-Corpus/master/NER/Weibo/weiboNER_2nd_conll.dev"
+
+
+ class WeiboNERCorpus(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "B-GPE.NAM",
+                                 "B-GPE.NOM",
+                                 "B-LOC.NAM",
+                                 "B-LOC.NOM",
+                                 "B-ORG.NAM",
+                                 "B-ORG.NOM",
+                                 "B-PER.NAM",
+                                 "B-PER.NOM",
+                                 "I-GPE.NAM",
+                                 "I-GPE.NOM",
+                                 "I-LOC.NAM",
+                                 "I-LOC.NOM",
+                                 "I-ORG.NAM",
+                                 "I-ORG.NOM",
+                                 "I-PER.NAM",
+                                 "I-PER.NOM",
+                                 "O",
+                             ]
+                         )
+                     ),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_path = dl_manager.download_and_extract(_TRAIN_URL)
+         valid_path = dl_manager.download_and_extract(_VALID_URL)
+         test_path = dl_manager.download_and_extract(_TEST_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_path": train_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_path": valid_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"data_path": test_path},
+             ),
+         ]
+
+     def _generate_examples(self, data_path):
+         sentence_counter = 0
+         with open(data_path, encoding="utf-8") as f:
+             current_words = []
+             current_labels = []
+             for row in f:
+                 row = row.rstrip()
+                 row_split = row.split("\t")
+                 if len(row_split) == 2:
+                     # Two tab-separated columns: a token and its NER tag.
+                     token, label = row_split
+                     current_words.append(token)
+                     current_labels.append(label)
+                 else:
+                     # A blank line ends the current sentence; skip leading blanks.
+                     if not current_words:
+                         continue
+                     assert len(current_words) == len(current_labels), "token and label counts differ"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_words,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_words = []
+                     current_labels = []
+                     yield sentence
+
+             # Flush the last sentence if the file does not end with a blank line.
+             if current_words:
+                 sentence = (
+                     sentence_counter,
+                     {
+                         "id": str(sentence_counter),
+                         "tokens": current_words,
+                         "ner_tags": current_labels,
+                     },
+                 )
+                 yield sentence
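
`_generate_examples` reads the source files as two-column, tab-separated CoNLL-style text in which a blank line terminates each sentence. A minimal standalone sketch of the same parsing logic on a hypothetical sample (the tokens below are made up for illustration, not taken from the corpus):

```python
# Hypothetical sample in the weiboNER_2nd_conll layout: "token<TAB>tag" lines,
# with a blank line between sentences.
sample = "张\tB-PER.NAM\n三\tI-PER.NAM\n在\tO\n北\tB-GPE.NAM\n京\tI-GPE.NAM\n\n"

def parse_conll(lines):
    """Mirror of WeiboNERCorpus._generate_examples over in-memory lines."""
    words, labels, idx = [], [], 0
    for row in lines:
        parts = row.rstrip().split("\t")
        if len(parts) == 2:
            words.append(parts[0])
            labels.append(parts[1])
        elif words:  # blank line closes the current sentence
            yield idx, {"id": str(idx), "tokens": words, "ner_tags": labels}
            idx += 1
            words, labels = [], []
    if words:  # flush a trailing sentence with no final blank line
        yield idx, {"id": str(idx), "tokens": words, "ner_tags": labels}

for key, example in parse_conll(sample.split("\n")):
    print(key, example)
# 0 {'id': '0', 'tokens': ['张', '三', '在', '北', '京'], 'ner_tags': ['B-PER.NAM', ...]}
```

When these examples flow through the builder, the `datasets` library encodes the tag strings into the `ClassLabel` ids declared in `_info`.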