Delete legacy JSON metadata
#2 opened by albertvillanova (HF staff)
dataset_infos.json DELETED (+0 -1)
@@ -1 +0,0 @@
-{"large": {"description": "LCCC: Large-scale Cleaned Chinese Conversation corpus (LCCC) is a large corpus of Chinese conversations.\nA rigorous data cleaning pipeline is designed to ensure the quality of the corpus.\nThis pipeline involves a set of rules and several classifier-based filters.\nNoises such as offensive or sensitive words, special symbols, emojis,\ngrammatically incorrect sentences, and incoherent conversations are filtered.\n", "citation": "@inproceedings{wang2020chinese,\ntitle={A Large-Scale Chinese Short-Text Conversation Dataset},\nauthor={Wang, Yida and Ke, Pei and Zheng, Yinhe and Huang, Kaili and Jiang, Yong and Zhu, Xiaoyan and Huang, Minlie},\nbooktitle={NLPCC},\nyear={2020},\nurl={https://arxiv.org/abs/2008.03946}\n}\n", "homepage": "https://github.com/thu-coai/CDial-GPT", "license": "MIT", "features": {"dialog": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lccc", "config_name": "large", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1530827965, "num_examples": 12007759, "dataset_name": "lccc"}}, "download_checksums": {"https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_large.jsonl.gz": {"num_bytes": 607605643, "checksum": "0eaf3b39e1f54c414c3c75a8319f89c8a98b4bc6f91913b051a0b849e7d3326f"}}, "download_size": 607605643, "post_processing_size": null, "dataset_size": 1530827965, "size_in_bytes": 2138433608}, "base": {"description": "LCCC: Large-scale Cleaned Chinese Conversation corpus (LCCC) is a large corpus of Chinese conversations.\nA rigorous data cleaning pipeline is designed to ensure the quality of the corpus.\nThis pipeline involves a set of rules and several classifier-based filters.\nNoises such as offensive or sensitive words, special symbols, emojis,\ngrammatically incorrect sentences, and incoherent conversations are filtered.\n", "citation": "@inproceedings{wang2020chinese,\ntitle={A Large-Scale Chinese Short-Text Conversation Dataset},\nauthor={Wang, Yida and Ke, Pei and Zheng, Yinhe and Huang, Kaili and Jiang, Yong and Zhu, Xiaoyan and Huang, Minlie},\nbooktitle={NLPCC},\nyear={2020},\nurl={https://arxiv.org/abs/2008.03946}\n}\n", "homepage": "https://github.com/thu-coai/CDial-GPT", "license": "MIT", "features": {"dialog": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lccc", "config_name": "base", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 932634902, "num_examples": 6820506, "dataset_name": "lccc"}, "test": {"name": "test", "num_bytes": 1498216, "num_examples": 10000, "dataset_name": "lccc"}, "validation": {"name": "validation", "num_bytes": 2922731, "num_examples": 20000, "dataset_name": "lccc"}}, "download_checksums": {"https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_train.jsonl.gz": {"num_bytes": 369854377, "checksum": "2162e0ed923fba62329cabf7e1493fbe59248afc94a62508e4abdea61e624627"}, "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_valid.jsonl.gz": {"num_bytes": 1071594, "checksum": "5cc27e7ac3447c5a31386178f82ff01cab56e27827445ef8d429809301491759"}, "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_test.jsonl.gz": {"num_bytes": 549124, "checksum": "cf8757587bdb8f360cc94fc38baadf9e185bad65a26155527a8430c048676016"}}, "download_size": 371475095, "post_processing_size": null, "dataset_size": 937055849, "size_in_bytes": 1308530944}}