import pyarrow as pa
import pyarrow.parquet as pq

import datasets


_REPO_NAME = 'Fsoft-AIC/the-vault-function'

_DESCRIPTION = """The Vault is a multilingual code-text dataset with over 40 million pairs covering 10 popular programming languages.
It is the largest corpus of parallel code-text data. By building upon The Stack, a massive collection of raw code samples,
The Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. In addition to
function-level pairs, it provides high-quality code-text pairs at other granularities, such as the class and inline levels."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
_LICENSE = "MIT License"
_CITATION = """
@article{manh2023vault,
      title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
      author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
      journal={arXiv preprint arXiv:2305.06156},
      year={2023}
}
"""


_LANG_TO_TEXT = {
    "python": "python",
    "c": "c",
    "c#": "c_sharp",
    "c++": "cpp",
    "go": "go",
    "java": "java",
    "javascript": "javascript",
    "php": "php",
    "ruby": "ruby",
    "rust": "rust",
}
_LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())
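
# Invert _LANG_TO_TEXT so config-style names (e.g. "c_sharp") map back to the
# original language labels (e.g. "c#").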
_TEXT_TO_LANG = {}
for lang in _LANG_TO_TEXT:
    _TEXT_TO_LANG[_LANG_TO_TEXT[lang]] = lang
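
# Number of parquet shards per "<split>/<language>" key; shard filenames follow
# the pattern {language}-{index:05d}-of-{num_shards:05d}.parquet (see
# _split_generators below).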
num_shard_split = {
    "train/small/ruby": 1,
    "train/small/c": 1,
    "train/small/c_sharp": 1,
    "train/small/cpp": 1,
    "train/small/go": 1,
    "train/small/java": 2,
    "train/small/javascript": 1,
    "train/small/php": 1,
    "train/small/python": 2,
    "train/small/rust": 1,

    "train/medium/c": 2,
    "train/medium/c_sharp": 3,
    "train/medium/cpp": 2,
    "train/medium/go": 4,
    "train/medium/java": 6,
    "train/medium/javascript": 2,
    "train/medium/php": 4,
    "train/medium/python": 9,
    "train/medium/ruby": 1,
    "train/medium/rust": 1,

    "train/full/c": 7,
    "train/full/c_sharp": 13,
    "train/full/cpp": 7,
    "train/full/go": 14,
    "train/full/java": 25,
    "train/full/javascript": 6,
    "train/full/php": 15,
    "train/full/python": 33,
    "train/full/ruby": 2,
    "train/full/rust": 3,

    "validation/ruby": 1,
    "validation/c": 1,
    "validation/c_sharp": 1,
    "validation/cpp": 1,
    "validation/go": 1,
    "validation/java": 1,
    "validation/javascript": 1,
    "validation/php": 1,
    "validation/python": 1,
    "validation/rust": 1,

    "test/ruby": 1,
    "test/c": 1,
    "test/c_sharp": 1,
    "test/cpp": 1,
    "test/go": 1,
    "test/java": 1,
    "test/javascript": 1,
    "test/php": 1,
    "test/python": 1,
    "test/rust": 1,
}
_SPLIT_CONFIGS = ["all", "train", "train/small", "train/medium", "train/full", "validation", "test"]


class TheVaultFunctionConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=["all"], split_set=["all"], **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            split_set (:obj:`List[str]`): List of split sets to load.
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join([_LANG_TO_TEXT[lang] if lang in _LANG_TO_TEXT else lang for lang in languages]),
            **kwargs,
        )

        languages = set([lang.lower() for lang in languages])
        split_set = set([split.lower() for split in split_set])

        assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains a language not in {_LANG_CONFIGS}."
        assert all([split in _SPLIT_CONFIGS for split in split_set]), f"split_set {split_set} contains an element not in {_SPLIT_CONFIGS}."

        if "all" in split_set:
            assert len(split_set) == 1, f"Passed 'all' together with other split sets: {split_set}"
        if "train" in split_set and "train/full" in split_set:
            print("WARNING - Split sets 'train' and 'train/full' are identical. Keeping only 'train/full'.")
            split_set.remove("train")
        if "train" in split_set or "train/full" in split_set:
            for split in split_set:
                if "train" in split and (split != "train" and split != "train/full"):
                    raise ValueError(f"Split set 'train' (or 'train/full') already contains '{split}'. Please include only one of them.")

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages: {languages}"
        else:
            languages = [_LANG_TO_TEXT[lang] for lang in languages]

        self.languages = list(languages)
        self.split_set = list(split_set)
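

# One predefined config per (split set, language) pair; arbitrary combinations
# can still be requested at load time by passing lists for `split_set` and
# `languages`.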
class TheVaultFunction(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
    BUILDER_CONFIGS = [TheVaultFunctionConfig(languages=[lang], split_set=[spl]) for lang in _LANG_CONFIGS for spl in _SPLIT_CONFIGS]
    DEFAULT_CONFIG_NAME = "all-all"
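
    # Feature schema: flat metadata columns plus nested `parameters` and
    # `docstring_params` structures parsed from each function's docstring.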
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "hexsha": datasets.Value("string"),
                "repo": datasets.Value("string"),
                "path": datasets.Value("string"),
                "license": datasets.Sequence(datasets.Value("string")),
                "language": datasets.Value("string"),
                "identifier": datasets.Value("string"),
                "return_type": datasets.Value("string"),
                "original_string": datasets.Value("string"),
                "original_docstring": datasets.Value("string"),
                "docstring": datasets.Value("string"),
                "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "code": datasets.Value("string"),
                "code_tokens": datasets.Sequence(datasets.Value("string")),
                "short_docstring": datasets.Value("string"),
                "short_docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "comment": datasets.Sequence(datasets.Value("string")),
                "parameters": [
                    {
                        "param": datasets.Value("string"),
                        "type": datasets.Value("string"),
                    }
                ],
                "docstring_params": {
                    "returns": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "raises": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "outlier_params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "others": [
                        {
                            "identifier": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                },
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
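
    # Resolve the requested split sets and languages to concrete parquet shard
    # paths, download them, and build one SplitGenerator per split.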
    def _split_generators(self, dl_manager):
        generators = []
        split_set = self.config.split_set
        languages = self.config.languages

        if "all" in split_set:
            split_set = ["train/full", "validation", "test"]

        if "train" in split_set:
            split_set.remove("train")
            split_set = ["train/full"] + split_set

        if "all" in languages:
            languages = list(_LANG_TO_TEXT.values())

        for split in split_set:
            split_files = []
            for language in languages:
                num_shards = num_shard_split[f"{split}/{language}"]
                data_files = [
                    f"data/{split}/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                    for _index in range(num_shards)
                ]
                files = dl_manager.download(data_files)
                split_files.extend(files)

            generators.append(
                datasets.SplitGenerator(
                    name="train" if split == "train/full" else split.replace("/", "_"),
                    gen_kwargs={
                        "files": split_files,
                    },
                ),
            )

        return generators
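
    # Stream each parquet shard in 10k-row batches so a full shard never has to
    # be materialized in memory at once.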
    def _generate_examples(self, files):
        key = 0
        for file_idx, file in enumerate(files):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        yield key, {
                            "hexsha": row['hexsha'][0],
                            "repo": row['repo'][0],
                            "path": row['path'][0],
                            "license": row['license'][0],
                            "language": row['language'][0],
                            "identifier": row['identifier'][0],
                            "return_type": row['return_type'][0],
                            "original_string": row['original_string'][0],
                            "original_docstring": row['original_docstring'][0],
                            "docstring": row['docstring'][0],
                            "docstring_tokens": row['docstring_tokens'][0],
                            "code": row['code'][0],
                            "code_tokens": row['code_tokens'][0],
                            "short_docstring": row['short_docstring'][0],
                            "short_docstring_tokens": row['short_docstring_tokens'][0],
                            "comment": row['comment'][0],
                            "parameters": row['parameters'][0],
                            "docstring_params": row['docstring_params'][0],
                        }
                        key += 1
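

# Minimal usage sketch (illustrative; assumes this script is the loading script
# published in the Hub repo named by _REPO_NAME):
#
#     from datasets import load_dataset
#     ds = load_dataset("Fsoft-AIC/the-vault-function",
#                       split_set=["train/small"], languages=["python"])
#
# Extra keyword arguments such as `split_set` and `languages` are forwarded by
# `load_dataset` to TheVaultFunctionConfig.__init__ above.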