import os

import pyarrow as pa
import pyarrow.parquet as pq

import datasets


_REPO_NAME = 'Fsoft-AIC/the-vault'

_LANG_TO_EXTENSION = {
    "Python": [".py"],
    "C": [".c", ".h"],
    "C#": [".cs"],
    "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
    "Go": [".go"],
    "Java": [".java"],
    "JavaScript": [".js"],
    "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
    "Ruby": [".rb"],
    "Rust": [".rs"],
}

_DESCRIPTION = """The Vault"""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"

# Reverse mapping from file extension to language name.
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
    for extension in _LANG_TO_EXTENSION[lang]:
        _EXTENSION_TO_LANG[extension] = lang

_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())

# Number of parquet shards per split subset.
num_shard_split = {
    'train/small': 2,
    'train/medium': 4,
}

_SPLIT_CONFIGS = ["all"] + list(num_shard_split.keys())
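
# Builder configs are generated for every split/language pair (see
# BUILDER_CONFIGS below); a config name is "<split with '/' replaced by '_'>"
# + "-" + "<language>", e.g. "train_small-Python", with "all-all" as default.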
class TheVaultFunctionConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=["all"], split_set=["all"], **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            split_set (:obj:`List[str]`): List of split subsets to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join(languages),
            **kwargs,
        )

        languages = set(languages)
        split_set = set(split_set)

        assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains language not in {_LANG_CONFIGS}."
        assert all([split in _SPLIT_CONFIGS for split in split_set]), f"split_set {split_set} contains element not in {_SPLIT_CONFIGS}."

        if "all" in split_set:
            assert len(split_set) == 1, f"Passed 'all' together with other split sets. {split_set}"

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
            self.filter_languages = False
        else:
            self.filter_languages = True

        self.languages = set(languages)
        self.split_set = split_set


class TheVaultFunction(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
    BUILDER_CONFIGS = [
        TheVaultFunctionConfig(languages=[lang], split_set=[spl])
        for lang in _LANG_CONFIGS
        for spl in _SPLIT_CONFIGS
    ]
    DEFAULT_CONFIG_NAME = "all-all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "original_string": datasets.Value("string"),
                    "original_docstring": datasets.Value("string"),
                    "code": datasets.Value("string"),
                    "docstring": datasets.Value("string"),
                    "code_tokens": datasets.Value("string"),
                    "docstring_tokens": datasets.Value("string"),
                    "short_docstring": datasets.Value("string"),
                    "comment": datasets.Value("string"),
                    "return_type": datasets.Value("string"),
                    "identifier": datasets.Value("string"),
                    "repo": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )

    def _split_generators(self, dl_manager):
        generators = []
        split_set = self.config.split_set
        if "all" in split_set:
            split_set = list(num_shard_split.keys())

        for split in split_set:
            num_shards = num_shard_split[split]
            data_files = [
                f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            generators.append(
                datasets.SplitGenerator(
                    name=split.replace("/", "_"),
                    gen_kwargs={
                        "files": files,
                    },
                )
            )
        return generators

    def _generate_examples(self, files):
        key = 0
        for file_idx, file in enumerate(files):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        lang = row["language"][0]

                        if self.config.filter_languages and lang not in self.config.languages:
                            continue

                        yield key, {
                            "original_string": row["original_string"][0],
                            "original_docstring": row["original_docstring"][0],
                            "code": row["code"][0],
                            "docstring": row["docstring"][0],
                            "code_tokens": row["code_tokens"][0],
                            "docstring_tokens": row["docstring_tokens"][0],
                            "short_docstring": row["short_docstring"][0],
                            "comment": row["comment"][0],
                            "return_type": row["return_type"][0],
                            "identifier": row["identifier"][0],
                            "repo": row["repo"][0],
                            "path": row["path"][0],
                            "language": lang,
                        }
                        key += 1


def lang_from_name(name):
    # Resolve a file name to its language via _EXTENSION_TO_LANG; implicitly
    # returns None when no known extension matches.
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
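

# Usage sketch (not part of the loading script itself; the local filename
# below is illustrative). Assuming this script is saved as
# "the_vault_function.py" and the parquet shards listed in `num_shard_split`
# are reachable under data/, a single config can be loaded by the name built
# in TheVaultFunctionConfig:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("the_vault_function.py", "train_small-Python")
#     print(ds["train_small"][0]["identifier"])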