import pyarrow as pa
import pyarrow.parquet as pq
import datasets


# Meta information
_REPO_NAME = 'Fsoft-AIC/the-vault-inline'

_DESCRIPTION = """The Vault is a multilingual code-text dataset with over 34 million pairs covering 10 popular programming languages.
It is the largest corpus of parallel code-text data. Built on top of The Stack, a massive collection of raw code samples,
The Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. It provides
high-quality code-text pairs at multiple granularities (function, class, and inline level), making it suitable for a wide
range of downstream tasks."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
_LICENSE = "MIT License"
_CITATION = """
@article{manh2023vault,
  title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
  author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
  journal={arXiv preprint arXiv:2305.06156},
  year={2023}
}
"""
################################################################################################
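
# Example usage (a minimal sketch, assuming the `datasets` library is installed and the
# Hugging Face Hub is reachable; newer `datasets` versions may also require
# trust_remote_code=True to run this loading script). The `languages` keyword is
# forwarded to TheVaultFunctionConfig defined below:
#
#   from datasets import load_dataset
#
#   # Load the inline-level pairs for a single language.
#   ds = load_dataset("Fsoft-AIC/the-vault-inline", languages=["python"], split="train")
#
#   # Or load every supported language (the default "all" config).
#   ds_all = load_dataset("Fsoft-AIC/the-vault-inline", split="train")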

# Config metadata
_LANG_TO_TEXT = {
    "python": "python",
    "c": "c",
    "c#": "c_sharp",
    "c++": "cpp",
    "go": "go",
    "java": "java",
    "javascript": "javascript",
    "php": "php",
    "ruby": "ruby",
    "rust": "rust",
}       
_LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())

_TEXT_TO_LANG = {text: lang for lang, text in _LANG_TO_TEXT.items()}

num_shard_split = {
    "ruby": 3,
    "c": 29,
    "c_sharp": 25,
    "cpp": 39,
    "go": 15,
    "java": 75,
    "javascript": 6,
    "php": 21,
    "python": 48,
    "rust": 10,
}
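
# Each language's train split is stored as `num_shard_split[lang]` parquet shards under
# data/train/, named "{lang}-{index:05d}-of-{num_shards:05d}.parquet" (see _split_generators
# below). For example, the three ruby shards are:
#
#   data/train/ruby-00000-of-00003.parquet
#   data/train/ruby-00001-of-00003.parquet
#   data/train/ruby-00002-of-00003.parquet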

################################################################################################

class TheVaultFunctionConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=["all"], **kwargs):
        """BuilderConfig for the The Vault dataset.
        Args:
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name= "+".join([_LANG_TO_TEXT[lang] if lang in _LANG_TO_TEXT else lang for lang in languages]),
            **kwargs,
        )
        
        languages = set([lang.lower() for lang in languages])
        
        assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains language not in {_LANG_CONFIGS}."

        if "all" in languages:
            assert len(languages)==1, f"Passed 'all' together with other languages. {languages}"
        else:
            languages = [_LANG_TO_TEXT[lang] for lang in languages] # Convert to text name
        
        self.languages = list(languages)

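# For example, TheVaultFunctionConfig(languages=["c++", "java"]) produces a config named
# "cpp+java" whose `languages` attribute contains the normalized names "cpp" and "java";
# the default ["all"] keeps the single config named "all".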

class TheVaultFunction(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")
    
    BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
    BUILDER_CONFIGS = [TheVaultFunctionConfig(languages=[lang]) for lang in _LANG_CONFIGS]
    DEFAULT_CONFIG_NAME = "all"

    
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "hexsha": datasets.Value("string"),
                    "repo": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "license": datasets.Sequence(datasets.Value("string")),
                    "language": datasets.Value("string"),
                    "identifier": datasets.Value("string"),
                    "code": datasets.Value("string"),
                    "code_tokens": datasets.Sequence(datasets.Value("string")),
                    "original_comment": datasets.Value("string"),
                    "comment": datasets.Value("string"),
                    "comment_tokens": datasets.Sequence(datasets.Value("string")),
                    "start_point": datasets.Sequence(datasets.Value("int32")),
                    "end_point": datasets.Sequence(datasets.Value("int32")),
                    "prev_context": {
                        "code": datasets.Value("string"),
                        "start_point": datasets.Sequence(datasets.Value("int32")),
                        "end_point": datasets.Sequence(datasets.Value("int32")),
                    },
                    "next_context": {
                        "code": datasets.Value("string"),
                        "start_point": datasets.Sequence(datasets.Value("int32")),
                        "end_point": datasets.Sequence(datasets.Value("int32")),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        generators = []
        languages = self.config.languages

        if "all" in languages:
            languages = list(_LANG_TO_TEXT.values())

        split_files = []
        for language in languages:
            num_shards = num_shard_split[language]
            data_files = [
                f"data/train/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            split_files.extend(files)

        generators.append(
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "files": split_files,
                },
            ),
        )

        return generators

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        yield key, {
                            "hexsha": row['hexsha'][0],
                            "repo": row['repo'][0],
                            "path": row['path'][0],
                            "license": row['license'][0],
                            "language": row['language'][0],
                            "identifier": row['identifier'][0],
                            "code": row['code'][0],
                            "code_tokens": row['code_tokens'][0],
                            "original_comment": row['original_comment'][0],
                            "comment": row['comment'][0],
                            "comment_tokens": row['comment_tokens'][0],
                            "start_point": row['start_point'][0],
                            "end_point": row['end_point'][0],
                            "prev_context": row['prev_context'][0],
                            "next_context": row['next_context'][0],
                        }
                        key += 1