import pyarrow as pa
import pyarrow.parquet as pq
import datasets


# Meta information
_REPO_NAME = 'Fsoft-AIC/the-vault-class'

_DESCRIPTION = """The Vault is a multilingual code-text dataset with over 40 million pairs covering 10 popular programming languages. 
It is the largest corpus containing parallel code-text data. By building upon The Stack, a massive raw code sample collection, 
the Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. It provides a 
high-quality dataset that includes code-text pairs at multiple levels, such as class and inline-level, in addition to the function level. 
The Vault can serve many purposes at multiple levels."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
_LICENSE = "MIT License"
_CITATION = """
@article{manh2023vault,
  title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
  author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
  journal={arXiv preprint arXiv:2305.06156},
  year={2023}
}
"""
################################################################################################

# Config metadata
_LANG_TO_TEXT = {
    "python": "python",
    "c#": "c_sharp",
    "c++": "cpp",
    "java": "java",
    "javascript": "javascript",
    "php": "php",
    "ruby": "ruby",
    "rust": "rust",
}
_LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())

_TEXT_TO_LANG = {text: lang for lang, text in _LANG_TO_TEXT.items()}

num_shard_split = {
    "ruby": 3,
    "c_sharp": 17,
    "cpp": 1,
    "java": 60,
    "javascript": 3,
    "php": 13,
    "python": 5,
    "rust": 1,
}
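
# Usage sketch (illustrative; it assumes this script is hosted under _REPO_NAME
# and that the `datasets` library is installed):
#
#   from datasets import load_dataset
#
#   # Default "all" config: every language with class-level data.
#   ds = load_dataset("Fsoft-AIC/the-vault-class", split="train")
#
#   # Restrict to specific languages via the `languages` kwarg handled by
#   # TheVaultClassConfig below:
#   ds = load_dataset("Fsoft-AIC/the-vault-class", languages=["python", "rust"], split="train")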

################################################################################################

class TheVaultClassConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=("all",), **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join([_LANG_TO_TEXT.get(lang, lang) for lang in languages]),
            **kwargs,
        )

        languages = set(lang.lower() for lang in languages)

        assert "go" not in languages and "c" not in languages, "C and Go do not have class-level data."
        assert all(language in _LANG_CONFIGS for language in languages), f"languages {languages} contains a language not in {_LANG_CONFIGS}."

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages: {languages}"
        else:
            languages = [_LANG_TO_TEXT[lang] for lang in languages]  # Convert to the canonical text names

        self.languages = list(languages)
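
    # Illustrative example: TheVaultClassConfig(languages=["c#", "python"]) resolves
    # to the config name "c_sharp+python", and self.languages becomes
    # ["c_sharp", "python"] (element order may vary, since the input passes through a set).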

class TheVaultClass(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")
    
    BUILDER_CONFIG_CLASS = TheVaultClassConfig
    BUILDER_CONFIGS = [TheVaultClassConfig(languages=[lang]) for lang in _LANG_CONFIGS]
    DEFAULT_CONFIG_NAME = "all"

    
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "hexsha": datasets.Value("string"),
                "repo": datasets.Value("string"),
                "path": datasets.Value("string"),
                "license": datasets.Sequence(datasets.Value("string")),
                "language": datasets.Value("string"),
                "identifier": datasets.Value("string"),
                "original_docstring": datasets.Value("string"),
                "docstring": datasets.Value("string"),
                "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "code": datasets.Value("string"),
                "code_tokens": datasets.Sequence(datasets.Value("string")),
                "short_docstring": datasets.Value("string"),
                "short_docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "comment": datasets.Sequence(datasets.Value("string")),
                "parameters": [
                    {
                        "param": datasets.Value("string"),
                        "type": datasets.Value("string"),
                    }
                ],
                "docstring_params": {
                    "returns": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "raises": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "outlier_params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "others": [
                        {
                            "identifier": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                },
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        languages = self.config.languages
        if "all" in languages:
            languages = list(_LANG_TO_TEXT.values())

        split_files = []
        for language in languages:
            num_shards = num_shard_split[language]
            data_files = [
                f"data/train/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            split_files.extend(files)

        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={"files": split_files},
            )
        ]
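
    # Shard layout (as encoded above): each language's train split is read from
    # "data/train/{language}-{index:05d}-of-{num_shards:05d}.parquet", e.g.
    # "data/train/python-00000-of-00005.parquet", since num_shard_split["python"] == 5.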

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        yield key, {
                            "hexsha": row['hexsha'][0],
                            "repo": row['repo'][0],
                            "path": row['path'][0],
                            "license": row['license'][0],
                            "language": row['language'][0],
                            "identifier": row['identifier'][0],
                            "original_docstring": row['original_docstring'][0],
                            "docstring": row['docstring'][0],
                            "docstring_tokens": row['docstring_tokens'][0],
                            "code": row['code'][0],
                            "code_tokens": row['code_tokens'][0],
                            "short_docstring": row['short_docstring'][0],
                            "short_docstring_tokens": row['short_docstring_tokens'][0],
                            "comment": row['comment'][0],
                            "parameters": row['parameters'][0],
                            "docstring_params": row['docstring_params'][0],
                        }
                        key += 1
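
# Note: a roughly equivalent sketch (assuming pyarrow >= 7.0 and that the parquet
# columns match the declared features) could convert each batch in one call
# instead of slicing the table row by row:
#
#   for record_batch in parquet_file.iter_batches(batch_size=10_000):
#       for row in record_batch.to_pylist():
#           yield key, row
#           key += 1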