"""Script to download objects from Objaverse 1.0."""

import gzip
import json
import os
import urllib.request
from multiprocessing import Pool
from typing import Any, Dict, List, Optional, Tuple

import fsspec
from loguru import logger
from tqdm import tqdm


def load_annotations(
    uids: Optional[List[str]] = None,
    download_dir: str = "~/.objaverse",
) -> Dict[str, Any]:
    """Load the full metadata of all objects in the dataset.

    Args:
        uids: A list of uids with which to load metadata. If None, it loads
            the metadata for all uids.
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary of the metadata for each object. The keys are the uids and the
        values are the metadata for that object.
    """
    # make the metadata dir if it doesn't exist
    metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
    fs, _ = fsspec.core.url_to_fs(metadata_path)
    fs.makedirs(metadata_path, exist_ok=True)

    # get the dir ids that need to be loaded if only downloading a subset of uids
    object_paths = _load_object_paths(download_dir=download_dir)
    dir_ids = (
        {object_paths[uid].split("/")[1] for uid in uids}
        if uids is not None
        else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
    )
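    # Metadata is sharded into gzipped JSON files named "000-000" through "000-159"
    # (one per directory id above); each shard covers a slice of the dataset.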

    # get the existing metadata files
    existing_metadata_files = fs.glob(
        os.path.join(metadata_path, "*.json.gz"), refresh=True
    )
    existing_dir_ids = {
        file.split("/")[-1].split(".")[0]
        for file in existing_metadata_files
        if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
    }
    downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
    logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")

    # download the metadata from the missing dir_ids
    dir_ids_to_download = dir_ids - existing_dir_ids
    logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")

    # download the metadata file if it doesn't exist
    if len(dir_ids_to_download) > 0:
        for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
            # get the path to the json file
            path = os.path.join(metadata_path, f"{i_id}.json.gz")

            # get the url to the remote json file
            hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"

            # download the file to a tmp path to avoid partial downloads on interruption
            tmp_path = f"{path}.tmp"
            with fs.open(tmp_path, "wb") as f:
                with urllib.request.urlopen(hf_url) as response:
                    f.write(response.read())
            fs.rename(tmp_path, path)

    out = {}
    for i_id in tqdm(dir_ids, desc="Reading metadata files"):
        # get the path to the json file
        path = os.path.join(metadata_path, f"{i_id}.json.gz")

        # read the json file of the metadata chunk
        with fs.open(path, "rb") as f:
            with gzip.GzipFile(fileobj=f) as gfile:
                content = gfile.read()
                data = json.loads(content)

        # filter the data to only include the uids we want
        if uids is not None:
            data = {uid: data[uid] for uid in uids if uid in data}

        # add the data to the out dict
        out.update(data)

    return out


# Note: load_annotations depends on _load_object_paths, which is defined below, so
# this call cannot run at import time; see the usage sketch at the end of the file.
# annotations = load_annotations(download_dir="~/.objaverse-temp-400")


def _load_object_paths(download_dir: str) -> Dict[str, str]:
    """Load the object paths from the dataset.

    The object paths specify where each object is located in the Hugging Face repo.

    Returns:
        A dictionary mapping the uid to the object path.
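        As an illustrative example, a path looks like "glbs/000-023/<uid>.glb"; the
        shard id in the middle is what load_annotations keys on.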
    """
    object_paths_file = "object-paths.json.gz"
    local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)

    # download the object_paths file if it doesn't exist
    fs, path = fsspec.core.url_to_fs(local_path)
    if not fs.exists(path):
        hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # download the file to a tmp path to avoid partial downloads on interruption
        tmp_path = f"{path}.tmp"
        with fs.open(tmp_path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())
        fs.rename(tmp_path, path)

    # read the object_paths
    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            object_paths = json.loads(content)

    return object_paths


def load_uids(download_dir: str = "~/.objaverse") -> List[str]:
    """Load the uids from the dataset.

    Returns:
        A list of all the UIDs from the dataset.
    """
    return list(_load_object_paths(download_dir=download_dir).keys())


def _download_object(
    uid: str,
    hf_object_path: str,
    download_dir: str,
) -> Tuple[str, str]:
    """Download the object for the given uid.

    Args:
        uid: The uid of the object to load.
        hf_object_path: The path to the object in the Hugging Face repo. Here, hf_object_path
            is the part that comes after "main" in the Hugging Face repo url:
            https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
        download_dir: The base directory to download the object to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A tuple of the uid and the local path of the downloaded object.
    """
    hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"

    filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
    fs, path = fsspec.core.url_to_fs(filename)

    # download the file
    fs.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = f"{path}.tmp"
    with fs.open(tmp_path, "wb") as file:
        with urllib.request.urlopen(hf_url) as response:
            file.write(response.read())

    fs.rename(tmp_path, path)

    return uid, filename


def _parallel_download_object(args):
    # workaround since starmap doesn't work well with tqdm
    return _download_object(*args)


def load_objects(
    uids: List[str],
    download_processes: int = 1,
    download_dir: str = "~/.objaverse",
) -> Dict[str, str]:
    """Return the path to the object files for the given uids.

    If the object is not already downloaded, it will be downloaded.

    Args:
        uids: A list of uids.
        download_processes: The number of processes to use to download the objects.
        download_dir: The base directory to download the objects to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping each object uid to the local path of its downloaded
        object file.
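        As an illustrative example (assuming the default download_dir), a value
        might look like "~/.objaverse/hf-objaverse-v1/glbs/000-023/<uid>.glb".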
    """
    uids_set = set(uids)
    hf_object_paths = _load_object_paths(download_dir=download_dir)

    versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
    fs, path = fsspec.core.url_to_fs(versioned_dirname)

    # Get the existing file paths. This is much faster than calling fs.exists() for each
    # file. `glob()` is like walk, but returns a list of files instead of the nested
    # directory structure. glob() is also faster than find() / walk() since it doesn't
    # need to traverse the entire directory structure.
    existing_file_paths = fs.glob(
        os.path.join(path, "glbs", "*", "*.glb"), refresh=True
    )
    existing_uids = {
        file.split("/")[-1].split(".")[0]
        for file in existing_file_paths
        if file.endswith(".glb")  # note partial files end with .glb.tmp
    }

    # add the existing downloaded uids to the return dict
    out = {}
    already_downloaded_uids = uids_set.intersection(existing_uids)
    for uid in already_downloaded_uids:
        hf_object_path = hf_object_paths[uid]
        fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
        out[uid] = fs_abs_object_path

    logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")

    # get the uids that need to be downloaded
    remaining_uids = uids_set - existing_uids
    uids_to_download = []
    for uid in remaining_uids:
        if uid not in hf_object_paths:
            logger.error(f"Could not find object with uid {uid}. Skipping it.")
            continue
        uids_to_download.append((uid, hf_object_paths[uid]))

    logger.info(f"Downloading {len(uids_to_download)} new objects")

    # check if all objects are already downloaded
    if len(uids_to_download) == 0:
        return out

    if download_processes == 1:
        # iteratively download the objects
        for uid, hf_object_path in tqdm(uids_to_download):
            uid, local_path = _download_object(
                uid=uid, hf_object_path=hf_object_path, download_dir=download_dir
            )
            out[uid] = local_path
    else:
        args = [
            (uid, hf_object_path, download_dir)
            for uid, hf_object_path in uids_to_download
        ]

        # download the objects in parallel
        with Pool(download_processes) as pool:
            new_object_downloads = list(
                tqdm(
                    pool.imap_unordered(_parallel_download_object, args),
                    total=len(args),
                )
            )

        for uid, local_path in new_object_downloads:
            out[uid] = local_path

    return out


def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
    """Load the LVIS annotations.

    If the annotations are not already downloaded, they will be downloaded.

    Args:
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping the LVIS category to the list of uids in that category.
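        As an illustrative example, an entry might look like
        {"<category_name>": ["<uid_1>", "<uid_2>", ...]}.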
    """
    hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"

    download_path = os.path.join(
        download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
    )

    # use fsspec
    fs, path = fsspec.core.url_to_fs(download_path)
    if not fs.exists(path):
        # make dir if it doesn't exist
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # download the file to a tmp path to avoid partial downloads on interruption
        tmp_path = f"{path}.tmp"
        with fs.open(tmp_path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())
        fs.rename(tmp_path, path)

    # load the gzip file
    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            data = json.loads(content)

    return data
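

# Minimal usage sketch (not part of the original download script): it ties the
# public functions above together. Running it will download metadata and a handful
# of objects to the default "~/.objaverse" directory.
if __name__ == "__main__":
    # list every uid in the dataset (downloads object-paths.json.gz on first use)
    all_uids = load_uids()
    print(f"Total objects available: {len(all_uids)}")

    # work with a small sample so the example finishes quickly
    sample_uids = all_uids[:5]

    # load metadata only for the sampled uids
    sample_annotations = load_annotations(uids=sample_uids)
    print(f"Loaded metadata for {len(sample_annotations)} objects")

    # download the sampled objects, using two worker processes
    local_paths = load_objects(uids=sample_uids, download_processes=2)
    for uid, local_path in local_paths.items():
        print(uid, "->", local_path)

    # LVIS annotations map each category name to a list of uids
    lvis = load_lvis_annotations()
    print(f"Loaded {len(lvis)} LVIS categories")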