"""Script to download objects from Objaverse 1.0."""

import gzip
import json
import os
import urllib.request
from multiprocessing import Pool
from typing import Any, Dict, List, Optional, Tuple

import fsspec
from loguru import logger
from tqdm import tqdm


def load_annotations(
    uids: Optional[List[str]] = None,
    download_dir: str = "~/.objaverse",
) -> Dict[str, Any]:
    """Load the full metadata of all objects in the dataset.

    Args:
        uids: A list of uids for which to load metadata. If None, it loads
            the metadata for all uids.
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary of the metadata for each object. The keys are the uids and the
        values are the metadata for that object.
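
    Example:
        A minimal usage sketch (the uid below is a placeholder, not a real
        Objaverse uid):

            annotations = load_annotations(uids=["<uid>"])
            metadata = annotations["<uid>"]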
    """
    metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
    fs, _ = fsspec.core.url_to_fs(metadata_path)
    fs.makedirs(metadata_path, exist_ok=True)

    # Determine which metadata shards are needed. Annotations are sharded into
    # 160 files on Hugging Face, named 000-000.json.gz through 000-159.json.gz,
    # matching the directory names used for the objects themselves.
    object_paths = _load_object_paths(download_dir=download_dir)
    dir_ids = (
        {object_paths[uid].split("/")[1] for uid in uids}
        if uids is not None
        else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
    )

    # Skip shards that are already present locally.
    existing_metadata_files = fs.glob(
        os.path.join(metadata_path, "*.json.gz"), refresh=True
    )
    existing_dir_ids = {
        file.split("/")[-1].split(".")[0]
        for file in existing_metadata_files
        if file.endswith(".json.gz")
    }
    downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
    logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")

    dir_ids_to_download = dir_ids - existing_dir_ids
    logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")

    if len(dir_ids_to_download) > 0:
        for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
            path = os.path.join(metadata_path, f"{i_id}.json.gz")
            hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"

            # Write to a temporary file and rename it afterwards so that an
            # interrupted download never leaves a partial .json.gz behind.
            tmp_path = f"{path}.tmp"
            with fs.open(tmp_path, "wb") as f:
                with urllib.request.urlopen(hf_url) as response:
                    f.write(response.read())
            fs.rename(tmp_path, path)

    out = {}
    for i_id in tqdm(dir_ids, desc="Reading metadata files"):
        path = os.path.join(metadata_path, f"{i_id}.json.gz")
        with fs.open(path, "rb") as f:
            with gzip.GzipFile(fileobj=f) as gfile:
                content = gfile.read()
                data = json.loads(content)

        # If specific uids were requested, keep only those entries.
        if uids is not None:
            data = {uid: data[uid] for uid in uids if uid in data}

        out.update(data)

    return out


def _load_object_paths(download_dir: str) -> Dict[str, str]:
    """Load the object paths from the dataset.

    The object paths specify where each object is located in the Hugging Face
    repo.

    Args:
        download_dir: The base directory to download the object-paths file to.

    Returns:
        A dictionary mapping the uid to the object path.
    """
    object_paths_file = "object-paths.json.gz"
    local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)

    # Download the uid -> path index once; subsequent calls reuse the cached file.
    fs, path = fsspec.core.url_to_fs(local_path)
    if not fs.exists(path):
        hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # Write to a temporary file and rename it afterwards so that an
        # interrupted download never leaves a partial file behind.
        tmp_path = f"{path}.tmp"
        with fs.open(tmp_path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())
        fs.rename(tmp_path, path)

    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            object_paths = json.loads(content)

    return object_paths


def load_uids(download_dir: str = "~/.objaverse") -> List[str]:
    """Load the uids from the dataset.

    Returns:
        A list of all the uids from the dataset.
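
    Example:
        A minimal usage sketch (the first call downloads the object-paths
        index, so it requires network access):

            uids = load_uids()
            subset = uids[:10]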
    """
    return list(_load_object_paths(download_dir=download_dir).keys())


def _download_object(
    uid: str,
    hf_object_path: str,
    download_dir: str,
) -> Tuple[str, str]:
    """Download the object for the given uid.

    Args:
        uid: The uid of the object to load.
        hf_object_path: The path to the object in the Hugging Face repo. Here,
            hf_object_path is the part that comes after "main" in the Hugging
            Face repo url:
            https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
        download_dir: The base directory to download the object to. Supports all
            file systems supported by fsspec.

    Returns:
        A tuple of the uid and the path to the downloaded object.
    """
    hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"

    filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
    fs, path = fsspec.core.url_to_fs(filename)

    # Write to a temporary file and rename it afterwards so that an
    # interrupted download never leaves a partial object file behind.
    fs.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = f"{path}.tmp"
    with fs.open(tmp_path, "wb") as file:
        with urllib.request.urlopen(hf_url) as response:
            file.write(response.read())
    fs.rename(tmp_path, path)

    return uid, filename


def _parallel_download_object(args):
    """Unpack a (uid, hf_object_path, download_dir) tuple for Pool.imap_unordered."""
    return _download_object(*args)


def load_objects(
    uids: List[str],
    download_processes: int = 1,
    download_dir: str = "~/.objaverse",
) -> Dict[str, str]:
    """Return the path to the object files for the given uids.

    If the object is not already downloaded, it will be downloaded.

    Args:
        uids: A list of uids.
        download_processes: The number of processes to use to download the objects.
        download_dir: The base directory to download the objects to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping each object uid to the local path of the
        downloaded object.
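
    Example:
        A minimal usage sketch (the uid is a placeholder; with more than one
        download process, call this from under an if __name__ == "__main__"
        guard):

            objects = load_objects(uids=["<uid>"], download_processes=4)
            local_path = objects["<uid>"]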
    """
    uids_set = set(uids)
    hf_object_paths = _load_object_paths(download_dir=download_dir)

    versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
    fs, path = fsspec.core.url_to_fs(versioned_dirname)

    # Find the objects that have already been downloaded by globbing the local
    # glbs directory and extracting the uid from each filename.
    existing_file_paths = fs.glob(
        os.path.join(path, "glbs", "*", "*.glb"), refresh=True
    )
    existing_uids = {
        file.split("/")[-1].split(".")[0]
        for file in existing_file_paths
        if file.endswith(".glb")
    }

    out = {}
    already_downloaded_uids = uids_set.intersection(existing_uids)
    for uid in already_downloaded_uids:
        hf_object_path = hf_object_paths[uid]
        fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
        out[uid] = fs_abs_object_path

    logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")

    # Resolve the remaining uids to their Hugging Face paths, skipping any
    # uid that does not appear in the index.
    remaining_uids = uids_set - existing_uids
    uids_to_download = []
    for uid in remaining_uids:
        if uid not in hf_object_paths:
            logger.error(f"Could not find object with uid {uid}. Skipping it.")
            continue
        uids_to_download.append((uid, hf_object_paths[uid]))

    logger.info(f"Downloading {len(uids_to_download)} new objects")

    if len(uids_to_download) == 0:
        return out

    if download_processes == 1:
        # Download sequentially in the current process.
        for uid, hf_object_path in tqdm(uids_to_download):
            uid, local_path = _download_object(
                uid=uid, hf_object_path=hf_object_path, download_dir=download_dir
            )
            out[uid] = local_path
    else:
        # Fan the downloads out across a pool of worker processes.
        args = [
            (uid, hf_object_path, download_dir)
            for uid, hf_object_path in uids_to_download
        ]
        with Pool(download_processes) as pool:
            new_object_downloads = list(
                tqdm(
                    pool.imap_unordered(_parallel_download_object, args),
                    total=len(args),
                )
            )
        for uid, local_path in new_object_downloads:
            out[uid] = local_path

    return out


def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
    """Load the LVIS annotations.

    If the annotations are not already downloaded, they will be downloaded.

    Args:
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping the LVIS category to the list of uids in that category.
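
    Example:
        A minimal usage sketch (the category name is a placeholder):

            lvis_annotations = load_lvis_annotations()
            uids = lvis_annotations["<category>"]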
    """
    hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"

    download_path = os.path.join(
        download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
    )

    fs, path = fsspec.core.url_to_fs(download_path)
    if not fs.exists(path):
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # Write to a temporary file and rename it afterwards, matching the
        # other download helpers, so an interrupted download never leaves a
        # partial file that fs.exists() would treat as complete.
        tmp_path = f"{path}.tmp"
        with fs.open(tmp_path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())
        fs.rename(tmp_path, path)

    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            data = json.loads(content)

    return data
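

if __name__ == "__main__":
    # A minimal end-to-end sketch, assuming network access and the default
    # download directory; the three-object subset is illustrative only.
    demo_uids = load_uids()[:3]
    demo_objects = load_objects(uids=demo_uids, download_processes=2)
    demo_annotations = load_annotations(uids=demo_uids)
    for demo_uid in demo_uids:
        print(demo_uid, demo_objects[demo_uid])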