"""Script to download objects from Objaverse 1.0."""
import gzip
import json
import os
import urllib.request
from multiprocessing import Pool
from typing import Any, Dict, List, Optional, Tuple
import fsspec
from loguru import logger
from tqdm import tqdm


def load_annotations(
    uids: Optional[List[str]] = None,
    download_dir: str = "~/.objaverse",
) -> Dict[str, Any]:
    """Load the full metadata of all objects in the dataset.

    Args:
        uids: A list of uids with which to load metadata. If None, it loads
            the metadata for all uids.
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary of the metadata for each object. The keys are the uids and the
        values are the metadata for that object.
    """
    # make the metadata dir if it doesn't exist
    metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
    fs, _ = fsspec.core.url_to_fs(metadata_path)
    fs.makedirs(metadata_path, exist_ok=True)

    # get the dir ids that need to be loaded if only downloading a subset of uids
    object_paths = _load_object_paths(download_dir=download_dir)
    dir_ids = (
        set([object_paths[uid].split("/")[1] for uid in uids])
        if uids is not None
        else set([f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)])
    )

    # get the existing metadata files
    existing_metadata_files = fs.glob(
        os.path.join(metadata_path, "*.json.gz"), refresh=True
    )
    existing_dir_ids = set(
        [
            file.split("/")[-1].split(".")[0]
            for file in existing_metadata_files
            if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
        ]
    )
    downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
    logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")

    # download the metadata from the missing dir_ids
    dir_ids_to_download = dir_ids - existing_dir_ids
    logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")

    # download the metadata file if it doesn't exist
    if len(dir_ids_to_download) > 0:
        for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
            # get the path to the json file
            path = os.path.join(metadata_path, f"{i_id}.json.gz")

            # get the url to the remote json file
            hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"

            # download the file to a tmp path to avoid partial downloads on interruption
            tmp_path = f"{path}.tmp"
            with fs.open(tmp_path, "wb") as f:
                with urllib.request.urlopen(hf_url) as response:
                    f.write(response.read())
            fs.rename(tmp_path, path)

    out = {}
    for i_id in tqdm(dir_ids, desc="Reading metadata files"):
        # get the path to the json file
        path = os.path.join(metadata_path, f"{i_id}.json.gz")

        # read the json file of the metadata chunk
        with fs.open(path, "rb") as f:
            with gzip.GzipFile(fileobj=f) as gfile:
                content = gfile.read()
                data = json.loads(content)

        # filter the data to only include the uids we want
        if uids is not None:
            data = {uid: data[uid] for uid in uids if uid in data}

        # add the data to the out dict
        out.update(data)

    return out

# Example usage (commented out so importing this module does not trigger downloads):
# annotations = load_annotations(download_dir="~/.objaverse-temp-400")
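
# A minimal sketch of loading metadata for a subset of objects. The uid below is a
# placeholder, not a real Objaverse uid, and the "name" key is only illustrative of
# the kind of fields the metadata may contain:
#
#     annotations = load_annotations(uids=["0000-0000-example-uid"])
#     print(annotations["0000-0000-example-uid"].get("name"))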


def _load_object_paths(download_dir: str) -> Dict[str, str]:
    """Load the object paths from the dataset.

    The object paths specify where each object is located in the Hugging Face repo.

    Returns:
        A dictionary mapping the uid to the object path.
    """
    object_paths_file = "object-paths.json.gz"
    local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)

    # download the object_paths file if it doesn't exist
    fs, path = fsspec.core.url_to_fs(local_path)
    if not fs.exists(path):
        hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # download the file to a tmp path to avoid partial downloads on interruption
        tmp_path = f"{path}.tmp"
        with fs.open(tmp_path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())
        fs.rename(tmp_path, path)

    # read the object_paths
    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            object_paths = json.loads(content)

    return object_paths
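
# Illustrative shape of the mapping returned by _load_object_paths(). The uid and
# shard directory below are made up, but the "glbs/<shard>/<uid>.glb" layout is what
# load_annotations() and load_objects() assume when they split the path on "/" and
# glob for *.glb files:
#
#     {
#         "0000-0000-example-uid": "glbs/000-023/0000-0000-example-uid.glb",
#         ...
#     }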


def load_uids(download_dir: str = "~/.objaverse") -> List[str]:
    """Load the uids from the dataset.

    Returns:
        A list of all the UIDs from the dataset.
    """
    return list(_load_object_paths(download_dir=download_dir).keys())
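
# A minimal usage sketch (commented out; `random` is only needed for this example):
#
#     import random
#     uids = load_uids()
#     sampled_uids = random.sample(uids, 5)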


def _download_object(
    uid: str,
    hf_object_path: str,
    download_dir: str,
) -> Tuple[str, str]:
    """Download the object for the given uid.

    Args:
        uid: The uid of the object to load.
        hf_object_path: The path to the object in the Hugging Face repo. Here, hf_object_path
            is the part that comes after "main" in the Hugging Face repo url:
            https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
        download_dir: The base directory to download the object to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A tuple of the uid and the local path to which the object was downloaded.
    """
    hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"
    filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
    fs, path = fsspec.core.url_to_fs(filename)

    # download the file
    fs.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = f"{path}.tmp"
    with fs.open(tmp_path, "wb") as file:
        with urllib.request.urlopen(hf_url) as response:
            file.write(response.read())
    fs.rename(tmp_path, path)

    return uid, filename
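
# Illustrative call (the hf_object_path below is made up, but follows the layout
# recorded in object-paths.json.gz):
#
#     uid, local_path = _download_object(
#         uid="0000-0000-example-uid",
#         hf_object_path="glbs/000-023/0000-0000-example-uid.glb",
#         download_dir="~/.objaverse",
#     )
#     # local_path -> "~/.objaverse/hf-objaverse-v1/glbs/000-023/0000-0000-example-uid.glb"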


def _parallel_download_object(args):
    # workaround since starmap doesn't work well with tqdm
    return _download_object(*args)


def load_objects(
    uids: List[str],
    download_processes: int = 1,
    download_dir: str = "~/.objaverse",
) -> Dict[str, str]:
    """Return the path to the object files for the given uids.

    If the object is not already downloaded, it will be downloaded.

    Args:
        uids: A list of uids.
        download_processes: The number of processes to use to download the objects.
        download_dir: The base directory to download the objects to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping the object uid to the local path to which the object
        was downloaded.
    """
    uids_set = set(uids)
    hf_object_paths = _load_object_paths(download_dir=download_dir)

    versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
    fs, path = fsspec.core.url_to_fs(versioned_dirname)

    # Get the existing file paths. This is much faster than calling fs.exists() for
    # each file. `glob()` is like walk, but returns a list of files instead of the
    # nested directory structure. glob() is also faster than find() / walk() since it
    # doesn't need to traverse the entire directory structure.
    existing_file_paths = fs.glob(
        os.path.join(path, "glbs", "*", "*.glb"), refresh=True
    )
    existing_uids = set(
        [
            file.split("/")[-1].split(".")[0]
            for file in existing_file_paths
            if file.endswith(".glb")  # note partial files end with .glb.tmp
        ]
    )

    # add the existing downloaded uids to the return dict
    out = {}
    already_downloaded_uids = uids_set.intersection(existing_uids)
    for uid in already_downloaded_uids:
        hf_object_path = hf_object_paths[uid]
        fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
        out[uid] = fs_abs_object_path
    logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")

    # get the uids that need to be downloaded
    remaining_uids = uids_set - existing_uids
    uids_to_download = []
    for uid in remaining_uids:
        if uid not in hf_object_paths:
            logger.error(f"Could not find object with uid {uid}. Skipping it.")
            continue
        uids_to_download.append((uid, hf_object_paths[uid]))
    logger.info(f"Downloading {len(uids_to_download)} new objects")

    # check if all objects are already downloaded
    if len(uids_to_download) == 0:
        return out

    if download_processes == 1:
        # iteratively download the objects
        for uid, hf_object_path in tqdm(uids_to_download):
            uid, local_path = _download_object(
                uid=uid, hf_object_path=hf_object_path, download_dir=download_dir
            )
            out[uid] = local_path
    else:
        args = [
            (uid, hf_object_path, download_dir)
            for uid, hf_object_path in uids_to_download
        ]

        # download the objects in parallel
        with Pool(download_processes) as pool:
            new_object_downloads = list(
                tqdm(
                    pool.imap_unordered(_parallel_download_object, args),
                    total=len(args),
                )
            )
        for uid, local_path in new_object_downloads:
            out[uid] = local_path

    return out
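
# A minimal usage sketch (commented out): download a handful of objects with several
# worker processes. The uids are placeholders; note that multiprocessing.Pool needs
# this to run under an `if __name__ == "__main__":` guard on platforms that use the
# spawn start method (e.g. Windows and recent macOS):
#
#     if __name__ == "__main__":
#         objects = load_objects(
#             uids=["0000-0000-example-uid", "1111-1111-example-uid"],
#             download_processes=4,
#         )
#         for uid, local_path in objects.items():
#             print(uid, local_path)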


def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
    """Load the LVIS annotations.

    If the annotations are not already downloaded, they will be downloaded.

    Args:
        download_dir: The base directory to download the annotations to. Supports all
            file systems supported by fsspec. Defaults to "~/.objaverse".

    Returns:
        A dictionary mapping the LVIS category to the list of uids in that category.
    """
    hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
    download_path = os.path.join(
        download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
    )

    # use fsspec
    fs, path = fsspec.core.url_to_fs(download_path)
    if not fs.exists(path):
        # make dir if it doesn't exist
        fs.makedirs(os.path.dirname(path), exist_ok=True)

        # download the file
        with fs.open(path, "wb") as f:
            with urllib.request.urlopen(hf_url) as response:
                f.write(response.read())

    # load the gzip file
    with fs.open(path, "rb") as f:
        with gzip.GzipFile(fileobj=f) as gfile:
            content = gfile.read()
            data = json.loads(content)

    return data
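
# A minimal usage sketch (commented out): fetch the LVIS category -> uids mapping and
# download the objects for one category. "chair" is assumed to be a valid LVIS
# category name for illustration only:
#
#     lvis_annotations = load_lvis_annotations()
#     chair_uids = lvis_annotations.get("chair", [])
#     chair_objects = load_objects(uids=chair_uids)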