"""Script to download objects from Thingiverse."""
import multiprocessing
import os
import time
from multiprocessing import Pool
from typing import Any, Dict, List, Optional, Tuple
import fsspec
import pandas as pd
import requests
from loguru import logger
from tqdm import tqdm
def _get_response_with_retries(
url: str, max_retries: int = 3, retry_delay: int = 5
) -> Optional[requests.models.Response]:
"""Get a response from a URL with retries.
Args:
url (str): The URL to get a response from.
max_retries (int, optional): The maximum number of retries. Defaults to 3.
retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.
Returns:
Optional[requests.models.Response]: The response from the URL. If there was an error, returns None.
"""
    for i in range(max_retries):
        try:
            response = requests.get(url, stream=True)
            # 200 (success) and 404 (not found) are terminal; treat any other
            # status code as transient, so wait and retry
            if response.status_code not in {200, 404}:
                time.sleep(retry_delay)
                continue
            break
        except requests.exceptions.ConnectionError:
            if i < max_retries - 1:  # i.e. not on the last try
                time.sleep(retry_delay)
            else:
                return None
    return response
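

# A minimal usage sketch (the file ID in the URL is a made-up placeholder):
#
#   response = _get_response_with_retries("https://www.thingiverse.com/download:12345")
#   if response is not None and response.status_code == 200:
#       stl_bytes = response.content

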
def _download_item(item: Dict[str, str], download_dir: str) -> Optional[str]:
"""Download the given item.
Args:
item (Dict[str, str]): The item to download. It should be a dictionary with the
keys "thingId" and "fileId".
download_dir (str, optional): The directory to save the files to. Supports all
file systems supported by fsspec.
Returns:
Optional[str]: The path to the downloaded file. If there was an error or 404,
returns None.
"""
file_id = item["fileId"]
thing_id = item["thingId"]
url = f"https://www.thingiverse.com/download:{file_id}"
response = _get_response_with_retries(url)
if response is None:
logger.error(f"{file_id=} Could not get response from {url}")
return None
# Check if the request was successful
if response.status_code == 404:
logger.error(f"{file_id=} (404) Could not find file with ID")
return None
file_path = os.path.join(download_dir, f"thing-{thing_id}-file-{file_id}.stl")
fs, path = fsspec.core.url_to_fs(file_path)
with fs.open(path, "wb") as file:
file.write(response.content)
return file_path
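

# A minimal usage sketch, assuming a record from load_annotations(); both IDs
# below are made-up placeholders:
#
#   file_path = _download_item(
#       item={"thingId": "1234", "fileId": "5678"},
#       download_dir="~/.objaverse/thingiverse",
#   )
#   # file_path is None on failure, otherwise ends in "thing-1234-file-5678.stl"

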
def _parallel_download_item(
    args: Tuple[Dict[str, str], str]
) -> Tuple[Any, Optional[str]]:
    """Unpack (item, download_dir) so _download_item can be used with Pool.imap."""
    item, download_dir = args
    download_path = _download_item(item=item, download_dir=download_dir)
    return item, download_path


def download_thingiverse_objects(
file_ids: Optional[List[str]] = None,
processes: Optional[int] = None,
download_dir: str = "~/.objaverse",
) -> List[Dict[str, str]]:
"""Download the objects from the given list of things and files.
Args:
file_ids (Optional[List[str]]): The list of file IDs to download. If None,
downloads all files. Defaults to None.
processes (int, optional): The number of processes to use. If None, maps to
use all available CPUs using multiprocessing.cpu_count(). Defaults to None.
download_dir (str, optional): The directory to save the files to. Supports all
file systems supported by fsspec. Defaults to "~/.objaverse-xl".
Returns:
List[Dict[str, str]]: The list of things and files that were downloaded. Each
item in the list is a dictionary with the keys "thingId", "fileId",
"filePath", and everything else from the annotations. If the file was
not successfully downloaded, the item will not appear in the list.
"""
if processes is None:
processes = multiprocessing.cpu_count()
# get the records of the specified fileIds
df = load_annotations(download_dir=download_dir)
if file_ids is not None:
file_ids_set = set(file_ids)
df = df[df["fileId"].isin(file_ids_set)]
things_and_files = df.to_dict(orient="records")
# create the download directory
download_dir = os.path.join(download_dir, "thingiverse")
fs, path = fsspec.core.url_to_fs(download_dir)
fs.makedirs(path, exist_ok=True)
# check to filter out files that already exist
existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
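    # downloaded files are named "thing-<thingId>-file-<fileId>.stl", so the
    # trailing "-"-separated token of the filename stem is the fileId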
existing_file_ids = {
os.path.basename(file).split(".")[0].split("-")[-1] for file in existing_files
}
# filter out existing files
items_to_download = []
already_downloaded_count = 0
out = []
for item in things_and_files:
if item["fileId"] in existing_file_ids:
already_downloaded_count += 1
out.append(
{
"filePath": os.path.join(
download_dir,
f"thing-{item['thingId']}-file-{item['fileId']}.stl",
),
**item,
}
)
else:
items_to_download.append(item)
logger.info(f"Found {already_downloaded_count} Thingiverse objects downloaded")
logger.info(
f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
)
if len(items_to_download) == 0:
return out
# download the files
if processes == 1:
for item in tqdm(items_to_download):
file_path = _download_item(item=item, download_dir=download_dir)
if file_path is not None:
out.append(
{
"filePath": file_path,
**item,
}
)
else:
args = [(item, download_dir) for item in items_to_download]
with Pool(processes=processes) as pool:
items_and_file_paths = list(
tqdm(
pool.imap(_parallel_download_item, args),
total=len(args),
desc="Downloading Thingiverse Objects",
)
)
out.extend(
[
{
"filePath": file_path,
**item,
}
for item, file_path in items_and_file_paths
if file_path is not None
]
)
return out
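

# A minimal usage sketch (the file IDs are made-up placeholders):
#
#   results = download_thingiverse_objects(file_ids=["12345", "67890"], processes=4)
#   for result in results:
#       print(result["fileId"], result["filePath"])

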
def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
    """Load the annotations from the given directory.

    Args:
        download_dir (str, optional): The directory to load the annotations from.
            Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse".

    Returns:
        pd.DataFrame: The annotations, which include the columns "thingId",
            "fileId", "filename", and "license".
    """
remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
download_path = os.path.join(
download_dir, "thingiverse", "thingiverse-objects.parquet"
)
fs, path = fsspec.core.url_to_fs(download_path)
if not fs.exists(path):
fs.makedirs(os.path.dirname(path), exist_ok=True)
logger.info(f"Downloading {remote_url} to {download_path}")
response = requests.get(remote_url)
response.raise_for_status()
with fs.open(path, "wb") as file:
file.write(response.content)
# read the file with pandas and fsspec
with fs.open(download_path, "rb") as f:
annotations_df = pd.read_parquet(f)
return annotations_df
# if __name__ == "__main__":
# # example usage
# annotations = load_annotations()
# file_ids = annotations.head(n=100)["fileId"].tolist()
# download_thingiverse_objects(file_ids=file_ids, processes=5)