import multiprocessing
import os
import time
from multiprocessing import Pool
from typing import Dict, List, Optional, Tuple

import fsspec
import pandas as pd
import requests
from loguru import logger
from tqdm import tqdm


def _get_response_with_retries(
    url: str, max_retries: int = 3, retry_delay: int = 5
) -> Optional[requests.models.Response]:
    """Get a response from a URL with retries.

    Args:
        url (str): The URL to get a response from.
        max_retries (int, optional): The maximum number of retries. Defaults to 3.
        retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.

    Returns:
        Optional[requests.models.Response]: The response from the URL. If there
            was an error, returns None.
    """
    response = None
    for i in range(max_retries):
        try:
            response = requests.get(url, stream=True)
            # if successful (or a definitive 404), break out of the loop
            if response.status_code not in {200, 404}:
                time.sleep(retry_delay)
                continue
            break
        except requests.exceptions.ConnectionError:
            if i < max_retries - 1:  # i.e., not on the last try
                time.sleep(retry_delay)
            else:
                return None

    return response


def _download_item(item: Dict[str, str], download_dir: str) -> Optional[str]:
    """Download the given item.

    Args:
        item (Dict[str, str]): The item to download. It should be a dictionary
            with the keys "thingId" and "fileId".
        download_dir (str): The directory to save the files to. Supports all
            file systems supported by fsspec.

    Returns:
        Optional[str]: The path to the downloaded file. If there was an error
            or a 404, returns None.
    """
    file_id = item["fileId"]
    thing_id = item["thingId"]

    url = f"https://www.thingiverse.com/download:{file_id}"
    response = _get_response_with_retries(url)
    if response is None:
        logger.error(f"{file_id=} Could not get response from {url}")
        return None

    # Check if the request was successful
    if response.status_code == 404:
        logger.error(f"{file_id=} (404) Could not find file with ID")
        return None
    if response.status_code != 200:
        logger.error(f"{file_id=} ({response.status_code}) Could not download file")
        return None

    file_path = os.path.join(download_dir, f"thing-{thing_id}-file-{file_id}.stl")
    fs, path = fsspec.core.url_to_fs(file_path)
    with fs.open(path, "wb") as file:
        file.write(response.content)

    return file_path


def _parallel_download_item(
    args: Tuple[Dict[str, str], str]
) -> Tuple[Dict[str, str], Optional[str]]:
    """Worker wrapper so Pool.imap can pass (item, download_dir) as one argument."""
    item, download_dir = args
    download_path = _download_item(item=item, download_dir=download_dir)
    return item, download_path


def download_thingiverse_objects(
    file_ids: Optional[List[str]] = None,
    processes: Optional[int] = None,
    download_dir: str = "~/.objaverse",
) -> List[Dict[str, str]]:
    """Download the objects from the given list of things and files.

    Args:
        file_ids (Optional[List[str]]): The list of file IDs to download. If
            None, downloads all files. Defaults to None.
        processes (Optional[int]): The number of processes to use. If None,
            uses all available CPUs via multiprocessing.cpu_count(). Defaults
            to None.
        download_dir (str, optional): The directory to save the files to.
            Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse".

    Returns:
        List[Dict[str, str]]: The list of things and files that were
            downloaded. Each item in the list is a dictionary with the keys
            "thingId", "fileId", "filePath", and everything else from the
            annotations.
    """
    if processes is None:
        processes = multiprocessing.cpu_count()

    # get the records of the specified fileIds
    df = load_annotations(download_dir=download_dir)
    if file_ids is not None:
        file_ids = set(file_ids)
        df = df[df["fileId"].isin(file_ids)]
    things_and_files = df.to_dict(orient="records")

    # create the download directory
    download_dir = os.path.join(download_dir, "thingiverse")
    fs, path = fsspec.core.url_to_fs(download_dir)
    fs.makedirs(path, exist_ok=True)

    # filter out files that already exist on disk; the fileId is the last
    # "-"-separated token of the "thing-{thingId}-file-{fileId}.stl" filename
    existing_files = fs.glob(os.path.join(path, "*.stl"), refresh=True)
    existing_file_ids = {
        os.path.basename(file).split(".")[0].split("-")[-1]
        for file in existing_files
    }

    items_to_download = []
    already_downloaded_count = 0
    out = []
    for item in things_and_files:
        # compare as strings, since IDs parsed from filenames are strings
        if str(item["fileId"]) in existing_file_ids:
            already_downloaded_count += 1
            out.append(
                {
                    "filePath": os.path.join(
                        download_dir,
                        f"thing-{item['thingId']}-file-{item['fileId']}.stl",
                    ),
                    **item,
                }
            )
        else:
            items_to_download.append(item)

    logger.info(
        f"Found {already_downloaded_count} Thingiverse objects already downloaded"
    )
    logger.info(
        f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
    )

    if len(items_to_download) == 0:
        return out

    # download the files
    if processes == 1:
        for item in tqdm(items_to_download):
            file_path = _download_item(item=item, download_dir=download_dir)
            out.append({"filePath": file_path, **item})
    else:
        args = [(item, download_dir) for item in items_to_download]
        with Pool(processes=processes) as pool:
            items_and_file_paths = list(
                tqdm(
                    pool.imap(_parallel_download_item, args),
                    total=len(args),
                    desc="Downloading Thingiverse Objects",
                )
            )
        out.extend(
            {"filePath": file_path, **item}
            for item, file_path in items_and_file_paths
        )

    return out


def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
    """Load the annotations from the given directory.

    Args:
        download_dir (str, optional): The directory to load the annotations
            from. Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse".

    Returns:
        pd.DataFrame: The annotations, which include the columns "thingId",
            "fileId", "filename", and "license".
    """
    remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
    download_path = os.path.join(
        download_dir, "thingiverse", "thingiverse-objects.parquet"
    )
    fs, path = fsspec.core.url_to_fs(download_path)

    if not fs.exists(path):
        fs.makedirs(os.path.dirname(path), exist_ok=True)
        logger.info(f"Downloading {remote_url} to {download_path}")
        response = requests.get(remote_url)
        response.raise_for_status()
        with fs.open(path, "wb") as file:
            file.write(response.content)

    # read the file with pandas and fsspec
    with fs.open(path, "rb") as f:
        annotations_df = pd.read_parquet(f)

    return annotations_df


if __name__ == "__main__":
    # example usage
    annotations = load_annotations()
    file_ids = annotations.head(n=100)["fileId"].tolist()
    download_thingiverse_objects(file_ids=file_ids, processes=5)