mattdeitke
committed on
Commit
•
5d96618
1
Parent(s):
ce26753
fix lint issues
Browse files- objaverse_xl/github.py +0 -1
- objaverse_xl/objaverse_v1.py +6 -6
- objaverse_xl/thingiverse.py +5 -5
objaverse_xl/github.py
CHANGED
@@ -59,7 +59,6 @@ import hashlib
|
|
59 |
import json
|
60 |
import multiprocessing
|
61 |
import os
|
62 |
-
|
63 |
import shutil
|
64 |
import subprocess
|
65 |
import tarfile
|
|
|
59 |
import json
|
60 |
import multiprocessing
|
61 |
import os
|
|
|
62 |
import shutil
|
63 |
import subprocess
|
64 |
import tarfile
|
objaverse_xl/objaverse_v1.py
CHANGED
@@ -13,7 +13,7 @@ from tqdm import tqdm
|
|
13 |
def load_annotations(
|
14 |
uids: Optional[List[str]] = None,
|
15 |
download_dir: str = "~/.objaverse",
|
16 |
-
) ->
|
17 |
"""Load the full metadata of all objects in the dataset.
|
18 |
|
19 |
Args:
|
@@ -23,8 +23,8 @@ def load_annotations(
|
|
23 |
file systems supported by fsspec. Defaults to "~/.objaverse".
|
24 |
|
25 |
Returns:
|
26 |
-
A
|
27 |
-
|
28 |
"""
|
29 |
# make the metadata dir if it doesn't exist
|
30 |
metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
|
@@ -197,7 +197,7 @@ def load_objects(
|
|
197 |
A dictionary mapping the object uid to the local path of where the object
|
198 |
downloaded.
|
199 |
"""
|
200 |
-
|
201 |
hf_object_paths = _load_object_paths(download_dir=download_dir)
|
202 |
|
203 |
versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
|
@@ -220,7 +220,7 @@ def load_objects(
|
|
220 |
|
221 |
# add the existing downloaded uids to the return dict
|
222 |
out = {}
|
223 |
-
already_downloaded_uids =
|
224 |
for uid in already_downloaded_uids:
|
225 |
hf_object_path = hf_object_paths[uid]
|
226 |
fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
|
@@ -229,7 +229,7 @@ def load_objects(
|
|
229 |
logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")
|
230 |
|
231 |
# get the uids that need to be downloaded
|
232 |
-
remaining_uids =
|
233 |
uids_to_download = []
|
234 |
for uid in remaining_uids:
|
235 |
if uid not in hf_object_paths:
|
|
|
13 |
def load_annotations(
|
14 |
uids: Optional[List[str]] = None,
|
15 |
download_dir: str = "~/.objaverse",
|
16 |
+
) -> Dict[str, Any]:
|
17 |
"""Load the full metadata of all objects in the dataset.
|
18 |
|
19 |
Args:
|
|
|
23 |
file systems supported by fsspec. Defaults to "~/.objaverse".
|
24 |
|
25 |
Returns:
|
26 |
+
A dictionary of the metadata for each object. The keys are the uids and the
|
27 |
+
values are the metadata for that object.
|
28 |
"""
|
29 |
# make the metadata dir if it doesn't exist
|
30 |
metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
|
|
|
197 |
A dictionary mapping the object uid to the local path of where the object
|
198 |
downloaded.
|
199 |
"""
|
200 |
+
uids_set = set(uids)
|
201 |
hf_object_paths = _load_object_paths(download_dir=download_dir)
|
202 |
|
203 |
versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
|
|
|
220 |
|
221 |
# add the existing downloaded uids to the return dict
|
222 |
out = {}
|
223 |
+
already_downloaded_uids = uids_set.intersection(existing_uids)
|
224 |
for uid in already_downloaded_uids:
|
225 |
hf_object_path = hf_object_paths[uid]
|
226 |
fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
|
|
|
229 |
logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")
|
230 |
|
231 |
# get the uids that need to be downloaded
|
232 |
+
remaining_uids = uids_set - existing_uids
|
233 |
uids_to_download = []
|
234 |
for uid in remaining_uids:
|
235 |
if uid not in hf_object_paths:
|
objaverse_xl/thingiverse.py
CHANGED
@@ -2,7 +2,7 @@ import multiprocessing
|
|
2 |
import os
|
3 |
import time
|
4 |
from multiprocessing import Pool
|
5 |
-
from typing import Dict, List, Optional
|
6 |
|
7 |
import fsspec
|
8 |
import pandas as pd
|
@@ -42,7 +42,7 @@ def _get_response_with_retries(
|
|
42 |
return response
|
43 |
|
44 |
|
45 |
-
def _download_item(item: Dict[str, str], download_dir: str) ->
|
46 |
"""Download the given item.
|
47 |
|
48 |
Args:
|
@@ -79,7 +79,7 @@ def _download_item(item: Dict[str, str], download_dir: str) -> None:
|
|
79 |
return file_path
|
80 |
|
81 |
|
82 |
-
def _parallel_download_item(args) -> Optional[str]:
|
83 |
item, download_dir = args
|
84 |
download_path = _download_item(item=item, download_dir=download_dir)
|
85 |
return item, download_path
|
@@ -111,9 +111,9 @@ def download_thingiverse_objects(
|
|
111 |
|
112 |
# get the records of the specified fileIds
|
113 |
df = load_annotations(download_dir=download_dir)
|
114 |
-
file_ids = set(file_ids)
|
115 |
if file_ids is not None:
|
116 |
-
|
|
|
117 |
things_and_files = df.to_dict(orient="records")
|
118 |
|
119 |
# create the download directory
|
|
|
2 |
import os
|
3 |
import time
|
4 |
from multiprocessing import Pool
|
5 |
+
from typing import Any, Dict, List, Optional, Tuple
|
6 |
|
7 |
import fsspec
|
8 |
import pandas as pd
|
|
|
42 |
return response
|
43 |
|
44 |
|
45 |
+
def _download_item(item: Dict[str, str], download_dir: str) -> Optional[str]:
|
46 |
"""Download the given item.
|
47 |
|
48 |
Args:
|
|
|
79 |
return file_path
|
80 |
|
81 |
|
82 |
+
def _parallel_download_item(args) -> Tuple[Any, Optional[str]]:
|
83 |
item, download_dir = args
|
84 |
download_path = _download_item(item=item, download_dir=download_dir)
|
85 |
return item, download_path
|
|
|
111 |
|
112 |
# get the records of the specified fileIds
|
113 |
df = load_annotations(download_dir=download_dir)
|
|
|
114 |
if file_ids is not None:
|
115 |
+
file_ids_set = set(file_ids)
|
116 |
+
df = df[df["fileId"].isin(file_ids_set)]
|
117 |
things_and_files = df.to_dict(orient="records")
|
118 |
|
119 |
# create the download directory
|