mattdeitke committed
Commit 329af77
1 Parent(s): 5759f85

update thingiverse with abstract class

Files changed (1):
  1. objaverse_xl/thingiverse.py  +352 -205
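In short, the commit replaces the module-level functions (load_annotations, download_thingiverse_objects) with a ThingiverseDownloader class that implements the ObjaverseSource interface, and routes downloads through SHA256 checks and the handle_found_object / handle_modified_object / handle_missing_object callbacks. A minimal usage sketch of the new API follows; it assumes the import path matches this repo's package layout and that the annotations parquet carries the "fileIdentifier" and "sha256" columns that download_objects reads.

# Hypothetical usage sketch (not part of this commit).
from objaverse_xl.thingiverse import ThingiverseDownloader

downloader = ThingiverseDownloader()

# Fetches and caches thingiverse-objects.parquet under download_dir.
annotations = downloader.load_annotations(download_dir="~/.objaverse")

# Download a small sample; returns {fileIdentifier: local .stl path} for the
# objects that were downloaded successfully.
paths = downloader.download_objects(
    objects=annotations.head(n=100),  # assumes "fileIdentifier" and "sha256" columns
    processes=5,
    download_dir="~/.objaverse",
)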
objaverse_xl/thingiverse.py CHANGED
@@ -4,7 +4,10 @@ import multiprocessing
  import os
  import time
  from multiprocessing import Pool
- from typing import Any, Dict, List, Optional, Tuple

  import fsspec
  import pandas as pd
@@ -13,217 +16,361 @@ from loguru import logger
  from tqdm import tqdm


- def _get_response_with_retries(
-     url: str, max_retries: int = 3, retry_delay: int = 5
- ) -> Optional[requests.models.Response]:
-     """Get a response from a URL with retries.
-
-     Args:
-         url (str): The URL to get a response from.
-         max_retries (int, optional): The maximum number of retries. Defaults to 3.
-         retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.
-
-     Returns:
-         Optional[requests.models.Response]: The response from the URL. If there was an error, returns None.
-     """
-
-     for i in range(max_retries):
-         try:
-             response = requests.get(url, stream=True)
-             # if successful, break out of loop
-             if response.status_code not in {200, 404}:
-                 time.sleep(retry_delay)
-                 continue
-             break
-         except ConnectionError:
-             if i < max_retries - 1:  # i.e. not on the last try
-                 time.sleep(retry_delay)
-             else:
-                 return None
-
-     return response
-
-
- def _download_item(item: Dict[str, str], download_dir: str) -> Optional[str]:
-     """Download the given item.
-
-     Args:
-         item (Dict[str, str]): The item to download. It should be a dictionary with the
-             keys "thingId" and "fileId".
-         download_dir (str, optional): The directory to save the files to. Supports all
-             file systems supported by fsspec.
-
-     Returns:
-         Optional[str]: The path to the downloaded file. If there was an error or 404,
-             returns None.
-     """
-     file_id = item["fileId"]
-     thing_id = item["thingId"]
-
-     url = f"https://www.thingiverse.com/download:{file_id}"
-     response = _get_response_with_retries(url)
-
-     if response is None:
-         logger.error(f"{file_id=} Could not get response from {url}")
-         return None
-
-     # Check if the request was successful
-     if response.status_code == 404:
-         logger.error(f"{file_id=} (404) Could not find file with ID")
-         return None
-
-     file_path = os.path.join(download_dir, f"thing-{thing_id}-file-{file_id}.stl")
-     fs, path = fsspec.core.url_to_fs(file_path)
-
-     with fs.open(path, "wb") as file:
-         file.write(response.content)
-
-     return file_path
-
-
- def _parallel_download_item(args) -> Tuple[Any, Optional[str]]:
-     item, download_dir = args
-     download_path = _download_item(item=item, download_dir=download_dir)
-     return item, download_path
-
-
- def download_thingiverse_objects(
-     file_ids: Optional[List[str]] = None,
-     processes: Optional[int] = None,
-     download_dir: str = "~/.objaverse",
- ) -> List[Dict[str, str]]:
-     """Download the objects from the given list of things and files.
-
-     Args:
-         file_ids (Optional[List[str]]): The list of file IDs to download. If None,
-             downloads all files. Defaults to None.
-         processes (int, optional): The number of processes to use. If None, maps to
-             use all available CPUs using multiprocessing.cpu_count(). Defaults to None.
-         download_dir (str, optional): The directory to save the files to. Supports all
-             file systems supported by fsspec. Defaults to "~/.objaverse-xl".
-
-     Returns:
-         List[Dict[str, str]]: The list of things and files that were downloaded. Each
-             item in the list is a dictionary with the keys "thingId", "fileId",
-             "filePath", and everything else from the annotations. If the file was
-             not successfully downloaded, the item will not appear in the list.
-     """
-     if processes is None:
-         processes = multiprocessing.cpu_count()
-
-     # get the records of the specified fileIds
-     df = load_annotations(download_dir=download_dir)
-     if file_ids is not None:
-         file_ids_set = set(file_ids)
-         df = df[df["fileId"].isin(file_ids_set)]
-     things_and_files = df.to_dict(orient="records")
-
-     # create the download directory
-     download_dir = os.path.join(download_dir, "thingiverse")
-     fs, path = fsspec.core.url_to_fs(download_dir)
-     fs.makedirs(path, exist_ok=True)
-
-     # check to filter out files that already exist
-     existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
-     existing_file_ids = {
-         os.path.basename(file).split(".")[0].split("-")[-1] for file in existing_files
-     }
-
-     # filter out existing files
-     items_to_download = []
-     already_downloaded_count = 0
-     out = []
-     for item in things_and_files:
-         if item["fileId"] in existing_file_ids:
-             already_downloaded_count += 1
-             out.append(
-                 {
-                     "filePath": os.path.join(
-                         download_dir,
-                         f"thing-{item['thingId']}-file-{item['fileId']}.stl",
-                     ),
-                     **item,
-                 }
              )
          else:
-             items_to_download.append(item)

-     logger.info(f"Found {already_downloaded_count} Thingiverse objects downloaded")
-     logger.info(
-         f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
-     )
-     if len(items_to_download) == 0:
-         return out

-     # download the files
-     if processes == 1:
-         for item in tqdm(items_to_download):
-             file_path = _download_item(item=item, download_dir=download_dir)
-             if file_path is not None:
-                 out.append(
-                     {
-                         "filePath": file_path,
-                         **item,
-                     }
-                 )
-     else:
-         args = [(item, download_dir) for item in items_to_download]
          with Pool(processes=processes) as pool:
-             items_and_file_paths = list(
                  tqdm(
-                     pool.imap(_parallel_download_item, args),
                      total=len(args),
                      desc="Downloading Thingiverse Objects",
                  )
              )
-             out.extend(
-                 [
-                     {
-                         "filePath": file_path,
-                         **item,
-                     }
-                     for item, file_path in items_and_file_paths
-                     if file_path is not None
-                 ]
-             )
-     return out
-
-
- def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
-     """Load the annotations from the given directory.
-
-     Args:
-         download_dir (str, optional): The directory to load the annotations from.
-             Supports all file systems supported by fsspec. Defaults to
-             "~/.objaverse".
-
-     Returns:
-         pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
-             "filename", and "license".
-     """
-     remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
-     download_path = os.path.join(
-         download_dir, "thingiverse", "thingiverse-objects.parquet"
-     )
-     fs, path = fsspec.core.url_to_fs(download_path)
-
-     if not fs.exists(path):
-         fs.makedirs(os.path.dirname(path), exist_ok=True)
-         logger.info(f"Downloading {remote_url} to {download_path}")
-         response = requests.get(remote_url)
-         response.raise_for_status()
-         with fs.open(path, "wb") as file:
-             file.write(response.content)
-
-     # read the file with pandas and fsspec
-     with fs.open(download_path, "rb") as f:
-         annotations_df = pd.read_parquet(f)
-
-     return annotations_df
-
-
- # if __name__ == "__main__":
- #     # example usage
- #     annotations = load_annotations()
- #     file_ids = annotations.head(n=100)["fileId"].tolist()
- #     download_thingiverse_objects(file_ids=file_ids, processes=5)
 
  import os
  import time
  from multiprocessing import Pool
+ from typing import Dict, Optional, Tuple, Callable
+ import tempfile
+ from objaverse_xl.utils import get_file_hash
+ from objaverse_xl.abstract import ObjaverseSource

  import fsspec
  import pandas as pd

  from tqdm import tqdm


+ class ThingiverseDownloader(ObjaverseSource):
+     """Script to download objects from Thingiverse."""
+
+     def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
+         """Load the annotations from the given directory.
+
+         Args:
+             download_dir (str, optional): The directory to load the annotations from.
+                 Supports all file systems supported by fsspec. Defaults to
+                 "~/.objaverse".
+
+         Returns:
+             pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
+                 "filename", and "license".
+         """
+         remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
+         download_path = os.path.join(
+             download_dir, "thingiverse", "thingiverse-objects.parquet"
+         )
+         fs, path = fsspec.core.url_to_fs(download_path)
+
+         if not fs.exists(path):
+             fs.makedirs(os.path.dirname(path), exist_ok=True)
+             logger.info(f"Downloading {remote_url} to {download_path}")
+             response = requests.get(remote_url)
+             response.raise_for_status()
+             with fs.open(path, "wb") as file:
+                 file.write(response.content)
+
+         # read the file with pandas and fsspec
+         with fs.open(download_path, "rb") as f:
+             annotations_df = pd.read_parquet(f)
+
+         return annotations_df
+
+     def _get_response_with_retries(
+         self, url: str, max_retries: int = 3, retry_delay: int = 5
+     ) -> Optional[requests.models.Response]:
+         """Get a response from a URL with retries.
+
+         Args:
+             url (str): The URL to get a response from.
+             max_retries (int, optional): The maximum number of retries. Defaults to 3.
+             retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.
+
+         Returns:
+             Optional[requests.models.Response]: The response from the URL. If there was an error, returns None.
+         """
+
+         for i in range(max_retries):
+             try:
+                 response = requests.get(url, stream=True)
+                 # if successful, break out of loop
+                 if response.status_code not in {200, 404}:
+                     time.sleep(retry_delay)
+                     continue
+                 break
+             except ConnectionError:
+                 if i < max_retries - 1:  # i.e. not on the last try
+                     time.sleep(retry_delay)
+                 else:
+                     return None
+
+         return response
+
+     def _download_item(
+         self,
+         thingi_file_id: str,
+         thingi_thing_id: str,
+         file_identifier: str,
+         download_dir: Optional[str],
+         expected_sha256: str,
+         handle_found_object: Optional[Callable],
+         handle_modified_object: Optional[Callable],
+         handle_missing_object: Optional[Callable],
+     ) -> Tuple[str, Optional[str]]:
+         """Download the given item.
+
+         Args:
+             thingi_file_id (str): The Thingiverse file ID of the object.
+             thingi_thing_id (str): The Thingiverse thing ID of the object.
+             file_identifier (str): File identifier of the Thingiverse object.
+             download_dir (Optional[str]): Directory to download the Thingiverse object
+                 to. Supports all file systems supported by fsspec. If None, the
+                 object will be deleted after it is downloaded and processed with the
+                 handler functions.
+             expected_sha256 (str): The expected SHA256 of the contents of the downloaded
+                 object.
+             handle_found_object (Optional[Callable]): Called when an object is
+                 successfully found and downloaded. Here, the object has the same sha256
+                 as the one that was downloaded with Objaverse-XL. If None, the object
+                 will be downloaded, but nothing will be done with it. Args for the
+                 function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used.
+             handle_modified_object (Optional[Callable]): Called when a modified object
+                 is found and downloaded. Here, the object is successfully downloaded,
+                 but it has a different sha256 than the one that was downloaded with
+                 Objaverse-XL. This is not expected to happen very often. If None, the
+                 object will be downloaded, but nothing will be done with it. Args for
+                 the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
+                     object.
+                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
+                     it was when it was downloaded with Objaverse-XL.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used.
+             handle_missing_object (Optional[Callable]): Called when an object that is in
+                 Objaverse-XL is not found. Here, it is likely that the file was removed
+                 from Thingiverse. If None, nothing will be done with the missing object.
+                 Args for the function include:
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the original 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used.
+
+         Returns:
+             Tuple[str, Optional[str]]: The file identifier and the path to the
+                 downloaded file. The path is None if there was an error or a 404.
+         """
+         url = f"https://www.thingiverse.com/download:{thingi_file_id}"
+         response = self._get_response_with_retries(url)
+         filename = f"thing-{thingi_thing_id}-file-{thingi_file_id}.stl"
+
+         if response is None:
+             logger.warning(
+                 f"Thingiverse file ID {thingi_file_id} could not get response from {url}"
+             )
+             # NOTE: the object is probably not missing, but the request failed
+             return file_identifier, None
+
+         # Check if the request was successful
+         if response.status_code == 404:
+             logger.warning(
+                 f"Thingiverse file ID {thingi_file_id} (404) could not find file"
+             )
+             if handle_missing_object is not None:
+                 handle_missing_object(
+                     file_identifier=file_identifier, sha256=expected_sha256, metadata={}
+                 )
+             return file_identifier, None
+
+         with tempfile.TemporaryDirectory() as temp_dir:
+             temp_path = os.path.join(temp_dir, filename)
+             temp_path_tmp = temp_path + ".tmp"
+
+             with open(temp_path_tmp, "wb") as file:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     file.write(chunk)
+
+             # rename to temp_path
+             os.rename(temp_path_tmp, temp_path)
+
+             # check the sha256
+             sha256 = get_file_hash(temp_path)
+
+             if sha256 == expected_sha256:
+                 if handle_found_object is not None:
+                     handle_found_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         sha256=sha256,
+                         metadata={},
+                     )
+             else:
+                 if handle_modified_object is not None:
+                     handle_modified_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         new_sha256=sha256,
+                         old_sha256=expected_sha256,
+                         metadata={},
+                     )
+
+             if download_dir is not None:
+                 filename = os.path.join(download_dir, filename)
+                 fs, path = fsspec.core.url_to_fs(filename)
+                 fs.makedirs(os.path.dirname(path), exist_ok=True)
+                 fs.put(temp_path, path)
+             else:
+                 path = None
+
+         return file_identifier, path
+
+     def _parallel_download_item(self, args):
+         return self._download_item(*args)
+
+     def get_file_id_from_file_identifier(self, file_identifier: str) -> str:
+         """Get the Thingiverse file ID from the Objaverse-XL file identifier.
+
+         Args:
+             file_identifier (str): The Objaverse-XL file identifier.
+
+         Returns:
+             str: The Thingiverse file ID.
+         """
+         return file_identifier.split("fileId=")[-1]
+
+     def get_thing_id_from_file_identifier(self, file_identifier: str) -> str:
+         """Get the Thingiverse thing ID from the Objaverse-XL file identifier.
+
+         Args:
+             file_identifier (str): The Objaverse-XL file identifier.
+
+         Returns:
+             str: The Thingiverse thing ID.
+         """
+         return file_identifier.split("/")[-2].split(":")[1]
+
+     def download_objects(
+         self,
+         objects: pd.DataFrame,
+         processes: Optional[int] = None,
+         download_dir: Optional[str] = "~/.objaverse",
+         handle_found_object: Optional[Callable] = None,
+         handle_modified_object: Optional[Callable] = None,
+         handle_missing_object: Optional[Callable] = None,
+     ) -> Dict[str, str]:
+         """Download the objects from the given list of things and files.
+
+         Args:
+             objects (pd.DataFrame): The objects to download, including the columns
+                 "fileIdentifier" and "sha256" from the annotations.
+             processes (int, optional): The number of processes to use. If None, maps to
+                 use all available CPUs using multiprocessing.cpu_count(). Defaults to None.
+             download_dir (str, optional): The directory to save the files to. Supports all
+                 file systems supported by fsspec. If None, the objects are only processed
+                 with the handler functions and not saved. Defaults to "~/.objaverse".
+             handle_found_object (Optional[Callable], optional): Called when an object is
+                 successfully found and downloaded. Here, the object has the same sha256
+                 as the one that was downloaded with Objaverse-XL. If None, the object
+                 will be downloaded, but nothing will be done with it. Args for the
+                 function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the 3D object.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used. Defaults to None.
+             handle_modified_object (Optional[Callable], optional): Called when a
+                 modified object is found and downloaded. Here, the object is
+                 successfully downloaded, but it has a different sha256 than the one that
+                 was downloaded with Objaverse-XL. This is not expected to happen very
+                 often. If None, the object will be downloaded, but nothing will be done
+                 with it. Args for the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
+                     object.
+                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
+                     it was when it was downloaded with Objaverse-XL.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used. Defaults to None.
+             handle_missing_object (Optional[Callable], optional): Called when an object
+                 that is in Objaverse-XL is not found. Here, it is likely that the file
+                 was removed from Thingiverse. If None, nothing will be done with the
+                 missing object. Args for the function include:
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the original 3D object.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
+                     particular to the source.
+                 Return is not used. Defaults to None.
+
+         Returns:
+             Dict[str, str]: A dictionary mapping from the fileIdentifier to the path of
+                 the downloaded file.
+         """
+         if processes is None:
+             processes = multiprocessing.cpu_count()
+
+         objects = objects.copy()
+         objects["thingiFileId"] = objects["fileIdentifier"].apply(
+             self.get_file_id_from_file_identifier
+         )
+         objects["thingiThingId"] = objects["fileIdentifier"].apply(
+             self.get_thing_id_from_file_identifier
+         )
+
+         # create the download directory
+         out = {}
+         if download_dir is not None:
+             download_dir = os.path.join(download_dir, "thingiverse")
+             fs, path = fsspec.core.url_to_fs(download_dir)
+             fs.makedirs(path, exist_ok=True)
+
+             # check to filter out files that already exist
+             existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
+             existing_file_ids = {
+                 os.path.basename(file).split(".")[0].split("-")[-1]
+                 for file in existing_files
+             }
+
+             # filter out existing files
+             items_to_download = []
+             already_downloaded_count = 0
+             for _, item in objects.iterrows():
+                 if item["thingiFileId"] in existing_file_ids:
+                     already_downloaded_count += 1
+                     out[item["fileIdentifier"]] = os.path.join(
+                         os.path.expanduser(download_dir),
+                         f"thing-{item['thingiThingId']}-file-{item['thingiFileId']}.stl",
+                     )
+                 else:
+                     items_to_download.append(item)
+
+             logger.info(
+                 f"Found {already_downloaded_count} Thingiverse objects downloaded"
              )
          else:
+             items_to_download = [item for _, item in objects.iterrows()]

+         logger.info(
+             f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
+         )
+         if len(items_to_download) == 0:
+             return out
+
+         # download the files
+         args = [
+             (
+                 item["thingiFileId"],
+                 item["thingiThingId"],
+                 item["fileIdentifier"],
+                 download_dir,
+                 item["sha256"],
+                 handle_found_object,
+                 handle_modified_object,
+                 handle_missing_object,
+             )
+             for item in items_to_download
+         ]

          with Pool(processes=processes) as pool:
+             results = list(
                  tqdm(
+                     pool.imap_unordered(self._parallel_download_item, args),
                      total=len(args),
                      desc="Downloading Thingiverse Objects",
                  )
              )
+
+         for file_identifier, download_path in results:
+             if download_path is not None:
+                 out[file_identifier] = download_path
+
+         return out
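
The handler callbacks are the main extension point of the new class. A hedged follow-up sketch, using the argument names from the docstrings above: with download_dir=None, each object is only staged in a temporary directory, passed to the handlers, and then discarded, so the callbacks must do any persistence themselves. The handlers are defined at module level so they remain picklable for the multiprocessing Pool; the sample size and process count below are arbitrary.

# Hypothetical callback-driven usage (not part of this commit).
from typing import Any, Dict

from objaverse_xl.thingiverse import ThingiverseDownloader


def handle_found_object(
    local_path: str, file_identifier: str, sha256: str, metadata: Dict[str, Any]
) -> None:
    # local_path is only valid for the duration of this call when download_dir=None.
    print(f"found {file_identifier} at {local_path}")


def handle_missing_object(
    file_identifier: str, sha256: str, metadata: Dict[str, Any]
) -> None:
    print(f"missing {file_identifier}")


if __name__ == "__main__":
    downloader = ThingiverseDownloader()
    annotations = downloader.load_annotations()
    downloader.download_objects(
        objects=annotations.head(n=10),
        processes=2,
        download_dir=None,  # process objects in a temp dir without saving them
        handle_found_object=handle_found_object,
        handle_missing_object=handle_missing_object,
    )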