mattdeitke committed
Commit ac3e938
1 Parent(s): 8f7b31b

smithsonian refactor

Files changed (1)
  1. objaverse_xl/smithsonian.py +296 -155
objaverse_xl/smithsonian.py CHANGED
@@ -2,9 +2,9 @@

import multiprocessing
import os
- from functools import partial
from multiprocessing import Pool
- from typing import Dict, List, Optional, Tuple
+ from typing import Dict, Optional, Tuple, Callable
+ import tempfile

import fsspec
import pandas as pd
@@ -12,162 +12,303 @@ import requests
from loguru import logger
from tqdm import tqdm

- from objaverse_xl.utils import get_uid_from_str
-
-
- def load_smithsonian_metadata(download_dir: str = "~/.objaverse") -> pd.DataFrame:
-     """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.
-
-     Args:
-         download_dir (str, optional): Directory to download the parquet metadata file.
-             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
-             columns for the object "title", "url", "quality", "file_type", "uid", and
-             "license". The quality is always Medium and the file_type is always glb.
-     """
-     filename = os.path.join(download_dir, "smithsonian", "object-metadata.parquet")
-     fs, path = fsspec.core.url_to_fs(filename)
-     fs.makedirs(os.path.dirname(path), exist_ok=True)
-
-     # download the parquet file if it doesn't exist
-     if not fs.exists(path):
-         url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
-         response = requests.get(url)
-         response.raise_for_status()
-         with fs.open(path, "wb") as file:
-             file.write(response.content)
-
-     # load the parquet file with fsspec
-     with fs.open(path) as f:
-         df = pd.read_parquet(f)
-
-     return df
-
-
- def _download_smithsonian_object(
-     url: str, download_dir: str = "~/.objaverse"
- ) -> Tuple[str, Optional[str]]:
-     """Downloads a Smithsonian Object from a URL.
-
-     Overwrites the file if it already exists and assumes this was previously checked.
-
-     Args:
-         url (str): URL to download the Smithsonian Object from.
-         download_dir (str, optional): Directory to download the Smithsonian Object to.
-             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         Tuple[str, Optional[str]]: Tuple of the URL and the path to the downloaded
-             Smithsonian Object. If the Smithsonian Object was not downloaded, the path
-             will be None.
-     """
-     uid = get_uid_from_str(url)
-
-     filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
-     fs, path = fsspec.core.url_to_fs(filename)
-
-     response = requests.get(url)
-
-     # check if the path is valid
-     if response.status_code == 404:
-         logger.warning(f"404 for {url}")
-         return url, None
-
-     # write to tmp path so that we don't have a partial file
-     tmp_path = f"{path}.tmp"
-     with fs.open(tmp_path, "wb") as file:
-         for chunk in response.iter_content(chunk_size=8192):
-             file.write(chunk)
-
-     # rename to final path
-     fs.rename(tmp_path, path)
-
-     return url, filename
-
-
- def download_smithsonian_objects(
-     urls: Optional[List[str]] = None,
-     processes: Optional[int] = None,
-     download_dir: str = "~/.objaverse",
- ) -> List[Dict[str, str]]:
-     """Downloads all Smithsonian Objects.
-
-     Args:
-         urls (Optional[List[str]], optional): List of URLs to download the Smithsonian
-             Objects from. If None, all Smithsonian Objects will be downloaded. Defaults
-             to None.
-         processes (Optional[int], optional): Number of processes to use for downloading
-             the Smithsonian Objects. If None, the number of processes will be set to
-             the number of CPUs on the machine (multiprocessing.cpu_count()). Defaults
-             to None.
-         download_dir (str, optional): Directory to download the Smithsonian Objects to.
-             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
-
-     Returns:
-         List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
-             for each downloaded object.
-     """
-     if processes is None:
-         processes = multiprocessing.cpu_count()
-     if urls is None:
-         df = load_smithsonian_metadata(download_dir=download_dir)
-         urls = df["url"].tolist()
-
-     # filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
-     objects_dir = os.path.join(download_dir, "smithsonian", "objects")
-     fs, path = fsspec.core.url_to_fs(objects_dir)
-     fs.makedirs(path, exist_ok=True)
-
-     # get the existing glb files
-     existing_glb_files = fs.glob(os.path.join(objects_dir, "*.glb"), refresh=True)
-     existing_uids = [
-         os.path.basename(file).split(".")[0] for file in existing_glb_files
-     ]
-
-     # find the urls that need to be downloaded
-     out = []
-     urls_to_download = set([])
-     already_downloaded_urls = set([])
-     for url in urls:
-         uid = get_uid_from_str(url)
-         if uid not in existing_uids:
-             urls_to_download.add(url)
-         else:
-             already_downloaded_urls.add(url)
-             out.append(
-                 {"download_path": os.path.join(objects_dir, f"{uid}.glb"), "url": url}
-             )
-
-     logger.info(
-         f"Found {len(already_downloaded_urls)} Smithsonian Objects already downloaded"
-     )
-     logger.info(
-         f"Downloading {len(urls_to_download)} Smithsonian Objects with {processes=}"
-     )
-
-     if len(urls_to_download) == 0:
-         return out
-
-     with Pool(processes=processes) as pool:
-         results = list(
-             tqdm(
-                 pool.imap_unordered(
-                     partial(_download_smithsonian_object, download_dir=download_dir),
-                     urls_to_download,
-                 ),
-                 total=len(urls_to_download),
-                 desc="Downloading Smithsonian Objects",
-             )
-         )
-
-     out.extend(
-         [
-             {"download_path": download_path, "url": url}
-             for url, download_path in results
-             if download_path is not None
-         ]
-     )
-
-     return out
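
Before this refactor, the module exposed plain functions keyed on object URLs. For orientation, here is a minimal sketch of the old call pattern, assuming only the removed functions shown above:

    # Sketch of the pre-refactor, module-level API (removed in this commit).
    from objaverse_xl.smithsonian import (
        download_smithsonian_objects,
        load_smithsonian_metadata,
    )

    # Metadata has "title", "url", "quality", "file_type", "uid", and "license".
    df = load_smithsonian_metadata(download_dir="~/.objaverse")

    # Download a few objects by URL; returns a list of
    # {"download_path": ..., "url": ...} dicts.
    results = download_smithsonian_objects(
        urls=df["url"].tolist()[:3],
        processes=2,
        download_dir="~/.objaverse",
    )

The added replacement code follows.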
 
+ from objaverse_xl.utils import get_uid_from_str, get_file_hash
+ from objaverse_xl.abstract import ObjaverseSource
+
+
+ class SmithsonianDownloader(ObjaverseSource):
+     def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
+         """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.
+
+         Args:
+             download_dir (str, optional): Directory to download the parquet metadata
+                 file. Supports all file systems supported by fsspec. Defaults to
+                 "~/.objaverse".
+
+         Returns:
+             pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame
+                 with columns for the object "title", "url", "quality", "file_type",
+                 "uid", and "license". The quality is always Medium and the file_type
+                 is always glb.
+         """
+         filename = os.path.join(download_dir, "smithsonian", "object-metadata.parquet")
+         fs, path = fsspec.core.url_to_fs(filename)
+         fs.makedirs(os.path.dirname(path), exist_ok=True)
+
+         # download the parquet file if it doesn't exist
+         if not fs.exists(path):
+             url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
+             response = requests.get(url)
+             response.raise_for_status()
+             with fs.open(path, "wb") as file:
+                 file.write(response.content)
+
+         # load the parquet file with fsspec
+         with fs.open(path) as f:
+             df = pd.read_parquet(f)
+
+         return df
+
+     def _download_smithsonian_object(
+         self,
+         file_identifier: str,
+         download_dir: Optional[str],
+         expected_sha256: str,
+         handle_found_object: Optional[Callable],
+         handle_modified_object: Optional[Callable],
+         handle_missing_object: Optional[Callable],
+     ) -> Tuple[str, Optional[str]]:
+         """Downloads a Smithsonian Object from a URL.
+
+         Overwrites the file if it already exists and assumes this was previously
+         checked.
+
+         Args:
+             file_identifier (str): URL to download the Smithsonian Object from.
+             download_dir (Optional[str]): Directory to download the Smithsonian
+                 Object to. Supports all file systems supported by fsspec. If None,
+                 the Smithsonian Object will be deleted after it is downloaded and
+                 processed with the handler functions.
+             expected_sha256 (str): The expected SHA256 of the contents of the
+                 downloaded object.
+             handle_found_object (Optional[Callable]): Called when an object is
+                 successfully found and downloaded. Here, the object has the same
+                 sha256 as the one that was downloaded with Objaverse-XL. If None, the
+                 object will be downloaded, but nothing will be done with it. Args for
+                 the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): URL of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                   particular to the source.
+                 Return is not used.
+             handle_modified_object (Optional[Callable]): Called when a modified
+                 object is found and downloaded. Here, the object is successfully
+                 downloaded, but it has a different sha256 than the one that was
+                 downloaded with Objaverse-XL. This is not expected to happen very
+                 often. If None, the object will be downloaded, but nothing will be
+                 done with it. Args for the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): URL of the 3D object.
+                 - new_sha256 (str): SHA256 of the contents of the newly downloaded
+                   3D object.
+                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object
+                   as it was when it was downloaded with Objaverse-XL.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                   particular to the source.
+                 Return is not used.
+             handle_missing_object (Optional[Callable]): Called when an object that is
+                 in Objaverse-XL is not found. Here, it is likely that the object was
+                 deleted or renamed. If None, nothing will be done with the missing
+                 object. Args for the function include:
+                 - file_identifier (str): URL of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the original 3D object.
+                 - metadata (Dict[str, Any]): Metadata about the 3D object, which is
+                   particular to the source.
+                 Return is not used.
+
+         Returns:
+             Tuple[str, Optional[str]]: Tuple of the URL and the path to the
+                 downloaded Smithsonian Object. If the Smithsonian Object was not
+                 downloaded, the path will be None.
+         """
+         uid = get_uid_from_str(file_identifier)
+
+         with tempfile.TemporaryDirectory() as temp_dir:
+             temp_path = os.path.join(temp_dir, f"{uid}.glb")
+             temp_path_tmp = f"{temp_path}.tmp"
+
+             response = requests.get(file_identifier)
+
+             # check if the path is valid
+             if response.status_code == 404:
+                 logger.warning(f"404 for {file_identifier}")
+                 if handle_missing_object is not None:
+                     handle_missing_object(
+                         file_identifier=file_identifier,
+                         sha256=expected_sha256,
+                         metadata={},
+                     )
+                 return file_identifier, None
+
+             # write to a .tmp path so that we don't have a partial file
+             with open(temp_path_tmp, "wb") as file:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     file.write(chunk)
+
+             # rename to temp_path
+             os.rename(temp_path_tmp, temp_path)
+
+             # check the sha256
+             sha256 = get_file_hash(temp_path)
+
+             if sha256 == expected_sha256:
+                 if handle_found_object is not None:
+                     handle_found_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         sha256=sha256,
+                         metadata={},
+                     )
+             else:
+                 if handle_modified_object is not None:
+                     handle_modified_object(
+                         local_path=temp_path,
+                         file_identifier=file_identifier,
+                         new_sha256=sha256,
+                         old_sha256=expected_sha256,
+                         metadata={},
+                     )
+
+             if download_dir is not None:
+                 filename = os.path.join(
+                     download_dir, "smithsonian", "objects", f"{uid}.glb"
+                 )
+                 fs, path = fsspec.core.url_to_fs(filename)
+                 fs.makedirs(os.path.dirname(path), exist_ok=True)
+                 fs.put(temp_path, path)
+             else:
+                 path = None
+
+         return file_identifier, path
+
+     def _parallel_download_object(self, args):
+         # workaround since starmap doesn't work well with tqdm
+         return self._download_smithsonian_object(*args)
+
+     def download_objects(
+         self,
+         objects: pd.DataFrame,
+         download_dir: Optional[str] = "~/.objaverse",
+         processes: Optional[int] = None,
+         handle_found_object: Optional[Callable] = None,
+         handle_modified_object: Optional[Callable] = None,
+         handle_missing_object: Optional[Callable] = None,
+         **kwargs,
+     ) -> Dict[str, str]:
+         """Downloads all Smithsonian Objects.
+
+         Args:
+             objects (pd.DataFrame): Objects to download. Must have columns for the
+                 object "fileIdentifier" and "sha256". Use the `load_annotations`
+                 function to get the metadata.
+             download_dir (Optional[str], optional): Directory to download the
+                 Smithsonian Objects to. Supports all file systems supported by
+                 fsspec. If None, the Smithsonian Objects will be deleted after they
+                 are downloaded and processed with the handler functions. Defaults to
+                 "~/.objaverse".
+             processes (Optional[int], optional): Number of processes to use for
+                 downloading the Smithsonian Objects. If None, the number of processes
+                 will be set to the number of CPUs on the machine
+                 (multiprocessing.cpu_count()). Defaults to None.
+             handle_found_object (Optional[Callable], optional): Called when an object
+                 is successfully found and downloaded. Here, the object has the same
+                 sha256 as the one that was downloaded with Objaverse-XL. If None, the
+                 object will be downloaded, but nothing will be done with it. Args for
+                 the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the 3D object.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                   is particular to the source.
+                 Return is not used. Defaults to None.
+             handle_modified_object (Optional[Callable], optional): Called when a
+                 modified object is found and downloaded. Here, the object is
+                 successfully downloaded, but it has a different sha256 than the one
+                 that was downloaded with Objaverse-XL. This is not expected to happen
+                 very often. If None, the object will be downloaded, but nothing will
+                 be done with it. Args for the function include:
+                 - local_path (str): Local path to the downloaded 3D object.
+                 - file_identifier (str): File identifier of the 3D object.
+                 - new_sha256 (str): SHA256 of the contents of the newly downloaded
+                   3D object.
+                 - old_sha256 (str): Expected SHA256 of the contents of the 3D object
+                   as it was when it was downloaded with Objaverse-XL.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                   is particular to the source.
+                 Return is not used. Defaults to None.
+             handle_missing_object (Optional[Callable], optional): Called when an
+                 object that is in Objaverse-XL is not found. Here, it is likely that
+                 the object was deleted or renamed. If None, nothing will be done with
+                 the missing object. Args for the function include:
+                 - file_identifier (str): File identifier of the 3D object.
+                 - sha256 (str): SHA256 of the contents of the original 3D object.
+                 - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                   is particular to the source.
+                 Return is not used. Defaults to None.
+
+         Returns:
+             Dict[str, str]: A dictionary mapping from the fileIdentifier to the
+                 download_path.
+         """
+         if processes is None:
+             processes = multiprocessing.cpu_count()
+
+         out = {}
+         objects_to_download = []
+         if download_dir is not None:
+             objects_dir = os.path.join(download_dir, "smithsonian", "objects")
+             fs, path = fsspec.core.url_to_fs(objects_dir)
+             fs.makedirs(path, exist_ok=True)
+
+             # get the existing glb files
+             existing_glb_files = fs.glob(
+                 os.path.join(objects_dir, "*.glb"), refresh=True
+             )
+             existing_uids = set(
+                 os.path.basename(file).split(".")[0] for file in existing_glb_files
+             )
+
+             # find the urls that need to be downloaded
+             already_downloaded_objects = set()
+             for _, item in objects.iterrows():
+                 file_identifier = item["fileIdentifier"]
+                 uid = get_uid_from_str(file_identifier)
+                 if uid not in existing_uids:
+                     objects_to_download.append(item)
+                 else:
+                     already_downloaded_objects.add(file_identifier)
+                     out[file_identifier] = os.path.join(
+                         os.path.expanduser(objects_dir), f"{uid}.glb"
+                     )
+         else:
+             existing_uids = set()
+             objects_to_download = [item for _, item in objects.iterrows()]
+             already_downloaded_objects = set()
+             out = {}
+
+         logger.info(
+             f"Found {len(already_downloaded_objects)} Smithsonian Objects already downloaded"
+         )
+         logger.info(
+             f"Downloading {len(objects_to_download)} Smithsonian Objects with {processes} processes"
+         )
+
+         if len(objects_to_download) == 0:
+             return out
+
+         args = [
+             [
+                 item["fileIdentifier"],
+                 download_dir,
+                 item["sha256"],
+                 handle_found_object,
+                 handle_modified_object,
+                 handle_missing_object,
+             ]
+             for item in objects_to_download
+         ]
+         with Pool(processes=processes) as pool:
+             results = list(
+                 tqdm(
+                     pool.imap_unordered(self._parallel_download_object, args),
+                     total=len(objects_to_download),
+                     desc="Downloading Smithsonian Objects",
+                 )
+             )
+
+         for file_identifier, download_path in results:
+             if download_path is not None:
+                 out[file_identifier] = download_path
+
+         return out
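
After the refactor, the entry point is the SmithsonianDownloader class rather than module-level functions. Below is a minimal usage sketch, assuming the class behaves as the diff above suggests; the handler signature follows the docstrings, and the "fileIdentifier" and "sha256" column names come from the download_objects docstring.

    # Sketch of the post-refactor API; assumes objaverse-xl is installed and
    # that SmithsonianDownloader matches the code in this commit.
    from typing import Any, Dict, Hashable

    from objaverse_xl.smithsonian import SmithsonianDownloader

    downloader = SmithsonianDownloader()

    def handle_found_object(
        local_path: str,
        file_identifier: str,
        sha256: str,
        metadata: Dict[Hashable, Any],
    ) -> None:
        # Called for each object whose downloaded sha256 matches the expected one.
        print(f"Downloaded {file_identifier} -> {local_path}")

    if __name__ == "__main__":
        # Annotations DataFrame; download_objects expects "fileIdentifier" and
        # "sha256" columns on each row.
        annotations = downloader.load_annotations(download_dir="~/.objaverse")

        # Download a small sample; the Pool-based download should run under a
        # main guard on spawn-based platforms. Returns
        # {fileIdentifier: download_path}.
        paths = downloader.download_objects(
            objects=annotations.head(3),
            download_dir="~/.objaverse",
            processes=2,
            handle_found_object=handle_found_object,
        )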