mattdeitke committed
Commit be5297a
1 Parent(s): 6332e17

add fsspec'd objaverse v1 script

objaverse_xl/objaverse_v1.py ADDED
@@ -0,0 +1,296 @@
+ import gzip
+ import json
+ import os
+ import urllib.request
+ from multiprocessing import Pool
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import fsspec
+ from loguru import logger
+ from tqdm import tqdm
+
+
+ def load_annotations(
+     uids: Optional[List[str]] = None,
+     download_dir: str = "~/.objaverse",
+ ) -> Dict[str, Any]:
+     """Load the full metadata of all objects in the dataset.
+
+     Args:
+         uids: A list of uids with which to load metadata. If None, it loads
+             the metadata for all uids.
+         download_dir: The base directory to download the annotations to. Supports all
+             file systems supported by fsspec. Defaults to "~/.objaverse".
+
+     Returns:
+         A dictionary mapping each uid to the metadata for that object.
+     """
+     # make the metadata dir if it doesn't exist
+     metadata_path = os.path.join(download_dir, "hf-objaverse-v1", "metadata")
+     fs, _ = fsspec.core.url_to_fs(metadata_path)
+     fs.makedirs(metadata_path, exist_ok=True)
+
+     # get the dir ids that need to be loaded if only downloading a subset of uids
+     object_paths = _load_object_paths(download_dir=download_dir)
+     dir_ids = (
+         set([object_paths[uid].split("/")[1] for uid in uids])
+         if uids is not None
+         else set([f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)])
+     )
+
+     # get the existing metadata files
+     existing_metadata_files = fs.glob(os.path.join(metadata_path, "*.json.gz"), refresh=True)
+     existing_dir_ids = set([
+         file.split("/")[-1].split(".")[0]
+         for file in existing_metadata_files
+         if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
+     ])
+     downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
+     logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")
+
+     # download the metadata from the missing dir_ids
+     dir_ids_to_download = dir_ids - existing_dir_ids
+     logger.info(f"Downloading {len(dir_ids_to_download)} metadata files")
+
+     # download the metadata file if it doesn't exist
+     if len(dir_ids_to_download) > 0:
+         for i_id in tqdm(dir_ids_to_download, desc="Downloading metadata files"):
+             # get the path to the json file
+             path = os.path.join(metadata_path, f"{i_id}.json.gz")
+
+             # get the url to the remote json file
+             hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/metadata/{i_id}.json.gz"
+
+             # download the file to a tmp path to avoid partial downloads on interruption
+             tmp_path = f"{path}.tmp"
+             with fs.open(tmp_path, "wb") as f:
+                 with urllib.request.urlopen(hf_url) as response:
+                     f.write(response.read())
+             fs.rename(tmp_path, path)
+
+     out = {}
+     for i_id in tqdm(dir_ids, desc="Reading metadata files"):
+         # get the path to the json file
+         path = os.path.join(metadata_path, f"{i_id}.json.gz")
+
+         # read the json file of the metadata chunk
+         with fs.open(path, "rb") as f:
+             with gzip.GzipFile(fileobj=f) as gfile:
+                 content = gfile.read()
+         data = json.loads(content)
+
+         # filter the data to only include the uids we want
+         if uids is not None:
+             data = {uid: data[uid] for uid in uids if uid in data}
+
+         # add the data to the out dict
+         out.update(data)
+
+     return out
+
+
+ def _load_object_paths(download_dir: str) -> Dict[str, str]:
+     """Load the object paths from the dataset.
+
+     The object paths specify the location of where the object is located
+     in the Hugging Face repo.
+
+     Returns:
+         A dictionary mapping the uid to the object path.
+     """
+     object_paths_file = "object-paths.json.gz"
+     local_path = os.path.join(download_dir, "hf-objaverse-v1", object_paths_file)
+
+     # download the object_paths file if it doesn't exist
+     fs, path = fsspec.core.url_to_fs(local_path)
+     if not fs.exists(path):
+         hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{object_paths_file}"
+         fs.makedirs(os.path.dirname(path), exist_ok=True)
+
+         # download the file to a tmp path to avoid partial downloads on interruption
+         tmp_path = f"{path}.tmp"
+         with fs.open(tmp_path, "wb") as f:
+             with urllib.request.urlopen(hf_url) as response:
+                 f.write(response.read())
+         fs.rename(tmp_path, path)
+
+     # read the object_paths
+     with fs.open(path, "rb") as f:
+         with gzip.GzipFile(fileobj=f) as gfile:
+             content = gfile.read()
+     object_paths = json.loads(content)
+
+     return object_paths
+
+
+ def load_uids(download_dir: str = "~/.objaverse") -> List[str]:
+     """Load the uids from the dataset.
+
+     Returns:
+         A list of all the UIDs from the dataset.
+     """
+     return list(_load_object_paths(download_dir=download_dir).keys())
+
+
+ def _download_object(
+     uid: str,
+     hf_object_path: str,
+     download_dir: str,
+ ) -> Tuple[str, str]:
+     """Download the object for the given uid.
+
+     Args:
+         uid: The uid of the object to load.
+         hf_object_path: The path to the object in the Hugging Face repo. Here, hf_object_path
+             is the part that comes after "main" in the Hugging Face repo url:
+             https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}
+         download_dir: The base directory to download the object to. Supports all
+             file systems supported by fsspec. Defaults to "~/.objaverse".
+
+     Returns:
+         A tuple of the uid and the path to the downloaded object.
+     """
+     hf_url = (
+         f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/{hf_object_path}"
+     )
+
+     filename = os.path.join(download_dir, "hf-objaverse-v1", hf_object_path)
+     fs, path = fsspec.core.url_to_fs(filename)
+
+     # download the file
+     fs.makedirs(os.path.dirname(path), exist_ok=True)
+     tmp_path = f"{path}.tmp"
+     with fs.open(tmp_path, "wb") as file:
+         with urllib.request.urlopen(hf_url) as response:
+             file.write(response.read())
+
+     fs.rename(tmp_path, path)
+
+     return uid, filename
+
+
+ def _parallel_download_object(args):
+     # workaround since starmap doesn't work well with tqdm
+     return _download_object(*args)
+
+
+ def load_objects(
+     uids: List[str],
+     download_processes: int = 1,
+     download_dir: str = "~/.objaverse",
+ ) -> Dict[str, str]:
+     """Return the path to the object files for the given uids.
+
+     If the object is not already downloaded, it will be downloaded.
+
+     Args:
+         uids: A list of uids.
+         download_processes: The number of processes to use to download the objects.
+         download_dir: The base directory to download the objects to. Supports all
+             file systems supported by fsspec. Defaults to "~/.objaverse".
+
+     Returns:
+         A dictionary mapping the object uid to the local path of where the object
+         was downloaded.
+     """
+     uids = set(uids)
+     hf_object_paths = _load_object_paths(download_dir=download_dir)
+
+     versioned_dirname = os.path.join(download_dir, "hf-objaverse-v1")
+     fs, path = fsspec.core.url_to_fs(versioned_dirname)
+
+     # Get the existing file paths. This is much faster than calling fs.exists() for each
+     # file. `glob()` is like walk, but returns a list of files instead of the nested
+     # directory structure. glob() is also faster than find() / walk() since it doesn't
+     # need to traverse the entire directory structure.
+     existing_file_paths = fs.glob(os.path.join(path, "glbs", "*", "*.glb"), refresh=True)
+     existing_uids = set([
+         file.split("/")[-1].split(".")[0]
+         for file in existing_file_paths
+         if file.endswith(".glb")  # note partial files end with .glb.tmp
+     ])
+
+     # add the existing downloaded uids to the return dict
+     out = {}
+     already_downloaded_uids = uids.intersection(existing_uids)
+     for uid in already_downloaded_uids:
+         hf_object_path = hf_object_paths[uid]
+         fs_abs_object_path = os.path.join(versioned_dirname, hf_object_path)
+         out[uid] = fs_abs_object_path
+
+     logger.info(f"Found {len(already_downloaded_uids)} objects already downloaded")
+
+     # get the uids that need to be downloaded
+     remaining_uids = uids - existing_uids
+     uids_to_download = []
+     for uid in remaining_uids:
+         if uid not in hf_object_paths:
+             logger.error(f"Could not find object with uid {uid}. Skipping it.")
+             continue
+         uids_to_download.append((uid, hf_object_paths[uid]))
+
+     logger.info(f"Downloading {len(uids_to_download)} new objects")
+
+     # check if all objects are already downloaded
+     if len(uids_to_download) == 0:
+         return out
+
+     if download_processes == 1:
+         # iteratively download the objects
+         for uid, hf_object_path in tqdm(uids_to_download):
+             uid, local_path = _download_object(
+                 uid=uid, hf_object_path=hf_object_path, download_dir=download_dir
+             )
+             out[uid] = local_path
+     else:
+         args = [
+             (uid, hf_object_path, download_dir)
+             for uid, hf_object_path in uids_to_download
+         ]
+
+         # download the objects in parallel
+         with Pool(download_processes) as pool:
+             new_object_downloads = list(
+                 tqdm(pool.imap_unordered(_parallel_download_object, args), total=len(args))
+             )
+
+         for uid, local_path in new_object_downloads:
+             out[uid] = local_path
+
+     return out
+
+
+ def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
+     """Load the LVIS annotations.
+
+     If the annotations are not already downloaded, they will be downloaded.
+
+     Args:
+         download_dir: The base directory to download the annotations to. Supports all
+             file systems supported by fsspec. Defaults to "~/.objaverse".
+
+     Returns:
+         A dictionary mapping the LVIS category to the list of uids in that category.
+     """
+     hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
+
+     download_path = os.path.join(download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz")
+
+     # use fsspec
+     fs, path = fsspec.core.url_to_fs(download_path)
+     if not fs.exists(path):
+         # make dir if it doesn't exist
+         fs.makedirs(os.path.dirname(path), exist_ok=True)
+
+         # download the file
+         with fs.open(path, "wb") as f:
+             with urllib.request.urlopen(hf_url) as response:
+                 f.write(response.read())
+
+     # load the gzip file
+     with fs.open(path, "rb") as f:
+         with gzip.GzipFile(fileobj=f) as gfile:
+             content = gfile.read()
+     data = json.loads(content)
+
+     return data
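
For reference, a minimal usage sketch of the new module (a sketch only: it assumes the file is importable as objaverse_xl.objaverse_v1, and the uid slice, process count, and paths are illustrative rather than taken from this commit):

import objaverse_xl.objaverse_v1 as objaverse_v1

# list every uid in Objaverse v1 (fetches object-paths.json.gz on first call)
uids = objaverse_v1.load_uids()

# load the metadata for a handful of uids
annotations = objaverse_v1.load_annotations(uids=uids[:5])

# download the corresponding .glb files, optionally with multiple processes;
# download_dir may also point at any fsspec-supported filesystem
objects = objaverse_v1.load_objects(uids=uids[:5], download_processes=4)
for uid, local_path in objects.items():
    print(uid, "->", local_path)

# LVIS category -> list of uids in that category
lvis_annotations = objaverse_v1.load_lvis_annotations()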
objaverse_xl/smithsonian.py CHANGED
@@ -1,6 +1,5 @@
  import multiprocessing
  import os
- import uuid
  from functools import partial
  from multiprocessing import Pool
  from typing import Dict, List, Optional
@@ -13,65 +12,59 @@ from tqdm import tqdm
  from utils import get_uid_from_str


- def load_smithsonian_metadata(
-     download_dir: str = "~/.objaverse-xl",
- ) -> pd.DataFrame:
+ def load_smithsonian_metadata(download_dir: str = "~/.objaverse") -> pd.DataFrame:
      """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.

      Args:
          download_dir (str, optional): Directory to download the parquet metadata file.
-             Supports all file systems supported by fsspec. Defaults to
-             "~/.objaverse-xl".
+             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

      Returns:
          pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
          columns for the object "title", "url", "quality", "file_type", "uid", and
          "license". The quality is always Medium and the file_type is always glb.
      """
-     dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian"))
-     filename = os.path.join(dirname, "object-metadata.parquet")
+     filename = os.path.join(download_dir, "smithsonian", "object-metadata.parquet")
      fs, path = fsspec.core.url_to_fs(filename)
-     if fs.protocol == "file":
-         os.makedirs(dirname, exist_ok=True)
+     fs.makedirs(os.path.dirname(path), exist_ok=True)

-     if fs.exists(filename):
-         df = pd.read_parquet(filename)
-         return df
-     else:
+     # download the parquet file if it doesn't exist
+     if not fs.exists(path):
          url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
          response = requests.get(url)
          response.raise_for_status()
-         with fs.open(filename, "wb") as file:
+         with fs.open(path, "wb") as file:
              file.write(response.content)
-         df = pd.read_parquet(filename)

+     # load the parquet file with fsspec
+     with fs.open(path) as f:
+         df = pd.read_parquet(f)
+
+     # add uid and license columns
      df["uid"] = df["url"].apply(get_uid_from_str)
      df["license"] = "CC0"
+
      return df


- def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse-xl") -> str:
+ def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> str:
      """Downloads a Smithsonian Object from a URL.

      Args:
          url (str): URL to download the Smithsonian Object from.
          download_dir (str, optional): Directory to download the Smithsonian Object to.
-             Supports all file systems supported by fsspec. Defaults to
-             "~/.objaverse-xl".
+             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

      Returns:
          str: Path to the downloaded Smithsonian Object.
      """
      uid = get_uid_from_str(url)

-     dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian", "objects"))
-     filename = os.path.join(dirname, f"{uid}.glb")
+     filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
      fs, path = fsspec.core.url_to_fs(filename)
-     if fs.protocol == "file":
-         os.makedirs(dirname, exist_ok=True)
+     fs.makedirs(os.path.dirname(path), exist_ok=True)

-     if not fs.exists(filename):
-         tmp_path = os.path.join(dirname, f"{uid}.glb.tmp")
+     if not fs.exists(path):
          response = requests.get(url)

          # check if the path is valid
@@ -79,13 +72,14 @@ def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse-xl")
          logger.warning(f"404 for {url}")
          return None

-         # write to tmp path
+         # write to tmp path so that we don't have a partial file
+         tmp_path = f"{path}.tmp"
          with fs.open(tmp_path, "wb") as file:
              for chunk in response.iter_content(chunk_size=8192):
                  file.write(chunk)

          # rename to final path
-         fs.rename(tmp_path, filename)
+         fs.rename(tmp_path, path)

      return filename

@@ -93,7 +87,7 @@ def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse-xl")
  def download_smithsonian_objects(
      urls: Optional[str] = None,
      processes: Optional[int] = None,
-     download_dir: str = "~/.objaverse-xl",
+     download_dir: str = "~/.objaverse",
  ) -> List[Dict[str, str]]:
      """Downloads all Smithsonian Objects.

@@ -105,8 +99,7 @@ def download_smithsonian_objects(
          number of CPUs on the machine (multiprocessing.cpu_count()). Defaults to
          None.
          download_dir (str, optional): Directory to download the Smithsonian Objects to.
-             Supports all file systems supported by fsspec. Defaults to
-             "~/.objaverse-xl".
+             Supports all file systems supported by fsspec. Defaults to "~/.objaverse".

      Returns:
          List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
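
Similarly, a brief sketch of how the updated Smithsonian helpers might be called after this change (assumptions: the module is importable as objaverse_xl.smithsonian, and passing an explicit list of urls to download_smithsonian_objects is inferred from its docstring rather than shown in this diff):

import objaverse_xl.smithsonian as smithsonian

# the metadata parquet is downloaded on first use and cached under download_dir
df = smithsonian.load_smithsonian_metadata(download_dir="~/.objaverse")

# download a single object; returns the path, or None on a 404
path = smithsonian.download_smithsonian_object(df["url"].iloc[0])

# download several objects in parallel (urls=None would download all of them)
results = smithsonian.download_smithsonian_objects(
    urls=df["url"].head(10).tolist(),
    processes=4,
    download_dir="~/.objaverse",
)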