Commit 9b8a65c by mattdeitke
Parent: be5297a

performance refactor (exists -> glob)

Files changed (1): objaverse_xl/smithsonian.py (+49 -21)
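
The rationale behind the commit message: the old code called `fs.exists` once per object, which on a network-backed fsspec filesystem (S3, GCS, HTTP, ...) means one round trip per file, while the new code issues a single `fs.glob` to list the directory and then checks membership locally. Below is a minimal sketch of the difference, assuming a generic fsspec filesystem; the directory and UIDs are illustrative, and the set lookup is a small extra optimization over the commit's list-based `existing_uids`:

```python
import os

import fsspec

# Illustrative setup: a local directory standing in for the download dir.
objects_dir = os.path.expanduser("~/.objaverse/smithsonian/objects")
uids = [f"{i:06d}" for i in range(1000)]  # hypothetical object UIDs

fs, dir_path = fsspec.core.url_to_fs(objects_dir)

# Before (exists): one filesystem call per object. On a remote
# filesystem each call is a separate request, so this is O(n) round trips.
missing_via_exists = [
    uid for uid in uids
    if not fs.exists(os.path.join(dir_path, f"{uid}.glb"))
]

# After (glob): one listing call, then O(1) membership tests in memory.
existing_uids = {
    os.path.basename(p).split(".")[0]
    for p in fs.glob(os.path.join(dir_path, "*.glb"))
}
missing_via_glob = [uid for uid in uids if uid not in existing_uids]

assert sorted(missing_via_exists) == sorted(missing_via_glob)
```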
objaverse_xl/smithsonian.py CHANGED
@@ -47,9 +47,11 @@ def load_smithsonian_metadata(download_dir: str = "~/.objaverse") -> pd.DataFram
     return df


-def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> str:
+def _download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> str:
     """Downloads a Smithsonian Object from a URL.

+    Overwrites the file if it already exists and assumes this was previously checked.
+
     Args:
         url (str): URL to download the Smithsonian Object from.
         download_dir (str, optional): Directory to download the Smithsonian Object to.
@@ -62,24 +64,22 @@ def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") ->

     filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
     fs, path = fsspec.core.url_to_fs(filename)
-    fs.makedirs(os.path.dirname(path), exist_ok=True)

-    if not fs.exists(path):
-        response = requests.get(url)
+    response = requests.get(url)

-        # check if the path is valid
-        if response.status_code == 404:
-            logger.warning(f"404 for {url}")
-            return None
+    # check if the path is valid
+    if response.status_code == 404:
+        logger.warning(f"404 for {url}")
+        return None

-        # write to tmp path so that we don't have a partial file
-        tmp_path = f"{path}.tmp"
-        with fs.open(tmp_path, "wb") as file:
-            for chunk in response.iter_content(chunk_size=8192):
-                file.write(chunk)
+    # write to tmp path so that we don't have a partial file
+    tmp_path = f"{path}.tmp"
+    with fs.open(tmp_path, "wb") as file:
+        for chunk in response.iter_content(chunk_size=8192):
+            file.write(chunk)

-        # rename to final path
-        fs.rename(tmp_path, path)
+    # rename to final path
+    fs.rename(tmp_path, path)

     return filename

@@ -111,21 +111,49 @@ def download_smithsonian_objects(
     df = load_smithsonian_metadata(download_dir=download_dir)
     urls = df["url"].tolist()

-    logger.info(f"Downloading {len(urls)} Smithsonian Objects with {processes=}")
+    # filename = os.path.join(download_dir, "smithsonian", "objects", f"{uid}.glb")
+    objects_dir = os.path.join(download_dir, "smithsonian", "objects")
+    fs, path = fsspec.core.url_to_fs(objects_dir)
+    fs.makedirs(path, exist_ok=True)
+
+    # get the existing glb files
+    existing_glb_files = fs.glob(os.path.join(objects_dir, "*.glb"), refresh=True)
+    existing_uids = [os.path.basename(file).split(".")[0] for file in existing_glb_files]
+
+    # find the urls that need to be downloaded
+    out = []
+    urls_to_download = set()
+    already_downloaded_urls = set()
+    for url in urls:
+        uid = get_uid_from_str(url)
+        if uid not in existing_uids:
+            urls_to_download.add(url)
+        else:
+            already_downloaded_urls.add(url)
+            out.append({"download_path": os.path.join(objects_dir, f"{uid}.glb"), "url": url})
+
+    logger.info(f"Found {len(already_downloaded_urls)} Smithsonian Objects already downloaded")
+    logger.info(f"Downloading {len(urls_to_download)} Smithsonian Objects with {processes=}")
+
+    if len(urls_to_download) == 0:
+        return out
+
     with Pool(processes=processes) as pool:
         results = list(
             tqdm(
                 pool.imap_unordered(
-                    partial(download_smithsonian_object, download_dir=download_dir),
-                    urls,
+                    partial(_download_smithsonian_object, download_dir=download_dir),
+                    urls_to_download,
                 ),
-                total=len(urls),
+                total=len(urls_to_download),
                 desc="Downloading Smithsonian Objects",
             )
         )
-    out = [
+
+    out.extend([
         {"download_path": download_path, "url": url}
         for download_path, url in zip(results, urls)
         if download_path is not None
-    ]
+    ])
+
     return out
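
One caveat about the last hunk: `pool.imap_unordered` yields results in completion order, and `urls_to_download` is a set, so `zip(results, urls)` is not guaranteed to pair each download path with the URL that produced it. A common way to make the pairing order-independent is to have the worker return its input URL alongside the result. The sketch below shows that pattern with hypothetical names (`_download_one`, `download_all`); it is not the repository's API:

```python
import os
from functools import partial
from multiprocessing import Pool
from typing import Optional, Tuple

import requests
from tqdm import tqdm


def _download_one(url: str, download_dir: str = "~/.objaverse") -> Tuple[str, Optional[str]]:
    # Hypothetical worker: returns (url, path) so the caller knows which
    # input produced which output, regardless of completion order.
    out_dir = os.path.expanduser(download_dir)
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, url.split("/")[-1])
    response = requests.get(url)
    if response.status_code == 404:
        return url, None
    with open(path, "wb") as f:
        f.write(response.content)
    return url, path


def download_all(urls_to_download, download_dir="~/.objaverse", processes=4):
    out = []
    with Pool(processes=processes) as pool:
        # Each yielded item carries its own URL, so no zip against the
        # (unordered) input iterable is needed.
        for url, path in tqdm(
            pool.imap_unordered(
                partial(_download_one, download_dir=download_dir),
                urls_to_download,
            ),
            total=len(urls_to_download),
            desc="Downloading objects",
        ):
            if path is not None:
                out.append({"download_path": path, "url": url})
    return out
```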