mattdeitke committed
Commit 28e13bd
1 Parent(s): b51f134

lint updates

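The commit message is terse, so as context: the hunks below are mechanical cleanups of the kind pylint and flake8 flag (set comprehensions, explicit file encodings, checked subprocess calls, docstrings, dropping an f-string prefix with no placeholders). A minimal illustration of two of the recurring patterns, using placeholder paths and URLs rather than code from the repo:

```python
# Explicit encoding: without it, open() falls back to the platform's locale
# encoding, which can differ between machines (pylint's unspecified-encoding check).
with open("notes.txt", "w", encoding="utf-8") as f:  # "notes.txt" is a placeholder
    f.write("text is always written as UTF-8\n")

# f-string with no placeholders: the f prefix does nothing, so a plain string
# is preferred (pylint's f-string-without-interpolation check).
url = "https://example.com/data.json.gz"  # was: f"https://example.com/data.json.gz"
```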
objaverse_xl/github.py CHANGED
@@ -71,12 +71,31 @@ def _get_repo_id_with_hash(item: pd.Series) -> str:
 
 
 def _git_shallow_clone(repo_url: str, target_directory: str) -> bool:
+    """Helper function to shallow clone a repo with git.
+
+    Args:
+        repo_url (str): URL of the repo to clone.
+        target_directory (str): Directory to clone the repo to.
+
+    Returns:
+        bool: True if the clone was successful, False otherwise.
+    """
     return _run_command_with_check(
         ["git", "clone", "--depth", "1", repo_url, target_directory],
     )
 
 
 def _run_command_with_check(command: List[str], cwd: Optional[str] = None) -> bool:
+    """Helper function to run a command and check if it was successful.
+
+    Args:
+        command (List[str]): Command to run.
+        cwd (Optional[str], optional): Current working directory to run the command in.
+            Defaults to None.
+
+    Returns:
+        bool: True if the command was successful, False otherwise.
+    """
     try:
         subprocess.run(
             command,
@@ -94,6 +113,14 @@ def _run_command_with_check(command: List[str], cwd: Optional[str] = None) -> bool:
 
 
 def get_file_hash(file_path: str) -> str:
+    """Get the sha256 hash of a file.
+
+    Args:
+        file_path (str): Path to the file.
+
+    Returns:
+        str: sha256 hash of the file.
+    """
     # Check if the path is a symbolic link
     if os.path.islink(file_path):
         # Resolve the symbolic link
@@ -124,12 +151,20 @@ def _process_repo(
     handle_new_object: Optional[Callable],
     commit_hash: Optional[str],
 ) -> List[Dict[str, str]]:
-    """
+    """Process a single repo.
 
     Args:
+        repo_id (str): GitHub repo ID in the format of organization/repo.
+        fs (fsspec.AbstractFileSystem): File system to use for saving the repo.
+        base_dir (str): Base directory to save the repo to.
         expected_objects (Dict[str, str]): Dictionary of objects that one expects to
             find in the repo. Keys are the GitHub URLs and values are the sha256 of the
             objects.
+        {and the rest of the args are the same as download_github_objects}
+
+    Returns:
+        List[Dict[str, str]]: List of dictionaries with the keys "githubUrl" and
+            "sha256" for each downloaded object.
     """
     # NOTE: assuming that the user has already checked that the repo doesn't exist,
     org, repo = repo_id.split("/")
@@ -237,7 +272,9 @@ def _process_repo(
 
     # save the file hashes to a json file
     with open(
-        os.path.join(target_directory, ".objaverse-file-hashes.json"), "w"
+        os.path.join(target_directory, ".objaverse-file-hashes.json"),
+        "w",
+        encoding="utf-8",
     ) as f:
         json.dump(file_hashes, f, indent=2)
 
@@ -299,14 +336,14 @@ def _list_files(root_dir: str) -> List[str]:
 
 def _pull_lfs_files(repo_dir: str) -> None:
     if _has_lfs_files(repo_dir):
-        subprocess.run(["git", "lfs", "pull"], cwd=repo_dir)
+        subprocess.run(["git", "lfs", "pull"], cwd=repo_dir, check=True)
 
 
 def _has_lfs_files(repo_dir: str) -> bool:
     gitattributes_path = os.path.join(repo_dir, ".gitattributes")
     if not os.path.exists(gitattributes_path):
         return False
-    with open(gitattributes_path, "r") as f:
+    with open(gitattributes_path, "r", encoding="utf-8") as f:
         for line in f:
             if "filter=lfs" in line:
                 return True
@@ -316,13 +353,26 @@ def _has_lfs_files(repo_dir: str) -> bool:
 def _get_commit_hash_from_local_git_dir(local_git_dir: str) -> str:
     # get the git hash of the repo
     result = subprocess.run(
-        ["git", "rev-parse", "HEAD"], cwd=local_git_dir, capture_output=True
+        ["git", "rev-parse", "HEAD"], cwd=local_git_dir, capture_output=True, check=True
     )
     commit_hash = result.stdout.strip().decode("utf-8")
     return commit_hash
 
 
 def _parallel_process_repo(args) -> List[Dict[str, str]]:
+    """Helper function to process a repo in parallel.
+
+    Note: This function is used to parallelize the processing of repos. It is not
+    intended to be called directly.
+
+    Args:
+        args (Tuple): Tuple of arguments to pass to _process_repo.
+
+    Returns:
+        List[Dict[str, str]]: List of dictionaries with the keys "githubUrl" and
+            "sha256" for each downloaded object.
+    """
+
     (
         repo_id_hash,
         fs,
@@ -440,9 +490,9 @@ def download_github_objects(
     # Getting immediate subdirectories of root_path
     if save_repo_format == "files":
         downloaded_repo_dirs = fs.glob(base_download_dir + "/repos/*/*/")
-        downloaded_repo_ids = set(
-            ["/".join(x.split("/")[-2:]) for x in downloaded_repo_dirs]
-        )
+        downloaded_repo_ids = {
+            "/".join(x.split("/")[-2:]) for x in downloaded_repo_dirs
+        }
     else:
         downloaded_repo_dirs = fs.glob(
             base_download_dir + f"/repos/*/*.{save_repo_format}"
@@ -460,9 +510,9 @@ def download_github_objects(
     # get the unique repoIds
     objects["repoIdHash"] = objects.apply(_get_repo_id_with_hash, axis=1)
     repo_id_hashes = set(objects["repoIdHash"].unique().tolist())
-    repo_ids = set(
-        ["/".join(repo_id_hash.split("/")[:2]) for repo_id_hash in repo_id_hashes]
-    )
+    repo_ids = {
+        "/".join(repo_id_hash.split("/")[:2]) for repo_id_hash in repo_id_hashes
+    }
     assert len(repo_id_hashes) == len(repo_ids), (
         f"More than 1 commit hash per repoId!"
         f" {len(repo_id_hashes)=}, {len(repo_ids)=}"
objaverse_xl/objaverse_v1.py CHANGED
@@ -36,22 +36,20 @@ def load_annotations(
     # get the dir ids that need to be loaded if only downloading a subset of uids
     object_paths = _load_object_paths(download_dir=download_dir)
     dir_ids = (
-        set([object_paths[uid].split("/")[1] for uid in uids])
+        {object_paths[uid].split("/")[1] for uid in uids}
         if uids is not None
-        else set([f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)])
+        else {f"{i // 1000:03d}-{i % 1000:03d}" for i in range(160)}
     )
 
     # get the existing metadata files
     existing_metadata_files = fs.glob(
         os.path.join(metadata_path, "*.json.gz"), refresh=True
     )
-    existing_dir_ids = set(
-        [
-            file.split("/")[-1].split(".")[0]
-            for file in existing_metadata_files
-            if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
-        ]
-    )
+    existing_dir_ids = {
+        file.split("/")[-1].split(".")[0]
+        for file in existing_metadata_files
+        if file.endswith(".json.gz")  # note partial files end with .json.gz.tmp
+    }
     downloaded_dir_ids = existing_dir_ids.intersection(dir_ids)
     logger.info(f"Found {len(downloaded_dir_ids)} metadata files already downloaded")
 
@@ -212,13 +210,11 @@ def load_objects(
     existing_file_paths = fs.glob(
         os.path.join(path, "glbs", "*", "*.glb"), refresh=True
     )
-    existing_uids = set(
-        [
-            file.split("/")[-1].split(".")[0]
-            for file in existing_file_paths
-            if file.endswith(".glb")  # note partial files end with .glb.tmp
-        ]
-    )
+    existing_uids = {
+        file.split("/")[-1].split(".")[0]
+        for file in existing_file_paths
+        if file.endswith(".glb")  # note partial files end with .glb.tmp
+    }
 
     # add the existing downloaded uids to the return dict
     out = {}
@@ -285,7 +281,7 @@ def load_lvis_annotations(download_dir: str = "~/.objaverse") -> Dict[str, List[str]]:
     Returns:
         A dictionary mapping the LVIS category to the list of uids in that category.
     """
-    hf_url = f"https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
+    hf_url = "https://huggingface.co/datasets/allenai/objaverse/resolve/main/lvis-annotations.json.gz"
 
     download_path = os.path.join(
         download_dir, "hf-objaverse-v1", "lvis-annotations.json.gz"
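The objaverse_v1.py edits are all the same shape: a list comprehension wrapped in `set(...)` becomes a set comprehension. The two forms produce identical sets; the comprehension just skips the intermediate list. A standalone before/after with made-up file names:

```python
files = ["000-000.json.gz", "000-001.json.gz", "000-002.json.gz.tmp"]

# Before: build a temporary list, then convert it to a set.
before = set([f.split(".")[0] for f in files if f.endswith(".json.gz")])

# After: build the set directly with a set comprehension (no temporary list).
after = {f.split(".")[0] for f in files if f.endswith(".json.gz")}

assert before == after == {"000-000", "000-001"}
```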
objaverse_xl/thingiverse.py CHANGED
@@ -125,9 +125,9 @@ def download_thingiverse_objects(
 
     # check to filter out files that already exist
     existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
-    existing_file_ids = set(
-        [os.path.basename(file).split(".")[0].split("-")[-1] for file in existing_files]
-    )
+    existing_file_ids = {
+        os.path.basename(file).split(".")[0].split("-")[-1] for file in existing_files
+    }
 
     # filter out existing files
     items_to_download = []
@@ -222,8 +222,8 @@ def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
     return annotations_df
 
 
-if __name__ == "__main__":
-    # example usage
-    annotations = load_annotations()
-    file_ids = annotations.head(n=100)["fileId"].tolist()
-    download_thingiverse_objects(file_ids=file_ids, processes=5)
+# if __name__ == "__main__":
+#     # example usage
+#     annotations = load_annotations()
+#     file_ids = annotations.head(n=100)["fileId"].tolist()
+#     download_thingiverse_objects(file_ids=file_ids, processes=5)
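Since the `if __name__ == "__main__":` block in thingiverse.py is now commented out, that example no longer runs when the module is executed directly. The same flow from a user script would look roughly like this (assuming the module is importable as `objaverse_xl.thingiverse`, as the file path suggests):

```python
from objaverse_xl.thingiverse import download_thingiverse_objects, load_annotations

# Same example the commented-out __main__ block showed: take the first 100
# annotated files and download them with 5 worker processes.
annotations = load_annotations()
file_ids = annotations.head(n=100)["fileId"].tolist()
download_thingiverse_objects(file_ids=file_ids, processes=5)
```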