mattdeitke committed
Commit aae8281
1 Parent(s): 1e35c61

start refactor with abstract class

Files changed (2)
  1. objaverse_xl/abstract.py +82 -0
  2. objaverse_xl/github.py +486 -467
objaverse_xl/abstract.py ADDED
@@ -0,0 +1,82 @@
+from abc import ABC, abstractmethod
+from typing import Callable, Dict, List, Optional
+
+import pandas as pd
+
+
+class ObjaverseSource(ABC):
+    @abstractmethod
+    def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
+        pass
+
+    @abstractmethod
+    def download_objects(
+        self,
+        objects: pd.DataFrame,
+        download_dir: str = "~/.objaverse",
+        processes: Optional[int] = None,
+        handle_found_object: Optional[Callable] = None,
+        handle_modified_object: Optional[Callable] = None,
+        handle_missing_object: Optional[Callable] = None,
+        **kwargs
+    ) -> List[Dict[str, str]]:
+        """Downloads all objects from the source.
+
+        Args:
+            objects (pd.DataFrame): Objects to download. Must have columns for
+                the object "fileIdentifier" and "sha256". Use the `load_annotations`
+                function to get the metadata.
+            download_dir (str, optional): Directory to download the objects to.
+                Supports all file systems supported by fsspec. Defaults to
+                "~/.objaverse".
+            processes (Optional[int], optional): Number of processes to use for
+                downloading. If None, will use the number of CPUs on the machine.
+                Defaults to None.
+            handle_found_object (Optional[Callable], optional): Called when an object
+                is successfully found and downloaded. Here, the object has the same
+                sha256 as the one that was downloaded with Objaverse-XL. If None, the
+                object will be downloaded, but nothing will be done with it. Args for
+                the function include:
+                - local_path (str): Local path to the downloaded 3D object.
+                - file_identifier (str): File identifier of the 3D object.
+                - sha256 (str): SHA256 of the contents of the 3D object.
+                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                  is particular to the source.
+                Return is not used. Defaults to None.
+            handle_modified_object (Optional[Callable], optional): Called when a
+                modified object is found and downloaded. Here, the object is
+                successfully downloaded, but it has a different sha256 than the one
+                that was downloaded with Objaverse-XL. This is not expected to happen
+                very often, because the same commit hash is used for each repo. If
+                None, the object will be downloaded, but nothing will be done with it.
+                Args for the function include:
+                - local_path (str): Local path to the downloaded 3D object.
+                - file_identifier (str): File identifier of the 3D object.
+                - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
+                  object.
+                - old_sha256 (str): Expected SHA256 of the contents of the 3D object
+                  as it was when it was downloaded with Objaverse-XL.
+                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                  is particular to the source.
+                Return is not used. Defaults to None.
+            handle_missing_object (Optional[Callable], optional): Called when an
+                object that is in Objaverse-XL is not found. Here, it is likely that
+                the repository was deleted or renamed. If None, nothing will be done
+                with the missing object. Args for the function include:
+                - file_identifier (str): File identifier of the 3D object.
+                - sha256 (str): SHA256 of the contents of the original 3D object.
+                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which
+                  is particular to the source.
+                Return is not used. Defaults to None.
+
+        Returns:
+            List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
+                and "sha256" for each downloaded object.
+        """
+        pass
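
For orientation (not part of the commit): a minimal sketch of how a concrete source is expected to plug into this interface. The DummySource class, its single hard-coded annotation row, and the example URL are hypothetical.

    from typing import Callable, Dict, List, Optional

    import pandas as pd

    from objaverse_xl.abstract import ObjaverseSource


    class DummySource(ObjaverseSource):
        """Hypothetical source that serves a fixed, in-memory annotation table."""

        def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
            return pd.DataFrame(
                [{"fileIdentifier": "https://example.com/cube.obj", "sha256": "abc123"}]
            )

        def download_objects(
            self,
            objects: pd.DataFrame,
            download_dir: str = "~/.objaverse",
            processes: Optional[int] = None,
            handle_found_object: Optional[Callable] = None,
            handle_modified_object: Optional[Callable] = None,
            handle_missing_object: Optional[Callable] = None,
            **kwargs,
        ) -> List[Dict[str, str]]:
            # A real source would fetch each file and fire the callbacks; this stub
            # just reports every requested object as missing.
            for _, row in objects.iterrows():
                if handle_missing_object is not None:
                    handle_missing_object(
                        file_identifier=row["fileIdentifier"],
                        sha256=row["sha256"],
                        metadata={},
                    )
            return []
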
objaverse_xl/github.py CHANGED
@@ -10,6 +10,7 @@ import tempfile
 from multiprocessing import Pool
 from typing import Callable, Dict, List, Literal, Optional
 from objaverse_xl.utils import get_file_hash
+from objaverse_xl.abstract import ObjaverseSource

 import fsspec
 import pandas as pd
@@ -33,512 +34,530 @@ FILE_EXTENSIONS = [
 ]


-def load_github_metadata(download_dir: str = "~/.objaverse") -> pd.DataFrame:
-    """Loads the GitHub 3D object metadata as a Pandas DataFrame.
-
-    Args:
-        download_dir (str, optional): Directory to download the parquet metadata file.
-            Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
-
-    Returns:
-        pd.DataFrame: GitHub 3D object metadata as a Pandas DataFrame with columns for
-            the object "fileIdentifier", "license", "source", "fileType", and "sha256".
-    """
-    filename = os.path.join(download_dir, "github", "github-urls.parquet")
-    fs, path = fsspec.core.url_to_fs(filename)
-    fs.makedirs(os.path.dirname(path), exist_ok=True)
-
-    # download the parquet file if it doesn't exist
-    if not fs.exists(path):
-        url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/github/github-urls.parquet"
-        response = requests.get(url)
-        response.raise_for_status()
-        with fs.open(path, "wb") as file:
-            file.write(response.content)
-
-    # load the parquet file with fsspec
-    with fs.open(path) as f:
-        df = pd.read_parquet(f)
-
-    df["metadata"] = "{}"
-
-    return df
-
-
-def _get_repo_id_with_hash(item: pd.Series) -> str:
-    org, repo = item["fileIdentifier"].split("/")[3:5]
-    commit_hash = item["fileIdentifier"].split("/")[6]
-    return f"{org}/{repo}/{commit_hash}"
-
-
-def _git_shallow_clone(repo_url: str, target_directory: str) -> bool:
-    """Helper function to shallow clone a repo with git.
-
-    Args:
-        repo_url (str): URL of the repo to clone.
-        target_directory (str): Directory to clone the repo to.
-
-    Returns:
-        bool: True if the clone was successful, False otherwise.
-    """
-    return _run_command_with_check(
-        ["git", "clone", "--depth", "1", repo_url, target_directory],
-    )
-
-
-def _run_command_with_check(command: List[str], cwd: Optional[str] = None) -> bool:
-    """Helper function to run a command and check if it was successful.
-
-    Args:
-        command (List[str]): Command to run.
-        cwd (Optional[str], optional): Current working directory to run the command in.
-            Defaults to None.
-
-    Returns:
-        bool: True if the command was successful, False otherwise.
-    """
-    try:
-        subprocess.run(
-            command,
-            cwd=cwd,
-            check=True,
-            stdout=subprocess.DEVNULL,
-            stderr=subprocess.DEVNULL,
-        )
-        return True
-    except subprocess.CalledProcessError as e:
-        logger.error("Error:", e)
-        logger.error(e.stdout)
-        logger.error(e.stderr)
-        return False
-
-
-def _process_repo(
-    repo_id: str,
-    fs: fsspec.AbstractFileSystem,
-    base_dir: str,
-    save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]],
-    expected_objects: Dict[str, str],
-    handle_found_object: Optional[Callable],
-    handle_modified_object: Optional[Callable],
-    handle_missing_object: Optional[Callable],
-    handle_new_object: Optional[Callable],
-    commit_hash: Optional[str],
-) -> List[Dict[str, str]]:
-    """Process a single repo.
-
-    Args:
-        repo_id (str): GitHub repo ID in the format of organization/repo.
-        fs (fsspec.AbstractFileSystem): File system to use for saving the repo.
-        base_dir (str): Base directory to save the repo to.
-        expected_objects (Dict[str, str]): Dictionary of objects that one expects to
-            find in the repo. Keys are the "fileIdentifier" (i.e., the GitHub URL in
-            this case) and values are the "sha256" of the objects.
-        {and the rest of the args are the same as download_github_objects}
-
-    Returns:
-        List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier" and
-            "sha256" for each downloaded object.
-    """
-    # NOTE: assuming that the user has already checked that the repo doesn't exist,
-    org, repo = repo_id.split("/")
-
-    with tempfile.TemporaryDirectory() as temp_dir:
-        # clone the repo to a temp directory
-        target_directory = os.path.join(temp_dir, repo)
-        successful_clone = _git_shallow_clone(
-            f"https://github.com/{org}/{repo}.git", target_directory
-        )
-        if not successful_clone:
-            logger.error(f"Could not clone {repo_id}")
-            if handle_missing_object is not None:
-                for github_url, sha256 in expected_objects.items():
-                    handle_missing_object(
-                        file_identifier=github_url,
-                        sha256=sha256,
-                        metadata=dict(github_organization=org, github_repo=repo),
-                    )
-            return []
-
-        # use the commit hash if specified
-        repo_commit_hash = _get_commit_hash_from_local_git_dir(target_directory)
-        if commit_hash is not None:
-            keep_going = True
-            if repo_commit_hash != commit_hash:
-                # run git reset --hard && git checkout 37f4d8d287e201ce52c048bf74d46d6a09d26b2c
-                if not _run_command_with_check(
-                    ["git", "fetch", "origin", commit_hash], target_directory
-                ):
-                    logger.error(
-                        f"Error in git fetch! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                    )
-                    keep_going = False
-
-                if keep_going and not _run_command_with_check(
-                    ["git", "reset", "--hard"], target_directory
-                ):
-                    logger.error(
-                        f"Error in git reset! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                    )
-                    keep_going = False
-
-                if keep_going:
-                    if _run_command_with_check(
-                        ["git", "checkout", commit_hash], target_directory
-                    ):
-                        repo_commit_hash = commit_hash
-                    else:
-                        logger.error(
-                            f"Error in git checkout! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
-                        )
-
-        # pull the lfs files
-        _pull_lfs_files(target_directory)
-
-        # get all the files in the repo
-        files = _list_files(target_directory)
-        files_with_3d_extension = [
-            file
-            for file in files
-            if any(file.lower().endswith(ext) for ext in FILE_EXTENSIONS)
-        ]
-
-        # get the sha256 for each file
-        file_hashes = []
-        for file in tqdm(files_with_3d_extension, desc="Handling 3D object files"):
-            file_hash = get_file_hash(file)
-            # remove the temp_dir from the file path
-            github_url = file.replace(
-                target_directory,
-                f"https://github.com/{org}/{repo}/blob/{repo_commit_hash}",
-            )
-            file_hashes.append(dict(sha256=file_hash, fileIdentifier=github_url))
-
-            # handle the object under different conditions
-            if github_url in expected_objects:
-                if expected_objects[github_url] == file_hash:
-                    if handle_found_object is not None:
-                        handle_found_object(
-                            local_path=file,
-                            file_identifier=github_url,
-                            sha256=file_hash,
-                            metadata=dict(github_organization=org, github_repo=repo),
-                        )
-                else:
-                    if handle_modified_object is not None:
-                        handle_modified_object(
-                            local_path=file,
-                            file_identifier=github_url,
-                            new_sha256=file_hash,
-                            old_sha256=expected_objects[github_url],
-                            metadata=dict(github_organization=org, github_repo=repo),
-                        )
-            elif handle_new_object is not None:
-                handle_new_object(
-                    local_path=file,
-                    file_identifier=github_url,
-                    sha256=file_hash,
-                    metadata=dict(github_organization=org, github_repo=repo),
-                )
-
-        # save the file hashes to a json file
-        with open(
-            os.path.join(target_directory, ".objaverse-file-hashes.json"),
-            "w",
-            encoding="utf-8",
-        ) as f:
-            json.dump(file_hashes, f, indent=2)
-
-        # remove the .git directory
-        shutil.rmtree(os.path.join(target_directory, ".git"))
-
-        if save_repo_format is not None:
-            logger.debug(f"Saving as {save_repo_format}")
-            # save the repo to a zip file
-            if save_repo_format == "zip":
-                shutil.make_archive(target_directory, "zip", target_directory)
-            elif save_repo_format == "tar":
-                with tarfile.open(os.path.join(temp_dir, f"{repo}.tar"), "w") as tar:
-                    tar.add(target_directory, arcname=repo)
-            elif save_repo_format == "tar.gz":
-                with tarfile.open(
-                    os.path.join(temp_dir, f"{repo}.tar.gz"), "w:gz"
-                ) as tar:
-                    tar.add(target_directory, arcname=repo)
-            elif save_repo_format == "files":
-                pass
-            else:
-                raise ValueError(
-                    f"save_repo_format must be one of zip, tar, tar.gz, files. Got {save_repo_format}"
-                )
-
-            dirname = os.path.join(base_dir, "repos", org)
-            fs.makedirs(dirname, exist_ok=True)
-            if save_repo_format != "files":
-                # move the repo to the correct location (with put)
-                fs.put(
-                    os.path.join(temp_dir, f"{repo}.{save_repo_format}"),
-                    os.path.join(dirname, f"{repo}.{save_repo_format}"),
-                )
-            else:
-                # move the repo to the correct location (with put)
-                fs.put(target_directory, dirname, recursive=True)
-
-        # get each object that was missing from the expected objects
-        if handle_missing_object is not None:
-            obtained_urls = {x["fileIdentifier"] for x in file_hashes}
-            for github_url, sha256 in expected_objects.items():
-                if github_url not in obtained_urls:
-                    handle_missing_object(
-                        file_identifier=github_url,
-                        sha256=sha256,
-                        metadata=dict(github_organization=org, github_repo=repo),
-                    )
-
-        return file_hashes
-
-
-def _list_files(root_dir: str) -> List[str]:
-    return [
-        os.path.join(root, f) for root, dirs, files in os.walk(root_dir) for f in files
-    ]
-
-
-def _pull_lfs_files(repo_dir: str) -> None:
-    if _has_lfs_files(repo_dir):
-        subprocess.run(["git", "lfs", "pull"], cwd=repo_dir, check=True)
-
-
-def _has_lfs_files(repo_dir: str) -> bool:
-    gitattributes_path = os.path.join(repo_dir, ".gitattributes")
-    if not os.path.exists(gitattributes_path):
-        return False
-    with open(gitattributes_path, "r", encoding="utf-8") as f:
-        for line in f:
-            if "filter=lfs" in line:
-                return True
-    return False
-
-
-def _get_commit_hash_from_local_git_dir(local_git_dir: str) -> str:
-    # get the git hash of the repo
-    result = subprocess.run(
-        ["git", "rev-parse", "HEAD"], cwd=local_git_dir, capture_output=True, check=True
-    )
-    commit_hash = result.stdout.strip().decode("utf-8")
-    return commit_hash
-
-
-def _parallel_process_repo(args) -> List[Dict[str, str]]:
-    """Helper function to process a repo in parallel.
-
-    Note: This function is used to parallelize the processing of repos. It is not
-    intended to be called directly.
-
-    Args:
-        args (Tuple): Tuple of arguments to pass to _process_repo.
-
-    Returns:
-        List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier" and
-            "sha256" for each downloaded object.
-    """
-
-    (
-        repo_id_hash,
-        fs,
-        base_dir,
-        save_repo_format,
-        expected_objects,
-        handle_found_object,
-        handle_modified_object,
-        handle_missing_object,
-        handle_new_object,
-    ) = args
-    repo_id = "/".join(repo_id_hash.split("/")[:2])
-    commit_hash = repo_id_hash.split("/")[2]
-    return _process_repo(
-        repo_id=repo_id,
-        fs=fs,
-        base_dir=base_dir,
-        save_repo_format=save_repo_format,
-        expected_objects=expected_objects,
-        handle_found_object=handle_found_object,
-        handle_modified_object=handle_modified_object,
-        handle_missing_object=handle_missing_object,
-        handle_new_object=handle_new_object,
-        commit_hash=commit_hash,
-    )
-
-
-def _process_group(group):
-    key, group_df = group
-    return key, group_df.set_index("fileIdentifier")["sha256"].to_dict()
-
-
-def download_github_objects(
-    objects: pd.DataFrame,
-    processes: Optional[int] = None,
-    download_dir: str = "~/.objaverse",
-    save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]] = None,
-    handle_found_object: Optional[Callable] = None,
-    handle_modified_object: Optional[Callable] = None,
-    handle_missing_object: Optional[Callable] = None,
-    handle_new_object: Optional[Callable] = None,
-) -> List[Dict[str, str]]:
-    """Download the specified GitHub objects.
-
-    Args:
-        objects (pd.DataFrame): GitHub objects to download. Must have columns for the
-            object "fileIdentifier" and "sha256". Use the load_github_metadata function
-            to get the metadata.
-        processes (Optional[int], optional): Number of processes to use for downloading.
-            If None, will use the number of CPUs on the machine. Defaults to None.
-        download_dir (str, optional): Directory to download the GitHub objects to.
-            Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
-        save_repo_format (Optional[Literal["zip", "tar", "tar.gz", "files"]], optional):
-            Format to save the repository. If None, the repository will not be saved. If
-            "files" is specified, each file will be saved individually. Otherwise, the
-            repository can be saved as a "zip", "tar", or "tar.gz" file. Defaults to
-            None.
-        handle_found_object (Optional[Callable], optional): Called when an object is
-            successfully found and downloaded. Here, the object has the same sha256 as
-            the one that was downloaded with Objaverse-XL. If None, the object will be
-            downloaded, but nothing will be done with it. Args for the function include:
-            - local_path (str): Local path to the downloaded 3D object.
-            - file_identifier (str): GitHub URL of the 3D object.
-            - sha256 (str): SHA256 of the contents of the 3D object.
-            - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-              GitHub organization and repo names.
-            Return is not used. Defaults to None.
-        handle_new_object (Optional[Callable], optional): Called when a new object is
-            found. Here, the object is not used in Objaverse-XL, but is still downloaded
-            with the repository. The object may have not been used because it does not
-            successfully import into Blender. If None, the object will be downloaded,
-            but nothing will be done with it. Args for the function include:
-            - local_path (str): Local path to the downloaded 3D object.
-            - file_identifier (str): GitHub URL of the 3D object.
-            - sha256 (str): SHA256 of the contents of the 3D object.
-            - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-              GitHub organization and repo names.
-            Return is not used. Defaults to None.
-        handle_modified_object (Optional[Callable], optional): Called when a modified
-            object is found and downloaded. Here, the object is successfully downloaded,
-            but it has a different sha256 than the one that was downloaded with
-            Objaverse-XL. This is not expected to happen very often, because the same
-            commit hash is used for each repo. If None, the object will be downloaded,
-            but nothing will be done with it. Args for the function include:
-            - local_path (str): Local path to the downloaded 3D object.
-            - file_identifier (str): GitHub URL of the 3D object.
-            - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
-              object.
-            - old_sha256 (str): Expected SHA256 of the contents of the 3D object as it
-              was when it was downloaded with Objaverse-XL.
-            - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-              GitHub organization and repo names.
-            Return is not used. Defaults to None.
-        handle_missing_object (Optional[Callable], optional): Called when an object that
-            is in Objaverse-XL is not found. Here, it is likely that the repository was
-            deleted or renamed. If None, nothing will be done with the missing object.
-            Args for the function include:
-            - file_identifier (str): GitHub URL of the 3D object.
-            - sha256 (str): SHA256 of the contents of the original 3D object.
-            - metadata (Dict[str, Any]): Metadata about the 3D object, including the
-              GitHub organization and repo names.
-            Return is not used. Defaults to None.
-
-    Returns:
-        List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier" and
-            "sha256" for each downloaded object.
-    """
-    if processes is None:
-        processes = multiprocessing.cpu_count()
-
-    base_download_dir = os.path.join(download_dir, "github")
-    fs, path = fsspec.core.url_to_fs(base_download_dir)
-    fs.makedirs(path, exist_ok=True)
-
-    # Getting immediate subdirectories of root_path
-    if save_repo_format == "files":
-        downloaded_repo_dirs = fs.glob(base_download_dir + "/repos/*/*/")
-        downloaded_repo_ids = {
-            "/".join(x.split("/")[-2:]) for x in downloaded_repo_dirs
-        }
-    else:
-        downloaded_repo_dirs = fs.glob(
-            base_download_dir + f"/repos/*/*.{save_repo_format}"
-        )
-        downloaded_repo_ids = set()
-        for x in downloaded_repo_dirs:
-            org, repo = x.split("/")[-2:]
-            repo = repo[: -len(f".{save_repo_format}")]
-            repo_id = f"{org}/{repo}"
-            downloaded_repo_ids.add(repo_id)
-
-    # make copy of objects
-    objects = objects.copy()
-
-    # get the unique repoIds
-    objects["repoIdHash"] = objects.apply(_get_repo_id_with_hash, axis=1)
-    repo_id_hashes = set(objects["repoIdHash"].unique().tolist())
-    repo_ids = {
-        "/".join(repo_id_hash.split("/")[:2]) for repo_id_hash in repo_id_hashes
-    }
-    assert len(repo_id_hashes) == len(repo_ids), (
-        f"More than 1 commit hash per repoId!"
-        f" {len(repo_id_hashes)=}, {len(repo_ids)=}"
-    )
-
-    logger.info(
-        f"Provided {len(repo_ids)} repoIds with {len(objects)} objects to process."
-    )
-
-    # remove repoIds that have already been downloaded
-    repo_ids_to_download = repo_ids - downloaded_repo_ids
-    repo_id_hashes_to_download = [
-        repo_id_hash
-        for repo_id_hash in repo_id_hashes
-        if "/".join(repo_id_hash.split("/")[:2]) in repo_ids_to_download
-    ]
-
-    logger.info(
-        f"Found {len(repo_ids_to_download)} repoIds not yet downloaded. Downloading now..."
-    )
-
-    # get the objects to download
-    groups = list(objects.groupby("repoIdHash"))
-    with Pool(processes=processes) as pool:
-        out_list = list(
-            tqdm(
-                pool.imap_unordered(_process_group, groups),
-                total=len(groups),
-                desc="Grouping objects by repository",
-            )
-        )
-    objects_per_repo_id_hash = dict(out_list)
-
-    all_args = [
-        (
-            repo_id_hash,
-            fs,
-            path,
-            save_repo_format,
-            objects_per_repo_id_hash[repo_id_hash],
-            handle_found_object,
-            handle_missing_object,
-            handle_modified_object,
-            handle_new_object,
-        )
-        for repo_id_hash in repo_id_hashes_to_download
-    ]
-
-    with Pool(processes=processes) as pool:
-        # use tqdm to show progress
-        out = list(
-            tqdm(
-                pool.imap_unordered(_parallel_process_repo, all_args),
-                total=len(all_args),
-            )
-        )
-    out_list = [item for sublist in out for item in sublist]
-    return out_list
+class GitHub(ObjaverseSource):
+    """Script to download objects from GitHub."""
+
+    def load_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
+        """Loads the GitHub 3D object metadata as a Pandas DataFrame.
+
+        Args:
+            download_dir (str, optional): Directory to download the parquet metadata
+                file. Supports all file systems supported by fsspec. Defaults to
+                "~/.objaverse".
+
+        Returns:
+            pd.DataFrame: GitHub 3D object metadata as a Pandas DataFrame with columns
+                for the object "fileIdentifier", "license", "source", "fileType",
+                "sha256", and "metadata".
+        """
+        filename = os.path.join(download_dir, "github", "github-urls.parquet")
+        fs, path = fsspec.core.url_to_fs(filename)
+        fs.makedirs(os.path.dirname(path), exist_ok=True)
+
+        # download the parquet file if it doesn't exist
+        if not fs.exists(path):
+            url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/github/github-urls.parquet"
+            response = requests.get(url)
+            response.raise_for_status()
+            with fs.open(path, "wb") as file:
+                file.write(response.content)
+
+        # load the parquet file with fsspec
+        with fs.open(path) as f:
+            df = pd.read_parquet(f)
+
+        df["metadata"] = "{}"
+
+        return df
+
+    def _get_repo_id_with_hash(self, item: pd.Series) -> str:
+        org, repo = item["fileIdentifier"].split("/")[3:5]
+        commit_hash = item["fileIdentifier"].split("/")[6]
+        return f"{org}/{repo}/{commit_hash}"
+
+    def _git_shallow_clone(self, repo_url: str, target_directory: str) -> bool:
+        """Helper function to shallow clone a repo with git.
+
+        Args:
+            repo_url (str): URL of the repo to clone.
+            target_directory (str): Directory to clone the repo to.
+
+        Returns:
+            bool: True if the clone was successful, False otherwise.
+        """
+        return self._run_command_with_check(
+            ["git", "clone", "--depth", "1", repo_url, target_directory],
+        )
+
+    def _run_command_with_check(
+        self, command: List[str], cwd: Optional[str] = None
+    ) -> bool:
+        """Helper function to run a command and check if it was successful.
+
+        Args:
+            command (List[str]): Command to run.
+            cwd (Optional[str], optional): Current working directory to run the command
+                in. Defaults to None.
+
+        Returns:
+            bool: True if the command was successful, False otherwise.
+        """
+        try:
+            subprocess.run(
+                command,
+                cwd=cwd,
+                check=True,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
+            return True
+        except subprocess.CalledProcessError as e:
+            logger.error(f"Error: {e}")
+            logger.error(e.stdout)
+            logger.error(e.stderr)
+            return False
+
+    def _process_repo(
+        self,
+        repo_id: str,
+        fs: fsspec.AbstractFileSystem,
+        base_dir: str,
+        save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]],
+        expected_objects: Dict[str, str],
+        handle_found_object: Optional[Callable],
+        handle_modified_object: Optional[Callable],
+        handle_missing_object: Optional[Callable],
+        handle_new_object: Optional[Callable],
+        commit_hash: Optional[str],
+    ) -> List[Dict[str, str]]:
+        """Process a single repo.
+
+        Args:
+            repo_id (str): GitHub repo ID in the format of organization/repo.
+            fs (fsspec.AbstractFileSystem): File system to use for saving the repo.
+            base_dir (str): Base directory to save the repo to.
+            expected_objects (Dict[str, str]): Dictionary of objects that one expects to
+                find in the repo. Keys are the "fileIdentifier" (i.e., the GitHub URL in
+                this case) and values are the "sha256" of the objects.
+            {and the rest of the args are the same as download_objects}
+
+        Returns:
+            List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
+                and "sha256" for each downloaded object.
+        """
+        # NOTE: assuming that the user has already checked that the repo doesn't exist
+        org, repo = repo_id.split("/")
+
+        with tempfile.TemporaryDirectory() as temp_dir:
+            # clone the repo to a temp directory
+            target_directory = os.path.join(temp_dir, repo)
+            successful_clone = self._git_shallow_clone(
+                f"https://github.com/{org}/{repo}.git", target_directory
+            )
+            if not successful_clone:
+                logger.error(f"Could not clone {repo_id}")
+                if handle_missing_object is not None:
+                    for github_url, sha256 in expected_objects.items():
+                        handle_missing_object(
+                            file_identifier=github_url,
+                            sha256=sha256,
+                            metadata=dict(github_organization=org, github_repo=repo),
+                        )
+                return []
+
+            # use the commit hash if specified
+            repo_commit_hash = self._get_commit_hash_from_local_git_dir(
+                target_directory
+            )
+            if commit_hash is not None:
+                keep_going = True
+                if repo_commit_hash != commit_hash:
+                    # run git reset --hard && git checkout 37f4d8d287e201ce52c048bf74d46d6a09d26b2c
+                    if not self._run_command_with_check(
+                        ["git", "fetch", "origin", commit_hash], target_directory
+                    ):
+                        logger.error(
+                            f"Error in git fetch! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
+                        )
+                        keep_going = False
+
+                    if keep_going and not self._run_command_with_check(
+                        ["git", "reset", "--hard"], target_directory
+                    ):
+                        logger.error(
+                            f"Error in git reset! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
+                        )
+                        keep_going = False
+
+                    if keep_going:
+                        if self._run_command_with_check(
+                            ["git", "checkout", commit_hash], target_directory
+                        ):
+                            repo_commit_hash = commit_hash
+                        else:
+                            logger.error(
+                                f"Error in git checkout! Sticking with {repo_commit_hash=} instead of {commit_hash=}"
+                            )
+
+            # pull the lfs files
+            self._pull_lfs_files(target_directory)
+
+            # get all the files in the repo
+            files = self._list_files(target_directory)
+            files_with_3d_extension = [
+                file
+                for file in files
+                if any(file.lower().endswith(ext) for ext in FILE_EXTENSIONS)
+            ]
+
+            # get the sha256 for each file
+            file_hashes = []
+            for file in tqdm(files_with_3d_extension, desc="Handling 3D object files"):
+                file_hash = get_file_hash(file)
+                # remove the temp_dir from the file path
+                github_url = file.replace(
+                    target_directory,
+                    f"https://github.com/{org}/{repo}/blob/{repo_commit_hash}",
+                )
+                file_hashes.append(dict(sha256=file_hash, fileIdentifier=github_url))
+
+                # handle the object under different conditions
+                if github_url in expected_objects:
+                    if expected_objects[github_url] == file_hash:
+                        if handle_found_object is not None:
+                            handle_found_object(
+                                local_path=file,
+                                file_identifier=github_url,
+                                sha256=file_hash,
+                                metadata=dict(
+                                    github_organization=org, github_repo=repo
+                                ),
+                            )
+                    else:
+                        if handle_modified_object is not None:
+                            handle_modified_object(
+                                local_path=file,
+                                file_identifier=github_url,
+                                new_sha256=file_hash,
+                                old_sha256=expected_objects[github_url],
+                                metadata=dict(
+                                    github_organization=org, github_repo=repo
+                                ),
+                            )
+                elif handle_new_object is not None:
+                    handle_new_object(
+                        local_path=file,
+                        file_identifier=github_url,
+                        sha256=file_hash,
+                        metadata=dict(github_organization=org, github_repo=repo),
+                    )
+
+            # save the file hashes to a json file
+            with open(
+                os.path.join(target_directory, ".objaverse-file-hashes.json"),
+                "w",
+                encoding="utf-8",
+            ) as f:
+                json.dump(file_hashes, f, indent=2)
+
+            # remove the .git directory
+            shutil.rmtree(os.path.join(target_directory, ".git"))
+
+            if save_repo_format is not None:
+                logger.debug(f"Saving as {save_repo_format}")
+                # save the repo to a zip file
+                if save_repo_format == "zip":
+                    shutil.make_archive(target_directory, "zip", target_directory)
+                elif save_repo_format == "tar":
+                    with tarfile.open(
+                        os.path.join(temp_dir, f"{repo}.tar"), "w"
+                    ) as tar:
+                        tar.add(target_directory, arcname=repo)
+                elif save_repo_format == "tar.gz":
+                    with tarfile.open(
+                        os.path.join(temp_dir, f"{repo}.tar.gz"), "w:gz"
+                    ) as tar:
+                        tar.add(target_directory, arcname=repo)
+                elif save_repo_format == "files":
+                    pass
+                else:
+                    raise ValueError(
+                        f"save_repo_format must be one of zip, tar, tar.gz, files. Got {save_repo_format}"
+                    )
+
+                dirname = os.path.join(base_dir, "repos", org)
+                fs.makedirs(dirname, exist_ok=True)
+                if save_repo_format != "files":
+                    # move the repo to the correct location (with put)
+                    fs.put(
+                        os.path.join(temp_dir, f"{repo}.{save_repo_format}"),
+                        os.path.join(dirname, f"{repo}.{save_repo_format}"),
+                    )
+                else:
+                    # move the repo to the correct location (with put)
+                    fs.put(target_directory, dirname, recursive=True)
+
+            # get each object that was missing from the expected objects
+            if handle_missing_object is not None:
+                obtained_urls = {x["fileIdentifier"] for x in file_hashes}
+                for github_url, sha256 in expected_objects.items():
+                    if github_url not in obtained_urls:
+                        handle_missing_object(
+                            file_identifier=github_url,
+                            sha256=sha256,
+                            metadata=dict(github_organization=org, github_repo=repo),
+                        )
+
+            return file_hashes
+
+    def _list_files(self, root_dir: str) -> List[str]:
+        return [
+            os.path.join(root, f)
+            for root, dirs, files in os.walk(root_dir)
+            for f in files
+        ]
+
+    def _pull_lfs_files(self, repo_dir: str) -> None:
+        if self._has_lfs_files(repo_dir):
+            subprocess.run(["git", "lfs", "pull"], cwd=repo_dir, check=True)
+
+    def _has_lfs_files(self, repo_dir: str) -> bool:
+        gitattributes_path = os.path.join(repo_dir, ".gitattributes")
+        if not os.path.exists(gitattributes_path):
+            return False
+        with open(gitattributes_path, "r", encoding="utf-8") as f:
+            for line in f:
+                if "filter=lfs" in line:
+                    return True
+        return False
+
+    def _get_commit_hash_from_local_git_dir(self, local_git_dir: str) -> str:
+        # get the git hash of the repo
+        result = subprocess.run(
+            ["git", "rev-parse", "HEAD"],
+            cwd=local_git_dir,
+            capture_output=True,
+            check=True,
+        )
+        commit_hash = result.stdout.strip().decode("utf-8")
+        return commit_hash
+
+    def _parallel_process_repo(self, args) -> List[Dict[str, str]]:
+        """Helper function to process a repo in parallel.
+
+        Note: This function is used to parallelize the processing of repos. It is not
+        intended to be called directly.
+
+        Args:
+            args (Tuple): Tuple of arguments to pass to _process_repo.
+
+        Returns:
+            List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
+                and "sha256" for each downloaded object.
+        """
+
+        (
+            repo_id_hash,
+            fs,
+            base_dir,
+            save_repo_format,
+            expected_objects,
+            handle_found_object,
+            handle_modified_object,
+            handle_missing_object,
+            handle_new_object,
+        ) = args
+        repo_id = "/".join(repo_id_hash.split("/")[:2])
+        commit_hash = repo_id_hash.split("/")[2]
+        return self._process_repo(
+            repo_id=repo_id,
+            fs=fs,
+            base_dir=base_dir,
+            save_repo_format=save_repo_format,
+            expected_objects=expected_objects,
+            handle_found_object=handle_found_object,
+            handle_modified_object=handle_modified_object,
+            handle_missing_object=handle_missing_object,
+            handle_new_object=handle_new_object,
+            commit_hash=commit_hash,
+        )
+
+    def _process_group(self, group):
+        key, group_df = group
+        return key, group_df.set_index("fileIdentifier")["sha256"].to_dict()
+
+    def download_objects(
+        self,
+        objects: pd.DataFrame,
+        download_dir: str = "~/.objaverse",
+        processes: Optional[int] = None,
+        handle_found_object: Optional[Callable] = None,
+        handle_modified_object: Optional[Callable] = None,
+        handle_missing_object: Optional[Callable] = None,
+        *,
+        save_repo_format: Optional[Literal["zip", "tar", "tar.gz", "files"]] = None,
+        handle_new_object: Optional[Callable] = None,
+        **kwargs,
+    ) -> List[Dict[str, str]]:
+        """Download the specified GitHub objects.
+
+        Args:
+            objects (pd.DataFrame): GitHub objects to download. Must have columns for
+                the object "fileIdentifier" and "sha256". Use the `load_annotations`
+                function to get the metadata.
+            download_dir (str, optional): Directory to download the GitHub objects to.
+                Supports all file systems supported by fsspec. Defaults to
+                "~/.objaverse".
+            processes (Optional[int], optional): Number of processes to use for
+                downloading. If None, will use the number of CPUs on the machine.
+                Defaults to None.
+            handle_found_object (Optional[Callable], optional): Called when an object is
+                successfully found and downloaded. Here, the object has the same sha256
+                as the one that was downloaded with Objaverse-XL. If None, the object
+                will be downloaded, but nothing will be done with it. Args for the
+                function include:
+                - local_path (str): Local path to the downloaded 3D object.
+                - file_identifier (str): GitHub URL of the 3D object.
+                - sha256 (str): SHA256 of the contents of the 3D object.
+                - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                  GitHub organization and repo names.
+                Return is not used. Defaults to None.
+            handle_modified_object (Optional[Callable], optional): Called when a
+                modified object is found and downloaded. Here, the object is
+                successfully downloaded, but it has a different sha256 than the one that
+                was downloaded with Objaverse-XL. This is not expected to happen very
+                often, because the same commit hash is used for each repo. If None, the
+                object will be downloaded, but nothing will be done with it. Args for
+                the function include:
+                - local_path (str): Local path to the downloaded 3D object.
+                - file_identifier (str): GitHub URL of the 3D object.
+                - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
+                  object.
+                - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
+                  it was when it was downloaded with Objaverse-XL.
+                - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                  GitHub organization and repo names.
+                Return is not used. Defaults to None.
+            handle_missing_object (Optional[Callable], optional): Called when an object
+                that is in Objaverse-XL is not found. Here, it is likely that the
+                repository was deleted or renamed. If None, nothing will be done with
+                the missing object. Args for the function include:
+                - file_identifier (str): GitHub URL of the 3D object.
+                - sha256 (str): SHA256 of the contents of the original 3D object.
+                - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                  GitHub organization and repo names.
+                Return is not used. Defaults to None.
+            save_repo_format (Optional[Literal["zip", "tar", "tar.gz", "files"]],
+                optional): Format to save the repository. If None, the repository will
+                not be saved. If "files" is specified, each file will be saved
+                individually. Otherwise, the repository can be saved as a "zip", "tar",
+                or "tar.gz" file. Defaults to None.
+            handle_new_object (Optional[Callable], optional): Called when a new object
+                is found. Here, the object is not used in Objaverse-XL, but is still
+                downloaded with the repository. The object may have not been used
+                because it does not successfully import into Blender. If None, the
+                object will be downloaded, but nothing will be done with it. Args for
+                the function include:
+                - local_path (str): Local path to the downloaded 3D object.
+                - file_identifier (str): GitHub URL of the 3D object.
+                - sha256 (str): SHA256 of the contents of the 3D object.
+                - metadata (Dict[str, Any]): Metadata about the 3D object, including the
+                  GitHub organization and repo names.
+                Return is not used. Defaults to None.
+
+        Returns:
+            List[Dict[str, str]]: List of dictionaries with the keys "fileIdentifier"
+                and "sha256" for each downloaded object.
+        """
+        if processes is None:
+            processes = multiprocessing.cpu_count()
+
+        base_download_dir = os.path.join(download_dir, "github")
+        fs, path = fsspec.core.url_to_fs(base_download_dir)
+        fs.makedirs(path, exist_ok=True)
+
+        # Getting immediate subdirectories of root_path
+        if save_repo_format == "files":
+            downloaded_repo_dirs = fs.glob(base_download_dir + "/repos/*/*/")
+            downloaded_repo_ids = {
+                "/".join(x.split("/")[-2:]) for x in downloaded_repo_dirs
+            }
+        else:
+            downloaded_repo_dirs = fs.glob(
+                base_download_dir + f"/repos/*/*.{save_repo_format}"
+            )
+            downloaded_repo_ids = set()
+            for x in downloaded_repo_dirs:
+                org, repo = x.split("/")[-2:]
+                repo = repo[: -len(f".{save_repo_format}")]
+                repo_id = f"{org}/{repo}"
+                downloaded_repo_ids.add(repo_id)
+
+        # make copy of objects
+        objects = objects.copy()
+
+        # get the unique repoIds
+        objects["repoIdHash"] = objects.apply(self._get_repo_id_with_hash, axis=1)
+        repo_id_hashes = set(objects["repoIdHash"].unique().tolist())
+        repo_ids = {
+            "/".join(repo_id_hash.split("/")[:2]) for repo_id_hash in repo_id_hashes
+        }
+        assert len(repo_id_hashes) == len(repo_ids), (
+            f"More than 1 commit hash per repoId!"
+            f" {len(repo_id_hashes)=}, {len(repo_ids)=}"
+        )
+
+        logger.info(
+            f"Provided {len(repo_ids)} repoIds with {len(objects)} objects to process."
+        )
+
+        # remove repoIds that have already been downloaded
+        repo_ids_to_download = repo_ids - downloaded_repo_ids
+        repo_id_hashes_to_download = [
+            repo_id_hash
+            for repo_id_hash in repo_id_hashes
+            if "/".join(repo_id_hash.split("/")[:2]) in repo_ids_to_download
+        ]
+
+        logger.info(
+            f"Found {len(repo_ids_to_download)} repoIds not yet downloaded. Downloading now..."
+        )
+
+        # get the objects to download
+        groups = list(objects.groupby("repoIdHash"))
+        with Pool(processes=processes) as pool:
+            out_list = list(
+                tqdm(
+                    pool.imap_unordered(self._process_group, groups),
+                    total=len(groups),
+                    desc="Grouping objects by repository",
+                )
+            )
+        objects_per_repo_id_hash = dict(out_list)
+
+        # NOTE: tuple order must match the unpacking in _parallel_process_repo
+        all_args = [
+            (
+                repo_id_hash,
+                fs,
+                path,
+                save_repo_format,
+                objects_per_repo_id_hash[repo_id_hash],
+                handle_found_object,
+                handle_modified_object,
+                handle_missing_object,
+                handle_new_object,
+            )
+            for repo_id_hash in repo_id_hashes_to_download
+        ]
+
+        with Pool(processes=processes) as pool:
+            # use tqdm to show progress
+            out = list(
+                tqdm(
+                    pool.imap_unordered(self._parallel_process_repo, all_args),
+                    total=len(all_args),
+                )
+            )
+        out_list = [item for sublist in out for item in sublist]
+        return out_list
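
For orientation (not part of the commit): a sketch of how the refactored entry point might be called once the GitHub class replaces the old module-level functions. The head(10) sampling, process count, and "zip" format are illustrative choices, not part of this diff.

    from objaverse_xl.github import GitHub

    github = GitHub()
    annotations = github.load_annotations(download_dir="~/.objaverse")

    # Download a small sample, archiving each cloned repo as a zip file.
    results = github.download_objects(
        objects=annotations.head(10),
        download_dir="~/.objaverse",
        processes=4,
        save_repo_format="zip",
    )
    print(f"Downloaded {len(results)} objects.")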