"""Script to download objects from Thingiverse."""

import multiprocessing
import os
import tempfile
import time
from multiprocessing import Pool
from typing import Callable, Dict, Optional, Tuple

import fsspec
import pandas as pd
import requests
from loguru import logger
from tqdm import tqdm

from objaverse_xl.abstract import ObjaverseSource
from objaverse_xl.utils import get_file_hash


class ThingiverseDownloader(ObjaverseSource):
    """Script to download objects from Thingiverse."""

    def get_annotations(self, download_dir: str = "~/.objaverse") -> pd.DataFrame:
        """Load the annotations from the given directory.

        Args:
            download_dir (str, optional): The directory to load the annotations from.
                Supports all file systems supported by fsspec. Defaults to
                "~/.objaverse".

        Returns:
            pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
                "filename", and "license".
        """
        remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
        download_path = os.path.join(
            download_dir, "thingiverse", "thingiverse-objects.parquet"
        )
        fs, path = fsspec.core.url_to_fs(download_path)

        if not fs.exists(path):
            fs.makedirs(os.path.dirname(path), exist_ok=True)
            logger.info(f"Downloading {remote_url} to {download_path}")
            response = requests.get(remote_url)
            response.raise_for_status()
            with fs.open(path, "wb") as file:
                file.write(response.content)

        # read the file with pandas and fsspec
        with fs.open(path, "rb") as f:
            annotations_df = pd.read_parquet(f)

        return annotations_df

    def _get_response_with_retries(
        self, url: str, max_retries: int = 3, retry_delay: int = 5
    ) -> Optional[requests.models.Response]:
        """Get a response from a URL with retries.

        Args:
            url (str): The URL to get a response from.
            max_retries (int, optional): The maximum number of retries. Defaults to 3.
            retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.

        Returns:
            Optional[requests.models.Response]: The response from the URL. If there was an error, returns None.
        """

        for i in range(max_retries):
            try:
                response = requests.get(url, stream=True)
                # if successful, break out of loop
                if response.status_code not in {200, 404}:
                    time.sleep(retry_delay)
                    continue
                break
            except requests.exceptions.ConnectionError:
                if i < max_retries - 1:  # i.e. not on the last try
                    time.sleep(retry_delay)
        else:
            return None

        return response

    def _download_item(
        self,
        thingi_file_id: str,
        thingi_thing_id: str,
        file_identifier: str,
        download_dir: Optional[str],
        expected_sha256: str,
        handle_found_object: Optional[Callable],
        handle_modified_object: Optional[Callable],
        handle_missing_object: Optional[Callable],
    ) -> Tuple[str, Optional[str]]:
        """Download the given item.

        Args:
            thingi_file_id (str): The Thingiverse file ID of the object.
            thingi_thing_id (str): The Thingiverse thing ID of the object.
            file_identifier (str): File identifier of the Thingiverse object.
            download_dir (Optional[str]): Directory to download the Thingiverse object
                to. Supports all file systems supported by fsspec. If None, the
                Thingiverse object will be deleted after it is downloaded and processed
                with the handler functions.
            expected_sha256 (str): The expected SHA256 of the contents of the downloaded
                object.
            handle_found_object (Optional[Callable]): Called when an object is
                successfully found and downloaded. Here, the object has the same sha256
                as the one that was downloaded with Objaverse-XL. If None, the object
                will be downloaded, but nothing will be done with it. Args for the
                function include:
                - local_path (str): Local path to the downloaded 3D object.
                - file_identifier (str): File identifier of the 3D object.
                - sha256 (str): SHA256 of the contents of the 3D object.
                - metadata (Dict[str, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used.
            handle_modified_object (Optional[Callable]): Called when a modified object
                is found and downloaded. Here, the object is successfully downloaded,
                but it has a different sha256 than the one that was downloaded with
                Objaverse-XL. This is not expected to happen very often, because the
                same commit hash is used for each repo. If None, the object will be
                downloaded, but nothing will be done with it. Args for the function
                include:
                - local_path (str): Local path to the downloaded 3D object.
                - file_identifier (str): File identifier of the 3D object.
                - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
                    object.
                - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
                    it was when it was downloaded with Objaverse-XL.
                - metadata (Dict[str, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used.
            handle_missing_object (Optional[Callable]): Called when an object that is in
                Objaverse-XL is not found. Here, it is likely that the file was removed
                from Thingiverse. If None, nothing will be done with the missing
                object. Args for the function include:
                - file_identifier (str): File identifier of the 3D object.
                - sha256 (str): SHA256 of the contents of the original 3D object.
                - metadata (Dict[str, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used.


        Returns:
            Tuple[str, Optional[str]]: The file identifier and the path to the downloaded
                file. If there was an error or a 404, the path is None.
        """
        url = f"https://www.thingiverse.com/download:{thingi_file_id}"
        response = self._get_response_with_retries(url)
        filename = f"thing-{thingi_thing_id}-file-{thingi_file_id}.stl"

        if response is None:
            logger.warning(
                f"Thingiverse file ID {thingi_file_id} could not get response from {url}"
            )
            # NOTE: the object is probably not missing, but the request failed
            return file_identifier, None

        # Check if the request was successful
        if response.status_code == 404:
            logger.warning(
                f"Thingiverse file ID {thingi_file_id} (404) could not find file"
            )
            if handle_missing_object is not None:
                handle_missing_object(
                    file_identifier=file_identifier, sha256=expected_sha256, metadata={}
                )
            return file_identifier, None

        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = os.path.join(temp_dir, filename)
            temp_path_tmp = temp_path + ".tmp"

            with open(temp_path_tmp, "wb") as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)

            # rename to temp_path
            os.rename(temp_path_tmp, temp_path)

            # check the sha256
            sha256 = get_file_hash(temp_path)

            if sha256 == expected_sha256:
                if handle_found_object is not None:
                    handle_found_object(
                        local_path=temp_path,
                        file_identifier=file_identifier,
                        sha256=sha256,
                        metadata={},
                    )
            else:
                if handle_modified_object is not None:
                    handle_modified_object(
                        local_path=temp_path,
                        file_identifier=file_identifier,
                        new_sha256=sha256,
                        old_sha256=expected_sha256,
                        metadata={},
                    )

            if download_dir is not None:
                filename = os.path.join(download_dir, filename)
                fs, path = fsspec.core.url_to_fs(filename)
                fs.makedirs(os.path.dirname(path), exist_ok=True)
                fs.put(temp_path, path)
            else:
                path = None

        return file_identifier, path

    def _parallel_download_item(self, args):
        """Unpack a tuple of arguments and call _download_item (for Pool.imap_unordered)."""
        return self._download_item(*args)

    def get_file_id_from_file_identifier(self, file_identifier: str) -> str:
        """Get the thingiverse file ID from the Objaverse-XL file identifier.

        Args:
            file_identifier (str): The Objaverse-XL file identifier.

        Returns:
            str: The Thingiverse file ID.
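
        Example:
            A hypothetical identifier, written in the format this parser assumes
            (thing ID and file ID embedded in the URL), parses as:

            >>> ThingiverseDownloader().get_file_id_from_file_identifier(
            ...     "https://www.thingiverse.com/thing:12345/fileId=67890"
            ... )
            '67890'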
        """
        return file_identifier.split("fileId=")[-1]

    def get_thing_id_from_file_identifier(self, file_identifier: str) -> str:
        """Get the thingiverse thing ID from the Objaverse-XL file identifier.

        Args:
            file_identifier (str): The Objaverse-XL file identifier.

        Returns:
            str: The Thingiverse thing ID.
        """
        return file_identifier.split("/")[-2].split(":")[1]

    def download_objects(
        self,
        objects: pd.DataFrame,
        processes: Optional[int] = None,
        download_dir: Optional[str] = "~/.objaverse",
        handle_found_object: Optional[Callable] = None,
        handle_modified_object: Optional[Callable] = None,
        handle_missing_object: Optional[Callable] = None,
    ) -> Dict[str, str]:
        """Download the objects from the given list of things and files.

        Args:
            objects (pd.DataFrame): Thingiverse objects to download. Must have columns
                for the object "fileIdentifier" and "sha256". Use the `get_annotations`
                function to get the metadata.
            processes (Optional[int], optional): The number of processes to use. If None,
                uses all available CPUs via multiprocessing.cpu_count(). Defaults to None.
            download_dir (Optional[str], optional): The directory to save the files to.
                Supports all file systems supported by fsspec. If None, the objects are
                processed with the handler functions and then deleted. Defaults to
                "~/.objaverse".
            handle_found_object (Optional[Callable], optional): Called when an object is
                successfully found and downloaded. Here, the object has the same sha256
                as the one that was downloaded with Objaverse-XL. If None, the object
                will be downloaded, but nothing will be done with it. Args for the
                function include:
                - local_path (str): Local path to the downloaded 3D object.
                - file_identifier (str): File identifier of the 3D object.
                - sha256 (str): SHA256 of the contents of the 3D object.
                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used. Defaults to None.
            handle_modified_object (Optional[Callable], optional): Called when a
                modified object is found and downloaded. Here, the object is
                successfully downloaded, but it has a different sha256 than the one that
                was downloaded with Objaverse-XL. This is not expected to happen very
                often, because the same commit hash is used for each repo. If None, the
                object will be downloaded, but nothing will be done with it. Args for
                the function include:
                - local_path (str): Local path to the downloaded 3D object.
                - file_identifier (str): File identifier of the 3D object.
                - new_sha256 (str): SHA256 of the contents of the newly downloaded 3D
                    object.
                - old_sha256 (str): Expected SHA256 of the contents of the 3D object as
                    it was when it was downloaded with Objaverse-XL.
                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used. Defaults to None.
            handle_missing_object (Optional[Callable], optional): Called when an object
                that is in Objaverse-XL is not found. Here, it is likely that the file
                was removed from Thingiverse. If None, nothing will be done with
                the missing object.
                Args for the function include:
                - file_identifier (str): File identifier of the 3D object.
                - sha256 (str): SHA256 of the contents of the original 3D object.
                - metadata (Dict[Hashable, Any]): Metadata about the 3D object, which is
                    particular to the source.
                Return is not used. Defaults to None.

        Returns:
            Dict[str, str]: A dictionary mapping from the fileIdentifier to the path of
                the downloaded file.
        """
        if processes is None:
            processes = multiprocessing.cpu_count()

        objects = objects.copy()
        objects["thingiFileId"] = objects["fileIdentifier"].apply(
            self.get_file_id_from_file_identifier
        )
        objects["thingiThingId"] = objects["fileIdentifier"].apply(
            self.get_thing_id_from_file_identifier
        )

        # create the download directory
        out = {}
        if download_dir is not None:
            download_dir = os.path.join(download_dir, "thingiverse")
            fs, path = fsspec.core.url_to_fs(download_dir)
            fs.makedirs(path, exist_ok=True)

            # check to filter out files that already exist
            existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
            existing_file_ids = {
                os.path.basename(file).split(".")[0].split("-")[-1]
                for file in existing_files
            }

            # filter out existing files
            items_to_download = []
            already_downloaded_count = 0
            for _, item in objects.iterrows():
                if item["thingiFileId"] in existing_file_ids:
                    already_downloaded_count += 1
                    out[item["fileIdentifier"]] = os.path.join(
                        os.path.expanduser(download_dir),
                        f"thing-{item['thingiThingId']}-file-{item['thingiFileId']}.stl",
                    )
                else:
                    items_to_download.append(item)

            logger.info(
                f"Found {already_downloaded_count} Thingiverse objects downloaded"
            )
        else:
            items_to_download = [item for _, item in objects.iterrows()]

        logger.info(
            f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
        )
        if len(items_to_download) == 0:
            return out

        # download the files
        args = [
            (
                item["thingiFileId"],
                item["thingiThingId"],
                item["fileIdentifier"],
                download_dir,
                item["sha256"],
                handle_found_object,
                handle_modified_object,
                handle_missing_object,
            )
            for item in items_to_download
        ]

        with Pool(processes=processes) as pool:
            results = list(
                tqdm(
                    pool.imap_unordered(self._parallel_download_item, args),
                    total=len(args),
                    desc="Downloading Thingiverse Objects",
                )
            )

        for file_identifier, download_path in results:
            if download_path is not None:
                out[file_identifier] = download_path

        return out
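

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module), assuming the annotations
    # parquet exposes the "fileIdentifier" and "sha256" columns that download_objects()
    # requires. Downloads a few objects to the default "~/.objaverse" directory; the
    # slice size and print format are illustrative only.
    downloader = ThingiverseDownloader()
    annotations = downloader.get_annotations()
    sample = annotations.head(3)
    paths = downloader.download_objects(objects=sample)
    for identifier, local_path in paths.items():
        print(f"{identifier} -> {local_path}")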