import json
import os
import tarfile
from typing import Iterator, List, Tuple

import datasets
from datasets import DatasetInfo, DownloadManager

_CITATION = """\
Coming Soon
"""

_DESCRIPTION = """\
American Stories offers high-quality structured data from historical newspapers, suitable for pre-training large language models to enhance their understanding of historical English and world knowledge. It can also be integrated into the external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks such as detecting reproduced content, with significantly higher accuracy than traditional OCR methods allow. American Stories is a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications.
"""

class AmericanStories(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="AmericanStories",
            version=VERSION,
            description=_DESCRIPTION,
        ),
    ]

    def _info(self) -> DatasetInfo:
        features = {
            "feature_name": datasets.Value("string"),
            # Define the other features of your dataset here
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            citation=_CITATION,
        )
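
    # A filled-in schema could look like the sketch below (hypothetical field
    # names for illustration, not the dataset's confirmed schema):
    #
    #     features = {
    #         "article_id": datasets.Value("string"),
    #         "newspaper_name": datasets.Value("string"),
    #         "date": datasets.Value("string"),
    #         "article": datasets.Value("string"),
    #     }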

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        # No splits are wired up yet. A working loader must return
        # SplitGenerators whose gen_kwargs provide the `filepath` argument
        # consumed by _generate_examples below.
        return []
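
    # A minimal sketch of a concrete _split_generators, assuming the data ships
    # as a single .tar.gz archive (the URL below is a hypothetical placeholder;
    # this template does not define one):
    #
    #     archive_path = dl_manager.download("https://example.com/american_stories.tar.gz")
    #     return [
    #         datasets.SplitGenerator(
    #             name=datasets.Split.TRAIN,
    #             gen_kwargs={"filepath": archive_path},
    #         ),
    #     ]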

    def _generate_examples(self, filepath: str) -> Iterator[Tuple[str, dict]]:
        with tarfile.open(filepath, "r:gz") as tar:
            for member in tar.getmembers():
                if member.isfile() and member.name.endswith(".json"):
                    file_name = os.path.basename(member.name)
                    with tar.extractfile(member) as f:
                        data = json.load(f)
                        # Each JSON file holds a list of examples; prefix the
                        # per-file index with the file name so keys stay unique
                        # across the whole archive.
                        for idx, example in enumerate(data):
                            yield f"{file_name}_{idx}", {
                                "feature_name": example["feature_name"],
                                # Assign values to other features of your dataset
                            }
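
# Minimal usage sketch, assuming this script is saved locally as
# american_stories.py and _split_generators has been wired to a real archive
# (both are assumptions, not part of this template):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("american_stories.py", "AmericanStories")
#     print(ds["train"][0])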