import json
import os
import tarfile
from typing import Iterator, List, Tuple

import datasets

_CITATION = """\
Coming Soon
"""

_DESCRIPTION = """\
American Stories offers high-quality structured data from historical newspapers, suitable for
pre-training large language models to enhance their understanding of historical English and world
knowledge. It can also be integrated into the external databases of retrieval-augmented language
models, enabling broader access to historical information, including interpretations of political
events and intricate details about people's ancestors. Additionally, the structured article texts
facilitate the application of transformer-based methods to popular tasks such as detecting
reproduced content, with significantly better accuracy than traditional OCR-based approaches.
American Stories serves as a substantial and valuable dataset for advancing multimodal layout
analysis models and other multimodal applications.
"""


class AmericanStories(datasets.GeneratorBasedBuilder):
    """Builder for the American Stories historical-newspaper dataset."""

    VERSION = datasets.Version("0.0.1")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="AmericanStories",
            version=VERSION,
            description=_DESCRIPTION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        features = {
            "feature_name": datasets.Value("string"),
            # Define the other features of your dataset here
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # No explicit splits are defined yet. Add datasets.SplitGenerator entries here and pass the
        # path of each .tar.gz archive to _generate_examples via gen_kwargs={"filepath": ...}.
        return []

    def _generate_examples(self, filepath: str) -> Iterator[Tuple[str, dict]]:
        # Stream the gzipped tar archive and parse every JSON member it contains.
        with tarfile.open(filepath, "r:gz") as tar:
            for member in tar.getmembers():
                if member.isfile() and member.name.endswith(".json"):
                    file_name = os.path.basename(member.name)
                    with tar.extractfile(member) as f:
                        data = json.load(f)
                    for idx, example in enumerate(data):
                        # Yield each example; the key combines the member's file name and the index
                        # so that keys stay unique across archive members.
                        yield f"{file_name}_{idx}", {
                            "feature_name": example["feature_name"],
                            # Assign values to the other features of your dataset here
                        }
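

# ---------------------------------------------------------------------------
# Usage sketch (an assumption-laden example, not part of the loading script
# itself): once _split_generators returns real SplitGenerator entries, the
# builder above can be exercised through datasets.load_dataset. The script
# path "american_stories.py" is a hypothetical local filename; point it at
# wherever this file actually lives (or at the Hub repository hosting it).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Note: the builder produces no examples until _split_generators returns
    # SplitGenerator entries pointing at the .tar.gz archives.
    dataset = datasets.load_dataset(
        "american_stories.py",  # hypothetical path to this script
        "AmericanStories",      # config name declared in BUILDER_CONFIGS
    )
    print(dataset)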