|
import json
import os

import datasets

DATA_DIR = "."
|
|
def make_year_file_splits_local(data_dir):
    """Group local faro_<year>.tar.gz archives found in data_dir by year.

    Not used by the builder below, which streams the archives from the
    Hugging Face Hub instead; kept for runs against a local copy of the data.
    """
    data_files = [file for file in os.listdir(data_dir) if file.startswith("faro_")]

    splits = {}
    years = []
    for file in data_files:
        year = file.split("_")[1].split(".")[0]
        if year not in splits:
            splits[year] = []
            years.append(year)  # record each year once, not once per file
        splits[year].append(file)
    return splits, years
|
|
def make_year_file_splits(data_dir):
    """Map each available year to the URL of its tarball on the Hugging Face Hub.

    data_dir is unused here: the archives are downloaded, not read locally.
    """
    base_url = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/"

    year_list = ["1774", "1804", "1807"]
    data_files = [f"faro_{year}.tar.gz" for year in year_list]
    url_list = [base_url + file for file in data_files]
    splits = {year: url for year, url in zip(year_list, url_list)}

    return splits, year_list
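

# A quick sketch of what the mapping above yields (no network access needed;
# the values are just URL strings at this point):
#
#   splits, years = make_year_file_splits(DATA_DIR)
#   years          -> ["1774", "1804", "1807"]
#   splits["1774"] -> "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/faro_1774.tar.gz"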
|
|
_CITATION = """\ |
|
Coming Soon |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications. """ |
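
# Typical usage once this script is hosted on the Hub (a sketch: passing
# year_list overrides the chosen config's default, and recent versions of
# `datasets` require trust_remote_code=True to run a loading script):
#
#   from datasets import load_dataset
#   ds = load_dataset(
#       "dell-research-harvard/AmericanStories",
#       "subset_years",
#       year_list=["1774"],
#       trust_remote_code=True,
#   )
#   print(ds["1774"][0]["headline"])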
|
_FILE_DICT, _YEARS = make_year_file_splits(DATA_DIR)


class MyBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for AmericanStories, parameterized by a list of years."""

    def __init__(self, year_list=None, **kwargs):
        """BuilderConfig for AmericanStories.

        Args:
            year_list: optional list of years (as strings) to load;
                None means all available years.
            **kwargs: keyword arguments forwarded to super.
        """
        super(MyBuilderConfig, self).__init__(**kwargs)
        self.year_list = year_list
|
|
class AmericanStories(datasets.GeneratorBasedBuilder):
    """Structured article texts extracted from scans of historical American newspapers."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        MyBuilderConfig(
            name="all_years",
            version=VERSION,
            description="All years in the dataset",
        ),
        MyBuilderConfig(
            name="subset_years",
            version=VERSION,
            description="Subset of years in the dataset",
            year_list=["1774", "1804"],
        ),
    ]

    DEFAULT_CONFIG_NAME = "all_years"
|
    def _info(self):
        features = datasets.Features(
            {
                "newspaper_name": datasets.Value("string"),
                "edition": datasets.Value("string"),
                "date": datasets.Value("string"),
                "page": datasets.Value("string"),
                "headline": datasets.Value("string"),
                "byline": datasets.Value("string"),
                "article": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        urls = _FILE_DICT
        year_list = _YEARS

        # Restrict downloads to the configured subset of years, if one was given.
        if self.config.year_list:
            urls = {year: urls[year] for year in self.config.year_list}
            year_list = self.config.year_list

        data_dir = dl_manager.download_and_extract(urls)

        # One split per year. Each tarball extracts to a nested directory
        # inside the archive, so the per-year path is reconstructed here.
        return [
            datasets.SplitGenerator(
                name=year,
                gen_kwargs={
                    "year_dir": os.path.join(
                        data_dir[year],
                        "mnt",
                        "122a7683-fa4b-45dd-9f13-b18cc4f4a187",
                        "ca_rule_based_fa_clean",
                        "faro_" + year,
                    ),
                    "split": year,
                },
            )
            for year in year_list
        ]
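
    # Note that, unlike the usual train/validation/test layout, the splits
    # above are keyed by year, so a loaded dataset is indexed as ds["1774"],
    # ds["1804"], and so on. A hypothetical per-year filter might look like:
    #
    #   ds_1774 = ds["1774"].filter(lambda ex: ex["byline"] != "")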
|
    def _generate_examples(self, year_dir, split):
        # Each JSON file in year_dir is one page scan; its filename encodes
        # the date, page number, and edition. Sorted for deterministic order.
        for filepath in sorted(os.listdir(year_dir)):
            with open(os.path.join(year_dir, filepath), encoding="utf-8") as f:
                data = json.load(f)
            scan_id = filepath.split(".")[0]
            scan_date = filepath.split("_")[0]
            scan_page = filepath.split("_")[1]
            scan_edition = filepath.split("_")[-2][8:]
            newspaper_name = data["lccn"]["title"]
            full_articles_in_data = data["full articles"]
            for article in full_articles_in_data:
                # Keys must be unique across the split, so combine the
                # per-page article id with the scan id.
                article_id = str(article["full_article_id"]) + "_" + scan_id
                yield article_id, {
                    "newspaper_name": newspaper_name,
                    "edition": scan_edition,
                    "date": scan_date,
                    "page": scan_page,
                    "headline": article["headline"],
                    "byline": article["byline"],
                    "article": article["article"],
                }
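

# A minimal smoke test for local development (a sketch, assuming a datasets
# version whose builders accept config_name plus config kwargs; it downloads
# and extracts one year's tarball, so it needs network access):
if __name__ == "__main__":
    builder = AmericanStories(config_name="subset_years", year_list=["1774"])
    builder.download_and_prepare()
    ds = builder.as_dataset()
    print(ds["1774"][0]["newspaper_name"])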
|
|