"""Hugging Face `datasets` loading script for the AmericanStories dataset.

Builds one split per year from the per-year 'faro_<year>' archives of
structured newspaper-article JSON files.
"""
import json
import os
import re

import datasets
import requests

# File-listing page of the dataset repository, scraped for archive links when
# running in online mode.
DATASET_URL = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/tree/main/"


def get_list_of_files(url):
    """Scrape the repository page for links to the per-year archives."""
    page = requests.get(url).text
    links = re.findall(r'href=[\'"]?([^\'" >]+)', page)
    # Keep only the per-year archives, whose names start with 'faro_'.
    links = [link for link in links if link.startswith('faro_')]
    return links
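
# Example (assumed naming scheme): the scraped links look like
# 'faro_1885.tar.gz', with the year sitting between the first underscore and
# the file extension.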


def get_splits(links):
    """Group archive links by year so that each year becomes its own split."""
    splits = {}
    years = []
    for link in links:
        # 'faro_1885.tar.gz' -> '1885'
        year = link.split('_')[1].split('.')[0]
        if year not in splits:
            splits[year] = []
            years.append(year)  # record each year only once
        splits[year].append(link)
    return splits, years


# Directory holding pre-downloaded 'faro_' archives (the working directory
# by default).
DATA_DIR = "."


def make_year_file_splits(data_dir):
    """Like get_splits, but over local 'faro_' archives found in data_dir."""
    data_files = os.listdir(data_dir)
    data_files = [file for file in data_files if file.startswith('faro_')]
    splits = {}
    years = []
    for file in data_files:
        year = file.split('_')[1].split('.')[0]
        if year not in splits:
            splits[year] = []
            years.append(year)  # record each year only once
        splits[year].append(file)
    return splits, years
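
# Example (hypothetical files): a data_dir containing 'faro_1885.tar.gz' and
# 'faro_1886.tar.gz' yields
#   ({'1885': ['faro_1885.tar.gz'], '1886': ['faro_1886.tar.gz']},
#    ['1885', '1886'])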


_CITATION = """\
Coming Soon
"""

_DESCRIPTION = """\
American Stories offers high-quality structured data from historical newspapers, suitable for pre-training large language models to improve their understanding of historical English and world knowledge. It can also be integrated into the external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods to popular tasks such as detecting reproduced content, significantly improving accuracy compared to traditional OCR. American Stories is a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications.
"""

_FILE_DICT, _YEARS = make_year_file_splits(DATA_DIR)


class AmericanStories(datasets.GeneratorBasedBuilder):
    """Structured article texts extracted from scans of historical American newspapers."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="american_stories",
            version=VERSION,
            description="Full article texts from historical newspaper scans, grouped into one split per year.",
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "newspaper_name": datasets.Value("string"),
                "edition": datasets.Value("string"),
                "date": datasets.Value("string"),
                "page": datasets.Value("string"),
                "headline": datasets.Value("string"),
                "byline": datasets.Value("string"),
                "article": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager, online=False):
        # Resolve the per-year archives either from the local directory or by
        # scraping the repository's file listing.
        if not online:
            urls = _FILE_DICT
            year_list = _YEARS
        else:
            urls, year_list = get_splits(get_list_of_files(DATASET_URL))
        data_dir = dl_manager.download_and_extract(urls)

        # Each archive extracts to a nested directory tree; the mount-style
        # prefix below is the path baked into the tarballs when they were made.
        return [
            datasets.SplitGenerator(
                name=year,
                gen_kwargs={
                    "year_dir": os.path.join(
                        data_dir[year][0],
                        "mnt/122a7683-fa4b-45dd-9f13-b18cc4f4a187/ca_rule_based_fa_clean/faro_" + year,
                    ),
                    "split": year,
                },
            )
            for year in year_list
        ]

    def _generate_examples(self, year_dir, split):
        # Each JSON file holds one newspaper scan; its filename encodes the
        # date, page number, and edition.
        for filepath in os.listdir(year_dir):
            with open(os.path.join(year_dir, filepath), encoding="utf-8") as f:
                data = json.load(f)
            scan_id = filepath.split('.')[0]
            scan_date = filepath.split("_")[0]
            scan_page = filepath.split("_")[1]
            # Drop the first 8 characters (assumed to be an 'edition-' prefix).
            scan_edition = filepath.split("_")[-2][8:]
            newspaper_name = data["lccn"]["title"]
            full_articles_in_data = data["full articles"]
            for article in full_articles_in_data:
                article_id = str(article["full_article_id"]) + "_" + scan_id
                yield article_id, {
                    "newspaper_name": newspaper_name,
                    "edition": scan_edition,
                    "date": scan_date,
                    "page": scan_page,
                    "headline": article["headline"],
                    "byline": article["byline"],
                    "article": article["article"],
                }
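

if __name__ == "__main__":
    # Minimal usage sketch, assuming this script sits next to the downloaded
    # 'faro_<year>' archives. Split names are years; '1885' is a hypothetical
    # example and must match an archive that is actually present.
    dataset = datasets.load_dataset(__file__, "american_stories")
    print(dataset["1885"][0]["headline"])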