AmericanStories / AmericanStories.py
import json
import os

import datasets

# Root directory containing the per-year archives. Unused by the URL-based
# split builder actually in effect below, but kept for the local variant.
DATA_DIR = "."
def make_year_file_splits_local(data_dir):
    """Build year -> file-list splits from archives already on disk.

    Superseded by the URL-based make_year_file_splits below; kept for
    reference when working against a local copy of the archives.
    """
    # List the files in the data directory, keeping only the faro_ archives.
    data_files = os.listdir(data_dir)
    data_files = [file for file in data_files if file.startswith("faro_")]
    # Optionally keep only 17__ years:
    # data_files = [file for file in data_files if file.split("_")[1].startswith("17")]
    # Arrange into splits by year; files follow the format faro_YYYY.tar.gz.
    splits = {}
    years = []
    for file in data_files:
        year = file.split("_")[1].split(".")[0]
        if year not in splits:
            splits[year] = []
            years.append(year)
        splits[year].append(file)
    return splits, years
def make_year_file_splits(data_dir):
    """Build year -> archive-URL splits for the years hosted on the Hub.

    data_dir is accepted for interface compatibility with the local variant
    above but is unused: archives are fetched from the dataset repository.
    """
    base_url = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/"
    year_list = ["1774", "1804", "1807"]
    data_files = [f"faro_{year}.tar.gz" for year in year_list]
    url_list = [base_url + file for file in data_files]
    splits = {year: url for year, url in zip(year_list, url_list)}
    years = year_list
    return splits, years
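
# Illustrative output of make_year_file_splits (URLs abbreviated; exact values
# follow from the repository layout assumed above):
#
#   splits == {"1774": ".../resolve/main/faro_1774.tar.gz",
#              "1804": ".../resolve/main/faro_1804.tar.gz",
#              "1807": ".../resolve/main/faro_1807.tar.gz"}
#   years  == ["1774", "1804", "1807"]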
_CITATION = """\
Coming Soon
"""
_DESCRIPTION = """\
American Stories offers high-quality structured data from historical newspapers,
suitable for pre-training large language models to enhance their understanding of
historical English and world knowledge. It can also be integrated into external
databases of retrieval-augmented language models, enabling broader access to
historical information, including interpretations of political events and intricate
details about people's ancestors. Additionally, the structured article texts
facilitate the application of transformer-based methods to popular tasks like
detecting reproduced content, significantly improving accuracy compared to
traditional OCR methods. American Stories serves as a substantial and valuable
dataset for advancing multimodal layout analysis models and other multimodal
applications."""
_FILE_DICT, _YEARS = make_year_file_splits(DATA_DIR)


### A BuilderConfig subclass that supports a year_list attribute.
class MyBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for AmericanStories with a configurable list of years."""

    def __init__(self, year_list=None, **kwargs):
        """BuilderConfig for AmericanStories.

        Args:
            year_list: optional list of year strings (e.g. ["1774", "1804"])
                restricting which splits are built; None means all years.
            **kwargs: keyword arguments forwarded to super.
        """
        super(MyBuilderConfig, self).__init__(**kwargs)
        self.year_list = year_list
class AmericanStories(datasets.GeneratorBasedBuilder):
    """Structured article texts extracted from scans of historical American newspapers."""

    VERSION = datasets.Version("0.0.1")

    # This dataset exposes multiple configurations via the custom
    # MyBuilderConfig class above. Uncommenting BUILDER_CONFIG_CLASS would
    # additionally let callers override year_list as a keyword argument to
    # datasets.load_dataset.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig
    BUILDER_CONFIGS = [
        MyBuilderConfig(
            name="all_years",
            version=VERSION,
            description="All years in the dataset",
        ),
        MyBuilderConfig(
            name="subset_years",
            version=VERSION,
            description="Subset of years in the dataset",
            year_list=["1774", "1804"],
        ),
    ]
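
    # Illustrative usage of the two configurations (a sketch; loading a
    # script-based dataset may require trust_remote_code=True on recent
    # versions of the datasets library):
    #
    #   data = datasets.load_dataset("dell-research-harvard/AmericanStories", "all_years")
    #   data = datasets.load_dataset("dell-research-harvard/AmericanStories", "subset_years")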
    DEFAULT_CONFIG_NAME = "all_years"  # A default configuration is optional; use one if it makes sense.
    def _info(self):
        # Specifies the datasets.DatasetInfo object, which holds the dataset's
        # metadata and column typings.
        features = datasets.Features(
            {
                "newspaper_name": datasets.Value("string"),
                "edition": datasets.Value("string"),
                "date": datasets.Value("string"),
                "page": datasets.Value("string"),
                "headline": datasets.Value("string"),
                "byline": datasets.Value("string"),
                "article": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description=_DESCRIPTION,
            # Columns of the dataset and their types.
            features=features,
            citation=_CITATION,
        )
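
    # Illustrative shape of one example under this schema (field values are
    # hypothetical, not drawn from the corpus):
    #
    #   {"newspaper_name": "The Pennsylvania Packet", "edition": "01",
    #    "date": "1774-10-31", "page": "1", "headline": "...",
    #    "byline": "...", "article": "Full transcribed article text ..."}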
    def _split_generators(self, dl_manager):
        # Downloads and extracts the per-year archives and defines the splits
        # for the configuration selected by the user (self.config). dl_manager
        # accepts any nested list/dict of URLs and returns the same structure
        # with each URL replaced by a path to the extracted local files.
        urls = _FILE_DICT
        year_list = _YEARS

        # Restrict the file dict and year list to the years in config.year_list, if set.
        if self.config.year_list:
            urls = {year: urls[year] for year in self.config.year_list}
            year_list = self.config.year_list

        data_dir = dl_manager.download_and_extract(urls)

        ### Return a list of splits -- but each split is for a year! Each
        ### archive extracts to a nested directory ending in faro_<year>.
        return [
            datasets.SplitGenerator(
                name=year,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "year_dir": os.path.join(
                        data_dir[year],
                        "mnt",
                        "122a7683-fa4b-45dd-9f13-b18cc4f4a187",
                        "ca_rule_based_fa_clean",
                        "faro_" + year,
                    ),
                    "split": year,
                },
            )
            for year in year_list
        ]
    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, year_dir, split):
        # Yields (key, example) tuples from the per-scan JSON files. The key
        # is for legacy reasons (tfds) but must be unique for each example.
        for filepath in os.listdir(year_dir):
            with open(os.path.join(year_dir, filepath), encoding="utf-8") as f:
                data = json.load(f)

            # Scan-level metadata is encoded in the file name: date and page
            # are the first two underscore-separated tokens; the edition is
            # the second-to-last token with its 8-character prefix
            # (presumably "edition-") stripped.
            scan_id = filepath.split(".")[0]
            scan_date = filepath.split("_")[0]
            scan_page = filepath.split("_")[1]
            scan_edition = filepath.split("_")[-2][8:]
            newspaper_name = data["lccn"]["title"]

            full_articles_in_data = data["full articles"]
            for article in full_articles_in_data:
                article_id = str(article["full_article_id"]) + "_" + scan_id
                yield article_id, {
                    "newspaper_name": newspaper_name,
                    "edition": scan_edition,
                    "date": scan_date,
                    "page": scan_page,
                    "headline": article["headline"],
                    "byline": article["byline"],
                    "article": article["article"],
                }
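

# A minimal smoke test, assuming network access to the Hugging Face Hub.
# Script-based loading may require trust_remote_code=True (and a datasets
# version that still supports dataset scripts).
if __name__ == "__main__":
    dset = datasets.load_dataset(
        "dell-research-harvard/AmericanStories",
        "subset_years",
    )
    # Splits are keyed by year; print the first article of the 1774 split.
    print(dset["1774"][0])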