96abhishekarora committed
Commit a849f84
Parent: 57a3900

doc changes

Files changed (1):
AmericanStories.py  +106 -122
AmericanStories.py CHANGED
@@ -1,193 +1,177 @@
 import json
 import tarfile
-from datasets import DatasetInfo, DatasetBuilder, DownloadManager,BuilderConfig, SplitGenerator, Split, Version
+from datasets import DatasetInfo, DatasetBuilder, DownloadManager, BuilderConfig, SplitGenerator, Split, Version
 import datasets
 import os
 import requests
 import re
 from tqdm import tqdm
 
-
-
-####data dir
-DATA_DIR="."
-
-def make_year_file_splits(data_dir):
-    ###Get list of files
-    data_files=os.listdir(data_dir)
-    ###Get only files containing faro_
-    data_files=[file for file in data_files if file.startswith('faro_')]
-
-    ###Get only files for 17__ years
-    # data_files=[file for file in data_files if file.split('_')[1].startswith('17')]
-    ###Arrange into splits by year - files follow the format faro_YYYY.tar.gz
-    splits={}
-    years=[]
-    for file in data_files:
-        year=file.split('_')[1].split('.')[0]
-        if year not in splits:
-            splits[year]=[]
-        splits[year].append(file)
-        years.append(year)
-    return splits, years
-
-def make_year_file_splits(data_dir):
-    base_url="https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/"
-
-    # year_list=["1774","1804","1807"]
-    ###MAke a list of years from 1774 to 1960
-    print("Collecting all list of available files for each year")
-    year_list=[str(year) for year in range(1774,1960)]
-    data_files=[f"faro_{year}.tar.gz" for year in year_list]
-    url_list=[base_url+file for file in data_files]
-    ###Keep only valid urls
-    # url_list=[url for url in tqdm(url_list) if requests.get(url).status_code==200]
-    splits={year:file for year,file in zip(year_list,url_list)}
-    years=year_list
+SUPPORTED_YEARS = ["1774"]
+# Add years from 1798 to 1964 to the supported years
+SUPPORTED_YEARS = SUPPORTED_YEARS + [str(year) for year in range(1798, 1964)]
+
+def make_year_file_splits():
+    """
+    Collects a list of available files for each year.
+
+    Returns:
+        dict: A dictionary mapping each year to its corresponding file URL.
+        list: A list of years.
+    """
+    base_url = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/"
+
+    # Make a list of years from 1774 to 1960
+    year_list = [str(year) for year in range(1774, 1960)]
+    data_files = [f"faro_{year}.tar.gz" for year in year_list]
+    url_list = [base_url + file for file in data_files]
+
+    splits = {year: file for year, file in zip(year_list, url_list)}
+    years = year_list
 
     return splits, years
 
 
-
-
 _CITATION = """\
 Coming Soon
 """
 
 _DESCRIPTION = """\
-American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications. """
+American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications.
+"""
 
-_FILE_DICT,_YEARS=make_year_file_splits(DATA_DIR)
+_FILE_DICT, _YEARS = make_year_file_splits()
 
 
-###Make a class of builderconfig that supports an year_list attribute
-class MyBuilderConfig(datasets.BuilderConfig):
-    """BuilderConfig for MyDataset for different configurations."""
+class CustomBuilderConfig(datasets.BuilderConfig):
+    """BuilderConfig for AmericanStories dataset with different configurations."""
 
     def __init__(self, year_list=None, **kwargs):
-        """BuilderConfig for MyDataset.
+        """
+        BuilderConfig for AmericanStories dataset.
+
         Args:
-            **kwargs: keyword arguments forwarded to super.
+            year_list (list): A list of years to include in the dataset.
+            **kwargs: Additional keyword arguments forwarded to the superclass.
         """
-        super(MyBuilderConfig, self).__init__(**kwargs)
+        super(CustomBuilderConfig, self).__init__(**kwargs)
         self.year_list = year_list
 
-class AmericanStories(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("0.0.1")
+
+class AmericanStories(datasets.GeneratorBasedBuilder):
+    """Dataset builder class for AmericanStories dataset."""
+
+    VERSION = datasets.Version("0.1.0")
 
-
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    ##Now use the custom builder config class
     BUILDER_CONFIGS = [
-        MyBuilderConfig(
+        CustomBuilderConfig(
            name="all_years",
            version=VERSION,
            description="All years in the dataset",
        ),
-        MyBuilderConfig(
+        CustomBuilderConfig(
            name="subset_years",
            version=VERSION,
            description="Subset of years in the dataset",
-            year_list=["1774","1804"],
-        )]
-
-    DEFAULT_CONFIG_NAME = "subset_years" # It's not mandatory to have a default configuration. Just use one if it make sense.
+            year_list=["1774", "1804"],
+        )
+    ]
+    DEFAULT_CONFIG_NAME = "subset_years"
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
+        """
+        Specifies the DatasetInfo object for the AmericanStories dataset.
+
+        Returns:
+            datasets.DatasetInfo: The DatasetInfo object.
+        """
        features = datasets.Features(
-            { "newspaper_name": datasets.Value("string"),
+            {
+                "article_id": datasets.Value("string"),
+                "newspaper_name": datasets.Value("string"),
                "edition": datasets.Value("string"),
                "date": datasets.Value("string"),
                "page": datasets.Value("string"),
                "headline": datasets.Value("string"),
                "byline": datasets.Value("string"),
-                "article": datasets.Value("string")
-                # These are the features of your dataset like images, labels ...
+                "article": datasets.Value("string"),
            }
        )
 
        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features, # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            # License for the dataset if available
-            # Citation for the dataset
+            features=features,
            citation=_CITATION,
        )
 
    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+        """
+        Downloads and extracts the data, and defines the dataset splits.
+
+        Args:
+            dl_manager (datasets.DownloadManager): The DownloadManager instance.
+
+        Returns:
+            list: A list of SplitGenerator objects.
+        """
+        if self.config.name == "subset_years":
+            print(SUPPORTED_YEARS)
+            if not self.config.year_list:
+                raise ValueError("Please provide a valid year_list")
+            elif not set(self.config.year_list).issubset(set(SUPPORTED_YEARS)):
+                raise ValueError(f"Only {SUPPORTED_YEARS} are supported. Please provide a valid year_list")
+
        urls = _FILE_DICT
-        year_list=_YEARS
+        year_list = _YEARS
 
-        ##Subset _FILE_DICT and year_list to only years in the config.year_list
+        # Subset _FILE_DICT and year_list to only include years in config.year_list
        if self.config.year_list:
-            urls={year:urls[year] for year in self.config.year_list}
-            year_list=self.config.year_list
+            urls = {year: urls[year] for year in self.config.year_list}
+            year_list = self.config.year_list
 
        data_dir = dl_manager.download_and_extract(urls)
 
-        ###REturn a list of splits - but each split is for a year!
+        # Return a list of splits, where each split corresponds to a year
        return [
            datasets.SplitGenerator(
-                name=year,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "year_dir": os.path.join(data_dir[year], "mnt","122a7683-fa4b-45dd-9f13-b18cc4f4a187","ca_rule_based_fa_clean","faro_"+year),
-                    "split": year,
-                },
-            ) for year in year_list
+                name=year,
+                gen_kwargs={
+                    "year_dir": os.path.join(data_dir[year], "mnt", "122a7683-fa4b-45dd-9f13-b18cc4f4a187", "ca_rule_based_fa_clean", "faro_" + year),
+                    "split": year,
+                },
+            ) for year in year_list
        ]
-
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, year_dir, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+    def _generate_examples(self, year_dir, split):
+        """
+        Generates examples for the specified year and split.
+
+        Args:
+            year_dir (str): The directory path for the year.
+            split (str): The name of the split.
+
+        Yields:
+            tuple: The key-value pair containing the example ID and the example data.
+        """
        for filepath in os.listdir(year_dir):
-            with open(os.path.join(year_dir,filepath), encoding="utf-8") as f:
-                data = json.load(f)
-            if "lccn" in data.keys():
-                scan_id=filepath.split('.')[0]
-                scan_date=filepath.split("_")[0]
-                scan_page=filepath.split("_")[1]
-                scan_edition=filepath.split("_")[-2][8:]
-                newspaper_name=data["lccn"]["title"]
-                full_articles_in_data=data["full articles"]
-                for article in full_articles_in_data:
-                    article_id=str(article["full_article_id"]) +"_" +scan_id
-                    yield article_id, {
-                        "newspaper_name": newspaper_name,
-                        "edition": scan_edition,
-                        "date": scan_date,
-                        "page": scan_page,
-                        "headline": article["headline"],
-                        "byline": article["byline"],
-                        "article": article["article"]
-                    }
+            with open(os.path.join(year_dir, filepath), encoding="utf-8") as f:
+                data = json.load(f)
+            if "lccn" in data.keys():
+                scan_id = filepath.split('.')[0]
+                scan_date = filepath.split("_")[0]
+                scan_page = filepath.split("_")[1]
+                scan_edition = filepath.split("_")[-2][8:]
+                newspaper_name = data["lccn"]["title"]
+                full_articles_in_data = data["full articles"]
+                for article in full_articles_in_data:
+                    article_id = str(article["full_article_id"]) + "_" + scan_id
+                    yield article_id, {
+                        "article_id": article_id,
+                        "newspaper_name": newspaper_name,
+                        "edition": scan_edition,
+                        "date": scan_date,
+                        "page": scan_page,
+                        "headline": article["headline"],
+                        "byline": article["byline"],
+                        "article": article["article"],
+                    }
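
For reference, here is how the two configurations defined in this revision can be exercised. This is a minimal sketch, assuming the script is served from the dell-research-harvard/AmericanStories repository named in base_url, and relying on load_dataset forwarding extra keyword arguments such as year_list to the builder config; depending on your version of the datasets library you may also need to pass trust_remote_code=True.

from datasets import load_dataset

# "subset_years" (the default config) now validates year_list against
# SUPPORTED_YEARS and raises a ValueError for missing or unsupported years.
subset = load_dataset(
    "dell-research-harvard/AmericanStories",
    "subset_years",
    year_list=["1774", "1804"],
)

# Each requested year becomes its own split, keyed by the year string,
# and "article_id" is a new column added by this commit.
print(subset["1774"][0]["article_id"])

# "all_years" skips the year_list check and attempts to download one
# faro_YYYY.tar.gz archive for every year from 1774 through 1959.
# all_years = load_dataset("dell-research-harvard/AmericanStories", "all_years")

Because "all_years" fetches one tarball per year, the full range is a large download; the per-year splits let you pull only the years you need.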