96abhishekarora committed
Commit: f35912b
Parent: 806a21e

testing_loader

Files changed (1)
  1. AmericanStories.py +146 -39
AmericanStories.py CHANGED
@@ -1,60 +1,167 @@
 import json
 import tarfile
-from datasets import DatasetInfo, DatasetBuilder, DownloadManager
+from datasets import DatasetInfo, DatasetBuilder, DownloadManager, BuilderConfig, SplitGenerator, Split, Version
+import datasets
+import os
+import requests
+import re
+
+
+### Get list of files
+DATASET_URL = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/blob/main/"
+def get_list_of_files(url):
+    page = requests.get(url).text
+    links = re.findall(r'href=[\'"]?([^\'" >]+)', page)
+    ### Keep only links starting with faro_
+    links = [link for link in links if link.startswith('faro_')]
+    return links
+
+### Arrange into splits by year - files follow the format faro_YYYY.tar.gz
+def get_splits(links):
+    splits = {}
+    years = []
+    for link in links:
+        year = link.split('_')[1].split('.')[0]
+        if year not in splits:
+            splits[year] = []
+            years.append(year)
+        splits[year].append(link)
+    return splits, years
+
+### Data dir
+DATA_DIR = "."
+
+def make_year_file_splits(data_dir):
+    ### Get list of files
+    data_files = os.listdir(data_dir)
+    ### Keep only files starting with faro_
+    data_files = [file for file in data_files if file.startswith('faro_')]
+
+    ### Arrange into splits by year - files follow the format faro_YYYY.tar.gz
+    splits = {}
+    years = []
+    for file in data_files:
+        year = file.split('_')[1].split('.')[0]
+        if year not in splits:
+            splits[year] = []
+            years.append(year)
+        splits[year].append(file)
+    return splits, years
+
 
 _CITATION = """\
 Coming Soon
 """
 
 _DESCRIPTION = """\
-American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications.
-"""
+American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications. """
+
+_FILE_DICT, _YEARS = make_year_file_splits(DATA_DIR)
 
-class AmericanStories(DatasetBuilder):
-    VERSION = "0.0.1"
-
-    BUILDER_CONFIGS = [
-        DatasetBuilderConfig(
-            name="AmericanStories",
-            version=VERSION,
-            description=_DESCRIPTION,
-        ),
-    ]
-
-    def _info(self) -> DatasetInfo:
-        features = {
-            "feature_name": datasets.Value("string"),
-            # Define the other features of your dataset here
-        }
+
+class AmericanStories(datasets.GeneratorBasedBuilder):
+    """TODO: Short description of my dataset."""
+
+    VERSION = datasets.Version("0.0.1")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several subsets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need complex sub-parts in the dataset with configurable options,
+    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
+    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+    # You will be able to load one or the other configuration in the following list with
+    # data = datasets.load_dataset('my_dataset', 'first_domain')
+    # data = datasets.load_dataset('my_dataset', 'second_domain')
+    BUILDER_CONFIGS = [datasets.BuilderConfig(name="american_stories", version="0.0.1", description="This part of my dataset covers a first domain")]
+
+    def _info(self):
+        # TODO: This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset
+        features = datasets.Features(
+            {
+                "newspaper_name": datasets.Value("string"),
+                "edition": datasets.Value("string"),
+                "date": datasets.Value("string"),
+                "page": datasets.Value("string"),
+                "headline": datasets.Value("string"),
+                "byline": datasets.Value("string"),
+                "article": datasets.Value("string")
+                # These are the features of your dataset like images, labels ...
+            }
+        )
 
         return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            features=datasets.Features(features),
+            # This defines the different columns of the dataset and their types
+            features=features,
+            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below
+            # and specify them. They'll be used if as_supervised=True in builder.as_dataset.
+            # supervised_keys=("sentence", "label"),
+            # Citation for the dataset
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        data_files = [
-            os.path.join(self.data_dir, filename)
-            for filename in os.listdir(self.data_dir)
-            if filename.endswith(".tar.gz")
-        ]
-
+    def _split_generators(self, dl_manager, online=False):
+        # TODO: This method downloads/extracts the data and defines the splits, depending on the configuration.
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+        # It can accept any nested list/dict and will give back the same structure with the URLs replaced by paths to local files.
+        # By default the archives are extracted, and a path to a cached folder where they were extracted is returned instead of the archive.
+        if not online:
+            urls = _FILE_DICT
+            year_list = _YEARS
+        else:
+            _URL_DICT, year_list = get_splits(get_list_of_files(DATASET_URL))
+            urls = _URL_DICT
+        data_dir = dl_manager.download_and_extract(urls)
+
+        ### Return a list of splits - but each split is for a year!
         return [
-            self._generate_examples(data_file)
-            for data_file in data_files
+            datasets.SplitGenerator(
+                name=year,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "year_dir": os.path.join(data_dir[year][0], "mnt/122a7683-fa4b-45dd-9f13-b18cc4f4a187/ca_rule_based_fa_clean/faro_" + year),
+                    "split": year,
+                },
+            ) for year in year_list
         ]
 
-    def _generate_examples(self, filepath: str) -> Iterator[datasets.Example]:
-        with tarfile.open(filepath, "r:gz") as tar:
-            for member in tar.getmembers():
-                if member.isfile() and member.name.endswith('.json'):
-                    file_name = os.path.basename(member.name)
-                    with tar.extractfile(member) as f:
-                        data = json.load(f)
-                        for idx, example in enumerate(data):
-                            # Process and yield each example
-                            yield idx, {
-                                "feature_name": example["feature_name"],
-                                # Assign values to other features of your dataset
-                            }
+    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, year_dir, split):
+        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is for legacy reasons (tfds) and is not important in itself, but it must be unique for each example.
+        for filepath in os.listdir(year_dir):
+            with open(os.path.join(year_dir, filepath), encoding="utf-8") as f:
+                data = json.load(f)
+            scan_id = filepath.split('.')[0]
+            scan_date = filepath.split("_")[0]
+            scan_page = filepath.split("_")[1]
+            scan_edition = filepath.split("_")[-2][8:]
+            newspaper_name = data["lccn"]["title"]
+            full_articles_in_data = data["full articles"]
+            for article in full_articles_in_data:
+                article_id = str(article["full_article_id"]) + "_" + scan_id
+                yield article_id, {
+                    "newspaper_name": newspaper_name,
+                    "edition": scan_edition,
+                    "date": scan_date,
+                    "page": scan_page,
+                    "headline": article["headline"],
+                    "byline": article["byline"],
+                    "article": article["article"]
+                }
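
Since the new loader registers one split per year, a single year can be requested directly at load time. A minimal usage sketch, assuming the script is served from the dell-research-harvard/AmericanStories repo on the Hub and that an archive exists for the year requested; "1870" is purely illustrative:

from datasets import load_dataset

# Load just the 1870 split; split names come from the year-keyed
# SplitGenerators created in _split_generators above.
stories_1870 = load_dataset(
    "dell-research-harvard/AmericanStories",
    "american_stories",  # config name from BUILDER_CONFIGS
    split="1870",        # illustrative; any year with a faro_YYYY.tar.gz archive
)

# Each example exposes the features declared in _info().
print(stories_1870[0]["newspaper_name"], stories_1870[0]["headline"])

Keying splits by year means each request downloads only that year's faro_YYYY.tar.gz archive rather than the whole dataset.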