Datasets:

License:
hojjat-m committed on
Commit
2581602
1 Parent(s): 9b5efb8

Update Persian-News.py

Browse files
Files changed (1) hide show
  1. Persian-News.py +82 -0
Persian-News.py CHANGED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import csv
3
+ import datasets
4
+ import requests
5
+ import os
6
+
7
+ _CITATION = """\\
8
+ @article{ParsBERT,
9
+ title={ParsBERT: Transformer-based Model for Persian Language Understanding},
10
+ author={Mehrdad Farahani, Mohammad Gharachorloo, Marzieh Farahani, Mohammad Manthouri},
11
+ journal={ArXiv},
12
+ year={2020},
13
+ volume={abs/2005.12515}
14
+ }
15
+ """
16
+ _DESCRIPTION = """\\\\\\\\
17
+ A dataset of various news articles scraped from different online news agencies’ websites. The total number of articles is 16,438, spread over eight different classes.
18
+ """
19
+
20
+ _DRIVE_URL = "https://drive.google.com/uc?export=download&id=1B6xotfXCcW9xS1mYSBQos7OCg0ratzKC"
21
+
22
class PersianNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for the Persian-News dataset.

    Adds no fields of its own; it simply forwards every keyword
    argument (name, version, description, ...) to the base
    ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # Zero-argument super() is equivalent to
        # super(PersianNewsConfig, self) in Python 3.
        super().__init__(**kwargs)
26
+
27
+
28
class PersianNews(datasets.GeneratorBasedBuilder):
    """Loader for the Persian-News text-classification dataset.

    Downloads an archive from Google Drive, extracts it, and yields
    (content, label, label_id) examples from tab-separated train/test/dev
    files.
    """

    BUILDER_CONFIGS = [
        PersianNewsConfig(name="Persian News", version=datasets.Version("1.0.0"), description="persian classification dataset on online agencie's articles"),
    ]

    def _info(self):
        """Return the dataset metadata (features, homepage, citation)."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "content": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    # Declared int64, so _generate_examples must yield ints.
                    "label_id": datasets.Value(dtype="int64"),
                }
            ),
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://hooshvare.github.io/docs/datasets/tc#persian-news",
            citation=_CITATION,
        )

    def custom_dataset(self, src_url, dest_path):
        """Download ``src_url`` to ``dest_path``.

        Streams the response in chunks instead of buffering the whole
        archive in memory (the original read ``response.content`` at once).
        Raises ``requests.HTTPError`` on a non-2xx status.
        """
        with requests.get(src_url, stream=True) as response:
            response.raise_for_status()
            with open(dest_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=1 << 20):
                    f.write(chunk)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/test/validation files."""
        # dl_manager downloads (via our custom Google Drive downloader)
        # and extracts the archive; splits point at files inside it.
        downloaded_file = dl_manager.download_custom(_DRIVE_URL, self.custom_dataset)
        extracted_file = dl_manager.extract(downloaded_file)
        # NOTE(review): the paths reference 'digimag/...' although this
        # loader is named Persian-News — confirm the archive's internal
        # directory layout actually matches.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/train.csv")}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/test.csv")}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/dev.csv")}),
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from one tab-separated split file.

        The original wrapped this in ``except Exception: print(e)``, which
        silently swallowed any failure (missing file, bad column name) and
        produced an empty split; errors now propagate so problems surface.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for idx, row in enumerate(reader):
                yield idx, {
                    "content": row["content"],
                    "label": row["label"],
                    # Cast to int to match the declared int64 feature;
                    # csv.DictReader always returns strings.
                    "label_id": int(row["label_id"]),
                }