ruanchaves committed on
Commit 1099eb0
Parent(s): cf961ae

feat: set indexes to remote url

Files changed (1)
hatebr.py  +9 -5
hatebr.py CHANGED
@@ -19,7 +19,8 @@ HateBR is the first large-scale expert annotated corpus of Brazilian Instagram c
 """
 _URLS = {
     "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv",
-    "annotators": "https://raw.githubusercontent.com/franciellevargas/HateBR/83e8dea4e2d007a08ef534f3322aedeb80949f5c/annotators/final_concordancia_Kappa_Fleiss.csv"
+    "annotators": "https://raw.githubusercontent.com/franciellevargas/HateBR/83e8dea4e2d007a08ef534f3322aedeb80949f5c/annotators/final_concordancia_Kappa_Fleiss.csv",
+    "indexes": "https://huggingface.co/datasets/ruanchaves/hatebr/raw/main/indexes.json"
 }
 
 _LABEL_INT_KEY = {
@@ -91,6 +92,7 @@ class Boun(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": downloaded_files["train"],
                     "annotators": downloaded_files["annotators"],
+                    "indexes": downloaded_files["indexes"],
                     "split": "train"
                 }
             ),
@@ -99,6 +101,7 @@ class Boun(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": downloaded_files["train"],
                     "annotators": downloaded_files["annotators"],
+                    "indexes": downloaded_files["indexes"],
                     "split": "validation"
                 }
             ),
@@ -107,20 +110,21 @@ class Boun(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": downloaded_files["train"],
                     "annotators": downloaded_files["annotators"],
+                    "indexes": downloaded_files["indexes"],
                     "split": "test"
                 }
             )
         ]
 
-    def _generate_examples(self, filepath, annotators, split):
+    def _generate_examples(self, filepath, annotators, indexes, split):
 
         records = pd.read_csv(filepath).to_dict("records")
         annotators = pd.read_csv(annotators).to_dict("records")
 
-        with open("indexes.json", "r") as f:
-            indexes = json.load(f)
+        with open(indexes, "r") as f:
+            indexes_object = json.load(f)
 
-        split_indexes = indexes[split]
+        split_indexes = indexes_object[split]
 
         selected_tuples = []
         for idx, (row, annotator) in enumerate(zip(records, annotators)):
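
The _split_generators method that produces downloaded_files sits outside these hunks, so the following is only a minimal sketch of how that side presumably fits together after this change. The class name Boun, the _URLS keys, and the gen_kwargs shape come from the diff; the call to dl_manager.download_and_extract and the surrounding layout are assumptions.

import datasets

class Boun(datasets.GeneratorBasedBuilder):

    def _split_generators(self, dl_manager):
        # Assumed: download every remote file listed in _URLS (now including
        # "indexes") and get back a dict of local cache paths keyed the same
        # way: "train", "annotators", "indexes".
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                    "annotators": downloaded_files["annotators"],
                    "indexes": downloaded_files["indexes"],
                    "split": "train",
                },
            ),
            # VALIDATION and TEST follow the same pattern as the hunks above,
            # differing only in the "split" value.
        ]

With the indexes file fetched from the dataset repo URL in _URLS, the script no longer relies on a local indexes.json sitting next to hatebr.py; loading the dataset (e.g. datasets.load_dataset("ruanchaves/hatebr")) should resolve all three files through the download manager and read the split indexes from the cached copy.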