Datasets:
Tasks:
Text Classification
Modalities:
Text
Sub-tasks:
hate-speech-detection
Languages:
Portuguese
Size:
1K - 10K
Tags:
instagram
DOI:
ruanchaves
committed on
Commit
•
67fcdad
1
Parent(s):
955707f
hate speech labels
Browse files
hatebr.py
CHANGED
@@ -20,6 +20,22 @@ _URLS = {
|
|
20 |
"train": "https://raw.githubusercontent.com/ruanchaves/HateBR/main/dataset/HateBR.csv"
|
21 |
}
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
class Boun(datasets.GeneratorBasedBuilder):
|
24 |
|
25 |
VERSION = datasets.Version("1.0.0")
|
@@ -31,7 +47,20 @@ class Boun(datasets.GeneratorBasedBuilder):
|
|
31 |
"instagram_comments": datasets.Value("string"),
|
32 |
"offensive_language": datasets.Value("int32"),
|
33 |
"offensiveness_levels": datasets.Value("int32"),
|
34 |
-
"hate_speech": datasets.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
}
|
36 |
),
|
37 |
supervised_keys=None,
|
@@ -46,6 +75,14 @@ class Boun(datasets.GeneratorBasedBuilder):
|
|
46 |
]
|
47 |
|
48 |
def _generate_examples(self, filepath):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
records = pd.read_csv(filepath).to_dict("records")
|
50 |
for idx, row in enumerate(records):
|
51 |
-
yield idx, row
|
|
|
20 |
"train": "https://raw.githubusercontent.com/ruanchaves/HateBR/main/dataset/HateBR.csv"
|
21 |
}
|
22 |
|
23 |
+
_LABEL_INT_KEY = {
|
24 |
+
"antisemitism": 1,
|
25 |
+
"apology_for_the_dictatorship": 2,
|
26 |
+
"fatphobia": 3,
|
27 |
+
"homophobia": 4,
|
28 |
+
"partyism": 5,
|
29 |
+
"racism": 6,
|
30 |
+
"religious_intolerance": 7,
|
31 |
+
"sexism": 8,
|
32 |
+
"xenophobia": 9,
|
33 |
+
"offensive_&_non-hate_speech": -1,
|
34 |
+
"non-offensive": 0
|
35 |
+
}
|
36 |
+
|
37 |
+
_INT_LABEL_KEY = {v:k for k,v in _LABEL_INT_KEY.items()}
|
38 |
+
|
39 |
class Boun(datasets.GeneratorBasedBuilder):
|
40 |
|
41 |
VERSION = datasets.Version("1.0.0")
|
|
|
47 |
"instagram_comments": datasets.Value("string"),
|
48 |
"offensive_language": datasets.Value("int32"),
|
49 |
"offensiveness_levels": datasets.Value("int32"),
|
50 |
+
"hate_speech": datasets.Sequence(
|
51 |
+
feature={
|
52 |
+
"antisemitism": datasets.Value(dtype='bool', id=None),
|
53 |
+
"apology_for_the_dictatorship": datasets.Value(dtype='bool', id=None),
|
54 |
+
"fatphobia": datasets.Value(dtype='bool', id=None),
|
55 |
+
"homophobia": datasets.Value(dtype='bool', id=None),
|
56 |
+
"partyism": datasets.Value(dtype='bool', id=None),
|
57 |
+
"racism": datasets.Value(dtype='bool', id=None),
|
58 |
+
"religious_intolerance": datasets.Value(dtype='bool', id=None),
|
59 |
+
"sexism": datasets.Value(dtype='bool', id=None),
|
60 |
+
"xenophobia": datasets.Value(dtype='bool', id=None),
|
61 |
+
"offensive_&_non-hate_speech": datasets.Value(dtype='bool', id=None),
|
62 |
+
"non-offensive": datasets.Value(dtype='bool', id=None)
|
63 |
+
})
|
64 |
}
|
65 |
),
|
66 |
supervised_keys=None,
|
|
|
75 |
]
|
76 |
|
77 |
def _generate_examples(self, filepath):
    """Yield ``(key, example)`` pairs from the HateBR CSV at *filepath*.

    The raw ``hate_speech`` column stores a comma-separated list of integer
    category codes (see ``_LABEL_INT_KEY``); each row's value is expanded
    into a dict of per-category booleans keyed by category name.
    """

    def _expand_hate_speech(row):
        # The cell may be parsed by pandas as a number (single code) or as
        # a string like "1,6" (multiple codes); normalize to str first so
        # .split() is always safe.
        tokens = str(row["hate_speech"]).split(",")
        # Start with every category switched off, then enable the listed ones.
        flags = dict.fromkeys(_LABEL_INT_KEY, False)
        for token in tokens:
            # BUGFIX: split() yields strings but _INT_LABEL_KEY is keyed by
            # integers, so the token must be converted before lookup.
            # int(float(...)) also tolerates pandas rendering a code as "1.0".
            flags[_INT_LABEL_KEY[int(float(token.strip()))]] = True
        row["hate_speech"] = flags
        # NOTE(review): _info() declares hate_speech as datasets.Sequence of
        # bools, while this emits one plain dict of bools per example —
        # confirm the feature declaration matches this shape.
        return row

    records = pd.read_csv(filepath).to_dict("records")
    for idx, row in enumerate(records):
        yield idx, _expand_hate_speech(row)