"""HateBR dataset"""

import datasets
import pandas as pd
import json

_CITATION = """
@inproceedings{vargas2022hatebr,
  title={HateBR: A Large Expert Annotated Corpus of Brazilian Instagram Comments for Offensive Language and Hate Speech Detection},
  author={Vargas, Francielle and Carvalho, Isabelle and de G{\'o}es, Fabiana Rodrigues and Pardo, Thiago and Benevenuto, Fabr{\'\i}cio},
  booktitle={Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages={7174--7183},
  year={2022}
}
"""

_DESCRIPTION = """
HateBR is the first large-scale expert annotated corpus of Brazilian Instagram comments for hate speech and offensive language detection on the web and social media. The HateBR corpus was collected from Brazilian Instagram comments of politicians and manually annotated by specialists. It is composed of 7,000 documents annotated according to three different layers: a binary classification (offensive versus non-offensive comments), offensiveness level (highly, moderately, and slightly offensive messages), and nine hate speech groups (xenophobia, racism, homophobia, sexism, religious intolerance, partyism, apology for the dictatorship, antisemitism, and fatphobia). Each comment was annotated by three different annotators and achieved high inter-annotator agreement. Furthermore, baseline experiments reached an F1-score of 85%, outperforming current literature models for the Portuguese language. Accordingly, we hope that the proposed expertly annotated corpus may foster research on hate speech and offensive language detection in the Natural Language Processing area.
"""

_URLS = {
    "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv",
    "annotators": "https://raw.githubusercontent.com/franciellevargas/HateBR/83e8dea4e2d007a08ef534f3322aedeb80949f5c/annotators/final_concordancia_Kappa_Fleiss.csv",
    "indexes": "https://huggingface.co/datasets/ruanchaves/hatebr/raw/main/indexes.json"
}
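# "train" is the full 7,000-comment HateBR CSV; the train/validation/test splits are
# carved out of it below using the row positions listed in "indexes". "annotators"
# holds the three specialists' per-comment judgments that feed the
# specialist_*_hate_speech columns.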

_LABEL_INT_KEY = {
    "antisemitism": 1,
    "apology_for_the_dictatorship": 2,
    "fatphobia": 3,
    "homophobia": 4,
    "partyism": 5,
    "racism": 6,
    "religious_intolerance": 7,
    "sexism": 8,
    "xenophobia": 9,
    "offensive_&_non-hate_speech": -1,
    "non-offensive": 0
}

_INT_LABEL_KEY = {v:k for k,v in _LABEL_INT_KEY.items()}

def process_row(row, annotator_row):
    """Expand the comma-separated "hate_speech" codes of a CSV row into one boolean
    column per label and attach the three specialists' annotations."""
    categories = row["hate_speech"].split(",")
    del row["hate_speech"]
    # Start every label flag at False, then switch on the labels listed in the row.
    for default_label in _LABEL_INT_KEY:
        row[default_label] = False
    for int_label in categories:
        row[_INT_LABEL_KEY[int(int_label)]] = True

    # The annotator CSV stores each specialist's judgment as an integer in the
    # "Avaliador 1/2/3" columns; any nonzero value becomes True.
    if isinstance(annotator_row, dict):
        row["specialist_1_hate_speech"] = bool(int(annotator_row["Avaliador 1"]))
        row["specialist_2_hate_speech"] = bool(int(annotator_row["Avaliador 2"]))
        row["specialist_3_hate_speech"] = bool(int(annotator_row["Avaliador 3"]))
    return row
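
# A minimal illustrative sketch of what process_row produces (hypothetical values,
# not rows taken from the corpus):
#
#   row = {"instagram_comments": "...", "offensive_language": 1,
#          "offensiveness_levels": 3, "hate_speech": "5,6"}
#   annotator_row = {"Avaliador 1": "1", "Avaliador 2": "1", "Avaliador 3": "0"}
#   out = process_row(row, annotator_row)
#   # out["partyism"] and out["racism"] are True, every other label flag is False,
#   # and the three specialist columns become True, True, False respectively.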

class HateBR(datasets.GeneratorBasedBuilder):
    """Dataset loading script for the HateBR corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "instagram_comments": datasets.Value("string"),
                    "offensive_language": datasets.Value("bool"),
                    "offensiveness_levels": datasets.Value("int32"),
                    "antisemitism": datasets.Value("bool"),
                    "apology_for_the_dictatorship": datasets.Value("bool"),
                    "fatphobia": datasets.Value("bool"),
                    "homophobia": datasets.Value("bool"),
                    "partyism": datasets.Value("bool"),
                    "racism": datasets.Value("bool"),
                    "religious_intolerance": datasets.Value("bool"),
                    "sexism": datasets.Value("bool"),
                    "xenophobia": datasets.Value("bool"),
                    "offensive_&_non-hate_speech": datasets.Value("bool"),
                    "non-offensive": datasets.Value("bool"),
                    "specialist_1_hate_speech": datasets.Value("bool"),
                    "specialist_2_hate_speech": datasets.Value("bool"),
                    "specialist_3_hate_speech": datasets.Value("bool")
                }),
            supervised_keys=None,
            homepage="https://github.com/franciellevargas/HateBR",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        # All three splits read the same CSVs; only the "split" key changes, and
        # _generate_examples selects the matching rows via the downloaded indexes.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                    "annotators": downloaded_files["annotators"],
                    "indexes": downloaded_files["indexes"],
                    "split": split,
                },
            )
            for split_name, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath, annotators, indexes, split):
        # The full corpus is a single CSV; the annotator CSV is aligned with it row by row.
        records = pd.read_csv(filepath).to_dict("records")
        annotator_records = pd.read_csv(annotators).to_dict("records")

        # indexes.json maps each split name ("train"/"validation"/"test") to the
        # list of row positions belonging to that split.
        with open(indexes, "r") as f:
            indexes_object = json.load(f)

        split_indexes = set(indexes_object[split])

        selected_tuples = []
        for idx, (row, annotator) in enumerate(zip(records, annotator_records)):
            if idx in split_indexes:
                selected_tuples.append((row, annotator))

        processed_records = [process_row(*args) for args in selected_tuples]

        for idx, row in enumerate(processed_records):
            yield idx, row
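

if __name__ == "__main__":
    # Minimal local smoke test, a sketch only and not part of the published loading
    # script. It assumes this file is saved as a standalone script (e.g. "hatebr.py"),
    # that the URLs in _URLS are reachable, and that the installed `datasets` version
    # still supports script-based builders (datasets < 3.0).
    dataset = datasets.load_dataset(__file__)
    for split_name, split in dataset.items():
        print(split_name, len(split))
        print(split[0])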