Dataset:
poleval2019_cyber_bullying
Tasks:
Text Classification
Sub-tasks:
intent-classification
Languages:
Polish
Size:
10K<n<100K
License:
unknown (not specified in the dataset metadata)
{"task01": {"description": " In Task 6-1, the participants are to distinguish between normal/non-harmful tweets (class: 0) and tweets\n that contain any kind of harmful information (class: 1). This includes cyberbullying, hate speech and\n related phenomena.\n\n In Task 6-2, the participants shall distinguish between three classes of tweets: 0 (non-harmful),\n 1 (cyberbullying), 2 (hate-speech). There are various definitions of both cyberbullying and hate-speech,\n some of them even putting those two phenomena in the same group. The specific conditions on which we based\n our annotations for both cyberbullying and hate-speech, which have been worked out during ten years of research\n will be summarized in an introductory paper for the task, however, the main and definitive condition to 1\n distinguish the two is whether the harmful action is addressed towards a private person(s) (cyberbullying),\n or a public person/entity/large group (hate-speech).\n", "citation": "@proceedings{ogr:kob:19:poleval,\n editor = {Maciej Ogrodniczuk and \u0141ukasz Kobyli\u0144ski},\n title = {{Proceedings of the PolEval 2019 Workshop}},\n year = {2019},\n address = {Warsaw, Poland},\n publisher = {Institute of Computer Science, Polish Academy of Sciences},\n url = {http://2019.poleval.pl/files/poleval2019.pdf},\n isbn = \"978-83-63159-28-3\"}\n}\n", "homepage": "http://2019.poleval.pl/index.php/tasks/task6", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": {"input": "text", "output": "label"}, "builder_name": "poleval2019_cyber_bullying", "config_name": "task01", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1104322, "num_examples": 10041, "dataset_name": "poleval2019_cyber_bullying"}, "test": {"name": "test", "num_bytes": 109681, "num_examples": 1000, "dataset_name": "poleval2019_cyber_bullying"}}, "download_checksums": {"http://2019.poleval.pl/task6/task_6-1.zip": {"num_bytes": 339950, "checksum": "8b71cb27bfcb3b503e80f8959be8485a53b777f288042d3dc1e8fb54c863c2a8"}, "http://2019.poleval.pl/task6/task6_test.zip": {"num_bytes": 70051, "checksum": "6acac459608b2d6da75f138740447b047c7bd3e0bbf562964845830a27a0b2f7"}}, "download_size": 410001, "post_processing_size": null, "dataset_size": 1214003, "size_in_bytes": 1624004}, "task02": {"description": " In Task 6-1, the participants are to distinguish between normal/non-harmful tweets (class: 0) and tweets\n that contain any kind of harmful information (class: 1). This includes cyberbullying, hate speech and\n related phenomena.\n\n In Task 6-2, the participants shall distinguish between three classes of tweets: 0 (non-harmful),\n 1 (cyberbullying), 2 (hate-speech). There are various definitions of both cyberbullying and hate-speech,\n some of them even putting those two phenomena in the same group. 
Configuration "task02" (Task 6-2, three-class classification):
Builder name: poleval2019_cyber_bullying, version 1.0.0
Features: text (string), label (ClassLabel with 3 classes: "0", "1", "2")
Supervised keys: input "text", output "label"
Splits: train (10,041 examples, 1,104,322 bytes), test (1,000 examples, 109,681 bytes)
Downloads: http://2019.poleval.pl/task6/task_6-2.zip (340,096 bytes, SHA-256 659975fc8b6a505b11a4b8a9e29ae1beffede0c8bf83f409b904d982eb1daa8f); http://2019.poleval.pl/task6/task6_test.zip (70,051 bytes, SHA-256 6acac459608b2d6da75f138740447b047c7bd3e0bbf562964845830a27a0b2f7)
Download size: 410,147 bytes; dataset size: 1,214,003 bytes; total size: 1,624,150 bytes
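If the archives are fetched manually from the PolEval site, the SHA-256 digests listed above can be used to verify them. The sketch below is an independent convenience check, not part of the dataset loader, and assumes `requests` is available as an extra dependency; the URLs and digests are copied from the download metadata of the two configurations.

```python
# Standalone checksum check, assuming `requests` is installed; URLs and SHA-256
# digests come from the download metadata above.
import hashlib

import requests

CHECKSUMS = {
    "http://2019.poleval.pl/task6/task_6-1.zip":
        "8b71cb27bfcb3b503e80f8959be8485a53b777f288042d3dc1e8fb54c863c2a8",
    "http://2019.poleval.pl/task6/task_6-2.zip":
        "659975fc8b6a505b11a4b8a9e29ae1beffede0c8bf83f409b904d982eb1daa8f",
    "http://2019.poleval.pl/task6/task6_test.zip":
        "6acac459608b2d6da75f138740447b047c7bd3e0bbf562964845830a27a0b2f7",
}

for url, expected in CHECKSUMS.items():
    payload = requests.get(url, timeout=60).content
    digest = hashlib.sha256(payload).hexdigest()
    print("OK " if digest == expected else "BAD", url)
```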