parquet-converter committed
Commit 001a850
1 Parent(s): 9424b75

Update parquet files

.gitattributes DELETED
@@ -1,54 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
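
Each deleted line above is a Git attributes rule that routes matching paths through the LFS filter instead of storing them directly in Git. As a rough, hedged illustration of what such glob patterns match (the `patterns` subset and sample paths below are illustrative, not part of the commit; real Git attribute matching has extra rules that `fnmatch` only approximates):

```python
from fnmatch import fnmatch

# Illustrative subset of the deleted .gitattributes patterns.
patterns = ["*.parquet", "*.zip", "saved_model/**/*", "*tfevents*"]

def tracked_by_lfs(path: str) -> bool:
    """Return True if the path matches any LFS-tracked pattern.

    Note: fnmatch is only an approximation of Git's attribute
    matching (e.g. '**' semantics differ), used here as a sketch.
    """
    return any(fnmatch(path, p) for p in patterns)

for p in ["data/train.parquet", "README.md", "runs/tfevents.123"]:
    print(p, "->", tracked_by_lfs(p))
```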
data/test_unseen_questions-00000-of-00001-3c502ac25141f64c.parquet → JohnnyBoy00--saf_micro_job_german/parquet-test_unseen_answers.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:2929a75113f50a7c0e63f69ace14461ae3d74a8b529373e61e23dafd961a0556
- size 69139
+ oid sha256:ef1e57b766bd6d358ab1949a411235c3025aa7c70d1f3b4468867d92b1fa71a4
+ size 41361
data/validation-00000-of-00001-0da65f8dcc3502f2.parquet → JohnnyBoy00--saf_micro_job_german/parquet-test_unseen_questions.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:bd322dcc549aa70c2847e00e62dfbb778f80d80ab3487fac0b222928b372cfdb
- size 39667
+ oid sha256:99a133c0a0a87c35a55b0694a5e83d5ca69693a65b67a49188e449611ca5e0d4
+ size 70213
data/train-00000-of-00001-d7d79904ef6ab584.parquet → JohnnyBoy00--saf_micro_job_german/parquet-train.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:d4b778c58311f01950e87427f98da87bad51927a0e724051a03663a9a168269f
- size 125490
+ oid sha256:d4c5205fc858d05ec4661a91047cd76bfee7a5374ca76bb3e199ef121c7cc674
+ size 139602
data/test_unseen_answers-00000-of-00001-1eac04e462a6f413.parquet → JohnnyBoy00--saf_micro_job_german/parquet-validation.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8c8f564ed78f6763c23c691633b07f119e1e3ff1a8c68480309dbb6a8be75f18
- size 40307
+ oid sha256:f9dc94fa2eb27fb89a96831a2fa6fd7bd65b101c8a97a58880c50664316616a0
+ size 40711
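
The files above are Git LFS pointer files: each records only an `oid` (the SHA-256 digest of the real content) and the content `size` in bytes. As a hedged sketch, one way to check that a locally downloaded parquet file matches its pointer is to recompute the digest (the local path below is hypothetical):

```python
import hashlib
from pathlib import Path

def lfs_oid(path: Path, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest Git LFS records as 'oid sha256:...'."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of one of the renamed parquet files.
path = Path("JohnnyBoy00--saf_micro_job_german/parquet-train.parquet")
expected = "d4c5205fc858d05ec4661a91047cd76bfee7a5374ca76bb3e199ef121c7cc674"
print("size:", path.stat().st_size)         # should match the 'size' line
print("match:", lfs_oid(path) == expected)  # should match the 'oid' line
```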
README.md DELETED
@@ -1,159 +0,0 @@
- ---
- pretty_name: SAF - Micro Job - German
- annotations_creators:
- - expert-generated
- language:
- - de
- language_creators:
- - other
- multilinguality:
- - monolingual
- size_categories:
- - 1K<n<10K
- source_datasets:
- - original
- tags:
- - short answer feedback
- - micro job
- task_categories:
- - text2text-generation
-
- dataset_info:
-   features:
-   - name: id
-     dtype: string
-   - name: question
-     dtype: string
-   - name: reference_answer
-     dtype: string
-   - name: provided_answer
-     dtype: string
-   - name: answer_feedback
-     dtype: string
-   - name: verification_feedback
-     dtype: string
-   - name: score
-     dtype: float64
-   splits:
-   - name: train
-     num_bytes: 885526
-     num_examples: 1226
-   - name: validation
-     num_bytes: 217946
-     num_examples: 308
-   - name: test_unseen_answers
-     num_bytes: 198832
-     num_examples: 271
-   - name: test_unseen_questions
-     num_bytes: 545524
-     num_examples: 602
-   download_size: 274603
-   dataset_size: 1847828
- ---
- # Dataset Card for "saf_micro_job_german"
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Annotation Process](#annotation-process)
- - [Additional Information](#additional-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Paper:** [Your Answer is Incorrect... Would you like to know why? Introducing a Bilingual Short Answer Feedback Dataset](https://aclanthology.org/2022.acl-long.587) (Filighera et al., ACL 2022)
-
- ### Dataset Summary
-
- The Short Answer Feedback (SAF) dataset is a short answer dataset introduced in [Your Answer is Incorrect... Would you like to know why? Introducing a Bilingual Short Answer Feedback Dataset](https://aclanthology.org/2022.acl-long.587) (Filighera et al., ACL 2022) to remedy the lack of content-focused feedback datasets. This version of the dataset contains 8 German questions used in micro-job training on the crowd-worker platform appJobber, while the original dataset presented in the paper comprises an assortment of English and German short answer questions (with reference answers). Please refer to the [saf_communication_networks_english](https://huggingface.co/datasets/JohnnyBoy00/saf_communication_networks_english) dataset for the English subset of the original dataset. A similarly constructed SAF dataset covering the German legal domain can be found at [saf_legal_domain_german](https://huggingface.co/datasets/JohnnyBoy00/saf_legal_domain_german).
-
- ### Supported Tasks and Leaderboards
-
- - `short_answer_feedback`: The dataset can be used to train a Text2Text Generation model from HuggingFace Transformers to generate automatic short answer feedback.
-
- ### Languages
-
- The questions, reference answers, provided answers and the answer feedback in the dataset are written in German.
-
- ## Dataset Structure
-
- ### Data Instances
-
- An example of an entry of the training split looks as follows.
- ```
- {
-     "id": "1",
-     "question": "Frage 1: Ist das eine Frage?",
-     "reference_answer": "Ja, das ist eine Frage.",
-     "provided_answer": "Ich bin mir sicher, dass das eine Frage ist.",
-     "answer_feedback": "Korrekt!",
-     "verification_feedback": "Correct",
-     "score": 1
- }
- ```
-
- ### Data Fields
-
- The data fields are the same among all splits.
-
- - `id`: a `string` feature (UUID4 in HEX format).
- - `question`: a `string` feature representing a question.
- - `reference_answer`: a `string` feature representing a reference answer to the question.
- - `provided_answer`: a `string` feature representing an answer that was provided for a particular question.
- - `answer_feedback`: a `string` feature representing the feedback given to the provided answers.
- - `verification_feedback`: a `string` feature representing an automatic labeling of the score. It can be `Correct` (`score` = 1), `Incorrect` (`score` = 0) or `Partially correct` (all intermediate scores).
- - `score`: a `float64` feature (between 0 and 1) representing the score given to the provided answer.
-
- ### Data Splits
-
- The dataset comprises four data splits.
-
- - `train`: used for training, contains a set of questions and the provided answers to them.
- - `validation`: used for validation, contains a set of questions and the provided answers to them (derived from the original training set defined in the paper).
- - `test_unseen_answers`: used for testing, contains unseen answers to the questions present in the `train` split.
- - `test_unseen_questions`: used for testing, contains unseen questions that do not appear in the `train` split.
-
- | Split               | train | validation | test_unseen_answers | test_unseen_questions |
- |---------------------|------:|-----------:|--------------------:|----------------------:|
- | Number of instances |  1226 |        308 |                 271 |                   602 |
-
- ## Dataset Creation
-
- ### Annotation Process
-
- Two experienced appJobber employees were selected to evaluate the crowd-worker platform's answers, and both underwent general annotation guideline training (supervised by a Psychology doctoral student with prior work in the field of feedback). After the training, the annotators individually provided feedback on the answers following an agreed-upon scoring rubric and the general annotation guideline. The individually annotated answer files were then combined into a cohesive gold standard after discussing and resolving disagreements.
-
- ## Additional Information
-
- ### Citation Information
-
- ```
- @inproceedings{filighera-etal-2022-answer,
-     title = "Your Answer is Incorrect... Would you like to know why? Introducing a Bilingual Short Answer Feedback Dataset",
-     author = "Filighera, Anna and
-       Parihar, Siddharth and
-       Steuer, Tim and
-       Meuser, Tobias and
-       Ochs, Sebastian",
-     booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
-     month = may,
-     year = "2022",
-     address = "Dublin, Ireland",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/2022.acl-long.587",
-     doi = "10.18653/v1/2022.acl-long.587",
-     pages = "8577--8591",
- }
- ```
-
- ### Contributions
-
- Thanks to [@JohnnyBoy2103](https://github.com/JohnnyBoy2103) for adding this dataset.
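
The deleted card above remains the best reference for the schema of the new parquet files. As a hedged sketch (assuming the repository id `JohnnyBoy00/saf_micro_job_german` from the card still resolves via the `datasets` library), loading and inspecting the four splits could look like this:

```python
from datasets import load_dataset

# Load all four splits described in the dataset card; the repo id is
# taken from the card and assumed to point at this repository.
dataset = load_dataset("JohnnyBoy00/saf_micro_job_german")

# Split sizes should match the card: 1226 / 308 / 271 / 602 examples.
for split, ds in dataset.items():
    print(split, ds.num_rows)

# Inspect one training example (fields: id, question, reference_answer,
# provided_answer, answer_feedback, verification_feedback, score).
print(dataset["train"][0])
```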
conversion.py DELETED
@@ -1,96 +0,0 @@
- import os
- import random
- import xml.etree.ElementTree as et
- import jsonlines
- import uuid
-
- # set random seed for shuffling
- random.seed(1)
-
- def convert_xml_to_jsonl(path_to_dataset, dir, filename, train_split=None):
-     """
-     Utility function used for conversion of XML files from the dataset into JSON lines
-
-     Params:
-         path_to_dataset (string): path to the folder containing the dataset (in XML format)
-         dir (string): name of the directory where the JSON lines file will be stored
-         filename (string): name of the JSON lines file that will store the dataset
-         train_split (float or None): if not None, defines which fraction of the dataset to use for the train split (the remainder goes to validation)
-
-     Returns:
-         None: the file is saved in JSON lines format in the specified location
-     """
-     data = []
-
-     # loop through all files in directory
-     for f in os.listdir(path_to_dataset):
-         if f.endswith('.xml'):
-             root = et.parse(os.path.join(path_to_dataset, f)).getroot()
-             # get question
-             question = root.find('questionText').text.replace('\n', ' ')
-             # get reference and student answers
-             ref_answers = [x for x in root.find('referenceAnswers')]
-             student_answers = [x for x in root.find('studentAnswers')]
-
-             if len(ref_answers) == 1:
-                 # get reference answer and strip surrounding whitespace
-                 ref_answer = ref_answers[0].text.strip()
-
-                 # loop through all student answers and store the appropriate fields in a list
-                 for answer in student_answers:
-                     response = answer.find('response').text.strip()
-                     score = float(answer.find('score').text)
-                     feedback = answer.find('response_feedback').text.strip()
-                     verification_feedback = answer.find('verification_feedback').text.strip()
-
-                     # create dictionary with the appropriate fields
-                     data.append({
-                         'id': uuid.uuid4().hex,  # generate unique id in HEX format
-                         'question': question,
-                         'reference_answer': ref_answer,
-                         'provided_answer': response,
-                         'answer_feedback': feedback,
-                         'verification_feedback': verification_feedback,
-                         'score': score
-                     })
-
-     if not os.path.exists(dir):
-         print('Creating directory where JSON file will be stored\n')
-         os.makedirs(dir)
-
-     if train_split is None:
-         with jsonlines.open(f'{os.path.join(dir, filename)}.jsonl', 'w') as writer:
-             writer.write_all(data)
-     else:
-         # shuffle data and divide it into train and validation splits
-         random.shuffle(data)
-         train_data = data[: int(train_split * (len(data) - 1))]
-         val_data = data[int(train_split * (len(data) - 1)) :]
-
-         # write JSON lines file with train data
-         with jsonlines.open(f'{os.path.join(dir, filename)}-train.jsonl', 'w') as writer:
-             writer.write_all(train_data)
-
-         # write JSON lines file with validation data
-         with jsonlines.open(f'{os.path.join(dir, filename)}-validation.jsonl', 'w') as writer:
-             writer.write_all(val_data)
-
- if __name__ == '__main__':
-     # convert micro job dataset (german) to JSON lines
-     convert_xml_to_jsonl(
-         'data/training/german',
-         'data/json',
-         'saf-micro-job-german',
-         train_split=0.8)
-
-     convert_xml_to_jsonl(
-         'data/unseen_answers/german',
-         'data/json',
-         'saf-micro-job-german-unseen-answers')
-
-     convert_xml_to_jsonl(
-         'data/unseen_questions/german',
-         'data/json',
-         'saf-micro-job-german-unseen-questions')
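
The commit itself ("Update parquet files") replaces outputs like these with parquet files. As a rough sketch of that conversion step (the path below is one conversion.py would produce; the exact converter the parquet-converter bot runs is not shown in this commit), one could do:

```python
import pandas as pd

# Read a JSON lines file produced by conversion.py and write it back
# out as parquet (requires a parquet engine such as pyarrow).
df = pd.read_json('data/json/saf-micro-job-german-train.jsonl', lines=True)
df.to_parquet('data/json/saf-micro-job-german-train.parquet', index=False)

print(df.shape)  # rows x columns, e.g. (1226, 7) per the dataset card
```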