Datasets: Fix metadata in dataset card #19
opened by albertvillanova (HF staff)

README.md CHANGED
@@ -6,7 +6,7 @@ language_creators:
 language:
 - en
 license:
--
+- other
 multilinguality:
 - monolingual
 size_categories:
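As a quick sanity check of the corrected YAML block, here is a minimal sketch (assuming the `huggingface_hub` library and the canonical `glue` dataset repo id) that reads the card metadata back from the Hub; attribute names follow `DatasetCardData`:

```python
# Minimal sketch: read the dataset card's YAML metadata back after this change.
# Assumes huggingface_hub is installed and "glue" is the dataset repo id.
from huggingface_hub import DatasetCard

card = DatasetCard.load("glue")  # fetches and parses the dataset's README.md
print(card.data.license)         # should reflect the new license value, "other"
print(card.data.language)        # ["en"]
```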
@@ -629,9 +629,10 @@ train-eval-index:
 
 ## Dataset Description
 
-- **Homepage:**
-- **Repository:**
-- **Paper:**
+- **Homepage:** https://gluebenchmark.com/
+- **Repository:** https://github.com/nyu-mll/GLUE-baselines
+- **Paper:** https://arxiv.org/abs/1804.07461
+- **Leaderboard:** https://gluebenchmark.com/leaderboard
 - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 - **Size of downloaded dataset files:** 1.00 GB
 - **Size of the generated dataset:** 240.84 MB
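The size figures quoted above are totals over all GLUE configurations. A small sketch (assuming the `datasets` library; "cola" is just one example config) can report the per-config numbers without downloading the data itself:

```python
# Minimal sketch: inspect per-config size metadata for GLUE.
# Assumes the datasets library; "cola" is one example configuration.
from datasets import load_dataset_builder

builder = load_dataset_builder("glue", "cola")
print(builder.info.download_size)  # bytes to download for this config
print(builder.info.dataset_size)   # bytes of the generated dataset for this config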
@@ -1084,26 +1085,102 @@ The data fields are the same among all splits.
 
 ### Licensing Information
 
-
+The primary GLUE tasks are built on and derived from existing datasets. We refer users to the original licenses accompanying each dataset.
 
 ### Citation Information
 
+If you use GLUE, please cite all the datasets you use.
+
+In addition, we encourage you to use the following BibTeX citation for GLUE itself:
 ```
-@article{warstadt2018neural,
-  title={Neural Network Acceptability Judgments},
-  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
-  journal={arXiv preprint arXiv:1805.12471},
-  year={2018}
-}
 @inproceedings{wang2019glue,
   title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
   author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
   note={In the Proceedings of ICLR.},
   year={2019}
 }
+```
 
-
-the
+If you evaluate using GLUE, we also highly recommend citing the papers that originally introduced the nine GLUE tasks, both to give the original authors their due credit and because venues will expect papers to describe the data they evaluate on.
+The following provides BibTeX for all of the GLUE tasks, except QQP, for which we recommend adding a footnote to this page: https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs
+```
+@article{warstadt2018neural,
+  title={Neural Network Acceptability Judgments},
+  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R.},
+  journal={arXiv preprint 1805.12471},
+  year={2018}
+}
+@inproceedings{socher2013recursive,
+  title={Recursive deep models for semantic compositionality over a sentiment treebank},
+  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
+  booktitle={Proceedings of EMNLP},
+  pages={1631--1642},
+  year={2013}
+}
+@inproceedings{dolan2005automatically,
+  title={Automatically constructing a corpus of sentential paraphrases},
+  author={Dolan, William B and Brockett, Chris},
+  booktitle={Proceedings of the International Workshop on Paraphrasing},
+  year={2005}
+}
+@book{agirre2007semantic,
+  editor = {Agirre, Eneko and M`arquez, Llu'{i}s and Wicentowski, Richard},
+  title = {Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007)},
+  month = {June},
+  year = {2007},
+  address = {Prague, Czech Republic},
+  publisher = {Association for Computational Linguistics},
+}
+@inproceedings{williams2018broad,
+  author = {Williams, Adina and Nangia, Nikita and Bowman, Samuel R.},
+  title = {A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference},
+  booktitle = {Proceedings of NAACL-HLT},
+  year = 2018
+}
+@inproceedings{rajpurkar2016squad,
+  author = {Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy}
+  title = {{SQ}u{AD}: 100,000+ Questions for Machine Comprehension of Text},
+  booktitle = {Proceedings of EMNLP}
+  year = {2016},
+  publisher = {Association for Computational Linguistics},
+  pages = {2383--2392},
+  location = {Austin, Texas},
+}
+@incollection{dagan2006pascal,
+  title={The {PASCAL} recognising textual entailment challenge},
+  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
+  booktitle={Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment},
+  pages={177--190},
+  year={2006},
+  publisher={Springer}
+}
+@article{bar2006second,
+  title={The second {PASCAL} recognising textual entailment challenge},
+  author={Bar Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
+  year={2006}
+}
+@inproceedings{giampiccolo2007third,
+  title={The third {PASCAL} recognizing textual entailment challenge},
+  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
+  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
+  pages={1--9},
+  year={2007},
+  organization={Association for Computational Linguistics},
+}
+@article{bentivogli2009fifth,
+  title={The Fifth {PASCAL} Recognizing Textual Entailment Challenge},
+  author={Bentivogli, Luisa and Dagan, Ido and Dang, Hoa Trang and Giampiccolo, Danilo and Magnini, Bernardo},
+  booktitle={TAC},
+  year={2009}
+}
+@inproceedings{levesque2011winograd,
+  title={The {W}inograd schema challenge},
+  author={Levesque, Hector J and Davis, Ernest and Morgenstern, Leora},
+  booktitle={{AAAI} Spring Symposium: Logical Formalizations of Commonsense Reasoning},
+  volume={46},
+  pages={47},
+  year={2011}
+}
 ```
 
 