baber committed
Commit d316af8
Parent: d1df6cf

Update README.md

Files changed (1)
  1. README.md +58 -3
README.md CHANGED
@@ -19,8 +19,63 @@ AGIEval is a human-centric benchmark specifically designed to evaluate the gener
 
 ### Citation Information
 
-[More Information Needed]
+```
+@misc{zhong2023agieval,
+      title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models},
+      author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan},
+      year={2023},
+      eprint={2304.06364},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+```
+Citations for the individual datasets:
 
-### Contributions
+```
+@inproceedings{ling-etal-2017-program,
+    title = "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems",
+    author = "Ling, Wang and
+      Yogatama, Dani and
+      Dyer, Chris and
+      Blunsom, Phil",
+    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+    month = jul,
+    year = "2017",
+    address = "Vancouver, Canada",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/P17-1015",
+    doi = "10.18653/v1/P17-1015",
+    pages = "158--167",
+    abstract = "Solving algebraic word problems requires executing a series of arithmetic operations{---}a program{---}to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.",
+}
 
-[More Information Needed]
+@article{hendrycksmath2021,
+    title={Measuring Mathematical Problem Solving With the MATH Dataset},
+    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
+    journal={NeurIPS},
+    year={2021}
+}
+
+@inproceedings{Liu2020LogiQAAC,
+    title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
+    author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
+    booktitle={International Joint Conference on Artificial Intelligence},
+    year={2020}
+}
+
+@inproceedings{zhong2019jec,
+    title={JEC-QA: A Legal-Domain Question Answering Dataset},
+    author={Zhong, Haoxi and Xiao, Chaojun and Tu, Cunchao and Zhang, Tianyang and Liu, Zhiyuan and Sun, Maosong},
+    booktitle={Proceedings of AAAI},
+    year={2020},
+}
+
+@article{Wang2021FromLT,
+    title={From LSAT: The Progress and Challenges of Complex Reasoning},
+    author={Siyuan Wang and Zhongkun Liu and Wanjun Zhong and Ming Zhou and Zhongyu Wei and Zhumin Chen and Nan Duan},
+    journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
+    year={2021},
+    volume={30},
+    pages={2201--2216}
+}
+```
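
As a usage note, here is a minimal LaTeX sketch of how the new entries would be cited once the BibTeX block above is saved to a bibliography file. The file name `agieval.bib` and the surrounding document are illustrative assumptions, not part of this commit:

```latex
% Minimal sketch: assumes the BibTeX block above was saved as
% agieval.bib (hypothetical file name chosen for illustration).
\documentclass{article}
\begin{document}
AGIEval~\cite{zhong2023agieval} aggregates several existing exams and
datasets, including MATH~\cite{hendrycksmath2021} and
LogiQA~\cite{Liu2020LogiQAAC}.
\bibliographystyle{plain}
\bibliography{agieval}  % build with: pdflatex, bibtex, pdflatex, pdflatex
\end{document}
```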