gpt2 qg hl
- README.md +56 -0
- added_tokens.json +1 -0
- config.json +36 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,56 @@
# Transformer QG on SQuAD

HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/)

**This is a reproduced version.**

More detail: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD)
## Usage

### Input Format

```
C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|]
```

### Input Example

```
Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL].
```

> # Who wrote Harry Potter?
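The GitHub repo above ships its own inference helpers; the snippet below is only a minimal standalone sketch of how this checkpoint could be used with `transformers` to generate a question for a `[HL]`-marked context. The local checkpoint path, the "context `[SEP]` question" prompting convention, and the decoding details are assumptions, not something documented in this README.

```python
# Minimal sketch (not the official inference script from the repo above).
# Assumptions: the checkpoint is cloned locally at `model_path`, and the model
# was trained to continue "context [SEP]" with the question text.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/this/checkpoint"  # assumed local clone of this repo
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

context = ("Harry Potter is a series of seven fantasy novels written by "
           "British author, [HL]J. K. Rowling[HL].")

inputs = tokenizer(context + " [SEP]", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=inputs["input_ids"].shape[-1] + 32,  # leave room for the question
    pad_token_id=tokenizer.pad_token_id,
)

# Keep special tokens so we can split off everything generated after [SEP].
decoded = tokenizer.decode(outputs[0], skip_special_tokens=False)
question = decoded.split("[SEP]")[-1].replace("<|endoftext|>", "").strip()
print(question)  # e.g. "Who wrote Harry Potter?"
```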
## Data setting

We report results under two dataset settings, as follows.

### SQuAD
- train: 87599
- validation: 10570
> [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250)

### SQuAD NQG
- train: 75722
- dev: 10570
- test: 11877
> [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106)
## Available models
- BART
- GPT2
- T5
## Experiments

We report scores with the `NQG Scorer`, the scorer used in SQuAD NQG.

Unless otherwise specified, the model size defaults to "base".

### SQuAD
Model                            |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L|
---------------------------------|------|------|------|------|------|-------|
BART-HLSQG                       |54.67 |39.26 |30.34 |24.15 |25.43 |52.64  |
GPT2-HLSQG                       |49.31 |33.95 |25.41 |19.69 |22.29 |48.82  |
T5-HLSQG                         |54.29 |39.22 |30.43 |24.26 |25.56 |53.11  |
### SQuAD NQG
Model                            |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L|
---------------------------------|------|------|------|------|------|-------|
BERT-HLSQG (Chan et al.)         |49.73 |34.60 |26.13 |20.33 |23.88 |48.23  |
BART-HLSQG                       |54.12 |38.19 |28.84 |22.35 |24.55 |51.03  |
GPT2-HLSQG                       |49.82 |33.69 |24.71 |18.63 |21.90 |47.60  |
T5-HLSQG                         |53.13 |37.60 |28.62 |22.38 |24.48 |51.20  |
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"[HL]": 50259, "[SEP]": 50258, "[PAD]": 50257}
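These three entries extend GPT-2's stock vocabulary (IDs 0-50256), which is why `config.json` below declares `vocab_size: 50260`. A small check, assuming the tokenizer is loaded from a local clone of this repo:

```python
# Sketch: confirm the IDs that added_tokens.json assigns to the extra tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # assumed local path
for token in ["[PAD]", "[SEP]", "[HL]"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected, per the file above: [PAD] 50257, [SEP] 50258, [HL] 50259
```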
config.json
ADDED
@@ -0,0 +1,36 @@
{
  "_name_or_path": "gpt2",
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "gradient_checkpointing": false,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 1024,
  "resid_pdrop": 0.1,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 50
    }
  },
  "transformers_version": "4.5.1",
  "use_cache": true,
  "vocab_size": 50260
}
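The only field that differs from a stock `gpt2` config is `vocab_size` (50260 instead of 50257), which accounts for the three added tokens. The sketch below shows how such a config typically arises when fine-tuning from the base checkpoint; it is not the authors' training code (see the GitHub repo above for that), and the exact way the tokens were registered is an assumption chosen to match the IDs in `added_tokens.json`.

```python
# Sketch: growing GPT-2's embedding matrix to cover the added special tokens,
# which is what produces vocab_size 50260 in a config like the one above.
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Register [PAD]/[SEP] as special tokens and [HL] as a plain added token,
# in the order that reproduces IDs 50257, 50258, 50259 (an assumption).
tokenizer.add_special_tokens({"pad_token": "[PAD]", "sep_token": "[SEP]"})
tokenizer.add_tokens(["[HL]"])

model.resize_token_embeddings(len(tokenizer))  # 50257 -> 50260
print(model.config.vocab_size)                 # 50260
```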
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fbce16e272570fbf2f583336a6d813eb00424c128e77e2891a63db4ca211387c
size 510415776
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "sep_token": "[SEP]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
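Taken together, these two files mean the tokenizer reuses `<|endoftext|>` as BOS/EOS/UNK, maps `[SEP]` and `[PAD]` to the added IDs, and caps inputs at 1024 tokens. A quick way to inspect this, again assuming a local clone of the repo:

```python
# Sketch: inspect what special_tokens_map.json and tokenizer_config.json
# resolve to once the tokenizer is loaded.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # assumed local path
print(tokenizer.special_tokens_map)  # bos/eos/unk -> <|endoftext|>, sep -> [SEP], pad -> [PAD]
print(tokenizer.model_max_length)    # 1024
```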
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff