Den4ikAI committed
Commit da9cd26
1 Parent(s): e45263b

Upload 9 files

added_tokens.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "<LM>": 50357,
+   "<SC1>": 50358,
+   "<SC2>": 50359,
+   "<SC3>": 50360,
+   "<SC4>": 50361,
+   "<SC5>": 50362,
+   "<SC6>": 50363,
+   "<extra_id_0>": 50257,
+   "<extra_id_10>": 50267,
+   "<extra_id_11>": 50268,
+   "<extra_id_12>": 50269,
+   "<extra_id_13>": 50270,
+   "<extra_id_14>": 50271,
+   "<extra_id_15>": 50272,
+   "<extra_id_16>": 50273,
+   "<extra_id_17>": 50274,
+   "<extra_id_18>": 50275,
+   "<extra_id_19>": 50276,
+   "<extra_id_1>": 50258,
+   "<extra_id_20>": 50277,
+   "<extra_id_21>": 50278,
+   "<extra_id_22>": 50279,
+   "<extra_id_23>": 50280,
+   "<extra_id_24>": 50281,
+   "<extra_id_25>": 50282,
+   "<extra_id_26>": 50283,
+   "<extra_id_27>": 50284,
+   "<extra_id_28>": 50285,
+   "<extra_id_29>": 50286,
+   "<extra_id_2>": 50259,
+   "<extra_id_30>": 50287,
+   "<extra_id_31>": 50288,
+   "<extra_id_32>": 50289,
+   "<extra_id_33>": 50290,
+   "<extra_id_34>": 50291,
+   "<extra_id_35>": 50292,
+   "<extra_id_36>": 50293,
+   "<extra_id_37>": 50294,
+   "<extra_id_38>": 50295,
+   "<extra_id_39>": 50296,
+   "<extra_id_3>": 50260,
+   "<extra_id_40>": 50297,
+   "<extra_id_41>": 50298,
+   "<extra_id_42>": 50299,
+   "<extra_id_43>": 50300,
+   "<extra_id_44>": 50301,
+   "<extra_id_45>": 50302,
+   "<extra_id_46>": 50303,
+   "<extra_id_47>": 50304,
+   "<extra_id_48>": 50305,
+   "<extra_id_49>": 50306,
+   "<extra_id_4>": 50261,
+   "<extra_id_50>": 50307,
+   "<extra_id_51>": 50308,
+   "<extra_id_52>": 50309,
+   "<extra_id_53>": 50310,
+   "<extra_id_54>": 50311,
+   "<extra_id_55>": 50312,
+   "<extra_id_56>": 50313,
+   "<extra_id_57>": 50314,
+   "<extra_id_58>": 50315,
+   "<extra_id_59>": 50316,
+   "<extra_id_5>": 50262,
+   "<extra_id_60>": 50317,
+   "<extra_id_61>": 50318,
+   "<extra_id_62>": 50319,
+   "<extra_id_63>": 50320,
+   "<extra_id_64>": 50321,
+   "<extra_id_65>": 50322,
+   "<extra_id_66>": 50323,
+   "<extra_id_67>": 50324,
+   "<extra_id_68>": 50325,
+   "<extra_id_69>": 50326,
+   "<extra_id_6>": 50263,
+   "<extra_id_70>": 50327,
+   "<extra_id_71>": 50328,
+   "<extra_id_72>": 50329,
+   "<extra_id_73>": 50330,
+   "<extra_id_74>": 50331,
+   "<extra_id_75>": 50332,
+   "<extra_id_76>": 50333,
+   "<extra_id_77>": 50334,
+   "<extra_id_78>": 50335,
+   "<extra_id_79>": 50336,
+   "<extra_id_7>": 50264,
+   "<extra_id_80>": 50337,
+   "<extra_id_81>": 50338,
+   "<extra_id_82>": 50339,
+   "<extra_id_83>": 50340,
+   "<extra_id_84>": 50341,
+   "<extra_id_85>": 50342,
+   "<extra_id_86>": 50343,
+   "<extra_id_87>": 50344,
+   "<extra_id_88>": 50345,
+   "<extra_id_89>": 50346,
+   "<extra_id_8>": 50265,
+   "<extra_id_90>": 50347,
+   "<extra_id_91>": 50348,
+   "<extra_id_92>": 50349,
+   "<extra_id_93>": 50350,
+   "<extra_id_94>": 50351,
+   "<extra_id_95>": 50352,
+   "<extra_id_96>": 50353,
+   "<extra_id_97>": 50354,
+   "<extra_id_98>": 50355,
+   "<extra_id_99>": 50356,
+   "<extra_id_9>": 50266,
+   "<|endoftext|>": 50364
+ }
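
The mapping above registers 107 extra tokens on top of the base vocabulary (ids 50257-50363) plus `<|endoftext|>` at 50364: the standard T5 `<extra_id_*>` sentinels and the `<LM>`/`<SC1>`-`<SC6>` task prefixes used by FRED-T5's pretraining denoisers. A minimal sketch of checking these ids once the repo is downloaded, assuming the standard transformers AutoTokenizer API; the repo path is a placeholder:

```python
from transformers import AutoTokenizer

# "path/to/this/repo" is a placeholder; substitute this repository's id or a local checkout.
tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

# Ids taken directly from added_tokens.json above.
assert tokenizer.convert_tokens_to_ids("<LM>") == 50357
assert tokenizer.convert_tokens_to_ids("<SC1>") == 50358
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 50364

# Added tokens stay atomic: the prefix encodes to one id, not BPE sub-pieces.
print(tokenizer.encode("<SC1>", add_special_tokens=False))  # expected: [50358]
```
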
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/FRED-T5-XL",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1536,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 2,
+   "feed_forward_proj": "gated-gelu",
+   "gradient_checkpointing": false,
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 24,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.29.2",
+   "use_cache": true,
+   "vocab_size": 50364
+ }
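
Per this config, the checkpoint is a 24-layer gated-GELU T5 encoder-decoder (d_model 1536, 24 heads, d_ff 4096) with untied embeddings and a 50364-entry vocabulary, stored in bfloat16. A minimal loading sketch, assuming the standard transformers API; the path is again a placeholder:

```python
import torch
from transformers import T5ForConditionalGeneration

# Placeholder path; point at this repository or a local checkout.
model = T5ForConditionalGeneration.from_pretrained(
    "path/to/this/repo",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
)
print(model.config.num_layers, model.config.d_model)  # expected: 24 1536
```
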
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.29.2"
+ }
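
These generation defaults mirror config.json: decoding starts from id 0 (which doubles as the pad token) and stops at id 2 (`</s>`). A hypothetical end-to-end call, continuing the two sketches above; the prompt text is illustrative:

```python
# Continues the tokenizer/model sketches above.
inputs = tokenizer("<LM>your prompt here", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    max_new_tokens=64,
    # decoder_start_token_id=0, eos_token_id=2 and pad_token_id=0 are
    # picked up automatically from generation_config.json.
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
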
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2479090b30ec9e48861c1b7545fd89653cfaf4a1998ec3a14ee3c8683585f5c5
+ size 3480901445
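
The weight file itself lives in Git LFS; the three lines above are only the pointer (a sha256 digest and a size of ~3.48 GB). A hedged sketch of fetching and sanity-checking it with huggingface_hub; the repo id is a placeholder:

```python
import os
from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute this repository's actual id.
weights_path = hf_hub_download(repo_id="user/repo", filename="pytorch_model.bin")

# The downloaded size should match the LFS pointer above.
assert os.path.getsize(weights_path) == 3480901445
```
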
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
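
Note that tokenizer_class is GPT2Tokenizer: unlike stock T5, this model uses a byte-level BPE tokenizer built from vocab.json and merges.txt rather than SentencePiece, and model_max_length is transformers' "effectively unlimited" sentinel value. A small sketch of what these settings yield at load time, with the same placeholder path:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

# tokenizer_config.json routes loading to the byte-level BPE GPT2Tokenizer(Fast),
# constructed from vocab.json + merges.txt instead of a SentencePiece model.
print(type(tokenizer).__name__)

# Special tokens as declared in special_tokens_map.json.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
# expected: <s> </s> <pad> <|endoftext|>
```
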
vocab.json ADDED
The diff for this file is too large to render. See raw diff