visualglm-6b / config.json
{
  "_name_or_path": "THUDM/visualglm-6b",
  "architectures": [
    "ChatGLMModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_chatglm.ChatGLMConfig",
    "AutoModel": "modeling_chatglm.ChatGLMForConditionalGenerationWithImage",
    "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGenerationWithImage"
  },
  "bos_token_id": 130004,
  "eos_token_id": 130005,
  "mask_token_id": 130000,
  "gmask_token_id": 130001,
  "pad_token_id": 3,
  "hidden_size": 4096,
  "inner_hidden_size": 16384,
  "layernorm_epsilon": 1e-05,
  "max_sequence_length": 2048,
  "model_type": "chatglm",
  "num_attention_heads": 32,
  "num_layers": 28,
  "position_encoding_2d": true,
  "torch_dtype": "float16",
  "transformers_version": "4.23.1",
  "use_cache": true,
  "vocab_size": 130528,
  "image_length": 32,
  "eva_config": {
    "num_layers": 39,
    "hidden_size": 1408,
    "num_attention_heads": 16,
    "vocab_size": 1,
    "layernorm_order": "pre",
    "model_parallel_size": 1,
    "max_sequence_length": 257,
    "inner_hidden_size": 6144,
    "use_final_layernorm": false,
    "layernorm_epsilon": 1e-06,
    "image_size": [
      224,
      224
    ],
    "pre_len": 1,
    "post_len": 0,
    "in_channels": 3,
    "num_classes": 0,
    "patch_size": 14
  },
  "qformer_config": {
    "num_layers": 12,
    "hidden_size": 768,
    "num_attention_heads": 12,
    "vocab_size": 32,
    "layernorm_order": "post",
    "model_parallel_size": 1,
    "max_sequence_length": 0,
    "is_decoder": [
      true,
      false,
      true,
      false,
      true,
      false,
      true,
      false,
      true,
      false,
      true,
      false
    ],
    "cross_attn_hidden_size": 1408,
    "layernorm_epsilon": 1e-12
  }
}
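The "auto_map" block above is what makes this checkpoint loadable through the generic transformers Auto classes: AutoConfig and AutoModel are redirected to the custom ChatGLMConfig and ChatGLMForConditionalGenerationWithImage classes shipped alongside this file, so loading requires trust_remote_code=True. Below is a minimal loading sketch, assuming a CUDA device and the float16 weights declared by "torch_dtype"; the chat() helper and the image path are from the repo's custom modeling code and usage examples, not the core transformers API, and "example.jpg" is a placeholder.

    from transformers import AutoModel, AutoTokenizer

    # trust_remote_code=True lets transformers import configuration_chatglm.py and
    # modeling_chatglm.py from the repo, as declared in "auto_map" above.
    tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
    model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()

    # chat() is defined by the custom VisualGLM modeling code: the image is encoded by
    # the EVA ViT + Q-Former described in eva_config/qformer_config and injected into
    # the prompt as image_length (32) prefix tokens. Illustrative path and prompt.
    response, history = model.chat(tokenizer, "example.jpg", "Describe this image.", history=[])
    print(response)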