Upload LlavaLlamaForCausalLM
config.json +2 -3
config.json
CHANGED
@@ -49,7 +49,7 @@
   "mm_vision_select_layer": -2,
   "mm_vision_tower": "openai/clip-vit-large-patch14-336",
   "mm_vision_tower_lr": 2e-06,
-  "model_type": "
+  "model_type": "llava_llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
@@ -65,6 +65,5 @@
   "use_cache": true,
   "use_mm_proj": true,
   "vision_tower_pretrained": null,
-  "vocab_size": 128256,
-  "image_token_index": 128257
+  "vocab_size": 128256
 }
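Because "llava_llama" is not a model_type that ships with stock transformers, a config like this one generally resolves only through custom modeling code. A minimal sketch of loading such a checkpoint, assuming it ships compatible remote code; the repo id below is a placeholder, not taken from this commit:

from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "org/llava-llama-checkpoint"  # hypothetical repo id

# Resolving the custom "llava_llama" model_type requires the matching
# config/model classes, here assumed to be shipped as remote code.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
assert config.model_type == "llava_llama"
assert config.vocab_size == 128256  # Llama-3 vocabulary size, per the diff

model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

If the checkpoint instead targets the LLaVA codebase (which defines LlavaLlamaForCausalLM), importing that package registers the "llava_llama" model_type with the Auto classes, so trust_remote_code is not needed.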