{ "activation_type": "swiglu", "alibi": false, "alibi_bias_max": 8.0, "architectures": [ "LlavaOLMoBitnet1BForCausalLM" ], "attention_dropout": 0.0, "attention_layer_norm": false, "attention_layer_norm_with_affine": false, "bias_for_layer_norm": false, "block_group_size": 1, "block_type": "sequential", "clip_qkv": null, "d_model": 2048, "embedding_dropout": 0.0, "embedding_size": 50304, "eos_token_id": 50279, "bos_token_id": 50279, "flash_attention": true, "include_bias": false, "init_cutoff_factor": null, "init_device": "cpu", "init_fn": "mitchell", "init_std": 0.02, "layer_norm_type": "rms", "layer_norm_with_affine": true, "max_sequence_length": 2048, "mlp_hidden_size": null, "mlp_ratio": 8, "model_type": "llava", "multi_query_attention": false, "n_heads": 16, "n_layers": 16, "pad_token_id": 1, "precision": "amp_bf16", "residual_dropout": 0.0, "rope": true, "rope_full_precision": true, "scale_logits": false, "ternary": true, "transformers_version": "4.38.2", "use_cache": false, "vocab_size": 50280, "inference_mode":false, "weight_tying": true, "auto_map": { "AutoConfig": "configuration_olmo.OLMoConfig", "AutoModelForCausalLM": "modeling_olmo.OLMoForCausalLM" }, "freeze_mm_vision_resampler": false, "mm_hidden_size": 1024, "mm_projector_type": "mlp2x_gelu", "mm_resampler_type": null, "mm_use_im_patch_token": false, "mm_use_im_start_end": false, "mm_vision_select_feature": "patch", "mm_vision_select_layer": -2, "mm_vision_tower": "openai/clip-vit-large-patch14-336", "tune_mm_mlp_adapter": false, "tune_mm_vision_resampler": false, "unfreeze_mm_vision_tower": false, "use_mm_proj": true, "image_aspect_ratio": "pad" }