Text Generation
Transformers
Safetensors
llama
finetuned
quantized
4-bit precision
gptq
dataset:ai2_arc
dataset:unalignment/spicy-3.1
dataset:codeparrot/apps
dataset:facebook/belebele
dataset:boolq
dataset:jondurbin/cinematika-v0.1
dataset:drop
dataset:lmsys/lmsys-chat-1m
dataset:TIGER-Lab/MathInstruct
dataset:cais/mmlu
dataset:Muennighoff/natural-instructions
dataset:openbookqa
dataset:piqa
dataset:Vezora/Tested-22k-Python-Alpaca
dataset:cakiki/rosetta-code
dataset:Open-Orca/SlimOrca
dataset:spider
dataset:squad_v2
dataset:migtissera/Synthia-v1.3
dataset:winogrande
dataset:nvidia/HelpSteer
dataset:Intel/orca_dpo_pairs
dataset:unalignment/toxic-dpo-v0.1
dataset:jondurbin/truthy-dpo-v0.1
dataset:allenai/ultrafeedback_binarized_cleaned
dataset:Squish42/bluemoon-fandom-1-1-rp-cleaned
dataset:LDJnr/Capybara
dataset:JULIELab/EmoBank
dataset:kingbri/PIPPA-shareGPT
Inference Endpoints
text-generation-inference
has_space
conversational
Eval Results
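
The tags above describe a GPTQ 4-bit quantization of a finetuned Llama-architecture model, served via text-generation-inference. As a minimal sketch (assuming the repository id from `_name_or_path` in the config below also resolves to a loadable checkpoint, which this page does not confirm), such a model can be loaded with transformers:

```python
# Minimal sketch: loading the checkpoint with transformers.
# The repo id is taken from _name_or_path in the config below; whether that
# exact repo hosts the GPTQ-quantized weights is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "one-man-army/UNA-34Beagles-32K-bf16-v1"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # shard the 34B model across available GPUs
)

prompt = "Explain grouped-query attention in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The model's config.json, reproduced below, confirms the Llama architecture and the 32K context window advertised in the model name.
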
{
  "_name_or_path": "one-man-army/UNA-34Beagles-32K-bf16-v1",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 7168,
  "initializer_range": 0.02,
  "intermediate_size": 20480,
  "max_position_embeddings": 32768,
  "model_type": "llama",
  "num_attention_heads": 56,
  "num_hidden_layers": 60,
  "num_key_value_heads": 8,
  "pad_token_id": 1,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 5000000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.36.2",
  "use_cache": true,
  "vocab_size": 64000
}
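
As a sanity check on the shape parameters above (which match the Yi-34B family: 7168 hidden size, 60 layers, 56 query heads with 8 KV heads, 64k vocabulary), here is a hedged sketch that rebuilds the config with transformers and estimates the parameter count; the formula ignores norms and biases:

```python
# Sketch: recreate the configuration above and estimate the parameter count.
# All values are copied from the config.json shown above.
from transformers import LlamaConfig

config = LlamaConfig(
    hidden_size=7168,
    intermediate_size=20480,
    num_hidden_layers=60,
    num_attention_heads=56,
    num_key_value_heads=8,   # grouped-query attention: 8 KV heads serve 56 query heads
    max_position_embeddings=32768,
    rope_theta=5_000_000.0,
    vocab_size=64000,
    rms_norm_eps=1e-05,
    hidden_act="silu",
    attention_bias=False,
    tie_word_embeddings=False,
)

# Rough count for a Llama-style decoder: embeddings + per-layer attention
# and MLP projections + untied LM head; norms and biases are negligible.
h, i = config.hidden_size, config.intermediate_size
head_dim = h // config.num_attention_heads       # 128
kv_dim = config.num_key_value_heads * head_dim   # 1024
attn = 2 * h * h + 2 * h * kv_dim                # q, o (h x h) + k, v (h x kv)
mlp = 3 * h * i                                  # gate, up, down projections
total = 2 * config.vocab_size * h + config.num_hidden_layers * (attn + mlp)
print(f"~{total / 1e9:.1f}B parameters")         # ~34.4B, consistent with a 34B model
```

Because `tie_word_embeddings` is false, the input embedding and the LM head are counted separately, hence the factor of two on the vocabulary term.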