bias: false
block_size: 8192
gelu_approximate: none
head_size: 128
hf_config:
  name: Meta-Llama-3-8B-Instruct
  org: meta-llama
intermediate_size: 14336
lm_head_bias: false
mlp_class_name: LLaMAMLP
n_embd: 4096
n_expert: 0
n_expert_per_token: 0
n_head: 32
n_layer: 32
n_query_groups: 8
name: Llama-3-8B-Instruct
norm_class_name: RMSNorm
norm_eps: 1.0e-05
padded_vocab_size: 128256
padding_multiple: 512
parallel_residual: false
rope_base: 500000
rope_condense_ratio: 1
rotary_percentage: 1.0
scale_embeddings: false
shared_attention_norm: false
vocab_size: 128000
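To make the geometry in this config concrete, here is a minimal sketch (not part of the config itself) that loads the YAML with PyYAML and checks a couple of derived quantities, such as the per-head dimension and the grouped-query attention ratio. The file name `model_config.yaml` is a hypothetical placeholder for wherever this config is saved.

```python
# Minimal sketch, assuming the YAML above is saved as "model_config.yaml"
# (hypothetical path) and that PyYAML is installed.
import yaml

with open("model_config.yaml") as f:
    cfg = yaml.safe_load(f)

# head_size is the embedding width split across attention heads:
# 4096 / 32 = 128.
assert cfg["head_size"] == cfg["n_embd"] // cfg["n_head"]

# Grouped-query attention: 32 query heads share 8 key/value groups,
# so 4 query heads attend per KV head.
queries_per_kv = cfg["n_head"] // cfg["n_query_groups"]
print(f"{queries_per_kv} query heads per KV group")

# The tokenizer vocab (128000) is smaller than padded_vocab_size (128256),
# which matches the Hugging Face vocab including special tokens.
assert cfg["vocab_size"] <= cfg["padded_vocab_size"]
```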