feat(phase-04): Wyoming Satellite integration + OpenClaw HA components

## Voice Pipeline (P3)
- Replace openWakeWord daemon with Wyoming Satellite approach
- Add Wyoming Satellite service on port 10700 for HA voice pipeline (handshake sketch after this list)
- Update setup.sh with cross-platform sed compatibility (macOS/Linux)
- Add version field to Kokoro TTS voice info
- Update launchd service loader to use Wyoming Satellite
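
A minimal way to sanity-check the new satellite endpoint is a Wyoming `describe` handshake. This sketch assumes the standard Wyoming framing (one JSON event header per line over TCP) and the localhost port layout above; it is not taken from the repo's scripts:

```python
# Health-check sketch for the Wyoming satellite endpoint. Host/port are
# assumptions from this commit's service layout, not pinned config.
import json
import socket

def wyoming_describe(host: str = "127.0.0.1", port: int = 10700) -> dict:
    """Send a `describe` event and return the reply's JSON header."""
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(b'{"type": "describe"}\n')
        # The reply header is a single JSON line; any trailing data/payload
        # bytes are ignored for this quick check.
        header = sock.makefile("rb").readline()
    return json.loads(header)

if __name__ == "__main__":
    info = wyoming_describe()
    print(info.get("type"))  # expect "info" from a healthy satellite
```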

## Home Assistant Integration (P4)
- Add custom conversation agent component (openclaw_conversation)
  - Fix: Use IntentResponse instead of plain strings (HA API requirement; sketch after this list)
  - Support both HTTP API and CLI fallback modes
  - Config flow for easy HA UI setup
- Add OpenClaw bridge scripts (Python + Bash)
- Add ha-ctl utility for HA entity control
  - Fix: Use context manager for token file reading
- Add HA configuration examples and documentation
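
For reference, a hedged sketch of what the IntentResponse fix looks like in a conversation agent. The class and helper names (`OpenClawConversationAgent`, `_ask_openclaw`) are illustrative, not the actual component code; the imports and result types are the real HA conversation API:

```python
from homeassistant.components import conversation
from homeassistant.helpers import intent

class OpenClawConversationAgent(conversation.AbstractConversationAgent):
    @property
    def supported_languages(self) -> list[str]:
        return ["en"]

    async def async_process(
        self, user_input: conversation.ConversationInput
    ) -> conversation.ConversationResult:
        answer = await self._ask_openclaw(user_input.text)  # hypothetical helper
        # HA expects an IntentResponse wrapped in a ConversationResult,
        # not a bare string.
        response = intent.IntentResponse(language=user_input.language)
        response.async_set_speech(answer)
        return conversation.ConversationResult(
            response=response,
            conversation_id=user_input.conversation_id,
        )
```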

## Infrastructure
- Add mem0 backup automation (launchd + script; sketch after this list)
- Add n8n workflow templates (morning briefing, notification router)
- Add VS Code workspace configuration
- Reorganize model files into categorized folders:
  - lmstudio-community/
  - mlx-community/
  - bartowski/
  - mradermacher/
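
The backup script itself isn't shown here; a minimal sketch of what a launchd-invoked mem0 backup could do (paths, naming, and lack of retention policy are assumptions):

```python
# Sketch: archive the mem0 data directory to a timestamped tarball.
import tarfile
import time
from pathlib import Path

def backup_mem0(data_dir: str = "~/.mem0", dest_dir: str = "~/backups/mem0") -> Path:
    src = Path(data_dir).expanduser()
    dest = Path(dest_dir).expanduser()
    dest.mkdir(parents=True, exist_ok=True)
    archive = dest / f"mem0-{time.strftime('%Y%m%d-%H%M%S')}.tar.gz"
    with tarfile.open(archive, "w:gz") as tar:
        tar.add(src, arcname=src.name)
    return archive

if __name__ == "__main__":
    print(f"Backed up to {backup_mem0()}")
```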

## Documentation
- Update PROJECT_PLAN.md with Wyoming Satellite architecture
- Update TODO.md with completed Wyoming integration tasks
- Add OPENCLAW_INTEGRATION.md for HA setup guide

## Testing
- Verified Wyoming services running (STT:10300, TTS:10301, Satellite:10700; port check after this list)
- Verified OpenClaw CLI accessibility
- Confirmed cross-platform compatibility fixes
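
A quick reachability check matching the verification above (a sketch; it only confirms the TCP ports accept connections, not end-to-end pipeline behavior):

```python
import socket

SERVICES = {"STT": 10300, "TTS": 10301, "Satellite": 10700}

for name, port in SERVICES.items():
    try:
        socket.create_connection(("127.0.0.1", port), timeout=2).close()
        print(f"{name} ({port}): up")
    except OSError:
        print(f"{name} ({port}): DOWN")
```
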
Author: Aodhan Collins
Date: 2026-03-08 02:06:37 +00:00
Parent: 9eb5633115
Commit: 6a0bae2a0b
119 changed files with 780,808 additions and 64 deletions


@@ -0,0 +1,63 @@
{
  "architectures": [
    "DeepseekV2ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_deepseek.DeepseekV2Config",
    "AutoModel": "modeling_deepseek.DeepseekV2Model",
    "AutoModelForCausalLM": "modeling_deepseek.DeepseekV2ForCausalLM"
  },
  "aux_loss_alpha": 0.001,
  "bos_token_id": 100000,
  "eos_token_id": 100001,
  "first_k_dense_replace": 1,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 10944,
  "kv_lora_rank": 512,
  "max_position_embeddings": 163840,
  "model_type": "deepseek_v2",
  "moe_intermediate_size": 1408,
  "moe_layer_freq": 1,
  "n_group": 1,
  "n_routed_experts": 64,
  "n_shared_experts": 2,
  "norm_topk_prob": false,
  "num_attention_heads": 16,
  "num_experts_per_tok": 6,
  "num_hidden_layers": 27,
  "num_key_value_heads": 16,
  "pretraining_tp": 1,
  "q_lora_rank": null,
  "qk_nope_head_dim": 128,
  "qk_rope_head_dim": 64,
  "quantization": {
    "group_size": 64,
    "bits": 4
  },
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "beta_fast": 32,
    "beta_slow": 1,
    "factor": 40,
    "mscale": 0.707,
    "mscale_all_dim": 0.707,
    "original_max_position_embeddings": 4096,
    "type": "yarn"
  },
  "rope_theta": 10000,
  "routed_scaling_factor": 1.0,
  "scoring_func": "softmax",
  "seq_aux": true,
  "tie_word_embeddings": false,
  "topk_group": 1,
  "topk_method": "greedy",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.3",
  "use_cache": true,
  "v_head_dim": 128,
  "vocab_size": 102400
}


@@ -0,0 +1,206 @@
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class DeepseekV2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of DeepSeek-V2.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 102400):
            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DeepseekV2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1407):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 30):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        n_shared_experts (`int`, *optional*, defaults to None):
            Number of shared experts; None means a dense model.
        n_routed_experts (`int`, *optional*, defaults to None):
            Number of routed experts; None means a dense model.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        topk_method (`str`, *optional*, defaults to `greedy`):
            Top-k method used in the routed gate.
        n_group (`int`, *optional*, defaults to None):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to None):
            Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to None):
            Number of selected experts; None means a dense model.
        moe_layer_freq (`int`, *optional*, defaults to 1):
            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers in shallow layers (embed -> dense -> dense -> ... -> dense -> moe -> moe ... -> lm_head).
                                                             \--k dense layers--/
        norm_topk_prob (`bool`, *optional*, defaults to False):
            Whether to normalize the weights of the routed experts.
        scoring_func (`str`, *optional*, defaults to 'softmax'):
            Method of computing expert weights.
        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
            Auxiliary loss weight coefficient.
        seq_aux (`bool`, *optional*, defaults to True):
            Whether to compute the auxiliary loss for each individual sample.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import DeepseekV2Model, DeepseekV2Config

    >>> # Initializing a Deepseek-V2 style configuration
    >>> configuration = DeepseekV2Config()

    >>> # Initializing a model from the configuration
    >>> model = DeepseekV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deepseek_v2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=102400,
        hidden_size=4096,
        intermediate_size=11008,
        moe_intermediate_size=1407,
        num_hidden_layers=30,
        num_attention_heads=32,
        num_key_value_heads=32,
        n_shared_experts=None,
        n_routed_experts=None,
        ep_size=1,
        routed_scaling_factor=1.0,
        kv_lora_rank=512,
        q_lora_rank=1536,
        qk_rope_head_dim=64,
        v_head_dim=128,
        qk_nope_head_dim=128,
        topk_method="greedy",
        n_group=None,
        topk_group=None,
        num_experts_per_tok=None,
        moe_layer_freq=1,
        first_k_dense_replace=0,
        norm_topk_prob=False,
        scoring_func="softmax",
        aux_loss_alpha=0.001,
        seq_aux=True,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=100000,
        eos_token_id=100001,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.ep_size = ep_size
        self.routed_scaling_factor = routed_scaling_factor
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.topk_method = topk_method
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_layer_freq = moe_layer_freq
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
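
Given the `auto_map` in the config.json above, this class is resolved through the standard transformers entry point; a short usage sketch (the local path is a placeholder for wherever the model folder lives):

```python
from transformers import AutoConfig

# trust_remote_code lets transformers import configuration_deepseek.py via auto_map
config = AutoConfig.from_pretrained(
    "path/to/the-deepseek-v2-model-folder",  # placeholder path
    trust_remote_code=True,
)
print(config.model_type, config.num_hidden_layers)  # -> deepseek_v2 27
```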


@@ -0,0 +1,922 @@
{
"metadata": {
"total_size": 8839977984
},
"weight_map": {
"lm_head.biases": "model-00002-of-00002.safetensors",
"lm_head.scales": "model-00002-of-00002.safetensors",
"lm_head.weight": "model-00002-of-00002.safetensors",
"model.embed_tokens.biases": "model-00001-of-00002.safetensors",
"model.embed_tokens.scales": "model-00001-of-00002.safetensors",
"model.embed_tokens.weight": "model-00001-of-00002.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.16.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.gate.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.down_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.down_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.gate_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.gate_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.up_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.up_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.switch_mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_a_proj_with_mqa.biases": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_a_proj_with_mqa.scales": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_b_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_b_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.o_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.o_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.q_proj.biases": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.q_proj.scales": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.gate.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.down_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.down_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.up_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.up_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.switch_mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_a_proj_with_mqa.biases": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_a_proj_with_mqa.scales": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_b_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_b_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.biases": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.norm.weight": "model-00002-of-00002.safetensors"
}
}
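
The index above is the standard safetensors sharding manifest: weight_map maps every tensor name to the shard file that stores it, and each 4-bit quantized projection contributes three entries (weight, scales, biases). Below is a minimal sketch for sanity-checking such an index against the shards on disk; the folder name is hypothetical.

import json
from collections import Counter
from pathlib import Path

# Hypothetical model folder holding model.safetensors.index.json and the shards.
model_dir = Path("models/DeepSeek-V2-Lite-4bit")
index = json.loads((model_dir / "model.safetensors.index.json").read_text())
weight_map = index["weight_map"]

# Count tensors per shard and confirm every referenced shard file exists.
for shard, n_tensors in sorted(Counter(weight_map.values()).items()):
    status = "ok" if (model_dir / shard).exists() else "MISSING"
    print(f"{shard}: {n_tensors} tensors [{status}]")

# Each quantized matrix carries weight + scales + biases; count the matrices.
quantized = {name.rsplit(".", 1)[0] for name in weight_map if name.endswith(".scales")}
print(f"{len(quantized)} quantized projection matrices")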

View File

@@ -0,0 +1,23 @@
{
"bos_token": {
"content": "<begin▁of▁sentence>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<end▁of▁sentence>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<end▁of▁sentence>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}
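
This map pins pad_token to the same string as eos_token, the usual fallback for models that ship no dedicated padding token. A quick check after loading, with a hypothetical local path:

from transformers import AutoTokenizer

# Hypothetical path to the tokenizer files from this commit.
tok = AutoTokenizer.from_pretrained("models/DeepSeek-V2-Lite-4bit")

assert tok.pad_token == tok.eos_token        # both "<｜end▁of▁sentence｜>"
assert tok.pad_token_id == tok.eos_token_id  # both 100001 per config.json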

View File

@@ -0,0 +1,38 @@
from typing import List, Optional, Union

from transformers.models.llama import LlamaTokenizerFast


class DeepseekTokenizerFast(LlamaTokenizerFast):

    def convert_ids_to_tokens(
        self, ids: Union[int, List[int]], skip_special_tokens: bool = False
    ) -> Union[str, List[str]]:
        """
        Converts a single index or a sequence of indices into a token or a
        sequence of tokens, using the vocabulary and added tokens.

        Args:
            ids (`int` or `List[int]`):
                The token id (or token ids) to convert to tokens.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.

        Returns:
            `str` or `List[str]`: The decoded token(s).
        """
        if isinstance(ids, int):
            return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            index = int(index)
            if skip_special_tokens and index in self.all_special_ids:
                continue
            token = self._tokenizer.id_to_token(index)
            # Map out-of-vocabulary ids to "" rather than None so that joins
            # and other downstream string handling do not fail.
            tokens.append(token if token is not None else "")
        return tokens

    def _convert_id_to_token(self, index: int) -> Optional[str]:
        token = self._tokenizer.id_to_token(int(index))
        return token if token is not None else ""
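
A short usage sketch of the override, assuming this file and the accompanying tokenizer assets sit in a local model folder (paths hypothetical):

# The class is defined in the file above; the import assumes it is on sys.path.
from tokenization_deepseek_fast import DeepseekTokenizerFast

tok = DeepseekTokenizerFast.from_pretrained("models/DeepSeek-V2-Lite-4bit")

ids = tok.encode("Hello")  # BOS id 100000 is prepended (add_bos_token: true)
print(tok.convert_ids_to_tokens(ids))
# e.g. ['<｜begin▁of▁sentence｜>', 'Hello']; exact pieces depend on the vocab
print(tok.convert_ids_to_tokens(ids, skip_special_tokens=True))
# special tokens are filtered out
print(tok.convert_ids_to_tokens(123456789))
# '' for an out-of-vocabulary id, instead of None or an exception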

View File

@@ -0,0 +1,162 @@
{
"add_bos_token": true,
"add_eos_token": false,
"add_prefix_space": null,
"added_tokens_decoder": {
"100000": {
"content": "<begin▁of▁sentence>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"100001": {
"content": "<end▁of▁sentence>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"100002": {
"content": "<fim▁hole>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100003": {
"content": "<fim▁begin>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100004": {
"content": "<fim▁end>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100005": {
"content": "<completion>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100006": {
"content": "<User>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100007": {
"content": "<Assistant>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100008": {
"content": "<|EOT|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"100009": {
"content": "<tool▁calls▁begin>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100010": {
"content": "<tool▁calls▁end>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100011": {
"content": "<tool▁call▁begin>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100012": {
"content": "<tool▁call▁end>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100013": {
"content": "<tool▁outputs▁begin>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100014": {
"content": "<tool▁outputs▁end>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100015": {
"content": "<tool▁output▁begin>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100016": {
"content": "<tool▁output▁end>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"100017": {
"content": "<tool▁sep>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
}
},
"bos_token": "<begin▁of▁sentence>",
"chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
"clean_up_tokenization_spaces": false,
"eos_token": "<end▁of▁sentence>",
"legacy": true,
"model_max_length": 16384,
"pad_token": "<end▁of▁sentence>",
"sp_model_kwargs": {},
"tokenizer_class": "LlamaTokenizer",
"unk_token": null,
"use_default_system_prompt": false
}
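
The chat_template value is ordinary Jinja. A short sketch that renders it with the jinja2 package directly to show the resulting prompt layout; the token strings are taken from this file and the example messages are invented:

from jinja2 import Template

chat_template = (
    "{% if not add_generation_prompt is defined %}"
    "{% set add_generation_prompt = false %}{% endif %}"
    "{{ bos_token }}{% for message in messages %}"
    "{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ 'Assistant: ' + message['content'] + eos_token }}"
    "{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}"
    "{% endif %}{% endfor %}"
    "{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Turn on the office lights."},
]

prompt = Template(chat_template).render(
    messages=messages,
    bos_token="<｜begin▁of▁sentence｜>",
    eos_token="<｜end▁of▁sentence｜>",
    add_generation_prompt=True,
)
print(prompt)
# <｜begin▁of▁sentence｜>You are a helpful assistant.
#
# User: Turn on the office lights.
#
# Assistant: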