Merged
@@ -127,8 +127,9 @@ class DeepseekV2Config(PreTrainedConfig):
         "layers.*.self_attn.q_b_proj": "colwise",
         "layers.*.self_attn.kv_b_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.gate_up_proj": "colwise",
-        "layers.*.mlp.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_colwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
5 changes: 3 additions & 2 deletions src/transformers/models/deepseek_v2/modular_deepseek_v2.py
@@ -142,8 +142,9 @@ class DeepseekV2Config(LlamaConfig):
         "layers.*.self_attn.q_b_proj": "colwise",
         "layers.*.self_attn.kv_b_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.gate_up_proj": "colwise",
-        "layers.*.mlp.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_colwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }

     model_type = "deepseek_v2"
23 changes: 10 additions & 13 deletions src/transformers/models/deepseek_v3/configuration_deepseek_v3.py
@@ -131,19 +131,16 @@ class DeepseekV3Config(PreTrainedConfig):

     model_type = "deepseek_v3"
     keys_to_ignore_at_inference = ["past_key_values"]
-    base_model_tp_plan = { # TODO: only replicate attention layers when > first_k_dense_replace
-        "layers.*.mlp.experts.*.gate_proj": "local_colwise",
-        "layers.*.mlp.experts.*.up_proj": "local_colwise",
-        "layers.*.mlp.experts.*.down_proj": "local_rowwise",
-        "layers.*.mlp.experts.*": "local", # each expert is wrapped in a module list
-        "layers.*.mlp.shared_experts.gate_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.up_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.down_proj": "local_rowwise",
-        "layers.*.mlp.shared_experts": "local",
-        "layers.*.mlp.gate_proj": "local_colwise",
-        "layers.*.mlp.up_proj": "local_colwise",
-        "layers.*.mlp.down_proj": "local_rowwise",
-        "layers.*.mlp": "gather", # This is the only moment where results are gathered
+    base_model_tp_plan = {
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
+        "layers.*.mlp.shared_experts.gate_proj": "colwise",
+        "layers.*.mlp.shared_experts.up_proj": "colwise",
+        "layers.*.mlp.shared_experts.down_proj": "rowwise",
+        "layers.*.mlp.gate_proj": "colwise",
+        "layers.*.mlp.up_proj": "colwise",
+        "layers.*.mlp.down_proj": "rowwise",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
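Since these plans are plain class attributes, the new layout can be checked without loading any weights. A minimal sketch, not part of this diff, assuming a transformers build that already includes the updated DeepseekV3Config; the print formatting is illustrative.

# Minimal sketch: dump the tensor-parallel plan shipped with the config class.
from transformers import DeepseekV3Config

for pattern, style in DeepseekV3Config.base_model_tp_plan.items():
    print(f"{pattern:45} {style}")

# Expected to include the fused expert entries from the hunk above, e.g.
#   layers.*.mlp.experts.gate_up_proj   local_rowwise
#   layers.*.mlp.experts.down_proj      local_rowwise
#   layers.*.mlp.experts                gather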
23 changes: 10 additions & 13 deletions src/transformers/models/dots1/configuration_dots1.py
@@ -109,23 +109,20 @@ class Dots1Config(PreTrainedConfig):
     model_type = "dots1"
     keys_to_ignore_at_inference = ["past_key_values"]

-    base_model_tp_plan = { # TODO: only replicate attention layers when > first_k_dense_replace
+    base_model_tp_plan = {
         "layers.*.self_attn.q_proj": "colwise",
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "local_colwise",
-        "layers.*.mlp.experts.*.up_proj": "local_colwise",
-        "layers.*.mlp.experts.*.down_proj": "local_rowwise",
-        "layers.*.mlp.experts.*": "local", # each expert is wrapped in a module list
-        "layers.*.mlp.shared_experts.gate_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.up_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.down_proj": "local_rowwise",
-        "layers.*.mlp.shared_experts": "local",
-        "layers.*.mlp.gate_proj": "local_colwise",
-        "layers.*.mlp.up_proj": "local_colwise",
-        "layers.*.mlp.down_proj": "local_rowwise",
-        "layers.*.mlp": "gather", # This is the only moment where results are gathered
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
+        "layers.*.mlp.shared_experts.gate_proj": "colwise",
+        "layers.*.mlp.shared_experts.up_proj": "colwise",
+        "layers.*.mlp.shared_experts.down_proj": "rowwise",
+        "layers.*.mlp.gate_proj": "colwise",
+        "layers.*.mlp.up_proj": "colwise",
+        "layers.*.mlp.down_proj": "rowwise",
     }

     base_model_pp_plan = {
24 changes: 9 additions & 15 deletions src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py
@@ -122,21 +122,15 @@ class Ernie4_5_MoeConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        # sequence parallel is pretty slow
-        # "norm.weight": "sequence_parallel",
-        # "layers.*.input_layernorm.weight": "sequence_parallel",
-        # "layers.*.post_attention_layernorm.weight": "sequence_parallel",
-        "layers.*.mlp.shared_experts.gate_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.up_proj": "local_colwise",
-        "layers.*.mlp.shared_experts.down_proj": "local_rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "local_colwise",
-        "layers.*.mlp.experts.*.up_proj": "local_colwise",
-        "layers.*.mlp.experts.*.down_proj": "local_rowwise",
-        "layers.*.mlp.experts": "local",
-        "layers.*.mlp.gate_proj": "local_colwise",
-        "layers.*.mlp.up_proj": "local_colwise",
-        "layers.*.mlp.down_proj": "local_rowwise",
-        "layers.*.mlp": "gather",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
+        "layers.*.mlp.shared_experts.gate_proj": "colwise",
+        "layers.*.mlp.shared_experts.up_proj": "colwise",
+        "layers.*.mlp.shared_experts.down_proj": "rowwise",
+        "layers.*.mlp.gate_proj": "colwise",
+        "layers.*.mlp.up_proj": "colwise",
+        "layers.*.mlp.down_proj": "rowwise",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
6 changes: 3 additions & 3 deletions src/transformers/models/flex_olmo/configuration_flex_olmo.py
@@ -115,9 +115,9 @@ class FlexOlmoConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
         "layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
         "layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
6 changes: 3 additions & 3 deletions src/transformers/models/flex_olmo/modular_flex_olmo.py
@@ -125,9 +125,9 @@ class FlexOlmoConfig(OlmoeConfig):
         "layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
         "layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
         "layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
6 changes: 3 additions & 3 deletions src/transformers/models/glm4_moe/configuration_glm4_moe.py
@@ -121,9 +121,9 @@ class Glm4MoeConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.gate_proj": "colwise",
         "layers.*.mlp.up_proj": "colwise",
         "layers.*.mlp.down_proj": "rowwise",
6 changes: 3 additions & 3 deletions src/transformers/models/glm4_moe/modular_glm4_moe.py
@@ -135,9 +135,9 @@ class Glm4MoeConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.gate_proj": "colwise",
         "layers.*.mlp.up_proj": "colwise",
         "layers.*.mlp.down_proj": "rowwise",
@@ -129,9 +129,9 @@ class LongcatFlashConfig(PreTrainedConfig):
         "layers.*.mlps.*.gate_proj": "colwise",
         "layers.*.mlps.*.up_proj": "colwise",
         "layers.*.mlps.*.down_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }

     base_model_pp_plan = {
6 changes: 3 additions & 3 deletions src/transformers/models/minimax/configuration_minimax.py
@@ -138,9 +138,9 @@ class MiniMaxConfig(PreTrainedConfig):
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
         "layers.*.mlp.gate": "colwise_rep", # we need to replicate here to correctly route experts
-        "layers.*.mlp.experts.*.w1": "colwise",
-        "layers.*.mlp.experts.*.w2": "rowwise",
-        "layers.*.mlp.experts.*.w3": "colwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
6 changes: 3 additions & 3 deletions src/transformers/models/minimax/modular_minimax.py
@@ -163,9 +163,9 @@ class MiniMaxConfig(PreTrainedConfig):
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
         "layers.*.mlp.gate": "colwise_rep", # we need to replicate here to correctly route experts
-        "layers.*.mlp.experts.*.w1": "colwise",
-        "layers.*.mlp.experts.*.w2": "rowwise",
-        "layers.*.mlp.experts.*.w3": "colwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
10 changes: 4 additions & 6 deletions src/transformers/models/mixtral/configuration_mixtral.py
@@ -115,16 +115,14 @@ class MixtralConfig(PreTrainedConfig):
     model_type = "mixtral"
     keys_to_ignore_at_inference = ["past_key_values"]
     base_model_tp_plan = {
-        "layers.*.self_attn.q_proj": "local_colwise",
-        "layers.*.self_attn.k_proj": "local_colwise",
-        "layers.*.self_attn.v_proj": "local_colwise",
-        "layers.*.self_attn.o_proj": "local_rowwise",
-        "layers.*.self_attn": "gather",
+        "layers.*.self_attn.q_proj": "colwise",
+        "layers.*.self_attn.k_proj": "colwise",
+        "layers.*.self_attn.v_proj": "colwise",
+        "layers.*.self_attn.o_proj": "rowwise",
         "layers.*.mlp.gate": "ep_router", # we need to replicate here to correctly route experts
         "layers.*.mlp.experts.gate_up_proj": "local_colwise",
         "layers.*.mlp.experts.down_proj": "local_rowwise",
         "layers.*.mlp.experts": "gather",
-        # "layers.*.mlp.experts.gate_up_proj": "local_packed_rowwise" ? if you load from
     }
     base_model_pp_plan = {
         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
6 changes: 3 additions & 3 deletions src/transformers/models/qwen3_moe/configuration_qwen3_moe.py
@@ -121,9 +121,9 @@ class Qwen3MoeConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.gate_proj": "colwise",
         "layers.*.mlp.up_proj": "colwise",
         "layers.*.mlp.down_proj": "rowwise",
@@ -135,9 +135,9 @@ class Qwen3NextConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.shared_expert.gate_proj": "colwise",
         "layers.*.mlp.shared_expert.up_proj": "colwise",
         "layers.*.mlp.shared_expert.down_proj": "rowwise",
@@ -268,9 +268,9 @@ class Qwen3OmniMoeTextConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.gate_proj": "colwise",
         "layers.*.mlp.up_proj": "colwise",
         "layers.*.mlp.down_proj": "rowwise",
@@ -712,9 +712,9 @@ class Qwen3OmniMoeTalkerTextConfig(PreTrainedConfig):
         "layers.*.self_attn.k_proj": "colwise",
         "layers.*.self_attn.v_proj": "colwise",
         "layers.*.self_attn.o_proj": "rowwise",
-        "layers.*.mlp.experts.*.gate_proj": "colwise",
-        "layers.*.mlp.experts.*.up_proj": "colwise",
-        "layers.*.mlp.experts.*.down_proj": "rowwise",
+        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+        "layers.*.mlp.experts.down_proj": "local_rowwise",
+        "layers.*.mlp.experts": "gather",
         "layers.*.mlp.gate_proj": "colwise",
         "layers.*.mlp.up_proj": "colwise",
         "layers.*.mlp.down_proj": "rowwise",
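For reference, these plans are what the automatic tensor-parallel path resolves to at load time. A hedged usage sketch, not part of this diff: the checkpoint id and script name are illustrative, and it assumes a recent transformers release with tp_plan="auto" support, launched under torchrun across the tensor-parallel ranks.

# Hedged sketch: load an MoE checkpoint so that the config's base_model_tp_plan
# (as updated in this diff) is applied automatically.
# Launch with: torchrun --nproc-per-node 4 tp_smoke_test.py   (script name illustrative)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen3-30B-A3B"  # illustrative MoE checkpoint
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, tp_plan="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

inputs = tokenizer("Routing a quick smoke test through the experts:", return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))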