[MM][Bugfix] Update hf_config to hf_text_config (#5319)

### What this PR does / why we need it?

Following https://github.com/vllm-project/vllm-ascend/pull/5205, this PR updates usages of
`model_config.hf_config` to `model_config.hf_text_config`.

Find more details at
https://github.com/vllm-project/vllm-ascend/pull/5205#issuecomment-3675417534
and
https://github.com/vllm-project/vllm-ascend/pull/5205#issuecomment-3677920872.
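
For reviewers less familiar with the distinction, here is a minimal, self-contained sketch (not code from this PR; the config objects below are made-up stand-ins, not real HF classes) of why `hf_text_config` is the safer accessor: for multimodal models the decoder fields live on a nested text config, while for text-only models `hf_text_config` resolves to `hf_config` itself.

```python
# Illustration only: hypothetical config objects standing in for HF configs.
# In vLLM, ModelConfig.hf_text_config resolves to the nested text config for
# multimodal models and to hf_config itself for text-only models.
from types import SimpleNamespace

# Text-only model: decoder fields sit on the top-level config.
text_only_cfg = SimpleNamespace(num_hidden_layers=32)

# Multimodal model: decoder fields sit on a nested text config,
# next to e.g. a vision config.
multimodal_cfg = SimpleNamespace(
    vision_config=SimpleNamespace(num_hidden_layers=24),
    text_config=SimpleNamespace(num_hidden_layers=48),
)

def text_config_of(hf_config):
    """Mimic the hf_text_config lookup: prefer the nested text config."""
    return getattr(hf_config, "text_config", hf_config)

assert text_config_of(text_only_cfg).num_hidden_layers == 32
assert text_config_of(multimodal_cfg).num_hidden_layers == 48
# Reading the top-level config directly would be wrong (or fail) in the
# multimodal case, which is the situation this PR guards against.
```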

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: release/v0.13.0
- vLLM main: 5fbfa8d9ef

Signed-off-by: shen-shanshan <467638484@qq.com>
Author: Shanshan Shen
Committed: 2026-01-06 16:41:39 +08:00 (committed by GitHub)
Commit: b94d589769 (parent: 293b2275df)
23 changed files with 44 additions and 43 deletions


@@ -697,7 +697,7 @@ def is_moe_layer(prefix: str) -> bool:
 def get_moe_params():
     from vllm.config import get_current_vllm_config
     vllm_config = get_current_vllm_config()
-    config = vllm_config.model_config.hf_config
+    config = vllm_config.model_config.hf_text_config
     n_routed_experts = getattr(config, 'n_routed_experts', 0)
     first_k_dense_replace = getattr(config, 'first_k_dense_replace',
                                     float('inf'))
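
As context for the hunk above, a hedged sketch (again with made-up config objects, not real HF classes) of how the `getattr` defaults behave once the lookup goes through the text config: configs without MoE fields degrade to values that effectively disable MoE routing instead of raising.

```python
# Hedged sketch of the getattr-default pattern used in get_moe_params;
# the config objects are hypothetical stand-ins.
from types import SimpleNamespace

def moe_params_of(config):
    # Missing attributes fall back to values that mean "no routed experts".
    n_routed_experts = getattr(config, "n_routed_experts", 0)
    first_k_dense_replace = getattr(config, "first_k_dense_replace", float("inf"))
    return n_routed_experts, first_k_dense_replace

dense_text_cfg = SimpleNamespace(num_hidden_layers=32)  # no MoE fields at all
moe_text_cfg = SimpleNamespace(n_routed_experts=64, first_k_dense_replace=1)

print(moe_params_of(dense_text_cfg))  # (0, inf) -> treated as non-MoE
print(moe_params_of(moe_text_cfg))    # (64, 1)
```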


@@ -91,7 +91,7 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
         self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
         self.v_head_dim = v_head_dim
         self.prefix = prefix
-        hf_config = get_current_vllm_config().model_config.hf_config
+        hf_config = get_current_vllm_config().model_config.hf_text_config
         self.enable_shared_expert_dp = get_ascend_config(
         ).enable_shared_expert_dp
         self.tp_size = get_tensor_model_parallel_world_size()


@@ -247,6 +247,6 @@ def reach_layer_for_shared_weight_series(layer: LinearBase):
 def is_hidden_layer(vllm_config, layer: LinearBase) -> bool:
-    num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
+    num_hidden_layers = vllm_config.model_config.hf_text_config.num_hidden_layers
     layer_idx = extract_layer_index(layer.prefix)
     return layer_idx < num_hidden_layers
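
For completeness, a hedged usage sketch of the check above; the prefix parser below is a simplified stand-in for vLLM's `extract_layer_index`, and the layer count is hard-coded here where the real code reads it from `hf_text_config`.

```python
# Simplified stand-in for the layer-index extraction used by is_hidden_layer.
def extract_layer_index_sketch(prefix: str) -> int:
    indices = [int(p) for p in prefix.split(".") if p.isdigit()]
    assert len(indices) == 1, f"expected exactly one layer index in {prefix!r}"
    return indices[0]

num_hidden_layers = 48  # in the real code this comes from hf_text_config

for prefix in ("model.layers.3.mlp.gate_up_proj",
               "model.layers.47.self_attn.qkv_proj"):
    layer_idx = extract_layer_index_sketch(prefix)
    print(prefix, layer_idx < num_hidden_layers)  # True for decoder layers
```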