[MM][Bugfix] Update hf_config to hf_text_config (#5319)

### What this PR does / why we need it?

Following https://github.com/vllm-project/vllm-ascend/pull/5205, replace
accesses to `hf_config` with `hf_text_config`.

Find more details at
https://github.com/vllm-project/vllm-ascend/pull/5205#issuecomment-3675417534
and
https://github.com/vllm-project/vllm-ascend/pull/5205#issuecomment-3677920872.
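
To illustrate the pattern applied across these files, here is a minimal
sketch (the helper function below is hypothetical; `hf_text_config` is the
real `ModelConfig` attribute in vLLM):

```python
# Hypothetical helper showing the hf_config -> hf_text_config change.
# In vLLM, ModelConfig.hf_text_config resolves to a multimodal model's
# nested text config and falls back to hf_config itself for text-only
# models, so decoder attributes such as num_hidden_layers are always
# reachable through it.
def get_num_hidden_layers(model_config) -> int:
    # Before: breaks for multimodal models, whose top-level hf_config
    # nests decoder settings under text_config.
    # return model_config.hf_config.num_hidden_layers

    # After: correct for both text-only and multimodal models.
    return model_config.hf_text_config.num_hidden_layers
```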

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: release/v0.13.0
- vLLM main: 5fbfa8d9ef

Signed-off-by: shen-shanshan <467638484@qq.com>
Shanshan Shen, 2026-01-06 16:41:39 +08:00 (committed by GitHub)
commit b94d589769 (parent 293b2275df)
23 changed files with 44 additions and 43 deletions


```diff
@@ -96,7 +96,7 @@ class KVPoolWorker:
         partitions = None
         if self.kv_role == "kv_consumer" and self.consumer_is_to_put:
-            num_hidden_layers = model_config.hf_config.num_hidden_layers
+            num_hidden_layers = model_config.hf_text_config.num_hidden_layers
             partition_list_str = vllm_config.kv_transfer_config.kv_connector_extra_config.get(
                 "prefill_pp_layer_partition", None)
             prefill_pp_size = int(
```


```diff
@@ -345,7 +345,7 @@ class KVCacheRecvingThread(threading.Thread):
         self.vllm_config = vllm_config
         self.model_config = self.vllm_config.model_config
         self.block_size = self.vllm_config.cache_config.block_size
-        self.num_layers = self.model_config.hf_config.num_hidden_layers
+        self.num_layers = self.model_config.hf_text_config.num_hidden_layers
         self.pp_layer_indices = {
             rank:
             get_prefill_pp_indices(self.num_layers, rank,
```


```diff
@@ -167,7 +167,7 @@ def init_ascend_model_parallel(parallel_config: ParallelConfig, ):
     global _SHARED_WEIGHT
     # TODO: Check if the model is Deepseek V3.2 with enabled SFA CP and activated shared weights. It will then be normalized within the PCP parameters. -- clrs97
-    is_ds_v32 = hasattr(vllm_config.model_config.hf_config, "index_topk")
+    is_ds_v32 = hasattr(vllm_config.model_config.hf_text_config, "index_topk")
     if enable_sp() and is_ds_v32 and _SHARED_WEIGHT is None:
         _SHARED_WEIGHT = _create_shared_weight_group("CP_shared_weight")
     # TODO: Extract and unify the logic across different communication group.
```
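
For context on the `hasattr` check in the last hunk, a self-contained sketch
of the idiom (the configs below are stand-ins, not the real DeepSeek classes):

```python
from types import SimpleNamespace

# Stand-in configs: a bare text config and a multimodal wrapper around it.
text_cfg = SimpleNamespace(num_hidden_layers=61, index_topk=2048)
mm_cfg = SimpleNamespace(text_config=text_cfg)

def get_text_config(cfg):
    # Mirrors the behavior hf_text_config relies on: return the nested
    # text config when present, otherwise the config itself.
    return getattr(cfg, "text_config", cfg)

# hasattr on the resolved text config detects the DeepSeek-V3.2-style
# "index_topk" field regardless of how the config is wrapped.
assert hasattr(get_text_config(text_cfg), "index_topk")
assert hasattr(get_text_config(mm_cfg), "index_topk")
```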