[refact] unified soc_version code (#4359)

### What this PR does / why we need it?

Currently, there are two code paths for determining the chip type:
`get_ascend_soc_version` uses the `get_soc_version` API from torch_npu, while
`is_310p` uses `_build_info.__soc_version__`, which is generated at install
time. We need to unify these two paths.

We need to unify these codes based on the following points:

1. We need to ensure that the chip-type judgment is consistent between
compile time and runtime;
2. At compile time, we need the chip type to complete op compilation,
but at runtime we only need the device
type (910B/910_93/310P/910_95/etc.) to make code-branch decisions;
3. At compile time, torch_npu may not have been installed yet, so we
cannot use torch_npu's API.

Based on the above points, we have made the following changes:

1. When the user sets the env var `SOC_VERSION`, use it; when it is not set,
query the soc_version via `npu-smi`;
2. Generate the device_type from the soc_version at compile time, and write
`__device_type__` instead of `__soc_version__` into `_build_info.py`;
3. At runtime, use `__device_type__` to select code branches.

### Does this PR introduce _any_ user-facing change?

When the env var `SOC_VERSION` is not set, it will no longer default to
`ASCEND910B1`; instead, the soc_version will be queried via `npu-smi`. In
addition, `SOC_VERSION` must be one of the entries in the `soc_to_device`
list in `setup.py`.

- vLLM version: v0.11.0
- vLLM main:
2918c1b49c

Signed-off-by: zzzzwwjj <1183291235@qq.com>
This commit is contained in:
zzzzwwjj
2025-11-26 14:28:55 +08:00
committed by GitHub
parent a91e76cd84
commit 136ea9ff56
42 changed files with 361 additions and 243 deletions

View File

@@ -42,8 +42,7 @@ from vllm_ascend.torchair.utils import (
register_torchair_model, torchair_ops_patch,
torchair_quant_method_register, write_kv_cache_bytes_to_file)
from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, ACL_FORMAT_FRACTAL_NZ,
is_310p, get_ascend_soc_version,
AscendSocVersion)
AscendDeviceType, get_ascend_device_type)
from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
@@ -125,13 +124,13 @@ class NPUTorchairModelRunner(NPUModelRunner):
max_num_tokens, tp_size)
self.mc2_tokens_capacity = max_graph_batch_size
if get_ascend_soc_version(
) == AscendSocVersion.A3 and self.mc2_tokens_capacity > 512:
if get_ascend_device_type(
) == AscendDeviceType._910_93 and self.mc2_tokens_capacity > 512:
logger.error(
f"A3: the max number of tokens must smaller then 512, but now is {self.mc2_tokens_capacity}"
)
if get_ascend_soc_version(
) == AscendSocVersion.A2 and self.mc2_tokens_capacity > 256:
if get_ascend_device_type(
) == AscendDeviceType._910B and self.mc2_tokens_capacity > 256:
logger.error(
f"A2: the max number of tokens must smaller then 256, but now is {self.mc2_tokens_capacity}"
)
@@ -207,7 +206,7 @@ class NPUTorchairModelRunner(NPUModelRunner):
positions, attn_metadata, num_tokens,
intermediate_tensors, inputs_embeds):
if with_prefill or self.enable_shared_expert_dp:
if is_310p():
if get_ascend_device_type() == AscendDeviceType._310P:
converting_weight_acl_format(self.model, ACL_FORMAT_FRACTAL_ND)
hidden_states = super()._generate_dummy_run_hidden_states(
with_prefill, is_torchair_compile, input_ids, positions,
@@ -230,7 +229,7 @@ class NPUTorchairModelRunner(NPUModelRunner):
assert isinstance(kv, tuple), "kv_cache must be a tuple"
torch._dynamo.mark_static(kv[0])
torch._dynamo.mark_static(kv[1])
if is_310p():
if get_ascend_device_type() == AscendDeviceType._310P:
converting_weight_acl_format(self.model, ACL_FORMAT_FRACTAL_NZ)
compiled_model = self._get_torchair_lazy_compiled_model(num_tokens)
@@ -371,7 +370,7 @@ class NPUTorchairModelRunner(NPUModelRunner):
"attn_metadata": attn_metadata
}
if not with_prefill:
if is_310p():
if get_ascend_device_type() == AscendDeviceType._310P:
converting_weight_acl_format(self.model, ACL_FORMAT_FRACTAL_NZ)
compiled_model = self._get_torchair_lazy_compiled_model(
padded_num_tokens_across_dp)
@@ -384,7 +383,7 @@ class NPUTorchairModelRunner(NPUModelRunner):
)
else:
assert self.model is not None
if is_310p():
if get_ascend_device_type() == AscendDeviceType._310P:
converting_weight_acl_format(self.model, ACL_FORMAT_FRACTAL_ND)
hidden_states = self.model(
@@ -414,7 +413,7 @@ class NPUTorchairModelRunner(NPUModelRunner):
patch_for_hcom()
if is_310p():
if get_ascend_device_type() == AscendDeviceType._310P:
# on 300I Duo platform, we need to patch broadcast. however, this patch will be
# overwritten by patch_for_hcom in torchair. so we need to re-patch it here.
from vllm_ascend.patch.platform.patch_distributed import \
@@ -428,7 +427,8 @@ class NPUTorchairModelRunner(NPUModelRunner):
self.ascend_config.torchair_graph_config.enable_frozen_parameter
# enabling tiling_schedule_optimize on 300I Duo has some bugs, so we have to
# disable it on 300I Duo platform now.
config.experimental_config.tiling_schedule_optimize = not is_310p()
config.experimental_config.tiling_schedule_optimize = get_ascend_device_type(
) != AscendDeviceType._310P
config.experimental_config.enable_view_optimize = \
self.ascend_config.torchair_graph_config.enable_view_optimize
torch.npu.set_compile_mode(jit_compile=False)
@@ -531,8 +531,8 @@ class NPUTorchairModelRunner(NPUModelRunner):
# NOTE: when enable_expert_parallel on A3, we need to check if `graph_batch_size` is divisible by `tp_size`
# Because we use x_active_mask for dispatch/combine op on A3, which requires that input shape should be same
# on all EP ranks
if get_ascend_soc_version(
) == AscendSocVersion.A3 and self.parallel_config.enable_expert_parallel:
if get_ascend_device_type(
) == AscendDeviceType._910_93 and self.parallel_config.enable_expert_parallel:
self._align_graph_size_divisible_by_tp_size()
def _align_graph_size_divisible_by_tp_size(self):