[misc] move mxfp_compat into device to decouple from quantization init chain (#6918)

### What this PR does / why we need it?
`mxfp_compat` only provides dtype/symbol compatibility helpers for
different `torch_npu` versions, but it was placed under
`vllm_ascend.quantization`. Importing it from device/ops paths could
trigger `quantization/__init__.py` and pull in heavy quantization-method
dependencies, which increased startup coupling and created import-cycle
risk (especially on 310P paths). Moving the module under
`vllm_ascend.device` decouples it from the quantization init chain.
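
For readers unfamiliar with the mechanism: Python executes a package's
`__init__.py` before any submodule of that package can be imported, so a
lightweight helper inherits every dependency its package eagerly imports.
A minimal, self-contained demonstration with invented names (not the
actual vllm_ascend layout):

```python
# Hypothetical demo (invented package names): importing a submodule always
# runs the package's __init__.py first, dragging in its eager imports.
import os
import sys
import tempfile

root = tempfile.mkdtemp()
pkg = os.path.join(root, "quantpkg")
os.makedirs(pkg)

# Package __init__ standing in for heavy quantization-method imports.
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    f.write("print('quantpkg/__init__.py ran: heavy deps imported')\n")

# The lightweight compat helper we actually wanted.
with open(os.path.join(pkg, "compat.py"), "w") as f:
    f.write("SOME_DTYPE = 'float8_e8m0fnu'\n")

sys.path.insert(0, root)
from quantpkg.compat import SOME_DTYPE  # also executes quantpkg/__init__.py

print(SOME_DTYPE)  # the print from __init__.py appears before this line
```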

### Does this PR introduce _any_ user-facing change?
No functional behavior change intended.

### How was this patch tested?
CI passed.

- vLLM version: v0.16.0
- vLLM main: 15d76f74e2

---------

Signed-off-by: linfeng-yuan <1102311262@qq.com>
Author: linfeng-yuan
Date: 2026-03-02 18:17:01 +08:00
Committed by: GitHub
Parent: 632801b0ad
Commit: 68d8d20ca2

6 changed files with 7 additions and 7 deletions


```diff
@@ -43,7 +43,7 @@ class NPUModelRunner310(NPUModelRunner):
             Dict[str, torch.Tensor]: A map between layer names to their
             corresponding memory buffer for KV cache.
         """
-        # 310P limitation: KV transfer is not supported.
+        # 310P limitation: KV transfer is not supported
         if self.vllm_config.kv_transfer_config is not None:
             raise ValueError("KV cache transfer is not supported for 310P.")
         if self.use_sparse:
```


```diff
@@ -18,7 +18,7 @@
 import torch
 import torch_npu
-from vllm_ascend.quantization.mxfp_compat import (
+from vllm_ascend.device.mxfp_compat import (
     FLOAT4_E2M1FN_X2_DTYPE,
     FLOAT8_E8M0FNU_DTYPE,
     HIFLOAT8_DTYPE,
```


```diff
@@ -23,10 +23,10 @@ from vllm.triton_utils import HAS_TRITON
 from vllm_ascend.ascend_forward_context import MoECommType
 from vllm_ascend.device.device_op import DeviceOperator
-from vllm_ascend.ops.activation import AscendSwigluOAIAndMul
-from vllm_ascend.quantization.mxfp_compat import (
+from vllm_ascend.device.mxfp_compat import (
     ensure_mxfp8_moe_available,
 )
+from vllm_ascend.ops.activation import AscendSwigluOAIAndMul
 from vllm_ascend.utils import (
     dispose_tensor,
     enable_custom_op,
```


```diff
@@ -25,12 +25,12 @@ from vllm.distributed import get_ep_group
 from vllm.forward_context import get_forward_context
 from vllm_ascend.ascend_config import get_ascend_config
-from vllm_ascend.ops.fused_moe.experts_selector import select_experts
-from vllm_ascend.quantization.mxfp_compat import (
+from vllm_ascend.device.mxfp_compat import (
     FLOAT8_E8M0FNU_DTYPE,
     ensure_mxfp8_linear_available,
     ensure_mxfp8_moe_available,
 )
+from vllm_ascend.ops.fused_moe.experts_selector import select_experts
 from .base import AscendLinearScheme, AscendMoEScheme, QuantType
 from .registry import register_scheme
```


```diff
@@ -1,6 +1,6 @@
 import torch
-from vllm_ascend.quantization.mxfp_compat import (
+from vllm_ascend.device.mxfp_compat import (
     FLOAT4_E2M1FN_X2_DTYPE,
     FLOAT8_E8M0FNU_DTYPE,
     ensure_mxfp4_dtype_available,
```
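
For context, a hedged sketch of what a dtype-compat shim like
`mxfp_compat` plausibly looks like. The constant and function names are
taken from the diffs above, but the fallback logic here is an assumption,
not the actual vllm_ascend implementation:

```python
# Hypothetical sketch of a mxfp_compat-style shim: resolve dtypes that only
# newer torch/torch_npu builds expose, and fail fast when a quantization
# path needs one that is missing. Fallback logic is assumed, not verbatim.
import torch

# getattr fallback: None when this build lacks the dtype symbol.
FLOAT8_E8M0FNU_DTYPE = getattr(torch, "float8_e8m0fnu", None)
FLOAT4_E2M1FN_X2_DTYPE = getattr(torch, "float4_e2m1fn_x2", None)


def ensure_mxfp8_moe_available() -> None:
    """Raise early if the MXFP8 MoE path cannot run on this build."""
    if FLOAT8_E8M0FNU_DTYPE is None:
        raise RuntimeError(
            "MXFP8 MoE requires torch.float8_e8m0fnu; "
            "upgrade torch/torch_npu to a build that provides it.")
```

Because such a shim only needs `torch` at import time, placing it under
`vllm_ascend.device` lets device/ops code import it without touching the
quantization package at all.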