diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py
index 9851e51..71518b1 100644
--- a/tests/ut/attention/test_mla_v1.py
+++ b/tests/ut/attention/test_mla_v1.py
@@ -366,6 +366,7 @@ class TestAscendMLAImpl(TestBase):
         self.assertEqual(q_pe.shape[1], self.impl.num_heads)
         self.assertEqual(q_pe.shape[2], self.impl.qk_rope_head_dim)

+    @patch('vllm_ascend.utils._ENABLE_NZ', True)
     @patch('torch_npu.npu_format_cast')
     def test_process_weights_after_loading(self, mock_format_cast):
         layer = MagicMock(spec=LinearBase)
diff --git a/tests/ut/models/test_qwen2_5_vl.py b/tests/ut/models/test_qwen2_5_vl.py
index 7111aae..b4f0680 100644
--- a/tests/ut/models/test_qwen2_5_vl.py
+++ b/tests/ut/models/test_qwen2_5_vl.py
@@ -1,3 +1,5 @@
+from unittest.mock import patch
+
 import pytest
 import torch
 import torch.nn.functional as F
@@ -365,6 +367,7 @@ class TestAscendQwen2_5_VisionTransformer(PytestBase):
         res = attention.pad_qkv_bias(torch.rand((300)))
         assert res.shape[0] == 384

+    @patch('vllm_ascend.utils._ENABLE_NZ', True)
     def test_pad_qkv_weight(self, mocker: MockerFixture):
         attention = self.init_vision_transformer(mocker)
         mocker.patch("torch.nn.Module.__setattr__")
@@ -377,6 +380,7 @@ class TestAscendQwen2_5_VisionTransformer(PytestBase):
         res = attention.pad_qkv_weight(torch.rand((300, 300)))
         assert res.shape == (384, 300)

+    @patch('vllm_ascend.utils._ENABLE_NZ', True)
     def test_pad_proj_weight(self, mocker: MockerFixture):
         attention = self.init_vision_transformer(mocker)
         mocker.patch("torch.nn.Module.__setattr__")
diff --git a/tests/ut/quantization/test_w4a8_dynamic.py b/tests/ut/quantization/test_w4a8_dynamic.py
index 2116b0c..803d5ac 100644
--- a/tests/ut/quantization/test_w4a8_dynamic.py
+++ b/tests/ut/quantization/test_w4a8_dynamic.py
@@ -60,6 +60,7 @@ class TestAscendW4A8DynamicLinearMethod(TestBase):
         self.assertEqual(params["scale_bias"].dtype, torch.float32)
         self.assertEqual(params["scale_bias"].shape, (32, 16))

+    @patch('vllm_ascend.utils._ENABLE_NZ', True)
     @patch('torch_npu.npu_convert_weight_to_int4pack')
     @patch('torch.Tensor.npu')
     def test_process_weights_after_loading(self, mock_npu,
@@ -260,6 +261,7 @@ class TestAscendW4A8DynamicFusedMoEMethod(TestBase):
                                    requires_grad=False)
         return layer

+    @patch('vllm_ascend.utils._ENABLE_NZ', True)
     @patch('torch_npu.npu_format_cast')
     @patch('torch_npu.npu_quantize')
     @patch('torch.Tensor.npu')
diff --git a/tests/ut/test_utils.py b/tests/ut/test_utils.py
index 32f2d7b..da7b247 100644
--- a/tests/ut/test_utils.py
+++ b/tests/ut/test_utils.py
@@ -40,12 +40,18 @@ class TestUtils(TestBase):
         self.assertFalse(utils.is_310p())

     def test_is_enable_nz(self):
-        with mock.patch("vllm_ascend.utils.envs_ascend.VLLM_ASCEND_ENABLE_NZ",
-                        1):
-            self.assertTrue(utils.is_enable_nz())
-        with mock.patch("vllm_ascend.utils.envs_ascend.VLLM_ASCEND_ENABLE_NZ",
-                        0):
-            self.assertFalse(utils.is_enable_nz())
+        # Case when _ENABLE_NZ is already set
+        utils._ENABLE_NZ = True
+        self.assertTrue(utils.is_enable_nz())
+
+        utils._ENABLE_NZ = False
+        self.assertFalse(utils.is_enable_nz())
+
+        # Case when _ENABLE_NZ is None and vllm_config is not provided
+        utils._ENABLE_NZ = None
+        with self.assertRaises(ValueError) as context:
+            utils.is_enable_nz()
+        self.assertIn("vllm_config must be provided", str(context.exception))

     def test_sleep_mode_enabled(self):
         utils._SLEEP_MODE_ENABLED = None
diff --git a/tests/ut/worker/test_worker_v1.py b/tests/ut/worker/test_worker_v1.py
index 2f9f166..d899c99 100644
--- a/tests/ut/worker/test_worker_v1.py
+++ b/tests/ut/worker/test_worker_v1.py
@@ -19,7 +19,13 @@ class TestNPUWorker(TestBase):
         self.model_config_mock = MagicMock(spec=ModelConfig)
         self.model_config_mock.dtype = torch.float16
         self.model_config_mock.trust_remote_code = False
-        self.model_config_mock.hf_config = None
+
+        self.hf_config_mock = MagicMock()
+        self.hf_config_mock.model_type = "test_model"
+        if hasattr(self.hf_config_mock, 'index_topk'):
+            delattr(self.hf_config_mock, 'index_topk')
+
+        self.model_config_mock.hf_config = self.hf_config_mock

         self.parallel_config_mock = MagicMock(spec=ParallelConfig)

@@ -245,6 +251,7 @@ class TestNPUWorker(TestBase):
         self.assertIn("Sleep mode is not enabled", str(cm.exception))

+    @patch('vllm_ascend.utils._ENABLE_NZ', False)
     @patch("vllm_ascend.worker.worker_v1.sleep_mode_enabled")
     @patch("vllm_ascend.worker.worker_v1.CaMemAllocator")
     @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index f65aaa0..785cf30 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -59,6 +59,7 @@ _MIN_DP_BUFFER_SIZE = 50
 _IS_MOE_MODEL = None
 _ENABLE_SP = None
 _HAS_LAYER_IDX = None
+_ENABLE_NZ = None


 def is_310p():
@@ -69,8 +70,14 @@
     return _IS_310P


-def is_enable_nz():
-    return envs_ascend.VLLM_ASCEND_ENABLE_NZ
+def is_enable_nz(vllm_config: Optional[VllmConfig] = None) -> bool:
+    global _ENABLE_NZ
+    if _ENABLE_NZ is None:
+        if not vllm_config:
+            raise ValueError(
+                "vllm_config must be provided when _ENABLE_NZ is None")
+        _ENABLE_NZ = envs_ascend.VLLM_ASCEND_ENABLE_NZ and vllm_config.model_config.hf_config.model_type != "qwen3_next"
+    return _ENABLE_NZ


 def sleep_mode_enabled():
diff --git a/vllm_ascend/worker/worker_v1.py b/vllm_ascend/worker/worker_v1.py
index 3f6db84..b6b6008 100644
--- a/vllm_ascend/worker/worker_v1.py
+++ b/vllm_ascend/worker/worker_v1.py
@@ -81,6 +81,7 @@ class NPUWorker(WorkerBase):
         # register patch for vllm
         from vllm_ascend.utils import adapt_patch
         adapt_patch()
+        is_enable_nz(vllm_config)
         # Register ops when worker init.
         from vllm_ascend import ops
         ops.register_dummy_fusion_op()
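The substance of this patch is a lazy module-level cache: the first call to `is_enable_nz` must be handed a `vllm_config` so the flag can be computed once (the `VLLM_ASCEND_ENABLE_NZ` env switch is honored except for `qwen3_next`, where the NZ weight format is forced off), and every later call just returns the cached value. That is why `NPUWorker` seeds the cache during init, and why the unit tests now patch `vllm_ascend.utils._ENABLE_NZ` directly instead of mocking the env variable. A minimal self-contained sketch of the same pattern, with `FakeConfig` and the module-level `VLLM_ASCEND_ENABLE_NZ` constant as illustrative stand-ins for the real `VllmConfig` and `envs_ascend` lookup:

```python
from typing import Optional

# Module-level cache, seeded on the first call and read thereafter.
_ENABLE_NZ: Optional[bool] = None

# Stand-in for envs_ascend.VLLM_ASCEND_ENABLE_NZ.
VLLM_ASCEND_ENABLE_NZ = True


class FakeConfig:
    """Illustrative stand-in for vllm_config.model_config.hf_config."""

    def __init__(self, model_type: str):
        self.model_type = model_type


def is_enable_nz(config: Optional[FakeConfig] = None) -> bool:
    global _ENABLE_NZ
    if _ENABLE_NZ is None:
        # First call: a config is mandatory, since the flag depends on it.
        if not config:
            raise ValueError("vllm_config must be provided when _ENABLE_NZ is None")
        # NZ is opted out for qwen3_next even when the env switch is on.
        _ENABLE_NZ = VLLM_ASCEND_ENABLE_NZ and config.model_type != "qwen3_next"
    return _ENABLE_NZ


is_enable_nz(FakeConfig("qwen3_next"))  # seeds the cache (False here)
assert is_enable_nz() is False          # later calls need no config
```

One consequence worth noting: once the cache is seeded, flipping the environment variable has no effect for the rest of the process, and calling `is_enable_nz()` with no config before any seeding raises `ValueError`, which is exactly the path the updated `test_is_enable_nz` in `tests/ut/test_utils.py` exercises.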