import importlib
from unittest.mock import MagicMock, patch

import pytest
import torch

from vllm.config.compilation import CompilationMode, CUDAGraphMode
from vllm.platforms import PlatformEnum
from vllm.v1.attention.selector import AttentionSelectorConfig  # type: ignore

from tests.ut.base import TestBase
from vllm_ascend.platform import NPUPlatform
from vllm_ascend.utils import (
    ASCEND_QUANTIZATION_METHOD,
    COMPRESSED_TENSORS_METHOD,
    AscendDeviceType,
)


class TestNPUPlatform(TestBase):
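
    # The factory helpers below build MagicMock stand-ins for vLLM's
    # VllmConfig and for the object returned by init_ascend_config. Only the
    # attributes the platform hooks under test actually read are pinned to
    # concrete values; everything else remains a permissive MagicMock.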
    @staticmethod
    def mock_vllm_config():
        mock_vllm_config = MagicMock()
        mock_vllm_config.compilation_config = MagicMock()
        mock_vllm_config.model_config = MagicMock()
        mock_vllm_config.parallel_config = MagicMock()
        mock_vllm_config.cache_config = MagicMock()
        mock_vllm_config.scheduler_config = MagicMock()
        mock_vllm_config.scheduler_config.max_num_seqs = None
        mock_vllm_config.speculative_config = None
        mock_vllm_config.additional_config = {}
        mock_vllm_config.compilation_config.pass_config.enable_sp = False
        mock_vllm_config.compilation_config.cudagraph_mode = None
        return mock_vllm_config

    @staticmethod
    def mock_vllm_ascend_config():
        mock_ascend_config = MagicMock()
        mock_ascend_config.xlite_graph_config.enabled = False
        mock_ascend_config.xlite_graph_config.full_mode = False
        mock_ascend_config.ascend_compilation_config.enable_npugraph_ex = False
        mock_ascend_config.ascend_fusion_config = None
        mock_ascend_config.recompute_scheduler_enable = False
        mock_ascend_config.SLO_limits_for_dynamic_batch = -1
        mock_ascend_config.enable_shared_expert_dp = False
        mock_ascend_config.update_compile_ranges_split_points = MagicMock()
        return mock_ascend_config
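
    # NPUPlatform.supported_quantization is a class-level list shared by all
    # Platform instances, so setUp rewrites it in place via slice assignment
    # instead of rebinding the attribute, keeping every alias consistent.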
    def setUp(self):
        self.platform = NPUPlatform()
        self.platform.supported_quantization[:] = [
            "ascend", "compressed-tensors"
        ]

    def test_class_variables(self):
        self.assertEqual(NPUPlatform._enum, PlatformEnum.OOT)
        self.assertEqual(NPUPlatform.device_name, "npu")
        self.assertEqual(NPUPlatform.device_type, "npu")
        self.assertEqual(NPUPlatform.simple_compile_backend, "eager")
        self.assertEqual(NPUPlatform.ray_device_key, "NPU")
        self.assertEqual(NPUPlatform.device_control_env_var,
                         "ASCEND_RT_VISIBLE_DEVICES")
        self.assertEqual(NPUPlatform.dispatch_key, "PrivateUse1")
        self.assertEqual(
            NPUPlatform.supported_quantization,
            [ASCEND_QUANTIZATION_METHOD, COMPRESSED_TENSORS_METHOD],
        )

    def test_is_sleep_mode_available(self):
        self.assertTrue(self.platform.is_sleep_mode_available())
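
    # Note on the stacked @patch decorators used below: decorators apply
    # bottom-up, so the patch closest to the function supplies the first
    # mock argument, e.g.
    #
    #     @patch("mod.outer")   # applied last  -> last mock argument
    #     @patch("mod.inner")   # applied first -> first mock argument
    #     def test(self, mock_inner, mock_outer): ...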

    @patch("vllm_ascend.utils.adapt_patch")
@patch("vllm_ascend.quantization.modelslim_config.AscendModelSlimConfig")
|
|
|
|
|
def test_pre_register_and_update_with_parser(self, mock_quant_config,
|
|
|
|
|
mock_adapt_patch):
|
2025-07-02 17:46:06 +08:00
|
|
|
mock_parser = MagicMock()
|
|
|
|
|
mock_action = MagicMock()
|
|
|
|
|
mock_action.choices = ["awq", "gptq"]
|
|
|
|
|
mock_parser._option_string_actions = {"--quantization": mock_action}
|
|
|
|
|
|
|
|
|
|
self.platform.pre_register_and_update(mock_parser)
|
|
|
|
|
|
|
|
|
|
mock_adapt_patch.assert_called_once_with(is_global_patch=True)
|
|
|
|
|
|
2025-08-26 09:06:16 +08:00
|
|
|
self.assertTrue(ASCEND_QUANTIZATION_METHOD in mock_action.choices)
|
2025-07-02 17:46:06 +08:00
|
|
|
self.assertEqual(len(mock_action.choices), 3) # original 2 + ascend

    @patch("vllm_ascend.utils.adapt_patch")
@patch("vllm_ascend.quantization.modelslim_config.AscendModelSlimConfig")
|
|
|
|
|
def test_pre_register_and_update_without_parser(self, mock_quant_config,
|
|
|
|
|
mock_adapt_patch):
|
2025-07-02 17:46:06 +08:00
|
|
|
self.platform.pre_register_and_update(None)
|
|
|
|
|
|
|
|
|
|
mock_adapt_patch.assert_called_once_with(is_global_patch=True)

    @patch("vllm_ascend.utils.adapt_patch")
@patch("vllm_ascend.quantization.modelslim_config.AscendModelSlimConfig")
|
|
|
|
|
def test_pre_register_and_update_with_parser_no_quant_action(
|
|
|
|
|
self, mock_quant_config, mock_adapt_patch):
|
2025-07-02 17:46:06 +08:00
|
|
|
mock_parser = MagicMock()
|
|
|
|
|
mock_parser._option_string_actions = {}
|
|
|
|
|
|
|
|
|
|
self.platform.pre_register_and_update(mock_parser)
|
|
|
|
|
|
|
|
|
|
mock_adapt_patch.assert_called_once_with(is_global_patch=True)

    @patch("vllm_ascend.utils.adapt_patch")
@patch("vllm_ascend.quantization.modelslim_config.AscendModelSlimConfig")
|
|
|
|
|
def test_pre_register_and_update_with_existing_ascend_quant(
|
|
|
|
|
self, mock_quant_config, mock_adapt_patch):
|
2025-07-02 17:46:06 +08:00
|
|
|
mock_parser = MagicMock()
|
|
|
|
|
mock_action = MagicMock()
|
2025-08-26 09:06:16 +08:00
|
|
|
mock_action.choices = ["awq", ASCEND_QUANTIZATION_METHOD]
|
2025-07-02 17:46:06 +08:00
|
|
|
mock_parser._option_string_actions = {"--quantization": mock_action}
|
|
|
|
|
|
|
|
|
|
self.platform.pre_register_and_update(mock_parser)
|
|
|
|
|
|
|
|
|
|
mock_adapt_patch.assert_called_once_with(is_global_patch=True)
|
|
|
|
|
self.assertEqual(len(mock_action.choices), 2)
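
    # For the apply_config_platform_defaults tests below, the expected values
    # suggest the Ascend default is max_num_seqs * (num_speculative_tokens +
    # 1), capped at 512: 40 * 4 = 160, while 200 * 4 = 800 is clamped to 512.
    # This is inferred from the test expectations, not from the implementation.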
    def test_apply_config_platform_defaults_sets_ascend_default_max(self):
        test_cases = [
            (40, 3, 160),
            (200, 3, 512),
        ]

        for max_num_seqs, num_speculative_tokens, expected_max in test_cases:
            with self.subTest(
                    max_num_seqs=max_num_seqs,
                    num_speculative_tokens=num_speculative_tokens,
                    expected_max=expected_max,
            ):
                vllm_config = TestNPUPlatform.mock_vllm_config()
                vllm_config.scheduler_config.max_num_seqs = max_num_seqs
                vllm_config.speculative_config = MagicMock(
                    num_speculative_tokens=num_speculative_tokens)
                vllm_config.compilation_config.max_cudagraph_capture_size = None
                vllm_config.compilation_config.cudagraph_capture_sizes = None

                self.platform.apply_config_platform_defaults(vllm_config)

                self.assertEqual(
                    vllm_config.compilation_config.max_cudagraph_capture_size,
                    expected_max,
                )

    def test_apply_config_platform_defaults_respects_explicit_max(self):
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.compilation_config.max_cudagraph_capture_size = 456
        vllm_config.compilation_config.cudagraph_capture_sizes = None

        self.platform.apply_config_platform_defaults(vllm_config)

        self.assertEqual(
            vllm_config.compilation_config.max_cudagraph_capture_size, 456)

    def test_apply_config_platform_defaults_respects_explicit_sizes(self):
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.compilation_config.max_cudagraph_capture_size = None
        vllm_config.compilation_config.cudagraph_capture_sizes = [1, 2, 4]

        self.platform.apply_config_platform_defaults(vllm_config)

        self.assertIsNone(
            vllm_config.compilation_config.max_cudagraph_capture_size)
        self.assertEqual(
            vllm_config.compilation_config.cudagraph_capture_sizes, [1, 2, 4])

    def test_apply_config_platform_defaults_skips_when_scheduler_max_num_seqs_is_missing(self):
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.compilation_config.max_cudagraph_capture_size = None
        vllm_config.compilation_config.cudagraph_capture_sizes = None

        self.platform.apply_config_platform_defaults(vllm_config)

        self.assertIsNone(
            vllm_config.compilation_config.max_cudagraph_capture_size)
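
    # The next test drives the full check_and_update_config path. It swaps
    # vllm_config._set_cudagraph_sizes for a probe whose side_effect records
    # max_cudagraph_capture_size at call time, asserting that the platform
    # default derived from max_num_seqs (77) is still intact when vLLM would
    # consume it.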
    @patch("vllm_ascend.platform.refresh_block_size")
    @patch("vllm_ascend.platform.get_ascend_device_type",
           return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.platform.enable_sp", return_value=False)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    def test_check_and_update_config_preserves_platform_default_max_input(
            self,
            mock_auto_detect,
            mock_init_ascend,
            _mock_enable_sp,
            _mock_device_type,
            _mock_refresh_block_size,
    ):
        mock_init_ascend.return_value = (
            TestNPUPlatform.mock_vllm_ascend_config())
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.scheduler_config.max_num_seqs = 77
        vllm_config.compilation_config.max_cudagraph_capture_size = None
        vllm_config.compilation_config.cudagraph_capture_sizes = None
        vllm_config.compilation_config.mode = CompilationMode.DYNAMO_TRACE_ONCE
        vllm_config.compilation_config.cudagraph_mode = (
            CUDAGraphMode.FULL_DECODE_ONLY)
        vllm_config.compilation_config.custom_ops = []
        vllm_config.model_config.enforce_eager = False
        vllm_config.model_config.enable_sleep_mode = True
        vllm_config.model_config.is_encoder_decoder = False
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        vllm_config.parallel_config.worker_cls = "manual"
        vllm_config.parallel_config.cp_kv_cache_interleave_size = 1
        vllm_config.cache_config.block_size = 1

        self.platform.apply_config_platform_defaults(vllm_config)

        # Record what max_cudagraph_capture_size holds at the moment vLLM's
        # _set_cudagraph_sizes would read it.
        observed_inputs: list[int | None] = []
        vllm_config._set_cudagraph_sizes = MagicMock(
            side_effect=lambda: observed_inputs.append(
                vllm_config.compilation_config.max_cudagraph_capture_size))

        self.platform.check_and_update_config(vllm_config)

        self.assertEqual(observed_inputs, [77])

    def test_get_device_capability(self):
        self.assertIsNone(self.platform.get_device_capability(device_id=0))

    @patch("torch.npu.get_device_name")
    def test_get_device_name(self, mock_get_device_name):
        device_id = 0
        device_name = "Ascend910B2"
        mock_get_device_name.return_value = device_name

        self.assertEqual(self.platform.get_device_name(device_id), device_name)
        mock_get_device_name.assert_called_once_with(0)

    @patch("torch.npu.get_device_properties")
    def test_get_device_uuid(self, mock_get_device_properties):
        device_id = 0
        device_properties = MagicMock()
        device_properties.uuid = "01020304-0000-0000-0000-01020304"
        mock_get_device_properties.return_value = device_properties

        self.assertEqual(self.platform.get_device_uuid(device_id),
                         device_properties.uuid)
        mock_get_device_properties.assert_called_once_with(0)

    @patch("torch.inference_mode")
    def test_inference_mode(self, mock_inference_mode):
        mock_inference_mode.return_value = None

        self.assertIsNone(self.platform.inference_mode())
        mock_inference_mode.assert_called_once()

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.utils.update_aclgraph_sizes")
    @patch("vllm_ascend.utils.get_ascend_device_type",
           return_value=AscendDeviceType.A3)
    @patch("os.environ", {})
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_basic_config_update(
            self, mock_init_recompute, mock_soc_version, mock_update_acl,
            mock_init_ascend, mock_auto_detect):
        mock_init_ascend.return_value = (
            TestNPUPlatform.mock_vllm_ascend_config())
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.parallel_config.enable_expert_parallel = False
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        # Reload the platform module so check_and_update_config picks up the
        # mocked init_ascend_config; without the reload it would call the
        # original, unpatched function and the test would fail.
        from vllm_ascend import platform
        importlib.reload(platform)

        self.platform.check_and_update_config(vllm_config)

        mock_init_ascend.assert_called_once_with(vllm_config)
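
    # The remaining check_and_update_config tests repeat the reload pattern
    # above and, where noted, stub NPUPlatform._fix_incompatible_config via
    # patch.object so only the branch under test executes.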
|
2025-07-02 17:46:06 +08:00
|
|
|
|
2026-03-13 22:53:25 +08:00
|
|
|
@patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
|
2026-01-20 11:02:38 +08:00
|
|
|
@patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
|
2025-07-02 17:46:06 +08:00
|
|
|
@patch("vllm_ascend.ascend_config.init_ascend_config")
|
2026-01-20 11:02:38 +08:00
|
|
|
@patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
|
2025-07-02 17:46:06 +08:00
|
|
|
def test_check_and_update_config_no_model_config_warning(
|
2026-03-13 22:53:25 +08:00
|
|
|
self, mock_init_recompute, mock_init_ascend, mock_soc_version, mock_auto_detect
|
2026-01-20 11:02:38 +08:00
|
|
|
):
|
|
|
|
|
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config()
|
2025-09-02 18:34:04 +08:00
|
|
|
vllm_config = TestNPUPlatform.mock_vllm_config()
|
|
|
|
|
vllm_config.model_config = None
|
2025-12-05 10:31:49 +08:00
|
|
|
vllm_config.parallel_config.decode_context_parallel_size = 1
|
|
|
|
|
vllm_config.parallel_config.prefill_context_parallel_size = 1
|
2025-10-20 16:30:57 +08:00
|
|
|
vllm_config.parallel_config.tensor_parallel_size = 1
|
|
|
|
|
mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        with self.assertLogs(logger="vllm", level="WARNING") as cm:
            from vllm_ascend import platform
            importlib.reload(platform)
            self.platform = platform.NPUPlatform()

            with patch.object(platform.NPUPlatform,
                              "_fix_incompatible_config"):
                self.platform.check_and_update_config(vllm_config)

        self.assertIn("Model config is missing", cm.output[0])

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.utils.get_ascend_device_type",
           return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_enforce_eager_mode(
            self, mock_init_recompute, mock_init_ascend, mock_soc_version,
            mock_auto_detect):
        mock_init_ascend.return_value = (
            TestNPUPlatform.mock_vllm_ascend_config())
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.model_config.enforce_eager = True
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        with self.assertLogs(logger="vllm", level="INFO") as cm:
            from vllm_ascend import platform
            importlib.reload(platform)
            self.platform = platform.NPUPlatform()

            with patch.object(platform.NPUPlatform,
                              "_fix_incompatible_config"):
                self.platform.check_and_update_config(vllm_config)

        self.assertIn("Compilation disabled, using eager mode by default",
                      cm.output[0])
        self.assertEqual(
            vllm_config.compilation_config.mode,
            CompilationMode.NONE,
        )
        self.assertEqual(
            vllm_config.compilation_config.cudagraph_mode,
            CUDAGraphMode.NONE,
        )

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.utils.get_ascend_device_type",
           return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_unsupported_compilation_level(
            self, mock_init_recompute, mock_init_ascend, mock_soc_version,
            mock_auto_detect):
        mock_init_ascend.return_value = (
            TestNPUPlatform.mock_vllm_ascend_config())
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.model_config.enforce_eager = False
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()
        vllm_config.compilation_config.mode = CompilationMode.DYNAMO_TRACE_ONCE

        with self.assertLogs(logger="vllm", level="WARNING") as cm:
            from vllm_ascend import platform
            importlib.reload(platform)
            self.platform = platform.NPUPlatform()

            with patch.object(platform.NPUPlatform,
                              "_fix_incompatible_config"):
                self.platform.check_and_update_config(vllm_config)

        self.assertIn("NPU does not support", cm.output[0])
        self.assertEqual(
            vllm_config.compilation_config.mode,
            CompilationMode.NONE,
        )
        self.assertEqual(
            vllm_config.compilation_config.cudagraph_mode,
            CUDAGraphMode.NONE,
        )
|
|
|
|
|
|

    @pytest.mark.skip("Revert me when vllm supports setting cudagraph_mode on oot platform")
    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    def test_check_and_update_config_unsupported_cudagraph_mode(self, mock_init_ascend, mock_soc_version, mock_auto_detect):
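        """FULL cudagraph mode must fall back to NONE on NPU; the assertTrue below matches the platform's log text verbatim (typo included)."""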
        mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config()
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.model_config.enforce_eager = False
        vllm_config.compilation_config.cudagraph_mode = CUDAGraphMode.FULL

with self.assertLogs(logger="vllm", level="INFO") as cm:
|
|
|
|
|
from vllm_ascend import platform
|
|
|
|
|
|
|
|
|
|
importlib.reload(platform)
|
2025-09-02 18:34:04 +08:00
|
|
|
self.platform.check_and_update_config(vllm_config)
|
2026-01-20 11:02:38 +08:00
|
|
|
self.assertTrue("cudagraph_mode is not support on NPU. falling back to NONE" in cm.output[0])
|
        self.assertEqual(
            vllm_config.compilation_config.mode,
            CompilationMode.NONE,
        )
        self.assertEqual(
            vllm_config.compilation_config.cudagraph_mode,
            CUDAGraphMode.NONE,
        )

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_cache_config_block_size(
        self, mock_init_recompute, mock_init_ascend, mock_soc_version, mock_auto_detect
    ):
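        """When block_size is unset, check_and_update_config must default it to 128."""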
        mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config()
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.cache_config.block_size = None
        vllm_config.cache_config.enable_prefix_caching = True
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        from vllm_ascend import platform

        importlib.reload(platform)
        self.platform.check_and_update_config(vllm_config)

        self.assertEqual(vllm_config.cache_config.block_size, 128)

    def test_update_block_size_for_backend_preserves_hybrid_block_size(self):
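        """Hybrid models keep their preset block_size when the user did not choose one."""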
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.model_config.is_hybrid = True
        vllm_config.cache_config.block_size = 1024
        vllm_config.cache_config.user_specified_block_size = False

        self.platform.update_block_size_for_backend(vllm_config)

        self.assertEqual(vllm_config.cache_config.block_size, 1024)

    def test_update_block_size_for_backend_preserves_user_block_size(self):
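        """A block_size explicitly chosen by the user is never overridden."""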
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.model_config.is_hybrid = False
        vllm_config.cache_config.block_size = 512
        vllm_config.cache_config.user_specified_block_size = True

        self.platform.update_block_size_for_backend(vllm_config)

        self.assertEqual(vllm_config.cache_config.block_size, 512)

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_v1_worker_class_selection(
        self, mock_init_recompute, mock_init_ascend, mock_soc_version, mock_auto_detect
    ):
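        """worker_cls="auto" resolves to NPUWorker by default and to XliteWorker when the xlite graph is enabled."""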
        mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config()
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.parallel_config.worker_cls = "auto"
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        from vllm_ascend import platform

        importlib.reload(platform)
        self.platform.check_and_update_config(vllm_config)

        self.assertEqual(
            vllm_config.parallel_config.worker_cls,
            "vllm_ascend.worker.worker.NPUWorker",
        )
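
        # Re-run with the xlite graph enabled: "auto" should now pick the xlite worker.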
        test_ascend_config = TestNPUPlatform.mock_vllm_ascend_config()
        test_ascend_config.xlite_graph_config.enabled = True
        mock_init_ascend.return_value = test_ascend_config
        vllm_config.parallel_config.worker_cls = "auto"
        self.platform.check_and_update_config(vllm_config)
        self.assertEqual(
            vllm_config.parallel_config.worker_cls,
            "vllm_ascend.xlite.xlite_worker.XliteWorker",
        )

    @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
    @patch("vllm_ascend.ascend_config.init_ascend_config")
    @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType._310P)
    @patch("vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config")
    def test_check_and_update_config_310p_no_custom_ops(self, mock_init_recompute, mock_soc_version, mock_init_ascend, mock_auto_detect):
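        """On 310P devices, custom_ops must remain empty after the config update."""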
        mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config()
        vllm_config = TestNPUPlatform.mock_vllm_config()
        vllm_config.compilation_config.custom_ops = []
        vllm_config.parallel_config.decode_context_parallel_size = 1
        vllm_config.parallel_config.prefill_context_parallel_size = 1
        vllm_config.parallel_config.tensor_parallel_size = 1
        mock_init_recompute.return_value = MagicMock()
        vllm_config.scheduler_config = MagicMock()

        from vllm_ascend import platform

        importlib.reload(platform)
        self.platform.check_and_update_config(vllm_config)

        self.assertEqual(vllm_config.compilation_config.custom_ops, [])

    def test_get_attn_backend_cls_use_v1_and_mla(self):
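        """use_mla=True selects the Ascend MLA attention backend."""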
        attn_selector_config = AttentionSelectorConfig(
            dtype=torch.float16,
            head_size=0,
            kv_cache_dtype=None,
            block_size=128,
            use_mla=True,
            use_sparse=False,
        )
        result = self.platform.get_attn_backend_cls("ascend", attn_selector_config)
        self.assertEqual(result, "vllm_ascend.attention.mla_v1.AscendMLABackend")

    def test_get_attn_backend_cls_use_v1_only(self):
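        """use_mla=False selects the default Ascend attention backend."""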
        attn_selector_config = AttentionSelectorConfig(
            dtype=torch.float16,
            head_size=0,
            kv_cache_dtype=None,
            block_size=128,
            use_mla=False,
            use_sparse=False,
        )
        result = self.platform.get_attn_backend_cls("ascend", attn_selector_config)
        self.assertEqual(result, "vllm_ascend.attention.attention_v1.AscendAttentionBackend")

    def test_get_punica_wrapper(self):
        result = self.platform.get_punica_wrapper()

        self.assertEqual(result, "vllm_ascend.lora.punica_npu.PunicaWrapperNPU")

    @patch("torch.npu.reset_peak_memory_stats")
    @patch("torch.npu.max_memory_allocated")
    def test_get_current_memory_usage_with_specific_device(self, mock_max_memory, mock_reset_stats):
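        """Peak-memory stats are reset and queried on the exact device passed in."""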
        max_memory_allocated_result = 1024.0
        mock_max_memory.return_value = max_memory_allocated_result
        test_device = torch.device("npu:0")

        result = self.platform.get_current_memory_usage(device=test_device)

        mock_reset_stats.assert_called_once_with(test_device)
        mock_max_memory.assert_called_once_with(test_device)
        self.assertEqual(result, max_memory_allocated_result)
@patch("torch.npu.reset_peak_memory_stats")
|
|
|
|
|
@patch("torch.npu.max_memory_allocated")
|
2026-01-20 11:02:38 +08:00
|
|
|
def test_get_current_memory_usage_with_default_device(self, mock_max_memory, mock_reset_stats):
|
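        """With no device argument, None is forwarded to the torch.npu memory APIs."""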
        max_memory_allocated_result = 1024.0
        mock_max_memory.return_value = max_memory_allocated_result

        result = self.platform.get_current_memory_usage()

        mock_reset_stats.assert_called_once_with(None)
        mock_max_memory.assert_called_once_with(None)
        self.assertEqual(result, max_memory_allocated_result)

    @patch("torch.npu.reset_peak_memory_stats", side_effect=RuntimeError("Device error"))
    @patch("torch.npu.max_memory_allocated")
    def test_get_current_memory_usage_when_reset_stats_fails(self, mock_max_memory, mock_reset_stats):
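        """If resetting peak stats raises, the error propagates and the memory query is skipped."""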
        with self.assertRaises(RuntimeError):
            self.platform.get_current_memory_usage()

        mock_reset_stats.assert_called_once()
        mock_max_memory.assert_not_called()
@patch("torch.npu.reset_peak_memory_stats")
|
|
|
|
|
@patch(
|
|
|
|
|
"torch.npu.max_memory_allocated",
|
|
|
|
|
side_effect=RuntimeError("Memory query failed"),
|
|
|
|
|
)
|
2026-01-20 11:02:38 +08:00
|
|
|
def test_get_current_memory_usage_when_query_fails(self, mock_max_memory, mock_reset_stats):
|
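        """If the memory query raises, stats are still reset first and the error propagates."""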
        with self.assertRaises(RuntimeError):
            self.platform.get_current_memory_usage()

        mock_reset_stats.assert_called_once()
        mock_max_memory.assert_called_once()

    def test_get_device_communicator_cls_returns_correct_value(self):
        self.assertEqual(
            self.platform.get_device_communicator_cls(),
            "vllm_ascend.distributed.device_communicators.npu_communicator.NPUCommunicator",
        )

    def test_is_pin_memory_available_returns_true(self):
        self.assertTrue(self.platform.is_pin_memory_available())

    def test_get_static_graph_wrapper_cls_returns_correct_value(self):
        self.assertEqual(
            self.platform.get_static_graph_wrapper_cls(),
            "vllm_ascend.compilation.acl_graph.ACLGraphWrapper",
        )