Add an option to enable frozen parameter (#2869)

### What this PR does / why we need it?
Add an option to enable frozen parameter: fix the memory address of weights during inference so the input-address refresh time during graph execution is reduced.

### How was this patch tested?

- vLLM version: v0.10.2
- vLLM main:
68dbde5dbb

Signed-off-by: 1Fire4 <wangdingyi2@huawei.com>
This commit is contained in:
1Fire4
2025-09-17 12:00:44 +08:00
committed by GitHub
parent 76844eec78
commit 1f6465c399
4 changed files with 8 additions and 1 deletions

View File

@@ -48,6 +48,7 @@ The details of each config option are as follows:
| `enable_multistream_mla`| bool | `False` | Whether to put vector ops of MLA to another stream. This option only takes effects on models using MLA (e.g., DeepSeek). |
| `enable_multistream_moe`| bool | `False` | Whether to enable multistream shared expert. This option only takes effects on DeepSeek moe models. |
| `enable_view_optimize` | bool | `True` | Whether to enable torchair view optimization |
| `enable_frozen_parameter` | bool | `True` | Whether to fix the memory address of weights during inference to reduce the input address refresh time during graph execution. |
| `use_cached_graph` | bool | `False` | Whether to use cached graph |
| `graph_batch_sizes` | list[int] | `[]` | The batch size for torchair graph cache |
| `graph_batch_sizes_init` | bool | `False` | Init graph batch size dynamically if `graph_batch_sizes` is empty |

View File

@@ -53,6 +53,7 @@ class TestAscendConfig(TestBase):
self.assertFalse(torchair_graph_config.enable_multistream_mla)
self.assertFalse(torchair_graph_config.enable_multistream_moe)
self.assertTrue(torchair_graph_config.enable_view_optimize)
self.assertTrue(torchair_graph_config.enable_frozen_parameter)
self.assertFalse(torchair_graph_config.enable_kv_nz)
ascend_scheduler_config = ascend_config.ascend_scheduler_config
@@ -70,6 +71,7 @@ class TestAscendConfig(TestBase):
"enable_multistream_mla": True,
"enable_multistream_moe": True,
"enable_view_optimize": True,
"enable_frozen_parameter": True,
"enable_kv_nz": True
},
"ascend_scheduler_config": {
@@ -89,6 +91,7 @@ class TestAscendConfig(TestBase):
self.assertTrue(torchair_graph_config.enable_multistream_mla)
self.assertTrue(torchair_graph_config.enable_multistream_moe)
self.assertTrue(torchair_graph_config.enable_view_optimize)
self.assertTrue(torchair_graph_config.enable_frozen_parameter)
self.assertTrue(torchair_graph_config.enable_kv_nz)
ascend_scheduler_config = ascend_config.ascend_scheduler_config

View File

@@ -114,6 +114,8 @@ class TorchairGraphConfig:
"enable_multistream_moe", False)
self.enable_view_optimize = torchair_graph_config.get(
"enable_view_optimize", True)
self.enable_frozen_parameter = torchair_graph_config.get(
"enable_frozen_parameter", True)
self.enable_kv_nz = torchair_graph_config.get("enable_kv_nz", False)
if not isinstance(self.graph_batch_sizes, list):

View File

@@ -359,7 +359,8 @@ class NPUTorchairModelRunner(NPUModelRunner):
config = torchair.CompilerConfig()
if get_ascend_config().torchair_graph_config.mode:
config.mode = get_ascend_config().torchair_graph_config.mode
config.experimental_config.frozen_parameter = True
config.experimental_config.frozen_parameter = \
get_ascend_config().torchair_graph_config.enable_frozen_parameter
# enabling tiling_schedule_optimize on 300I Duo has some bugs, so we have to
# disable it on 300I Duo platform now.
config.experimental_config.tiling_schedule_optimize = not is_310p()