diff --git a/docs/source/user_guide/configuration/additional_config.md b/docs/source/user_guide/configuration/additional_config.md index d6b626f..eddb1c4 100644 --- a/docs/source/user_guide/configuration/additional_config.md +++ b/docs/source/user_guide/configuration/additional_config.md @@ -48,6 +48,7 @@ The details of each config option are as follows: | `enable_multistream_mla`| bool | `False` | Whether to put vector ops of MLA to another stream. This option only takes effects on models using MLA (e.g., DeepSeek). | | `enable_multistream_moe`| bool | `False` | Whether to enable multistream shared expert. This option only takes effects on DeepSeek moe models. | | `enable_view_optimize` | bool | `True` | Whether to enable torchair view optimization | +| `enable_frozen_parameter` | bool | `True` | Whether to freeze the memory address of weights during inference to reduce the input address refresh time during graph execution. | | `use_cached_graph` | bool | `False` | Whether to use cached graph | | `graph_batch_sizes` | list[int] | `[]` | The batch size for torchair graph cache | | `graph_batch_sizes_init` | bool | `False` | Init graph batch size dynamically if `graph_batch_sizes` is empty | diff --git a/tests/ut/test_ascend_config.py b/tests/ut/test_ascend_config.py index 4abec5d..afc3883 100644 --- a/tests/ut/test_ascend_config.py +++ b/tests/ut/test_ascend_config.py @@ -53,6 +53,7 @@ class TestAscendConfig(TestBase): self.assertFalse(torchair_graph_config.enable_multistream_mla) self.assertFalse(torchair_graph_config.enable_multistream_moe) self.assertTrue(torchair_graph_config.enable_view_optimize) + self.assertTrue(torchair_graph_config.enable_frozen_parameter) self.assertFalse(torchair_graph_config.enable_kv_nz) ascend_scheduler_config = ascend_config.ascend_scheduler_config @@ -70,6 +71,7 @@ class TestAscendConfig(TestBase): "enable_multistream_mla": True, "enable_multistream_moe": True, "enable_view_optimize": True, + "enable_frozen_parameter": True, 
"enable_kv_nz": True }, "ascend_scheduler_config": { @@ -89,6 +91,7 @@ class TestAscendConfig(TestBase): self.assertTrue(torchair_graph_config.enable_multistream_mla) self.assertTrue(torchair_graph_config.enable_multistream_moe) self.assertTrue(torchair_graph_config.enable_view_optimize) + self.assertTrue(torchair_graph_config.enable_frozen_parameter) self.assertTrue(torchair_graph_config.enable_kv_nz) ascend_scheduler_config = ascend_config.ascend_scheduler_config diff --git a/vllm_ascend/ascend_config.py b/vllm_ascend/ascend_config.py index 4fba304..c54dec7 100644 --- a/vllm_ascend/ascend_config.py +++ b/vllm_ascend/ascend_config.py @@ -114,6 +114,8 @@ class TorchairGraphConfig: "enable_multistream_moe", False) self.enable_view_optimize = torchair_graph_config.get( "enable_view_optimize", True) + self.enable_frozen_parameter = torchair_graph_config.get( + "enable_frozen_parameter", True) self.enable_kv_nz = torchair_graph_config.get("enable_kv_nz", False) if not isinstance(self.graph_batch_sizes, list): diff --git a/vllm_ascend/torchair/torchair_model_runner.py b/vllm_ascend/torchair/torchair_model_runner.py index 3168170..e43d912 100644 --- a/vllm_ascend/torchair/torchair_model_runner.py +++ b/vllm_ascend/torchair/torchair_model_runner.py @@ -359,7 +359,8 @@ class NPUTorchairModelRunner(NPUModelRunner): config = torchair.CompilerConfig() if get_ascend_config().torchair_graph_config.mode: config.mode = get_ascend_config().torchair_graph_config.mode - config.experimental_config.frozen_parameter = True + config.experimental_config.frozen_parameter = \ + get_ascend_config().torchair_graph_config.enable_frozen_parameter # enabling tiling_schedule_optimize on 300I Duo has some bugs, so we have to # disable it on 300I Duo platform now. config.experimental_config.tiling_schedule_optimize = not is_310p()