#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
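"""Unit tests for vllm_ascend.ascend_config: init, get, clear and option parsing."""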

from unittest.mock import patch

from vllm.config import VllmConfig

from tests.ut.base import TestBase
from vllm_ascend.ascend_config import (clear_ascend_config, get_ascend_config,
                                       init_ascend_config)


class TestAscendConfig(TestBase):
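
    # Helper decorator: reset the process-global ascend config before and
    # after each wrapped test so that individual cases do not leak state.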
    @staticmethod
    def _clean_up_ascend_config(func):

        def wrapper(*args, **kwargs):
            clear_ascend_config()
            func(*args, **kwargs)
            clear_ascend_config()

        return wrapper
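
    # NPUPlatform._fix_incompatible_config is patched out in the tests below;
    # this keeps them focused on the ascend config parsing itself rather than
    # platform-side config adjustments.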
    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_without_additional_config(
            self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
        # No additional config given, check the default value here.
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertFalse(ascend_config.multistream_overlap_shared_expert)
        self.assertFalse(ascend_config.enable_kv_nz)

        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertTrue(ascend_compilation_config.fuse_norm_quant)

        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertTrue(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_with_additional_config(
            self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
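        # Override the defaults through additional_config and verify that the
        # overrides are picked up by init_ascend_config.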
        test_vllm_config.additional_config = {
"ascend_compilation_config": {
|
2025-12-11 17:53:43 +08:00
|
|
|
"fuse_norm_quant": False,
|
            },
            "ascend_fusion_config": {
                "fusion_ops_gmmswigluquant": False,
            },
            "multistream_overlap_shared_expert": True,
            "eplb_config": {"num_redundant_experts": 2},
            "refresh": True,
            "enable_kv_nz": False,
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(ascend_config.eplb_config.num_redundant_experts, 2)
        self.assertTrue(ascend_config.multistream_overlap_shared_expert)

        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertFalse(ascend_compilation_config.fuse_norm_quant)
        self.assertFalse(ascend_config.enable_kv_nz)
        self.assertTrue(ascend_compilation_config.enable_npugraph_ex)
        self.assertFalse(ascend_compilation_config.enable_static_kernel)

        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertFalse(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_enable_npugraph_ex(
            self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
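        # Explicitly turn on the npugraph_ex path together with the static
        # kernel and make sure both flags survive the config round trip.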
        test_vllm_config.additional_config = {
            "ascend_compilation_config": {
                "enable_npugraph_ex": True,
                "enable_static_kernel": True
            },
            "refresh": True
        }
        ascend_compilation_config = init_ascend_config(
            test_vllm_config).ascend_compilation_config
        self.assertTrue(ascend_compilation_config.enable_npugraph_ex)
        self.assertTrue(ascend_compilation_config.enable_static_kernel)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_get_ascend_config(self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)

    @_clean_up_ascend_config
    def test_get_ascend_config_without_init(self):
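        # get_ascend_config() should raise RuntimeError when
        # init_ascend_config() has never been called.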
        with self.assertRaises(RuntimeError):
            get_ascend_config()

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_clear_ascend_config(self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)
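        # After clear_ascend_config(), the module-level config is gone and the
        # accessor raises again.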
        clear_ascend_config()
        with self.assertRaises(RuntimeError):
            get_ascend_config()