### What this PR does / why we need it?
Upgrades the vLLM commit to 0318.
Main content: adds a pre-operation to several test cases that cleans up NPU
memory and waits (default max 50s) for the cleanup to complete. These test
cases previously failed because earlier test cases did not release NPU memory
in time.
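
As an illustration, a minimal sketch of what such a wait-and-clean pre-operation could look like as a test decorator is shown below. The actual helper is `wait_until_npu_memory_free` from `tests/e2e/conftest.py`; its implementation is not part of this diff, so everything in the body (the `torch_npu` calls, the `timeout_s` parameter, the polling loop) is an assumption rather than the real code.

```python
# Hypothetical sketch only; the real wait_until_npu_memory_free lives in
# tests/e2e/conftest.py and may differ in signature and behavior.
import functools
import gc
import time

import torch_npu  # assumed available in the Ascend test environment


def wait_until_npu_memory_free(target_free_percentage=0.95, timeout_s=50):
    """Free cached NPU memory before the wrapped test runs, then poll until
    `target_free_percentage` of device memory is free or `timeout_s`
    (the PR's default max of 50s) expires."""

    def decorator(fn):

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            gc.collect()
            torch_npu.npu.empty_cache()  # drop cached allocator blocks
            deadline = time.monotonic() + timeout_s
            while time.monotonic() < deadline:
                # Assumed to mirror torch.cuda.mem_get_info: (free, total)
                free, total = torch_npu.npu.mem_get_info()
                if free / total >= target_free_percentage:
                    break
                time.sleep(1)
            return fn(*args, **kwargs)

        return wrapper

    return decorator
```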
### Does this PR introduce _any_ user-facing change?
NA
### How was this patch tested?
NA
- vLLM version: v0.17.0
- vLLM main:
4497431df6
---------
Signed-off-by: leo-pony <nengjunma@outlook.com>
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
|
|
Compare the outputs of vLLM with and without aclgraph.
|
|
|
|
Run `pytest tests/multicard/test_data_parallel.py`.
|
|
"""
import os
import subprocess
import sys
from unittest.mock import patch

import pytest

from tests.e2e.conftest import wait_until_npu_memory_free

MODELS = ["Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8"]


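# Run on NPUs 0 and 1 only, with the HCCL buffer size pinned via env vars.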
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [32])
@patch.dict(os.environ, {"ASCEND_RT_VISIBLE_DEVICES": "0,1"})
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "1024"})
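# Pre-operation added in this PR: clean up NPU memory and wait (default
# max 50s) until 95% of it is free, so memory still held by earlier test
# cases cannot fail this one.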
@wait_until_npu_memory_free(target_free_percentage=0.95)
def test_qwen3_inference_dp2(model, max_tokens):
    moe_models = ["Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8"]
    quantization_models = ["vllm-ascend/Qwen3-30B-A3B-W8A8"]
    script = "examples/offline_data_parallel.py"

    env = os.environ.copy()

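    # Launch the offline data-parallel example: DP=2, TP=1, one node, rank 0.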
    cmd = [
        sys.executable,
        script,
        "--model",
        model,
        "--dp-size",
        "2",
        "--tp-size",
        "1",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--trust-remote-code",
    ]

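    # MoE models run with expert parallelism; the W8A8 checkpoint
    # additionally uses Ascend quantization.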
    if model in moe_models:
        cmd.append("--enable-expert-parallel")
    if model in quantization_models:
        cmd.append("--quantization")
        cmd.append("ascend")

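    # Run the example, merging stderr into stdout, with a 10-minute timeout.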
    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(cmd,
                          env=env,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          timeout=600)
    output = proc.stdout.decode(errors="ignore")

    print(output)

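    # Both DP ranks must have been assigned prompts and the run must have
    # generated text.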
assert "DP rank 0 needs to process" in output
|
|
assert "DP rank 1 needs to process" in output
|
|
assert "Generated text:" in output
|
|
assert proc.returncode == 0
|