xc-llm-ascend/tests/e2e/multicard/2-cards/test_disaggregated_encoder.py
Nengjun Ma 8b79d4de52 Main2main upgrade to vllm 0317 afternoon (#7409)
### What this PR does / why we need it?

1.fix "TypeError: get_attn_backend() remove variable": [Refactor
`check_and_update_config`](https://github.com/vllm-project/vllm/pull/35122)

2. Adapt to [Rename `compile_ranges_split_points` to
`compile_ranges_endpoints`](https://github.com/vllm-project/vllm/pull/36027).

3.fix "RuntimeError: device_allocator not a DeviceAllocator":[Replace
memory related torch.cuda
APIs"](https://github.com/vllm-project/vllm/pull/37031)

4. Adapt to [Support multiple KV groups in OffloadingSpec
](https://github.com/vllm-project/vllm/pull/36610), which removed
`self.offloaded_block_size`, changed `self.gpu_block_size` from a scalar
to a tuple of per-group block sizes, and added `block_size_factor` (see
the first sketch after this list).

5. Adapt to [Consolidate
SupportsEagle](https://github.com/vllm-project/vllm/pull/36063), which renamed
`get_eagle3_aux_hidden_state_layers()` to
`get_eagle3_default_aux_hidden_state_layers()` and added a
`supports_eagle3()` guard before calling it (see the second sketch after
this list).
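
A minimal sketch of the item-4 shape change, with hypothetical values: the names `gpu_block_size` and `block_size_factor` come from the PR text above, but the multiplier relationship between them is an assumption, not upstream's exact formula.

```python
# Hypothetical values; names follow the PR text, semantics are assumed.
gpu_block_size = (128, 128)  # now a tuple of per-KV-group sizes, not a scalar
block_size_factor = 2        # new field; assumed to scale GPU -> offloaded
# self.offloaded_block_size is gone; per-group sizes would be derived instead:
offloaded_block_sizes = tuple(b * block_size_factor for b in gpu_block_size)
assert offloaded_block_sizes == (256, 256)
```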
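
And a minimal sketch of the item-5 guard: the method names are taken from the PR text, the import path is an assumption, and `model` stands in for whatever object the adapted code calls this on.

```python
# Assumed import path; supports_eagle3() is the guard the upstream PR added.
from vllm.model_executor.models.interfaces import supports_eagle3


def aux_hidden_state_layers(model) -> tuple[int, ...]:
    # Only query the renamed default-layers hook on EAGLE3-capable models.
    if supports_eagle3(model):
        return model.get_eagle3_default_aux_hidden_state_layers()
    return ()
```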

### Does this PR introduce _any_ user-facing change?
NA
### How was this patch tested?
E2E


- vLLM version: v0.17.0
- vLLM main: 8a680463fa

---------

Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Claude Code <noreply@anthropic.com>
2026-03-18 23:24:27 +08:00


# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import DisaggEpdProxy, RemoteEPDServer
from tools.send_mm_request import send_image_request

MODELS = [
    "Qwen/Qwen2.5-VL-7B-Instruct",
]
SHARED_STORAGE_PATH = "/dev/shm/epd/storage"
TENSOR_PARALLELS = [1]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    from vllm_ascend.utils import vllm_version_is

    # This test is pinned to vLLM v0.17.0; other versions are skipped.
    if not vllm_version_is("0.17.0"):
        pytest.skip(
            "EPLB output is different without EPLB, see issue: https://github.com/vllm-project/vllm-ascend/issues/7408",
        )

    encode_port = get_open_port()
    pd_port = get_open_port()

    # Two servers: an encoder-cache producer (tiny GPU memory share, single
    # sequence) and a prefill/decode consumer, linked through a
    # shared-storage EC connector.
    vllm_server_args = [
        [
            "--port",
            str(encode_port),
            "--model",
            model,
            "--gpu-memory-utilization",
            "0.01",
            "--tensor-parallel-size",
            str(tp_size),
            "--enforce-eager",
            "--no-enable-prefix-caching",
            "--max-model-len",
            "10000",
            "--max-num-batched-tokens",
            "10000",
            "--max-num-seqs",
            "1",
            "--ec-transfer-config",
            '{"ec_connector_extra_config":{"shared_storage_path":"' +
            SHARED_STORAGE_PATH +
            '"},"ec_connector":"ECExampleConnector","ec_role": "ec_producer"}',
        ],
        [
            "--port",
            str(pd_port),
            "--model",
            model,
            "--gpu-memory-utilization",
            "0.95",
            "--tensor-parallel-size",
            str(tp_size),
            "--enforce-eager",
            "--max-model-len",
            "10000",
            "--max-num-batched-tokens",
            "10000",
            "--max-num-seqs",
            "128",
            "--ec-transfer-config",
            '{"ec_connector_extra_config":{"shared_storage_path":"' +
            SHARED_STORAGE_PATH +
            '"},"ec_connector":"ECExampleConnector","ec_role": "ec_consumer"}',
        ],
    ]

    # The proxy fronts the encode and decode servers; the prefill tier is
    # disabled for this disaggregated-encoder setup.
    proxy_port = get_open_port()
    proxy_args = [
        "--host",
        "127.0.0.1",
        "--port",
        str(proxy_port),
        "--encode-servers-urls",
        f"http://localhost:{encode_port}",
        "--decode-servers-urls",
        f"http://localhost:{pd_port}",
        "--prefill-servers-urls",
        "disable",
    ]

    with RemoteEPDServer(vllm_serve_args=vllm_server_args) as _, \
            DisaggEpdProxy(proxy_args=proxy_args) as proxy:
        send_image_request(model, proxy)