[E2E] add E2E for Prefix Caching cp & Chunked Prefill cp (#5149)

### What this PR does / why we need it?
Add E2E tests for Prefix Caching with context parallelism (CP) and for Chunked Prefill with CP.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
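The newly added E2E tests can be run with pytest, e.g.
`pytest tests/e2e/multicard/long_sequence/test_prefix_caching_cp.py`.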

- vLLM version: v0.15.0
- vLLM main: ad32e3e19c

---------

Signed-off-by: F.Liu <liufeng248@huawei.com>
Signed-off-by: Feng Liu <46866849+ader47@users.noreply.github.com>
Co-authored-by: F.Liu <liufeng248@huawei.com>
Author: Feng Liu
Date: 2026-02-03 15:04:14 +08:00 (committed by GitHub)
Parent: be5b66de6d
Commit: 03a18ad6fd
6 changed files with 404 additions and 123 deletions


@@ -1,122 +0,0 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os
import random
import string
from unittest.mock import patch

import pytest
from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner

MODELS = [
    "vllm-ascend/Qwen3-30B-A3B-W8A8",
    "vllm-ascend/DeepSeek-V2-Lite-W8A8",
]


def generate_prompts(input_len, batchsize):
    prompts = [
        " ".join(
            random.choice(string.ascii_letters) for _ in range(input_len))
        for _ in range(batchsize)
    ]
    return prompts


@patch.dict(
    os.environ, {
        "HCCL_BUFFSIZE": "768",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1",
        "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1"
    })
@pytest.mark.parametrize("model", MODELS)
def test_models_chunked_prefill_mixed_length_prompts_including_1_token(
        model: str):
    TEST_ROPE_PARAMETERS = {
        "rope_theta": 1000000,
        "rope_type": "yarn",
        "factor": 4,
        "original_max_position_embeddings": 32768
    }
    prompts = [
        generate_prompts(128 * 1024, 1)[0],
        generate_prompts(1, 1)[0],
        generate_prompts(9104, 1)[0],
    ]
    sampling_params = SamplingParams(max_tokens=1, temperature=0.0)
    with VllmRunner(
            model,
            enforce_eager=True,
            max_num_seqs=2,
            max_num_batched_tokens=131000,
            max_model_len=132000,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            block_size=128,
            quantization="ascend",
            hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS},
    ) as runner:
        runner.model.generate(prompts, sampling_params)


@patch.dict(
    os.environ, {
        "HCCL_BUFFSIZE": "768",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1",
        "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1"
    })
@pytest.mark.parametrize("model", MODELS)
def test_models_chunked_prefill_with_empty_kvcache(model: str):
    TEST_ROPE_PARAMETERS = {
        "rope_theta": 1000000,
        "rope_type": "yarn",
        "factor": 4,
        "original_max_position_embeddings": 32768
    }
    # Note(qcs): we use chunk_size=50, kv_cache_interleave_size=128
    # to simulate certain edge cases.
    prompts = [
        generate_prompts(128, 1)[0],
        generate_prompts(1, 1)[0],
        generate_prompts(130, 1)[0],
        generate_prompts(51, 1)[0],
        generate_prompts(129, 1)[0],
    ]
    sampling_params = SamplingParams(max_tokens=1, temperature=0.0)
    with VllmRunner(
            model,
            enforce_eager=True,
            max_num_seqs=2,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            long_prefill_token_threshold=50,
            block_size=128,
            cp_kv_cache_interleave_size=128,
            quantization="ascend",
            hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS},
    ) as runner:
        runner.model.generate(prompts, sampling_params)


@@ -0,0 +1,230 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
import os
import random
import string
from typing import Any, Dict
from unittest.mock import patch

import pytest
from vllm import SamplingParams

from tests.e2e.conftest import _LONG_PROMPTS, VllmRunner

MODELS = [
    "vllm-ascend/Qwen3-30B-A3B-W8A8",
    "vllm-ascend/DeepSeek-V2-Lite-W8A8",
]
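
# Per-model parallel settings: "TP" = tensor_parallel_size,
# "PCP" = prefill_context_parallel_size,
# "DCP" = decode_context_parallel_size (the VllmRunner keyword arguments
# used below). HCCL_BUFFSIZE (MB) is raised to 768 in these tests, likely
# to accommodate the larger communication buffers context parallelism needs.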
SETTINGS: Dict[str, Dict[str, Any]] = {
    "vllm-ascend/Qwen3-30B-A3B-W8A8": {
        "TP": 2,
        "PCP": 2,
        "DCP": 1,
        "quantization": "ascend",
    },
    "vllm-ascend/DeepSeek-V2-Lite-W8A8": {
        "TP": 2,
        "PCP": 2,
        "DCP": 2,
        "quantization": "ascend",
    }
}

# A prompt containing a large markdown table. The table is randomly
# generated by GPT-4.
with open(_LONG_PROMPTS[0], 'r', encoding='utf-8') as file:
    LONG_PROMPT = file.read()

INPUT_PROMPTS = [
    LONG_PROMPT +
    "Question: what is the age of John Doe? Your answer: The age of John Doe is ",
    LONG_PROMPT +
    "Question: what is the age of Alice Johnson? Your answer: The age of Alice Johnson is "
]
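# Expected greedy completions; the ages are presumably taken from the
# markdown table embedded in LONG_PROMPT.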
VLLM_OUTPUT = [INPUT_PROMPTS[0] + "29", INPUT_PROMPTS[1] + "27"]


def generate_prompts(input_len, batchsize):
    prompts = [
        " ".join(
            random.choice(string.ascii_letters) for _ in range(input_len))
        for _ in range(batchsize)
    ]
    return prompts


@patch.dict(
    os.environ, {
        "HCCL_BUFFSIZE": "768",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1",
        "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1"
    })
@pytest.mark.parametrize("model", MODELS)
def test_models_chunked_prefill_mixed_length_prompts_including_1_token(
        model: str):
    TEST_ROPE_PARAMETERS = {
        "rope_theta": 1000000,
        "rope_type": "yarn",
        "factor": 4,
        "original_max_position_embeddings": 32768
    }
    prompts = [
        generate_prompts(128 * 1024, 1)[0],
        generate_prompts(1, 1)[0],
        generate_prompts(9104, 1)[0],
    ]
    sampling_params = SamplingParams(max_tokens=1, temperature=0.0)
    with VllmRunner(
            model,
            enforce_eager=True,
            max_num_seqs=2,
            max_num_batched_tokens=131000,
            max_model_len=132000,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            block_size=128,
            quantization="ascend",
            hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS},
    ) as runner:
        runner.model.generate(prompts, sampling_params)


@patch.dict(
    os.environ, {
        "HCCL_BUFFSIZE": "768",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1",
        "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1"
    })
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.skip(reason="skip for bad adaptability with main2main")
def test_models_chunked_prefill_with_empty_kvcache(model: str):
    TEST_ROPE_PARAMETERS = {
        "rope_theta": 1000000,
        "rope_type": "yarn",
        "factor": 4,
        "original_max_position_embeddings": 32768
    }
    # Note(qcs): we use chunk_size=50, kv_cache_interleave_size=128
    # to simulate certain edge cases.
    prompts = [
        generate_prompts(128, 1)[0],
        generate_prompts(1, 1)[0],
        generate_prompts(130, 1)[0],
        generate_prompts(51, 1)[0],
        generate_prompts(129, 1)[0],
    ]
    sampling_params = SamplingParams(max_tokens=1, temperature=0.0)
    with VllmRunner(
            model,
            enforce_eager=True,
            max_num_seqs=2,
            tensor_parallel_size=2,
            prefill_context_parallel_size=2,
            decode_context_parallel_size=1,
            enable_expert_parallel=True,
            long_prefill_token_threshold=50,
            block_size=128,
            cp_kv_cache_interleave_size=128,
            quantization="ascend",
            hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS},
    ) as runner:
        runner.model.generate(prompts, sampling_params)
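

# The three tests below run the same chunked-prefill workload under context
# parallelism in three execution modes: eager (enforce_eager=True), piecewise
# graph capture (enforce_eager=False with default compilation), and full
# graph capture for decode (cudagraph_mode="FULL_DECODE_ONLY").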
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"})
def test_models_chunked_prefill_with_cp_basic(model: str,
                                              max_tokens: int) -> None:
    with VllmRunner(
            model,
            block_size=128,
            max_model_len=4096,
            enforce_eager=True,
            max_num_batched_tokens=128,
            enable_expert_parallel=True,
            enable_prefix_caching=False,
            enable_chunked_prefill=True,
            tensor_parallel_size=SETTINGS[model]['TP'],
            quantization=SETTINGS[model]["quantization"],
            prefill_context_parallel_size=SETTINGS[model]['PCP'],
            decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model:
        chunked_prefill_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(chunked_prefill_outputs)):
        assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"})
def test_models_chunked_prefill_with_cp_piecewise(model: str,
                                                  max_tokens: int) -> None:
    with VllmRunner(
            model,
            block_size=128,
            max_model_len=4096,
            enforce_eager=False,
            max_num_batched_tokens=128,
            enable_expert_parallel=True,
            enable_prefix_caching=False,
            enable_chunked_prefill=True,
            tensor_parallel_size=SETTINGS[model]['TP'],
            quantization=SETTINGS[model]["quantization"],
            prefill_context_parallel_size=SETTINGS[model]['PCP'],
            decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model:
        chunked_prefill_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(chunked_prefill_outputs)):
        assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"})
def test_models_chunked_prefill_with_cp_full_graph(model: str,
                                                   max_tokens: int) -> None:
    with VllmRunner(model,
                    block_size=128,
                    max_model_len=4096,
                    enforce_eager=False,
                    max_num_batched_tokens=128,
                    enable_expert_parallel=True,
                    enable_prefix_caching=False,
                    enable_chunked_prefill=True,
                    tensor_parallel_size=SETTINGS[model]['TP'],
                    quantization=SETTINGS[model]["quantization"],
                    prefill_context_parallel_size=SETTINGS[model]['PCP'],
                    decode_context_parallel_size=SETTINGS[model]['DCP'],
                    compilation_config={
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60],
                        "cudagraph_mode": "FULL_DECODE_ONLY"
                    }) as vllm_model:
        chunked_prefill_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(chunked_prefill_outputs)):
        assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i]


@@ -0,0 +1,135 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.
Run `pytest tests/e2e/multicard/long_sequence/test_prefix_caching_cp.py`
"""
from typing import Any, Dict

import pytest

from tests.e2e.conftest import _LONG_PROMPTS, VllmRunner

MODELS = [
    "vllm-ascend/Qwen3-30B-A3B-W8A8", "vllm-ascend/DeepSeek-V2-Lite-W8A8"
]
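
# Per-model parallel settings: "TP" = tensor_parallel_size,
# "PCP" = prefill_context_parallel_size,
# "DCP" = decode_context_parallel_size (the VllmRunner keyword arguments
# used below).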
SETTINGS: Dict[str, Dict[str, Any]] = {
    "vllm-ascend/Qwen3-30B-A3B-W8A8": {
        "TP": 2,
        "PCP": 2,
        "DCP": 1,
        "quantization": "ascend",
    },
    "vllm-ascend/DeepSeek-V2-Lite-W8A8": {
        "TP": 2,
        "PCP": 2,
        "DCP": 2,
        "quantization": "ascend",
    }
}

# A prompt containing a large markdown table. The table is randomly
# generated by GPT-4.
with open(_LONG_PROMPTS[0], 'r', encoding='utf-8') as file:
    LONG_PROMPT = file.read()

INPUT_PROMPTS = [
    LONG_PROMPT +
    "Question: what is the age of John Doe? Your answer: The age of John Doe is ",
    LONG_PROMPT +
    "Question: what is the age of Umar Black? Your answer: The age of Umar Black is "
]
VLLM_OUTPUT = [INPUT_PROMPTS[0] + "29", INPUT_PROMPTS[1] + "39"]
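

# Both prompts share LONG_PROMPT as a common prefix, so with prefix caching
# enabled the second request should reuse KV-cache blocks filled by the
# first. The three tests below check this in eager mode, with piecewise
# graph capture, and with full decode-only graph capture.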
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
def test_models_prefix_cache_with_cp_basic(
        model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("HCCL_BUFFSIZE", "768")
    with VllmRunner(
            model,
            block_size=128,
            enforce_eager=True,
            max_model_len=4096,
            enable_prefix_caching=True,
            enable_expert_parallel=True,
            max_num_batched_tokens=4096,
            tensor_parallel_size=SETTINGS[model]['TP'],
            quantization=SETTINGS[model]["quantization"],
            prefill_context_parallel_size=SETTINGS[model]['PCP'],
            decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model:
        prefix_cache_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(prefix_cache_outputs)):
        assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
def test_models_prefix_cache_with_cp_piecewise(
        model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("HCCL_BUFFSIZE", "768")
    with VllmRunner(
            model,
            block_size=128,
            max_model_len=4096,
            enforce_eager=False,
            enable_prefix_caching=True,
            enable_expert_parallel=True,
            max_num_batched_tokens=4096,
            tensor_parallel_size=SETTINGS[model]['TP'],
            quantization=SETTINGS[model]["quantization"],
            prefill_context_parallel_size=SETTINGS[model]['PCP'],
            decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model:
        prefix_cache_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(prefix_cache_outputs)):
        assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [2])
def test_models_prefix_cache_with_cp_full_graph(
        model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("HCCL_BUFFSIZE", "768")
    with VllmRunner(model,
                    block_size=128,
                    max_model_len=4096,
                    enforce_eager=False,
                    enable_prefix_caching=True,
                    enable_expert_parallel=True,
                    max_num_batched_tokens=4096,
                    tensor_parallel_size=SETTINGS[model]['TP'],
                    quantization=SETTINGS[model]["quantization"],
                    prefill_context_parallel_size=SETTINGS[model]['PCP'],
                    decode_context_parallel_size=SETTINGS[model]['DCP'],
                    compilation_config={
                        "cudagraph_capture_sizes": [4, 8, 24, 48, 60],
                        "cudagraph_mode": "FULL_DECODE_ONLY"
                    }) as vllm_model:
        prefix_cache_outputs = vllm_model.generate_greedy(
            INPUT_PROMPTS, max_tokens)
    for i in range(len(prefix_cache_outputs)):
        assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i]