[Bugfix] fix pcp + eplb error (#5561)
### What this PR does / why we need it?
Fix bugs in the PCP overlap feature (see the sketch after this list):
1. Fix the PCP + EPLB overlap error by including the PCP size in the world_size calculation.
2. In the PCP pooling scenario, add a hint message for setting `cp_kv_cache_interleave_size`.
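A minimal sketch of the two points above, under stated assumptions: the function and parameter names (`eplb_world_size`, `check_pcp_interleave_size`, `tp_size`, `dp_size`, `pcp_size`) are illustrative and are not the actual vllm-ascend identifiers; it shows only the shape of the fix, not the real implementation.

```python
# Hypothetical sketch only; names are illustrative, not vllm-ascend's own.

def eplb_world_size(tp_size: int, dp_size: int, pcp_size: int) -> int:
    """World size used when building the EPLB expert map.

    Item 1 amounts to multiplying in the PCP size: omitting it made EPLB's
    rank count disagree with the actual launch whenever PCP > 1.
    """
    return tp_size * dp_size * pcp_size


def check_pcp_interleave_size(pcp_size: int,
                              cp_kv_cache_interleave_size: int) -> None:
    """Item 2: hint users to set cp_kv_cache_interleave_size when PCP is on."""
    if pcp_size > 1 and cp_kv_cache_interleave_size <= 0:
        raise ValueError(
            "cp_kv_cache_interleave_size must be configured when prefill "
            "context parallel (PCP) is enabled.")
```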
- vLLM version: v0.13.0
- vLLM main: 7157596103
Signed-off-by: weiguihua2 <weiguihua2@huawei.com>
@@ -24,7 +24,6 @@ import pytest
from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal
from vllm_ascend.utils import vllm_version_is

MODELS = [
    "Qwen/Qwen3-8B",

@@ -32,8 +31,6 @@ MODELS = [
]


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [10])
def test_models_long_sequence_output_between_tp_and_cp(

@@ -23,17 +23,13 @@ Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.

import os

import pytest
from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from vllm_ascend.utils import vllm_version_is

os.environ["HCCL_BUFFSIZE"] = "768"


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_models_pcp_dcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -67,8 +63,6 @@ def test_models_pcp_dcp_basic():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_models_pcp_dcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -106,8 +100,6 @@ def test_models_pcp_dcp_full_graph():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_models_pcp_dcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -139,8 +131,6 @@ def test_models_pcp_dcp_piece_wise():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -160,8 +150,6 @@ def test_pcp_basic():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -185,8 +173,6 @@ def test_pcp_full_graph():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -206,8 +192,6 @@ def test_pcp_piece_wise():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_dcp_basic():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -227,8 +211,6 @@ def test_dcp_basic():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_dcp_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -252,8 +234,6 @@ def test_dcp_full_graph():
        runner.model.generate(prompts, sampling_params)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_dcp_piece_wise():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -19,16 +19,11 @@

import os

import pytest

from tests.e2e.conftest import VllmRunner
from vllm_ascend.utils import vllm_version_is

os.environ["HCCL_BUFFSIZE"] = "512"


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_dcp_mtp1_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -53,8 +48,6 @@ def test_pcp_dcp_mtp1_eager():
        runner.generate_greedy(prompts, 32)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_dcp_mtp3_eager():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -79,8 +72,6 @@ def test_pcp_dcp_mtp3_eager():
        runner.generate_greedy(prompts, 32)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_dcp_mtp3_piecewise_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -108,8 +99,6 @@ def test_pcp_dcp_mtp3_piecewise_graph():
        runner.generate_greedy(prompts, 32)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_pcp_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",

@@ -137,8 +126,6 @@ def test_pcp_dcp_mtp3_full_graph():
        runner.generate_greedy(prompts, 32)


@pytest.mark.skipif(vllm_version_is('0.12.0'),
                    reason="0.12.0 is not supported for context sequence.")
def test_dcp_mtp3_full_graph():
    prompts = [
        "The capital of France is", "Hello, my name is Tom, I am",