[Misc] Remove redundant CP variables after FIA operator is enabled for CANN 8.5 (#6013)
### What this PR does / why we need it?
PCP/DCP splits the kv-cache across different cards. With the parameter
`cp_kv_cache_interleave_size`, the first `cp_kv_cache_interleave_size`
tokens are cached on card 0, the next block on card 1, and so on.
However, when a sequence has too few tokens, some cards end up storing
no key-value pairs at all, so their kv-cache slots hold zeros or
garbage values, which causes precision issues. Until now, additional
operations were in place to work around this precision problem.
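For intuition, here is a minimal sketch of the interleaved placement described above. The helper name and the block-cyclic mapping are illustrative only; the actual placement logic lives in vllm-ascend's CP kv-cache code:

```python
def cp_rank_for_token(token_idx: int, interleave_size: int,
                      cp_world_size: int) -> int:
    """Block-cyclic placement sketch: tokens [0, interleave_size) go to
    rank 0, the next interleave_size tokens to rank 1, ..., wrapping around."""
    return (token_idx // interleave_size) % cp_world_size


# With interleave_size=128 and 2 CP ranks, a 100-token sequence places
# every token on rank 0; rank 1 holds no valid kv entries, so its cache
# slots keep whatever they were initialized with (zeros or stale data).
assert all(cp_rank_for_token(t, 128, 2) == 0 for t in range(100))
```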
Now that the FIA operator is integrated in `mla_cp._forward_decode` and
CANN has been updated to 8.5.0, these additional operations can be removed.
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Passed all CI with CANN 8.5.0.
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
Signed-off-by: dsxsteven <dsxsteven@sina.com>
Signed-off-by: dsxsteven <36877507+dsxsteven@users.noreply.github.com>
```diff
@@ -210,3 +210,72 @@ def test_accuracy_pcp_only(max_tokens: int, ) -> None:
         name_0="vllm_eager_outputs",
         name_1="vllm_pcp_only_outputs",
     )
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("max_tokens", [10])
+def test_models_long_sequence_cp_kv_interleave_size_output_between_tp_and_cp(
+        model: str,
+        max_tokens: int,
+) -> None:
+    prompts = [
+        "The president of the United States is", "The capital of France is"
+    ]
+
+    common_kwargs = {
+        "max_model_len": 1024,
+    }
+
+    if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
+        cp_kwargs = {
+            "tensor_parallel_size": 2,
+            "decode_context_parallel_size": 2,
+            "prefill_context_parallel_size": 2,
+            "enable_expert_parallel": True,
+            "cp_kv_cache_interleave_size": 128,
+            "enforce_eager": True,
+            "quantization": "ascend",
+        }
+        tp_kwargs = {
+            "tensor_parallel_size": 4,
+            "enable_expert_parallel": True,
+            "enforce_eager": True,
+            "quantization": "ascend",
+        }
+
+    else:
+        cp_kwargs = {
+            "tensor_parallel_size": 1,
+            "decode_context_parallel_size": 1,
+            "prefill_context_parallel_size": 2,
+            "cp_kv_cache_interleave_size": 128,
+            "compilation_config": {
+                "cudagraph_mode": "FULL_DECODE_ONLY",
+                "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
+            },
+        }
+        tp_kwargs = {
+            "tensor_parallel_size": 2,
+            "enforce_eager": True,
+        }
+
+    cp_full_kwargs = {}
+    cp_full_kwargs.update(common_kwargs)  # type: ignore
+    cp_full_kwargs.update(cp_kwargs)  # type: ignore
+
+    tp_full_kwargs = {}
+    tp_full_kwargs.update(common_kwargs)  # type: ignore
+    tp_full_kwargs.update(tp_kwargs)  # type: ignore
+    with VllmRunner(model, **cp_full_kwargs) as runner:  # type: ignore
+        vllm_context_parallel_outputs = runner.generate_greedy(
+            prompts, max_tokens)
+
+    with VllmRunner(model, **tp_full_kwargs) as runner:  # type: ignore
+        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)
+
+    check_outputs_equal(
+        outputs_0_lst=vllm_eager_outputs,
+        outputs_1_lst=vllm_context_parallel_outputs,
+        name_0="vllm_eager_outputs",
+        name_1="vllm_context_parallel_outputs",
+    )
```
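As a side note, the kwargs merging in the new test could equivalently use dict unpacking instead of repeated `update` calls; a purely stylistic, behaviorally equivalent sketch:

```python
cp_full_kwargs = {**common_kwargs, **cp_kwargs}
tp_full_kwargs = {**common_kwargs, **tp_kwargs}
```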