[Misc] Clean up useless patch (#3320)

### What this PR does / why we need it?
1. clean up v0.10.2 support in unit tests and e2e tests
2. remove the v0.11.0 periodic job; we're on v0.11.0 now.
3. remove useless patches for DeepSeek v3.2. They have already been done in
vLLM.
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?


- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-10-09 14:07:26 +08:00
committed by GitHub
parent a43e2f61e1
commit 1c5b302f0d
10 changed files with 29 additions and 412 deletions

View File

@@ -32,14 +32,7 @@ from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
BatchEncoding, BatchFeature)
from transformers.models.auto.auto_factory import _BaseAutoModelClass
from vllm import LLM, SamplingParams
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.10.2"):
from vllm.config import TaskOption, _get_and_verify_dtype
else:
from vllm.config.model import TaskOption, _get_and_verify_dtype
from vllm.config.model import TaskOption, _get_and_verify_dtype
from vllm.inputs import TextPrompt
from vllm.outputs import RequestOutput
from vllm.transformers_utils.utils import maybe_model_redirect

View File

@@ -19,12 +19,7 @@
from typing import Dict, List, Optional, Sequence, Tuple, Union
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.10.2"):
from vllm.sequence import PromptLogprobs, SampleLogprobs
else:
from vllm.logprobs import PromptLogprobs, SampleLogprobs
from vllm.logprobs import PromptLogprobs, SampleLogprobs
TokensText = Tuple[List[int], str]

View File

@@ -22,15 +22,8 @@ from typing import Any, Dict
import jsonschema
import pytest
import regex as re
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.10.2"):
from vllm.sampling_params import GuidedDecodingParams, SamplingParams
else:
from vllm.sampling_params import SamplingParams, StructuredOutputsParams
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams, StructuredOutputsParams
from tests.e2e.conftest import VllmRunner
@@ -91,27 +84,16 @@ def sample_json_schema():
def test_guided_json_completion(guided_decoding_backend: str,
sample_json_schema):
runner_kwargs: Dict[str, Any] = {}
if vllm_version_is("0.10.2"):
sampling_params = SamplingParams(
temperature=1.0,
max_tokens=500,
guided_decoding=GuidedDecodingParams(json=sample_json_schema))
runner_kwargs = {
"seed": 0,
"guided_decoding_backend": guided_decoding_backend,
}
else:
sampling_params = SamplingParams(
temperature=1.0,
max_tokens=500,
structured_outputs=StructuredOutputsParams(
json=sample_json_schema))
runner_kwargs = {
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend
},
}
sampling_params = SamplingParams(
temperature=1.0,
max_tokens=500,
structured_outputs=StructuredOutputsParams(json=sample_json_schema))
runner_kwargs = {
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend
},
}
with VllmRunner(MODEL_NAME, **runner_kwargs) as vllm_model:
prompts = [
f"Give an example JSON for an employee profile "
@@ -141,26 +123,16 @@ def test_guided_regex(guided_decoding_backend: str, sample_regex):
if guided_decoding_backend == "outlines":
pytest.skip("Outlines doesn't support regex-based guided decoding.")
runner_kwargs: Dict[str, Any] = {}
if vllm_version_is("0.10.2"):
sampling_params = SamplingParams(
temperature=0.8,
top_p=0.95,
guided_decoding=GuidedDecodingParams(regex=sample_regex))
runner_kwargs = {
"seed": 0,
"guided_decoding_backend": guided_decoding_backend,
}
else:
sampling_params = SamplingParams(
temperature=0.8,
top_p=0.95,
structured_outputs=StructuredOutputsParams(regex=sample_regex))
runner_kwargs = {
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend
},
}
sampling_params = SamplingParams(
temperature=0.8,
top_p=0.95,
structured_outputs=StructuredOutputsParams(regex=sample_regex))
runner_kwargs = {
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend
},
}
with VllmRunner(MODEL_NAME, **runner_kwargs) as vllm_model:
prompts = [

View File

@@ -8,7 +8,6 @@ from vllm_ascend.ops.moe.fused_moe_prepare_and_finalize import (
FusedMoEPrepareAndFinalizeWithAll2All,
FusedMoEPrepareAndFinalizeWithAllGather, FusedMoEPrepareAndFinalizeWithMC2,
FusedMoEPrepareAndFinalizeWithNaiveMulticast)
from vllm_ascend.utils import vllm_version_is
class TestFusedMoEPrepareAndFinalize(unittest.TestCase):
@@ -231,12 +230,8 @@ class TestFusedMoEPrepareAndFinalize(unittest.TestCase):
mock_get_dp_group):
# Mock forward context with DP metadata
mock_context = MagicMock()
if vllm_version_is("0.10.2"):
mock_context.dp_metadata.cu_tokens_across_dp_cpu = torch.tensor(
[2, 5, 7])
else:
mock_context.dp_metadata.cu_tokens_across_sp.return_value = torch.tensor(
[2, 5, 7])
mock_context.dp_metadata.cu_tokens_across_sp.return_value = torch.tensor(
[2, 5, 7])
mock_get_forward_context.return_value = mock_context
# Setup DP group mock

View File

@@ -28,7 +28,7 @@ from vllm_ascend.ops.fused_moe import (AscendFusedMoE,
AscendUnquantizedFusedMoEMethod)
from vllm_ascend.ops.moe.experts_selector import select_experts
from vllm_ascend.ops.moe.moe_mlp import cumsum_group_list, unified_apply_mlp
from vllm_ascend.utils import AscendSocVersion, adapt_patch, vllm_version_is
from vllm_ascend.utils import AscendSocVersion, adapt_patch
adapt_patch(True)
@@ -92,11 +92,7 @@ def mock_dist_env(mocker: MockerFixture):
return hidden_states
mock_moe_comm_method.finalize.side_effect = mock_finalize
if vllm_version_is("0.10.2"):
dp_metadata = MagicMock(cu_tokens_across_dp_cpu=[5, 10])
else:
dp_metadata = MagicMock(num_tokens_across_dp_cpu=[5, 5])
dp_metadata = MagicMock(num_tokens_across_dp_cpu=[5, 5])
mock_forward_context_obj = MagicMock(moe_comm_method=mock_moe_comm_method,
moe_comm_type=MoECommType.MC2,
max_tokens_across_dp=10,

View File

@@ -27,7 +27,7 @@ from vllm_ascend.quantization.quant_config import AscendFusedMoEMethod
from vllm_ascend.torchair.ops.torchair_fused_moe import (
TorchairAscendFusedMoE, TorchairAscendUnquantizedFusedMoEMethod)
from vllm_ascend.utils import adapt_patch # noqa E402
from vllm_ascend.utils import AscendSocVersion, vllm_version_is
from vllm_ascend.utils import AscendSocVersion
adapt_patch(True)
@@ -54,10 +54,7 @@ def mock_dp_and_tp_group(mocker):
@pytest.fixture
def mock_dist_env(mocker: MockerFixture):
# init dist env patch
if vllm_version_is("0.10.2"):
dp_metadata = MagicMock(cu_tokens_across_dp_cpu=[5, 10])
else:
dp_metadata = MagicMock(num_tokens_across_dp_cpu=[5, 5])
dp_metadata = MagicMock(num_tokens_across_dp_cpu=[5, 5])
with patch('torch.distributed.get_rank', return_value=0), \
patch('torch.distributed.get_world_size', return_value=4), \