[Misc] Clean up useless patch (#3320)
### What this PR does / why we need it? 1. clean up v0.10.2 support in ut and e2e test 2. remove v0.11.0 period job, we're at v0.11.0 now. 3. remove useless patch for deepseek v3.2. They have been done in vLLM already. ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0 Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -32,14 +32,7 @@ from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
|
||||
BatchEncoding, BatchFeature)
|
||||
from transformers.models.auto.auto_factory import _BaseAutoModelClass
|
||||
from vllm import LLM, SamplingParams
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
if vllm_version_is("0.10.2"):
|
||||
from vllm.config import TaskOption, _get_and_verify_dtype
|
||||
else:
|
||||
from vllm.config.model import TaskOption, _get_and_verify_dtype
|
||||
|
||||
from vllm.config.model import TaskOption, _get_and_verify_dtype
|
||||
from vllm.inputs import TextPrompt
|
||||
from vllm.outputs import RequestOutput
|
||||
from vllm.transformers_utils.utils import maybe_model_redirect
|
||||
|
||||
@@ -19,12 +19,7 @@
|
||||
|
||||
from typing import Dict, List, Optional, Sequence, Tuple, Union
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
if vllm_version_is("0.10.2"):
|
||||
from vllm.sequence import PromptLogprobs, SampleLogprobs
|
||||
else:
|
||||
from vllm.logprobs import PromptLogprobs, SampleLogprobs
|
||||
from vllm.logprobs import PromptLogprobs, SampleLogprobs
|
||||
|
||||
TokensText = Tuple[List[int], str]
|
||||
|
||||
|
||||
@@ -22,15 +22,8 @@ from typing import Any, Dict
|
||||
import jsonschema
|
||||
import pytest
|
||||
import regex as re
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
if vllm_version_is("0.10.2"):
|
||||
from vllm.sampling_params import GuidedDecodingParams, SamplingParams
|
||||
else:
|
||||
from vllm.sampling_params import SamplingParams, StructuredOutputsParams
|
||||
|
||||
from vllm.outputs import RequestOutput
|
||||
from vllm.sampling_params import SamplingParams, StructuredOutputsParams
|
||||
|
||||
from tests.e2e.conftest import VllmRunner
|
||||
|
||||
@@ -91,27 +84,16 @@ def sample_json_schema():
|
||||
def test_guided_json_completion(guided_decoding_backend: str,
|
||||
sample_json_schema):
|
||||
runner_kwargs: Dict[str, Any] = {}
|
||||
if vllm_version_is("0.10.2"):
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=500,
|
||||
guided_decoding=GuidedDecodingParams(json=sample_json_schema))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"guided_decoding_backend": guided_decoding_backend,
|
||||
}
|
||||
else:
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=500,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
json=sample_json_schema))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"structured_outputs_config": {
|
||||
"backend": guided_decoding_backend
|
||||
},
|
||||
}
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=500,
|
||||
structured_outputs=StructuredOutputsParams(json=sample_json_schema))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"structured_outputs_config": {
|
||||
"backend": guided_decoding_backend
|
||||
},
|
||||
}
|
||||
with VllmRunner(MODEL_NAME, **runner_kwargs) as vllm_model:
|
||||
prompts = [
|
||||
f"Give an example JSON for an employee profile "
|
||||
@@ -141,26 +123,16 @@ def test_guided_regex(guided_decoding_backend: str, sample_regex):
|
||||
if guided_decoding_backend == "outlines":
|
||||
pytest.skip("Outlines doesn't support regex-based guided decoding.")
|
||||
runner_kwargs: Dict[str, Any] = {}
|
||||
if vllm_version_is("0.10.2"):
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
guided_decoding=GuidedDecodingParams(regex=sample_regex))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"guided_decoding_backend": guided_decoding_backend,
|
||||
}
|
||||
else:
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
structured_outputs=StructuredOutputsParams(regex=sample_regex))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"structured_outputs_config": {
|
||||
"backend": guided_decoding_backend
|
||||
},
|
||||
}
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
structured_outputs=StructuredOutputsParams(regex=sample_regex))
|
||||
runner_kwargs = {
|
||||
"seed": 0,
|
||||
"structured_outputs_config": {
|
||||
"backend": guided_decoding_backend
|
||||
},
|
||||
}
|
||||
|
||||
with VllmRunner(MODEL_NAME, **runner_kwargs) as vllm_model:
|
||||
prompts = [
|
||||
|
||||
Reference in New Issue
Block a user