[Misc] upgrade to vllm main (#6646)
### What this PR does / why we need it? This PR upgrades the core vLLM dependency to a newer version from the main branch (`13397841ab469cecf1ed425c3f52a9ffc38139b5`). This is necessary to keep our project up-to-date with the latest features and fixes from upstream vLLM. 1. The `ac32e66cf9` pass file is moved. - vLLM version: v0.15.0 - vLLM main: `d7e17aaacd` --------- Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com> Signed-off-by: wxsIcey <1790571317@qq.com> Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com> Co-authored-by: wxsIcey <1790571317@qq.com>
This commit is contained in:
@@ -1,6 +1,8 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import pytest
|
||||
|
||||
import vllm
|
||||
import vllm.config
|
||||
from vllm.lora.request import LoRARequest
|
||||
@@ -121,6 +123,7 @@ def generate_and_test(llm,
|
||||
print("removing lora")
|
||||
|
||||
|
||||
@pytest.mark.skip(reason="fix me")
|
||||
@patch.dict("os.environ", {"VLLM_USE_MODELSCOPE": "False"})
|
||||
def test_llama_lora(llama32_lora_files):
|
||||
vllm_model = VllmRunner(
|
||||
|
||||
Reference in New Issue
Block a user