[Misc] upgrade to vllm main (#6646)

### What this PR does / why we need it?
This PR upgrades the core vLLM dependency to a newer version from the
main branch (`13397841ab469cecf1ed425c3f52a9ffc38139b5`). This is
necessary to keep our project up-to-date with the latest features and
fixes from upstream vLLM.

1.
ac32e66cf9
pass file is moved.

- vLLM version: v0.15.0
- vLLM main:
d7e17aaacd

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: wxsIcey <1790571317@qq.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: wxsIcey <1790571317@qq.com>
This commit is contained in:
wangxiyuan
2026-02-10 14:08:59 +08:00
committed by GitHub
parent 1c7d1163f5
commit 2a826b5fad
19 changed files with 296 additions and 146 deletions

View File

@@ -19,10 +19,15 @@ from typing import Any, Callable, List, Optional, Sequence
import torch.fx as fx
from torch._inductor.decomposition import select_decomp_table
from vllm.compilation.fx_utils import OpOverload
from vllm.config import get_current_vllm_config
from vllm_ascend.compilation.compiler_interface import compile_fx
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.15.0"):
from vllm.compilation.fx_utils import OpOverload # type: ignore
else:
from vllm.compilation.passes.fx_utils import OpOverload
class TestBackend:

View File

@@ -21,7 +21,6 @@ import torch
import torch.nn as nn
import torch_npu
import vllm.config
from vllm.compilation.fx_utils import OpOverload
from vllm.config import ModelConfig, VllmConfig
from vllm.distributed import (ensure_model_parallel_initialized,
init_distributed_environment)
@@ -33,6 +32,13 @@ from vllm_ascend.ascend_forward_context import set_ascend_forward_context
from vllm_ascend.compilation.passes.norm_quant_fusion_pass import \
AddRMSNormQuantFusionPass
from vllm_ascend.utils import enable_custom_op
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.15.0"):
from vllm.compilation.fx_utils import OpOverload # type: ignore
else:
from vllm.compilation.passes.fx_utils import OpOverload
class TestModelWithoutBias(nn.Module):

View File

@@ -1,6 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import vllm
import vllm.config
from vllm.lora.request import LoRARequest
@@ -121,6 +123,7 @@ def generate_and_test(llm,
print("removing lora")
@pytest.mark.skip(reason="fix me")
@patch.dict("os.environ", {"VLLM_USE_MODELSCOPE": "False"})
def test_llama_lora(llama32_lora_files):
vllm_model = VllmRunner(