[Main2Main] Upgrade vllm commit to 0123 (#6169)

### What this PR does / why we need it?
1. Upgrade vllm commit to 0115
(8471b27df97c3eb79f891802fc0e858f8f7ac6a0).
Modify import paths due to these refactors:
https://github.com/vllm-project/vllm/pull/32245
https://github.com/vllm-project/vllm/pull/32060
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21034239336/job/60490156965?pr=5913
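The recurring fix for these refactors is a version-guarded import; a minimal sketch of the pattern (it also appears verbatim in the diffs at the bottom of this commit):

```python
# Minimal sketch of the compatibility-import pattern used throughout this
# PR: choose the import path based on the installed vLLM version.
from vllm_ascend.utils import vllm_version_is

if vllm_version_is('0.14.1'):
    # Pre-refactor path (vLLM v0.14.1)
    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
else:
    # Post-refactor path (vllm-project/vllm#32060)
    from vllm.v1.attention.backend import AttentionMetadataBuilder
```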
2. Upgrade vllm commit to 0119
(9a1f16da1e423ede2c2f52a9850cbfbb39cefe96).
Fix the `WorkerProc.__init__() missing 1 required positional argument:
'is_driver_worker'` error introduced by
https://github.com/vllm-project/vllm/pull/28506
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21156263050/job/60841668755?5569
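The fix amounts to passing the new argument only on newer vLLM. A hedged sketch; the helper name and the rank-0 driver heuristic are illustrative assumptions, not this PR's exact code:

```python
# Hedged sketch: build extra WorkerProc kwargs across vLLM versions.
from vllm_ascend.utils import vllm_version_is

def worker_proc_extra_kwargs(rank: int) -> dict:
    extra: dict = {}
    if not vllm_version_is('0.14.1'):
        # vllm-project/vllm#28506 made `is_driver_worker` a required
        # argument of WorkerProc.__init__; rank 0 as driver is an assumption.
        extra['is_driver_worker'] = rank == 0
    return extra
```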
3. Upgrade vllm commit to 0120
(148117ea2e689cd43df4be6892671a17cdae5833).
1. Add the `skip_compiled` param to `set_forward_context` due to
https://github.com/vllm-project/vllm/pull/30385
2. Modify `tests/ut/spec_decode/test_eagle_proposer.py` because
https://github.com/vllm-project/vllm/pull/24322 changed
`self.max_num_tokens` to
`vllm_config.scheduler_config.max_num_batched_tokens + max_batch_size`
3. Modify UT import paths due to the refactor:
https://github.com/vllm-project/vllm/pull/32060
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21204851770/job/60999046946
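The guard that feeds `skip_compiled` reduces to a small predicate; a sketch whose names mirror the runner diff at the bottom of this commit:

```python
# Sketch of the predicate behind the new `skip_compiled` argument
# (vllm-project/vllm#30385): encoder-decoder models can only compile the
# pure decode steps, so a pass that has encoder inputs must run eagerly.
def should_skip_compiled(model_config, scheduler_output) -> bool:
    num_encoder_reqs = len(scheduler_output.scheduled_encoder_inputs)
    return bool(model_config.is_encoder_decoder and num_encoder_reqs > 0)
```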
4. Upgrade vllm commit to 0121
(f23fb5a7c1b61350c5c40ca1115d3bf8cf2b8cc9).
1. vLLM switched `uses_mrope` from the target to the draft model config
(https://github.com/vllm-project/vllm/pull/32048), making
`positions`/`mrope_positions` mutually exclusive; this broke
vllm-ascend's direct `self.positions` access and tests that did not set
`draft_model_config.uses_mrope`.
2. Move `bs_to_padded_graph_size` from `CompilationConfig` to
`CudagraphDispatcher` due to the refactor
https://github.com/vllm-project/vllm/pull/30143
3. Remove the unused `maybe_setup_kv_connector` due to
https://github.com/vllm-project/vllm/pull/32077
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21217728738/job/61043738834
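Condensed from the runner diff below, the padding lookup now dispatches on the vLLM version:

```python
# Condensed sketch of the version dispatch in the runner diff below: the
# padded-graph-size lookup moved from VllmConfig to CudagraphDispatcher
# (vllm-project/vllm#30143).
from vllm_ascend.utils import vllm_version_is

def padded_num_tokens(runner, num_scheduled_tokens: int) -> int:
    if vllm_version_is('0.14.1'):
        # Old API: the padding helper lived on VllmConfig.
        return runner.vllm_config.pad_for_cudagraph(num_scheduled_tokens)
    # New API: table lookup on the dispatcher.
    return runner.cudagraph_dispatcher._bs_to_padded_graph_size[
        num_scheduled_tokens]
```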
5. Upgrade vllm commit to 0122
(8ebf271bb6d1e7e9b1a55be73d755ef1a57dbbe5).
Update `FusedMoEParallelConfig` (which gained `enable_eplb`) and
`FusedMoEConfig` due to https://github.com/vllm-project/vllm/pull/32414
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21249922546/job/61148613054
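For illustration only, a hypothetical, simplified shape of that config change; every field except `enable_eplb` is an assumption, not vLLM's actual class definition:

```python
# Hypothetical, simplified sketch of the FusedMoEParallelConfig change:
# vllm-project/vllm#32414 adds an `enable_eplb` field that downstream
# constructors must now pass through. Not vLLM's real dataclass.
from dataclasses import dataclass

@dataclass
class FusedMoEParallelConfigSketch:
    tp_size: int = 1          # assumed field, for illustration
    dp_size: int = 1          # assumed field, for illustration
    enable_eplb: bool = False # newly added knob per #32414
```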
6. Upgrade vllm commit to 0123
(dc917cceb877dfd13f98c538c4c96158047d98bd).
Set `temperature=0.0` explicitly, since the default temperature value was
removed in https://github.com/vllm-project/vllm/pull/32723
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21280796875
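With the default gone, greedy decoding must be requested explicitly; a minimal sketch:

```python
# Minimal sketch: after vllm-project/vllm#32723 removed the default
# temperature, greedy sampling has to be requested explicitly.
from vllm import SamplingParams

greedy = SamplingParams(temperature=0.0, max_tokens=32)  # greedy decoding
```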
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
CI passed; see the per-upgrade test-run links above.

- vLLM version: v0.14.0
- vLLM main: d68209402d

---------

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: wjunLu <wjunlu217@gmail.com>

```diff
@@ -103,7 +103,7 @@ from vllm_ascend.utils import (AscendDeviceType, ProfileExecuteDuration,
                                enable_sp, get_ascend_device_type,
                                is_drafter_moe_model, is_moe_model,
                                lmhead_tp_enable, maybe_trans_nz,
-                               set_weight_prefetch_method)
+                               set_weight_prefetch_method, vllm_version_is)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 from vllm_ascend.worker.pcp_utils import PCPManager
@@ -587,8 +587,12 @@ class NPUModelRunner(GPUModelRunner):
         if (self.use_aclgraph and total_num_scheduled_tokens
                 <= self.cudagraph_batch_sizes[-1]):
             # Add padding to the batch size.
-            num_input_tokens = self.vllm_config.pad_for_cudagraph(
-                total_num_scheduled_tokens)
+            if vllm_version_is('0.14.1'):
+                num_input_tokens = self.vllm_config.pad_for_cudagraph(
+                    total_num_scheduled_tokens)
+            else:
+                num_input_tokens = self.cudagraph_dispatcher._bs_to_padded_graph_size[
+                    total_num_scheduled_tokens]
         elif self.use_aclgraph and enable_sp(self.vllm_config):
             # When using aclgraph, if total_num_scheduled_tokens exceeds the maximum graph size,
             # the model will fall back to running its FX graph in eager mode.
@@ -1403,9 +1407,17 @@ class NPUModelRunner(GPUModelRunner):
                 head_dim=self.model_config.get_vocab_size(),
                 generators=self.input_batch.sampling_metadata.generators)
 
+        # Encoder-decoder models can only compile the pure decode steps where no
+        # encoder inputs are present. Use eager for the first pass.
+        num_encoder_reqs = len(scheduler_output.scheduled_encoder_inputs)
+        has_encoder_input = (
+            self.model_config.is_encoder_decoder and num_encoder_reqs > 0
+        )
+
         # Run forward pass
         with ProfileExecuteDuration().capture_async("forward"):
-            with set_ascend_forward_context(
+            with (
+                set_ascend_forward_context(
                     attn_metadata,
                     self.vllm_config,
                     num_tokens=num_input_tokens,
@@ -1414,26 +1426,18 @@ class NPUModelRunner(GPUModelRunner):
                     batch_descriptor=batch_descriptor,
                     num_actual_tokens=scheduler_output.
                     total_num_scheduled_tokens,
-                    model_instance=self.model):
-                self.maybe_setup_kv_connector(scheduler_output)
+                    model_instance=self.model,
+                    skip_compiled=has_encoder_input),
+                self.maybe_get_kv_connector_output(scheduler_output) as kv_connector_output,
+            ):
                 hidden_states = self._generate_process_reqs_hidden_states(
                     num_input_tokens, input_ids, positions,
                     intermediate_tensors, inputs_embeds, model_kwargs)
-                self.maybe_wait_for_kv_save()
-                finished_sending, finished_recving = self.get_finished_kv_transfer(
-                    scheduler_output)
                 aux_hidden_states = None
                 if self.use_aux_hidden_state_outputs:
                     hidden_states, aux_hidden_states = hidden_states
-                kv_connector_output = KVConnectorOutput(
-                    finished_sending=finished_sending,
-                    finished_recving=finished_recving)
-                finished_sending = None
-                finished_recving = None
 
         with ProfileExecuteDuration().capture_async("post process"):
             # Broadcast PP output for external_launcher (torchrun)
             # to make sure we are synced across pp ranks
```

```diff
@@ -22,7 +22,6 @@ from typing import Any
 import torch
 import torch.nn as nn
 from vllm.config import VllmConfig
-from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
 from vllm.v1.kv_cache_interface import KVCacheConfig
 from vllm.v1.worker.gpu.block_table import BlockTables
 from vllm.v1.worker.gpu.cudagraph_utils import CudaGraphManager
@@ -31,6 +30,12 @@ from vllm.v1.worker.gpu.cudagraph_utils import \
 from vllm.v1.worker.gpu.input_batch import InputBuffers
 
 from vllm_ascend.worker.v2.utils import torch_cuda_wrapper
+from vllm_ascend.utils import vllm_version_is
+
+if vllm_version_is('0.14.1'):
+    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
+else:
+    from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 
 class AclGraphManager(CudaGraphManager):
```

```diff
@@ -23,13 +23,18 @@ from typing import Any, Tuple
 import numpy as np
 import torch
 from vllm.config import VllmConfig
-from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
 from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
 
 from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
 from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
                                          AscendPrefillContextParallelMetadata)
+from vllm_ascend.utils import vllm_version_is
+
+if vllm_version_is('0.14.1'):
+    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
+else:
+    from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 
 _ATTENTION_MASK_BUILDER = None
```

```diff
@@ -20,7 +20,7 @@
 
 import torch
 from vllm.triton_utils import tl, triton
-from vllm.v1.worker.gpu.sample.metadata import SamplingMetadata
+from vllm.v1.sample.metadata import SamplingMetadata
 
 
 @triton.jit
```

```diff
@@ -17,7 +17,7 @@
 
 import torch
 
 from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
-from vllm.v1.worker.gpu.sample.metadata import SamplingMetadata
+from vllm.v1.sample.metadata import SamplingMetadata
 from vllm.v1.worker.gpu.sample.min_p import apply_min_p
 from vllm.v1.worker.gpu.sample.sampler import Sampler
```