[Main2Main] Upgrade vllm commit to 0123 (#6169)

### What this PR does / why we need it?
1.  Upgrade vllm commit to: 0115
(8471b27df97c3eb79f891802fc0e858f8f7ac6a0)
Modify import paths due to the refactors:
https://github.com/vllm-project/vllm/pull/32245
https://github.com/vllm-project/vllm/pull/32060
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21034239336/job/60490156965?pr=5913
2. Upgrade vllm commit to: 0119
(9a1f16da1e423ede2c2f52a9850cbfbb39cefe96)
Fix `WorkerProc.__init__() missing 1 required positional argument:
'is_driver_worker'` due to
https://github.com/vllm-project/vllm/pull/28506
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21156263050/job/60841668755?5569
3. Upgrade vllm commit to: 0120
(148117ea2e689cd43df4be6892671a17cdae5833)
    1. Add the `skip_compiled` param in `set_forward_context` due to
       https://github.com/vllm-project/vllm/pull/30385
    2. Modify `tests/ut/spec_decode/test_eagle_proposer.py` due to
       https://github.com/vllm-project/vllm/pull/24322, which changes
       `self.max_num_tokens` to
       `vllm_config.scheduler_config.max_num_batched_tokens + max_batch_size`
    3. Modify UT import paths due to the refactor:
       https://github.com/vllm-project/vllm/pull/32060
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21204851770/job/60999046946
4. Upgrade vllm commit to: 0121
(f23fb5a7c1b61350c5c40ca1115d3bf8cf2b8cc9)
    1. vLLM switched `uses_mrope` from the target model config to the draft
       model config, making `positions`/`mrope_positions` mutually exclusive;
       this breaks vllm-ascend's direct `self.positions` access and tests that
       do not set `draft_model_config.uses_mrope`.
       https://github.com/vllm-project/vllm/pull/32048
    2. `bs_to_padded_graph_size` moved from `CompilationConfig` to
       `CudagraphDispatcher` due to the refactor
       https://github.com/vllm-project/vllm/pull/30143
    3. Remove the unused `maybe_setup_kv_connector` due to
       https://github.com/vllm-project/vllm/pull/32077
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21217728738/job/61043738834
5. Upgrade vllm commit to: 0122
(8ebf271bb6d1e7e9b1a55be73d755ef1a57dbbe5)
Update `FusedMoEParallelConfig` (which now includes `enable_eplb`) and
`FusedMoEConfig` due to https://github.com/vllm-project/vllm/pull/32414
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21249922546/job/61148613054
6. Upgrade vllm commit to: 0123
(dc917cceb877dfd13f98c538c4c96158047d98bd)
Set `temperature=0.0` explicitly, since the default temperature value was
removed in https://github.com/vllm-project/vllm/pull/32723 (a minimal usage
sketch follows this list).
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21280796875
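
A minimal sketch of the explicit-temperature usage this item refers to; the model name and prompt below are placeholders, not taken from this PR:

```python
from vllm import LLM, SamplingParams

# The default temperature was removed upstream (vllm PR #32723), so greedy
# decoding must now be requested explicitly; temperature=0.0 keeps outputs
# deterministic in tests.
greedy = SamplingParams(temperature=0.0, max_tokens=32)

llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct")  # placeholder model
outputs = llm.generate(["Hello, my name is"], greedy)
print(outputs[0].outputs[0].text)
```
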
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.14.0
- vLLM main:
d68209402d

---------

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: wjunLu <wjunlu217@gmail.com>
Author: meihanc
Date: 2026-01-27 08:44:36 +08:00 (committed by GitHub)
Commit: fea197ad50 (parent: 9780a995e1)
25 changed files with 173 additions and 83 deletions

View File

@@ -41,7 +41,7 @@ from vllm_ascend.ops.rotary_embedding import update_cos_sin
 from vllm_ascend.ops.triton.spec_decode.utils import \
     prepare_inputs_padded_kernel
 from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
-from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled
+from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled, vllm_version_is
 # Currently we will fix block size to a small one since `num_reqs` can't be too large
 _PREPARE_INPUTS_BLOCK_SIZE = 4
@@ -455,7 +455,11 @@ class EagleProposer(VllmEagleProposer):
         self.input_ids[last_token_indices] = next_token_ids
         if self.use_cuda_graph and \
                 num_tokens <= self.runner.cudagraph_batch_sizes[-1]:
-            num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
+            if vllm_version_is('0.14.1'):
+                num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
+            else:
+                num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
+                    num_tokens]
         else:
             num_input_tokens = num_tokens
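
The hunk above gates the cudagraph padding lookup on the installed vLLM release. A minimal sketch of what a version gate such as `vllm_ascend.utils.vllm_version_is` plausibly does (assumption: the real helper in `vllm_ascend/utils.py` may also honor an override environment variable or compare versions differently):

```python
# Hypothetical sketch of the version gate used in the hunk above; see
# vllm_ascend/utils.py for the actual implementation.
from vllm import __version__ as _installed_vllm_version


def vllm_version_is(target: str) -> bool:
    """Return True if the installed vLLM release string matches `target`."""
    return _installed_vllm_version == target
```

With such a gate, the proposer keeps using `vllm_config.pad_for_cudagraph` on the released vLLM while switching to the dispatcher's padded-size table on newer main commits.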

View File

@@ -17,7 +17,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import ACLGraphWrapper
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
-from vllm_ascend.utils import ProfileExecuteDuration, lmhead_tp_enable
+from vllm_ascend.utils import ProfileExecuteDuration, lmhead_tp_enable, vllm_version_is
 class MtpProposer(EagleProposer):
@@ -97,7 +97,7 @@ class MtpProposer(EagleProposer):
             attn_metadata = None
         input_ids = self.input_ids[:num_tokens]
-        positions = self.positions[:num_tokens]
+        positions = self._get_positions(num_tokens)
         previous_hidden_states = self.hidden_states[:num_tokens]
         for i in range(self.num_speculative_tokens):
             if i > 0 and not in_graph_capturing and aclgraph_runtime_mode == CUDAGraphMode.FULL:
@@ -244,14 +244,18 @@ class MtpProposer(EagleProposer):
         # Note(qcs): We may need to refactor these check logics.
         if self.use_cuda_graph and num_scheduled_tokens <= self.runner.cudagraph_batch_sizes[
                 -1]:
-            num_input_tokens = self.vllm_config.pad_for_cudagraph(
-                num_scheduled_tokens)
+            if vllm_version_is('0.14.1'):
+                num_input_tokens = self.vllm_config.pad_for_cudagraph(
+                    num_scheduled_tokens)
+            else:
+                num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
+                    num_scheduled_tokens]
         else:
             # Eager mode, no padding needed
             num_input_tokens = num_tokens
         # copy inputs to buffer for cudagraph
-        self.positions[:num_tokens] = target_positions
+        self._set_positions(num_tokens, target_positions)
         self.hidden_states[:num_tokens] = target_hidden_states
         # eager/acl piecewise mode need to update num_tokens_across_dp
         (num_input_tokens, num_tokens_across_dp,
@@ -311,7 +315,7 @@ class MtpProposer(EagleProposer):
             model_kwargs = {}
             model_kwargs["attn_metadata"] = attn_metadata
         input_ids = self.input_ids[:num_input_tokens]
-        positions = self.positions[:num_input_tokens]
+        positions = self._get_positions(num_input_tokens)
         hidden_states = self.hidden_states[:num_input_tokens]
         hidden_states, positions = self.maybe_pad_and_reduce(
@@ -474,7 +478,7 @@ class MtpProposer(EagleProposer):
             # copy inputs to buffer for cudagraph
             self.input_ids[:batch_size] = input_ids
-            self.positions[:batch_size] = clamped_positions
+            self._set_positions(batch_size, clamped_positions)
             self.hidden_states[:hidden_states.shape[0]] = hidden_states
             if self.pcp_size * self.dcp_size > 1:
                 # update local seq_len
@@ -495,7 +499,10 @@ class MtpProposer(EagleProposer):
             else:
                 attn_metadata_i.slot_mapping[:batch_size] = slot_mapping
             if self.speculative_config.disable_padded_drafter_batch:
-                self.positions[batch_size:num_input_tokens] = 0
+                if self.uses_mrope:
+                    self.mrope_positions[:, batch_size:num_input_tokens] = 0
+                else:
+                    self.positions[batch_size:num_input_tokens] = 0
                 self.input_ids[batch_size:num_input_tokens] = 0
                 self.hidden_states[batch_size:num_input_tokens].fill_(0)
@@ -504,8 +511,8 @@ class MtpProposer(EagleProposer):
                     prefill_metadata.seq_lens_list = prefill_metadata.seq_lens.tolist(
                     )
                 prefill_metadata.context_lens = attn_metadata_i.seq_lens
-                prefill_metadata.input_positions = self.positions[:
-                                                                  num_input_tokens]
+                prefill_metadata.input_positions = self._get_positions(
+                    num_input_tokens)
                 prefill_metadata.max_seq_lens += 1
                 prefill_metadata.max_seq_lens = min(
                     prefill_metadata.max_seq_lens,
@@ -520,8 +527,8 @@ class MtpProposer(EagleProposer):
                     decode_metadata.seq_lens_list = decode_seq_lens_list + [
                         0
                     ] * (graph_pad_size - len(decode_seq_lens_list))
-                decode_metadata.input_positions = self.positions[:
-                                                                 num_input_tokens]
+                decode_metadata.input_positions = self._get_positions(
+                    num_input_tokens)
                 decode_metadata.max_seq_lens += 1
                 decode_metadata.max_seq_lens = min(
                     decode_metadata.max_seq_lens,
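
Throughout the hunks above, direct `self.positions[...]` reads and writes are replaced by `self._get_positions(...)` / `self._set_positions(...)` so that M-RoPE models, whose positions live in the separate `mrope_positions` buffer, keep working after https://github.com/vllm-project/vllm/pull/32048. A hedged sketch of what these helpers plausibly look like, inferred from this diff (the mixin name and buffer shapes are assumptions):

```python
import torch


class _PositionsAccessMixin:
    # Hypothetical sketch inferred from the diff above; the real helpers live
    # on the proposer classes and may differ in detail.
    uses_mrope: bool
    positions: torch.Tensor        # plain RoPE buffer, shape (max_num_tokens,)
    mrope_positions: torch.Tensor  # M-RoPE buffer, shape (3, max_num_tokens)

    def _get_positions(self, num_tokens: int) -> torch.Tensor:
        # M-RoPE keeps one row per positional axis, so slice the last dim.
        if self.uses_mrope:
            return self.mrope_positions[:, :num_tokens]
        return self.positions[:num_tokens]

    def _set_positions(self, num_tokens: int, values: torch.Tensor) -> None:
        if self.uses_mrope:
            self.mrope_positions[:, :num_tokens] = values
        else:
            self.positions[:num_tokens] = values
```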