[Main2Main] Upgrade vllm commit to 0109 (#5752)
### What this PR does / why we need it?
Upgrade vllm commit to 0109 (bde38c11df0ea066a740efe9b77fff5418be45df)
1. remove `init_cached_hf_modules` due to
https://github.com/vllm-project/vllm/pull/31786
2. fix spec_decode e2e test due to
https://github.com/vllm-project/vllm/pull/29821 break
3. fix `vllm.v1.attention.backends.utils` due to
https://github.com/vllm-project/vllm/pull/31891
4. fix `self.seq_lens - query_lens` on same device due to
https://github.com/vllm-project/vllm/pull/31773
5. skip model_runner_v2 e2e test due to `'_OpNamespace' '_C' object has
no attribute 'get_cuda_view_from_cpu_tensor'`
- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
@@ -1670,6 +1670,8 @@ class NPUModelRunner(GPUModelRunner):
|
||||
attn_metadata,
|
||||
aux_hidden_states,
|
||||
)
|
||||
if not vllm_version_is('0.13.0'):
|
||||
self._copy_draft_token_ids_to_cpu(scheduler_output)
|
||||
|
||||
(
|
||||
logprobs_lists,
|
||||
@@ -1983,7 +1985,7 @@ class NPUModelRunner(GPUModelRunner):
|
||||
query_start_loc_cpu=self.query_start_loc.cpu[:num_reqs +
|
||||
1],
|
||||
_seq_lens_cpu=self.seq_lens.cpu[:num_reqs],
|
||||
seq_lens=self.seq_lens.cpu[:num_reqs],
|
||||
seq_lens=self.seq_lens.gpu[:num_reqs],
|
||||
num_reqs=num_reqs,
|
||||
num_actual_tokens=num_tokens,
|
||||
block_table_tensor=block_table_tensor[:num_reqs],
|
||||
|
||||
@@ -121,11 +121,12 @@ class NPUWorker(WorkerBase):
|
||||
self.cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
|
||||
self.cache_config.cache_dtype]
|
||||
|
||||
if self.model_config.trust_remote_code:
|
||||
# note: lazy import to avoid importing torch before initializing
|
||||
from vllm.utils.import_utils import init_cached_hf_modules
|
||||
if vllm_version_is('0.13.0'):
|
||||
if self.model_config.trust_remote_code:
|
||||
# note: lazy import to avoid importing torch before initializing
|
||||
from vllm.utils.import_utils import init_cached_hf_modules
|
||||
|
||||
init_cached_hf_modules()
|
||||
init_cached_hf_modules()
|
||||
|
||||
self.profiler = self._init_profiler()
|
||||
if vllm_config.model_config and vllm_config.model_config.enable_sleep_mode:
|
||||
|
||||
Reference in New Issue
Block a user