adapt to main2main for model runner v2 (#7578)
### What this PR does / why we need it?
This PR adapts model runner v2 to the newest commit of the vLLM main branch. Please refer to
https://github.com/vllm-project/vllm-ascend/issues/5208
### Does this PR introduce _any_ user-facing change?
no
### How was this patch tested?
- vLLM version: v0.18.0
- vLLM main:
ed359c497a
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
This commit is contained in:
@@ -1,12 +1,8 @@
|
||||
from contextlib import contextmanager
|
||||
|
||||
import torch
|
||||
import vllm
|
||||
from vllm.logger import logger
|
||||
|
||||
from vllm_ascend.worker.v2.block_table import AscendBlockTables
|
||||
from vllm_ascend.worker.v2.model_states import init_asecnd_model_state
|
||||
|
||||
|
||||
@contextmanager
|
||||
def torch_cuda_wrapper():
|
||||
@@ -27,27 +23,3 @@ def torch_cuda_wrapper():
|
||||
yield
|
||||
finally:
|
||||
pass
|
||||
|
||||
|
||||
@contextmanager
def block_table_wrapper():
    """Install the Ascend block-table class into vllm's model runner.

    vllm-ascend needs slot mappings initialized with the torch.int32
    dtype, while upstream vllm defaults to torch.int64, so the runner's
    ``BlockTables`` symbol is replaced with ``AscendBlockTables``.

    NOTE(review): the patch is not undone on exit — the replacement
    remains in effect after the ``with`` block ends.
    """
    vllm.v1.worker.gpu.model_runner.BlockTables = AscendBlockTables
    logger.info_once("Wrapping BlockTables with AscendBlockTables.")
    yield
|
||||
|
||||
|
||||
@contextmanager
def model_states_wrapper():
    """Install the Ascend model-state factory into vllm's model runner.

    ``prepare_attn`` in ``AscendModelState`` differs from upstream vllm,
    so the runner's ``init_model_state`` symbol is replaced with
    ``init_asecnd_model_state``.

    NOTE(review): the patch is not undone on exit — the replacement
    remains in effect after the ``with`` block ends.
    """
    vllm.v1.worker.gpu.model_runner.init_model_state = init_asecnd_model_state
    logger.info_once("Wrapping init_model_state with init_asecnd_model_state.")
    yield
|
||||
|
||||
Reference in New Issue
Block a user