[Feature] support eager mode in model runner v2 (#5210)

### What this PR does / why we need it?
#5051 only implemented a basic framework for model runner v2, but some bugs
still block end-to-end functionality; this PR aims to enable basic e2e
functionality.
model runner v2 plans:
https://github.com/vllm-project/vllm-ascend/issues/5208

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
This commit is contained in:
Ronald
2025-12-29 15:28:34 +08:00
committed by GitHub
parent 4da46da9bf
commit e7e1a7dc05
19 changed files with 528 additions and 44 deletions

View File

@@ -5,29 +5,16 @@ import torch
@contextmanager
def torch_cuda_wrapper():
    """Temporarily alias ``torch.cuda`` APIs to their ``torch.npu`` equivalents.

    Inside the ``with`` block, the CUDA event/stream/graph entry points
    resolve to the NPU implementations so CUDA-oriented code can run on
    Ascend devices. On exit, every patched attribute is restored, so calling
    CUDA ops outside the block still raises in an NPU-only environment.

    Yields:
        None. Used purely for its patching side effect.
    """
    # Save every attribute we are about to patch so finally can restore all
    # of them (the original code patched torch.cuda.stream and
    # torch.cuda.synchronize but never restored them, leaking the patch).
    ori_event = torch.cuda.Event
    ori_stream = torch.cuda.Stream
    ori_stream_func = torch.cuda.stream
    ori_default_stream = torch.cuda.default_stream
    ori_current_stream = torch.cuda.current_stream
    ori_graph_pool_handle = torch.cuda.graph_pool_handle
    ori_cuda_graph_cls = torch.cuda.CUDAGraph
    ori_cuda_graph_func = torch.cuda.graph
    ori_synchronize = torch.cuda.synchronize
    try:
        torch.cuda.Event = torch.npu.Event
        torch.cuda.Stream = torch.npu.Stream
        torch.cuda.stream = torch.npu.stream
        torch.cuda.default_stream = torch.npu.default_stream
        torch.cuda.current_stream = torch.npu.current_stream
        torch.cuda.graph_pool_handle = torch.npu.graph_pool_handle
        # NOTE: the NPU graph class is spelled NPUGraph (not NpuGraph).
        torch.cuda.CUDAGraph = torch.npu.NPUGraph
        torch.cuda.graph = torch.npu.graph
        torch.cuda.synchronize = torch.npu.synchronize
        yield
    finally:
        # revert back torch cuda properties, so it will still raise error
        # to call cuda ops in npu environment.
        torch.cuda.Event = ori_event
        torch.cuda.Stream = ori_stream
        torch.cuda.stream = ori_stream_func
        torch.cuda.default_stream = ori_default_stream
        torch.cuda.current_stream = ori_current_stream
        torch.cuda.graph_pool_handle = ori_graph_pool_handle
        torch.cuda.CUDAGraph = ori_cuda_graph_cls
        torch.cuda.graph = ori_cuda_graph_func
        torch.cuda.synchronize = ori_synchronize