### What this PR does / why we need it?
This PR adapts to the newest commit of the vLLM main branch for model runner v2. Please refer to
https://github.com/vllm-project/vllm-ascend/issues/5208
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.18.0
- vLLM main:
ed359c497a
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
```python
from contextlib import contextmanager

import torch

from vllm.logger import logger


@contextmanager
def torch_cuda_wrapper():
    """Alias the torch.cuda APIs used by vLLM to their torch.npu equivalents."""
    try:
        # Redirect the CUDA stream, event, and graph entry points that the
        # upstream vLLM model runner calls to the NPU implementations.
        torch.cuda.Event = torch.npu.Event
        torch.cuda.Stream = torch.npu.Stream
        torch.cuda.stream = torch.npu.stream
        torch.cuda.default_stream = torch.npu.default_stream
        torch.cuda.current_stream = torch.npu.current_stream
        torch.cuda.graph_pool_handle = torch.npu.graph_pool_handle
        torch.cuda.CUDAGraph = torch.npu.NPUGraph
        torch.cuda.graph = torch.npu.graph
        torch.cuda.synchronize = torch.npu.synchronize
        torch.cuda.set_stream = torch.npu.set_stream
        torch.cuda.current_device = torch.npu.current_device
        torch.cuda.mem_get_info = torch.npu.mem_get_info
        logger.info_once("Wrapping torch.cuda with torch.npu.")
        yield
    finally:
        # The aliases are intentionally left in place after the context exits.
        pass
```
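For illustration, here is a minimal sketch of how the wrapper might be used around NPU graph capture. The call site below is an assumption for this sketch and is not part of this diff; `torch_cuda_wrapper` is assumed to be importable from the module above, and `torch_npu` is the Ascend adapter that registers the `torch.npu` namespace.

```python
import torch
import torch_npu  # noqa: F401  (assumed Ascend adapter; registers torch.npu)

# Hypothetical call site, not part of this PR: capture an NPU graph through
# the CUDA-named APIs that upstream vLLM code expects.
with torch_cuda_wrapper():
    graph = torch.cuda.CUDAGraph()  # resolves to torch.npu.NPUGraph
    with torch.cuda.graph(graph):   # resolves to torch.npu.graph
        pass  # run the model forward here to record it into the graph
    torch.cuda.synchronize()        # resolves to torch.npu.synchronize
```

Note that the `finally` block is a no-op, so the aliases persist after the context exits; on an NPU-only process that is presumably the intent.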