Code cleanup: Remove deprecated prefill path; move InputMetadata to infer_batch.py (#609)

This commit is contained in:
Lianmin Zheng
2024-07-12 12:28:09 -07:00
committed by GitHub
parent d9a6902986
commit 519e20cfda
3 changed files with 219 additions and 245 deletions

View File

@@ -8,6 +8,7 @@ from torch import nn
from sglang.global_config import global_config
from sglang.srt.layers.extend_attention import extend_attention_fwd
from sglang.srt.layers.token_attention import token_attention_fwd
from sglang.srt.managers.controller.infer_batch import global_server_args_dict
from sglang.srt.managers.controller.model_runner import ForwardMode, InputMetadata
@@ -29,8 +30,6 @@ class RadixAttention(nn.Module):
self.scaling = scaling
self.layer_id = layer_id
from sglang.srt.managers.controller.model_runner import global_server_args_dict
if not global_server_args_dict.get("disable_flashinfer", False):
self.prefill_forward = self.prefill_forward_flashinfer
self.extend_forward = self.prefill_forward_flashinfer
@@ -141,9 +140,7 @@ class RadixAttention(nn.Module):
k = k.view(-1, self.tp_k_head_num, self.head_dim)
v = v.view(-1, self.tp_v_head_num, self.head_dim)
if input_metadata.forward_mode == ForwardMode.PREFILL:
return self.prefill_forward(q, k, v, input_metadata)
elif input_metadata.forward_mode == ForwardMode.EXTEND:
if input_metadata.forward_mode == ForwardMode.EXTEND:
return self.extend_forward(q, k, v, input_metadata)
elif input_metadata.forward_mode == ForwardMode.DECODE:
return self.decode_forward(q, k, v, input_metadata)