Add PagedAttention to support FULL_DECODE_ONLY (#3102)
### What this PR does / why we need it?

Calculate the workspace memory size needed by the PagedAttention operator in advance, to avoid deadlocks during resource cleanup. This PR requires torch_npu version 0920 or newer.

### How was this patch tested?

- vLLM version: v0.11.0

---------

Signed-off-by: wangxiaoxin-sherie <wangxiaoxin7@huawei.com>
Co-authored-by: wangxiaoxin-sherie <wangxiaoxin7@huawei.com>
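In outline, the fix amounts to computing the operator's workspace ahead of time and caching it per token count, so nothing has to be allocated or freed while a graph is being captured or cleaned up. Below is a minimal sketch of that caching pattern; `GraphParams` and `query_workspace_fn` are illustrative stand-ins, not the real vllm_ascend or torch_npu API:

```python
from dataclasses import dataclass, field


@dataclass
class GraphParams:
    """Per-token-count caches, shaped like the object stubbed in the test below."""
    workspaces: dict = field(default_factory=dict)
    events: dict = field(default_factory=dict)
    attn_params: dict = field(default_factory=dict)
    handles: dict = field(default_factory=dict)


def ensure_workspace(graph_params: GraphParams, num_tokens: int, query_workspace_fn):
    # Look up the cached workspace for this token count and compute it only on
    # a miss. query_workspace_fn is a hypothetical stand-in for the torch_npu
    # call that reports the PagedAttention workspace requirement.
    if num_tokens not in graph_params.workspaces:
        graph_params.workspaces[num_tokens] = query_workspace_fn(num_tokens)
    return graph_params.workspaces[num_tokens]


params = GraphParams()
assert ensure_workspace(params, 10, lambda n: n) == 10   # computed once
assert ensure_workspace(params, 10, lambda n: -1) == 10  # served from cache
```

The test added by this PR exercises exactly the cache-hit side of this pattern: the workspace for `num_tokens = 10` is pre-seeded, and the forward pass is expected to reuse it.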
@@ -405,6 +405,109 @@ class TestAscendAttentionBackendImpl(TestBase):
```python
        mock_paged_attention.assert_called_once()

        assert output.shape == (10, 8 * 64)

    @patch('vllm_ascend.attention.attention_v1.get_forward_context')
    @patch('vllm_ascend.attention.attention_v1.get_graph_params')
    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_paged_attention')
    @patch('torch.npu.graph_task_group_end')
    @patch('torch.npu.graph_task_group_begin')
    @patch('torch.npu.ExternalEvent')
    @patch('torch_npu.npu.current_stream')
    def test_paged_attention_with_existing_workspace(
            self,
            # Stacked @patch decorators bind bottom-up: the decorator closest
            # to the function supplies the first mock argument.
            mock_current_stream,
            mock_external_event_class,
            mock_graph_begin,
            mock_graph_end,
            mock_paged_attention,
            mock_npu_reshape_and_cache,
            mock_get_graph_params,
            mock_get_forward_context,
    ):
        graph_params = MagicMock()
        attn_metadata = MagicMock()
        num_tokens = 10

        # Seed the per-token-count caches with an already-computed workspace.
        graph_params.workspaces = {num_tokens: 10}
        graph_params.events = {num_tokens: []}
        graph_params.attn_params = {num_tokens: []}
        graph_params.handles = {num_tokens: []}

        query = torch.randn(2, 5, 8)  # [batch_size, seq_len, hidden_size]
        key_cache = MagicMock()
        value_cache = MagicMock()
        num_kv_heads = 4
        num_heads = 8
        scale = 0.1
        output = torch.randn(2, 5, 8)

        self_obj = MagicMock()
        self_obj.key_cache = key_cache
        self_obj.value_cache = value_cache
        self_obj.num_kv_heads = num_kv_heads
        self_obj.num_heads = num_heads
        self_obj.scale = scale

        mock_stream = MagicMock()
        mock_current_stream.return_value = mock_stream
        mock_event_instance = MagicMock()
        mock_external_event_class.return_value = mock_event_instance

        mock_handle = MagicMock()
        mock_graph_end.return_value = mock_handle

        # The workspace for this token count is already cached.
        workspace = graph_params.workspaces.get(num_tokens)
        self.assertEqual(workspace, 10)

        # Phase 1: replay the capture-time bookkeeping by hand: wait on and
        # reset an external event, then record the attention parameters.
        stream = mock_current_stream()
        event = mock_external_event_class()
        event.wait(stream)
        event.reset(stream)
        graph_params.events[num_tokens].append(event)
        graph_params.attn_params[num_tokens].append((
            query,
            self_obj.key_cache,
            self_obj.value_cache,
            self_obj.num_kv_heads,
            self_obj.num_heads,
            self_obj.scale,
            attn_metadata.block_tables,
            attn_metadata.seq_lens,
            output,
        ))

        mock_event_instance.wait.assert_called_once_with(mock_stream)
        mock_event_instance.reset.assert_called_once_with(mock_stream)
        self.assertEqual(len(graph_params.events[num_tokens]), 1)
        self.assertEqual(len(graph_params.attn_params[num_tokens]), 1)
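
        # Phase 2: drive the real forward() in DecodeOnly state while the
        # forward context reports that graph capture is in progress.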
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.DecodeOnly
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        mock_get_forward_context.return_value = MagicMock(capturing=True)
        mock_get_graph_params.return_value = graph_params

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        # The paged-attention kernel ran once, and no new graph handle was
        # recorded for this token count.
        mock_paged_attention.assert_called_once()
        self.assertEqual(len(graph_params.handles[num_tokens]), 0)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu.npu_fused_infer_attention_score')
    def test_forward_decode_only_swa(self, mock_fused_infer_attention_score,
```
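One subtlety worth calling out, since it determines the parameter order in the test signature above: stacked `@patch` decorators inject their mocks bottom-up, so the decorator nearest the function supplies the first mock argument. A self-contained illustration using standard-library targets:

```python
from unittest.mock import patch
import os


@patch('os.getcwd')   # outermost: bound to the LAST mock argument
@patch('os.listdir')  # innermost: bound to the FIRST mock argument
def demo(mock_listdir, mock_getcwd):
    # Each mock replaces its target only while demo() runs.
    mock_getcwd.return_value = '/fake/cwd'
    mock_listdir.return_value = ['a.txt']
    return os.getcwd(), os.listdir('.')


print(demo())  # ('/fake/cwd', ['a.txt'])
```

This is why `torch_npu.npu.current_stream`, the bottommost decorator in the test, supplies the first mock parameter.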