Drop vLLM 0.13.0 support (#6069)
### What this PR does / why we need it?
Drop vLLM 0.13.0 support, upgrade to 0.14.0
- vLLM version: v0.14.0
- vLLM main:
d68209402d
---------
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
@@ -17,7 +17,6 @@ from vllm_ascend.attention.mla_v1 import (AscendMLABackend,
                                           AscendMLAPrefillMetadata,
                                           ChunkedContextMetadata)
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
-from vllm_ascend.utils import vllm_version_is


 class TestAscendMLABackend(TestBase):
@@ -477,10 +476,7 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
         self.mock_vllm_config.model_config = model_config
         self.kv_cache_spec = MagicMock()
         self.kv_cache_spec.num_layers = 32
-        if vllm_version_is('0.13.0'):
-            self.kv_cache_spec.head_size = 128
-        else:
-            self.kv_cache_spec.head_size = 64
+        self.kv_cache_spec.head_size = 64
         self.kv_cache_spec.num_heads = 32

     def tearDown(self):
||||
Reference in New Issue
Block a user