[Main2Main] Upgrade vllm commit to 0105 (#5595)
### What this PR does / why we need it?
Upgrade the pinned vLLM commit to 0105 (8be6432bdaf6275664d857b1e5e9bf8ed1ce299e)
1. Remove the `maybe_padded_num_tokens` arg in `model_runner_v1.py`, since
https://github.com/vllm-project/vllm/pull/31517 deleted this unused arg
2. Remove the dense model `Qwen/Qwen3-0.6B` from
`tests/e2e/multicard/test_aclgraph_capture_replay.py` and
`tests/e2e/multicard/test_data_parallel.py`, because after
https://github.com/vllm-project/vllm/pull/30739 offline data parallel mode
is no longer supported (or useful) for dense models
3. Adapt `vllm_ascend/worker/worker.py` to
https://github.com/vllm-project/vllm/pull/31584
4. Adapt the `self.block_size` accesses to
https://github.com/vllm-project/vllm/pull/31540 (the proposer tests below no
longer assert `proposer.block_size`)
5. Modify `test_mla_v1.py` for
https://github.com/vllm-project/vllm/pull/28454 , which refactored
`get_head_size()`; the expected head size is now gated on the vLLM version
(see the sketch below)
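
Several of these adaptations rely on the same compatibility gate. A minimal sketch of the pattern, using the `vllm_version_is` helper from `vllm_ascend.utils` that the diff below imports; `expected_mla_head_size` is a hypothetical name used here only for illustration, and the head sizes mirror the updated unit test rather than any new API:

```python
# Sketch of the version gate applied in test_mla_v1.py (see the diff below).
from vllm_ascend.utils import vllm_version_is


def expected_mla_head_size() -> int:
    if vllm_version_is("0.13.0"):
        # vLLM v0.13.0: the MLA kv cache spec still reports 128.
        return 128
    # After vllm-project/vllm#28454 refactored get_head_size(): 64.
    return 64
```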
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 7157596103
Signed-off-by: wjunLu <wjunlu217@gmail.com>
@@ -17,6 +17,7 @@ from vllm_ascend.attention.mla_v1 import (AscendMLABackend,
                                            AscendMLAPrefillMetadata,
                                            ChunkedContextMetadata)
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
+from vllm_ascend.utils import vllm_version_is


 class TestAscendMLABackend(TestBase):
@@ -392,7 +393,10 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
         self.mock_vllm_config.model_config = model_config
         self.kv_cache_spec = MagicMock()
         self.kv_cache_spec.num_layers = 32
-        self.kv_cache_spec.head_size = 128
+        if vllm_version_is('0.13.0'):
+            self.kv_cache_spec.head_size = 128
+        else:
+            self.kv_cache_spec.head_size = 64
         self.kv_cache_spec.num_heads = 32

     @patch("vllm_ascend.attention.mla_v1.get_cos_and_sin_mla")
@@ -18,13 +18,6 @@
 import sys
 from unittest.mock import MagicMock

-from vllm_ascend.utils import adapt_patch  # noqa E402
-from vllm_ascend.utils import register_ascend_customop
-
-# triton and torch_npu is not available in the environment, so we need to mock them
-sys.modules['torch_npu'].npu.current_device = MagicMock(return_value=0)
-sys.modules['torch_npu._inductor'] = MagicMock()
-
 triton_runtime = MagicMock()
 triton_runtime.driver.active.utils.get_device_properties.return_value = {
     'num_aic': 8,
@@ -32,6 +25,13 @@ triton_runtime.driver.active.utils.get_device_properties.return_value = {
 }
 sys.modules['triton.runtime'] = triton_runtime

+from vllm_ascend.utils import adapt_patch  # noqa E402
+from vllm_ascend.utils import register_ascend_customop  # noqa E402
+
+# triton and torch_npu is not available in the environment, so we need to mock them
+sys.modules['torch_npu'].npu.current_device = MagicMock(return_value=0)
+sys.modules['torch_npu._inductor'] = MagicMock()
+
 adapt_patch()
 adapt_patch(True)

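The hunk above moves the `torch_npu` stubs so they are registered before anything imports `vllm_ascend.utils`. A minimal, self-contained sketch of why that ordering matters, using only the standard library; assigning the whole `torch_npu` entry here is an illustration, not the exact statement from the test file:

```python
import sys
from unittest.mock import MagicMock

# Register the stub before any import that needs torch_npu. Python's import
# machinery consults sys.modules first, so the MagicMock is returned instead
# of raising ModuleNotFoundError for the absent package.
sys.modules['torch_npu'] = MagicMock()

import torch_npu  # resolves to the MagicMock registered above

# Attribute access now works the way the test file relies on:
torch_npu.npu.current_device.return_value = 0
print(torch_npu.npu.current_device())  # -> 0
```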
@@ -58,7 +58,6 @@ class TestEagleProposerInitialization(TestBase):
             device=self.device,
             runner=self.runner)

-        self.assertEqual(proposer.block_size, 16)
         self.assertEqual(proposer.hidden_size, 4096)
         self.assertTrue(proposer.use_cuda_graph)

@@ -86,7 +86,6 @@ class TestMtpProposer:
         assert proposer.dtype == torch.float16
         assert proposer.num_speculative_tokens == 2
         assert proposer.hidden_size == 4096
-        assert proposer.block_size == 16

         # Test with mrope enabled
         assert hasattr(proposer, "positions")
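Both proposer tests above drop their `block_size` asserts, matching item 4. A hedged sketch of where a caller would read the block size instead, assuming vLLM's standard config layout; whether https://github.com/vllm-project/vllm/pull/31540 makes this the canonical path is an assumption, not something this diff confirms:

```python
# Hedged sketch: read the block size from the engine config rather than the
# proposer. cache_config.block_size is a long-standing vLLM config field;
# treating it as the post-#31540 source of truth is an assumption.
def get_block_size(vllm_config) -> int:
    return vllm_config.cache_config.block_size
```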