### What this PR does / why we need it?
Upgrade vllm commit to 0105 (8be6432bdaf6275664d857b1e5e9bf8ed1ce299e)
1. Remove `maybe_padded_num_tokens` arg in `model_runner_v1.py` since
https://github.com/vllm-project/vllm/pull/31517 deleted unused arg
2. Remove dense `Qwen/Qwen3-0.6B` in
`tests/e2e/multicard/test_aclgraph_capture_replay.py` and
`tests/e2e/multicard/test_data_parallel.py` due to
https://github.com/vllm-project/vllm/pull/30739
where offline data parallel mode will not be supported/useful for dense
models
3. Adapt `vllm_ascend/worker/worker.py` due to
https://github.com/vllm-project/vllm/pull/31584
4. Adapt `self.block_size` calling due to
https://github.com/vllm-project/vllm/pull/31540
5. Modify `test_mla_v1.py` due to
https://github.com/vllm-project/vllm/pull/28454, which refactored
`get_head_size()`
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
7157596103
Signed-off-by: wjunLu <wjunlu217@gmail.com>
40 lines
1.4 KiB
Python
40 lines
1.4 KiB
Python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
"""Unit-test bootstrap for vllm-ascend.

triton and torch_npu are not available in this environment, so the
device-facing entry points they expose are mocked here, *before*
anything that imports them is loaded. Statement order in this module is
deliberate and behavior-critical: the triton mock must be installed
ahead of the ``vllm_ascend.utils`` imports.
"""
import sys
from unittest.mock import MagicMock

# Install a fake ``triton.runtime`` first: importing vllm_ascend queries
# the active driver for device properties at import time.
triton_runtime = MagicMock()
triton_runtime.driver.active.utils.get_device_properties.return_value = {
    'num_aic': 8,
    'num_vectorcore': 8,
}
sys.modules['triton.runtime'] = triton_runtime

# Must come after the triton mock above — hence the E402 suppression.
from vllm_ascend.utils import adapt_patch  # noqa: E402
from vllm_ascend.utils import register_ascend_customop  # noqa: E402

# torch_npu is likewise unavailable; stub the pieces the tests touch.
# NOTE(review): this assumes ``torch_npu`` is already present in
# ``sys.modules`` (presumably stubbed by an earlier import) — confirm,
# otherwise the attribute assignment below raises KeyError.
sys.modules['torch_npu'].npu.current_device = MagicMock(return_value=0)
sys.modules['torch_npu._inductor'] = MagicMock()

# Apply vllm-ascend's monkey patches in both invocation modes.
adapt_patch()
adapt_patch(True)

# Register Ascend CustomOps here because the unit tests rely on them.
register_ascend_customop()