[Main2Main] Upgrade vllm commit to 0113 (#5839)
### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)
- Modify import paths due to the refactors
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054
- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given` due to
https://github.com/vllm-project/vllm/pull/24498
- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are never
verified
https://github.com/vllm-project/vllm/pull/31998
- Skip some pooling tests, which are caused by
https://github.com/vllm-project/vllm/pull/32148
where vllm is also failed
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4
We will reopen those tests when main2main reachs
https://github.com/vllm-project/vllm/pull/32243
- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are
broken by
https://github.com/vllm-project/vllm/pull/32118
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef
Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
2
.github/workflows/_e2e_test.yaml
vendored
2
.github/workflows/_e2e_test.yaml
vendored
@@ -132,7 +132,7 @@ jobs:
|
||||
# spec_decode
|
||||
pytest -sv --durations=0 tests/e2e/singlecard/spec_decode/test_mtp_eagle_correctness.py
|
||||
pytest -sv --durations=0 tests/e2e/singlecard/spec_decode/test_v1_spec_decode.py
|
||||
|
||||
|
||||
e2e-2-cards:
|
||||
name: multicard-2
|
||||
runs-on: linux-aarch64-a3-2
|
||||
|
||||
2
.github/workflows/bot_pr_create.yaml
vendored
2
.github/workflows/bot_pr_create.yaml
vendored
@@ -37,7 +37,7 @@ jobs:
|
||||
steps:
|
||||
- name: Get vLLM version
|
||||
run: |
|
||||
VLLM_COMMIT=bde38c11df0ea066a740efe9b77fff5418be45df
|
||||
VLLM_COMMIT=11b6af5280d6d6dfb8953af16e67b25f819b3be9
|
||||
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV
|
||||
|
||||
- name: Checkout repository
|
||||
|
||||
2
.github/workflows/pr_test_full.yaml
vendored
2
.github/workflows/pr_test_full.yaml
vendored
@@ -75,7 +75,7 @@ jobs:
|
||||
name: e2e-full
|
||||
strategy:
|
||||
matrix:
|
||||
vllm_version: [bde38c11df0ea066a740efe9b77fff5418be45df, v0.13.0]
|
||||
vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
|
||||
needs: [changes]
|
||||
if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
|
||||
uses: ./.github/workflows/_e2e_test.yaml
|
||||
|
||||
6
.github/workflows/pr_test_light.yaml
vendored
6
.github/workflows/pr_test_light.yaml
vendored
@@ -41,7 +41,7 @@ jobs:
|
||||
lint:
|
||||
uses: ./.github/workflows/_pre_commit.yml
|
||||
with:
|
||||
vllm: bde38c11df0ea066a740efe9b77fff5418be45df
|
||||
vllm: 11b6af5280d6d6dfb8953af16e67b25f819b3be9
|
||||
changes:
|
||||
runs-on: linux-aarch64-a2-0
|
||||
outputs:
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
|
||||
strategy:
|
||||
matrix:
|
||||
vllm_version: [bde38c11df0ea066a740efe9b77fff5418be45df, v0.13.0]
|
||||
vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
|
||||
uses: ./.github/workflows/_unit_test.yaml
|
||||
with:
|
||||
vllm: ${{ matrix.vllm_version }}
|
||||
@@ -93,7 +93,7 @@ jobs:
|
||||
name: e2e-light
|
||||
strategy:
|
||||
matrix:
|
||||
vllm_version: [bde38c11df0ea066a740efe9b77fff5418be45df, v0.13.0]
|
||||
vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
|
||||
# Note (yikun): If CI resource are limited we can split job into two chain jobs
|
||||
needs: [lint, changes]
|
||||
# only trigger e2e test after lint passed and the change is e2e related with pull request.
|
||||
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
name: refresh codecov
|
||||
strategy:
|
||||
matrix:
|
||||
vllm_version: [bde38c11df0ea066a740efe9b77fff5418be45df]
|
||||
vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9]
|
||||
uses: ./.github/workflows/_unit_test.yaml
|
||||
with:
|
||||
vllm: ${{ matrix.vllm_version }}
|
||||
|
||||
@@ -53,7 +53,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL
|
||||
|
||||
| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|
||||
|-------------|--------------|------------------|-------------|--------------------|
|
||||
| main | bde38c11df0ea066a740efe9b77fff5418be45df, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
|
||||
| main | 11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
|
||||
|
||||
## Release cadence
|
||||
|
||||
|
||||
@@ -18,8 +18,10 @@
|
||||
#
|
||||
|
||||
import os
|
||||
import pytest
|
||||
|
||||
from tests.e2e.conftest import VllmRunner
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
os.environ["HCCL_BUFFSIZE"] = "512"
|
||||
|
||||
@@ -44,10 +46,15 @@ def test_pcp_dcp_mtp1_eager():
|
||||
"method": "deepseek_mtp",
|
||||
},
|
||||
enforce_eager=True,
|
||||
async_scheduling=False,
|
||||
) as runner:
|
||||
runner.generate_greedy(prompts, 32)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32118 break this",
|
||||
)
|
||||
def test_pcp_dcp_mtp3_eager():
|
||||
prompts = [
|
||||
"The capital of France is", "Hello, my name is Tom, I am",
|
||||
@@ -68,10 +75,15 @@ def test_pcp_dcp_mtp3_eager():
|
||||
"method": "deepseek_mtp",
|
||||
},
|
||||
enforce_eager=True,
|
||||
async_scheduling=False,
|
||||
) as runner:
|
||||
runner.generate_greedy(prompts, 32)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32118 break this",
|
||||
)
|
||||
def test_pcp_dcp_mtp3_piecewise_graph():
|
||||
prompts = [
|
||||
"The capital of France is", "Hello, my name is Tom, I am",
|
||||
@@ -95,10 +107,15 @@ def test_pcp_dcp_mtp3_piecewise_graph():
|
||||
"cudagraph_mode": "PIECEWISE",
|
||||
"cudagraph_capture_sizes": [4, 8, 16],
|
||||
},
|
||||
async_scheduling=False,
|
||||
) as runner:
|
||||
runner.generate_greedy(prompts, 32)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32118 break this",
|
||||
)
|
||||
def test_pcp_dcp_mtp3_full_graph():
|
||||
prompts = [
|
||||
"The capital of France is", "Hello, my name is Tom, I am",
|
||||
@@ -122,6 +139,7 @@ def test_pcp_dcp_mtp3_full_graph():
|
||||
"cudagraph_mode": "FULL_DECODE_ONLY",
|
||||
"cudagraph_capture_sizes": [4, 8, 16],
|
||||
},
|
||||
async_scheduling=False,
|
||||
) as runner:
|
||||
runner.generate_greedy(prompts, 32)
|
||||
|
||||
@@ -148,5 +166,6 @@ def test_dcp_mtp3_full_graph():
|
||||
"cudagraph_mode": "FULL_DECODE_ONLY",
|
||||
"cudagraph_capture_sizes": [4, 8, 16],
|
||||
},
|
||||
async_scheduling=False,
|
||||
) as runner:
|
||||
runner.generate_greedy(prompts, 32)
|
||||
|
||||
@@ -79,7 +79,7 @@ def test_qwen3_next_mtp_acceptance_tp4(model_name):
|
||||
for num_accepted_tokens in num_accepted_tokens_per_pos
|
||||
]
|
||||
|
||||
match = all(abs(a - b) < 0.06 for a, b in zip(acceptance_per_pos, golden))
|
||||
match = all((a >= b) or (b - a < 0.06) for a, b in zip(acceptance_per_pos, golden))
|
||||
if not match:
|
||||
print(f"acceptance_per_pos: {acceptance_per_pos}")
|
||||
print(f"golden: {golden}")
|
||||
|
||||
@@ -5,6 +5,8 @@ import torch
|
||||
import torch.nn.functional as F
|
||||
from modelscope import snapshot_download # type: ignore[import-untyped]
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
from tests.e2e.conftest import HfRunner, VllmRunner
|
||||
|
||||
CROSS_ENCODER_MODELS = [
|
||||
@@ -33,7 +35,10 @@ DTYPE = "half"
|
||||
def model_name(request):
|
||||
yield snapshot_download(request.param)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_1_to_1(model_name):
|
||||
text_pair = [TEXTS_1[0], TEXTS_2[0]]
|
||||
|
||||
@@ -53,6 +58,10 @@ def test_cross_encoder_score_1_to_1(model_name):
|
||||
assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_1_to_N(model_name):
|
||||
text_pairs = [
|
||||
[TEXTS_1[0], TEXTS_2[0]],
|
||||
@@ -76,6 +85,10 @@ def test_cross_encoder_score_1_to_N(model_name):
|
||||
assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_N_to_N(model_name):
|
||||
text_pairs = [
|
||||
[TEXTS_1[0], TEXTS_2[0]],
|
||||
|
||||
@@ -136,14 +136,11 @@ class TestAscendAttentionBackendImpl(TestBase):
|
||||
self.layer.layer_name = "test_layer"
|
||||
self.layer._k_scale_float = 1.0
|
||||
self.layer._v_scale_float = 1.0
|
||||
|
||||
self.attention_type = MagicMock()
|
||||
self.attention_type.DECODER = "decoder"
|
||||
self.attention_type.ENCODER = "encoder"
|
||||
|
||||
self.attn_metadata = MagicMock()
|
||||
self.attn_metadata.return_value = "1"
|
||||
|
||||
self.layer_no_quant = MagicMock(
|
||||
spec=['layer_name', '_k_scale_float', '_v_scale_float'])
|
||||
self.layer_no_quant.layer_name = "test_layer"
|
||||
|
||||
@@ -380,6 +380,7 @@ class TestAscendDeepseekScalingRotaryEmbedding(TestBase):
|
||||
class TestAscendMRotaryEmbedding(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
# Common setup for tests
|
||||
self.config_patcher = patch('vllm.config.vllm.get_current_vllm_config')
|
||||
self.mock_get_config = self.config_patcher.start()
|
||||
mock_config = MagicMock()
|
||||
|
||||
@@ -3,14 +3,21 @@ from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
from vllm.attention.selector import AttentionSelectorConfig
|
||||
from vllm.config.compilation import CompilationMode, CUDAGraphMode
|
||||
from vllm.platforms import PlatformEnum
|
||||
|
||||
from tests.ut.base import TestBase
|
||||
from vllm_ascend.platform import NPUPlatform
|
||||
from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD,
|
||||
COMPRESSED_TENSORS_METHOD, AscendDeviceType)
|
||||
COMPRESSED_TENSORS_METHOD, AscendDeviceType,
|
||||
vllm_version_is)
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.selector import AttentionSelectorConfig # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.selector import AttentionSelectorConfig # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
class TestNPUPlatform(TestBase):
|
||||
@@ -37,6 +44,9 @@ class TestNPUPlatform(TestBase):
|
||||
|
||||
def setUp(self):
|
||||
self.platform = NPUPlatform()
|
||||
self.platform.supported_quantization[:] = [
|
||||
"ascend", "compressed-tensors"
|
||||
]
|
||||
|
||||
def test_class_variables(self):
|
||||
self.assertEqual(NPUPlatform._enum, PlatformEnum.OOT)
|
||||
|
||||
@@ -22,15 +22,9 @@ from typing import ClassVar, List, Optional, Tuple, Type
|
||||
import torch
|
||||
import torch_npu
|
||||
import vllm.envs as envs_vllm
|
||||
from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
|
||||
AttentionLayer, AttentionType)
|
||||
from vllm.attention.backends.registry import (AttentionBackendEnum,
|
||||
register_backend)
|
||||
from vllm.config import VllmConfig, get_current_vllm_config
|
||||
from vllm.forward_context import ForwardContext, get_forward_context
|
||||
from vllm.utils.math_utils import cdiv
|
||||
from vllm.v1.attention.backends.utils import (AttentionCGSupport,
|
||||
AttentionMetadataBuilder)
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
from vllm.v1.kv_cache_interface import AttentionSpec, CrossAttentionSpec
|
||||
|
||||
@@ -45,7 +39,23 @@ from vllm_ascend.compilation.acl_graph import (
|
||||
update_draft_graph_params_workspaces, update_graph_params_workspaces)
|
||||
from vllm_ascend.device.device_op import DeviceOperator
|
||||
from vllm_ascend.ops.flashcomm2_oshard_manager import flashcomm2_oshard_manager
|
||||
from vllm_ascend.utils import weak_ref_tensors
|
||||
from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.v1.attention.backends.utils import (AttentionCGSupport,
|
||||
AttentionMetadataBuilder)
|
||||
from vllm.attention.backends.abstract import ( # type: ignore
|
||||
AttentionBackend, AttentionImpl, AttentionLayer, AttentionType)
|
||||
from vllm.attention.backends.registry import ( # type: ignore
|
||||
AttentionBackendEnum, register_backend)
|
||||
else:
|
||||
from vllm.v1.attention.backend import ( # type: ignore
|
||||
AttentionBackend, AttentionCGSupport, AttentionImpl, AttentionLayer,
|
||||
AttentionType, AttentionMetadataBuilder)
|
||||
from vllm.v1.attention.backends.registry import ( # type: ignore
|
||||
AttentionBackendEnum, register_backend)
|
||||
# isort: on
|
||||
|
||||
# default max value of sliding window size
|
||||
SWA_INT_MAX = 2147483647
|
||||
|
||||
@@ -27,7 +27,6 @@ from vllm.distributed import (get_dcp_group,
|
||||
get_decode_context_model_parallel_world_size,
|
||||
get_pcp_group)
|
||||
from vllm.forward_context import ForwardContext, get_forward_context
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.v1.kv_cache_interface import AttentionSpec
|
||||
|
||||
from vllm_ascend.attention.attention_v1 import (AscendAttentionBackendImpl,
|
||||
@@ -41,7 +40,13 @@ from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
|
||||
split_decodes_and_prefills)
|
||||
from vllm_ascend.compilation.acl_graph import (get_graph_params,
|
||||
update_graph_params_workspaces)
|
||||
from vllm_ascend.utils import cp_chunkedprefill_comm_stream, weak_ref_tensors
|
||||
from vllm_ascend.utils import (cp_chunkedprefill_comm_stream, vllm_version_is,
|
||||
weak_ref_tensors)
|
||||
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionCGSupport
|
||||
|
||||
|
||||
class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
|
||||
|
||||
@@ -10,7 +10,6 @@ from vllm.distributed import (get_dcp_group,
|
||||
get_pcp_group)
|
||||
from vllm.forward_context import ForwardContext, get_forward_context
|
||||
from vllm.utils.math_utils import cdiv
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
|
||||
|
||||
# isort: off
|
||||
@@ -28,7 +27,12 @@ from vllm_ascend.attention.context_parallel.common_cp import (
|
||||
from vllm_ascend.compilation.acl_graph import (get_draft_graph_params,
|
||||
get_graph_params,
|
||||
update_graph_params_workspaces)
|
||||
from vllm_ascend.utils import weak_ref_tensors
|
||||
from vllm_ascend.utils import weak_ref_tensors, vllm_version_is
|
||||
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionCGSupport
|
||||
|
||||
MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
|
||||
|
||||
|
||||
@@ -5,14 +5,12 @@ import numpy as np
|
||||
import torch
|
||||
import torch_npu
|
||||
import vllm.envs as envs_vllm
|
||||
from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
|
||||
from vllm.config import VllmConfig, get_current_vllm_config
|
||||
from vllm.forward_context import ForwardContext, get_forward_context
|
||||
from vllm.logger import logger
|
||||
from vllm.model_executor.layers.linear import UnquantizedLinearMethod
|
||||
from vllm.utils.math_utils import cdiv, round_down
|
||||
from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
|
||||
|
||||
from vllm_ascend import envs
|
||||
@@ -44,10 +42,17 @@ from vllm_ascend.worker.npu_input_batch import NPUInputBatch
|
||||
if TYPE_CHECKING:
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.attention.backends.abstract import ( # type: ignore
|
||||
AttentionBackend, MLAAttentionImpl)
|
||||
from vllm.attention.backends.utils import PAD_SLOT_ID # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import ( # type: ignore
|
||||
AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
|
||||
from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
|
||||
# isort: on
|
||||
|
||||
MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
|
||||
BUILD_METADATA_STEP_PREFILL = 0
|
||||
|
||||
@@ -5,7 +5,6 @@ import torch
|
||||
import torch_npu
|
||||
import vllm.envs as envs_vllm
|
||||
from torch import nn
|
||||
from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
|
||||
from vllm.config import CUDAGraphMode, VllmConfig, get_current_vllm_config
|
||||
from vllm.distributed import get_tensor_model_parallel_world_size, get_tp_group
|
||||
from vllm.forward_context import get_forward_context
|
||||
@@ -13,7 +12,6 @@ from vllm.logger import logger
|
||||
from vllm.model_executor.layers.linear import UnquantizedLinearMethod
|
||||
from vllm.triton_utils import HAS_TRITON
|
||||
from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.v1.kv_cache_interface import AttentionSpec
|
||||
|
||||
from vllm_ascend import envs
|
||||
@@ -34,11 +32,20 @@ from vllm_ascend.ops.triton.rope import rope_forward_triton
|
||||
from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
|
||||
from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
|
||||
from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, _round_up, dispose_layer,
|
||||
enable_dsa_cp, maybe_trans_nz)
|
||||
enable_dsa_cp, maybe_trans_nz, vllm_version_is)
|
||||
from vllm_ascend.worker.npu_input_batch import NPUInputBatch
|
||||
|
||||
# isort: off
|
||||
if TYPE_CHECKING:
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.v1.attention.backends.utils import AttentionCGSupport
|
||||
from vllm.attention.backends.abstract import ( # type: ignore
|
||||
AttentionBackend, MLAAttentionImpl)
|
||||
else:
|
||||
from vllm.v1.attention.backend import ( # type: ignore
|
||||
AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
|
||||
# isort: on
|
||||
|
||||
|
||||
class AscendSFABackend(AttentionBackend):
|
||||
|
||||
@@ -43,13 +43,18 @@ from vllm.v1.request import RequestStatus
|
||||
from vllm_ascend.ascend_config import get_ascend_config, init_ascend_config
|
||||
from vllm_ascend.distributed.kv_transfer.utils.mooncake_transfer_engine import global_te
|
||||
from vllm_ascend.distributed.kv_transfer.utils.utils import get_transfer_timeout_value
|
||||
from vllm_ascend.utils import is_vl_model
|
||||
from vllm_ascend.utils import is_vl_model, vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.attention.backends import AttentionMetadata # type: ignore
|
||||
from vllm.forward_context import ForwardContext
|
||||
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
|
||||
from vllm.v1.request import Request
|
||||
# isort: on
|
||||
|
||||
GET_META_MSG = b"get_meta_msg"
|
||||
DONE_RECVING_MSG = b"done_recving_msg"
|
||||
|
||||
@@ -38,13 +38,18 @@ from vllm_ascend.distributed.kv_transfer.utils.mooncake_transfer_engine import \
|
||||
global_te
|
||||
from vllm_ascend.distributed.kv_transfer.utils.utils import (
|
||||
align_memory, get_transfer_timeout_value, kv_alltoall_and_rearrange)
|
||||
from vllm_ascend.utils import npu_stream_switch
|
||||
from vllm_ascend.utils import npu_stream_switch, vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.attention.backends import AttentionMetadata # type: ignore
|
||||
from vllm.forward_context import ForwardContext
|
||||
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
|
||||
from vllm.v1.request import Request
|
||||
# isort: on
|
||||
|
||||
DONE_SENDING_MSG = b"done_sending_msg"
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ from typing import Any, Optional
|
||||
|
||||
import torch
|
||||
import zmq
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
from vllm.config import VllmConfig
|
||||
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
|
||||
KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole)
|
||||
@@ -20,6 +19,14 @@ from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.pool_scheduler imp
|
||||
KVPoolScheduler, get_zmq_rpc_path_lookup)
|
||||
from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.pool_worker import \
|
||||
KVPoolWorker
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionMetadata # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
class AscendStoreConnector(KVConnectorBase_V1):
|
||||
|
||||
@@ -9,7 +9,6 @@ from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any, Optional, Sequence
|
||||
|
||||
import torch
|
||||
from vllm.attention.backends.abstract import AttentionType
|
||||
from vllm.attention.layer import Attention, MLAAttention
|
||||
from vllm.config import VllmConfig, get_layers_from_vllm_config
|
||||
from vllm.distributed.ec_transfer import get_ec_transfer, has_ec_transfer
|
||||
@@ -26,13 +25,25 @@ from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheSpec,
|
||||
|
||||
from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.metadata import (
|
||||
MetadataServer, MetadataServerProc, MLAConfig)
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionType # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionType # type: ignore
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import \
|
||||
AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionType #type: ignore
|
||||
from vllm.forward_context import ForwardContext
|
||||
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
|
||||
from vllm.v1.kv_cache_interface import KVCacheConfig
|
||||
from vllm.v1.request import Request
|
||||
# isort: on
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -9,16 +9,23 @@ from vllm.distributed.kv_transfer.kv_connector.v1.base import (
|
||||
from vllm.logger import init_logger
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
# isort: off
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionMetadata # type: ignore
|
||||
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
|
||||
KVConnectorPromMetrics, KVConnectorStats, PromMetric, PromMetricT)
|
||||
from vllm.forward_context import ForwardContext
|
||||
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
|
||||
from vllm.v1.kv_cache_interface import KVCacheConfig
|
||||
from vllm.v1.request import Request
|
||||
# isort: on
|
||||
|
||||
|
||||
class UCMConnectorV1(KVConnectorBase_V1):
|
||||
|
||||
@@ -1,12 +1,20 @@
|
||||
import numpy as np
|
||||
import torch
|
||||
from vllm.attention.backends.abstract import AttentionBackend
|
||||
from vllm.logger import init_logger
|
||||
from vllm.utils.platform_utils import is_pin_memory_available
|
||||
from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
|
||||
from vllm.v1.kv_offload.worker.worker import (OffloadingHandler,
|
||||
TransferResult, TransferSpec)
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionBackend # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionBackend # type: ignore
|
||||
# isort: on
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
@@ -166,3 +174,13 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
|
||||
for job_id, _ in results:
|
||||
del self.transfer_events[job_id]
|
||||
return results
|
||||
|
||||
def wait(self, job_ids: set[int]) -> None:
|
||||
"""
|
||||
Wait (block) until all specified transfer jobs are completed.
|
||||
"""
|
||||
for job_id in job_ids:
|
||||
event = self.transfer_events.get(job_id)
|
||||
if event is not None:
|
||||
# This will block until the NPU event is complete
|
||||
event.synchronize()
|
||||
@@ -2,7 +2,6 @@ from collections.abc import Iterator
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from vllm.attention.backends.abstract import AttentionBackend
|
||||
from vllm.config import VllmConfig
|
||||
from vllm.v1.kv_offload.abstract import LoadStoreSpec, OffloadingManager
|
||||
from vllm.v1.kv_offload.backends.cpu import CPUBackend
|
||||
@@ -10,14 +9,28 @@ from vllm.v1.kv_offload.lru_manager import LRUOffloadingManager
|
||||
from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
|
||||
from vllm.v1.kv_offload.spec import OffloadingSpec
|
||||
from vllm.v1.kv_offload.worker.worker import OffloadingHandler
|
||||
from vllm.v1.kv_cache_interface import KVCacheConfig
|
||||
|
||||
from vllm_ascend.kv_offload.cpu_npu import CpuNpuOffloadingHandler
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionBackend # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionBackend # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
class NPUOffloadingSpec(OffloadingSpec):
|
||||
|
||||
def __init__(self, vllm_config: VllmConfig):
|
||||
super().__init__(vllm_config)
|
||||
def __init__(self,
|
||||
vllm_config: VllmConfig,
|
||||
kv_cache_config: Optional[KVCacheConfig] = None):
|
||||
if vllm_version_is('0.13.0'):
|
||||
super().__init__(vllm_config)
|
||||
else:
|
||||
super().__init__(vllm_config, kv_cache_config)
|
||||
|
||||
num_cpu_blocks = self.extra_config.get("num_cpu_blocks")
|
||||
if not num_cpu_blocks:
|
||||
|
||||
@@ -23,7 +23,6 @@ from typing import Optional
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
from vllm.attention.layer import MLAAttention
|
||||
from vllm.config import CacheConfig, get_current_vllm_config
|
||||
from vllm.distributed import get_tensor_model_parallel_world_size
|
||||
@@ -34,6 +33,14 @@ from vllm.model_executor.layers.quantization import QuantizationConfig
|
||||
from vllm.utils.torch_utils import direct_register_custom_op
|
||||
|
||||
from vllm_ascend.ascend_config import get_ascend_config
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionMetadata # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
class IndexerWrapper(nn.Module):
|
||||
|
||||
@@ -19,10 +19,17 @@ import einops
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch_npu
|
||||
from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention
|
||||
from vllm.config import MultiModalConfig
|
||||
|
||||
import vllm_ascend.envs as envs_ascend
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention # type: ignore
|
||||
else:
|
||||
from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention # type: ignore
|
||||
# isort: on
|
||||
|
||||
MIN_PAD_SIZE = 64 # min_size to pad weight
|
||||
MAX_PAD_SIZE = 128 # max_size to pad weight
|
||||
|
||||
@@ -16,10 +16,12 @@ import triton.language as tl
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.utils import PAD_SLOT_ID # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
def causal_conv1d_ref(
|
||||
|
||||
@@ -18,7 +18,6 @@
|
||||
import torch
|
||||
from einops import rearrange
|
||||
from torch import nn
|
||||
from vllm.attention.backends.abstract import AttentionMetadata
|
||||
from vllm.config import CUDAGraphMode
|
||||
from vllm.forward_context import get_forward_context
|
||||
from vllm.model_executor.layers.fla.ops import (
|
||||
@@ -36,6 +35,14 @@ from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import \
|
||||
from vllm_ascend.ops.triton.fla.sigmoid_gating import \
|
||||
fused_sigmoid_gating_delta_rule_update
|
||||
from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import AttentionMetadata # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.backend import AttentionMetadata # type: ignore
|
||||
# isort: on
|
||||
|
||||
|
||||
class AscendQwen3Next_GatedDeltaNet(nn.Module, MambaBase):
|
||||
|
||||
@@ -30,9 +30,7 @@ import numpy as np
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from vllm.attention.backends.abstract import AttentionBackend, AttentionType
|
||||
from vllm.attention.layer import Attention, MLAAttention
|
||||
from vllm.attention.selector import get_attn_backend
|
||||
from vllm.config import (CompilationMode, CUDAGraphMode, VllmConfig,
|
||||
get_layers_from_vllm_config)
|
||||
from vllm.distributed import (get_tensor_model_parallel_world_size,
|
||||
@@ -119,6 +117,15 @@ if TYPE_CHECKING:
|
||||
else:
|
||||
xgr = LazyLoader("xgr", globals(), "xgrammar")
|
||||
|
||||
# isort: off
|
||||
if vllm_version_is('0.13.0'):
|
||||
from vllm.attention.backends.abstract import ( # type: ignore
|
||||
AttentionBackend, AttentionType)
|
||||
from vllm.attention.selector import get_attn_backend # type: ignore
|
||||
else:
|
||||
from vllm.v1.attention.selector import get_attn_backend # type: ignore
|
||||
from vllm.v1.attention.backend import AttentionBackend, AttentionType # type: ignore
|
||||
# isort: on
|
||||
import torch_npu
|
||||
|
||||
# if true, allow tensor initialization and casting with internal format (e.g., NZ)
|
||||
@@ -1817,12 +1824,20 @@ class NPUModelRunner(GPUModelRunner):
|
||||
valid_sampled_token_ids[int(i)].clear()
|
||||
else:
|
||||
# Includes spec decode tokens.
|
||||
valid_sampled_token_ids, cu_num_tokens = RejectionSampler.parse_output(
|
||||
sampled_token_ids,
|
||||
self.input_batch.vocab_size,
|
||||
discard_sampled_tokens_req_indices,
|
||||
return_cu_num_tokens=logprobs_tensors is not None,
|
||||
)
|
||||
if vllm_version_is('0.13.0'):
|
||||
valid_sampled_token_ids, cu_num_tokens = RejectionSampler.parse_output(
|
||||
sampled_token_ids,
|
||||
self.input_batch.vocab_size,
|
||||
discard_sampled_tokens_req_indices,
|
||||
return_cu_num_tokens=logprobs_tensors is not None,
|
||||
)
|
||||
else:
|
||||
valid_sampled_token_ids, cu_num_tokens = RejectionSampler.parse_output(
|
||||
sampled_token_ids,
|
||||
self.input_batch.vocab_size,
|
||||
discard_sampled_tokens_req_indices,
|
||||
logprobs_tensors=logprobs_tensors,
|
||||
)
|
||||
else:
|
||||
valid_sampled_token_ids = []
|
||||
invalid_req_indices = discard_sampled_tokens_req_indices.tolist()
|
||||
|
||||
Reference in New Issue
Block a user