diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml
index 048679c2..1a310aae 100644
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -34,7 +34,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9
+          VLLM_COMMIT=5fbfa8d9ef15948599631baeb91e8220b2ee9bcc
           echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV
 
       - name: Checkout repository
diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml
index e747b5ac..17266072 100644
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -74,7 +74,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [v0.13.0]
+        vllm_version: [5fbfa8d9ef15948599631baeb91e8220b2ee9bcc, v0.13.0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
     uses: ./.github/workflows/_e2e_test.yaml
diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml
index 04cf55fa..ae0f9a39 100644
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -42,7 +42,7 @@ jobs:
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: v0.13.0
+      vllm: 5fbfa8d9ef15948599631baeb91e8220b2ee9bcc
   changes:
     runs-on: linux-aarch64-a2-0
     outputs:
@@ -90,7 +90,7 @@ jobs:
       SOC_VERSION: ascend910b1
     strategy:
       matrix:
-        vllm_version: [v0.13.0]
+        vllm_version: [5fbfa8d9ef15948599631baeb91e8220b2ee9bcc, v0.13.0]
 
     steps:
       - name: Free up disk space
@@ -160,7 +160,7 @@ jobs:
     name: e2e-light
     strategy:
      matrix:
-        vllm_version: [v0.13.0]
+        vllm_version: [5fbfa8d9ef15948599631baeb91e8220b2ee9bcc, v0.13.0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md
index 02eb1d2d..ed3816e2 100644
--- a/docs/source/community/versioning_policy.md
+++ b/docs/source/community/versioning_policy.md
@@ -50,7 +50,7 @@ If you're using v0.7.3, don't forget to install [mindie-turbo](https://pypi.org/
 For main branch of vLLM Ascend, we usually make it compatible with the latest vLLM release and a newer commit hash of vLLM. Please note that this table is usually updated. Please check it regularly.
 
 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
+| main | 5fbfa8d9ef15948599631baeb91e8220b2ee9bcc, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
 
 ## Release cadence
diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py
index 12f5101e..be9d2a2d 100644
--- a/tests/e2e/conftest.py
+++ b/tests/e2e/conftest.py
@@ -781,11 +781,6 @@ PROMPT_CONFIGS = {
             "fps": 1,
         },
     },
-    "hunyuan-vl": {
-        "model": "Tencent-Hunyuan/HunyuanOCR",
-        "prompt_fn": hunyuan_prompt,
-        "mm_processor_kwargs": {},
-    },
 }
 
 
diff --git a/vllm_ascend/ops/mm_encoder_attention.py b/vllm_ascend/ops/mm_encoder_attention.py
index 397e0ba3..6f21a5ce 100644
--- a/vllm_ascend/ops/mm_encoder_attention.py
+++ b/vllm_ascend/ops/mm_encoder_attention.py
@@ -58,6 +58,30 @@ class AscendMMEncoderAttention(MMEncoderAttention):
             multimodal_config=multimodal_config,
         )
 
+    def reshape_qkv_to_3d(
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        bsz: int,
+        q_len: int,
+        kv_len: int,
+    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Reshape query, key, value to 3D tensors:
+        (batch_size * seq_len, num_heads, head_size)
+        """
+        query = query.view(bsz * q_len, self.num_heads, self.head_size)
+        key = key.view(bsz * kv_len, self.num_kv_heads, self.head_size)
+        value = value.view(bsz * kv_len, self.num_kv_heads, self.head_size)
+        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
+        if (num_repeat := self.num_queries_per_kv) > 1:
+            # Handle MQA and GQA
+            key = torch.repeat_interleave(key, num_repeat, dim=1)
+            value = torch.repeat_interleave(value, num_repeat, dim=1)
+
+        return query, key, value
+
     def forward_oot(
         self,
         query: torch.Tensor,
@@ -86,6 +110,13 @@ class AscendMMEncoderAttention(MMEncoderAttention):
             v = F.pad(v, (0, pad_len), mode="constant", value=0)
 
         context_layer = torch.empty_like(q)
+
+        if cu_seqlens is None:
+            cu_seqlens = torch.arange(0, (bsz + 1) * q_len,
+                                      step=q_len,
+                                      dtype=torch.int32,
+                                      device=query.device)
+
         cu_seqlens = torch.diff(cu_seqlens).to("cpu")
 
         # operator requires pta version >= 2.5.1
diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 7f2eb793..b98e7104 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -232,7 +232,11 @@ class NPUPlatform(Platform):
                 "using only ACL Graph mode")
             assert compilation_config.mode == CompilationMode.VLLM_COMPILE, \
                 "When enabling VLLM_COMPILE aclgraph, please make sure compilation_config.mode == CompilationMode.VLLM_COMPILE and compilation_config.cudagraph_mode == CUDAGraphMode.VLLM_COMPILE"
-            compilation_config.set_splitting_ops_for_v1()
+            compilation_config.set_splitting_ops_for_v1(
+                all2all_backend=vllm_config.parallel_config.all2all_backend,
+                data_parallel_size=vllm_config.parallel_config.
+                data_parallel_size,
+            )
             compilation_config.use_inductor = False
             compilation_config.splitting_ops.extend(["vllm::mla_forward"])
             update_aclgraph_sizes(vllm_config)
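Reviewer note: the snippet below is a minimal standalone sketch (not part of the patch) of the two behaviours added in vllm_ascend/ops/mm_encoder_attention.py: the default cu_seqlens built with torch.arange when the caller passes None, and the torch.repeat_interleave expansion of key/value heads for MQA/GQA. The toy shapes (bsz, q_len, num_heads, num_kv_heads, head_size) are illustrative assumptions, not values taken from vLLM.

```python
# Standalone sketch, assuming toy shapes; mirrors the logic in the diff above.
import torch

bsz, q_len, kv_len = 2, 4, 4                 # assumed toy batch and sequence lengths
num_heads, num_kv_heads, head_size = 8, 2, 16  # assumed head layout (GQA: 8 query heads, 2 KV heads)

# Tensors already flattened to (batch * seq_len, heads, head_size), as reshape_qkv_to_3d produces.
query = torch.randn(bsz * q_len, num_heads, head_size)
key = torch.randn(bsz * kv_len, num_kv_heads, head_size)
value = torch.randn(bsz * kv_len, num_kv_heads, head_size)

# Default cu_seqlens when the caller passes None: equal-length sequences, so the
# cumulative lengths are 0, q_len, 2*q_len, ..., bsz*q_len.
cu_seqlens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32)
# torch.diff turns the cumulative form back into per-sequence lengths on CPU.
seq_lens = torch.diff(cu_seqlens).to("cpu")
print(seq_lens.tolist())                     # [4, 4]

# MQA/GQA: repeat each key/value head so it lines up one-to-one with the query heads.
num_queries_per_kv = num_heads // num_kv_heads
if num_queries_per_kv > 1:
    key = torch.repeat_interleave(key, num_queries_per_kv, dim=1)
    value = torch.repeat_interleave(value, num_queries_per_kv, dim=1)
print(query.shape, key.shape, value.shape)
# torch.Size([8, 8, 16]) torch.Size([8, 8, 16]) torch.Size([8, 8, 16])
```

With equal-length sequences the torch.diff of the constructed cu_seqlens simply recovers q_len per batch entry, which appears to be the per-sequence length form forward_oot hands to the Ascend attention operator.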