diff --git a/.github/workflows/_e2e_nightly_single_node.yaml b/.github/workflows/_e2e_nightly_single_node.yaml
index 4e8fa1a6..bf6062a6 100644
--- a/.github/workflows/_e2e_nightly_single_node.yaml
+++ b/.github/workflows/_e2e_nightly_single_node.yaml
@@ -143,5 +143,3 @@ jobs:
           # ignore test_dispatch_ffn_combine until the test is fixed
           pytest -sv ${{ inputs.tests }} \
             --ignore=tests/e2e/nightly/single_node/ops/singlecard_ops/test_fused_moe.py
-
-
diff --git a/.github/workflows/nightly_test_a3.yaml b/.github/workflows/nightly_test_a3.yaml
index a507dcba..1f5b5cf9 100644
--- a/.github/workflows/nightly_test_a3.yaml
+++ b/.github/workflows/nightly_test_a3.yaml
@@ -83,6 +83,9 @@ jobs:
           - name: multi-node-qwen-vl-disagg-pd
             config_file_path: Qwen3-VL-235B-disagg-pd.yaml
             size: 2
+          - name: multi-node-kimi-k2-instruct-w8a8
+            config_file_path: Kimi-K2-Instruct-W8A8.yaml
+            size: 2
     uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
     with:
       soc_version: a3
@@ -144,6 +147,9 @@ jobs:
           - name: qwen3-next-w8a8
             os: linux-aarch64-a3-4
             tests: tests/e2e/nightly/single_node/models/test_qwen3_next_w8a8.py
+          - name: kimi-k2-thinking
+            os: linux-aarch64-a3-16
+            tests: tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py
           # TODO: Replace deepseek3.2-exp with deepseek3.2 after nightly tests pass
           # - name: deepseek3_2-exp-w8a8
           #   os: linux-aarch64-a3-16
diff --git a/tests/e2e/nightly/multi_node/config/Kimi-K2-Instruct-W8A8.yaml b/tests/e2e/nightly/multi_node/config/Kimi-K2-Instruct-W8A8.yaml
new file mode 100644
index 00000000..4ddfeffd
--- /dev/null
+++ b/tests/e2e/nightly/multi_node/config/Kimi-K2-Instruct-W8A8.yaml
@@ -0,0 +1,79 @@
+test_name: "test Kimi-K2-Instruct-W8A8 2-nodes-dp4-tp8-torchair"
+model: "vllm-ascend/Kimi-K2-Instruct-W8A8"
+
+num_nodes: 2
+npu_per_node: 16
+env_common:
+  VLLM_USE_MODELSCOPE: true
+  HCCL_BUFFSIZE: 1024
+  SERVER_PORT: 8080
+  OMP_PROC_BIND: false
+  OMP_NUM_THREADS: 100
+  NUMEXPR_MAX_THREADS: 128
+
+deployment:
+  -
+    server_cmd: >
+      vllm serve "vllm-ascend/Kimi-K2-Instruct-W8A8"
+      --host 0.0.0.0
+      --port $SERVER_PORT
+      --data-parallel-size 4
+      --data-parallel-size-local 2
+      --data-parallel-start-rank 0
+      --data-parallel-address $LOCAL_IP
+      --data-parallel-rpc-port 13389
+      --tensor-parallel-size 8
+      --seed 1024
+      --enable-expert-parallel
+      --max-num-seqs 32
+      --max-model-len 8192
+      --max-num-batched-tokens 8192
+      --quantization ascend
+      --trust-remote-code
+      --no-enable-prefix-caching
+      --gpu-memory-utilization 0.9
+      --additional-config '{"torchair_graph_config":{"enabled":true}}'
+
+  -
+    server_cmd: >
+      vllm serve "vllm-ascend/Kimi-K2-Instruct-W8A8"
+      --headless
+      --data-parallel-size 4
+      --data-parallel-size-local 2
+      --data-parallel-start-rank 2
+      --data-parallel-address $MASTER_IP
+      --data-parallel-rpc-port 13389
+      --tensor-parallel-size 8
+      --seed 1024
+      --enable-expert-parallel
+      --max-num-seqs 32
+      --max-model-len 8192
+      --max-num-batched-tokens 8192
+      --quantization ascend
+      --trust-remote-code
+      --no-enable-prefix-caching
+      --gpu-memory-utilization 0.9
+      --additional-config '{"torchair_graph_config":{"enabled":true}}'
+
+benchmarks:
+  perf:
+    case_type: performance
+    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
+    request_conf: vllm_api_stream_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+    num_prompts: 512
+    max_out_len: 256
+    batch_size: 64
+    trust_remote_code: True
+    request_rate: 11.2
+    baseline: 1
+    threshold: 0.97
+  acc:
+    case_type: accuracy
+    dataset_path: vllm-ascend/gsm8k-lite
+    request_conf: vllm_api_general_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
+    max_out_len: 7680
+    batch_size: 64
+    baseline: 95
+    threshold: 5
diff --git a/tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py b/tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py
new file mode 100644
index 00000000..a4d64f2b
--- /dev/null
+++ b/tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+#
+from typing import Any
+
+import openai
+import pytest
+from vllm.utils.network_utils import get_open_port
+
+from tests.e2e.conftest import RemoteOpenAIServer
+from tools.aisbench import run_aisbench_cases
+
+MODELS = [
+    "moonshotai/Kimi-K2-Thinking",
+]
+
+TENSOR_PARALLELS = [16]
+
+prompts = [
+    "San Francisco is a",
+]
+
+api_keyword_args = {
+    "max_tokens": 10,
+}
+
+aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/gsm8k-lite",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
+    "max_out_len": 4096,
+    "batch_size": 32,
+    "baseline": 95,
+    "threshold": 5
+}, {
+    "case_type": "performance",
+    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
+    "request_conf": "vllm_api_stream_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
+    "num_prompts": 512,
+    "max_out_len": 256,
+    "batch_size": 64,
+    "trust_remote_code": True,
+    "request_rate": 11.2,
+    "baseline": 1,
+    "threshold": 0.97
+}]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
+async def test_models(model: str, tp_size: int) -> None:
+    port = get_open_port()
+    env_dict = {
+        "HCCL_BUFFSIZE": "1024",
+        "TASK_QUEUE_ENABLE": "1",
+        "OMP_PROC_BIND": "false",
+        "HCCL_OP_EXPANSION_MODE": "AIV",
+        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
+    }
+    server_args = [
+        "--tensor-parallel-size",
+        str(tp_size),
+        "--port",
+        str(port),
+        "--max-model-len",
+        "8192",
+        "--max-num-batched-tokens",
+        "8192",
+        "--max-num-seqs",
+        "12",
+        "--gpu-memory-utilization",
+        "0.9",
+        "--trust-remote-code",
+        "--enable-expert-parallel",
+        "--no-enable-prefix-caching",
+    ]
+    request_keyword_args: dict[str, Any] = {
+        **api_keyword_args,
+    }
+    with RemoteOpenAIServer(model,
+                            server_args,
+                            server_port=port,
+                            env_dict=env_dict,
+                            auto_port=False) as server:
+        client = server.get_async_client()
+        batch = await client.completions.create(
+            model=model,
+            prompt=prompts,
+            **request_keyword_args,
+        )
+        choices: list[openai.types.CompletionChoice] = batch.choices
+        assert choices[0].text, "empty response"
+        # aisbench test
+        run_aisbench_cases(model, port, aisbench_cases)
diff --git a/tools/aisbench.py b/tools/aisbench.py
index 2dc13b4d..a4ddb0ad 100644
--- a/tools/aisbench.py
+++ b/tools/aisbench.py
@@ -92,6 +92,8 @@ class AisbenchRunner:
         self.max_out_len = aisbench_config["max_out_len"]
         self.batch_size = aisbench_config["batch_size"]
         self.request_rate = aisbench_config.get("request_rate", 0)
+        self.trust_remote_code = aisbench_config.get("trust_remote_code",
+                                                     False)
         self.temperature = aisbench_config.get("temperature")
         self.top_k = aisbench_config.get("top_k")
         self.top_p = aisbench_config.get("top_p")
@@ -145,6 +147,9 @@ class AisbenchRunner:
                          f'max_out_len = {self.max_out_len},', content)
         content = re.sub(r'batch_size.*', f'batch_size = {self.batch_size},',
                          content)
+        content = re.sub(r'trust_remote_code=.*',
+                         f'trust_remote_code={self.trust_remote_code},',
+                         content)
         content = content.replace("top_k", "#top_k")
         content = content.replace("seed", "#seed")
         content = content.replace("repetition_penalty", "#repetition_penalty")
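
For context on how the new option is consumed, below is a minimal, self-contained sketch of the trust_remote_code patching step added to tools/aisbench.py: the runner reads the flag from the benchmark case config (defaulting to False) and rewrites any trust_remote_code=... assignment in the rendered AISBench dataset template. The template string here is purely illustrative, not the real vllm_api_general_chat/gsm8k config shipped with AISBench.

import re

# Hypothetical stand-in for a rendered AISBench dataset config template.
template = (
    "datasets = dict(\n"
    "    trust_remote_code=False,\n"
    "    batch_size = 1,\n"
    "    max_out_len = 512,\n"
    ")\n"
)

# Mirrors the new AisbenchRunner behaviour: take the flag from the case
# config, default to False, and patch the template text in place.
aisbench_config = {"trust_remote_code": True}
trust_remote_code = aisbench_config.get("trust_remote_code", False)
patched = re.sub(r'trust_remote_code=.*',
                 f'trust_remote_code={trust_remote_code},',
                 template)
print(patched)  # the trust_remote_code line now reads: trust_remote_code=True,

Since the performance case in test_kimi_k2_thinking.py sets "trust_remote_code": True, this patching presumably allows the benchmark to load the model's custom tokenizer code while generating requests.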