test_name: "test Qwen3-VL-235B-A22B disaggregated_prefill"
model: "Qwen/Qwen3-VL-235B-A22B-Instruct"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080  # expanded as $SERVER_PORT in the server commands below
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 1
  HCCL_OP_EXPANSION_MODE: "AIV"
  TASK_QUEUE_ENABLE: 1
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
disaggregated_prefill:
  enabled: true
  prefiller_host_index: [0]  # node 0 runs the prefiller (kv_producer)
  decoder_host_index: [1]    # node 1 runs the decoder (kv_consumer)
deployment:
  # Prefiller, node 0: data-parallel 2 x tensor-parallel 8 = 16 NPUs
  - server_cmd: >
      vllm serve "Qwen/Qwen3-VL-235B-A22B-Instruct"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 2
      --data-parallel-size-local 2
      --tensor-parallel-size 8
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 32
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --kv-transfer-config '{"kv_connector": "MooncakeConnectorV1", "kv_role": "kv_producer", "kv_port": "30000", "engine_id": "0", "kv_connector_extra_config": { "prefill": { "dp_size": 2, "tp_size": 8 }, "decode": { "dp_size": 4, "tp_size": 4 } } }'
  # Decoder, node 1: data-parallel 4 x tensor-parallel 4 = 16 NPUs,
  # with full-decode-only graph capture enabled
  - server_cmd: >
      vllm serve "Qwen/Qwen3-VL-235B-A22B-Instruct"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 4
      --data-parallel-size-local 4
      --tensor-parallel-size 4
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 32
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --compilation-config '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
      --kv-transfer-config '{"kv_connector": "MooncakeConnectorV1", "kv_role": "kv_consumer", "kv_port": "30200", "engine_id": "1", "kv_connector_extra_config": { "prefill": { "dp_size": 2, "tp_size": 8 }, "decode": { "dp_size": 4, "tp_size": 4 } } }'
benchmarks:
  perf:
    case_type: performance
    dataset_path: vllm-ascend/textvqa-perf-1080p
    request_conf: vllm_api_stream_chat
    dataset_conf: textvqa/textvqa_gen_base64
    num_prompts: 2800
    max_out_len: 1500
    batch_size: 64
    request_rate: 11.2
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/textvqa-lite
    request_conf: vllm_api_stream_chat
    dataset_conf: textvqa/textvqa_gen_base64
    max_out_len: 7680
    batch_size: 64
    baseline: 85
    threshold: 5
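# Readiness sketch (an assumption about operation, not part of this config or
# the test harness): vLLM's OpenAI-compatible server exposes GET /health, so
# each node can be polled before the benchmarks start. Host and port below
# are placeholders.
#
#   until curl -sf http://127.0.0.1:8080/health; do sleep 5; done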
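# Illustrative request (assumes direct access to the serving endpoint; in a
# disaggregated P/D deployment traffic is typically routed through a proxy
# that this config does not define). It mirrors the streaming, base64-image
# style implied by request_conf vllm_api_stream_chat and dataset_conf
# textvqa/textvqa_gen_base64; <BASE64_IMAGE> is a placeholder.
#
#   curl http://127.0.0.1:8080/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#         "model": "Qwen/Qwen3-VL-235B-A22B-Instruct",
#         "stream": true,
#         "max_tokens": 64,
#         "messages": [{
#           "role": "user",
#           "content": [
#             {"type": "image_url",
#              "image_url": {"url": "data:image/jpeg;base64,<BASE64_IMAGE>"}},
#             {"type": "text", "text": "What does the text in the image say?"}
#           ]
#         }]
#       }'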