upgrade to vllm 0.11.2 (#4400)

Bump vLLM version to v0.11.2

What's broken and changed by vLLM:
1. structured_output is broken by
https://github.com/vllm-project/vllm/pull/26866
2. get_mrope_input_positions is broken by
https://github.com/vllm-project/vllm/pull/28399
3. graph mode is broken by
https://github.com/vllm-project/vllm/pull/25110 we'll upgrade torch to
2.8 to fix the problem later
4. embedding is broken by
https://github.com/vllm-project/vllm/pull/27583
5. `get_attn_backend_cls` and the attention backend are broken by
https://github.com/vllm-project/vllm/pull/28534
6. spec decode is broken by
https://github.com/vllm-project/vllm/pull/28771
7. sp feature is broken by
https://github.com/vllm-project/vllm/pull/27126
8. mtp is broken by https://github.com/vllm-project/vllm/pull/27922
9. lora is broken by https://github.com/vllm-project/vllm/pull/21068
10. execute_model is broken by
https://github.com/vllm-project/vllm/pull/26866
11. `VLLM_DISABLE_SHARED_EXPERTS_STREAM` env is broken by
https://github.com/vllm-project/vllm/pull/28159
12. kv cache is broken by https://github.com/vllm-project/vllm/pull/27753
13. dp is broken by https://github.com/vllm-project/vllm/pull/25110

 
What's broken and changed by ourselves:
1. qwen vl is broken by https://github.com/vllm-project/vllm/pull/28455
We'll remove model files in the future to avoid this kind of error
2. Engine core is broken by
https://github.com/vllm-project/vllm/pull/23691 We'll remove the patch
file in the future.
3. Ascend scheduler is broken by
https://github.com/vllm-project/vllm/pull/28733 We'll remove ascend
scheduler later.
4. qwen3-next is broken by
https://github.com/vllm-project/vllm/pull/28083 We'll remove model files
in the future to avoid this kind of error
5. qwen vl is broken by https://github.com/vllm-project/vllm/pull/27764.
We'll remove model files in the future

Known issue:
1. ray doesn't work 
2. the accuracy of qwen3-next is not correct
3. qwen3-vl is broken
4. prefix cache + ascend scheduler + deepseek v2 lite is broken.

Co-authored-by: MengqingCao <cmq0113@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: 22dimensions <waitingwind@foxmail.com>
Co-authored-by: shen-shanshan <467638484@qq.com>


- vLLM version: v0.11.2

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: MengqingCao <cmq0113@163.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: MengqingCao <cmq0113@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: leo-pony <nengjunma@outlook.com>
This commit is contained in:
wangxiyuan
2025-11-26 11:48:58 +08:00
committed by GitHub
parent d5f77f14d0
commit bc69d7cfe1
54 changed files with 744 additions and 437 deletions

View File

@@ -107,8 +107,7 @@ class TestACLGraphWrapper(TestBase):
wrapper = ACLGraphWrapper(runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool)
runtime_mode=CUDAGraphMode.FULL)
self.assertEqual(wrapper.runnable, self.mock_runnable)
self.assertEqual(wrapper.vllm_config, self.mock_vllm_config)
@@ -130,7 +129,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
self.assertEqual(wrapper.runnable, self.mock_runnable)
@@ -152,8 +150,7 @@ class TestACLGraphWrapper(TestBase):
with self.assertRaises(AssertionError):
ACLGraphWrapper(runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.NONE,
graph_pool=self.mock_graph_pool)
runtime_mode=CUDAGraphMode.NONE)
@patch('vllm_ascend.compilation.acl_graph.get_forward_context')
@patch('vllm_ascend.compilation.acl_graph.current_platform')
@@ -171,7 +168,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
result = wrapper("arg1", "arg2")
@@ -196,7 +192,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
result = wrapper("arg1", "arg2")
@@ -247,7 +242,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Create a real torch tensor for the test, not a mock
@@ -319,7 +313,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Create a real torch tensor for the test, not a mock
@@ -392,7 +385,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# First call to capture the graph
@@ -447,7 +439,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# First call to capture the graph
@@ -518,7 +509,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Create a real torch tensor for the test, not a mock
@@ -588,7 +578,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Create a real torch tensor for the test, not a mock
@@ -659,7 +648,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Create a real torch tensor for the test, not a mock
@@ -680,7 +668,6 @@ class TestACLGraphWrapper(TestBase):
runnable=mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Should be able to access attributes of the runnable
@@ -699,7 +686,6 @@ class TestACLGraphWrapper(TestBase):
runnable=mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
# Should raise AttributeError for non-existent attributes
@@ -715,7 +701,6 @@ class TestACLGraphWrapper(TestBase):
runnable=self.mock_runnable,
vllm_config=self.mock_vllm_config,
runtime_mode=CUDAGraphMode.FULL,
graph_pool=self.mock_graph_pool,
cudagraph_options=self.mock_cudagraph_options)
unwrapped = wrapper.unwrap()