[Misc] clean up useless function (#3348)
Remove the interfaces that have already been removed from vLLM itself. - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0 Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -117,14 +117,6 @@ class TestNPUPlatform(TestBase):
|
||||
self.assertEqual(self.platform.get_device_name(device_id), device_name)
|
||||
mock_get_device_name.assert_called_once_with(0)
|
||||
|
||||
def test_is_async_output_supported(self):
|
||||
self.assertTrue(
|
||||
self.platform.is_async_output_supported(enforce_eager=None))
|
||||
self.assertTrue(
|
||||
self.platform.is_async_output_supported(enforce_eager=True))
|
||||
self.assertTrue(
|
||||
self.platform.is_async_output_supported(enforce_eager=False))
|
||||
|
||||
@patch("torch.inference_mode")
|
||||
def test_inference_mode(self, mock_inference_mode):
|
||||
mock_inference_mode.return_value = None
|
||||
|
||||
@@ -87,10 +87,6 @@ class NPUPlatform(Platform):
|
||||
def get_device_name(cls, device_id: int = 0) -> str:
    """Return the human-readable name of NPU device *device_id* (default 0)."""
    device_name = torch.npu.get_device_name(device_id)
    return device_name
|
||||
|
||||
@classmethod
def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool:
    """Whether async output is usable on this platform.

    The NPU platform supports async output regardless of the
    enforce_eager setting, so this is unconditionally True.
    """
    supported = True
    return supported
|
||||
|
||||
@classmethod
def inference_mode(cls):
    """Return a ``torch.inference_mode`` context manager for NPU runs."""
    ctx = torch.inference_mode()
    return ctx
|
||||
@@ -379,13 +375,6 @@ class NPUPlatform(Platform):
|
||||
def is_pin_memory_available(cls):
    """Report pinned (page-locked) host memory as always available on NPU."""
    available = True
    return available
|
||||
|
||||
@classmethod
def supports_v1(cls, model_config: ModelConfig) -> bool:
    """Report whether this platform can serve *model_config* on the v1
    engine.

    The NPU platform accepts every model configuration, so the answer is
    unconditionally True.
    """
    v1_supported = True
    return v1_supported
|
||||
|
||||
@classmethod
|
||||
def get_static_graph_wrapper_cls(cls) -> str:
|
||||
"""
|
||||
|
||||
Reference in New Issue
Block a user