Upgrade to new vllm commit (#3719)
### What this PR does / why we need it?
Upgrade to new vllm commit: c9461e05a4
- Fix many imports, caused by https://github.com/vllm-project/vllm/pull/26908
- Fix import `sha256`, caused by https://github.com/vllm-project/vllm/pull/27169
- Remove `SchedulerConfig.send_delta_data`, caused by https://github.com/vllm-project/vllm/pull/27142
- Fix `FusedMoE` because of dual stream execution, caused by https://github.com/vllm-project/vllm/pull/26440

### Does this PR introduce _any_ user-facing change?
N/A

### How was this patch tested?
CI passed with newly added and existing tests.

- vLLM version: v0.11.0rc3
- vLLM main: 17c540a993

---------

Signed-off-by: MengqingCao <cmq0113@163.com>
Signed-off-by: Icey <1790571317@qq.com>
Co-authored-by: MengqingCao <cmq0113@163.com>
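Every import fix in the hunks below applies the same version-gated pattern: keep resolving a helper from the old `vllm.utils` location on vLLM v0.11.0, and from its new topic submodule on newer main. A minimal sketch of the pattern, using the `sha256` case from this diff (the only assumption is that `vllm_version_is` is vllm-ascend's check against the installed vLLM release string):

```python
# Sketch of the version-gated import used throughout this PR.
# Assumption: vllm_version_is(v) returns True when the installed
# vLLM release equals v. The submodule path in the else-branch is
# the one introduced by the vllm.utils split (see PR links above).
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("0.11.0"):
    # v0.11.0 still exposes helpers from the top-level vllm.utils module
    from vllm.utils import sha256
else:
    # newer vLLM moved hashing helpers into vllm.utils.hashing
    from vllm.utils.hashing import sha256
```

The same gate covers `make_zmq_path` (now in `vllm.utils.network_utils`), `make_tensor_with_pad` and `STR_DTYPE_TO_TORCH_DTYPE` (now in `vllm.utils.torch_utils`), and `get_ip` in the hunks that follow.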
@@ -78,21 +78,6 @@ class TestAscendSchedulerConfig(TestBase):
             str(context.exception),
         )
 
-    def test_not_implemented_send_delta_data(self):
-        with self.assertRaises(NotImplementedError) as context:
-            AscendSchedulerConfig.initialize_from_config(
-                self.basic_scheduler_config,
-                AscendSchedulerConfig(
-                    send_delta_data=True,
-                    max_num_batched_tokens=2048,
-                    max_model_len=2048,
-                ),
-            )
-        self.assertIn(
-            "currently AscendScheduler doesn't support send_delta_data",
-            str(context.exception),
-        )
-
     def test_no_override(self):
         ascend_config = AscendSchedulerConfig.initialize_from_config(
             self.basic_scheduler_config, {})
@@ -9,7 +9,6 @@ from vllm.config import (CacheConfig, KVTransferConfig, ModelConfig,
 from vllm.multimodal.inputs import (MultiModalFeatureSpec,
                                     MultiModalKwargsItem, PlaceholderRange)
 from vllm.sampling_params import SamplingParams
-from vllm.utils import sha256
 from vllm.v1.core.kv_cache_utils import (get_request_block_hasher,
                                          init_none_hash)
 from vllm.v1.core.sched.output import SchedulerOutput
@@ -24,6 +23,11 @@ from vllm_ascend.core.scheduler import AscendScheduler
 from vllm_ascend.core.scheduler_dynamic_batch import SchedulerDynamicBatch
 from vllm_ascend.utils import vllm_version_is
 
+if vllm_version_is("0.11.0"):
+    from vllm.utils import sha256
+else:
+    from vllm.utils.hashing import sha256
+
 EOS_TOKEN_ID = 50256
 MODEL = "Qwen3-0.6B"
 ENABLE_PREFIX_CACHING = None
@@ -12,7 +12,13 @@ from unittest.mock import MagicMock, patch
 import msgspec
 import zmq
-from vllm.utils import make_zmq_path
 
+from vllm_ascend.utils import vllm_version_is
+
+if vllm_version_is("0.11.0"):
+    from vllm.utils import make_zmq_path
+else:
+    from vllm.utils.network_utils import make_zmq_path
+
 fake_engine = types.ModuleType("mooncake.engine")
 fake_engine.TransferEngine = MagicMock()  # type: ignore[attr-defined]
@@ -10,7 +10,6 @@ import torch
 from vllm import SamplingParams
 from vllm.config import (CacheConfig, DeviceConfig, KVTransferConfig,
                          ModelConfig, SchedulerConfig, VllmConfig)
-from vllm.utils import sha256
 from vllm.v1.core.kv_cache_utils import (get_request_block_hasher,
                                          init_none_hash)
 from vllm.v1.core.sched.scheduler import Scheduler
@@ -22,6 +21,11 @@ from vllm.v1.structured_output import StructuredOutputManager
 from vllm_ascend.utils import vllm_version_is
 
+if vllm_version_is("0.11.0"):
+    from vllm.utils import sha256
+else:
+    from vllm.utils.hashing import sha256
+
 EOS_TOKEN_ID = 50256
 os.environ["VLLM_USE_V1"] = "1"
 
 
@@ -22,6 +22,7 @@ import torch
 from torch import nn
 
 from vllm_ascend.model_loader.netloader.netloader import ModelNetLoaderElastic
+from vllm_ascend.utils import vllm_version_is
 
 
 class DummyDeviceConfig:
@@ -173,7 +174,11 @@ def test_load_model_elastic_success(mock_logger, monkeypatch, tmp_path):
         "vllm_ascend.model_loader.netloader.netloader.process_weights_after_loading",
         lambda *a, **k: None)
     # patch get_ip
-    monkeypatch.setattr("vllm.utils.get_ip", lambda: "127.0.0.1")
+    if vllm_version_is("0.11.0"):
+        monkeypatch.setattr("vllm.utils.get_ip", lambda: "127.0.0.1")
+    else:
+        monkeypatch.setattr("vllm.utils.network_utils.get_ip",
+                            lambda: "127.0.0.1")
     # patch find_free_port
     monkeypatch.setattr(
         "vllm_ascend.model_loader.netloader.netloader.find_free_port",
@@ -20,14 +20,19 @@ import numpy as np
 import pytest
 import torch
 from vllm.sampling_params import SamplingParams
-from vllm.utils import make_tensor_with_pad
 from vllm.v1.pool.metadata import PoolingMetadata
 from vllm.v1.sample.logits_processor import LogitsProcessors
 from vllm.v1.sample.metadata import SamplingMetadata
 
+from vllm_ascend.utils import vllm_version_is
 from vllm_ascend.worker.block_table import BlockTable, MultiGroupBlockTable
 from vllm_ascend.worker.npu_input_batch import CachedRequestState, InputBatch
 
+if vllm_version_is("0.11.0"):
+    from vllm.utils import make_tensor_with_pad
+else:
+    from vllm.utils.torch_utils import make_tensor_with_pad
+
 VOCAB_SIZE = 1024
 NUM_OUTPUT_TOKENS = 20
 MAX_PROMPT_SIZE = 100
@@ -5,6 +5,7 @@ import torch
 from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig
 
 from tests.ut.base import TestBase
+from vllm_ascend.utils import vllm_version_is
 
 
 class TestNPUWorker(TestBase):
@@ -178,15 +179,26 @@ class TestNPUWorker(TestBase):
         # Create NPUWorker instance
         from vllm_ascend.worker.worker_v1 import NPUWorker
 
-        with patch("vllm.utils.STR_DTYPE_TO_TORCH_DTYPE",
-                   {"float32": torch.float32}):
-            worker = NPUWorker(
-                vllm_config=self.vllm_config_mock,
-                local_rank=self.local_rank,
-                rank=self.rank,
-                distributed_init_method=self.distributed_init_method,
-                is_driver_worker=self.is_driver_worker,
-            )
+        if vllm_version_is("0.11.0"):
+            with patch("vllm.utils.STR_DTYPE_TO_TORCH_DTYPE",
+                       {"float32": torch.float32}):
+                worker = NPUWorker(
+                    vllm_config=self.vllm_config_mock,
+                    local_rank=self.local_rank,
+                    rank=self.rank,
+                    distributed_init_method=self.distributed_init_method,
+                    is_driver_worker=self.is_driver_worker,
+                )
+        else:
+            with patch("vllm.utils.torch_utils.STR_DTYPE_TO_TORCH_DTYPE",
+                       {"float32": torch.float32}):
+                worker = NPUWorker(
+                    vllm_config=self.vllm_config_mock,
+                    local_rank=self.local_rank,
+                    rank=self.rank,
+                    distributed_init_method=self.distributed_init_method,
+                    is_driver_worker=self.is_driver_worker,
+                )
 
         # Verify cache_dtype is set to custom value
         self.assertEqual(worker.cache_dtype, torch.float32)