diff --git a/vllm_ascend/worker.py b/vllm_ascend/worker.py
index 27930d4..d1e0765 100644
--- a/vllm_ascend/worker.py
+++ b/vllm_ascend/worker.py
@@ -84,9 +84,6 @@ class NPUWorker(LocalOrDistributedWorkerBase):
         self.distributed_init_method = distributed_init_method
         self.is_driver_worker = is_driver_worker
 
-        if is_driver_worker:
-            assert rank % self.parallel_config.tensor_parallel_size == 0, \
-                "Driver worker should be rank 0 of tensor parallel group."
         if self.model_config.trust_remote_code:
             # note: lazy import to avoid importing torch before initializing
             from vllm.utils import init_cached_hf_modules