Add pyhccl (#503)

This is the first step toward supporting `trl vllm serve` on Ascend NPU
(https://github.com/vllm-project/vllm-ascend/issues/459).
This PR will work properly only once
https://github.com/vllm-project/vllm/pull/16464 is merged into vLLM.

---------

Signed-off-by: hzji210@gmail.com <hzji210@gmail.com>
This commit is contained in:
Huazhong Ji
2025-04-17 14:57:52 +08:00
committed by GitHub
parent 64fdf4cbef
commit c3d1a3782a
8 changed files with 589 additions and 1 deletions

View File

@@ -17,8 +17,11 @@
# limitations under the License.
#
import torch
import torch_npu # noqa: F401
from vllm.logger import logger
import vllm_ascend.envs as envs
def try_register_lib(lib_name: str, lib_info: str = ""):
import importlib
@@ -33,6 +36,28 @@ def try_register_lib(lib_name: str, lib_info: str = ""):
pass
def find_hccl_library() -> str:
    """
    Locate the HCCL shared library to load.

    Resolution order: the explicit path in the `HCCL_SO_PATH` environment
    variable wins; otherwise fall back to the `libhccl.so` shipped with the
    CANN build of PyTorch (importing `torch` makes it discoverable by
    `ctypes` automatically).

    Returns:
        The library path or name to hand to the dynamic loader.

    Raises:
        ValueError: if no override is set and PyTorch has no CANN build.
    """
    so_file = envs.HCCL_SO_PATH
    if not so_file:
        # No explicit override: require an Ascend (CANN) PyTorch build.
        if torch.version.cann is None:
            raise ValueError("HCCL only supports Ascend NPU backends.")
        so_file = "libhccl.so"
        logger.info("Found hccl from library %s", so_file)
    else:
        logger.info("Found hccl from environment variable HCCL_SO_PATH=%s",
                    so_file)
    return so_file
_current_stream = None