Update Python API of activation, topk, norm and rope and remove vllm dependency (#6614)

Co-authored-by: Wu, Chunyuan <chunyuan.wu@intel.com>
Co-authored-by: jianan-gu <jianan.gu@intel.com>
Co-authored-by: sdp <sdp@gnr799219.jf.intel.com>
Author: YanbingJiang
Date: 2025-06-18 13:11:50 +08:00
Committed by: GitHub
Parent: e56685ac1b
Commit: 094c116f7d
23 changed files with 270 additions and 56 deletions


@@ -14,15 +14,18 @@ from sglang.srt.layers.quantization.fp8_kernel import is_fp8_fnuz, scaled_fp8_quant
 from sglang.srt.layers.quantization.fp8_utils import normalize_e4m3fn_to_e4m3fnuz
 from sglang.srt.layers.quantization.utils import (
     all_close_1d,
+    cpu_has_amx_support,
     per_tensor_dequantize,
     replace_parameter,
 )
-from sglang.srt.utils import is_cuda, is_npu, set_weight_attrs
+from sglang.srt.utils import is_cpu, is_cuda, is_npu, set_weight_attrs
 
 _is_cuda = is_cuda()
 _is_npu = is_npu()
+_is_cpu_amx_available = cpu_has_amx_support()
+_is_cpu = is_cpu()
 
-if not _is_cuda and not _is_npu:
+if not (_is_cuda or _is_npu or (_is_cpu and _is_cpu_amx_available)):
     from vllm import _custom_ops as vllm_ops
     from vllm._custom_ops import scaled_fp8_quant
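
For context, the guard introduced by this hunk amounts to the capability-gated fallback sketched below. Only the condition and the helper names (is_cpu, is_cuda, is_npu, cpu_has_amx_support) come from the diff; the surrounding module layout is assumed for illustration and is not the exact file contents.

# Minimal sketch of the post-change import gate (assumed module layout).
from sglang.srt.layers.quantization.utils import cpu_has_amx_support
from sglang.srt.utils import is_cpu, is_cuda, is_npu

_is_cuda = is_cuda()
_is_npu = is_npu()
_is_cpu = is_cpu()
_is_cpu_amx_available = cpu_has_amx_support()

# Fall back to vllm's custom ops only when no native backend is available:
# neither CUDA, nor NPU, nor a CPU with AMX support.
if not (_is_cuda or _is_npu or (_is_cpu and _is_cpu_amx_available)):
    from vllm import _custom_ops as vllm_ops  # optional dependency
    from vllm._custom_ops import scaled_fp8_quant

The practical effect is that on an AMX-capable CPU the vllm import is skipped entirely, so the CPU path no longer requires vllm to be installed, which is the dependency removal named in the commit title.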