Update python API of activation, topk, norm and rope and remove vllm dependency (#6614)

Co-authored-by: Wu, Chunyuan <chunyuan.wu@intel.com>
Co-authored-by: jianan-gu <jianan.gu@intel.com>
Co-authored-by: sdp <sdp@gnr799219.jf.intel.com>
Author: YanbingJiang
Date: 2025-06-18 13:11:50 +08:00
Committed by: GitHub
Parent: e56685ac1b
Commit: 094c116f7d
23 changed files with 270 additions and 56 deletions


@@ -91,9 +91,7 @@ class TestFusedExperts(CustomTestCase):
         fused_output = fused_moe(a, w1, w2, score, topk, renormalize, prepack)
         atol = rtol = precision[torch_output.dtype]
-        self.assertTrue(
-            torch.allclose(torch_output, fused_output, atol=atol, rtol=rtol)
-        )
+        torch.testing.assert_close(torch_output, fused_output, atol=atol, rtol=rtol)

     def test_bf16_moe(self):
         for params in itertools.product(
@@ -171,7 +169,7 @@ class TestFusedExperts(CustomTestCase):
         # Increase the tolerance for large input shapes
         if M > 35:
             atol = rtol = 0.02
-        self.assertTrue(torch.allclose(ref_out, out, atol=atol, rtol=rtol))
+        torch.testing.assert_close(ref_out, out, atol=atol, rtol=rtol)

     def test_int8_moe(self):
         for params in itertools.product(
@@ -235,7 +233,7 @@ class TestFusedExperts(CustomTestCase):
         )
         atol = rtol = precision[dtype]
-        self.assertTrue(torch.allclose(ref_out.bfloat16(), out, atol=atol, rtol=rtol))
+        torch.testing.assert_close(ref_out.bfloat16(), out, atol=atol, rtol=rtol)

     def test_fp8_moe(self):
         for params in itertools.product(
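
All three hunks above apply the same mechanical change: self.assertTrue(torch.allclose(...)) is replaced with torch.testing.assert_close(...). The sketch below illustrates the difference in isolation; the tensor values and tolerances are made up, and a bare assert stands in for unittest's self.assertTrue.

import torch

# Illustrative tensors; the real tests compare reference and fused MoE outputs.
ref_out = torch.randn(4, 8)
out = ref_out + 1e-4 * torch.randn_like(ref_out)
atol = rtol = 2e-2

# Old pattern: a boolean check; on failure, unittest only reports
# "False is not true" with no detail about where the tensors diverge.
assert torch.allclose(ref_out, out, atol=atol, rtol=rtol)

# New pattern: on mismatch, torch.testing.assert_close raises an AssertionError
# that lists the number of mismatched elements and the greatest absolute and
# relative differences, which makes tolerance failures easier to debug from CI logs.
torch.testing.assert_close(ref_out, out, atol=atol, rtol=rtol)

Note that torch.testing.assert_close also checks dtype and device by default, so the compared tensors must already match in dtype, as the explicit ref_out.bfloat16() cast in the last hunk ensures.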