diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 27020de..f65aaa0 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -624,14 +624,32 @@ def shared_expert_dp_enabled() -> bool:
 
 
 def is_moe_model(vllm_config: VllmConfig):
+    """Checks if the model is a MoE model by config"""
     global _IS_MOE_MODEL
     if _IS_MOE_MODEL is None:
-        config = vllm_config.model_config.hf_config
-        _IS_MOE_MODEL = any('experts' in key.lower()
-                            for key in config.to_dict())
+        model_configs = vllm_config.model_config.hf_config.to_dict()
+        _IS_MOE_MODEL = _is_contain_expert(model_configs)
     return _IS_MOE_MODEL
 
 
+def _is_contain_expert(config: Any) -> bool:
+    """Return True if any key in the (possibly nested) config contains
+    the substring "expert", case-insensitively."""
+    if isinstance(config, dict):
+        for key, value in config.items():
+            # .lower() preserves the case-insensitive match of the
+            # pre-refactor implementation ('experts' in key.lower()).
+            if "expert" in str(key).lower():
+                return True
+            if _is_contain_expert(value):
+                return True
+    elif isinstance(config, (list, tuple)):
+        # hf_config dicts may nest dicts inside lists (e.g. per-layer
+        # sub-configs) — walk those too.
+        return any(_is_contain_expert(item) for item in config)
+    return False
+
+
 def weak_ref_tensor(tensor: Any) -> Any:
     """
     Create a weak reference to a tensor.