Fix chat template handling for OpenAI serving (#8635)

Signed-off-by: Xinyuan Tong <justinning0323@outlook.com>
Signed-off-by: Xinyuan Tong <xinyuantong.cs@gmail.com>
This commit is contained in:
Xinyuan Tong
2025-07-31 21:49:45 -07:00
committed by GitHub
parent 20b5563eda
commit 7e831efee8
7 changed files with 83 additions and 156 deletions

View File

@@ -954,20 +954,6 @@ register_conv_template(
)
)
# Conversation template for Xiaomi's MiMo-VL multimodal models: ChatML-style
# role markers with a dedicated vision placeholder token.
register_conv_template(
    Conversation(
        name="mimo-vl",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        system_template="<|im_start|>system\n{system_message}",
        system_message="You are MiMo, an AI assistant developed by Xiaomi.",
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="<|im_end|>\n",
        stop_str=["<|im_end|>"],
        image_token="<|vision_start|><|image_pad|><|vision_end|>",
    )
)
register_conv_template(
Conversation(
name="qwen2-audio",
@@ -981,51 +967,11 @@ register_conv_template(
)
)
# Conversation template for Llama 4 vision models (header-style role tags,
# <|eot|> turn terminator, single <|image|> placeholder).
register_conv_template(
    Conversation(
        name="llama_4_vision",
        roles=("user", "assistant"),
        system_template="<|header_start|>system<|header_end|>\n\n{system_message}<|eot|>",
        system_message="You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
        sep_style=SeparatorStyle.LLAMA4,
        sep="",
        stop_str="<|eot|>",
        image_token="<|image|>",
    )
)
# Conversation template for Step3-VL: <|BOT|>-prefixed roles, the assistant
# turn opens a <think> block, and <|EOT|> terminates each turn.
register_conv_template(
    Conversation(
        name="step3-vl",
        roles=(
            "<|BOT|>user\n",
            "<|BOT|>assistant\n<think>\n",
        ),
        system_template="{system_message}\n",
        system_message="<begin▁of▁sentence>You are a helpful assistant",
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="<|EOT|>",
        stop_str="<|EOT|>",
        image_token="<im_patch>",
        # add_bos=True,
    )
)
@register_conv_template_matching_function
def match_internvl(model_path: str):
    """Map InternVL-family model paths to their conversation template names.

    Patterns are checked in order; returns None when nothing matches.
    """
    candidates = (
        (r"internvl", "internvl-2-5"),
        (r"intern.*s1", "interns1"),
    )
    for pattern, template_name in candidates:
        if re.search(pattern, model_path, re.IGNORECASE):
            return template_name
@register_conv_template_matching_function
def match_llama_vision(model_path: str):
    """Pick the template for Llama vision checkpoints.

    The more specific Llama-3.2-vision pattern is tried before the broad
    Llama-4 pattern; returns None when neither matches.
    """
    candidates = (
        (r"llama.*3\.2.*vision", "llama_3_vision"),
        (r"llama.*4.*", "llama_4_vision"),
    )
    for pattern, template_name in candidates:
        if re.search(pattern, model_path, re.IGNORECASE):
            return template_name
@register_conv_template_matching_function
@@ -1040,22 +986,6 @@ def match_vicuna(model_path: str):
return "vicuna_v1.1"
@register_conv_template_matching_function
def match_llama2_chat(model_path: str):
    """Match Llama-2 chat and CodeLlama instruct checkpoints (else None)."""
    chat_pattern = r"llama-2.*chat|codellama.*instruct"
    if re.search(chat_pattern, model_path, re.IGNORECASE):
        return "llama-2"
@register_conv_template_matching_function
def match_mistral(model_path: str):
    """Match Pixtral and Mistral/Mixtral instruct checkpoints (else None)."""
    found = re.search(
        r"pixtral|(mistral|mixtral).*instruct", model_path, re.IGNORECASE
    )
    return "mistral" if found else None
@register_conv_template_matching_function
def match_deepseek_vl(model_path: str):
if re.search(r"deepseek.*vl2", model_path, re.IGNORECASE):
@@ -1064,12 +994,6 @@ def match_deepseek_vl(model_path: str):
@register_conv_template_matching_function
def match_qwen_chat_ml(model_path: str):
if re.search(r"gme.*qwen.*vl", model_path, re.IGNORECASE):
return "gme-qwen2-vl"
if re.search(r"qwen.*vl", model_path, re.IGNORECASE):
return "qwen2-vl"
if re.search(r"qwen.*audio", model_path, re.IGNORECASE):
return "qwen2-audio"
if re.search(
r"llava-v1\.6-34b|llava-v1\.6-yi-34b|llava-next-video-34b|llava-onevision-qwen2",
model_path,
@@ -1078,12 +1002,6 @@ def match_qwen_chat_ml(model_path: str):
return "chatml-llava"
@register_conv_template_matching_function
def match_gemma3_instruct(model_path: str):
    """Match Gemma-3 instruction-tuned ("-it") checkpoints (else None)."""
    if re.search(r"gemma-3.*it", model_path, re.IGNORECASE) is not None:
        return "gemma-it"
@register_conv_template_matching_function
def match_openbmb_minicpm(model_path: str):
if re.search(r"minicpm-v", model_path, re.IGNORECASE):
@@ -1092,37 +1010,7 @@ def match_openbmb_minicpm(model_path: str):
return "minicpmo"
@register_conv_template_matching_function
def match_moonshot_kimivl(model_path: str):
    """Match Moonshot Kimi-VL checkpoints (else None)."""
    hit = re.search(r"kimi.*vl", model_path, re.IGNORECASE)
    return "kimi-vl" if hit else None
@register_conv_template_matching_function
def match_devstral(model_path: str):
    """Match Mistral Devstral checkpoints (else None)."""
    return "devstral" if re.search(r"devstral", model_path, re.IGNORECASE) else None
@register_conv_template_matching_function
def match_phi_4_mm(model_path: str):
    """Match Phi-4-multimodal checkpoints via a case-insensitive substring test."""
    lowered_path = model_path.lower()
    return "phi-4-mm" if "phi-4-multimodal" in lowered_path else None
@register_conv_template_matching_function
def match_vila(model_path: str):
    """Match VILA checkpoints, which use the generic chatml template (else None)."""
    hit = re.search(r"vila", model_path, re.IGNORECASE)
    return "chatml" if hit else None
@register_conv_template_matching_function
def match_mimo_vl(model_path: str):
    """Match Xiaomi MiMo-VL checkpoints (else None)."""
    hit = re.search(r"mimo.*vl", model_path, re.IGNORECASE)
    return "mimo-vl" if hit else None
# @register_conv_template_matching_function
# def match_step3(model_path: str):
# if re.search(r"step3", model_path, re.IGNORECASE):
# return "step3-vl"

View File

@@ -84,26 +84,27 @@ class TemplateManager:
if chat_template_arg:
self._load_explicit_chat_template(tokenizer_manager, chat_template_arg)
else:
# Try HuggingFace template first
hf_template = self._resolve_hf_chat_template(tokenizer_manager)
if hf_template:
self._jinja_template_content_format = (
detect_jinja_template_content_format(hf_template)
)
logger.info(
f"Using default HuggingFace chat template with detected content format: {self._jinja_template_content_format}"
)
return
# Fallback to SGLang template guessing
# Guess chat template from model path
self.guess_chat_template_from_model_path(model_path)
# Set default format if no template was found
# If no pre-defined template was found, fallback to HuggingFace template
if self._chat_template_name is None:
self._jinja_template_content_format = "string"
logger.info(
"No chat template found, defaulting to 'string' content format"
)
# Try HuggingFace template first
hf_template = self._resolve_hf_chat_template(tokenizer_manager)
if hf_template:
# override the chat template
tokenizer_manager.tokenizer.chat_template = hf_template
self._jinja_template_content_format = (
detect_jinja_template_content_format(hf_template)
)
logger.info(
f"Using default HuggingFace chat template with detected content format: {self._jinja_template_content_format}"
)
return
# Default to string content format if no template was found
self._jinja_template_content_format = "string"
logger.info("No chat template found, defaulting to 'string' content format")
def _load_explicit_chat_template(
self, tokenizer_manager, chat_template_arg: str
@@ -257,13 +258,15 @@ class TemplateManager:
Returns the chat template string if found, None otherwise.
"""
tokenizer = tokenizer_manager.tokenizer
# Try to get AutoTokenizer chat template
try:
return tokenizer.get_chat_template()
if processor := tokenizer_manager.processor:
if hasattr(processor, "chat_template") and processor.chat_template:
return processor.chat_template
if tokenizer := tokenizer_manager.tokenizer:
if hasattr(tokenizer, "chat_template") and tokenizer.chat_template:
return tokenizer.chat_template
except Exception as e:
logger.debug(f"Error getting chat template via get_chat_template(): {e}")
logger.debug(f"Error getting chat template: {e}")
logger.debug("No HuggingFace chat template found")
return None

View File

@@ -225,10 +225,10 @@ class TokenizerManager:
self.tokenizer = get_tokenizer_from_processor(self.processor)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
else:
self.mm_processor = None
self.mm_processor = self.processor = None
if server_args.skip_tokenizer_init:
self.tokenizer = self.processor = None
self.tokenizer = None
else:
self.tokenizer = get_tokenizer(
server_args.tokenizer_path,

View File

@@ -12,7 +12,6 @@
# limitations under the License.
# ==============================================================================
import re
from typing import Dict, List, Optional, Union
from sglang.srt.managers.multimodal_processor import (
@@ -38,14 +37,8 @@ class Gemma3nSGLangProcessor(SGLangBaseProcessor):
self.mm_tokens = MultimodalSpecialTokens(
image_token="<image_soft_token>",
image_token_id=hf_config.image_token_id,
image_token_regex=re.compile(
r"<start_of_image>(?:(?:<image_soft_token>)*<end_of_image>)?"
),
audio_token="<audio_soft_token>",
audio_token_id=hf_config.audio_token_id,
audio_token_regex=re.compile(
r"<start_of_audio>(?:(?:<audio_soft_token>)*<end_of_audio>)?"
),
).build(_processor)
async def process_mm_data_async(