Unify the model type checking (#1905)
This commit is contained in:
@@ -116,6 +116,23 @@ register_chat_template(
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# ChatML-format template for LLaVA-style vision models: every role uses the
# same <|im_start|>/<|im_end|> framing, plus an "<image>\n" placeholder token.
register_chat_template(
    ChatTemplate(
        name="chatml-llava",
        default_system_prompt="You are a helpful assistant.",
        role_prefix_and_suffix={
            "system": (
                "<|im_start|>system\n",
                "<|im_end|>\n",
            ),
            "user": (
                "<|im_start|>user\n",
                "<|im_end|>\n",
            ),
            "assistant": (
                "<|im_start|>assistant\n",
                "<|im_end|>\n",
            ),
        },
        stop_str=("<|im_end|>",),
        image_token="<image>\n",
        style=ChatTemplateStyle.PLAIN,
    )
)
|
||||
|
||||
|
||||
# There is a default system prompt for qwen.
# reference: https://modelscope.cn/models/qwen/Qwen2-72B-Instruct/file/view/master?fileName=tokenizer_config.json&status=1
# The chat template is: "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
|
||||
@@ -149,22 +166,6 @@ register_chat_template(
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# ChatML template paired with an "<image>\n" placeholder for llava-style
# multimodal prompting; all three roles share identical ChatML markup.
register_chat_template(
    ChatTemplate(
        name="chatml-llava",
        default_system_prompt="You are a helpful assistant.",
        # Same prefix/suffix pattern for every role, so build the mapping once.
        role_prefix_and_suffix={
            role: ("<|im_start|>" + role + "\n", "<|im_end|>\n")
            for role in ("system", "user", "assistant")
        },
        style=ChatTemplateStyle.PLAIN,
        stop_str=("<|im_end|>",),
        image_token="<image>\n",
    )
)
|
||||
|
||||
# Reference: https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template
|
||||
register_chat_template(
|
||||
ChatTemplate(
|
||||
@@ -182,21 +183,6 @@ register_chat_template(
|
||||
)
|
||||
)
|
||||
|
||||
# Reference: https://modelscope.cn/models/01ai/Yi-1.5-34B-Chat/file/view/master?fileName=tokenizer_config.json&status=1
# Yi-1.5 uses no system framing; the user suffix already opens the assistant
# turn, so the assistant prefix is empty.
register_chat_template(
    ChatTemplate(
        name="yi-1.5",
        default_system_prompt=None,
        style=ChatTemplateStyle.PLAIN,
        stop_str=("<|im_end|>",),
        role_prefix_and_suffix={
            "system": ("", ""),
            "user": (
                "<|im_start|>user\n",
                "<|im_end|>\n<|im_start|>assistant\n",
            ),
            "assistant": ("", "<|im_end|>\n"),
        },
    )
)
|
||||
|
||||
register_chat_template(
|
||||
ChatTemplate(
|
||||
name="llama-2-chat",
|
||||
@@ -233,6 +219,45 @@ register_chat_template(
|
||||
)
|
||||
)
|
||||
|
||||
# The difference between "llama-3-instruct-llava" and "llama-3-instruct" is
# that llava uses a different image_token.
register_chat_template(
    ChatTemplate(
        name="llama-3-instruct-llava",
        default_system_prompt=None,
        # Llama-3 header framing is identical for every role, so derive the
        # per-role prefix from the role name itself.
        role_prefix_and_suffix={
            role: (
                "<|start_header_id|>" + role + "<|end_header_id|>\n\n",
                "<|eot_id|>",
            )
            for role in ("system", "user", "assistant")
        },
        stop_str=("<|eot_id|>",),
        image_token="<image>\n",
    )
)
|
||||
|
||||
# Reference: https://modelscope.cn/models/01ai/Yi-1.5-34B-Chat/file/view/master?fileName=tokenizer_config.json&status=1
# Plain-style template: no system prompt, and the user turn's suffix
# immediately starts the assistant turn.
register_chat_template(
    ChatTemplate(
        name="yi-1.5",
        style=ChatTemplateStyle.PLAIN,
        default_system_prompt=None,
        stop_str=("<|im_end|>",),
        role_prefix_and_suffix={
            "system": ("", ""),
            "user": ("<|im_start|>user\n", "<|im_end|>\n<|im_start|>assistant\n"),
            "assistant": ("", "<|im_end|>\n"),
        },
    )
)
|
||||
|
||||
# Reference: https://github.com/01-ai/Yi/tree/main/VL#major-difference-with-llava
|
||||
register_chat_template(
|
||||
ChatTemplate(
|
||||
|
||||
Reference in New Issue
Block a user