Support LoRA in TestOpenAIVisionServer and fix fused kv_proj loading bug. (#6861)

This commit is contained in:
Lifu Huang
2025-06-04 22:08:30 -07:00
committed by GitHub
parent 499f5e620c
commit 4474eaf552
4 changed files with 66 additions and 16 deletions

View File

@@ -177,9 +177,19 @@ class TestKimiVLServer(TestOpenAIVisionServer):
class TestPhi4MMServer(TestOpenAIVisionServer):
@classmethod
def setUpClass(cls):
# Manually download LoRA adapter_config.json as it's not downloaded by the model loader by default.
from huggingface_hub import constants, snapshot_download
snapshot_download(
"microsoft/Phi-4-multimodal-instruct",
allow_patterns=["**/adapter_config.json"],
)
cls.model = "microsoft/Phi-4-multimodal-instruct"
cls.base_url = DEFAULT_URL_FOR_TEST
cls.api_key = "sk-123456"
revision = "33e62acdd07cd7d6635badd529aa0a3467bb9c6a"
cls.process = popen_launch_server(
cls.model,
cls.base_url,
@@ -188,15 +198,27 @@ class TestPhi4MMServer(TestOpenAIVisionServer):
"--trust-remote-code",
"--mem-fraction-static",
"0.75",
"--disable-radix-cache",
"--max-loras-per-batch",
"1",
"--revision",
revision,
"--lora-paths",
f"vision={constants.HF_HUB_CACHE}/models--microsoft--Phi-4-multimodal-instruct/snapshots/{revision}/vision-lora",
],
)
cls.base_url += "/v1"
def test_video_chat_completion(self):
    # Intentional no-op override: disables the base-class video chat test for
    # this model. NOTE(review): presumably video input is not supported or not
    # exercised for Phi-4-multimodal in this suite — confirm with the author.
    pass
def get_request_kwargs(self):
    """Per-request overrides merged into every chat-completion call.

    Routes requests through the "vision" LoRA adapter and pins greedy
    sampling (top_k=1, top_p=1.0) so test output is deterministic.
    """
    sampling_overrides = {
        "lora_path": "vision",
        "top_k": 1,
        "top_p": 1.0,
    }
    return {"extra_body": sampling_overrides}
def test_multi_images_chat_completion(self):
# TODO (lifuhuang): support LoRA to enable Phi4MM multi-image understanding capability.
def test_video_chat_completion(self):
    # Intentional no-op override: disables the base-class video chat test.
    # NOTE(review): presumably video input is not exercised for this model —
    # confirm with the author.
    pass

View File

@@ -1,4 +1,5 @@
import base64
import copy
import io
import json
import os
@@ -47,6 +48,9 @@ class TestOpenAIVisionServer(CustomTestCase):
def tearDownClass(cls):
    # Terminate the server launched in setUpClass along with every child
    # process it spawned, so no orphaned workers outlive the test class.
    kill_process_tree(cls.process.pid)
def get_request_kwargs(self):
    """Extension hook: extra kwargs merged into each completion request.

    The base implementation contributes nothing; subclasses override this
    to inject per-model options (e.g. a LoRA path via ``extra_body``).
    """
    return dict()
def test_single_image_chat_completion(self):
client = openai.Client(api_key=self.api_key, base_url=self.base_url)
@@ -68,6 +72,7 @@ class TestOpenAIVisionServer(CustomTestCase):
},
],
temperature=0,
**(self.get_request_kwargs()),
)
assert response.choices[0].message.role == "assistant"
@@ -130,6 +135,7 @@ class TestOpenAIVisionServer(CustomTestCase):
},
],
temperature=0,
**(self.get_request_kwargs()),
)
assert response.choices[0].message.role == "assistant"
@@ -172,6 +178,7 @@ class TestOpenAIVisionServer(CustomTestCase):
},
],
temperature=0,
**(self.get_request_kwargs()),
)
assert response.choices[0].message.role == "assistant"
@@ -284,6 +291,7 @@ class TestOpenAIVisionServer(CustomTestCase):
temperature=0,
max_tokens=1024,
stream=False,
**(self.get_request_kwargs()),
)
video_response = response.choices[0].message.content
@@ -324,6 +332,9 @@ class TestOpenAIVisionServer(CustomTestCase):
+ r"""\}"""
)
extra_kwargs = self.get_request_kwargs()
extra_kwargs.setdefault("extra_body", {})["regex"] = regex
response = client.chat.completions.create(
model="default",
messages=[
@@ -342,7 +353,7 @@ class TestOpenAIVisionServer(CustomTestCase):
},
],
temperature=0,
extra_body={"regex": regex},
**extra_kwargs,
)
text = response.choices[0].message.content
@@ -388,6 +399,7 @@ class TestOpenAIVisionServer(CustomTestCase):
{"role": "user", "content": content},
],
temperature=0,
**(self.get_request_kwargs()),
)
assert response.choices[0].message.role == "assistant"
@@ -430,6 +442,7 @@ class TestOpenAIVisionServer(CustomTestCase):
temperature=0,
max_tokens=128,
stream=False,
**(self.get_request_kwargs()),
)
audio_response = response.choices[0].message.content