ci: Revert openai_server related tests in AMD suites (#7449)
@@ -163,48 +163,49 @@ class TestCacheReport(CustomTestCase):
             >= usage_2.prompt_tokens - self.min_cached
         )
 
-    def test_cache_report_openai_async(self):
-        print("=" * 100)
+    # TODO: flaky test
+    # def test_cache_report_openai_async(self):
+    #     print("=" * 100)
 
-        async def run_test():
-            task0 = asyncio.create_task(
-                self.cache_report_openai_async(
-                    "first request, to start the inference and let the next two request be started in the same batch"
-                )
-            )
-            await asyncio.sleep(0.05) # to force the first request to be started first
-            task1 = asyncio.create_task(
-                self.cache_report_openai_async(
-                    "> can the same batch parallel request use the cache?"
-                )
-            )
-            task2 = asyncio.create_task(
-                self.cache_report_openai_async(
-                    "> can the same batch parallel request use the cache?"
-                )
-            )
-            result0, result1, result2 = await asyncio.gather(task0, task1, task2)
+    #     async def run_test():
+    #         task0 = asyncio.create_task(
+    #             self.cache_report_openai_async(
+    #                 "first request, to start the inference and let the next two request be started in the same batch"
+    #             )
+    #         )
+    #         await asyncio.sleep(1) # to force the first request to be started first
+    #         task1 = asyncio.create_task(
+    #             self.cache_report_openai_async(
+    #                 "> can the same batch parallel request use the cache?"
+    #             )
+    #         )
+    #         task2 = asyncio.create_task(
+    #             self.cache_report_openai_async(
+    #                 "> can the same batch parallel request use the cache?"
+    #             )
+    #         )
+    #         result0, result1, result2 = await asyncio.gather(task0, task1, task2)
 
-            cached_tokens0, prompt_tokens0 = result0
-            cached_tokens1, prompt_tokens1 = result1
-            cached_tokens2, prompt_tokens2 = result2
+    #         cached_tokens0, prompt_tokens0 = result0
+    #         cached_tokens1, prompt_tokens1 = result1
+    #         cached_tokens2, prompt_tokens2 = result2
 
-            print(
-                f"Async request 0 - Cached tokens: {cached_tokens0}, Prompt tokens: {prompt_tokens0}"
-            )
-            print(
-                f"Async request 1 - Cached tokens: {cached_tokens1}, Prompt tokens: {prompt_tokens1}"
-            )
-            print(
-                f"Async request 2 - Cached tokens: {cached_tokens2}, Prompt tokens: {prompt_tokens2}"
-            )
+    #         print(
+    #             f"Async request 0 - Cached tokens: {cached_tokens0}, Prompt tokens: {prompt_tokens0}"
+    #         )
+    #         print(
+    #             f"Async request 1 - Cached tokens: {cached_tokens1}, Prompt tokens: {prompt_tokens1}"
+    #         )
+    #         print(
+    #             f"Async request 2 - Cached tokens: {cached_tokens2}, Prompt tokens: {prompt_tokens2}"
+    #         )
 
-            # Assert that no requests used the cache (becausefirst is alone, and the next two are in the same batch)
-            # If a new optimisation limiting starting request with same prefix at the same time was added
-            # to maximise the cache hit, this would not be true
-            assert cached_tokens1 == cached_tokens2 == cached_tokens0
+    #         # Assert that no requests used the cache (because first is alone, and the next two are in the same batch)
+    #         # If a new optimisation limiting starting request with same prefix at the same time was added
+    #         # to maximise the cache hit, this would not be true
+    #         assert cached_tokens1 == cached_tokens2 == cached_tokens0
 
-        asyncio.run(run_test())
+    #     asyncio.run(run_test())
 
 
 if __name__ == "__main__":
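For context on what the disabled test exercised: it staggers one request ahead of two concurrent ones so that the pair lands in the same inference batch, then asserts that all three responses report the same cached token count, since neither of the batched pair can hit a prefix cached by its sibling. A minimal sketch of that scheduling pattern follows; `fetch_cache_report` is a hypothetical stand-in for the suite's `cache_report_openai_async`, simulated here so the snippet runs on its own.

import asyncio

async def fetch_cache_report(prompt: str) -> tuple[int, int]:
    # Hypothetical stand-in for cache_report_openai_async: a real helper
    # would send `prompt` to the server and read the usage block back.
    await asyncio.sleep(0.01)  # simulate network/inference latency
    return 0, len(prompt.split())  # (cached_tokens, prompt_tokens)

async def run_test() -> None:
    # Request 0 starts alone, so it is batched by itself.
    task0 = asyncio.create_task(fetch_cache_report("warm-up prompt"))
    # The short sleep only *usually* lets request 0 get scheduled first;
    # this timing dependence is the likely source of the flakiness the
    # TODO in the diff refers to.
    await asyncio.sleep(0.05)
    # Requests 1 and 2 share a prefix and should join the same batch,
    # so neither can use a cache entry written by the other.
    task1 = asyncio.create_task(fetch_cache_report("shared prefix prompt"))
    task2 = asyncio.create_task(fetch_cache_report("shared prefix prompt"))
    results = await asyncio.gather(task0, task1, task2)
    cached0, cached1, cached2 = (cached for cached, _ in results)
    assert cached0 == cached1 == cached2  # no request used the cache

asyncio.run(run_test())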
||||
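The `cache_report_openai_async` helper the test calls is defined earlier in the file and not shown in this hunk. Its essential shape, sketched under assumptions rather than taken from the suite: an `AsyncOpenAI` client pointed at a locally launched server (the base URL and model name below are placeholders), and the cached count read from `usage.prompt_tokens_details`, which SGLang populates only when the server is started with `--enable-cache-report`.

import openai  # pip install openai

async def cache_report_openai_async(message: str) -> tuple[int, int]:
    # Assumed local endpoint; adjust to wherever the test server is launched.
    client = openai.AsyncOpenAI(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")
    response = await client.chat.completions.create(
        model="default",  # placeholder model name
        messages=[{"role": "user", "content": message}],
        max_tokens=8,
    )
    usage = response.usage
    # cached_tokens may sit under prompt_tokens_details, or be absent
    # entirely if cache reporting is disabled on the server.
    details = getattr(usage, "prompt_tokens_details", None)
    cached_tokens = getattr(details, "cached_tokens", 0) or 0
    return cached_tokens, usage.prompt_tokens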