#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/blob/main/tests/models/utils.py
#
from typing import Dict, List, Optional, Sequence, Tuple, Union

from vllm.logprobs import PromptLogprobs, SampleLogprobs

# Generated sequence represented as (token ID list, decoded string).
TokensText = Tuple[List[int], str]


def check_outputs_equal(
    *,
    outputs_0_lst: Sequence[TokensText],
    outputs_1_lst: Sequence[TokensText],
    name_0: str,
    name_1: str,
):
    """Assert that two models generated exactly the same sequences.

    Args:
        outputs_0_lst: (token_ids, text) outputs from the first model.
        outputs_1_lst: (token_ids, text) outputs from the second model.
        name_0: Label for the first model, used in failure messages.
        name_1: Label for the second model, used in failure messages.

    Raises:
        AssertionError: If the two runs differ in length, or if any pair
            of outputs differs in decoded text or token IDs.
    """
    # Both runs must have produced one output per prompt.
    assert len(outputs_0_lst) == len(outputs_1_lst)

    paired_outputs = zip(outputs_0_lst, outputs_1_lst)
    for idx, ((ids_a, text_a), (ids_b, text_b)) in enumerate(paired_outputs):
        mismatch_msg = (f"Test{idx}:"
                        f"\n{name_0}:\t{text_a!r}"
                        f"\n{name_1}:\t{text_b!r}")

        # Both the decoded text and the raw token IDs must match exactly.
        assert text_a == text_b, mismatch_msg
        assert ids_a == ids_b, mismatch_msg


# Representation of generated sequence as a tuple of
# * Token ID list
# * String
# * List of top sample logprobs for each sampled token
#
# Assumes prompt logprobs were not requested.
TokensTextLogprobs = Tuple[List[int], str, Optional[Union[List[Dict[int, float]], SampleLogprobs]]] # Representation of generated sequence as a tuple of # * Token ID list # * String # * Optional list of top sample logprobs for each sampled token # * Optional list of top prompt logprobs for each prompt token # # Allows prompt logprobs to be requested. TokensTextLogprobsPromptLogprobs = Tuple[ List[int], str, Optional[Union[List[Dict[int, float]], SampleLogprobs]], Optional[Union[List[Optional[Dict[int, float]]], PromptLogprobs]]]