Sync from v0.13
0
tests/models/language/pooling_mteb_test/__init__.py
Normal file
415
tests/models/language/pooling_mteb_test/mteb_utils.py
Normal file
@@ -0,0 +1,415 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import tempfile

import mteb
import numpy as np
import requests
import torch
from mteb.models import ModelMeta
from mteb.types import Array
from torch.utils.data import DataLoader

import tests.ci_envs as ci_envs
from tests.models.utils import (
    EmbedModelInfo,
    RerankModelInfo,
    check_embeddings_close,
    get_vllm_extra_kwargs,
)

# For most embedding models on the STS12 task (see #17175):
# - the same model implementation with minor tensor-dtype changes
#   results in differences of less than 1e-4
# - a genuinely different model results in differences of more than 1e-3
# so 1e-4 is a good tolerance threshold.
MTEB_EMBED_TASKS = ["STS12"]
MTEB_EMBED_TOL = 1e-4

# See #19344
MTEB_RERANK_TASKS = ["NFCorpus"]
MTEB_RERANK_LANGS = ["eng"]
MTEB_RERANK_TOL = 2e-3

_empty_model_meta = ModelMeta(
    loader=None,
    name="vllm/model",
    revision="1",
    release_date=None,
    languages=None,
    framework=[],
    similarity_fn_name=None,
    n_parameters=None,
    memory_usage_mb=None,
    max_tokens=None,
    embed_dim=None,
    license=None,
    open_weights=None,
    public_training_code=None,
    public_training_data=None,
    use_instructions=None,
    training_datasets=None,
    modalities=["text"],  # 'image' can be added to evaluate multimodal models
)


class VllmMtebEncoder(mteb.EncoderProtocol):
    mteb_model_meta = _empty_model_meta

    def __init__(self, vllm_model):
        self.llm = vllm_model
        self.rng = np.random.default_rng(seed=42)

    def encode(
        self,
        inputs: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        # Randomize the request order in the hope of
        # surfacing potential scheduling issues.
        sentences = [text for batch in inputs for text in batch["text"]]
        r = self.rng.permutation(len(sentences))
        sentences = [sentences[i] for i in r]
        outputs = self.llm.embed(sentences, use_tqdm=False)
        embeds = np.array(outputs)
        # Undo the permutation so embeddings line up with the inputs.
        embeds = embeds[np.argsort(r)]
        return embeds

    def similarity(
        self,
        embeddings1: np.ndarray,
        embeddings2: np.ndarray,
    ) -> np.ndarray:
        # Cosine similarity
        norm1 = np.linalg.norm(embeddings1, axis=1, keepdims=True)
        norm2 = np.linalg.norm(embeddings2, axis=1, keepdims=True)
        sim = np.dot(embeddings1, embeddings2.T) / (norm1 * norm2.T)
        return sim

    def similarity_pairwise(
        self,
        embeddings1: Array,
        embeddings2: Array,
    ) -> Array:
        # Cosine similarity
        norm1 = np.linalg.norm(embeddings1, axis=1, keepdims=True)
        norm2 = np.linalg.norm(embeddings2, axis=1, keepdims=True)
        sim = np.sum(embeddings1 * embeddings2, axis=1) / (
            norm1.flatten() * norm2.flatten()
        )
        return sim
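

# A quick illustration of the two similarity helpers above (hypothetical
# values, not used by the tests): cosine similarity of a vector with itself
# is 1.0, and the pairwise form matches the diagonal of the full matrix.
#
#     enc = VllmMtebEncoder(vllm_model)
#     e = np.array([[3.0, 4.0], [0.0, 2.0]])
#     enc.similarity(e, e)           # 2x2 matrix with ones on the diagonal
#     enc.similarity_pairwise(e, e)  # array([1.0, 1.0])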


class VllmMtebCrossEncoder(mteb.CrossEncoderProtocol):
    mteb_model_meta = _empty_model_meta

    def __init__(self, vllm_model):
        self.llm = vllm_model
        self.rng = np.random.default_rng(seed=42)

    def predict(
        self,
        inputs1: DataLoader[mteb.types.BatchedInput],
        inputs2: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        queries = [text for batch in inputs1 for text in batch["text"]]
        corpus = [text for batch in inputs2 for text in batch["text"]]

        outputs = self.llm.score(
            queries, corpus, truncate_prompt_tokens=-1, use_tqdm=False
        )
        scores = np.array(outputs)
        return scores
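

# truncate_prompt_tokens=-1 tells vLLM to truncate each prompt to the model's
# maximum length rather than error out on over-long inputs; NFCorpus documents
# can exceed the context window of small rerankers.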


class OpenAIClientMtebEncoder(VllmMtebEncoder):
    def __init__(self, model_name: str, client):
        self.model_name = model_name
        self.client = client
        self.rng = np.random.default_rng(seed=42)

    def encode(
        self,
        inputs: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        # Randomize the request order in the hope of
        # surfacing potential scheduling issues.
        sentences = [text for batch in inputs for text in batch["text"]]
        r = self.rng.permutation(len(sentences))
        sentences = [sentences[i] for i in r]

        embeddings = self.client.embeddings.create(
            model=self.model_name, input=sentences
        )
        outputs = [d.embedding for d in embeddings.data]
        embeds = np.array(outputs)
        embeds = embeds[np.argsort(r)]
        return embeds
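

# Construction sketch (assumes an OpenAI-compatible vLLM server is already
# running; the URL and model name are illustrative):
#
#     import openai
#     client = openai.OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
#     encoder = OpenAIClientMtebEncoder("intfloat/e5-small", client)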


class ScoreClientMtebEncoder(mteb.CrossEncoderProtocol):
    mteb_model_meta = _empty_model_meta

    def __init__(self, model_name: str, url):
        self.model_name = model_name
        self.url = url
        self.rng = np.random.default_rng(seed=42)

    def predict(
        self,
        inputs1: DataLoader[mteb.types.BatchedInput],
        inputs2: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        queries = [text for batch in inputs1 for text in batch["text"]]
        full_corpus = [text for batch in inputs2 for text in batch["text"]]

        outputs = []
        for query, corpus in zip(queries, full_corpus):
            outputs.append(self.get_score(query, corpus))

        scores = np.array(outputs)
        return scores

    def get_score(self, query, corpus):
        response = requests.post(
            self.url,
            json={
                "model": self.model_name,
                "text_1": query,
                "text_2": corpus,
                "truncate_prompt_tokens": -1,
            },
        ).json()
        return response["data"][0]["score"]


class RerankClientMtebEncoder(ScoreClientMtebEncoder):
    def get_score(self, query, corpus):
        response = requests.post(
            self.url,
            json={
                "model": self.model_name,
                "query": query,
                "documents": [corpus],
                "truncate_prompt_tokens": -1,
            },
        ).json()
        return response["results"][0]["relevance_score"]
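

# The two clients above target the score and rerank REST endpoints of a
# running server; typical URLs (assumed deployment, adjust host/port to
# your setup):
#
#     ScoreClientMtebEncoder(model_name, "http://localhost:8000/score")
#     RerankClientMtebEncoder(model_name, "http://localhost:8000/rerank")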


def run_mteb_embed_task(encoder: mteb.EncoderProtocol, tasks):
    tasks = mteb.get_tasks(tasks=tasks)
    results = mteb.evaluate(
        encoder,
        tasks,
        cache=None,
        show_progress_bar=False,
    )

    main_score = results[0].scores["test"][0]["main_score"]
    return main_score
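

# Usage sketch (vllm_model comes from the vllm_runner fixture):
#
#     score = run_mteb_embed_task(VllmMtebEncoder(vllm_model), MTEB_EMBED_TASKS)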


def mteb_test_embed_models(
    hf_runner,
    vllm_runner,
    model_info: EmbedModelInfo,
    vllm_extra_kwargs=None,
    hf_model_callback=None,
    atol=MTEB_EMBED_TOL,
):
    vllm_extra_kwargs = get_vllm_extra_kwargs(model_info, vllm_extra_kwargs)

    # A deliberately over-long prompt, used below to test embedding_size,
    # NaNs and normalization.
    example_prompts = ["The chef prepared a delicious meal." * 1000]

    with vllm_runner(
        model_info.name,
        runner="pooling",
        max_model_len=model_info.max_model_len,
        **vllm_extra_kwargs,
    ) as vllm_model:
        model_config = vllm_model.llm.llm_engine.model_config

        # Confirm that vllm is using the correct architecture
        if model_info.architecture:
            assert model_info.architecture in model_config.architectures

        # Confirm that vllm uses the correct default_pooling_type, which
        # relates to whether chunked prefill and prefix caching are enabled
        assert (
            model_config._model_info.default_pooling_type
            == model_info.default_pooling_type
        )

        vllm_main_score = run_mteb_embed_task(
            VllmMtebEncoder(vllm_model), MTEB_EMBED_TASKS
        )
        vllm_dtype = vllm_model.llm.llm_engine.model_config.dtype
        head_dtype = model_config.head_dtype

        # Test embedding_size, NaNs and whether normalization is applied
        vllm_outputs = vllm_model.embed(example_prompts, truncate_prompt_tokens=-1)
        outputs_tensor = torch.tensor(vllm_outputs)
        assert not torch.any(torch.isnan(outputs_tensor))
        embedding_size = model_config.embedding_size
        assert outputs_tensor.shape[-1] == embedding_size

    # Accelerate the mteb test by pinning the SentenceTransformers
    # mteb score to a precomputed constant where available
    if model_info.mteb_score is None:
        with hf_runner(
            model_info.name,
            is_sentence_transformer=True,
            dtype=ci_envs.VLLM_CI_HF_DTYPE or model_info.hf_dtype,
        ) as hf_model:
            # e.g. for setting default parameters of the hf_runner encode method
            if hf_model_callback is not None:
                hf_model_callback(hf_model)

            st_main_score = run_mteb_embed_task(hf_model, MTEB_EMBED_TASKS)
            st_dtype = next(hf_model.model.parameters()).dtype

            # Check that the vllm embeddings are close to the hf outputs
            hf_outputs = hf_model.encode(example_prompts)
            check_embeddings_close(
                embeddings_0_lst=hf_outputs,
                embeddings_1_lst=vllm_outputs,
                name_0="hf",
                name_1="vllm",
                tol=1e-2,
            )
    else:
        st_main_score = model_info.mteb_score
        st_dtype = "Constant"

    print("Model:", model_info.name)
    print("VLLM:", f"dtype:{vllm_dtype}", f"head_dtype:{head_dtype}", vllm_main_score)
    print("SentenceTransformers:", st_dtype, st_main_score)
    print("Difference:", st_main_score - vllm_main_score)

    # We are not concerned if the vllm mteb results are better than the
    # SentenceTransformers baseline, so only a one-sided test is performed.
    assert st_main_score - vllm_main_score < atol


def run_mteb_rerank(cross_encoder: mteb.CrossEncoderProtocol, tasks, languages):
    with tempfile.TemporaryDirectory() as prediction_folder:
        bm25s = mteb.get_model("bm25s")
        eval_splits = ["test"]

        mteb_tasks: list[mteb.abstasks.AbsTaskRetrieval] = mteb.get_tasks(
            tasks=tasks, languages=languages, eval_splits=eval_splits
        )

        # First stage: BM25 retrieval, with predictions saved to
        # prediction_folder.
        mteb.evaluate(
            bm25s,
            mteb_tasks,
            prediction_folder=prediction_folder,
            show_progress_bar=False,
            # don't save results for test runs
            cache=None,
            overwrite_strategy="always",
        )

        # Second stage: rerank the BM25 top-10 with the cross encoder.
        second_stage_tasks = []
        for task in mteb_tasks:
            second_stage_tasks.append(
                task.convert_to_reranking(
                    prediction_folder,
                    top_k=10,
                )
            )

        results = mteb.evaluate(
            cross_encoder,
            second_stage_tasks,
            show_progress_bar=False,
            cache=None,
        )
        main_score = results[0].scores["test"][0]["main_score"]
        return main_score
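

# run_mteb_rerank is a two-stage retrieval pipeline: BM25 produces the
# first-stage candidates, and the cross encoder is scored only on reranking
# each query's top-10. Typical call:
#
#     score = run_mteb_rerank(
#         VllmMtebCrossEncoder(vllm_model),
#         tasks=MTEB_RERANK_TASKS,
#         languages=MTEB_RERANK_LANGS,
#     )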


def mteb_test_rerank_models_hf(
    hf_runner, model_name, hf_dtype="float32", hf_model_callback=None
):
    with hf_runner(model_name, is_cross_encoder=True, dtype=hf_dtype) as hf_model:
        if hf_model_callback is not None:
            hf_model_callback(hf_model)

        st_main_score = run_mteb_rerank(
            hf_model, tasks=MTEB_RERANK_TASKS, languages=MTEB_RERANK_LANGS
        )
        st_dtype = next(hf_model.model.model.parameters()).dtype
    return st_main_score, st_dtype


def mteb_test_rerank_models(
    hf_runner,
    vllm_runner,
    model_info: RerankModelInfo,
    vllm_extra_kwargs=None,
    hf_model_callback=None,
    vllm_mteb_encoder=VllmMtebCrossEncoder,
    atol=MTEB_RERANK_TOL,
):
    vllm_extra_kwargs = get_vllm_extra_kwargs(model_info, vllm_extra_kwargs)

    with vllm_runner(
        model_info.name,
        runner="pooling",
        max_model_len=None,
        max_num_seqs=8,
        **vllm_extra_kwargs,
    ) as vllm_model:
        model_config = vllm_model.llm.llm_engine.model_config

        # Confirm that vllm is using the correct architecture
        if model_info.architecture:
            assert model_info.architecture in model_config.architectures

        # The score API is only enabled for num_labels == 1
        assert model_config.hf_config.num_labels == 1

        # Confirm that vllm uses the correct default_pooling_type, which
        # relates to whether chunked prefill and prefix caching are enabled
        assert (
            model_config._model_info.default_pooling_type
            == model_info.default_pooling_type
        )

        vllm_main_score = run_mteb_rerank(
            vllm_mteb_encoder(vllm_model),
            tasks=MTEB_RERANK_TASKS,
            languages=MTEB_RERANK_LANGS,
        )
        vllm_dtype = model_config.dtype
        head_dtype = model_config.head_dtype

    # Accelerate the mteb test by pinning the SentenceTransformers
    # mteb score to a precomputed constant where available
    if model_info.mteb_score is None:
        st_main_score, st_dtype = mteb_test_rerank_models_hf(
            hf_runner, model_info.name, model_info.hf_dtype, hf_model_callback
        )
    else:
        st_main_score = model_info.mteb_score
        st_dtype = "Constant"

    print("Model:", model_info.name)
    print("VLLM:", f"dtype:{vllm_dtype}", f"head_dtype:{head_dtype}", vllm_main_score)
    print("SentenceTransformers:", st_dtype, st_main_score)
    print("Difference:", st_main_score - vllm_main_score)

    # We are not concerned if the vllm mteb results are better than the
    # SentenceTransformers baseline, so only a one-sided test is performed.
    assert st_main_score - vllm_main_score < atol
114
tests/models/language/pooling_mteb_test/test_baai.py
Normal file
@@ -0,0 +1,114 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest

from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import (
    CLSPoolingEmbedModelInfo,
    CLSPoolingRerankModelInfo,
    EmbedModelInfo,
    LASTPoolingEmbedModelInfo,
    RerankModelInfo,
)

from .mteb_utils import mteb_test_embed_models, mteb_test_rerank_models

MODELS = [
    ########## BertModel
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-base-en",
        architecture="BertModel",
        mteb_score=0.779336792,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-base-zh", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-small-en", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-small-zh", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-large-en", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-large-zh", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-large-zh-noinstruct", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-base-en-v1.5", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-base-zh-v1.5", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-small-en-v1.5", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-small-zh-v1.5", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-large-en-v1.5", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-large-zh-v1.5", architecture="BertModel", enable_test=False
    ),
    ########## XLMRobertaModel
    CLSPoolingEmbedModelInfo(
        "BAAI/bge-m3",
        architecture="XLMRobertaModel",
        mteb_score=0.787343078,
        enable_test=True,
    ),
    ########## Qwen2Model
    LASTPoolingEmbedModelInfo(
        "BAAI/bge-code-v1",
        architecture="Qwen2Model",
        mteb_score=0.75724465,
        dtype="float32",
        enable_test=True,
    ),
]

RERANK_MODELS = [
    ########## XLMRobertaForSequenceClassification
    CLSPoolingRerankModelInfo(
        "BAAI/bge-reranker-base",
        architecture="XLMRobertaForSequenceClassification",
        mteb_score=0.32398,
        enable_test=True,
    ),
    CLSPoolingRerankModelInfo(
        "BAAI/bge-reranker-large",
        architecture="XLMRobertaForSequenceClassification",
        enable_test=False,
    ),
    CLSPoolingRerankModelInfo(
        "BAAI/bge-reranker-v2-m3",
        architecture="XLMRobertaForSequenceClassification",
        enable_test=False,
    ),
]


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    correctness_test_embed_models(hf_runner, vllm_runner, model_info, example_prompts)


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(
    hf_runner, vllm_runner, model_info: RerankModelInfo
) -> None:
    mteb_test_rerank_models(hf_runner, vllm_runner, model_info)
@@ -0,0 +1,145 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any

import mteb
import numpy as np
import pytest
import torch
from torch.utils.data import DataLoader

from tests.conftest import HfRunner
from tests.models.language.pooling_mteb_test.mteb_utils import (
    VllmMtebCrossEncoder,
    mteb_test_rerank_models,
)
from tests.models.utils import LASTPoolingRerankModelInfo, RerankModelInfo

RERANK_MODELS = [
    LASTPoolingRerankModelInfo(
        "BAAI/bge-reranker-v2-gemma",
        architecture="GemmaForSequenceClassification",
        mteb_score=0.33757,
        hf_overrides={
            "architectures": ["GemmaForSequenceClassification"],
            "classifier_from_token": ["Yes"],
            "method": "no_post_processing",
        },
    ),
]

PROMPT = "Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'."  # noqa: E501


class GemmaRerankerHfRunner(HfRunner):
    def __init__(
        self, model_name: str, dtype: str = "auto", *args: Any, **kwargs: Any
    ) -> None:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        super().__init__(model_name, dtype, auto_cls=AutoModelForCausalLM)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
        self.yes_loc = self.tokenizer.convert_tokens_to_ids("Yes")

    @torch.no_grad()
    def predict(self, prompts: list[list[str]], *args, **kwargs) -> torch.Tensor:
        def get_inputs(pairs, tokenizer, prompt=None):
            if prompt is None:
                prompt = PROMPT

            sep = "\n"
            prompt_inputs = tokenizer(
                prompt, return_tensors=None, add_special_tokens=False
            )["input_ids"]
            sep_inputs = tokenizer(sep, return_tensors=None, add_special_tokens=False)[
                "input_ids"
            ]
            inputs = []
            for query, passage in pairs:
                query_inputs = tokenizer(
                    f"A: {query}",
                    return_tensors=None,
                    add_special_tokens=False,
                    truncation=True,
                )
                passage_inputs = tokenizer(
                    f"B: {passage}",
                    return_tensors=None,
                    add_special_tokens=False,
                    truncation=True,
                )
                item = tokenizer.prepare_for_model(
                    [tokenizer.bos_token_id] + query_inputs["input_ids"],
                    sep_inputs + passage_inputs["input_ids"],
                    truncation="only_second",
                    padding=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    add_special_tokens=False,
                )
                item["input_ids"] = item["input_ids"] + sep_inputs + prompt_inputs
                item["attention_mask"] = [1] * len(item["input_ids"])
                inputs.append(item)
            return tokenizer.pad(
                inputs,
                padding=True,
                return_tensors="pt",
            )
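
        # Each packed sequence produced above has the layout (reconstructed
        # from the tokenizer calls; braces are placeholders):
        #     <bos> A: {query} \n B: {passage} \n PROMPT
        # The relevance score is then the sigmoid of the "Yes" logit at the
        # last position.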

        scores = []
        for query, doc, *_ in prompts:
            pairs = [(query, doc)]
            inputs = get_inputs(pairs, self.tokenizer)
            inputs = inputs.to(self.model.device)
            _n_tokens = inputs["input_ids"].shape[1]
            logits = self.model(**inputs, return_dict=True).logits
            _scores = logits[:, -1, self.yes_loc].view(-1).float().sigmoid()
            scores.append(_scores[0].item())
        return torch.Tensor(scores)


class GemmaMtebEncoder(VllmMtebCrossEncoder):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.query_template = "A: {query}\n"
        self.document_template = "B: {doc}\n{prompt}"

    def predict(
        self,
        inputs1: DataLoader[mteb.types.BatchedInput],
        inputs2: DataLoader[mteb.types.BatchedInput],
        *args,
        **kwargs,
    ) -> np.ndarray:
        queries = [
            self.query_template.format(query=text)
            for batch in inputs1
            for text in batch["text"]
        ]
        corpus = [
            self.document_template.format(doc=text, prompt=PROMPT)
            for batch in inputs2
            for text in batch["text"]
        ]
        outputs = self.llm.score(
            queries, corpus, truncate_prompt_tokens=-1, use_tqdm=False
        )
        scores = np.array(outputs)
        return scores


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(vllm_runner, model_info: RerankModelInfo) -> None:
    mteb_test_rerank_models(
        GemmaRerankerHfRunner,
        vllm_runner,
        model_info,
        vllm_mteb_encoder=GemmaMtebEncoder,
    )
@@ -0,0 +1,31 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest

from tests.models.utils import (
    CLSPoolingRerankModelInfo,
    LASTPoolingRerankModelInfo,
    RerankModelInfo,
)

from .mteb_utils import mteb_test_rerank_models

RERANK_MODELS = [
    CLSPoolingRerankModelInfo(
        "cross-encoder/ms-marco-TinyBERT-L-2-v2",
        mteb_score=0.32898,
        architecture="BertForSequenceClassification",
    ),
    LASTPoolingRerankModelInfo(
        "tomaarsen/Qwen3-Reranker-0.6B-seq-cls",
        mteb_score=0.25736,
        architecture="Qwen3ForSequenceClassification",
    ),
]


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(
    hf_runner, vllm_runner, model_info: RerankModelInfo
) -> None:
    mteb_test_rerank_models(hf_runner, vllm_runner, model_info)
129
tests/models/language/pooling_mteb_test/test_gte.py
Normal file
@@ -0,0 +1,129 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest

from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import (
    CLSPoolingEmbedModelInfo,
    CLSPoolingRerankModelInfo,
    EmbedModelInfo,
    LASTPoolingEmbedModelInfo,
    RerankModelInfo,
)

from .mteb_utils import mteb_test_embed_models, mteb_test_rerank_models

MODELS = [
    ########## BertModel
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-large",
        mteb_score=0.76807651,
        architecture="BertModel",
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-base", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-small", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-large-zh", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-base-zh", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "thenlper/gte-small-zh", architecture="BertModel", enable_test=False
    ),
    ########## NewModel
    # These three checkpoints are nearly, but not exactly, the same
    # architecture; they differ in details such as whether
    # token_type_embeddings and context expansion are used,
    # so only the most widely used model is tested.
    CLSPoolingEmbedModelInfo(
        "Alibaba-NLP/gte-multilingual-base",
        architecture="GteNewModel",
        mteb_score=0.775074696,
        hf_overrides={"architectures": ["GteNewModel"]},
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "Alibaba-NLP/gte-base-en-v1.5",
        architecture="GteNewModel",
        hf_overrides={"architectures": ["GteNewModel"]},
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "Alibaba-NLP/gte-large-en-v1.5",
        architecture="GteNewModel",
        hf_overrides={"architectures": ["GteNewModel"]},
        enable_test=False,
    ),
    ########## Qwen2ForCausalLM
    LASTPoolingEmbedModelInfo(
        "Alibaba-NLP/gte-Qwen2-1.5B-instruct",
        mteb_score=0.758473459018872,
        architecture="Qwen2ForCausalLM",
        enable_test=True,
    ),
    ########## ModernBertModel
    CLSPoolingEmbedModelInfo(
        "Alibaba-NLP/gte-modernbert-base",
        mteb_score=0.748193353,
        architecture="ModernBertModel",
        enable_test=True,
    ),
    ########## Qwen3ForCausalLM
    LASTPoolingEmbedModelInfo(
        "Qwen/Qwen3-Embedding-0.6B",
        mteb_score=0.771163695,
        architecture="Qwen3ForCausalLM",
        dtype="float32",
        enable_test=True,
    ),
    LASTPoolingEmbedModelInfo(
        "Qwen/Qwen3-Embedding-4B",
        architecture="Qwen3ForCausalLM",
        dtype="float32",
        enable_test=False,
    ),
]

RERANK_MODELS = [
    CLSPoolingRerankModelInfo(
        # classifier_pooling: mean
        "Alibaba-NLP/gte-reranker-modernbert-base",
        mteb_score=0.33386,
        architecture="ModernBertForSequenceClassification",
        enable_test=True,
    ),
    CLSPoolingRerankModelInfo(
        "Alibaba-NLP/gte-multilingual-reranker-base",
        mteb_score=0.33062,
        architecture="GteNewForSequenceClassification",
        hf_overrides={"architectures": ["GteNewForSequenceClassification"]},
        enable_test=True,
    ),
]


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    correctness_test_embed_models(hf_runner, vllm_runner, model_info, example_prompts)


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(
    hf_runner, vllm_runner, model_info: RerankModelInfo
) -> None:
    mteb_test_rerank_models(hf_runner, vllm_runner, model_info)
56
tests/models/language/pooling_mteb_test/test_intfloat.py
Normal file
@@ -0,0 +1,56 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest

from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import CLSPoolingEmbedModelInfo, EmbedModelInfo

from .mteb_utils import mteb_test_embed_models

MODELS = [
    ########## BertModel
    CLSPoolingEmbedModelInfo(
        "intfloat/e5-small",
        architecture="BertModel",
        mteb_score=0.742285423,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "intfloat/e5-base", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "intfloat/e5-large", architecture="BertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "intfloat/multilingual-e5-small", architecture="BertModel", enable_test=False
    ),
    ########## XLMRobertaModel
    CLSPoolingEmbedModelInfo(
        "intfloat/multilingual-e5-base",
        architecture="XLMRobertaModel",
        mteb_score=0.779325955,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "intfloat/multilingual-e5-large",
        architecture="XLMRobertaModel",
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "intfloat/multilingual-e5-large-instruct",
        architecture="XLMRobertaModel",
        enable_test=False,
    ),
]


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    correctness_test_embed_models(hf_runner, vllm_runner, model_info, example_prompts)
126
tests/models/language/pooling_mteb_test/test_jina.py
Normal file
@@ -0,0 +1,126 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from functools import partial

import pytest

from tests.models.language.pooling.embed_utils import (
    check_embeddings_close,
    correctness_test_embed_models,
    matryoshka_fy,
)
from tests.models.utils import (
    CLSPoolingEmbedModelInfo,
    CLSPoolingRerankModelInfo,
    EmbedModelInfo,
    RerankModelInfo,
)
from vllm import PoolingParams

from .mteb_utils import mteb_test_embed_models, mteb_test_rerank_models

EMBEDDING_MODELS = [
    CLSPoolingEmbedModelInfo(
        "jinaai/jina-embeddings-v3",
        mteb_score=0.824413164,
        architecture="XLMRobertaModel",
        is_matryoshka=True,
        dtype="float32",
    )
]

RERANK_MODELS = [
    CLSPoolingRerankModelInfo(
        "jinaai/jina-reranker-v2-base-multilingual",
        mteb_score=0.33643,
        architecture="XLMRobertaForSequenceClassification",
    )
]


@pytest.mark.parametrize("model_info", EMBEDDING_MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    def hf_model_callback(model):
        model.encode = partial(model.encode, task="text-matching")

    mteb_test_embed_models(
        hf_runner, vllm_runner, model_info, hf_model_callback=hf_model_callback
    )


@pytest.mark.parametrize("model_info", EMBEDDING_MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    def hf_model_callback(model):
        model.encode = partial(model.encode, task="text-matching")

    correctness_test_embed_models(
        hf_runner,
        vllm_runner,
        model_info,
        example_prompts,
        hf_model_callback=hf_model_callback,
    )


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(
    hf_runner, vllm_runner, model_info: RerankModelInfo
) -> None:
    mteb_test_rerank_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", EMBEDDING_MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("dimensions", [16, 32])
def test_matryoshka(
    hf_runner,
    vllm_runner,
    model_info,
    dtype: str,
    dimensions: int,
    example_prompts,
    monkeypatch,
) -> None:
    if not model_info.is_matryoshka:
        pytest.skip("Model is not matryoshka")

    # ST will strip the input texts, see test_embedding.py
    example_prompts = [str(s).strip() for s in example_prompts]

    with hf_runner(
        model_info.name,
        dtype=dtype,
        is_sentence_transformer=True,
    ) as hf_model:
        hf_outputs = hf_model.encode(example_prompts, task="text-matching")
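        # Matryoshka models are trained so that a prefix of the embedding is
        # itself a usable embedding, so the reference outputs are truncated
        # to the first `dimensions` components before comparison with vLLM's
        # dimension-reduced embeddings below.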
        hf_outputs = matryoshka_fy(hf_outputs, dimensions)

    with vllm_runner(
        model_info.name, runner="pooling", dtype=dtype, max_model_len=None
    ) as vllm_model:
        assert vllm_model.llm.llm_engine.model_config.is_matryoshka

        matryoshka_dimensions = (
            vllm_model.llm.llm_engine.model_config.matryoshka_dimensions
        )
        assert matryoshka_dimensions is not None

        if dimensions not in matryoshka_dimensions:
            with pytest.raises(ValueError):
                vllm_model.embed(
                    example_prompts, pooling_params=PoolingParams(dimensions=dimensions)
                )
        else:
            vllm_outputs = vllm_model.embed(
                example_prompts, pooling_params=PoolingParams(dimensions=dimensions)
            )

            check_embeddings_close(
                embeddings_0_lst=hf_outputs,
                embeddings_1_lst=vllm_outputs,
                name_0="hf",
                name_1="vllm",
                tol=1e-2,
            )
83
tests/models/language/pooling_mteb_test/test_mxbai_rerank.py
Normal file
@@ -0,0 +1,83 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any

import pytest
import torch

from tests.conftest import HfRunner
from tests.models.utils import LASTPoolingRerankModelInfo, RerankModelInfo

from .mteb_utils import mteb_test_rerank_models

mxbai_rerank_hf_overrides = {
    "architectures": ["Qwen2ForSequenceClassification"],
    "classifier_from_token": ["0", "1"],
    "method": "from_2_way_softmax",
}

RERANK_MODELS = [
    LASTPoolingRerankModelInfo(
        "mixedbread-ai/mxbai-rerank-base-v2",
        architecture="Qwen2ForSequenceClassification",
        hf_overrides=mxbai_rerank_hf_overrides,
        mteb_score=0.273,
        enable_test=True,
    ),
    LASTPoolingRerankModelInfo(
        "mixedbread-ai/mxbai-rerank-large-v2",
        architecture="Qwen2ForSequenceClassification",
        hf_overrides=mxbai_rerank_hf_overrides,
        enable_test=False,
    ),
]


class MxbaiRerankerHfRunner(HfRunner):
    def __init__(
        self, model_name: str, dtype: str = "auto", *args: Any, **kwargs: Any
    ) -> None:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        super().__init__(model_name, dtype, auto_cls=AutoModelForCausalLM)

        self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
        self.yes_loc = self.tokenizer.convert_tokens_to_ids("1")
        self.no_loc = self.tokenizer.convert_tokens_to_ids("0")

    def predict(self, prompts: list[list[str]], *args, **kwargs) -> torch.Tensor:
        def process_inputs(pairs):
            inputs = self.tokenizer(
                pairs,
                padding=False,
                truncation="longest_first",
                return_attention_mask=False,
            )
            inputs = self.tokenizer.pad(inputs, padding=True, return_tensors="pt")
            for key in inputs:
                inputs[key] = inputs[key].to(self.model.device)
            return inputs

        @torch.no_grad()
        def compute_logits(inputs):
            logits = self.model(**inputs).logits[:, -1, :]
            yes_logits = logits[:, self.yes_loc]
            no_logits = logits[:, self.no_loc]
            logits = yes_logits - no_logits
            scores = logits.float().sigmoid()
            return scores
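
        # Note: sigmoid(logit("1") - logit("0")) equals
        # softmax([logit("0"), logit("1")])[1], so this HF reference mirrors
        # the two-way-softmax score that the "from_2_way_softmax" override
        # asks vLLM to compute on its side.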

        scores = []
        for query, doc, *_ in prompts:
            pairs = [(query, doc)]
            inputs = process_inputs(pairs)
            score = compute_logits(inputs)
            scores.append(score[0].item())
        return torch.Tensor(scores)


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(vllm_runner, model_info: RerankModelInfo) -> None:
    mteb_test_rerank_models(MxbaiRerankerHfRunner, vllm_runner, model_info)
44
tests/models/language/pooling_mteb_test/test_nomic.py
Normal file
@@ -0,0 +1,44 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest

from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import CLSPoolingEmbedModelInfo, EmbedModelInfo

from .mteb_utils import mteb_test_embed_models

MODELS = [
    CLSPoolingEmbedModelInfo(
        "nomic-ai/nomic-embed-text-v1",
        architecture="NomicBertModel",
        mteb_score=0.737568559,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "nomic-ai/nomic-embed-text-v1.5",
        architecture="NomicBertModel",
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "nomic-ai/CodeRankEmbed", architecture="NomicBertModel", enable_test=False
    ),
    CLSPoolingEmbedModelInfo(
        "nomic-ai/nomic-embed-text-v2-moe",
        architecture="NomicBertModel",
        mteb_score=0.715488912,
        enable_test=True,
    ),
]


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    correctness_test_embed_models(hf_runner, vllm_runner, model_info, example_prompts)
@@ -0,0 +1,99 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any

import pytest
import torch

from tests.conftest import HfRunner
from tests.models.utils import LASTPoolingRerankModelInfo, RerankModelInfo
from tests.utils import multi_gpu_test

from .mteb_utils import mteb_test_rerank_models

qwen3_reranker_hf_overrides = {
    "architectures": ["Qwen3ForSequenceClassification"],
    "classifier_from_token": ["no", "yes"],
    "is_original_qwen3_reranker": True,
}

RERANK_MODELS = [
    LASTPoolingRerankModelInfo(
        "Qwen/Qwen3-Reranker-0.6B",
        architecture="Qwen3ForSequenceClassification",
        mteb_score=0.25736,
        hf_overrides=qwen3_reranker_hf_overrides,
        enable_test=True,
    ),
    LASTPoolingRerankModelInfo(
        "Qwen/Qwen3-Reranker-4B",
        architecture="Qwen3ForSequenceClassification",
        hf_overrides=qwen3_reranker_hf_overrides,
        enable_test=False,
    ),
]


class Qwen3RerankerHfRunner(HfRunner):
    def __init__(
        self, model_name: str, dtype: str = "auto", *args: Any, **kwargs: Any
    ) -> None:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        super().__init__(model_name, dtype, auto_cls=AutoModelForCausalLM)

        self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
        self.token_false_id = self.tokenizer.convert_tokens_to_ids("no")
        self.token_true_id = self.tokenizer.convert_tokens_to_ids("yes")

    def predict(self, prompts: list[list[str]], *args, **kwargs) -> torch.Tensor:
        def process_inputs(pairs):
            inputs = self.tokenizer(
                pairs,
                padding=False,
                truncation="longest_first",
                return_attention_mask=False,
            )
            inputs = self.tokenizer.pad(inputs, padding=True, return_tensors="pt")
            for key in inputs:
                inputs[key] = inputs[key].to(self.model.device)
            return inputs

        @torch.no_grad()
        def compute_logits(inputs):
            batch_scores = self.model(**inputs).logits[:, -1, :]
            true_vector = batch_scores[:, self.token_true_id]
            false_vector = batch_scores[:, self.token_false_id]
            batch_scores = torch.stack([false_vector, true_vector], dim=1)
            batch_scores = torch.nn.functional.log_softmax(batch_scores, dim=1)
            scores = batch_scores[:, 1].exp()
            return scores
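
        # exp(log_softmax([no, yes]))[1] is P("yes") under a binary softmax,
        # which is numerically the same as sigmoid(yes - no); this follows
        # the original Qwen3-Reranker scoring recipe.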

        scores = []
        for query, doc, *_ in prompts:
            pairs = [(query, doc)]
            inputs = process_inputs(pairs)
            score = compute_logits(inputs)
            scores.append(score[0].item())
        return torch.Tensor(scores)


@pytest.mark.parametrize("model_info", RERANK_MODELS)
def test_rerank_models_mteb(vllm_runner, model_info: RerankModelInfo) -> None:
    mteb_test_rerank_models(Qwen3RerankerHfRunner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", RERANK_MODELS)
@multi_gpu_test(num_gpus=2)
def test_rerank_models_mteb_tp(vllm_runner, model_info: RerankModelInfo) -> None:
    assert model_info.architecture == "Qwen3ForSequenceClassification"

    vllm_extra_kwargs: dict[str, Any] = {
        "tensor_parallel_size": 2,
    }

    mteb_test_rerank_models(
        Qwen3RerankerHfRunner, vllm_runner, model_info, vllm_extra_kwargs
    )
@@ -0,0 +1,77 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest

from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import CLSPoolingEmbedModelInfo, EmbedModelInfo

from .mteb_utils import mteb_test_embed_models

MODELS = [
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-xs",
        is_matryoshka=False,
        architecture="BertModel",
        mteb_score=0.714927797,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-s",
        is_matryoshka=False,
        architecture="BertModel",
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-m",
        is_matryoshka=False,
        architecture="BertModel",
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-m-long",
        is_matryoshka=False,
        architecture="NomicBertModel",
        mteb_score=0.681146831,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-l",
        is_matryoshka=False,
        architecture="BertModel",
        enable_test=False,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-m-v1.5",
        is_matryoshka=True,
        architecture="BertModel",
        mteb_score=0.649088363,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-l-v2.0",
        is_matryoshka=True,
        architecture="XLMRobertaModel",
        mteb_score=0.712258299,
        enable_test=True,
    ),
    CLSPoolingEmbedModelInfo(
        "Snowflake/snowflake-arctic-embed-m-v2.0",
        is_matryoshka=True,
        architecture="GteModel",
        mteb_score=0.706622444,
        enable_test=True,
    ),
]


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    correctness_test_embed_models(hf_runner, vllm_runner, model_info, example_prompts)
33
tests/models/language/pooling_mteb_test/test_st_projector.py
Normal file
@@ -0,0 +1,33 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest

from tests.models.utils import (
    CLSPoolingEmbedModelInfo,
    EmbedModelInfo,
    LASTPoolingEmbedModelInfo,
)

from .mteb_utils import mteb_test_embed_models

# ST models with projector (Dense) layers
ST_PROJECTOR_MODELS = [
    CLSPoolingEmbedModelInfo(
        "TencentBAC/Conan-embedding-v1",
        architecture="BertModel",
        mteb_score=0.688611955,
        enable_test=True,
    ),
    LASTPoolingEmbedModelInfo(
        "google/embeddinggemma-300m",
        architecture="Gemma3TextModel",
        mteb_score=0.7473819294684156,
        enable_test=True,
        dtype="float32",
    ),
]


@pytest.mark.parametrize("model_info", ST_PROJECTOR_MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)