"""
Copyright 2023-2024 SGLang Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Iterable, Optional, Tuple

import torch
from torch import nn
from transformers import LlamaConfig
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from sglang.srt.layers.logits_processor import LogitsProcessorOutput
from sglang.srt.layers.quantization.base_config import QuantizationConfig
from sglang.srt.model_executor.forward_batch_info import ForwardBatch
from sglang.srt.models.llama import LlamaForCausalLM, LlamaModel


class LlamaForClassification(nn.Module):
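    """Llama model with a classification head in place of the LM head.

    The final hidden state at each sequence's EOS token is projected to
    ``config.classification_out_size`` scores.
    """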
    def __init__(
        self,
        config: LlamaConfig,
        quant_config: Optional[QuantizationConfig] = None,
        cache_config=None,
    ) -> None:
        super().__init__()
        self.config = config
        self.torchao_config = None
        self.quant_config = quant_config
        self.model = LlamaModel(config, quant_config=quant_config)
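        # Replace the usual LM head with a linear classifier over the final
        # hidden state; the output width comes from the model config.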
        self.classification_head = nn.Linear(
            config.hidden_size, config.classification_out_size, bias=False
        )
        self.eos_token_id = config.eos_token_id

    @torch.no_grad()
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        forward_batch: ForwardBatch,
        input_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, forward_batch, input_embeds)
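        # Keep only the hidden state at each EOS position: with one EOS per
        # sequence, this pools the flattened batch to one vector per sequence.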
        is_eos_token = input_ids == self.eos_token_id
        hidden_states = hidden_states[is_eos_token]
        scores = self.classification_head(hidden_states)
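        # A shape mismatch means some sequence is missing its EOS token (or
        # contains extras); substitute dummy scores to keep the batch valid.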
        if scores.shape[0] != forward_batch.batch_size:
            print("Warning: the EOS tokens are missing in some sentences.")
            scores = torch.ones(
                (forward_batch.batch_size, self.config.classification_out_size),
                device=input_ids.device,
            )
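        # Reuse the standard logits-output container; the logprob fields carry
        # placeholders since classification involves no sampling.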
        logits_output = LogitsProcessorOutput(
            next_token_logits=scores,
            next_token_logprobs=scores,
            normalized_prompt_logprobs=scores,
            input_token_logprobs=torch.ones_like(input_ids),
            input_top_logprobs=None,
            output_top_logprobs=None,
        )
        return logits_output

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        params_dict = dict(self.named_parameters())
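        # Route weights three ways: load the classification head directly,
        # skip the unused lm_head, and delegate the rest to the causal-LM loader.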
        for name, loaded_weight in weights:
            if "classification_head" in name:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            elif "lm_head" in name:
                continue
            else:
                LlamaForCausalLM.load_weights(self, [(name, loaded_weight)])


EntryClass = LlamaForClassification
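

# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the SGLang API; names and
# shapes below are illustrative assumptions) of the EOS pooling performed in
# forward(): each sequence contributes the hidden state at its EOS token,
# which the linear head maps to `classification_out_size` scores.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    eos_token_id = 2
    hidden_size, num_classes = 8, 3
    # Two sequences flattened into one token stream, as SGLang batches them;
    # EOS (id 2) appears at positions 2 and 4.
    input_ids = torch.tensor([5, 7, eos_token_id, 9, eos_token_id])
    hidden_states = torch.randn(5, hidden_size)
    head = nn.Linear(hidden_size, num_classes, bias=False)
    scores = head(hidden_states[input_ids == eos_token_id])
    print(scores.shape)  # torch.Size([2, 3]): one score vector per sequence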