add support for nvidia/gpt-oss-120b-Eagle3 (#9739)

This commit is contained in:
zyksir
2025-08-28 15:20:20 +08:00
committed by GitHub
parent 55349e361d
commit aee094e430
2 changed files with 13 additions and 2 deletions

View File

@@ -185,9 +185,13 @@ class LlamaForCausalLMEagle3(LlamaForCausalLM):
)
# Llama 3.2 1B Instruct set tie_word_embeddings to True
# Llama 3.1 8B Instruct set tie_word_embeddings to False
self.load_lm_head_from_target = False
if self.config.tie_word_embeddings:
self.lm_head = self.model.embed_tokens
else:
if config.draft_vocab_size is None:
self.load_lm_head_from_target = True
config.draft_vocab_size = config.vocab_size
self.lm_head = ParallelLMHead(
config.draft_vocab_size,
config.hidden_size,

View File

@@ -137,8 +137,15 @@ class EAGLEWorker(TpModelWorker):
embed, head = self.target_worker.model_runner.model.get_embed_and_head()
if self.speculative_algorithm.is_eagle3():
# EAGLE3 models don't share lm_head
self.draft_model_runner.model.set_embed(embed)
# in most cases EAGLE3 models don't share lm_head,
# but some models (e.g. nvidia/gpt-oss-120b-Eagle3) share it with the target model
if (
hasattr(self.draft_model_runner.model, "load_lm_head_from_target")
and self.draft_model_runner.model.load_lm_head_from_target
):
self.draft_model_runner.model.set_embed_and_head(embed, head)
else:
self.draft_model_runner.model.set_embed(embed)
# grab hot token ids
if self.draft_model_runner.model.hot_token_id is not None: