From a36e3da78e7024ed8d98cea0e0b5486ee6396656 Mon Sep 17 00:00:00 2001
From: Li Wang
Date: Thu, 9 Oct 2025 14:10:57 +0800
Subject: [PATCH] [Misc] Drop 0102 related lines (#3323)

### What this PR does / why we need it?
Since https://github.com/vllm-project/vllm-ascend/pull/3284 was merged, we can
drop the extra code that was previously kept for version compatibility.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
- vLLM version: v0.11.0

Signed-off-by: wangli
---
 vllm_ascend/ops/vocab_parallel_embedding.py | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/vllm_ascend/ops/vocab_parallel_embedding.py b/vllm_ascend/ops/vocab_parallel_embedding.py
index fe7ee51..0a7d7ef 100644
--- a/vllm_ascend/ops/vocab_parallel_embedding.py
+++ b/vllm_ascend/ops/vocab_parallel_embedding.py
@@ -253,16 +253,3 @@ class AscendLogitsProcessor(LogitsProcessor):
             logits = logits[..., :self.org_vocab_size]
         return logits
-
-    def forward(
-        self,
-        lm_head: VocabParallelEmbedding,
-        hidden_states: torch.Tensor,
-        # keep this for version compatibility
-        sampling_metadata=None,  # type: ignore
-        embedding_bias: Optional[torch.Tensor] = None,
-    ) -> Optional[torch.Tensor]:
-        return LogitsProcessor.forward(self,
-                                       lm_head,
-                                       hidden_states,
-                                       embedding_bias=embedding_bias)
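
For context, below is a minimal, self-contained sketch of the keyword-compatibility shim pattern that this patch deletes. The class names (`BaseProcessor`, `CompatProcessor`) and the toy arithmetic are hypothetical stand-ins, not the real vLLM API; the actual removed override delegated to `LogitsProcessor.forward` while swallowing the legacy `sampling_metadata` keyword:

```python
from typing import Optional


# Hypothetical stand-in for vLLM's LogitsProcessor (illustrative only).
class BaseProcessor:
    def forward(self, hidden_states: float,
                bias: Optional[float] = None) -> float:
        # The "new" upstream signature: no sampling_metadata parameter.
        return hidden_states if bias is None else hidden_states + bias


# Hypothetical stand-in for AscendLogitsProcessor before this patch.
class CompatProcessor(BaseProcessor):
    def forward(self, hidden_states: float,
                sampling_metadata=None,  # accepted only for old callers
                bias: Optional[float] = None) -> float:
        # Swallow the legacy keyword, then delegate to the parent.
        return BaseProcessor.forward(self, hidden_states, bias=bias)


# An old-style call site keeps working because the shim absorbs the kwarg.
print(CompatProcessor().forward(1.0, sampling_metadata=object(), bias=0.5))
```

Once every supported vLLM version stopped passing `sampling_metadata` (see PR #3284 above), the override became dead code, so the subclass can simply inherit `forward` from its parent, which is exactly what this patch does.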