[bugfix] Fix 'ValueError: Duplicate layer name' when matmul_and_reduce is enabled (#5280)

### What this PR does / why we need it?
When matmul_and_reduce is enabled, every layer that reaches the op must carry
its own prefix attribute. In some models, however, the prefix is not passed
through correctly, so multiple layers end up registered under the same name
and the service fails to start with `ValueError: Duplicate layer name`. As a
workaround, this PR stores a collision-free `unique_prefix` on
`SequenceRowParallelOp` and passes it to `matmul_and_reduce` instead of the
raw prefix; the underlying prefix propagation will be fixed in vLLM itself in
the future.
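
The failure mode can be illustrated with a minimal sketch (the registry and
function names below are hypothetical, not vLLM's actual API): custom ops
such as `matmul_and_reduce` resolve their target layer through a name-keyed
registry, so two layers registered under the same (for example, empty) prefix
collide at registration time.

```python
# Hypothetical name-keyed layer registry; vLLM's real registration
# machinery differs, but rejects name collisions the same way.
_LAYER_REGISTRY: dict[str, object] = {}

def register_layer(name: str, layer: object) -> None:
    if name in _LAYER_REGISTRY:
        # This is the error seen at service startup.
        raise ValueError(f"Duplicate layer name: {name}")
    _LAYER_REGISTRY[name] = layer

register_layer("model.layers.0.mlp.down_proj", object())  # ok
register_layer("", object())  # ok: first layer with an empty prefix
register_layer("", object())  # raises ValueError: Duplicate layer name
```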

- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
---------
Signed-off-by: Wang Kunpeng <1289706727@qq.com>


```diff
@@ -484,6 +484,10 @@ class SequenceColumnParallelOp(CustomColumnParallelOp):

 class SequenceRowParallelOp(CustomRowParallelOp):

+    def __init__(self, layer):
+        super().__init__(layer)
+        self.unique_prefix = None
+
     def apply_impl(
         self, input_: torch.Tensor
     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
```
```diff
@@ -509,7 +513,7 @@ class SequenceRowParallelOp(CustomRowParallelOp):
                                                       bias=bias_)
         else:
             output = torch.ops.vllm.matmul_and_reduce(input_parallel,
-                                                      self.prefix)
+                                                      self.unique_prefix)
         output_bias = self.bias if self.skip_bias_add else None
         return output, output_bias
```
```diff
@@ -602,6 +606,7 @@ class SequenceRowParallelOp(CustomRowParallelOp):
         super().update_attrs()
         self.input_is_parallel = self.layer.input_is_parallel
         self.reduce_results = self.layer.reduce_results
+        self.unique_prefix = self.layer.unique_prefix


 def _get_column_parallel_op(
```
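
For context, here is one hedged sketch of how a collision-free
`unique_prefix` might be derived when the model-supplied prefix is missing or
duplicated. The PR's actual generation logic lives in the other changed
files and may differ; `make_unique_prefix` and its parameters are
hypothetical.

```python
import itertools

# Hypothetical helper: monotonically increasing suffix source.
_suffix = itertools.count()

def make_unique_prefix(prefix: str, seen: set[str]) -> str:
    # Keep the model-supplied prefix when it is non-empty and unseen;
    # otherwise append a counter so registration cannot collide.
    if prefix and prefix not in seen:
        unique = prefix
    else:
        unique = f"{prefix or 'layer'}.{next(_suffix)}"
    seen.add(unique)
    return unique

seen: set[str] = set()
print(make_unique_prefix("mlp.down_proj", seen))  # mlp.down_proj
print(make_unique_prefix("mlp.down_proj", seen))  # mlp.down_proj.0
print(make_unique_prefix("", seen))               # layer.1
```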