[Bugfix][LoRA] Fix the issue when enable LoRA + tp + fully_sharded_loras (#6650)

### What this PR does / why we need it?
Fixes issue #6143.

### Does this PR introduce _any_ user-facing change?
Allows starting the server with `--enable-lora`, `--fully-sharded-loras`, and
`--tensor_parallel_size 2` together; a minimal usage sketch is shown below.
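
For illustration, a minimal offline-inference sketch of this configuration. The base model name and adapter path are placeholders, not part of this PR:

```python
# Minimal sketch: LoRA + fully sharded LoRA layers + TP=2 via the offline API.
# Model name and adapter path below are placeholders, not taken from this PR.
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

llm = LLM(
    model="meta-llama/Llama-3.2-1B-Instruct",  # placeholder base model
    enable_lora=True,
    fully_sharded_loras=True,
    tensor_parallel_size=2,
)

outputs = llm.generate(
    ["Write a haiku about autumn."],
    SamplingParams(max_tokens=64),
    # Placeholder adapter path; any compatible LoRA adapter works here.
    lora_request=LoRARequest("my_adapter", 1, "/path/to/lora_adapter"),
)
print(outputs[0].outputs[0].text)
```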

### How was this patch tested?
pytest -sv tests/e2e/multicard/2-cards/test_llama32_lora_tp2.py
- vLLM version: v0.15.0
- vLLM main:
d7e17aaacd

---------

Signed-off-by: paulyu12 <507435917@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
yupeng
2026-03-11 15:43:15 +08:00
committed by GitHub
parent a7f91fce71
commit 830f39dd70
4 changed files with 113 additions and 9 deletions

vllm_ascend/lora/punica_npu.py Normal file → Executable file

@@ -205,7 +205,6 @@ class PunicaWrapperNPU(PunicaWrapperBase):
         y: torch.Tensor,
         x: tuple[torch.Tensor, ...] | torch.Tensor,
         lora_b_stacked: tuple[torch.Tensor, ...],
-        lora_bias_stacked: tuple[torch.Tensor, ...] | None,
         output_slices: tuple[int, ...],
         offset_start: int = 0,
         add_inputs=True,
@@ -217,24 +216,20 @@ class PunicaWrapperNPU(PunicaWrapperBase):
         Semantics:
             for i in range(len(lora_b_stacked)):
                 slice = output_slices[i]
-                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] +
-                    lora_bias_stacked[i]
+                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
                 offset += slice
         Args:
             y (torch.Tensor): Output tensor.
             x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
             lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight
-            lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]):
-                bias's weight
             output_slices (Tuple[int, ...]): Every slice's size
             offset_start (int): The starting position of y, defaults to 0
             add_inputs (bool): Defaults to True.
         """
         y_org = y
         y = y.view(-1, y.shape[-1])
         offset_left = offset_start
-        if lora_bias_stacked is not None:
-            self._apply_bias(self.token_lora_indices, y, output_slices, lora_bias_stacked)
         for slice_idx in range(len(lora_b_stacked)):
             self._apply_expand(
                 y,
@@ -313,7 +308,7 @@ class PunicaWrapperNPU(PunicaWrapperBase):
             torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device) for _ in range(len(output_slices))
         )
         self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs)
-        self.add_expand(y, buffer, lora_b_stacked, None, output_slices, add_inputs=True, **kwargs)
+        self.add_expand(y, buffer, lora_b_stacked, output_slices, add_inputs=True, **kwargs)

     def add_lora_logits(
         self,
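
For reference, a small self-contained sketch of the semantics described in the updated `add_expand` docstring above. Shapes are toy values and this uses plain `torch` on CPU rather than the NPU kernel path:

```python
# Toy illustration of the add_expand semantics from the docstring above:
#   for each slice i: y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
# Shapes and values are made up; this is not the real vLLM / vllm-ascend code.
import torch

num_tokens = 4
rank = 8
output_slices = (16, 32)          # every slice's size
y = torch.zeros(num_tokens, sum(output_slices))
x = tuple(torch.randn(num_tokens, rank) for _ in output_slices)
lora_b_stacked = tuple(torch.randn(rank, s) for s in output_slices)

offset = 0
for i, slice_size in enumerate(output_slices):
    # Expand each low-rank activation into its output slice and accumulate.
    y[:, offset:offset + slice_size] += x[i] @ lora_b_stacked[i]
    offset += slice_size

print(y.shape)  # torch.Size([4, 48])
```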