[Feature] adapt to uva buffer and main2main (#6657)
### What this PR does / why we need it?
The vLLM model runner v2 uses a UVA (unified virtual addressing) buffer to
prepare input data, but the NPU does not support UVA yet, so this PR
implements a UVAWrapper class to mimic the GPU's UVA backend. In addition,
this PR makes some modifications to adapt to the newer vLLM main branch.
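For illustration, a minimal sketch of what such a wrapper could look like, assuming a host-staging-plus-device-mirror design; the class and method names below are hypothetical, and the actual UVAWrapper in this PR may be structured differently:

```python
import torch


class UVAWrapperSketch:
    """Hypothetical stand-in for a UVA buffer on devices without UVA.

    On GPUs, a UVA buffer is pinned host memory that device kernels can
    address directly. NPUs lack that mapping, so this sketch pairs a CPU
    staging tensor (optionally pinned, which requires an accelerator
    runtime) with a device-side mirror that is synced explicitly before
    kernels read it.
    """

    def __init__(self, size: int, dtype: torch.dtype, device: str,
                 pin_memory: bool = False) -> None:
        # Host-side buffer for cheap CPU writes during input preparation.
        self.cpu = torch.zeros(size, dtype=dtype, pin_memory=pin_memory)
        # Device-side mirror that kernels actually read.
        self.dev = torch.zeros(size, dtype=dtype, device=device)

    def copy_to_device(self, n: int) -> torch.Tensor:
        # An explicit host-to-device copy stands in for the implicit
        # access that a true UVA mapping would provide.
        self.dev[:n].copy_(self.cpu[:n], non_blocking=True)
        return self.dev
```

The model runner would write request data into the CPU view and call `copy_to_device()` before launching kernels, paying an explicit H2D copy where a true UVA buffer would be read directly.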
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM main: 13397841ab
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
```diff
@@ -14,22 +14,25 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
 
+import numpy as np
 import torch
-from vllm.v1.sample.metadata import SamplingMetadata
 from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
+from vllm.v1.worker.gpu.sample.gumbel import apply_temperature
 from vllm.v1.worker.gpu.sample.min_p import apply_min_p
 from vllm.v1.worker.gpu.sample.sampler import Sampler
 
 from vllm_ascend.worker.v2.sample.gumbel import gumbel_sample
-from vllm_ascend.worker.v2.sample.penalties import apply_penalties_and_temperature
 
 
 class AscendSampler(Sampler):
     def sample(
         self,
         logits: torch.Tensor,
-        sampling_metadata: SamplingMetadata,
+        idx_mapping: torch.Tensor,
+        idx_mapping_np: np.ndarray,
+        pos: torch.Tensor,
+        input_ids: torch.Tensor,
+        expanded_local_pos: torch.Tensor,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """Override sample method because we need to override triton operators
         called in the method.
@@ -37,19 +40,42 @@ class AscendSampler(Sampler):
         # Copy logits to a new FP32 tensor.
         logits = torch.empty_like(logits, dtype=torch.float32).copy_(logits)
 
-        # Apply penalties and temperature in place.
-        apply_penalties_and_temperature(logits, sampling_metadata)
-        # Apply min_p in place.
-        if sampling_metadata.min_p is not None:
-            apply_min_p(logits, sampling_metadata.min_p)
-        # Apply top_k and/or top_p. This might return a new tensor.
-        logits = apply_top_k_top_p(logits, sampling_metadata.top_k, sampling_metadata.top_p)
+        # Apply logit bias (e.g., allowed_token_ids, min_tokens) in place.
+        self.logit_bias_state.apply_logit_bias(logits, idx_mapping, idx_mapping_np, pos)
+
+        # Apply penalties in place.
+        self.penalties_state.apply_penalties(
+            logits,
+            idx_mapping,
+            idx_mapping_np,
+            input_ids,
+            expanded_local_pos,
+            self.num_speculative_tokens,
+        )
+
+        # Apply temperature in place.
+        apply_temperature(logits, idx_mapping, self.sampling_states.temperature.gpu)
+
+        # Apply min_p in place if any request has a non-zero min_p.
+        do_min_p = self.sampling_states.do_min_p(idx_mapping_np)
+        if do_min_p:
+            apply_min_p(logits, idx_mapping, self.sampling_states.min_p.gpu)
+
+        # Apply top_k and/or top_p. This might return a new tensor.
+        do_top_k = self.sampling_states.do_top_k(idx_mapping_np)
+        top_k = self.sampling_states.top_k.gpu[idx_mapping] if do_top_k else None
+        do_top_p = self.sampling_states.do_top_p(idx_mapping_np)
+        top_p = self.sampling_states.top_p.gpu[idx_mapping] if do_top_p else None
+        if do_top_k or do_top_p:
+            logits = apply_top_k_top_p(logits, top_k, top_p)
 
         # Sample the next token.
         sampled = gumbel_sample(
             logits,
-            sampling_metadata.temperature,
-            sampling_metadata.seeds,
-            sampling_metadata.pos,
+            idx_mapping,
+            self.sampling_states.temperature.gpu,
+            self.sampling_states.seeds.gpu,
+            pos,
+            apply_temperature=False,
         )
         return sampled, logits
```
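As background on the `gumbel_sample` call above: `apply_temperature=False` is passed because temperature has already been applied to `logits` in place earlier in the method. Samplers of this shape are typically built on the Gumbel-max trick, sketched below; this is illustrative only, assuming a plain PyTorch implementation rather than vLLM's seeded, fused one:

```python
import torch


def gumbel_max_sample(logits: torch.Tensor,
                      generator: torch.Generator | None = None) -> torch.Tensor:
    # Draw u ~ Uniform(0, 1) and transform it into standard Gumbel noise
    # g = -log(-log(u)); clamp away u == 0 to avoid -inf from log(0).
    u = torch.rand(logits.shape, device=logits.device, generator=generator)
    u = u.clamp_min(torch.finfo(u.dtype).tiny)
    gumbel = -torch.log(-torch.log(u))
    # argmax over (logits + g) is an exact sample from softmax(logits),
    # so no explicit softmax or multinomial call is needed.
    return torch.argmax(logits + gumbel, dim=-1)
```

Deriving the noise from per-request seeds and token positions (as the `seeds` and `pos` arguments suggest) makes seeded requests reproducible without any global RNG state.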