### What this PR does / why we need it? Fixed an incorrect class attribute assignment by converting it to an instance attribute assignment. This ensures that reorder_batch_threshold applies only to the current instance, avoiding global state pollution and conflicts between multiple instances. Backport of #7586 Signed-off-by: LookAround <lixushi@huawei.com> Signed-off-by: Mengqing Cao <cmq0113@163.com> Co-authored-by: LookAround0301 <lixushi@huawei.com>
This commit is contained in:
@@ -15,8 +15,6 @@
|
||||
# This file is a part of the vllm-ascend project.
|
||||
#
|
||||
|
||||
from typing import ClassVar
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
@@ -61,11 +59,6 @@ class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
|
||||
Extends AscendAttentionMetadataBuilder with PCP/DCP metadata handling.
|
||||
"""
|
||||
|
||||
# Does this backend/builder reorder the batch?
|
||||
# If not, set this to None. Otherwise set it to the query
|
||||
# length that will be pulled into the front of the batch.
|
||||
reorder_batch_threshold: ClassVar[int] = 1
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
kv_cache_spec: AttentionSpec,
|
||||
|
||||
Reference in New Issue
Block a user