Disable all two stream overlap on amd (#6475)
@@ -76,13 +76,12 @@ from sglang.srt.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 from sglang.srt.managers.expert_distribution import (
-    ExpertDistributionRecorder,
     get_global_expert_distribution_recorder,
 )
 from sglang.srt.managers.expert_location import ModelConfigForExpertLocation
 from sglang.srt.managers.expert_location_dispatch import ExpertLocationDispatchInfo
 from sglang.srt.managers.schedule_batch import global_server_args_dict
-from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
+from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_loader.weight_utils import default_weight_loader
 from sglang.srt.operations import execute_operations
 from sglang.srt.operations_strategy import compute_layer_operations
@@ -1321,8 +1320,7 @@ class DeepseekV2Model(nn.Module):
             config.hidden_size,
             enable_tp=not global_server_args_dict["enable_dp_attention"],
         )
-        # TODO(haishaw): multi-stream performance on ROCm
-        self.alt_stream = None if _is_hip else torch.cuda.Stream()
+        self.alt_stream = torch.cuda.Stream() if _is_cuda else None
         self.layers = nn.ModuleList(
             [
                 DeepseekV2DecoderLayer(
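The functional change is in the second hunk: the alternate stream that DeepseekV2Model uses for two-stream overlap is now created only when _is_cuda is true, so ROCm builds (where _is_cuda is false) always take the single-stream path, replacing the earlier _is_hip special case and its TODO. The first hunk drops imports (ForwardMode, ExpertDistributionRecorder) that the overlap path presumably no longer needs. Below is a minimal sketch of the gating pattern, not the commit's actual code: the TwoStreamBlock class, its two branches, and the re-derived _is_cuda/_is_hip probes are illustrative assumptions.

import torch

# Hypothetical stand-ins for sglang's platform flags (the real ones come from
# helpers in sglang.srt.utils); defined here only to make the sketch self-contained.
_is_cuda = torch.cuda.is_available() and torch.version.cuda is not None
_is_hip = torch.cuda.is_available() and torch.version.hip is not None


class TwoStreamBlock:
    """Illustrative module showing why gating stream creation on _is_cuda
    disables overlap everywhere else, including ROCm."""

    def __init__(self):
        # The commit's pattern: an alternate stream exists only on NVIDIA CUDA;
        # on ROCm (_is_hip) and CPU this stays None, so overlap is disabled.
        self.alt_stream = torch.cuda.Stream() if _is_cuda else None

    def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        if self.alt_stream is not None:
            # Two-stream overlap: launch the second branch on alt_stream so its
            # kernels can run concurrently with the first branch's kernels.
            main = torch.cuda.current_stream()
            self.alt_stream.wait_stream(main)  # order alt_stream after prior work
            out_a = a @ a                      # branch 1 on the default stream
            with torch.cuda.stream(self.alt_stream):
                out_b = b @ b                  # branch 2 on the alternate stream
            main.wait_stream(self.alt_stream)  # rejoin before consuming out_b
        else:
            # Single-stream fallback, now always taken on AMD: run sequentially.
            out_a = a @ a
            out_b = b @ b
        return out_a + out_b

Note that torch.cuda.Stream() is available on ROCm builds of PyTorch through HIP, and the removed line (None if _is_hip else torch.cuda.Stream()) already special-cased AMD, so the new guard reads as a performance policy choice for AMD, per the commit title, rather than an API limitation.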