Revert "Only stream output on tp rank 0" (#2130)

Lianmin Zheng
2024-11-22 15:46:16 -08:00
committed by GitHub
parent e1b63624d7
commit 66d4859acf
2 changed files with 5 additions and 7 deletions

python/sglang/srt/managers/scheduler.py

@@ -134,8 +134,8 @@ class Scheduler:
             )
         else:
             self.recv_from_tokenizer = None
-            self.send_to_tokenizer = SimpleNamespace(send_pyobj=lambda _: None)
-            self.send_to_detokenizer = SimpleNamespace(send_pyobj=lambda _: None)
+            self.send_to_tokenizer = SimpleNamespace(send_pyobj=lambda x: None)
+            self.send_to_detokenizer = SimpleNamespace(send_pyobj=lambda x: None)
 
         # Init tokenizer
         self.model_config = ModelConfig(
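
The two lambda spellings are behaviorally identical; the revert only restores the file's original text. For context, a minimal standalone sketch of the no-op-sender pattern this hunk touches (the attribute names come from the hunk; the payload and surrounding setup are illustrative):

    from types import SimpleNamespace

    # On ranks that do not own the real inter-process sockets, a
    # SimpleNamespace whose send_pyobj discards its argument lets callers
    # invoke send_pyobj unconditionally, with no `if socket is not None`
    # checks at every call site.
    send_to_tokenizer = SimpleNamespace(send_pyobj=lambda x: None)
    send_to_tokenizer.send_pyobj({"rid": "example"})  # silently dropped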
@@ -1028,8 +1028,7 @@ class Scheduler:
             else:
                 self.tree_cache.cache_unfinished_req(req)
 
-        if self.tp_rank == 0:
-            self.stream_output(batch.reqs)
+        self.stream_output(batch.reqs)
 
     def process_batch_result_decode(self, batch: ScheduleBatch, result):
         logits_output, next_token_ids, bid = result
@@ -1080,8 +1079,7 @@ class Scheduler:
                 torch.cuda.current_stream().synchronize()
                 batch.next_batch_sampling_info.sampling_info_done.set()
 
-        if self.tp_rank == 0:
-            self.stream_output(batch.reqs)
+        self.stream_output(batch.reqs)
 
         self.token_to_kv_pool.free_group_end()
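
Taken together, the two scheduler hunks drop the rank guard, so stream_output runs on every tensor-parallel rank again rather than only on rank 0. A hypothetical sketch of just that control-flow change (only tp_rank and stream_output are taken from the diff; everything else is illustrative):

    class SchedulerSketch:
        def __init__(self, tp_rank: int):
            self.tp_rank = tp_rank

        def stream_output(self, reqs):
            print(f"rank {self.tp_rank}: streaming {len(reqs)} request(s)")

        def process_batch_result(self, reqs):
            # Before the revert, streaming was gated on rank 0:
            #     if self.tp_rank == 0:
            #         self.stream_output(reqs)
            # After the revert, every rank streams:
            self.stream_output(reqs)

    SchedulerSketch(tp_rank=1).process_batch_result(["req-0"])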

python/sglang/srt/model_executor/model_runner.py

@@ -179,7 +179,7 @@ class ModelRunner:
         if self.device == "cuda":
             torch.cuda.set_device(self.gpu_id)
             backend = "nccl"
-        # TODO(liangan1): Just use gloo to bypass the initilization fail
+        # ToDO(liangan1):Just use gloo to bypass the initilization fail
         # Need to use xccl for xpu backend in the future
         elif self.device == "xpu":
             torch.xpu.set_device(self.gpu_id)
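
The surrounding code picks a torch.distributed backend per device; per the restored comments, gloo is a stopgap for XPU until an xccl backend is usable. A minimal sketch of that dispatch, assuming the chosen string is then passed to dist.init_process_group (the gloo value for the XPU branch is inferred from the comments, not shown in the hunk):

    import torch

    def pick_backend(device: str, gpu_id: int) -> str:
        """Return a torch.distributed backend name for the given device."""
        if device == "cuda":
            torch.cuda.set_device(gpu_id)
            return "nccl"
        elif device == "xpu":
            torch.xpu.set_device(gpu_id)
            # gloo as a temporary workaround until xccl is available (per TODO)
            return "gloo"
        raise ValueError(f"unsupported device: {device}")

    # e.g. dist.init_process_group(backend=pick_backend("cuda", 0), ...)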