diff --git a/python/sglang/srt/server_args.py b/python/sglang/srt/server_args.py
index 939e502fc..6c9d7a8d1 100644
--- a/python/sglang/srt/server_args.py
+++ b/python/sglang/srt/server_args.py
@@ -1406,6 +1406,26 @@ class ServerArgs:
                 "Please choose one tokenizer batching approach."
             )
 
+        if self.skip_tokenizer_init:
+            if self.tokenizer_worker_num != 1:
+                logger.warning(
+                    "skip_tokenizer_init=True disables tokenizer workers; forcing tokenizer_worker_num=1 "
+                    f"(requested {self.tokenizer_worker_num})."
+                )
+                self.tokenizer_worker_num = 1
+
+            if self.enable_tokenizer_batch_encode:
+                logger.warning(
+                    "skip_tokenizer_init=True ignores --enable-tokenizer-batch-encode; disabling it."
+                )
+                self.enable_tokenizer_batch_encode = False
+
+            if self.enable_dynamic_batch_tokenizer:
+                logger.warning(
+                    "skip_tokenizer_init=True ignores --enable-dynamic-batch-tokenizer; disabling it."
+                )
+                self.enable_dynamic_batch_tokenizer = False
+
     def _handle_environment_variables(self):
         os.environ["SGLANG_ENABLE_TORCH_COMPILE"] = (
             "1" if self.enable_torch_compile else "0"
@@ -3279,7 +3299,6 @@ class ServerArgs:
             " Please manually install torch 2.6.x."
         )
 
-        # Check multi tokenizer
         assert self.tokenizer_worker_num > 0, "Tokenizer worker num must >= 1"
         self.validate_buckets_rule(
             "--prompt-tokens-buckets", self.prompt_tokens_buckets