Fix docs for clip max new tokens (#9082)
@@ -59,9 +59,7 @@ if TYPE_CHECKING:
     from sglang.srt.managers.schedule_batch import Req
     from sglang.srt.managers.scheduler import Scheduler
 
-DECODE_CLIP_MAX_NEW_TOKEN = get_int_env_var(
-    "SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION", 4096
-)
+CLIP_MAX_NEW_TOKEN = get_int_env_var("SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION", 4096)
 
 
 class DecodeReqToTokenPool:
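For context, a minimal sketch of the arithmetic this constant drives, using only the 4096 default from the hunk above; the helper name estimated_decode_tokens is illustrative and not an sglang function:

# Minimal sketch (not sglang code): the scheduler budgets a request's decode side
# with the clipped value rather than the raw max_new_tokens.
CLIP_MAX_NEW_TOKEN = 4096  # default of SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION

def estimated_decode_tokens(origin_input_len: int, max_new_tokens: int) -> int:
    """Worst-case token reservation for one request, using the clipped estimate."""
    return origin_input_len + min(max_new_tokens, CLIP_MAX_NEW_TOKEN)

# A request asking for 131072 new tokens is budgeted for only 4096 of them.
print(estimated_decode_tokens(origin_input_len=1024, max_new_tokens=131072))  # 5120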
@@ -390,7 +388,7 @@ class DecodePreallocQueue:
                 origin_input_len
                 + min(
                     decode_req.req.sampling_params.max_new_tokens,
-                    DECODE_CLIP_MAX_NEW_TOKEN,
+                    CLIP_MAX_NEW_TOKEN,
                 )
                 - retractable_tokens,
             )
@@ -440,7 +438,7 @@ class DecodePreallocQueue:
         need_space_for_single_req = (
             max(
                 [
-                    min(x.sampling_params.max_new_tokens, DECODE_CLIP_MAX_NEW_TOKEN)
+                    min(x.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKEN)
                     + len(x.origin_input_ids)
                     - retractable_tokens
                     for x in self.scheduler.running_batch.reqs

@@ -36,7 +36,7 @@ if TYPE_CHECKING:
 # This can prevent the server from being too conservative.
 # Note that this only clips the estimation in the scheduler but does not change the stop
 # condition. The request can still generate tokens until it hits the unclipped max_new_tokens.
-CLIP_MAX_NEW_TOKENS_ESTIMATION = int(
+CLIP_MAX_NEW_TOKENS = int(
     os.environ.get("SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION", "4096")
 )
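The comment kept in the hunk above draws a distinction that is easy to miss: only the scheduler's estimate is clipped, while the stop condition still uses the request's unclipped max_new_tokens. A minimal sketch of that split, with illustrative function names that are not sglang APIs:

# Minimal sketch of the documented behavior (illustrative names, not sglang APIs).
CLIP_MAX_NEW_TOKENS = 4096  # default of SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION

def budgeted_new_tokens(max_new_tokens: int) -> int:
    # Scheduler side: only the estimate is clipped, so a single request with a huge
    # max_new_tokens does not make admission of other requests overly conservative.
    return min(max_new_tokens, CLIP_MAX_NEW_TOKENS)

def should_stop(num_generated_tokens: int, max_new_tokens: int) -> bool:
    # Stop condition: compares against the unclipped max_new_tokens, so generation
    # can continue well past the clipped 4096-token estimate.
    return num_generated_tokens >= max_new_tokens

assert budgeted_new_tokens(100_000) == 4096
assert should_stop(5_000, 100_000) is False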
@@ -305,7 +305,7 @@ class PrefillAdder:
                 [
                     min(
                         (r.sampling_params.max_new_tokens - len(r.output_ids)),
-                        CLIP_MAX_NEW_TOKENS_ESTIMATION,
+                        CLIP_MAX_NEW_TOKENS,
                     )
                     * self.new_token_ratio
                     for r in running_batch.reqs
@@ -388,7 +388,7 @@ class PrefillAdder:
             0,
             req.extend_input_len,
             (
-                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS_ESTIMATION)
+                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS)
                 if not truncated
                 else 0
             ),
@@ -477,7 +477,7 @@ class PrefillAdder:
             self._update_prefill_budget(
                 0,
                 req.extend_input_len,
-                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS_ESTIMATION),
+                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS),
             )
         else:
             if self.rem_chunk_tokens == 0:
@@ -499,7 +499,7 @@ class PrefillAdder:
             return self.add_one_req_ignore_eos(req, has_chunked_req)
 
         total_tokens = req.extend_input_len + min(
-            req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS_ESTIMATION
+            req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS
         )
 
         # adjusting the input_tokens based on host_hit_length and page_size
@@ -544,7 +544,7 @@ class PrefillAdder:
                 input_tokens,
                 min(
                     req.sampling_params.max_new_tokens,
-                    CLIP_MAX_NEW_TOKENS_ESTIMATION,
+                    CLIP_MAX_NEW_TOKENS,
                 ),
             )
         else:
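Both files read the same environment variable through module-level constants, so the 4096 default can be changed without touching code. A hedged usage sketch; only the variable name and its default come from this diff:

# Raise the estimation clip to 8192 before sglang modules are imported,
# since the constants above are evaluated at import time.
import os

os.environ["SGLANG_CLIP_MAX_NEW_TOKENS_ESTIMATION"] = "8192"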