Increase the thread limit for tp worker managers (#567)

Lianmin Zheng committed 2024-06-26 09:33:45 -07:00 (committed by GitHub)
parent a385ee27bd
commit 2e6e62e156
9 changed files with 148 additions and 84 deletions

View File

@@ -8,7 +8,8 @@ class FSMCache(BaseCache):
     def __init__(self, tokenizer_path, tokenizer_args_dict, enable=True):
         super().__init__(enable=enable)
-        if tokenizer_path.endswith(".json"):
+        if tokenizer_path.endswith(".json") or tokenizer_path.endswith(".model"):
+            # Do not support TiktokenTokenizer or SentencePieceTokenizer
             return
         from importlib.metadata import version
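As an aside, the two chained endswith checks in the new condition can also be written with Python's tuple form; a minimal equivalent sketch (a simplification for illustration, not part of the commit):

    # `str.endswith` accepts a tuple of suffixes, so the condition added
    # above is equivalent to:
    if tokenizer_path.endswith((".json", ".model")):
        # Do not support TiktokenTokenizer or SentencePieceTokenizer
        return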

View File

@@ -88,6 +88,9 @@ def get_tokenizer(
     if tokenizer_name.endswith(".json"):
         return TiktokenTokenizer(tokenizer_name)
 
+    if tokenizer_name.endswith(".model"):
+        return SentencePieceTokenizer(tokenizer_name)
+
     """Gets a tokenizer for the given model name via Huggingface."""
     if is_multimodal_model(tokenizer_name):
         processor = get_processor(
@@ -179,6 +182,7 @@ def get_processor(
 class TiktokenTokenizer:
     def __init__(self, tokenizer_path):
         import tiktoken
+        from jinja2 import Template
 
         PAT_STR_B = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
@@ -216,6 +220,7 @@ class TiktokenTokenizer:
         tokenizer = tiktoken.Encoding(**kwargs)
         tokenizer._default_allowed_special = default_allowed_special or set()
+        tokenizer._default_allowed_special |= {"<|separator|>"}
 
         def encode_patched(
             self,
@@ -241,6 +246,9 @@ class TiktokenTokenizer:
         self.tokenizer = tokenizer
         self.eos_token_id = tokenizer._special_tokens["<|eos|>"]
         self.vocab_size = tokenizer.n_vocab
+        self.chat_template = Template(
+            "{% for message in messages %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'].strip() + '<|separator|>\n\n' }}{% elif message['role'] == 'system' %}{{ 'System: ' + message['content'].strip() + '<|separator|>\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + '<|separator|>\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
+        )
 
     def encode(self, x, add_special_tokens=False):
         return self.tokenizer.encode(x)
@@ -255,7 +263,39 @@ class TiktokenTokenizer:
             batch = [[x] for x in batch]
         return self.tokenizer.decode_batch(batch)
 
     def convert_ids_to_tokens(self, index):
         return self.tokenizer.decode_single_token_bytes(index).decode(
             "utf-8", errors="ignore"
         )
 
+    def apply_chat_template(self, messages, tokenize, add_generation_prompt):
+        ret = self.chat_template.render(
+            messages=messages, add_generation_prompt=add_generation_prompt
+        )
+        return self.encode(ret) if tokenize else ret
+
+
+class SentencePieceTokenizer:
+    def __init__(self, tokenizer_path):
+        import sentencepiece as spm
+        from jinja2 import Template
+
+        tokenizer = spm.SentencePieceProcessor(model_file=tokenizer_path)
+
+        # Convert to HF interface
+        self.tokenizer = tokenizer
+        self.eos_token_id = tokenizer.eos_id()
+        self.vocab_size = tokenizer.vocab_size()
+        self.chat_template = Template(
+            "{% for message in messages %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'].strip() + '<|separator|>\n\n' }}{% elif message['role'] == 'system' %}{{ 'System: ' + message['content'].strip() + '<|separator|>\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + '<|separator|>\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
+        )
+
+    def encode(self, x, add_special_tokens=False):
+        return self.tokenizer.encode(x)
+
+    def decode(self, x):
+        return self.tokenizer.decode(x)
+
+    def batch_decode(
+        self, batch, skip_special_tokens=True, spaces_between_special_tokens=False
+    ):
+        if isinstance(batch[0], int):
+            batch = [[x] for x in batch]
+        return self.tokenizer.decode(batch)
+
+    def apply_chat_template(self, messages, tokenize, add_generation_prompt):
+        ret = self.chat_template.render(
+            messages=messages, add_generation_prompt=add_generation_prompt
+        )
+        return self.encode(ret) if tokenize else ret
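Both wrappers expose the same minimal surface (encode, decode, batch_decode, apply_chat_template), so callers can treat them like a Hugging Face tokenizer. A hedged usage sketch of the new SentencePiece path follows; the .model path is a placeholder, and the sentencepiece and jinja2 packages are assumed to be installed:

    # Sketch: exercising the SentencePieceTokenizer added above.
    tokenizer = SentencePieceTokenizer("/path/to/tokenizer.model")  # placeholder path

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    # With tokenize=False this returns the rendered prompt string:
    # "System: You are a helpful assistant.<|separator|>\n\n
    #  Human: Hello!<|separator|>\n\nAssistant:"
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    ids = tokenizer.encode(prompt)  # list[int] of token ids
    text = tokenizer.decode(ids)    # round-trips back to the prompt text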

View File

@@ -317,19 +317,38 @@ def get_default_config(
     topk: int,
     dtype: Optional[str],
 ) -> Dict[str, int]:
-    config = {
-        'BLOCK_SIZE_M': 64,
-        'BLOCK_SIZE_N': 64,
-        'BLOCK_SIZE_K': 32,
-        'GROUP_SIZE_M': 8
-    }
-    if M <= E:
-        config = {
-            'BLOCK_SIZE_M': 16,
-            'BLOCK_SIZE_N': 32,
-            'BLOCK_SIZE_K': 64,
-            'GROUP_SIZE_M': 1
-        }
+    if dtype == "float8":
+        config = {
+            'BLOCK_SIZE_M': 128,
+            'BLOCK_SIZE_N': 256,
+            'BLOCK_SIZE_K': 128,
+            'GROUP_SIZE_M': 32,
+            "num_warps": 8,
+            "num_stages": 4
+        }
+        if M <= E:
+            config = {
+                'BLOCK_SIZE_M': 64,
+                'BLOCK_SIZE_N': 128,
+                'BLOCK_SIZE_K': 128,
+                'GROUP_SIZE_M': 1,
+                "num_warps": 4,
+                "num_stages": 4
+            }
+    else:
+        config = {
+            'BLOCK_SIZE_M': 64,
+            'BLOCK_SIZE_N': 64,
+            'BLOCK_SIZE_K': 32,
+            'GROUP_SIZE_M': 8
+        }
+        if M <= E:
+            config = {
+                'BLOCK_SIZE_M': 16,
+                'BLOCK_SIZE_N': 32,
+                'BLOCK_SIZE_K': 64,
+                'GROUP_SIZE_M': 1
+            }
     return config
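The new branching gives fp8 GEMMs larger tiles plus explicit num_warps/num_stages while keeping the old defaults in the else branch; in both branches the small-batch case (M <= E, fewer input rows than experts) drops to smaller tiles. A self-contained restatement of the selection logic, for illustration only (the names M and E are inferred from the diff body; this is not the actual kernel code):

    # Illustrative restatement of the config selection added above.
    # M: rows in the activation matrix, E: number of experts (per the diff).
    def pick_default_config(M, E, dtype):
        if dtype == "float8":
            if M <= E:
                return {"BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 128,
                        "GROUP_SIZE_M": 1, "num_warps": 4, "num_stages": 4}
            return {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 128,
                    "GROUP_SIZE_M": 32, "num_warps": 8, "num_stages": 4}
        if M <= E:
            return {"BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 32, "BLOCK_SIZE_K": 64,
                    "GROUP_SIZE_M": 1}
        return {"BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 32,
                "GROUP_SIZE_M": 8}

    assert pick_default_config(4, 8, "float8")["num_warps"] == 4    # small batch, fp8
    assert pick_default_config(4096, 8, None)["GROUP_SIZE_M"] == 8  # large batch, default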

View File

@@ -2,7 +2,7 @@
 import asyncio
 import logging
-import time
+from concurrent.futures import ThreadPoolExecutor
 
 import uvloop
 import zmq
@@ -91,6 +91,7 @@ def start_controller_process(
     pipe_writer.send("init ok")
 
     loop = asyncio.new_event_loop()
+    loop.set_default_executor(ThreadPoolExecutor(max_workers=256))
     asyncio.set_event_loop(loop)
     loop.create_task(controller.loop_for_recv_requests())
 
     try:
@@ -98,4 +99,4 @@ def start_controller_process(
     except Exception:
         logger.error("Exception in ControllerSingle:\n" + get_exception_traceback())
     finally:
-        kill_parent_process()
\ No newline at end of file
+        kill_parent_process()
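This is the change behind the commit title: asyncio's default executor is capped at min(32, os.cpu_count() + 4) worker threads (Python 3.8+), so a controller that issues one blocking run_in_executor call per tensor-parallel worker can stall once that pool saturates; installing a 256-thread pool lifts the cap. A minimal self-contained sketch of the pattern (the blocking function is a stand-in, not sglang code):

    import asyncio
    import time
    from concurrent.futures import ThreadPoolExecutor

    def blocking_call(i):
        # Stand-in for a blocking request to a tensor-parallel worker.
        time.sleep(0.1)
        return i

    async def main():
        loop = asyncio.get_running_loop()
        # Without this, run_in_executor(None, ...) shares a pool of at most
        # min(32, os.cpu_count() + 4) threads; with it, up to 256 calls can
        # block concurrently.
        loop.set_default_executor(ThreadPoolExecutor(max_workers=256))
        results = await asyncio.gather(
            *(loop.run_in_executor(None, blocking_call, i) for i in range(64))
        )
        print(len(results))  # 64

    asyncio.run(main())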

View File

@@ -100,7 +100,7 @@ class ModelTpServer:
         self.max_prefill_tokens = (
             max(
                 self.model_config.context_len,
-                min(self.max_total_num_tokens // 6, 65536),
+                min(self.max_total_num_tokens // 6, 32768),
             )
             if server_args.max_prefill_tokens is None
             else server_args.max_prefill_tokens
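A worked example of the tightened default, using hypothetical values: with max_total_num_tokens = 300000 and context_len = 8192, the prefill budget drops from max(8192, min(50000, 65536)) = 50000 tokens to max(8192, min(50000, 32768)) = 32768. An explicit server_args.max_prefill_tokens still overrides the computed value entirely.

    # Hypothetical numbers showing the effect of lowering the cap 65536 -> 32768.
    context_len = 8192
    max_total_num_tokens = 300_000

    old_budget = max(context_len, min(max_total_num_tokens // 6, 65536))  # 50000
    new_budget = max(context_len, min(max_total_num_tokens // 6, 32768))  # 32768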