Revert "Add simple CPU offloading support" (#2252)

We'll re-add the commit to correctly acknowledge Kaichao's authorship.
Author: Ying Sheng
Date: 2024-11-28 23:36:55 -08:00
Committed by: GitHub
Parent: 4f2ee48ed1
Commit: 4057ea82c9
9 changed files with 29 additions and 173 deletions
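For context on what is being reverted: the hunks below drop the sglang.srt.utils.make_layers helper from five models (Gemma2, Llama, OLMo, OLMoE, Qwen2) and restore direct nn.ModuleList construction. The helper's definition is not part of this diff, so the following is only a minimal sketch of what a make_layers-style helper might look like, inferred from the call sites below; its body, its prefix handling, and the offloading comment are assumptions, not sglang's actual implementation.

# Hypothetical sketch only: the real sglang.srt.utils.make_layers removed by
# this revert is not shown in the diff, so its exact behavior is an assumption.
# The signature mirrors the call sites: make_layers(num_layers, layer_fn, prefix).
from typing import Callable

import torch.nn as nn


def make_layers(
    num_hidden_layers: int,
    layer_fn: Callable[[int, str], nn.Module],
    prefix: str = "",
) -> nn.ModuleList:
    # Centralizing layer construction in one helper gives a single place to
    # wrap layers (the reverted commit's "simple CPU offloading" could plausibly
    # hook in here, though that is not shown in this diff) instead of repeating
    # the logic in every model's __init__.
    return nn.ModuleList(
        layer_fn(idx, prefix=f"{prefix}.{idx}" if prefix else str(idx))
        for idx in range(num_hidden_layers)
    )

After the revert, each model builds its nn.ModuleList inline again, as the hunks below show.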


@@ -38,7 +38,6 @@ from sglang.srt.layers.quantization.base_config import QuantizationConfig
 from sglang.srt.layers.radix_attention import RadixAttention
 from sglang.srt.layers.vocab_parallel_embedding import VocabParallelEmbedding
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.utils import make_layers
 # Aligned with HF's implementation, using sliding window inclusive with the last token
@@ -268,15 +267,11 @@ class Gemma2Model(nn.Module):
             config.vocab_size,
             config.hidden_size,
         )
-        self.layers = make_layers(
-            config.num_hidden_layers,
-            lambda idx, prefix: Gemma2DecoderLayer(
-                layer_id=idx,
-                config=config,
-                cache_config=cache_config,
-                quant_config=quant_config,
-            ),
-            prefix="",
+        self.layers = nn.ModuleList(
+            [
+                Gemma2DecoderLayer(layer_id, config, cache_config, quant_config)
+                for layer_id in range(config.num_hidden_layers)
+            ]
         )
         self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)


@@ -43,7 +43,6 @@ from sglang.srt.layers.vocab_parallel_embedding import (
 )
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.utils import make_layers
 class LlamaMLP(nn.Module):
@@ -256,12 +255,13 @@ class LlamaModel(nn.Module):
             config.vocab_size,
             config.hidden_size,
         )
-        self.layers = make_layers(
-            config.num_hidden_layers,
-            lambda idx, prefix: LlamaDecoderLayer(
-                config=config, quant_config=quant_config, layer_id=idx, prefix=prefix
-            ),
-            prefix="model.layers",
+        self.layers = nn.ModuleList(
+            [
+                LlamaDecoderLayer(
+                    config, i, quant_config=quant_config, prefix=f"model.layers.{i}"
+                )
+                for i in range(config.num_hidden_layers)
+            ]
         )
         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)


@@ -38,7 +38,6 @@ from sglang.srt.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.utils import make_layers
 class OlmoAttention(nn.Module):
@@ -221,13 +220,11 @@ class OlmoModel(nn.Module):
         self.embed_tokens = VocabParallelEmbedding(
             config.vocab_size, config.hidden_size
         )
-        self.layers = make_layers(
-            config.num_hidden_layers,
-            lambda idx, prefix: OlmoDecoderLayer(
-                layer_id=idx,
-                config=config,
-                quant_config=quant_config,
-            ),
+        self.layers = nn.ModuleList(
+            [
+                OlmoDecoderLayer(config, layer_id, quant_config)
+                for layer_id in range(config.num_hidden_layers)
+            ]
         )
         self.norm = nn.LayerNorm(
             config.hidden_size, elementwise_affine=False, bias=False


@@ -48,7 +48,6 @@ from sglang.srt.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.utils import make_layers
 class OlmoeMoE(nn.Module):
@@ -262,13 +261,11 @@ class OlmoeModel(nn.Module):
             config.vocab_size,
             config.hidden_size,
         )
-        self.layers = make_layers(
-            config.num_hidden_layers,
-            lambda idx, prefix: OlmoeDecoderLayer(
-                config=config,
-                quant_config=quant_config,
-                layer_id=idx,
-            ),
+        self.layers = nn.ModuleList(
+            [
+                OlmoeDecoderLayer(config, layer_id, quant_config=quant_config)
+                for layer_id in range(config.num_hidden_layers)
+            ]
         )
         self.norm = RMSNorm(config.hidden_size, eps=1e-5)


@@ -40,7 +40,6 @@ from sglang.srt.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.utils import make_layers
 Qwen2Config = None
@@ -231,13 +230,11 @@ class Qwen2Model(nn.Module):
             config.vocab_size,
             config.hidden_size,
         )
-        self.layers = make_layers(
-            config.num_hidden_layers,
-            lambda idx, prefix: Qwen2DecoderLayer(
-                layer_id=idx,
-                config=config,
-                quant_config=quant_config,
-            ),
+        self.layers = nn.ModuleList(
+            [
+                Qwen2DecoderLayer(config, i, quant_config=quant_config)
+                for i in range(config.num_hidden_layers)
+            ]
         )
         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)