Initialize the project; model provided by the ModelHub XC community
Model: tiny-random/qwen3-moe Source: Original Platform
README.md (new file, +111 lines)
---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This tiny model is for debugging. It is randomly initialized, using a config adapted from [Qwen/Qwen3-235B-A22B](https://huggingface.co/Qwen/Qwen3-235B-A22B).

### Example usage:

```python
from transformers import pipeline

model_id = "tiny-random/qwen3-moe"
pipe = pipeline(
    "text-generation", model=model_id, device="cuda",
    trust_remote_code=True, max_new_tokens=3,
)
print(pipe("Hello World!"))

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=True  # Switches between thinking and non-thinking modes. Default is True.
)
print(text)

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=128
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

# Split the output at the last </think> token to separate the "thinking"
# segment from the final answer.
try:
    # rindex finding 151668 (</think>)
    index = len(output_ids) - output_ids[::-1].index(151668)
except ValueError:
    index = 0

thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
print("thinking content:", thinking_content)
print("content:", content)
```
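
Since the weights are random, any generated text is meaningless; the checkpoint is only useful for exercising code paths. A minimal sketch (assuming the same `model_id` as above) to confirm the model is debugging-sized by counting its parameters:

```python
from transformers import AutoModelForCausalLM

# Hypothetical sanity check: a Qwen3-235B-A22B-shaped config shrunk to a
# 64-dim hidden size and 2 layers should be millions of parameters, not billions.
model = AutoModelForCausalLM.from_pretrained("tiny-random/qwen3-moe", trust_remote_code=True)
print(f"total parameters: {sum(p.numel() for p in model.parameters()):,}")
```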

### Code to create this repo:

```python
import torch

from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    pipeline,
    set_seed,
)

source_model_id = "Qwen/Qwen3-235B-A22B"
save_folder = "/tmp/tiny-random/qwen3-moe"

tokenizer = AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True,
)
tokenizer.save_pretrained(save_folder)

# Shrink the original config down to a debugging-sized model.
config = AutoConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
config._name_or_path = source_model_id
config.hidden_size = 64
config.intermediate_size = 128
config.moe_intermediate_size = 128
config.head_dim = 32
config.decoder_sparse_step = 2  # layer0=mlp, layer1=moe
config.num_experts = 8
config.num_experts_per_tok = 2
config.num_key_value_heads = 1
config.num_attention_heads = 2
config.num_hidden_layers = 2
config.max_window_layers = 1
config.tie_word_embeddings = True

model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)

# Randomly initialize every parameter, with a fixed seed for reproducibility.
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.5)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
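
The script above imports `pipeline` but never calls it; a plausible final step (a minimal sketch reusing `save_folder` from the script, not part of the original) is a quick generation smoke test before uploading:

```python
# Hypothetical smoke test, assuming the script above has just run:
# reload the saved checkpoint and generate a few tokens.
pipe = pipeline(
    "text-generation", model=save_folder,
    trust_remote_code=True, max_new_tokens=3,
)
print(pipe("Hello"))  # gibberish is expected; the weights are random
```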