Update modeling file to the newest version

commit b7229bee87
parent b19e3ae77b
Author: ai-modelscope
Date: 2025-02-26 21:05:04 +08:00


@@ -92,14 +92,26 @@ All performance is based on greedy decoding with COT. We notice that the perform
 # Inference
+## LMDeploy
+We suggest using [LMDeploy](https://github.com/InternLM/LMDeploy) (>= 0.2.1) for inference.
 ```python
-from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
-import torch
-model_dir = snapshot_download("Shanghai_AI_Laboratory/internlm2-math-20b")
-tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True)
+from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
+backend_config = TurbomindEngineConfig(model_name='internlm2-chat-7b', tp=1, cache_max_entry_count=0.3)
+chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b', system='', eosys='', meta_instruction='')
+pipe = pipeline(model_path='internlm/internlm2-math-7b', chat_template_config=chat_template, backend_config=backend_config)
+problem = '1+1='
+result = pipe([problem], request_output_len=1024, top_k=1)
+```
+## Huggingface
+```python
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-math-20b", trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
-model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-math-20b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
 model = model.eval()
 response, history = model.chat(tokenizer, "1+1=", history=[], meta_instruction="")
 print(response)
 ```
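
A note on consuming the LMDeploy output: the diff stops at `result = pipe(...)`, so here is a minimal self-contained sketch of reading it back. It assumes, per LMDeploy's API, that calling the pipeline on a list of prompts returns one `Response` object per prompt with the generated text in its `.text` attribute; the second prompt is a made-up example.

```python
# Minimal sketch (not part of the commit): LMDeploy's pipeline is assumed to
# return one Response per prompt, with the generated string in `.text`.
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig

backend_config = TurbomindEngineConfig(model_name='internlm2-chat-7b', tp=1, cache_max_entry_count=0.3)
chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b', system='', eosys='', meta_instruction='')
pipe = pipeline(model_path='internlm/internlm2-math-7b',
                chat_template_config=chat_template,
                backend_config=backend_config)

problems = ['1+1=', 'Find the roots of x^2-2x+1=0.']  # pass several prompts at once
results = pipe(problems, request_output_len=1024, top_k=1)
for problem, res in zip(problems, results):
    print(problem, '->', res.text)
```

Passing a list of prompts lets the TurboMind engine batch the requests, which is usually faster than looping over single prompts.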
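For the Huggingface path, a hedged streaming sketch: InternLM2 model cards typically expose a `stream_chat` generator alongside the `chat` helper shown above, yielding the cumulative response as it is generated. Treat the method name and signature as assumptions here; the diff itself only shows `chat`.

```python
# Streaming sketch, assuming the remote code exposes `stream_chat`
# (a generator of (cumulative_response, history) tuples) next to `chat`.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-math-20b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-math-20b", trust_remote_code=True, torch_dtype=torch.float16
).cuda().eval()

printed = 0
for response, history in model.stream_chat(tokenizer, "1+1=", history=[]):
    print(response[printed:], end="", flush=True)  # emit only the newly generated suffix
    printed = len(response)
print()
```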