update modeling file to newest
@@ -92,14 +92,26 @@ All performance is based on greedy decoding with COT. We notice that the perform
# Inference
## LMDeploy
We suggest using [LMDeploy](https://github.com/InternLM/LMDeploy) (>= 0.2.1) for inference.
```python
from modelscope import snapshot_download
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig

# Download the weights from ModelScope.
model_dir = snapshot_download("Shanghai_AI_Laboratory/internlm2-math-20b")

# Turbomind engine: single GPU (tp=1), KV cache capped at 30% of free GPU memory.
backend_config = TurbomindEngineConfig(model_name='internlm2-chat-7b', tp=1, cache_max_entry_count=0.3)
# Use an empty system prompt and meta instruction, matching the Huggingface example below.
chat_template = ChatTemplateConfig(model_name='internlm2-chat-7b', system='', eosys='', meta_instruction='')
pipe = pipeline(model_path=model_dir, chat_template_config=chat_template, backend_config=backend_config)

problem = '1+1='
# top_k=1 gives greedy decoding.
result = pipe([problem], request_output_len=1024, top_k=1)
print(result)
```
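The pipeline returns one response object per input prompt. A minimal sketch of reading the generated text, assuming the `.text` attribute that recent lmdeploy releases expose on responses:

```python
# Each element of `result` corresponds to one input prompt.
# `.text` is assumed from recent lmdeploy releases; older versions may differ.
for prompt, response in zip([problem], result):
    print(f"{prompt}{response.text}")
```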
## Huggingface
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-math-20b", trust_remote_code=True)
# Set `torch_dtype=torch.float16` to load the model in float16; otherwise it is
# loaded in float32 and may cause an OOM error.
model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-math-20b", device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
model = model.eval()
# `chat` is provided by the model's remote code; the empty meta_instruction
# matches the LMDeploy chat template above.
response, history = model.chat(tokenizer, "1+1=", history=[], meta_instruction="")
print(response)
```
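If you prefer the generic `generate` API to the remote-code `chat` helper, a minimal sketch (this feeds the bare expression without the chat template that `chat` applies for you, so outputs may differ):

```python
# Greedy generation via the plain transformers API.
# Note: this skips the chat template that `model.chat` applies.
inputs = tokenizer("1+1=", return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```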