Update README.md
@@ -17,8 +17,47 @@ Lingma SWE-GPT has demonstrated impressive performance in software engineering tasks
- Outperforms other open-source models of similar scale on software engineering-specific tasks.
## How to use
### Run on SWE-bench
Refer to https://github.com/LingmaTongyi/Lingma-SWE-GPT for instructions on running the model on SWE-bench.
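
As a rough illustration of the inputs that pipeline consumes, the sketch below loads a SWE-bench task and prints its issue description. It assumes the Hugging Face `datasets` library and the public `princeton-nlp/SWE-bench_Lite` dataset, neither of which is part of this README; the repository linked above provides the actual inference pipeline.

```python
# Minimal sketch: peek at a SWE-bench task instance.
# Assumes the Hugging Face `datasets` library and the public
# princeton-nlp/SWE-bench_Lite dataset; the Lingma-SWE-GPT repository
# linked above is the authoritative inference pipeline.
from datasets import load_dataset

swebench = load_dataset("princeton-nlp/SWE-bench_Lite", split="test")
instance = swebench[0]
print(instance["repo"])               # source repository of the task
print(instance["instance_id"])        # unique task identifier
print(instance["problem_statement"])  # GitHub issue the model must resolve
```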
### Quick Start
```python
from modelscope import AutoModelForCausalLM, AutoTokenizer

model_name = "Lingma/Lingma-SWE-GPT-7B"

# Load the model weights and tokenizer from ModelScope.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "system", "content": "You are Lingma, created by Tongyi Lingma team. You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
# Render the chat messages into the model's expected prompt format.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=512
)
# Strip the prompt tokens so only the newly generated tokens remain.
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```
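
Since Lingma SWE-GPT targets software engineering tasks, a natural follow-up is to reuse the same generation steps on a code-repair prompt. The sketch below continues from the Quick Start (it reuses `model` and `tokenizer` defined above); the `chat` helper and the bug-fix prompt are illustrative conveniences, not part of the official API.

```python
# Illustrative helper wrapping the generation steps from the Quick Start;
# `chat` is our own convenience function, not part of the model's API.
def chat(messages, max_new_tokens=512):
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(**model_inputs, max_new_tokens=max_new_tokens)
    # Keep only the newly generated tokens.
    generated_ids = [
        out[len(inp):] for inp, out in zip(model_inputs.input_ids, generated_ids)
    ]
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

# Example: ask the model to repair a buggy Python function.
buggy = "def mean(xs):\n    return sum(xs) / len(xs) + 1"
print(chat([
    {"role": "system", "content": "You are Lingma, created by Tongyi Lingma team. You are a helpful assistant."},
    {"role": "user", "content": "Fix the bug in this function:\n" + buggy},
]))
```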
## TODO
Currently, only Python is supported. Future updates will add support for Java, JS/TS, and other languages.