Upload folder using huggingface_hub
This commit is contained in:
33
.gitattributes
vendored
33
.gitattributes
vendored
@@ -1,47 +1,36 @@
|
||||
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||
*.model filter=lfs diff=lfs merge=lfs -text
|
||||
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
||||
*.tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
*.db* filter=lfs diff=lfs merge=lfs -text
|
||||
*.ark* filter=lfs diff=lfs merge=lfs -text
|
||||
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
|
||||
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
|
||||
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
|
||||
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||
*.gguf* filter=lfs diff=lfs merge=lfs -text
|
||||
*.ggml filter=lfs diff=lfs merge=lfs -text
|
||||
*.llamafile* filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
model.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
|
||||
77
LICENSE
Normal file
77
LICENSE
Normal file
@@ -0,0 +1,77 @@
|
||||
# Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT
|
||||
**Version Release Date:** September 30th, 2024
|
||||
|
||||
This Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT is based on the Llama 3.2 Community License, Copyright © Meta Platforms, Inc. The terms and conditions have been adapted to reflect the proprietary nature of Katanemo Labs' materials.
|
||||
|
||||
---
|
||||
|
||||
1. Definitions
|
||||
a. "Agreement": The terms and conditions for use, reproduction, distribution, and modification of the Katanemo Materials set forth herein.
|
||||
b. "Documentation": The specifications, manuals, and documentation accompanying Katanemo LLMs v1.
|
||||
c. "Licensee" or "you: The individual or entity entering into this Agreement, including your employer if you are acting on their behalf.
|
||||
d. "Katanemo": The foundational large language models and software provided by Katanemo Labs, Inc., available at https://huggingface.co/katanemolabs.
|
||||
e. "Katanemo Materials": Collectively, Katanemo's proprietary models and Documentation. Some Materials are derived from the Qwen language models licensed under the Qwen RESEARCH LICENSE AGREEMENT.
|
||||
f. "Katanemo Labs" or "we": Katanemo Labs Inc., a Delaware, USA Corporation.
|
||||
|
||||
---
|
||||
|
||||
2. Acceptance of Terms
|
||||
By clicking "I Accept" or using any part of the Katanemo Materials, you agree to be bound by this Agreement.
|
||||
|
||||
---
|
||||
|
||||
3. License Rights and Redistribution
|
||||
a. Grant of Rights
|
||||
You are granted a non-exclusive, worldwide, non-transferable, and royalty-free license to:
|
||||
- Use, reproduce, distribute, and modify the Katanemo Materials.
|
||||
- Create derivative works based on the Katanemo Materials.
|
||||
|
||||
4. Redistribution and Use
|
||||
a. Distribution:
|
||||
If you distribute the Katanemo Materials or a derivative work:
|
||||
- Include a copy of this Agreement.
|
||||
- Prominently display "Built with Katanemo" on a related website or documentation.
|
||||
|
||||
b. Attribution:
|
||||
Include the following attribution notice:
|
||||
"Katanemo is licensed under the Katanemo Labs Community License, Copyright © Katanemo Labs, Inc. All Rights Reserved."_
|
||||
|
||||
c. Compliance:
|
||||
Your use must adhere to the Acceptable Use Policy, available at https://katanemolabs.com/katanemo/use-policy.
|
||||
|
||||
---
|
||||
|
||||
5. Additional Commercial Terms
|
||||
If you are commercially using the Materials, you shall request a license from us.
|
||||
|
||||
---
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
The Katanemo Materials are provided "AS IS" without warranties of any kind, either express or implied, including but not limited to warranties of title, non-infringement, or fitness for a particular purpose.
|
||||
|
||||
---
|
||||
|
||||
7. Limitation of Liability
|
||||
Katanemo Labs is not liable for any indirect, special, or consequential damages arising out of the use of the Katanemo Materials, even if advised of the possibility of such damages.
|
||||
|
||||
---
|
||||
|
||||
8. Intellectual Property
|
||||
a. Trademarks
|
||||
No trademark licenses are granted, except as required for attribution as described in Section 4.b. You may use the “Katanemo” mark according to Katanemo Labs' brand guidelines.
|
||||
|
||||
b. Ownership
|
||||
You own any derivative works or modifications you create, except for portions owned by Katanemo Labs.
|
||||
|
||||
c. Litigation
|
||||
If you file a lawsuit against Katanemo Labs regarding intellectual property, your license under this Agreement terminates.
|
||||
|
||||
---
|
||||
|
||||
9. Term and Termination
|
||||
This Agreement continues until terminated. Katanemo Labs may terminate the Agreement if you breach any terms. Upon termination, you must cease using the Katanemo Materials.
|
||||
|
||||
---
|
||||
|
||||
10. Governing Law and Jurisdiction
|
||||
This Agreement is governed by the laws of the State of Washington, USA. Any disputes will be resolved in the courts of California.
|
||||
380
README.md
380
README.md
@@ -1,47 +1,345 @@
|
||||
---
|
||||
license: apache-2.0
|
||||
|
||||
#model-type:
|
||||
##如 gpt、phi、llama、chatglm、baichuan 等
|
||||
#- gpt
|
||||
|
||||
#domain:
|
||||
##如 nlp、cv、audio、multi-modal
|
||||
#- nlp
|
||||
|
||||
#language:
|
||||
##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
|
||||
#- cn
|
||||
|
||||
#metrics:
|
||||
##如 CIDEr、Blue、ROUGE 等
|
||||
#- CIDEr
|
||||
|
||||
#tags:
|
||||
##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他
|
||||
#- pretrained
|
||||
|
||||
#tools:
|
||||
##如 vllm、fastchat、llamacpp、AdaSeq 等
|
||||
#- vllm
|
||||
license: other
|
||||
license_name: katanemo-research
|
||||
license_link: >-
|
||||
https://huggingface.co/katanemolabs/Arch-Function-1.5B/blob/main/LICENSE
|
||||
base_model:
|
||||
- Qwen/Qwen2.5-Coder-1.5B-Instruct
|
||||
language:
|
||||
- en
|
||||
pipeline_tag: text-generation
|
||||
library_name: transformers
|
||||
---
|
||||
### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
|
||||
#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型
|
||||
|
||||
SDK下载
|
||||
# katanemo/Arch-Function-1.5B
|
||||
|
||||
## Overview
|
||||
The Katanemo Arch-Function collection of large language models (LLMs) is a collection of state-of-the-art (SOTA) LLMs specifically designed for **function calling** tasks. The models are designed to understand complex function signatures, identify required parameters, and produce accurate function call outputs based on natural language prompts. Achieving performance on par with GPT-4, these models set a new benchmark in the domain of function-oriented tasks, making them suitable for scenarios where automated API interaction and function execution is crucial.
|
||||
|
||||
In summary, the Katanemo Arch-Function collection demonstrates:
|
||||
- **State-of-the-art performance** in function calling
|
||||
- **Accurate parameter identification and suggestion**, even in ambiguous or incomplete inputs
|
||||
- **High generalization** across multiple function calling use cases, from API interactions to automated backend tasks.
|
||||
- Optimized **low-latency, high-throughput** performance, making it suitable for real-time, production environments.
|
||||
|
||||
Arch-Function is the core LLM used in the open source [Arch Gateway](https://github.com/katanemo/arch) to seamlessly integrate user prompts with developers' APIs.
|
||||
|
||||
## Key Features
|
||||
<table>
|
||||
<tr style="text-align: left; vertical-align: middle; font-weight: bold;">
|
||||
<td>Functionality</td>
|
||||
<td>Definition</td>
|
||||
</tr>
|
||||
<tr style="text-left: left; vertical-align: middle;">
|
||||
<td>Single Function Calling</td>
|
||||
<td>Call only one function per user query </td>
|
||||
</tr>
|
||||
<tr style="text-left: left; vertical-align: middle;">
|
||||
<td>Parallel Function Calling</td>
|
||||
<td>Call the same function multiple times but with a different set of parameter values</td>
|
||||
</tr>
|
||||
<tr style="text-left: left; vertical-align: middle;">
|
||||
<td>Multiple Function Calling</td>
|
||||
<td>Call different functions per user query</td>
|
||||
</tr>
|
||||
<tr style="text-left: left; vertical-align: middle;">
|
||||
<td>Parallel & Multiple</td>
|
||||
<td>Perform both parallel and multiple function calling</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
## Training Details
|
||||
Katanemo Arch-Function collection is built on top of the [Qwen 2.5](https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e). A blog with technical details leading to our models will be published soon.
|
||||
|
||||
|
||||
## Performance Benchmarks
|
||||
We evaluate the Katanemo Arch-Function series on the [Berkeley Function-Calling Leaderboard (BFCL)](https://gorilla.cs.berkeley.edu/leaderboard.html#leaderboard). We compare with commonly-used models and the results (as of Oct 21st, 2024) are shown below. For each model family, we select the one with the highest rank.
|
||||
|
||||
<table>
|
||||
<tr style="text-align: center; vertical-align: middle; font-weight: bold;">
|
||||
<td rowspan=2>Rank</td>
|
||||
<td rowspan=2>Model</td>
|
||||
<td rowspan=2>Overall</td>
|
||||
<td colspan=3>Single Turn</td>
|
||||
<td rowspan=1>Multi Turn</td>
|
||||
<td colspan=2>Hallucination</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; font-weight: bold;">
|
||||
<td>Non-live (AST)</td>
|
||||
<td>Non-live (Exec)</td>
|
||||
<td>Live (AST)</td>
|
||||
<td>Overall</td>
|
||||
<td>Relevance</td>
|
||||
<td>Irrelevance</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle;">
|
||||
<td>1</td>
|
||||
<td>GPT-4o-2024-08-06 (FC)</td>
|
||||
<td>62.19%</td>
|
||||
<td>85.90%</td>
|
||||
<td>85.64%</td>
|
||||
<td>75.43%</td>
|
||||
<td>25.00%</td>
|
||||
<td>63.41%</td>
|
||||
<td>82.93%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; font-weight: bold;">
|
||||
<td> </td>
|
||||
<td>Arch-Function-7B</td>
|
||||
<td>59.62%</td>
|
||||
<td>86.83%</td>
|
||||
<td>88.07%</td>
|
||||
<td>71.57%</td>
|
||||
<td>21.00%</td>
|
||||
<td>95.12%</td>
|
||||
<td>73.63%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle;">
|
||||
<td>6</td>
|
||||
<td>o1-preview-2024-09-12 (Prompt)</td>
|
||||
<td>59.27%</td>
|
||||
<td>86.42%</td>
|
||||
<td>88.88%</td>
|
||||
<td>73.08%</td>
|
||||
<td>17.62%</td>
|
||||
<td>73.17%</td>
|
||||
<td>74.60%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; ">
|
||||
<td>9</td>
|
||||
<td>Gemini-1.5-Flash-002 (Prompt)</td>
|
||||
<td>57.92%</td>
|
||||
<td>86.58%</td>
|
||||
<td>89.48%</td>
|
||||
<td>76.28%</td>
|
||||
<td>9.88%</td>
|
||||
<td>85.37%</td>
|
||||
<td>78.54%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; font-weight: bold;">
|
||||
<td> </td>
|
||||
<td>Arch-Function-3B</td>
|
||||
<td>57.69%</td>
|
||||
<td>85.19%</td>
|
||||
<td>86.18%</td>
|
||||
<td>71.21%</td>
|
||||
<td>17.50%</td>
|
||||
<td>90.24%</td>
|
||||
<td>72.88%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; ">
|
||||
<td>12</td>
|
||||
<td>Claude-3.5-Sonnet-20240620 (FC)</td>
|
||||
<td>57.42%</td>
|
||||
<td>70.04%</td>
|
||||
<td>66.27%</td>
|
||||
<td>74.68%</td>
|
||||
<td>28.38%</td>
|
||||
<td>68.29%</td>
|
||||
<td>74.58%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; ">
|
||||
<td>13</td>
|
||||
<td>mistral-large-2407 (FC)</td>
|
||||
<td>56.80%</td>
|
||||
<td>86.62%</td>
|
||||
<td>84.57%</td>
|
||||
<td>68.37%</td>
|
||||
<td>20.62%</td>
|
||||
<td>75.61%</td>
|
||||
<td>49.44%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; font-weight: bold;">
|
||||
<td> </td>
|
||||
<td>Arch-Function-1.5B</td>
|
||||
<td>56.20%</td>
|
||||
<td>84.40%</td>
|
||||
<td>83.96%</td>
|
||||
<td>69.36%</td>
|
||||
<td>15.88%</td>
|
||||
<td>87.80%</td>
|
||||
<td>74.39%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; ">
|
||||
<td>21</td>
|
||||
<td>Llama-3.1-70B-Instruct (Prompt)</td>
|
||||
<td>53.67%</td>
|
||||
<td>88.90%</td>
|
||||
<td>89.34%</td>
|
||||
<td>61.13%</td>
|
||||
<td>12.38%</td>
|
||||
<td>92.68%</td>
|
||||
<td>58.38%</td>
|
||||
</tr>
|
||||
<tr style="text-align: center; vertical-align: middle; ">
|
||||
<td>22</td>
|
||||
<td>Gemma-2-27b-it (Prompt)</td>
|
||||
<td>53.66%</td>
|
||||
<td>88.52%</td>
|
||||
<td>87.89%</td>
|
||||
<td>69.48%</td>
|
||||
<td>4.12%</td>
|
||||
<td>87.8%</td>
|
||||
<td>68.76%</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
# Requirements
|
||||
The code of Arch-Function-1.5B has been merged into the Hugging Face `transformers` library and we advise you to install the latest version:
|
||||
```bash
|
||||
#安装ModelScope
|
||||
pip install modelscope
|
||||
```
|
||||
```python
|
||||
#SDK模型下载
|
||||
from modelscope import snapshot_download
|
||||
model_dir = snapshot_download('katanemo/Arch-Function-1.5B')
|
||||
```
|
||||
Git下载
|
||||
```
|
||||
#Git模型下载
|
||||
git clone https://www.modelscope.cn/katanemo/Arch-Function-1.5B.git
|
||||
pip install transformers>=4.37.0
|
||||
```
|
||||
|
||||
<p style="color: lightgrey;">如果您是本模型的贡献者,我们邀请您根据<a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">模型贡献文档</a>,及时完善模型卡片内容。</p>
|
||||
|
||||
# How to use
|
||||
We use the following example to illustrate how to use our model to perform function calling tasks. Please note that our model works best with our provided prompt format. It allows us to extract JSON output that is similar to the [function-calling mode of ChatGPT](https://platform.openai.com/docs/guides/function-calling).
|
||||
|
||||
|
||||
### Single Turn Example
|
||||
````python
|
||||
import json
|
||||
from typing import Any, Dict, List
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
model_name = "katanemo/Arch-Function-1.5B"
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
|
||||
)
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
|
||||
# Please use our provided prompt for best performance
|
||||
TASK_PROMPT = """
|
||||
You are a helpful assistant.
|
||||
""".strip()
|
||||
|
||||
TOOL_PROMPT = """
|
||||
# Tools
|
||||
|
||||
You may call one or more functions to assist with the user query.
|
||||
|
||||
You are provided with function signatures within <tools></tools> XML tags:
|
||||
<tools>
|
||||
{tool_text}
|
||||
</tools>
|
||||
""".strip()
|
||||
|
||||
FORMAT_PROMPT = """
|
||||
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
|
||||
<tool_call>
|
||||
{"name": <function-name>, "arguments": <args-json-object>}
|
||||
</tool_call>
|
||||
""".strip()
|
||||
|
||||
# Define available tools
|
||||
get_weather_api = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather for a location",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "str",
|
||||
"description": "The city and state, e.g. San Francisco, New York",
|
||||
},
|
||||
"unit": {
|
||||
"type": "str",
|
||||
"enum": ["celsius", "fahrenheit"],
|
||||
"description": "The unit of temperature to return",
|
||||
},
|
||||
},
|
||||
"required": ["location"],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
openai_format_tools = [get_weather_api]
|
||||
|
||||
|
||||
def convert_tools(tools: List[Dict[str, Any]]):
|
||||
return "\n".join([json.dumps(tool) for tool in tools])
|
||||
|
||||
# Helper function to create the system prompt for our model
|
||||
def format_prompt(tools: List[Dict[str, Any]]):
|
||||
tool_text = convert_tools(tools)
|
||||
|
||||
return (
|
||||
TASK_PROMPT
|
||||
+ "\n\n"
|
||||
+ TOOL_PROMPT.format(tool_text=tool_text)
|
||||
+ "\n\n"
|
||||
+ FORMAT_PROMPT
|
||||
+ "\n"
|
||||
)
|
||||
|
||||
|
||||
system_prompt = format_prompt(openai_format_tools)
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": "What is the weather in Seattle?"},
|
||||
]
|
||||
|
||||
inputs = tokenizer.apply_chat_template(
|
||||
messages, add_generation_prompt=True, return_tensors="pt"
|
||||
).to(model.device)
|
||||
|
||||
outputs = model.generate(
|
||||
inputs,
|
||||
max_new_tokens=512,
|
||||
do_sample=False,
|
||||
num_return_sequences=1,
|
||||
eos_token_id=tokenizer.eos_token_id,
|
||||
)
|
||||
|
||||
response = tokenizer.decode(outputs[0][len(inputs[0]) :], skip_special_tokens=True)
|
||||
print(response)
|
||||
````
|
||||
|
||||
Then you should be able to see the following output string in JSON format:
|
||||
````python
|
||||
<tool_call>
|
||||
{"name": "get_weather", "arguments": {"location": "Seattle"}}
|
||||
</tool_call>
|
||||
````
|
||||
|
||||
### Multi Turn Example
|
||||
Upon getting results from functions, you can add it to the `messages` list as a `user` message and pass it to the model to get responses for users.
|
||||
|
||||
````python
|
||||
# Suppose we receive the following result from the function:
|
||||
get_weather_api_result = {'name': 'get_weather', 'results': {'temperature': '62°', 'unit': 'fahrenheit'}}
|
||||
execution_results = [get_weather_api_result]
|
||||
|
||||
def add_execution_results(messages: List[Dict[str, Any]], execution_results: List[Dict[str, Any]]):
|
||||
content = "\n".join([f"<tool_response>\n{json.dumps(result)}</tool_response>" for result in execution_results])
|
||||
messages.append({"role": "user", "content": content})
|
||||
return messages
|
||||
|
||||
messages = add_execution_results(messages, execution_results)
|
||||
|
||||
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
|
||||
|
||||
outputs = model.generate(
|
||||
inputs,
|
||||
max_new_tokens=512,
|
||||
do_sample=False,
|
||||
num_return_sequences=1,
|
||||
eos_token_id=tokenizer.eos_token_id,
|
||||
)
|
||||
|
||||
response = tokenizer.decode(outputs[0][len(inputs[0]) :], skip_special_tokens=True)
|
||||
print(response)
|
||||
````
|
||||
|
||||
Then you should be able to see the following output:
|
||||
```
|
||||
The current temperature in Seattle is 62 degrees in Fahrenheit.
|
||||
```
|
||||
|
||||
|
||||
# License
|
||||
Katanemo Arch-Function collection is distributed under the [Katanemo license](https://huggingface.co/katanemolabs/Arch-Function-1.5B/blob/main/LICENSE).
|
||||
33
config.json
Normal file
33
config.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"_name_or_path": "Qwen/Qwen2.5-Coder-1.5B-Instruct",
|
||||
"architectures": [
|
||||
"Qwen2ForCausalLM"
|
||||
],
|
||||
"attention_dropout": 0.0,
|
||||
"bos_token_id": 151643,
|
||||
"eos_token_id": 151645,
|
||||
"hidden_act": "silu",
|
||||
"hidden_size": 1536,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 8960,
|
||||
"max_position_embeddings": 32768,
|
||||
"max_window_layers": 28,
|
||||
"model_type": "qwen2",
|
||||
"num_attention_heads": 12,
|
||||
"num_hidden_layers": 28,
|
||||
"num_key_value_heads": 2,
|
||||
"rms_norm_eps": 1e-06,
|
||||
"rope_theta": 1000000.0,
|
||||
"rope_scaling": {
|
||||
"factor": 4.0,
|
||||
"original_max_position_embeddings": 32768,
|
||||
"type": "yarn"
|
||||
},
|
||||
"sliding_window": null,
|
||||
"tie_word_embeddings": true,
|
||||
"torch_dtype": "bfloat16",
|
||||
"transformers_version": "4.43.2",
|
||||
"use_cache": false,
|
||||
"use_sliding_window": false,
|
||||
"vocab_size": 151936
|
||||
}
|
||||
1
configuration.json
Normal file
1
configuration.json
Normal file
@@ -0,0 +1 @@
|
||||
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
|
||||
14
generation_config.json
Normal file
14
generation_config.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"bos_token_id": 151643,
|
||||
"do_sample": true,
|
||||
"eos_token_id": [
|
||||
151645,
|
||||
151643
|
||||
],
|
||||
"pad_token_id": 151643,
|
||||
"repetition_penalty": 1.1,
|
||||
"temperature": 0.7,
|
||||
"top_k": 20,
|
||||
"top_p": 0.8,
|
||||
"transformers_version": "4.43.2"
|
||||
}
|
||||
151388
merges.txt
Normal file
151388
merges.txt
Normal file
File diff suppressed because it is too large
Load Diff
3
model.safetensors
Normal file
3
model.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:3440dc10971cff500578021d8f87b978786b39379963fb4056bab0fb8d085630
|
||||
size 3087467144
|
||||
303282
tokenizer.json
Normal file
303282
tokenizer.json
Normal file
File diff suppressed because it is too large
Load Diff
207
tokenizer_config.json
Normal file
207
tokenizer_config.json
Normal file
@@ -0,0 +1,207 @@
|
||||
{
|
||||
"add_bos_token": false,
|
||||
"add_prefix_space": false,
|
||||
"added_tokens_decoder": {
|
||||
"151643": {
|
||||
"content": "<|endoftext|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151644": {
|
||||
"content": "<|im_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151645": {
|
||||
"content": "<|im_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151646": {
|
||||
"content": "<|object_ref_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151647": {
|
||||
"content": "<|object_ref_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151648": {
|
||||
"content": "<|box_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151649": {
|
||||
"content": "<|box_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151650": {
|
||||
"content": "<|quad_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151651": {
|
||||
"content": "<|quad_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151652": {
|
||||
"content": "<|vision_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151653": {
|
||||
"content": "<|vision_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151654": {
|
||||
"content": "<|vision_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151655": {
|
||||
"content": "<|image_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151656": {
|
||||
"content": "<|video_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151657": {
|
||||
"content": "<tool_call>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151658": {
|
||||
"content": "</tool_call>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151659": {
|
||||
"content": "<|fim_prefix|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151660": {
|
||||
"content": "<|fim_middle|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151661": {
|
||||
"content": "<|fim_suffix|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151662": {
|
||||
"content": "<|fim_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151663": {
|
||||
"content": "<|repo_name|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151664": {
|
||||
"content": "<|file_sep|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
}
|
||||
},
|
||||
"additional_special_tokens": [
|
||||
"<|im_start|>",
|
||||
"<|im_end|>",
|
||||
"<|object_ref_start|>",
|
||||
"<|object_ref_end|>",
|
||||
"<|box_start|>",
|
||||
"<|box_end|>",
|
||||
"<|quad_start|>",
|
||||
"<|quad_end|>",
|
||||
"<|vision_start|>",
|
||||
"<|vision_end|>",
|
||||
"<|vision_pad|>",
|
||||
"<|image_pad|>",
|
||||
"<|video_pad|>"
|
||||
],
|
||||
"bos_token": null,
|
||||
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
|
||||
"clean_up_tokenization_spaces": false,
|
||||
"eos_token": "<|im_end|>",
|
||||
"errors": "replace",
|
||||
"model_max_length": 32768,
|
||||
"pad_token": "<|endoftext|>",
|
||||
"split_special_tokens": false,
|
||||
"tokenizer_class": "Qwen2Tokenizer",
|
||||
"unk_token": null
|
||||
}
|
||||
1
vocab.json
Normal file
1
vocab.json
Normal file
File diff suppressed because one or more lines are too long
Reference in New Issue
Block a user