commit a032b6c33717868765dfaa0e9b02827e8f64a2d6
Author: ModelHub XC
Date: Sun May 3 17:56:38 2026 +0800

    Initialize the project; model provided by the ModelHub XC community
    Model: AI-ModelScope/c4ai-command-r7b-12-2024
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..52373fe
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9c99999
--- /dev/null
+++ b/README.md
@@ -0,0 +1,251 @@
+---
+inference: false
+library_name: transformers
+language:
+- en
+- fr
+- de
+- es
+- it
+- pt
+- ja
+- ko
+- zh
+- ar
+- el
+- fa
+- pl
+- id
+- cs
+- he
+- hi
+- nl
+- ro
+- ru
+- tr
+- uk
+- vi
+license: cc-by-nc-4.0
+extra_gated_prompt: "By submitting this form, you agree to the [License Agreement](https://cohere.com/c4ai-cc-by-nc-license) and acknowledge that the information you provide will be collected, used, and shared in accordance with Cohere’s [Privacy Policy](https://cohere.com/privacy). You’ll receive email updates about Cohere Labs and Cohere research, events, products and services. You can unsubscribe at any time."
+extra_gated_fields:
+  Name: text
+  Affiliation: text
+  Country: country
+  I agree to use this model for non-commercial use ONLY: checkbox
+---
+
+# **Model Card for Cohere Labs Command R7B**
+
+## **Model Summary**
+
+Cohere Labs Command R7B is an open-weights research release of a 7-billion-parameter model with advanced capabilities optimized for a variety of use cases, including reasoning, summarization, question answering, and code. The model is trained to perform sophisticated tasks including Retrieval Augmented Generation (RAG) and tool use. It also has powerful agentic capabilities and can use and combine multiple tools over multiple steps to accomplish more difficult tasks. It obtains top performance on enterprise-relevant code use cases. Command R7B is a multilingual model trained on 23 languages.
+
+Developed by: [Cohere](https://cohere.com/) and [Cohere Labs](https://cohere.for.ai/)
+
+* Point of Contact: [Cohere Labs](https://cohere.for.ai/)
+* License: [CC-BY-NC](https://cohere.com/cohere-labs-cc-by-nc-license); use also requires adherence to [Cohere Labs' Acceptable Use Policy](https://docs.cohere.com/docs/cohere-labs-acceptable-use-policy)
+* Model: CohereLabs-command-r7b-12-2024
+* Model Size: 7 billion parameters
+* Context length: 128K
+
+For more details on how this model was developed, check out our [Tech Report](https://arxiv.org/abs/2504.00698).
+
+**Try Command R7B**
+
+You can try out Cohere Labs Command R7B before downloading the weights in our hosted [Hugging Face Space](https://coherelabs-c4ai-command.hf.space/models/command-r7b-12-2024).
+
+**Usage**
+
+Please install transformers from the source repository, which includes the necessary changes for this model.
+
+```py
+# pip install 'git+https://github.com/huggingface/transformers.git'
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+model_id = "CohereLabs/c4ai-command-r7b-12-2024"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+# Format message with the c4ai-command-r7b-12-2024 chat template
+messages = [{"role": "user", "content": "Hello, how are you?"}]
+input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+
+gen_tokens = model.generate(
+    input_ids,
+    max_new_tokens=100,
+    do_sample=True,
+    temperature=0.3,
+)
+
+gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
+print(gen_text)
+```
+
+## **Model Details**
+
+**Input**: The model takes text as input only.
+
+**Output**: The model generates text only.
+
+**Model Architecture**: This is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, the model is aligned to human preferences for helpfulness and safety using supervised fine-tuning (SFT) and preference training. The model features three layers with **sliding window attention** (window size 4096) and **RoPE** for efficient local context modeling and relative positional encoding. A fourth layer uses **global attention** without positional embeddings, enabling unrestricted token interactions across the entire sequence; this three-to-one pattern repeats across the model's 32 hidden layers (cf. `sliding_window_pattern: 4` in config.json).
+
+**Languages covered**: The model has been trained on 23 languages: English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, Chinese, Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, and Persian.
+
+**Context length**: Command R7B supports a context length of 128K.
+
+### A well-rounded model
+
+Command R7B excels on standardized and externally verifiable benchmarks such as the [HuggingFace Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/). Compared to other similarly sized open-weights models, Command R7B ranks first, with strong performance across all tasks.
+
+| | Command R7B | Gemma 2 IT 9B | Ministral 8B | Llama 3.1 8B | Qwen 2.5 7B | Tulu 3 8B |
+| :---- | :---- | :---- | :---- | :---- | :---- | :---- |
+| Average | **31.4** | 28.9 | 22.0 | 28.2 | 26.87 | 26.03 |
+| IFEval | 77.9 | 74.4 | 58.96 | 78.6 | 75.85 | **82.67** |
+| BBH | 36.1 | **42.1** | 25.82 | 29.9 | 34.89 | 16.67 |
+| MATH hard | **26.4** | 0.2 | 6.5 | 19.3 | 0.0 | 19.64 |
+| GPQA | 7.7 | **14.8** | 4.5 | 2.4 | 5.48 | 6.49 |
+| MUSR | **11.6** | 9.74 | 10.7 | 8.41 | 8.45 | 10.45 |
+| MMLU-Pro | 28.5 | 32.0 | 25.5 | 30.7 | **36.52** | 20.3 |
+
+*HuggingFace Leaderboard evaluation results. Competitor numbers are taken from the official leaderboard. Command R7B results are calculated by us using the official HuggingFace prompts and evaluation code.*
+
+### **Chat Capabilities:**
+
+Command R7B can be configured as both a conversational model and an instruct model. The [conversational mode](https://docs.cohere.com/docs/command-r7b-hf) conditions the model on interactive behaviour, meaning it is expected to reply in a conversational fashion, provide introductory statements and follow-up questions, and use Markdown as well as LaTeX where appropriate. It is optimized for interactive experiences, such as chatbots, where the model engages in dialogue.
+
+The [instruct mode](https://docs.cohere.com/docs/command-r7b-hf), in contrast, conditions the model to provide concise yet comprehensive responses, and does not use Markdown or LaTeX by default. It is designed for non-interactive, task-focused use cases such as extracting information, summarizing text, translation, and categorization.
+
+**Note:** by default, Command R7B is delivered without a system preamble. We recommend adding the conversational or instruct preamble as [described in our docs](https://docs.cohere.com/docs/command-r7b-hf); a minimal sketch follows below.
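+A minimal sketch of supplying a preamble as a system message (the preamble string below is a placeholder; the official conversational and instruct preamble texts are given in the docs linked above):
+
+```py
+# Prepend a system preamble before applying the chat template.
+# "<preamble text from the docs>" is a placeholder, not the official string.
+messages = [
+    {"role": "system", "content": "<preamble text from the docs>"},
+    {"role": "user", "content": "Summarize the attached report in three bullet points."},
+]
+input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+```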
+
+### **RAG Capabilities:**
+
+Command R7B has been trained specifically for tasks like the final step of Retrieval Augmented Generation (RAG).
+
+RAG with Command R7B is supported through [chat templates](https://huggingface.co/docs/transformers/main/en/chat_templating#advanced-retrieval-augmented-generation) in Transformers. The model takes a conversation as input (with an optional user-supplied system preamble), along with a list of document snippets.
+
+<details>
+<summary>RAG Example [CLICK TO EXPAND]</summary>
+
+```py
+# Define conversation input
+conversation = [{"role": "user", "content": "What has Man always dreamed of?"}]
+
+# Define documents for retrieval-based generation
+documents = [
+    {"heading": "The Moon: Our Age-Old Foe", "body": "Man has always dreamed of destroying the moon. In this essay, I shall..."},
+    {"heading": "Love is all you need", "body": "Man's dream has always been to find love. This profound lesson..."}
+]
+
+# Render the RAG prompt as a string (tokenize=False returns text, so return_tensors is not needed here)
+input_prompt = tokenizer.apply_chat_template(conversation=conversation, documents=documents, tokenize=False, add_generation_prompt=True)
+# Tokenize the rendered prompt; the template already contains the special tokens,
+# so add_special_tokens=False avoids inserting a second BOS token
+input_ids = tokenizer(input_prompt, return_tensors="pt", add_special_tokens=False).input_ids
+```
+
+You can then generate text from this input as normal (a minimal generation sketch follows below).
+
+Document snippets should be short chunks, rather than long documents, typically around 100-400 words per chunk, formatted as key-value pairs. The keys should be short descriptive strings, the values can be text or semi-structured.
+
+You may find that simply including relevant documents directly in a user message works just as well as, or better than, using the documents parameter to render the special RAG template. The RAG template is generally a strong default, though. We encourage users to play with both, and to evaluate which mode works best for their specific use case.
+</details>
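+As a concrete sketch of that generation step (reusing the `model` and `tokenizer` objects from the Usage section above):
+
+```py
+# Generate a grounded answer from the tokenized RAG prompt
+gen_tokens = model.generate(input_ids, max_new_tokens=200, do_sample=True, temperature=0.3)
+# Decode only the newly generated tokens
+print(tokenizer.decode(gen_tokens[0][input_ids.shape[1]:], skip_special_tokens=True))
+```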
+
+Note that this was a very brief introduction to RAG; for more information, see the Command R7B [prompt format docs](https://docs.cohere.com/docs/command-r7b-hf) and the Transformers [RAG documentation](https://huggingface.co/docs/transformers/main/chat_templating#advanced-retrieval-augmented-generation).
+
+### **Tool Use Capabilities:**
+
+Command R7B has been specifically trained with conversational tool use capabilities. This allows the model to interact with external tools like APIs, databases, or search engines.
+
+Tool use with Command R7B is supported through [chat templates](https://huggingface.co/docs/transformers/main/en/chat_templating#advanced-tool-use--function-calling) in Transformers. We recommend providing tool descriptions using JSON schema.
+
+<details>
+<summary>Tool Use Example [CLICK TO EXPAND]</summary>
+
+```py
+# Define tools
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "query_daily_sales_report",
+            "description": "Connects to a database to retrieve overall sales volumes and sales information for a given day.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "day": {
+                        "description": "Retrieves sales data for this day, formatted as YYYY-MM-DD.",
+                        "type": "string",
+                    }
+                },
+                "required": ["day"]
+            },
+        }
+    }
+]
+
+# Define conversation input
+conversation = [{"role": "user", "content": "Can you provide a sales summary for 29th September 2023?"}]
+
+# Render the Tool Use prompt as a string (tokenize=False returns text, so return_tensors is not needed here)
+input_prompt = tokenizer.apply_chat_template(conversation=conversation, tools=tools, tokenize=False, add_generation_prompt=True)
+
+# Tokenize the rendered prompt; add_special_tokens=False avoids a duplicate BOS token
+input_ids = tokenizer(input_prompt, return_tensors="pt", add_special_tokens=False).input_ids
+```
+
+You can then generate text from this input as normal.
+
+If the model generates a plan and tool calls, you should add them to the chat history like so:
+
+```py
+tool_call = {"name": "query_daily_sales_report", "arguments": {"day": "2023-09-29"}}
+tool_plan = "I will use the query_daily_sales_report tool to find the sales summary for 29th September 2023."
+conversation.append({"role": "assistant", "tool_calls": [{"id": "0", "type": "function", "function": tool_call}], "tool_plan": tool_plan})
+```
+
+and then call the tool(s) and append the result(s), with the tool role, like so:
+
+```py
+# Every tool result needs to be a dictionary
+api_response_for_query_daily_sales_report = {"date": "2023-09-29", "summary": "Total Sales Amount: 10000, Total Units Sold: 250"}
+# Append tool results; "tool_call_id" must match the "id" of the corresponding tool_call
+conversation.append({"role": "tool", "tool_call_id": "0", "content": api_response_for_query_daily_sales_report})
+```
+
+After that, you can call generate() again to let the model use the tool result in the chat, as sketched below.
+</details>
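+A minimal sketch of that final generation step (reusing the `model` and `tokenizer` objects from the Usage section above):
+
+```py
+# Re-render the prompt so it now includes the appended tool call and tool result
+input_prompt = tokenizer.apply_chat_template(conversation=conversation, tools=tools, tokenize=False, add_generation_prompt=True)
+input_ids = tokenizer(input_prompt, return_tensors="pt", add_special_tokens=False).input_ids
+
+# The model can now answer the user grounded in the tool output
+gen_tokens = model.generate(input_ids, max_new_tokens=200)
+print(tokenizer.decode(gen_tokens[0][input_ids.shape[1]:], skip_special_tokens=True))
+```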
+
+Note that this was a very brief introduction to tool calling; for more information, see the Command R7B [prompt format docs](https://docs.cohere.com/docs/command-r7b-hf#tool-use-function-calling--agent-capabilities) and the Transformers [tool use documentation](https://huggingface.co/docs/transformers/main/chat_templating#advanced-tool-use--function-calling).
+
+### **Code Capabilities:**
+
+Command R7B has meaningfully improved code capabilities. In addition to academic code benchmarks, we have evaluated it on enterprise-relevant scenarios, including SQL and code translation, where it outperforms other models of similar size. Try these out by requesting code snippets, code explanations, or code rewrites. For better performance, we also recommend using a low temperature (or even greedy decoding) for code-generation-related instructions.
+
+## **Model Card Contact**
+
+For errors or additional questions about details in this model card, contact labs@cohere.com.
+
+## **Terms of Use:**
+
+We hope that the release of this model will make community-based research efforts more accessible by releasing the weights of a highly performant 7-billion-parameter model to researchers all over the world. This model is governed by a [CC-BY-NC](https://cohere.com/cohere-labs-cc-by-nc-license) license and also requires adherence to [Cohere Labs' Acceptable Use Policy](https://docs.cohere.com/docs/cohere-labs-acceptable-use-policy).
+
+## **Try Chat:**
+
+You can try Command R7B chat in the playground [here](https://dashboard.cohere.com/playground/chat). You can also use it in our dedicated Hugging Face Space [here](https://coherelabs-c4ai-command.hf.space/models/command-r7b-12-2024).
+
+## **Citation:**
+
+```
+@misc{cohere2025commandaenterprisereadylarge,
+      title={Command A: An Enterprise-Ready Large Language Model},
+      author={Team Cohere and Aakanksha and Arash Ahmadian and Marwan Ahmed and Jay Alammar and Yazeed Alnumay and Sophia Althammer and Arkady Arkhangorodsky and Viraat Aryabumi and Dennis Aumiller and Raphaël Avalos and Zahara Aviv and Sammie Bae and Saurabh Baji and Alexandre Barbet and Max Bartolo and Björn Bebensee and Neeral Beladia and Walter Beller-Morales and Alexandre Bérard and Andrew Berneshawi and Anna Bialas and Phil Blunsom and Matt Bobkin and Adi Bongale and Sam Braun and Maxime Brunet and Samuel Cahyawijaya and David Cairuz and Jon Ander Campos and Cassie Cao and Kris Cao and Roman Castagné and Julián Cendrero and Leila Chan Currie and Yash Chandak and Diane Chang and Giannis Chatziveroglou and Hongyu Chen and Claire Cheng and Alexis Chevalier and Justin T.
Chiu and Eugene Cho and Eugene Choi and Eujeong Choi and Tim Chung and Volkan Cirik and Ana Cismaru and Pierre Clavier and Henry Conklin and Lucas Crawhall-Stein and Devon Crouse and Andres Felipe Cruz-Salinas and Ben Cyrus and Daniel D'souza and Hugo Dalla-Torre and John Dang and William Darling and Omar Darwiche Domingues and Saurabh Dash and Antoine Debugne and Théo Dehaze and Shaan Desai and Joan Devassy and Rishit Dholakia and Kyle Duffy and Ali Edalati and Ace Eldeib and Abdullah Elkady and Sarah Elsharkawy and Irem Ergün and Beyza Ermis and Marzieh Fadaee and Boyu Fan and Lucas Fayoux and Yannis Flet-Berliac and Nick Frosst and Matthias Gallé and Wojciech Galuba and Utsav Garg and Matthieu Geist and Mohammad Gheshlaghi Azar and Seraphina Goldfarb-Tarrant and Tomas Goldsack and Aidan Gomez and Victor Machado Gonzaga and Nithya Govindarajan and Manoj Govindassamy and Nathan Grinsztajn and Nikolas Gritsch and Patrick Gu and Shangmin Guo and Kilian Haefeli and Rod Hajjar and Tim Hawes and Jingyi He and Sebastian Hofstätter and Sungjin Hong and Sara Hooker and Tom Hosking and Stephanie Howe and Eric Hu and Renjie Huang and Hemant Jain and Ritika Jain and Nick Jakobi and Madeline Jenkins and JJ Jordan and Dhruti Joshi and Jason Jung and Trushant Kalyanpur and Siddhartha Rao Kamalakara and Julia Kedrzycki and Gokce Keskin and Edward Kim and Joon Kim and Wei-Yin Ko and Tom Kocmi and Michael Kozakov and Wojciech Kryściński and Arnav Kumar Jain and Komal Kumar Teru and Sander Land and Michael Lasby and Olivia Lasche and Justin Lee and Patrick Lewis and Jeffrey Li and Jonathan Li and Hangyu Lin and Acyr Locatelli and Kevin Luong and Raymond Ma and Lukas Mach and Marina Machado and Joanne Magbitang and Brenda Malacara Lopez and Aryan Mann and Kelly Marchisio and Olivia Markham and Alexandre Matton and Alex McKinney and Dominic McLoughlin and Jozef Mokry and Adrien Morisot and Autumn Moulder and Harry Moynehan and Maximilian Mozes and Vivek Muppalla and Lidiya Murakhovska and Hemangani Nagarajan and Alekhya Nandula and Hisham Nasir and Shauna Nehra and Josh Netto-Rosen and Daniel Ohashi and James Owers-Bardsley and Jason Ozuzu and Dennis Padilla and Gloria Park and Sam Passaglia and Jeremy Pekmez and Laura Penstone and Aleksandra Piktus and Case Ploeg and Andrew Poulton and Youran Qi and Shubha Raghvendra and Miguel Ramos and Ekagra Ranjan and Pierre Richemond and Cécile Robert-Michon and Aurélien Rodriguez and Sudip Roy and Laura Ruis and Louise Rust and Anubhav Sachan and Alejandro Salamanca and Kailash Karthik Saravanakumar and Isha Satyakam and Alice Schoenauer Sebag and Priyanka Sen and Sholeh Sepehri and Preethi Seshadri and Ye Shen and Tom Sherborne and Sylvie Chang Shi and Sanal Shivaprasad and Vladyslav Shmyhlo and Anirudh Shrinivason and Inna Shteinbuk and Amir Shukayev and Mathieu Simard and Ella Snyder and Ava Spataru and Victoria Spooner and Trisha Starostina and Florian Strub and Yixuan Su and Jimin Sun and Dwarak Talupuru and Eugene Tarassov and Elena Tommasone and Jennifer Tracey and Billy Trend and Evren Tumer and Ahmet Üstün and Bharat Venkitesh and David Venuto and Pat Verga and Maxime Voisin and Alex Wang and Donglu Wang and Shijian Wang and Edmond Wen and Naomi White and Jesse Willman and Marysia Winkels and Chen Xia and Jessica Xie and Minjie Xu and Bowen Yang and Tan Yi-Chern and Ivan Zhang and Zhenyu Zhao and Zhoujie Zhao}, + year={2025}, + eprint={2504.00698}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2504.00698}, +} +``` diff --git 
a/config.json b/config.json new file mode 100644 index 0000000..8bf71a2 --- /dev/null +++ b/config.json @@ -0,0 +1,39 @@ +{ + "architectures": [ + "Cohere2ForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 5, + "cache_implementation": "hybrid", + "eos_token_id": 255001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "layer_norm_eps": 1e-05, + "layer_switch": 4, + "logit_scale": 0.25, + "max_position_embeddings": 132096, + "model_type": "cohere2", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "order_of_interleaved_layers": "local_attn_first", + "pad_token_id": 0, + "position_embedding_type": "rope_gptj", + "rope_scaling": null, + "rope_theta": 50000, + "rotary_pct": 1.0, + "sliding_window": 4096, + "sliding_window_pattern": 4, + "torch_dtype": "bfloat16", + "transformers_version": "4.48.0.dev0", + "use_cache": true, + "use_embedding_sharing": true, + "use_gated_activation": true, + "use_parallel_block": true, + "use_parallel_embedding": true, + "vocab_size": 256000 +} diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..bbeeda1 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "text-generation", "allow_remote": true} \ No newline at end of file diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..2a6a49e --- /dev/null +++ b/generation_config.json @@ -0,0 +1,8 @@ +{ + "_from_model_config": true, + "bos_token_id": 5, + "cache_implementation": "hybrid", + "eos_token_id": 255001, + "pad_token_id": 0, + "transformers_version": "4.48.0.dev0" +} diff --git a/model-00001-of-00004.safetensors b/model-00001-of-00004.safetensors new file mode 100644 index 0000000..34ac65a --- /dev/null +++ b/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e0547fbfcb0e010c9640a99e509c05a700302c09278e6e3513cf214d56a953 +size 4915779696 diff --git a/model-00002-of-00004.safetensors b/model-00002-of-00004.safetensors new file mode 100644 index 0000000..135a965 --- /dev/null +++ b/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b7ae637b33bf833c2b22a3382f7b671c7754c19bead0d2f3f84d2977b834447 +size 4915824704 diff --git a/model-00003-of-00004.safetensors b/model-00003-of-00004.safetensors new file mode 100644 index 0000000..4329d28 --- /dev/null +++ b/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0a437c9e2ca587ffd732f0a442c6c442cca6a4ebd58c3c781d33213204e6d4b +size 4999719592 diff --git a/model-00004-of-00004.safetensors b/model-00004-of-00004.safetensors new file mode 100644 index 0000000..1b32896 --- /dev/null +++ b/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba7a9c927fffdbeae21a534706fc3a817daf09a76102da0f434b1a8c73515bf +size 1224771944 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000..3136b2a --- /dev/null +++ b/model.safetensors.index.json @@ -0,0 +1,265 @@ +{ + "metadata": { + "total_size": 16056066048 + }, + "weight_map": { + "model.embed_tokens.weight": "model-00001-of-00004.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + 
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.q_proj.weight": 
"model-00002-of-00004.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + 
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.v_proj.weight": 
"model-00003-of-00004.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors", + 
"model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.input_layernorm.weight": 
"model-00002-of-00004.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.norm.weight": "model-00004-of-00004.safetensors" + } +} diff --git a/signatures/c4ai-command-r7b-12-2024.sig b/signatures/c4ai-command-r7b-12-2024.sig new file mode 100644 index 0000000..5945fa3 --- /dev/null +++ b/signatures/c4ai-command-r7b-12-2024.sig @@ -0,0 +1 @@ 
+{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json","verificationMaterial":{"certificate":{"rawBytes":"MIIHAjCCBoegAwIBAgIUHlEeyUsfdV46zCjJCXL50ub8KZgwCgYIKoZIzj0EAwMwNzEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MR4wHAYDVQQDExVzaWdzdG9yZS1pbnRlcm1lZGlhdGUwHhcNMjUxMDI3MTk0MzI4WhcNMjUxMDI3MTk1MzI4WjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEoiEAlzuVi9GhCGRaP8bJmV8UWp1Y4QvycpXPLE9BFTdog3+MkAMEfn3HLsT2VokPADjEBGcUsidruw/0TzWxaOCBaYwggWiMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUqD6zX7d4CzuGGxwOIghlH3g0EaIwHwYDVR0jBBgwFoAU39Ppz1YkEZb5qNjpKFWixi4YZD8waQYDVR0RAQH/BF8wXYZbaHR0cHM6Ly9naXRodWIuY29tL2NvaGVyZS1haS9tb2RlbC1zaWduaW5nLy5naXRodWIvd29ya2Zsb3dzL3NpZ24tbW9kZWwueW1sQHJlZnMvaGVhZHMvbWFpbjA5BgorBgEEAYO/MAEBBCtodHRwczovL3Rva2VuLmFjdGlvbnMuZ2l0aHVidXNlcmNvbnRlbnQuY29tMB8GCisGAQQBg78wAQIEEXdvcmtmbG93X2Rpc3BhdGNoMDYGCisGAQQBg78wAQMEKDRlNDg2OTI3MDM5MDViOTJiMDMyYzY3NDNhZWE4YmFiOWYyNDU0M2IwJgYKKwYBBAGDvzABBAQYU2lnbiBNb2RlbCB3aXRoIFNpZ3N0b3JlMCUGCisGAQQBg78wAQUEF2NvaGVyZS1haS9tb2RlbC1zaWduaW5nMB0GCisGAQQBg78wAQYED3JlZnMvaGVhZHMvbWFpbjA7BgorBgEEAYO/MAEIBC0MK2h0dHBzOi8vdG9rZW4uYWN0aW9ucy5naXRodWJ1c2VyY29udGVudC5jb20wawYKKwYBBAGDvzABCQRdDFtodHRwczovL2dpdGh1Yi5jb20vY29oZXJlLWFpL21vZGVsLXNpZ25pbmcvLmdpdGh1Yi93b3JrZmxvd3Mvc2lnbi1tb2RlbC55bWxAcmVmcy9oZWFkcy9tYWluMDgGCisGAQQBg78wAQoEKgwoNGU0ODY5MjcwMzkwNWI5MmIwMzJjNjc0M2FlYThiYWI5ZjI0NTQzYjAdBgorBgEEAYO/MAELBA8MDWdpdGh1Yi1ob3N0ZWQwOgYKKwYBBAGDvzABDAQsDCpodHRwczovL2dpdGh1Yi5jb20vY29oZXJlLWFpL21vZGVsLXNpZ25pbmcwOAYKKwYBBAGDvzABDQQqDCg0ZTQ4NjkyNzAzOTA1YjkyYjAzMmM2NzQzYWVhOGJhYjlmMjQ1NDNiMB8GCisGAQQBg78wAQ4EEQwPcmVmcy9oZWFkcy9tYWluMBoGCisGAQQBg78wAQ8EDAwKMTA2NzY3NTI1MDAsBgorBgEEAYO/MAEQBB4MHGh0dHBzOi8vZ2l0aHViLmNvbS9jb2hlcmUtYWkwGAYKKwYBBAGDvzABEQQKDAg1NDg1MDkyMzBrBgorBgEEAYO/MAESBF0MW2h0dHBzOi8vZ2l0aHViLmNvbS9jb2hlcmUtYWkvbW9kZWwtc2lnbmluZy8uZ2l0aHViL3dvcmtmbG93cy9zaWduLW1vZGVsLnltbEByZWZzL2hlYWRzL21haW4wOAYKKwYBBAGDvzABEwQqDCg0ZTQ4NjkyNzAzOTA1YjkyYjAzMmM2NzQzYWVhOGJhYjlmMjQ1NDNiMCEGCisGAQQBg78wARQEEwwRd29ya2Zsb3dfZGlzcGF0Y2gwXgYKKwYBBAGDvzABFQRQDE5odHRwczovL2dpdGh1Yi5jb20vY29oZXJlLWFpL21vZGVsLXNpZ25pbmcvYWN0aW9ucy9ydW5zLzE4ODUzODI1MjE1L2F0dGVtcHRzLzEwGAYKKwYBBAGDvzABFgQKDAhpbnRlcm5hbDCBiwYKKwYBBAHWeQIEAgR9BHsAeQB3AN09MGrGxxEyYxkeHJlnNwKiSl643jyt/4eKcoAvKe6OAAABmicychIAAAQDAEgwRgIhAJDCn6+Pp7sPA2X2U440RNsf2rLvapmtz38sSQd0x63bAiEAqL+j4ADYKb61QCGsFoTW/HIIbvBgrFG8kfbNjVt0yB0wCgYIKoZIzj0EAwMDaQAwZgIxAP/i/1w2Yu+U4RmVI7nKzJR+jOZtd2q4x8fl5PZEJeR17zhoO0vAn5U7v3YTfEphyQIxAKXk9sZeObdGxNp4c+9RfU1eWlDwQBivaj8BFO/Ge6m2MqSYjyMoRn0zbyAVlnwAPg=="},"tlogEntries":[{"logIndex":"646203249","logId":{"keyId":"wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="},"kindVersion":{"kind":"dsse","version":"0.0.1"},"integratedTime":"1761594209","inclusionPromise":{"signedEntryTimestamp":"MEUCIQCVkctdemNBLFGkna6h2eA+fJrgtOs8uTy1EM7iL4t6+QIgIdEncfIH+4459dKwxia3uWQvhEvuKZaDIE4aJ28tTFs="},"inclusionProof":{"logIndex":"524298987","rootHash":"Q1pH2eA24TKCy5W6GBy8l/EhKxf61PsNNG0+p0pSB+4=","treeSize":"524299001","hashes":["xviH3KOYOSEVuqXF6+RiHgbdkT6xAp5tXMFNF4rzb1U=","dW5eQOoY0O+jad7vBr1V/sVFqmPSu54srqVUV/d/Vpg=","LpWV7z/y6vWS+LuNfei/SkE5s7mW5v9KQ9sJEDJfbKU=","QbQJjh/WfRutAv2z0J8S9dO8jS6/MFjZBpcwknNq6Dk=","/56yq7rtHKPCvG4mgxMbtTNKyU4MeikySfZ8ObwKOko=","G+oVitV6QfsyoJoQICxlU3aSJYXvRICPvjVjDjuCb+k=","aCbO5Fk5Ua1TaJkHMv5WaWc3hy3CLTZGRrL4At/60iU=","9UgECSLvJqib5D2kompgf0jdliWR7NXiQ2PeO7llJyA=","Gf/ylsKRXGTwuxVq5/OzyHxE239GMHNlAWNcy2lzpRU=","+NexnJu1Mw9gN+IJ6iFNqyKtQond3p8BN6iow8hbqXM=","nHVuhfWTSzQtuU+ifemlV5mL9AHO/Ohwp7XDIBBHg/Q=","sYE1mTJ1i4v5a5nunficijhNy0jcH8T0B9Jhw7LHHqE=","oUpQPKjwnG3jhQtgD
GLtyjyTJmrqoENa8C6gx+qWIls=","HAOTg+Fg3H1Ej24mw+P9lXfi+4mPL5EKwAd4aNQOV6M=","2Wv4GiithwNukRKV06clevnQQYCzXmSS/+/OJtXgsXQ=","1mfy94KpcItqshH9+gwqV6jccupcaMpVsF28New8zDY=","vS7O4ozHIQZJWBiov+mkpI27GE8zAmVCEkRcP3NDyNE="],"checkpoint":{"envelope":"rekor.sigstore.dev - 1193050959916656506\n524299001\nQ1pH2eA24TKCy5W6GBy8l/EhKxf61PsNNG0+p0pSB+4=\n\n— rekor.sigstore.dev wNI9ajBFAiAbfL8oh3NLQslQpw9G2ytBxnYotw+l7JKi0atnxziK/QIhAMY+87gOtzZ1IKs1Qgn1yFbu+rpzH74FvqK7ntE25Ytk\n"}},"canonicalizedBody":"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiZHNzZSIsInNwZWMiOnsiZW52ZWxvcGVIYXNoIjp7ImFsZ29yaXRobSI6InNoYTI1NiIsInZhbHVlIjoiM2UyZTk1NTgyMjk2YTZiYzNjZTAwNGEzNzY5Yjk3ODU4MTkyNmJkNmNiZTY0NDk4OTEwMjdjZWUyMWFiMDQ1MyJ9LCJwYXlsb2FkSGFzaCI6eyJhbGdvcml0aG0iOiJzaGEyNTYiLCJ2YWx1ZSI6IjE4ZjZjYjFlYzljNWVhMzI5YWQ1ZDQ1ZTQ4YTBkYWM4OGZiZTE2MGNiODIyYTc5MGU4ODJjM2Q0MDcwMTEwOTQifSwic2lnbmF0dXJlcyI6W3sic2lnbmF0dXJlIjoiTUVVQ0lRRGFtSFd1ajkxeUt5OG5qd250N3VBRFF6RzRyd0VoQjdiOHRmMGJLOE1reHdJZ1pLeFNYcng2NVZxSDA1N082SXpMUGNFNTVCbDQ1K0NnZmxFOEdCaWgwemc9IiwidmVyaWZpZXIiOiJMUzB0TFMxQ1JVZEpUaUJEUlZKVVNVWkpRMEZVUlMwdExTMHRDazFKU1VoQmFrTkRRbTlsWjBGM1NVSkJaMGxWU0d4RlpYbFZjMlprVmpRMmVrTnFTa05ZVERVd2RXSTRTMXBuZDBObldVbExiMXBKZW1vd1JVRjNUWGNLVG5wRlZrMUNUVWRCTVZWRlEyaE5UV015Ykc1ak0xSjJZMjFWZFZwSFZqSk5ValIzU0VGWlJGWlJVVVJGZUZaNllWZGtlbVJIT1hsYVV6RndZbTVTYkFwamJURnNXa2RzYUdSSFZYZElhR05PVFdwVmVFMUVTVE5OVkdzd1RYcEpORmRvWTA1TmFsVjRUVVJKTTAxVWF6Rk5la2swVjJwQlFVMUdhM2RGZDFsSUNrdHZXa2w2YWpCRFFWRlpTVXR2V2tsNmFqQkVRVkZqUkZGblFVVkZiMmxGUVd4NmRWWnBPVWRvUTBkU1lWQTRZa3B0VmpoVlYzQXhXVFJSZG5samNGZ0tVRXhGT1VKR1ZHUnZaek1yVFd0QlRVVm1iak5JVEhOVU1sWnZhMUJCUkdwRlFrZGpWWE5wWkhKMWR5OHdWSHBYZUdGUFEwSmhXWGRuWjFkcFRVRTBSd3BCTVZWa1JIZEZRaTkzVVVWQmQwbElaMFJCVkVKblRsWklVMVZGUkVSQlMwSm5aM0pDWjBWR1FsRmpSRUY2UVdSQ1owNVdTRkUwUlVablVWVnhSRFo2Q2xnM1pEUkRlblZIUjNoM1QwbG5hR3hJTTJjd1JXRkpkMGgzV1VSV1VqQnFRa0puZDBadlFWVXpPVkJ3ZWpGWmEwVmFZalZ4VG1wd1MwWlhhWGhwTkZrS1drUTRkMkZSV1VSV1VqQlNRVkZJTDBKR09IZFlXVnBpWVVoU01HTklUVFpNZVRsdVlWaFNiMlJYU1hWWk1qbDBUREpPZG1GSFZubGFVekZvWVZNNWRBcGlNbEpzWWtNeGVtRlhaSFZoVnpWdVRIazFibUZZVW05a1YwbDJaREk1ZVdFeVduTmlNMlI2VEROT2NGb3lOSFJpVnpscldsZDNkV1ZYTVhOUlNFcHNDbHB1VFhaaFIxWm9Xa2hOZG1KWFJuQmlha0UxUW1kdmNrSm5SVVZCV1U4dlRVRkZRa0pEZEc5a1NGSjNZM3B2ZGt3elVuWmhNbFoxVEcxR2FtUkhiSFlLWW01TmRWb3liREJoU0ZacFpGaE9iR050VG5aaWJsSnNZbTVSZFZreU9YUk5RamhIUTJselIwRlJVVUpuTnpoM1FWRkpSVVZZWkhaamJYUnRZa2M1TXdwWU1sSndZek5DYUdSSFRtOU5SRmxIUTJselIwRlJVVUpuTnpoM1FWRk5SVXRFVW14T1JHY3lUMVJKTTAxRVRUVk5SRlpwVDFSS2FVMUVUWGxaZWxrekNrNUVUbWhhVjBVMFdXMUdhVTlYV1hsT1JGVXdUVEpKZDBwbldVdExkMWxDUWtGSFJIWjZRVUpDUVZGWlZUSnNibUpwUWs1aU1sSnNZa05DTTJGWVVtOEtTVVpPY0ZvelRqQmlNMHBzVFVOVlIwTnBjMGRCVVZGQ1p6YzRkMEZSVlVWR01rNTJZVWRXZVZwVE1XaGhVemwwWWpKU2JHSkRNWHBoVjJSMVlWYzFiZ3BOUWpCSFEybHpSMEZSVVVKbk56aDNRVkZaUlVRelNteGFiazEyWVVkV2FGcElUWFppVjBad1ltcEJOMEpuYjNKQ1owVkZRVmxQTDAxQlJVbENRekJOQ2tzeWFEQmtTRUo2VDJrNGRtUkhPWEphVnpSMVdWZE9NR0ZYT1hWamVUVnVZVmhTYjJSWFNqRmpNbFo1V1RJNWRXUkhWblZrUXpWcVlqSXdkMkYzV1VzS1MzZFpRa0pCUjBSMmVrRkNRMUZTWkVSR2RHOWtTRkozWTNwdmRrd3laSEJrUjJneFdXazFhbUl5TUhaWk1qbHZXbGhLYkV4WFJuQk1NakYyV2tkV2N3cE1XRTV3V2pJMWNHSnRZM1pNYldSd1pFZG9NVmxwT1ROaU0wcHlXbTE0ZG1RelRYWmpNbXh1WW1reGRHSXlVbXhpUXpVMVlsZDRRV050Vm0xamVUbHZDbHBYUm10amVUbDBXVmRzZFUxRVowZERhWE5IUVZGUlFtYzNPSGRCVVc5RlMyZDNiMDVIVlRCUFJGazFUV3BqZDAxNmEzZE9WMGsxVFcxSmQwMTZTbW9LVG1wak1FMHlSbXhaVkdocFdWZEpOVnBxU1RCT1ZGRjZXV3BCWkVKbmIzSkNaMFZGUVZsUEwwMUJSVXhDUVRoTlJGZGtjR1JIYURGWmFURnZZak5PTUFwYVYxRjNUMmRaUzB0M1dVSkNRVWRFZG5wQlFrUkJVWE5FUTNCdlpFaFNkMk42YjNaTU1tUndaRWRvTVZscE5XcGlNakIyV1RJNWIxcFlTbXhNVjBad0Nrd3lNWFphUjFaelRGaE9jRm95TlhCaWJXTjNUMEZaUz
B0M1dVSkNRVWRFZG5wQlFrUlJVWEZFUTJjd1dsUlJORTVxYTNsT2VrRjZUMVJCTVZscWEza0tXV3BCZWsxdFRUSk9lbEY2V1ZkV2FFOUhTbWhaYW14dFRXcFJNVTVFVG1sTlFqaEhRMmx6UjBGUlVVSm5OemgzUVZFMFJVVlJkMUJqYlZadFkzazVid3BhVjBaclkzazVkRmxYYkhWTlFtOUhRMmx6UjBGUlVVSm5OemgzUVZFNFJVUkJkMHROVkVFeVRucFpNMDVVU1RGTlJFRnpRbWR2Y2tKblJVVkJXVTh2Q2sxQlJWRkNRalJOU0Vkb01HUklRbnBQYVRoMldqSnNNR0ZJVm1sTWJVNTJZbE01YW1JeWFHeGpiVlYwV1ZkcmQwZEJXVXRMZDFsQ1FrRkhSSFo2UVVJS1JWRlJTMFJCWnpGT1JHY3hUVVJyZVUxNlFuSkNaMjl5UW1kRlJVRlpUeTlOUVVWVFFrWXdUVmN5YURCa1NFSjZUMms0ZGxveWJEQmhTRlpwVEcxT2RncGlVemxxWWpKb2JHTnRWWFJaVjJ0MllsYzVhMXBYZDNSak1teHVZbTFzZFZwNU9IVmFNbXd3WVVoV2FVd3paSFpqYlhSdFlrYzVNMk41T1hwaFYyUjFDa3hYTVhaYVIxWnpURzVzZEdKRlFubGFWMXA2VERKb2JGbFhVbnBNTWpGb1lWYzBkMDlCV1V0TGQxbENRa0ZIUkhaNlFVSkZkMUZ4UkVObk1GcFVVVFFLVG1wcmVVNTZRWHBQVkVFeFdXcHJlVmxxUVhwTmJVMHlUbnBSZWxsWFZtaFBSMHBvV1dwc2JVMXFVVEZPUkU1cFRVTkZSME5wYzBkQlVWRkNaemM0ZHdwQlVsRkZSWGQzVW1ReU9YbGhNbHB6WWpOa1pscEhiSHBqUjBZd1dUSm5kMWhuV1V0TGQxbENRa0ZIUkhaNlFVSkdVVkpSUkVVMWIyUklVbmRqZW05MkNrd3laSEJrUjJneFdXazFhbUl5TUhaWk1qbHZXbGhLYkV4WFJuQk1NakYyV2tkV2MweFlUbkJhTWpWd1ltMWpkbGxYVGpCaFZ6bDFZM2s1ZVdSWE5Yb0tUSHBGTkU5RVZYcFBSRWt4VFdwRk1Vd3lSakJrUjFaMFkwaFNla3g2UlhkSFFWbExTM2RaUWtKQlIwUjJla0ZDUm1kUlMwUkJhSEJpYmxKc1kyMDFhQXBpUkVOQ2FYZFpTMHQzV1VKQ1FVaFhaVkZKUlVGblVqbENTSE5CWlZGQ00wRk9NRGxOUjNKSGVIaEZlVmw0YTJWSVNteHVUbmRMYVZOc05qUXphbmwwQ2k4MFpVdGpiMEYyUzJVMlQwRkJRVUp0YVdONVkyaEpRVUZCVVVSQlJXZDNVbWRKYUVGS1JFTnVOaXRRY0RkelVFRXlXREpWTkRRd1VrNXpaakp5VEhZS1lYQnRkSG96T0hOVFVXUXdlRFl6WWtGcFJVRnhUQ3RxTkVGRVdVdGlOakZSUTBkelJtOVVWeTlJU1VsaWRrSm5ja1pIT0d0bVlrNXFWblF3ZVVJd2R3cERaMWxKUzI5YVNYcHFNRVZCZDAxRVlWRkJkMXBuU1hoQlVDOXBMekYzTWxsMUsxVTBVbTFXU1RkdVMzcEtVaXRxVDFwMFpESnhOSGc0Wm13MVVGcEZDa3BsVWpFM2VtaHZUekIyUVc0MVZUZDJNMWxVWmtWd2FIbFJTWGhCUzFock9YTmFaVTlpWkVkNFRuQTBZeXM1VW1aVk1XVlhiRVIzVVVKcGRtRnFPRUlLUms4dlIyVTJiVEpOY1ZOWmFubE5iMUp1TUhwaWVVRldiRzUzUVZCblBUMEtMUzB0TFMxRlRrUWdRMFZTVkVsR1NVTkJWRVV0TFMwdExRbz0ifV19fQ=="}],"timestampVerificationData":{"rfc3161Timestamps":[{"signedTimestamp":"MIIE6DADAgEAMIIE3wYJKoZIhvcNAQcCoIIE0DCCBMwCAQMxDTALBglghkgBZQMEAgEwgcEGCyqGSIb3DQEJEAEEoIGxBIGuMIGrAgEBBgkrBgEEAYO/MAIwMTANBglghkgBZQMEAgEFAAQgL8EVUsc9XlLLJqtXr6YRuWz0hOvw0MtrmFCmG982ZRUCFFJOzAl7OzVP5fK7+3+RNMfm9ZjwGA8yMDI1MTAyNzE5NDMyOFowAwIBAQIIIKCnEUeTQlGgMqQwMC4xFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEVMBMGA1UEAxMMc2lnc3RvcmUtdHNhoIICFDCCAhAwggGWoAMCAQICFDoTVC8MkGHuvMFDL8uKjosqI4sMMAoGCCqGSM49BAMDMDkxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEgMB4GA1UEAxMXc2lnc3RvcmUtdHNhLXNlbGZzaWduZWQwHhcNMjUwNDA4MDY1OTQzWhcNMzUwNDA2MDY1OTQzWjAuMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxFTATBgNVBAMTDHNpZ3N0b3JlLXRzYTB2MBAGByqGSM49AgEGBSuBBAAiA2IABOK2tmfISjYoNk/ZBYwgE6Bht9I5MvlkL9wcy/pir4dUijUf1MLsLHzQoOLK8qGAHfBOorKL1QNzOGqDXZvUA4udGfJ0xMr6oHwz7UyMFyLX4lvwBX9Ve7sJG5AKI9McXKNqMGgwDgYDVR0PAQH/BAQDAgeAMB0GA1UdDgQWBBSJ/XlDh8/QZUbDAkbHLHNbfbTrAzAfBgNVHSMEGDAWgBSY7AHvf7tR/9SVHm+KiJhTB4nOvzAWBgNVHSUBAf8EDDAKBggrBgEFBQcDCDAKBggqhkjOPQQDAwNoADBlAjA7abFf+imjtKshf1DLF9mklFykBXaBk6bUBnNH7atXLLpRzxhunbJsjDXdyQfhtxECMQDmo7wXI6SZim+Db/tk2qI/FHOfm+ooehVwgiq2kqrqXtO86rMDHFyU3tXBMbx775cxggHaMIIB1gIBATBRMDkxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEgMB4GA1UEAxMXc2lnc3RvcmUtdHNhLXNlbGZzaWduZWQCFDoTVC8MkGHuvMFDL8uKjosqI4sMMAsGCWCGSAFlAwQCAaCB/DAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQQwHAYJKoZIhvcNAQkFMQ8XDTI1MTAyNzE5NDMyOFowLwYJKoZIhvcNAQkEMSIEIPwVwIL7LNdIrKV+7ucTuOn/yueiH4ymeIzy9J5NQXteMIGOBgsqhkiG9w0BCRACLzF/MH0wezB5BCCF+Se8B6tiysO0Q1bBDvyBssaIP9p6uebYcNnROs0FtzBVMD2kOzA5MRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxIDAeBgNVBAMTF3NpZ3N0b3JlLXRzYS1zZWxmc2lnbmVkAhQ6E1QvDJBh7rzBQy/Lio6LKiOLDDAKBggqhkjOPQQDAgRmMGQCMBz5gIcZXI+
155j6BQT98ag+XO4HZ++e+m+bAf/zRP4rl6sVSdyLDYRuMlsj8UiK6AIwLYDiEnBG7B5KbHNh9PbmdDDtxM17OXIPfYLZqMYZ1oHrf/KI+VGo45yjTt23bJsB"}]}},"dsseEnvelope":{"payload":"ewogICJfdHlwZSI6ICJodHRwczovL2luLXRvdG8uaW8vU3RhdGVtZW50L3YxIiwKICAic3ViamVjdCI6IFsKICAgIHsKICAgICAgIm5hbWUiOiAibW9kZWxfY2FjaGUiLAogICAgICAiZGlnZXN0IjogewogICAgICAgICJzaGEyNTYiOiAiZjY0NzhiZmViNTU4NDFmYzVlMjU0MmFhNzFjOTdjN2Q0YmQwZTcwMGY0Yjc3Nzg0Y2VhNWUzZGNmOGMyMDU0YiIKICAgICAgfQogICAgfQogIF0sCiAgInByZWRpY2F0ZVR5cGUiOiAiaHR0cHM6Ly9tb2RlbF9zaWduaW5nL3NpZ25hdHVyZS92MS4wIiwKICAicHJlZGljYXRlIjogewogICAgInJlc291cmNlcyI6IFsKICAgICAgewogICAgICAgICJuYW1lIjogImNvbmZpZy5qc29uIiwKICAgICAgICAiYWxnb3JpdGhtIjogInNoYTI1NiIsCiAgICAgICAgImRpZ2VzdCI6ICIzZTMxMmFiOTc3MzU3Y2ZjN2VhNmJmODdjN2E0MjI3YzcwYmQxODdkOTEzODNmMjMyMDBjNTAzMzY2NDUxMWVmIgogICAgICB9LAogICAgICB7CiAgICAgICAgIm5hbWUiOiAiZ2VuZXJhdGlvbl9jb25maWcuanNvbiIsCiAgICAgICAgImFsZ29yaXRobSI6ICJzaGEyNTYiLAogICAgICAgICJkaWdlc3QiOiAiMGQyOTYwMzZiZTc3ZDNmMmI1M2U0NmQ1MjY4ZjdmODFlZTYyMTdkNjlhNWU3ZGY5YTc4MGE1OTlkY2E0MmM3NSIKICAgICAgfSwKICAgICAgewogICAgICAgICJuYW1lIjogIm1vZGVsLTAwMDAxLW9mLTAwMDA0LnNhZmV0ZW5zb3JzIiwKICAgICAgICAiYWxnb3JpdGhtIjogInNoYTI1NiIsCiAgICAgICAgImRpZ2VzdCI6ICJkM2UwNTQ3ZmJmY2IwZTAxMGM5NjQwYTk5ZTUwOWMwNWE3MDAzMDJjMDkyNzhlNmUzNTEzY2YyMTRkNTZhOTUzIgogICAgICB9LAogICAgICB7CiAgICAgICAgIm5hbWUiOiAibW9kZWwtMDAwMDItb2YtMDAwMDQuc2FmZXRlbnNvcnMiLAogICAgICAgICJhbGdvcml0aG0iOiAic2hhMjU2IiwKICAgICAgICAiZGlnZXN0IjogIjFiN2FlNjM3YjMzYmY4MzNjMmIyMmEzMzgyZjdiNjcxYzc3NTRjMTliZWFkMGQyZjNmODRkMjk3N2I4MzQ0NDciCiAgICAgIH0sCiAgICAgIHsKICAgICAgICAibmFtZSI6ICJtb2RlbC0wMDAwMy1vZi0wMDAwNC5zYWZldGVuc29ycyIsCiAgICAgICAgImFsZ29yaXRobSI6ICJzaGEyNTYiLAogICAgICAgICJkaWdlc3QiOiAiYjBhNDM3YzllMmNhNTg3ZmZkNzMyZjBhNDQyYzZjNDQyY2NhNmE0ZWJkNThjM2M3ODFkMzMyMTMyMDRlNmQ0YiIKICAgICAgfSwKICAgICAgewogICAgICAgICJuYW1lIjogIm1vZGVsLTAwMDA0LW9mLTAwMDA0LnNhZmV0ZW5zb3JzIiwKICAgICAgICAiYWxnb3JpdGhtIjogInNoYTI1NiIsCiAgICAgICAgImRpZ2VzdCI6ICIyYmE3YTljOTI3ZmZmZGJlYWUyMWE1MzQ3MDZmYzNhODE3ZGFmMDlhNzYxMDJkYTBmNDM0YjFhOGM3MzUxNWJmIgogICAgICB9LAogICAgICB7CiAgICAgICAgIm5hbWUiOiAibW9kZWwuc2FmZXRlbnNvcnMuaW5kZXguanNvbiIsCiAgICAgICAgImFsZ29yaXRobSI6ICJzaGEyNTYiLAogICAgICAgICJkaWdlc3QiOiAiZGVhZWNjOTJlMDBiNjZhMmZkZDI0NTc2MTQ4NzYwZTkzMGRjMzliNmM1YzBkYjhjM2MzZTM0ZWNkOGUwZGE4ZiIKICAgICAgfSwKICAgICAgewogICAgICAgICJuYW1lIjogInNwZWNpYWxfdG9rZW5zX21hcC5qc29uIiwKICAgICAgICAiYWxnb3JpdGhtIjogInNoYTI1NiIsCiAgICAgICAgImRpZ2VzdCI6ICI1ZDUyNDc2MGQ3Yzk4MmVjZmJlZmY0YzJmYmQxNzJlMmMxMmU3ZWYzNjM2ZTg1MmQxMTFiYzFmNjBiYjBhNDY0IgogICAgICB9LAogICAgICB7CiAgICAgICAgIm5hbWUiOiAidG9rZW5pemVyLmpzb24iLAogICAgICAgICJhbGdvcml0aG0iOiAic2hhMjU2IiwKICAgICAgICAiZGlnZXN0IjogIjk1M2IyNzMwZDIzY2ExOWU3ZGNhOTZmNzVmM2UxMGI0OTdiYjY3OTI5MGIwNmQ4OTgxMTkwYmZmMjAzOWZjNzIiCiAgICAgIH0sCiAgICAgIHsKICAgICAgICAibmFtZSI6ICJ0b2tlbml6ZXJfY29uZmlnLmpzb24iLAogICAgICAgICJhbGdvcml0aG0iOiAic2hhMjU2IiwKICAgICAgICAiZGlnZXN0IjogImZjM2VkODVjZDM5MmJlZmU5ZjhiODA4Nzg0ZmFmZGVjNDczMDc1Yjg5MGE3MzYwZDYyYTE1YWY1YTAyOTMyMzIiCiAgICAgIH0KICAgIF0sCiAgICAic2VyaWFsaXphdGlvbiI6IHsKICAgICAgImhhc2hfdHlwZSI6ICJzaGEyNTYiLAogICAgICAiaWdub3JlX3BhdGhzIjogWwogICAgICAgICIuZ2l0aHViIiwKICAgICAgICAiLmdpdGF0dHJpYnV0ZXMiLAogICAgICAgICIuZ2l0IiwKICAgICAgICAiLmdpdGlnbm9yZSIKICAgICAgXSwKICAgICAgIm1ldGhvZCI6ICJmaWxlcyIsCiAgICAgICJhbGxvd19zeW1saW5rcyI6IGZhbHNlCiAgICB9CiAgfQp9","payloadType":"application/vnd.in-toto+json","signatures":[{"sig":"MEUCIQDamHWuj91yKy8njwnt7uADQzG4rwEhB7b8tf0bK8MkxwIgZKxSXrx65VqH057O6IzLPcE55Bl45+CgflE8GBih0zg="}]}} \ No newline at end of file diff --git a/signatures/verification-instructions.txt 
b/signatures/verification-instructions.txt new file mode 100644 index 0000000..2b350fd --- /dev/null +++ b/signatures/verification-instructions.txt @@ -0,0 +1,40 @@ +==================================== +MODEL SIGNATURE VERIFICATION GUIDE +==================================== + +Model: CohereLabs/c4ai-command-r7b-12-2024 +Revision: main +Environment: PRODUCTION +Signed at: 2025-10-27T18:55:09Z +Workflow Run: https://github.com/cohere-ai/model-signing/actions/runs/18853825215 + +TRANSPARENCY LOG +---------------- +This signature is recorded in the Sigstore Rekor transparency log. + +Rekor Entry: https://search.sigstore.dev/?logIndex=646203249 +Log Index: 646203249 +Identity: https://github.com/cohere-ai/model-signing/.github/workflows/sign-model.yml@refs/heads/main + +VERIFICATION +------------ +To verify this signature locally: + +1. Install the model-signing package: + pip install model-signing + +2. Install huggingface_hub and download the model: + pip install huggingface_hub + huggingface-cli download CohereLabs/c4ai-command-r7b-12-2024 --revision main --local-dir ./model + +3. Verify the signature: + model_signing verify ./model \ + --signature c4ai-command-r7b-12-2024.sig \ + --identity "https://github.com/cohere-ai/model-signing/.github/workflows/sign-model.yml@refs/heads/main" \ + --identity_provider "https://token.actions.githubusercontent.com" \ + --ignore_unsigned_files + +Note: This signature was created with selective file inclusion (*.safetensors,*.json). + Use --ignore_unsigned_files to verify only the files that were signed. + +==================================== diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..d5a6b7a --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,34 @@ +{ + "additional_special_tokens": [ + "<|START_RESPONSE|>", + "<|END_RESPONSE|>" + ], + "bos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|END_OF_TURN_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..3e13c77 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:953b2730d23ca19e7dca96f75f3e10b497bb679290b06d8981190bff2039fc72 +size 20124922 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..0fc8aca --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,367 @@ +{ + "add_bos_token": true, + "add_eos_token": false, + "add_prefix_space": false, + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "3": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "4": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "5": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "6": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "7": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "255000": { + "content": "<|START_OF_TURN_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255001": { + "content": "<|END_OF_TURN_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "255002": { + "content": "<|YES_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255003": { + "content": "<|NO_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255004": { + "content": "<|GOOD_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255005": { + "content": "<|BAD_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255006": { + "content": "<|USER_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255007": { + "content": "<|CHATBOT_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255008": { + "content": "<|SYSTEM_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255009": { + "content": "<|USER_0_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255010": { + "content": "<|USER_1_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255011": { + "content": "<|USER_2_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255012": { + "content": "<|USER_3_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255013": { + "content": "<|USER_4_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255014": { + "content": "<|USER_5_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255015": { + "content": "<|USER_6_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255016": { + "content": "<|USER_7_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255017": { + "content": "<|USER_8_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255018": { + "content": "<|USER_9_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255019": { + "content": "<|START_THINKING|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": false + }, + "255020": { + "content": "<|END_THINKING|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255021": { + "content": "<|START_RESPONSE|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "255022": { + "content": "<|END_RESPONSE|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "255023": { + "content": "<|START_ACTION|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255024": { + "content": "<|END_ACTION|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255025": { + "content": "<|START_TOOL_RESULT|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255026": { + "content": "<|END_TOOL_RESULT|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255027": { + "content": "<|EXTRA_8_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255028": { + "content": "<|NEW_FILE|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "255029": { + "content": "<|BEGINNING_OF_PREFIX_FIM_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255030": { + "content": "<|BEGINNING_OF_MIDDLE_FIM_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255031": { + "content": "<|BEGINNING_OF_SUFFIX_FIM_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "255032": { + "content": "<|END_OF_MIDDLE_FIM_TOKEN|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + } + }, + "bos_token": "", + "chat_template": [ + { + "name": "default", + "template": "{{ bos_token }}{% if documents %}\n{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n 
\"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. 
When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"\" and \"\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"span\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. 
DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n{%- else -%}\n{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}\n {%- set system_message = messages[0]['content'] %}{% elif false == true %}\n {%- set loop_messages = messages %}{% set system_message = '' %}\n{%- else %}\n {%- set loop_messages = messages %}\n {%- set system_message = false %}\n{%- endif %}\n{%- if system_message != false -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}\n{%- else -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|END_OF_TURN_TOKEN|>' }}\n{%- endif %}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {%- set content = message['content'] -%}\n {%- if message['role'] == 'user' -%}\n {{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message['role'] == 'assistant' -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' + content.strip() + '<|END_RESPONSE|><|END_OF_TURN_TOKEN|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' }}\n{%- endif %}\n{% endif %}" + }, + { + "name": "tool_use", + "template": "{{ bos_token }}{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": 
{}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. 
When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"\" and \"\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"span\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. 
DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" + }, + { + "name": "rag", + "template": "{{ bos_token }}{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set 
tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. 
Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"\" and \"\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"span\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. 
Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" + } + ], + "additional_special_tokens": [ + "<|START_RESPONSE|>", + "<|END_RESPONSE|>" + ], + "clean_up_tokenization_spaces": false, + "eos_token": "<|END_OF_TURN_TOKEN|>", + "extra_special_tokens": {}, + "legacy": true, + "merges_file": null, + "model_max_length": 1000000000000000019884624838656, + "pad_token": "", + "sp_model_kwargs": {}, + "spaces_between_special_tokens": false, + "tokenizer_class": "CohereTokenizer", + "unk_token": "", + "use_default_system_prompt": false, + "vocab_file": null +}