初始化项目,由ModelHub XC社区提供模型
Model: mlabonne/ChimeraLlama-3-8B-v2 Source: Original Platform
This commit is contained in:
33
mergekit_config.yml
Normal file
33
mergekit_config.yml
Normal file
@@ -0,0 +1,33 @@
---
# mergekit configuration for ChimeraLlama-3-8B-v2:
# a DARE-TIES merge of six Llama-3-8B fine-tunes onto the base model.
# density = fraction of delta weights kept per model (DARE pruning);
# weight  = relative contribution of each model in the final merge.
models:
  - model: NousResearch/Meta-Llama-3-8B
    # No parameters necessary for base model
  - model: NousResearch/Meta-Llama-3-8B-Instruct
    parameters:
      density: 0.6
      weight: 0.55
  - model: mlabonne/OrpoLlama-3-8B
    parameters:
      density: 0.55
      weight: 0.05
  - model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.55
      weight: 0.1
  - model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 0.55
      weight: 0.05
  - model: cloudyu/Meta-Llama-3-8B-Instruct-DPO
    parameters:
      density: 0.55
      weight: 0.15
  - model: vicgalle/Configurable-Llama-3-8B-v0.3
    parameters:
      density: 0.55
      weight: 0.1
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  # Use an int8 mask during merging to reduce memory usage.
  int8_mask: true
dtype: float16
||||
Reference in New Issue
Block a user