# merge_lora.py — merge a LoRA adapter checkpoint into its base model.
"""Merge a trained LoRA adapter into its Qwen2 base model and save a standalone copy."""

from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import PeftModel, PeftConfig

# Directory containing the trained LoRA adapter checkpoint.
PEFT_PATH = "./qwen_lora_checkpoint"
# Output directory for the merged, self-contained model.
MERGED_PATH = "./qwen2_ad_audit_merged"


def main() -> None:
    """Load base model + LoRA adapter, fold the adapter weights in, and save.

    Side effects: downloads/loads the base model named in the adapter config,
    then writes the merged model and its tokenizer to ``MERGED_PATH``.
    """
    # Read the LoRA config to discover which base model the adapter was trained on.
    config = PeftConfig.from_pretrained(PEFT_PATH)

    # Load the base model. trust_remote_code is required for models (like some
    # Qwen variants) that ship custom modeling code with their checkpoint.
    base = AutoModelForCausalLM.from_pretrained(
        config.base_model_name_or_path, trust_remote_code=True
    )

    # Attach the LoRA adapter weights on top of the base model.
    model = PeftModel.from_pretrained(base, PEFT_PATH)

    # Fold the LoRA deltas into the base weights and strip the PEFT wrappers,
    # yielding a plain transformers model.
    model = model.merge_and_unload()

    # Save the merged full model, plus the tokenizer so the output directory
    # is usable on its own without referencing the original base checkpoint.
    model.save_pretrained(MERGED_PATH)
    tokenizer = AutoTokenizer.from_pretrained(
        config.base_model_name_or_path, trust_remote_code=True
    )
    tokenizer.save_pretrained(MERGED_PATH)


if __name__ == "__main__":
    main()
|