Files
llama-3-8b-base-epsilon-dpo…/eval_results.json
ModelHub XC 460b68bc24 Initialize project; model provided by the ModelHub XC community
Model: W-61/llama-3-8b-base-epsilon-dpo-hh-helpful-8xh200
Source: Original Platform
2026-04-24 10:33:04 +08:00

20 lines
777 B
JSON

{
"epoch": 1.0,
"eval_kl/n_epsilon_steps": 0.3836805522441864,
"eval_kl/p_epsilon_steps": 0.6158854365348816,
"eval_logits/chosen": -0.8295226693153381,
"eval_logits/rejected": -0.7224703431129456,
"eval_logps/chosen": -208.40182495117188,
"eval_logps/ref_chosen": -87.82356262207031,
"eval_logps/ref_rejected": -82.81887817382812,
"eval_logps/rejected": -244.0940704345703,
"eval_loss": 0.6479349136352539,
"eval_rewards/accuracies": 0.6414930820465088,
"eval_rewards/chosen": -0.3830554485321045,
"eval_rewards/margins": 0.12642447650432587,
"eval_rewards/rejected": -0.5094798803329468,
"eval_runtime": 22.2873,
"eval_samples": 2339,
"eval_samples_per_second": 104.948,
"eval_steps_per_second": 0.853
}