初始化项目,由ModelHub XC社区提供模型

Model: MathMindsAGI/Test_context_pretrain
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-04-11 11:04:57 +08:00
commit 200675bd2d
23 changed files with 28409 additions and 0 deletions

View File

@@ -0,0 +1,8 @@
{
"epoch": 1.0,
"total_flos": 5.994863411375112e+18,
"train_loss": 0.031714059101823636,
"train_runtime": 3465.0177,
"train_samples_per_second": 1387.365,
"train_steps_per_second": 5.42
}

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
#SBATCH --ntasks=1
#SBATCH --nodes=1
#SBATCH --partition=short-unkillable
#SBATCH --gres=gpu:h100:4
#SBATCH -c 24
#SBATCH --mem=64G
#SBATCH -t 2:59:0
set -euo pipefail

# Route all Hugging Face caches and temp files onto scratch storage.
# Every variable honours a pre-set value, falling back to the scratch layout.
SCRATCH_ROOT="${SCRATCH_ROOT:-/network/scratch/k/kamran.chitsaz}"
HF_HOME="${HF_HOME:-${SCRATCH_ROOT}/.cache/huggingface}"
HF_DATASETS_CACHE="${HF_DATASETS_CACHE:-${HF_HOME}/datasets}"
TRANSFORMERS_CACHE="${TRANSFORMERS_CACHE:-${HF_HOME}/transformers}"
TMPDIR="${TMPDIR:-${SCRATCH_ROOT}/tmp}"
export HF_HOME HF_DATASETS_CACHE TRANSFORMERS_CACHE TMPDIR

# Make sure the cache/temp directories exist before anything writes to them.
mkdir -p "${HF_DATASETS_CACHE}" "${TRANSFORMERS_CACHE}" "${TMPDIR}"

# Worker counts picked up by the pretraining wrapper invoked below.
export PREPROCESSING_NUM_WORKERS=6
export DATALOADER_NUM_WORKERS=6

# Delegate the actual run to the per-configuration pretraining script.
bash scripts/composition/op-difficulty-10B/script_pt/run_pretrain_id2-10_0.25easy_0.25medium_0.5hard.sh

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Launch the id2-10 (0.25 easy / 0.25 medium / 0.5 hard) 10B-token pretraining
# configuration via scripts/meta_run.sh, after validating the environment.
#
# Required env:  CUDA_VISIBLE_DEVICES
# Optional env:  PREPROCESSING_NUM_WORKERS, DATALOADER_NUM_WORKERS,
#                DATASET_DIR_ROOT, LLAMA_BIN, LLAMA_EXTRA_ARGS,
#                EVAL_DATA_ROOT, WANDB_PROJECT, WANDB_ENTITY
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../../.." && pwd)"
cd "${REPO_ROOT}"

# llamafactory-cli launches distributed training via `torchrun`, so the venv
# bin dir must be on PATH even when the CLI itself is invoked by absolute path.
export PATH="${REPO_ROOT}/.venv/bin:${PATH}"

# Default worker count: SLURM allocation, else host CPU count, else 1.
if [[ -n "${SLURM_CPUS_PER_TASK:-}" ]]; then
  CPU_WORKERS_DEFAULT="${SLURM_CPUS_PER_TASK}"
elif command -v nproc >/dev/null 2>&1; then
  CPU_WORKERS_DEFAULT="$(nproc)"
else
  CPU_WORKERS_DEFAULT=1
fi
DEFAULT_PREPROCESSING_WORKERS="${PREPROCESSING_NUM_WORKERS:-${CPU_WORKERS_DEFAULT}}"
DEFAULT_DATALOADER_WORKERS="${DATALOADER_NUM_WORKERS:-${CPU_WORKERS_DEFAULT}}"

if [[ -z "${CUDA_VISIBLE_DEVICES:-}" ]]; then
  echo "CUDA_VISIBLE_DEVICES must be set before running this script" >&2
  exit 1
fi

DATASET_DIR_ROOT="${DATASET_DIR_ROOT:-data}"

# Resolve the CLI path *before* checking executability so that a
# caller-supplied LLAMA_BIN override is honoured; previously the check always
# tested the venv default and failed even when a valid override was provided.
LLAMA_BIN_DEFAULT="${REPO_ROOT}/.venv/bin/llamafactory-cli"
LLAMA_BIN="${LLAMA_BIN:-${LLAMA_BIN_DEFAULT}}"
if [[ ! -x "${LLAMA_BIN}" ]]; then
  echo "Missing ${LLAMA_BIN}. Run scripts/setup/install_local_llamafactory.sh first." >&2
  exit 1
fi

# Both composition dataset splits must exist before launching.
for split in train test; do
  if [[ ! -d "${REPO_ROOT}/${DATASET_DIR_ROOT}/composition/${split}" ]]; then
    echo "Missing ${DATASET_DIR_ROOT}/composition/${split}. Run scripts/composition/prepare_hf_composition_data.sh first." >&2
    exit 1
  fi
done

export WANDB_PROJECT="${WANDB_PROJECT:-Interplay-LM-Reasoning}"
export WANDB_ENTITY="${WANDB_ENTITY:-kmchiti}"

# Prepend the worker defaults to any caller-provided extra args so explicit
# settings in LLAMA_EXTRA_ARGS still win (later args override earlier ones).
DEFAULT_LLAMA_ARGS=(
  "preprocessing_num_workers=${DEFAULT_PREPROCESSING_WORKERS}"
  "dataloader_num_workers=${DEFAULT_DATALOADER_WORKERS}"
)
if [[ -n "${LLAMA_EXTRA_ARGS:-}" ]]; then
  export LLAMA_EXTRA_ARGS="${DEFAULT_LLAMA_ARGS[*]} ${LLAMA_EXTRA_ARGS}"
else
  export LLAMA_EXTRA_ARGS="${DEFAULT_LLAMA_ARGS[*]}"
fi

EVAL_DATA_ROOT="${EVAL_DATA_ROOT:-${DATASET_DIR_ROOT}/composition/test}" \
LLAMA_BIN="${LLAMA_BIN}" \
LLAMA_CONFIG="scripts/composition/op-difficulty-10B/pt-diff2_10-tok10B-lr1e-4-bs512k-schedcos-minlr3e-5/id2-10_0.25easy_0.25medium_0.5hard.yaml" \
./scripts/meta_run.sh --skip-rl "$@"

View File

@@ -0,0 +1,8 @@
{
"epoch": 1.0,
"total_flos": 5.994863411375112e+18,
"train_loss": 0.031714059101823636,
"train_runtime": 3465.0177,
"train_samples_per_second": 1387.365,
"train_steps_per_second": 5.42
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:513e25aff4b26f6d4627ba5293f2d27ea5ae535068c635b6f731c100fe640f23
size 6353

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB