commit 7e9399a54bbea32977b735307ae4ddc396bf1532 Author: ModelHub XC Date: Wed May 6 23:46:32 2026 +0800 Initialize project; model provided by the ModelHub XC community Model: zhou778899/test_case_ai Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..36d55d1 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,53 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bin.* filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text +*.tfevents* filter=lfs diff=lfs merge=lfs -text +*.db* filter=lfs diff=lfs merge=lfs -text +*.ark* filter=lfs diff=lfs merge=lfs -text +**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text +**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text +**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.gguf* filter=lfs diff=lfs merge=lfs -text +*.ggml filter=lfs diff=lfs merge=lfs -text +*.llamafile* filter=lfs diff=lfs merge=lfs -text +*.pt2 filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text + +._____temp/deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text +deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text + +._____temp/deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text +deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f0ec800 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +runs/ +images/ diff --git a/added_tokens.json b/added_tokens.json new file mode 100644 index 0000000..f3a7f4e --- /dev/null +++ b/added_tokens.json @@ -0,0 +1,16 @@ +{ + "<eop>": 151334, + "<sop>": 151333, + "<|assistant|>": 151337, + "<|begin_of_image|>": 151339, + "<|begin_of_video|>": 151341, + "<|end_of_image|>": 151340, + "<|end_of_video|>": 151342, + "<|endoftext|>": 151329, + "<|observation|>": 151338, + "<|system|>": 151335, + "<|user|>": 151336, + "[MASK]": 151330, + "[gMASK]": 151331, + "[sMASK]": 151332 +} diff --git a/args.json b/args.json new file mode 100644 index 0000000..b1ddfd5 --- /dev/null +++ b/args.json @@ -0,0
+1,434 @@ +{ + "model": "ZhipuAI/glm-4-9b-chat", + "model_type": "glm4", + "model_revision": null, + "task_type": "causal_lm", + "torch_dtype": "bfloat16", + "attn_impl": null, + "num_labels": null, + "problem_type": null, + "rope_scaling": null, + "device_map": null, + "max_memory": {}, + "local_repo_path": null, + "template": "glm4", + "system": null, + "max_length": 3072, + "truncation_strategy": "delete", + "max_pixels": null, + "tools_prompt": "react_en", + "norm_bbox": null, + "response_prefix": null, + "padding_side": "right", + "loss_scale": "last_round", + "sequence_parallel_size": 1, + "use_chat_template": true, + "template_backend": "swift", + "dataset": [ + "/root/data/new_train.jsonl" + ], + "val_dataset": [ + "/root/data/new_dev.jsonl" + ], + "split_dataset_ratio": 0.0, + "data_seed": 42, + "dataset_num_proc": 4, + "streaming": false, + "enable_cache": false, + "download_mode": "reuse_dataset_if_exists", + "columns": {}, + "strict": false, + "remove_unused_columns": true, + "model_name": [ + null, + null + ], + "model_author": [ + null, + null + ], + "custom_dataset_info": [], + "quant_method": null, + "quant_bits": null, + "hqq_axis": null, + "bnb_4bit_compute_dtype": "bfloat16", + "bnb_4bit_quant_type": "nf4", + "bnb_4bit_use_double_quant": true, + "bnb_4bit_quant_storage": null, + "max_new_tokens": 64, + "temperature": 0.9, + "top_k": 50, + "top_p": 0.9, + "repetition_penalty": 1.0, + "num_beams": 1, + "stream": false, + "stop_words": [], + "logprobs": false, + "top_logprobs": null, + "ckpt_dir": null, + "load_dataset_config": null, + "lora_modules": [], + "tuner_backend": "peft", + "train_type": "lora", + "adapters": [], + "external_plugins": [], + "seed": 42, + "model_kwargs": {}, + "load_args": false, + "load_data_args": false, + "use_hf": false, + "hub_token": null, + "custom_register_path": [], + "ignore_args_error": false, + "use_swift_lora": false, + "output_dir": "/root/output/testcase/v0-20250409-150230", + "overwrite_output_dir": false, + "do_train": false, + "do_eval": false, + "do_predict": false, + "eval_strategy": "steps", + "prediction_loss_only": false, + "per_device_train_batch_size": 1, + "per_device_eval_batch_size": 1, + "per_gpu_train_batch_size": null, + "per_gpu_eval_batch_size": null, + "gradient_accumulation_steps": 8, + "eval_accumulation_steps": null, + "eval_delay": 0, + "torch_empty_cache_steps": null, + "learning_rate": 0.0001, + "weight_decay": 0.1, + "adam_beta1": 0.9, + "adam_beta2": 0.95, + "adam_epsilon": 1e-08, + "max_grad_norm": 1.0, + "num_train_epochs": 1.0, + "max_steps": 200, + "lr_scheduler_type": "cosine", + "lr_scheduler_kwargs": null, + "warmup_ratio": 0.05, + "warmup_steps": 0, + "log_level": "passive", + "log_level_replica": "warning", + "log_on_each_node": true, + "logging_dir": "/root/output/testcase/v0-20250409-150230/runs", + "logging_strategy": "steps", + "logging_first_step": true, + "logging_steps": 2, + "logging_nan_inf_filter": true, + "save_strategy": "steps", + "save_steps": 50.0, + "save_total_limit": 5, + "save_safetensors": true, + "save_on_each_node": false, + "save_only_model": false, + "restore_callback_states_from_checkpoint": false, + "no_cuda": false, + "use_cpu": false, + "use_mps_device": false, + "jit_mode_eval": false, + "use_ipex": false, + "bf16": true, + "fp16": false, + "fp16_opt_level": "O1", + "half_precision_backend": "auto", + "bf16_full_eval": false, + "fp16_full_eval": false, + "tf32": null, + "local_rank": 0, + "ddp_backend": null, + "tpu_num_cores": null, + "tpu_metrics_debug": false, + 
"debug": null, + "dataloader_drop_last": false, + "eval_steps": 50.0, + "dataloader_num_workers": 4, + "dataloader_prefetch_factor": null, + "past_index": -1, + "run_name": null, + "disable_tqdm": null, + "label_names": null, + "load_best_model_at_end": false, + "metric_for_best_model": "loss", + "greater_is_better": false, + "ignore_data_skip": false, + "fsdp": "", + "fsdp_min_num_params": 0, + "fsdp_config": null, + "tp_size": 0, + "fsdp_transformer_layer_cls_to_wrap": null, + "accelerator_config": { + "dispatch_batches": false + }, + "deepspeed": { + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "none", + "pin_memory": true + }, + "offload_param": { + "device": "none", + "pin_memory": true + }, + "overlap_comm": false, + "contiguous_gradients": true, + "sub_group_size": 1000000000.0, + "reduce_bucket_size": "auto", + "zero_quantized_weights": false, + "zero_quantized_gradients": false, + "stage3_prefetch_bucket_size": 0, + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1000000000.0, + "stage3_max_reuse_distance": 1000000000.0, + "stage3_gather_16bit_weights_on_model_save": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false + }, + "label_smoothing_factor": 0.0, + "optim": "adamw_torch", + "optim_args": null, + "adafactor": false, + "group_by_length": false, + "length_column_name": "length", + "report_to": [ + "tensorboard" + ], + "ddp_find_unused_parameters": null, + "ddp_bucket_cap_mb": null, + "ddp_broadcast_buffers": null, + "dataloader_pin_memory": true, + "dataloader_persistent_workers": false, + "skip_memory_metrics": true, + "use_legacy_prediction_loop": false, + "push_to_hub": false, + "resume_from_checkpoint": null, + "hub_model_id": null, + "hub_strategy": "every_save", + "hub_private_repo": null, + "hub_always_push": false, + "gradient_checkpointing": true, + "gradient_checkpointing_kwargs": null, + "include_inputs_for_metrics": false, + "include_for_metrics": [], + "eval_do_concat_batches": true, + "fp16_backend": "auto", + "evaluation_strategy": "steps", + "push_to_hub_model_id": null, + "push_to_hub_organization": null, + "push_to_hub_token": null, + "mp_parameters": "", + "auto_find_batch_size": false, + "full_determinism": false, + "torchdynamo": null, + "ray_scope": "last", + "ddp_timeout": 1800, + "torch_compile": false, + "torch_compile_backend": null, + "torch_compile_mode": null, + "dispatch_batches": null, + "split_batches": null, + "include_tokens_per_second": false, + "include_num_input_tokens_seen": false, + "neftune_noise_alpha": null, + "optim_target_modules": null, + "batch_eval_metrics": false, + "eval_on_start": false, + "use_liger_kernel": false, + "eval_use_gather_object": false, + "average_tokens_across_devices": false, + "sortish_sampler": false, + "predict_with_generate": false, + "generation_max_length": null, + "generation_num_beams": null, + "generation_config": null, + "check_model": true, + "acc_strategy": "token", + "train_sampler_random": true, + "metric_warmup_step": 0, + "fsdp_num": 1, + "acc_steps": 1, + "eval_use_evalscope": false, + "eval_datasets": [], + "eval_limit": null, + "eval_datasets_args": null, + "eval_generation_config": 
null, + "freeze_parameters": [], + "freeze_parameters_ratio": 0.0, + "trainable_parameters": [], + "freeze_llm": false, + "freeze_vit": true, + "freeze_aligner": true, + "target_modules": [ + "all-linear" + ], + "target_regex": null, + "modules_to_save": [], + "lora_rank": 8, + "lora_alpha": 32, + "lora_dropout": 0.05, + "lora_bias": "none", + "lora_dtype": null, + "lorap_lr_ratio": null, + "use_rslora": false, + "use_dora": false, + "lora_ga_batch_size": 2, + "lora_ga_iters": 2, + "lora_ga_max_length": 1024, + "lora_ga_direction": "ArB2r", + "lora_ga_scale": "stable", + "lora_ga_stable_gamma": 16, + "init_weights": true, + "fourier_n_frequency": 2000, + "fourier_scaling": 300.0, + "boft_block_size": 4, + "boft_block_num": 0, + "boft_n_butterfly_factor": 1, + "boft_dropout": 0.0, + "vera_rank": 256, + "vera_projection_prng_key": 0, + "vera_dropout": 0.0, + "vera_d_initial": 0.1, + "adapter_act": "gelu", + "adapter_length": 128, + "use_galore": false, + "galore_target_modules": null, + "galore_rank": 128, + "galore_update_proj_gap": 50, + "galore_scale": 1.0, + "galore_proj_type": "std", + "galore_optim_per_parameter": false, + "galore_with_embedding": false, + "galore_quantization": false, + "galore_proj_quant": false, + "galore_proj_bits": 4, + "galore_proj_group_size": 256, + "galore_cos_threshold": 0.4, + "galore_gamma_proj": 2, + "galore_queue_size": 5, + "adalora_target_r": 8, + "adalora_init_r": 12, + "adalora_tinit": 0, + "adalora_tfinal": 0, + "adalora_deltaT": 1, + "adalora_beta1": 0.85, + "adalora_beta2": 0.85, + "adalora_orth_reg_weight": 0.5, + "llamapro_num_new_blocks": 4, + "llamapro_num_groups": null, + "lisa_activated_layers": 0, + "lisa_step_interval": 20, + "reft_layer_key": null, + "reft_layers": null, + "reft_rank": 4, + "reft_intervention_type": "LoreftIntervention", + "reft_args": null, + "use_liger": false, + "swanlab_token": null, + "swanlab_project": null, + "swanlab_workspace": null, + "swanlab_exp_name": null, + "swanlab_mode": "cloud", + "add_version": true, + "resume_only_model": false, + "create_checkpoint_symlink": false, + "packing": false, + "lazy_tokenize": false, + "loss_type": "kto", + "optimizer": null, + "metric": null, + "zero_hpz_partition_size": null, + "reward_model": null, + "reward_adapters": [], + "reward_model_type": null, + "reward_model_revision": null, + "num_ppo_epochs": 4, + "whiten_rewards": false, + "kl_coef": 0.05, + "cliprange": 0.2, + "vf_coef": 0.1, + "cliprange_value": 0.2, + "gamma": 1.0, + "lam": 0.95, + "num_mini_batches": 1, + "local_rollout_forward_batch_size": 64, + "num_sample_generations": 10, + "response_length": 512, + "missing_eos_penalty": null, + "epsilon": 0.2, + "epsilon_high": null, + "num_infer_workers": 1, + "vllm_max_num_seqs": 256, + "vllm_enforce_eager": false, + "vllm_limit_mm_per_prompt": null, + "vllm_enable_prefix_caching": true, + "cosine_min_len_value_wrong": -0.5, + "cosine_max_len_value_wrong": 0.0, + "cosine_min_len_value_correct": 1.0, + "cosine_max_len_value_correct": 0.5, + "cosine_max_len": null, + "repetition_n_grams": 3, + "repetition_max_penalty": -1.0, + "use_lmdeploy": false, + "lmdeploy_device": "auto", + "lmdeploy_session_len": null, + "lmdeploy_cache_max_entry_count": 0.8, + "async_generate": false, + "tensor_parallel_size": 1, + "sleep_level": 0, + "move_model_batches": null, + "offload_optimizer": false, + "offload_model": false, + "gc_collect_after_offload": false, + "multi_turn_func": null, + "mini_batch_size": null, + "num_generations": 8, + "max_completion_length": 512, + 
"ds3_gather_for_generation": true, + "reward_funcs": [], + "reward_weights": null, + "log_completions": false, + "use_vllm": false, + "vllm_device": [ + "auto" + ], + "vllm_gpu_memory_utilization": 0.9, + "vllm_max_model_len": null, + "num_iterations": 1, + "rlhf_type": "kto", + "ref_model": null, + "ref_model_type": null, + "ref_model_revision": null, + "beta": 0.1, + "label_smoothing": 0, + "rpo_alpha": 1.0, + "cpo_alpha": 1.0, + "simpo_gamma": 1, + "desirable_weight": 1.0, + "undesirable_weight": 1.0, + "rank": 0, + "global_world_size": 2, + "local_world_size": 2, + "model_suffix": "glm-4-9b-chat", + "model_info": "ModelInfo(model_type='glm4', model_dir='/root/.cache/modelscope/hub/models/ZhipuAI/glm-4-9b-chat', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)", + "model_meta": "ModelMeta(model_type='glm4', model_groups=[ModelGroup(models=[Model(ms_model_id='ZhipuAI/glm-4-9b-chat', hf_model_id='THUDM/glm-4-9b-chat', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/glm-4-9b', hf_model_id='THUDM/glm-4-9b', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/glm-4-9b-chat-1m', hf_model_id='THUDM/glm-4-9b-chat-1m', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='ZhipuAI/LongWriter-glm4-9b', hf_model_id='THUDM/LongWriter-glm4-9b', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='glm4', get_function=, model_arch='chatglm', architectures=['ChatGLMModel', 'ChatGLMForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=['*.zip', '*.gguf', '*.pth', '*.pt', 'consolidated*', 'onnx/*', '*.safetensors.md', '*.msgpack', '*.onnx', '*.ot', '*.h5', '*.bin', '*.safetensors'], requires=['transformers>=4.42'], tags=[])", + "model_dir": "/root/.cache/modelscope/hub/models/ZhipuAI/glm-4-9b-chat", + "hub": "", + "training_args": "KTOConfig(output_dir='/root/output/testcase/v0-20250409-150230', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=8, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=200, lr_scheduler_type=, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/root/output/testcase/v0-20250409-150230/runs', logging_strategy=, logging_first_step=True, logging_steps=2, logging_nan_inf_filter=True, save_strategy=, save_steps=50, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, 
eval_steps=50, dataloader_num_workers=4, dataloader_prefetch_factor=None, past_index=-1, run_name='/root/output/testcase/v0-20250409-150230', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 0, 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, max_length=3072, max_prompt_length=512, max_completion_length=512, beta=0.1, loss_type='kto', desirable_weight=1.0, undesirable_weight=1.0, label_pad_token_id=None, padding_value=None, truncation_mode='keep_end', generate_during_eval=False, is_encoder_decoder=None, disable_dropout=True, precompute_ref_log_probs=False, model_init_kwargs=None, ref_model_init_kwargs=None, dataset_num_proc=4, check_model=True, acc_strategy='token', train_sampler_random=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, 
train_type='lora', optimizer=None, local_repo_path=None, galore_config=None)" +} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..04adbb0 --- /dev/null +++ b/config.json @@ -0,0 +1,52 @@ +{ + "add_bias_linear": false, + "add_qkv_bias": true, + "apply_query_key_layer_scaling": true, + "apply_residual_connection_post_layernorm": false, + "architectures": [ + "ChatGLMForConditionalGeneration" + ], + "attention_dropout": 0.0, + "attention_softmax_in_fp32": true, + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification" + }, + "bias_dropout_fusion": true, + "classifier_dropout": null, + "eos_token_id": [ + 151329, + 151336, + 151338 + ], + "ffn_hidden_size": 13696, + "fp32_residual_connection": false, + "hidden_dropout": 0.0, + "hidden_size": 4096, + "keys_to_ignore_at_inference": [ + "past_key_values" + ], + "kv_channels": 128, + "layernorm_epsilon": 1.5625e-07, + "model_type": "chatglm", + "multi_query_attention": true, + "multi_query_group_num": 2, + "num_attention_heads": 32, + "num_hidden_layers": 40, + "num_layers": 40, + "original_rope": true, + "pad_token_id": 151329, + "padded_vocab_size": 151552, + "post_layer_norm": true, + "rmsnorm": true, + "rope_ratio": 500, + "seq_length": 131072, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.51.2", + "use_cache": true, + "vocab_size": 151552 +} diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..bbeeda1 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "text-generation", "allow_remote": true} \ No newline at end of file diff --git a/configuration_chatglm.py b/configuration_chatglm.py new file mode 100644 index 0000000..65efd3a --- /dev/null +++ b/configuration_chatglm.py @@ -0,0 +1,58 @@ +from transformers import PretrainedConfig + + +class ChatGLMConfig(PretrainedConfig): + model_type = "chatglm" + + def __init__( + self, + num_layers=28, + padded_vocab_size=65024, + hidden_size=4096, + ffn_hidden_size=13696, + kv_channels=128, + num_attention_heads=32, + seq_length=2048, + hidden_dropout=0.0, + classifier_dropout=None, + attention_dropout=0.0, + layernorm_epsilon=1e-5, + rmsnorm=True, + apply_residual_connection_post_layernorm=False, + post_layer_norm=True, + add_bias_linear=False, + add_qkv_bias=False, + bias_dropout_fusion=True, + multi_query_attention=False, + multi_query_group_num=1, + rope_ratio=1, + apply_query_key_layer_scaling=True, + attention_softmax_in_fp32=True, + fp32_residual_connection=False, + **kwargs + ): + self.num_layers = num_layers + self.vocab_size = padded_vocab_size + self.padded_vocab_size = padded_vocab_size + self.hidden_size = hidden_size + self.ffn_hidden_size = ffn_hidden_size + self.kv_channels = kv_channels + self.num_attention_heads = num_attention_heads + self.seq_length = seq_length + self.hidden_dropout = hidden_dropout + self.classifier_dropout = classifier_dropout + self.attention_dropout = attention_dropout + self.layernorm_epsilon = layernorm_epsilon + self.rmsnorm = rmsnorm + self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm + self.post_layer_norm = post_layer_norm + 
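# The assignments in this __init__ simply mirror the keys in this repo's
# config.json; nothing is derived at load time beyond vocab_size aliasing
# padded_vocab_size. A minimal loading sketch, assuming the checkpoint has
# been downloaded to ./test_case_ai (an illustrative local path, not part of
# this repo):
#
#     from transformers import AutoConfig
#     cfg = AutoConfig.from_pretrained("./test_case_ai", trust_remote_code=True)
#     assert cfg.num_layers == 40 and cfg.kv_channels == 128 and cfg.rope_ratio == 500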
self.add_bias_linear = add_bias_linear + self.add_qkv_bias = add_qkv_bias + self.bias_dropout_fusion = bias_dropout_fusion + self.multi_query_attention = multi_query_attention + self.multi_query_group_num = multi_query_group_num + self.rope_ratio = rope_ratio + self.apply_query_key_layer_scaling = apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = attention_softmax_in_fp32 + self.fp32_residual_connection = fp32_residual_connection + super().__init__(**kwargs) diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..2395c84 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,13 @@ +{ + "do_sample": true, + "eos_token_id": [ + 151329, + 151336, + 151338 + ], + "max_length": 128000, + "pad_token_id": 151329, + "temperature": 0.8, + "top_p": 0.8, + "transformers_version": "4.51.2" +} diff --git a/model-00001-of-00004.safetensors b/model-00001-of-00004.safetensors new file mode 100644 index 0000000..ba9cfb4 --- /dev/null +++ b/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b97c090d17eb9d025ff5fbe8ef54577923f0d78630c9980998178ad1eb675fdb +size 4984147224 diff --git a/model-00002-of-00004.safetensors b/model-00002-of-00004.safetensors new file mode 100644 index 0000000..0a16709 --- /dev/null +++ b/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa986163e1bbaee06261a9c8419927aa02b67c20ca8fe3ed4587d2c7c37927f9 +size 4895071360 diff --git a/model-00003-of-00004.safetensors b/model-00003-of-00004.safetensors new file mode 100644 index 0000000..5ad3ea9 --- /dev/null +++ b/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5369c3d87ba0f782ba6cda95282e9419401bb998d08875d39f173d30935a967 +size 4895071384 diff --git a/model-00004-of-00004.safetensors b/model-00004-of-00004.safetensors new file mode 100644 index 0000000..064ac4e --- /dev/null +++ b/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14b3256c47e9ab10f4a3aa68a23375ea213ea6f56f0599a1ec98243d0ad4472f +size 4025651256 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000..444425f --- /dev/null +++ b/model.safetensors.index.json @@ -0,0 +1,291 @@ +{ + "metadata": { + "total_size": 18799902784 + }, + "weight_map": { + "transformer.embedding.word_embeddings.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.final_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.0.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": 
"model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.1.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.10.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.11.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.12.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.13.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + 
"transformer.encoder.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.14.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.15.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.16.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.17.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.18.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + 
"transformer.encoder.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.19.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.2.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.20.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.21.self_attention.dense.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.21.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.21.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.22.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + 
"transformer.encoder.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.23.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.24.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.25.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.26.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.27.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + 
"transformer.encoder.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.28.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.29.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.3.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.30.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.31.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors", + 
"transformer.encoder.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.32.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.33.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.33.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.33.self_attention.dense.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.33.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.33.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors", + "transformer.encoder.layers.34.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.34.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.35.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.36.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + 
"transformer.encoder.layers.37.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.37.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.38.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.input_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.self_attention.dense.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.39.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors", + "transformer.encoder.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.4.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.5.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + 
"transformer.encoder.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.6.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.7.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.8.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors", + "transformer.encoder.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.9.self_attention.dense.weight": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.9.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors", + "transformer.encoder.layers.9.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors", + "transformer.output_layer.weight": "model-00004-of-00004.safetensors", + "transformer.rotary_pos_emb.inv_freq": "model-00001-of-00004.safetensors" + } +} diff --git a/modeling_chatglm.py b/modeling_chatglm.py new file mode 100644 index 0000000..3b1e503 --- /dev/null +++ b/modeling_chatglm.py @@ -0,0 +1,1138 @@ +""" PyTorch ChatGLM model. 
""" + +import math +import sys +import torch +import torch.utils.checkpoint +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss +from torch.nn.utils import skip_init +from typing import Optional, Tuple, Union, List, Dict, Any + +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging, is_torch_npu_available +from transformers.generation.logits_process import LogitsProcessor +from transformers.generation.utils import ModelOutput + +from .configuration_chatglm import ChatGLMConfig + +try: + from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available + + if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa +except: + pass + +# flags required to enable jit fusion kernels + +if sys.platform != 'darwin' and not is_torch_npu_available(): + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM" +_CONFIG_FOR_DOC = "ChatGLMConfig" + + +def default_init(cls, *args, **kwargs): + return cls(*args, **kwargs) + + +class InvalidScoreLogitsProcessor(LogitsProcessor): + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + if torch.isnan(scores).any() or torch.isinf(scores).any(): + scores.zero_() + scores[..., 198] = 5e4 + return scores + + +def split_tensor_along_last_dim( + tensor: torch.Tensor, + num_partitions: int, + contiguous_split_chunks: bool = False, +) -> List[torch.Tensor]: + """Split a tensor along its last dimension. + + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + + Returns: + A list of Tensors + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None): + super().__init__() + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) + self.register_buffer("inv_freq", inv_freq) + self.dim = dim + self.original_impl = original_impl + self.rope_ratio = rope_ratio + + def forward_impl( + self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000 + ): + """Enhanced Transformer with Rotary Position Embedding. + + Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ + transformers/rope/__init__.py. MIT License: + https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. 
+ """ + # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ + base = base * self.rope_ratio + theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem)) + + # Create position indexes `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, dtype=torch.float, device=device) + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.outer(seq_idx, theta).float() + + cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) + + # this is to mimic the behaviour of complex32, else we will get different results + if dtype in (torch.float16, torch.bfloat16, torch.int8): + cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() + return cache + + def forward(self, max_seq_len, offset=0): + return self.forward_impl( + max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device + ) + + +@torch.jit.script +def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: + # x: [b, np, sq, hn] + b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3) + rot_dim = rope_cache.shape[-2] * 2 + x, x_pass = x[..., :rot_dim], x[..., rot_dim:] + # truncate to support variable sizes + rope_cache = rope_cache[:, :sq] + xshaped = x.reshape(b, np, sq, rot_dim // 2, 2) + rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2) + x_out2 = torch.stack( + [ + xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], + xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], + ], + -1, + ) + x_out2 = x_out2.flatten(3) + return torch.cat((x_out2, x_pass), dim=-1) + + +class RMSNorm(torch.nn.Module): + def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): + super().__init__() + self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) + self.eps = eps + + def forward(self, hidden_states: torch.Tensor): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + + return (self.weight * hidden_states).to(input_dtype) + + +class CoreAttention(torch.nn.Module): + def __init__(self, config: ChatGLMConfig, layer_number): + super(CoreAttention, self).__init__() + self.config = config + self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 + if self.apply_query_key_layer_scaling: + self.attention_softmax_in_fp32 = True + self.layer_number = max(1, layer_number) + self.is_causal = True + + projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. 
+ self.hidden_size_per_partition = projection_size + self.hidden_size_per_attention_head = projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + self.coeff = coeff + + self.attention_dropout = torch.nn.Dropout(config.attention_dropout) + + def forward(self, query_layer, key_layer, value_layer, attention_mask): + # [b, np, sq, sk] + output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2)) + + # [b, np, sq, hn] -> [b * np, sq, hn] + query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1) + # [b, np, sk, hn] -> [b * np, sk, hn] + key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = torch.empty( + output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype, + device=query_layer.device + ) + + # Raw attention scores. [b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query_layer, # [b * np, sq, hn] + key_layer.transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # attention scores and attention mask [b, np, sq, sk] + if self.attention_softmax_in_fp32: + attention_scores = attention_scores.float() + if self.coeff is not None: + attention_scores = attention_scores * self.coeff + if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: + attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3], + device=attention_scores.device, dtype=torch.bool) + attention_mask.tril_() + attention_mask = ~attention_mask + if attention_mask is not None: + attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) + attention_probs = F.softmax(attention_scores, dim=-1) + attention_probs = attention_probs.type_as(value_layer) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.attention_dropout(attention_probs) + + # query layer shape: [b * np, sq, hn] + # value layer shape: [b, np, sk, hn] + # attention shape: [b, np, sq, sk] + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(0), value_layer.size(1), query_layer.size(1), value_layer.size(3)) + # change view [b * np, sk, hn] + value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1) + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer) + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + # [b, np, sq, hn] --> [b, sq, np, hn] + context_layer = context_layer.transpose(1, 2).contiguous() + # [b, sq, np, hn] --> [b, sq, hp] + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.reshape(*new_context_layer_shape) + + return context_layer + + +class SdpaAttention(CoreAttention): + def forward(self, query_layer, key_layer, value_layer, attention_mask): + if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: + context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, + is_causal=True, + dropout_p=self.config.attention_dropout if self.training else 0.0) + else: + if attention_mask is not None: + attention_mask = ~attention_mask + context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, + attention_mask, + dropout_p=self.config.attention_dropout if self.training else 0.0) + context_layer = context_layer.transpose(1, 2).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.reshape(*new_context_layer_shape) + return context_layer + + +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 +class FlashAttention2(CoreAttention): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward(self, query_states, key_states, value_states, attention_mask): + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + batch_size, query_length = query_states.shape[:2] + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + dropout = self.config.attention_dropout if self.training else 0.0 + # Contains at least one padding token in the sequence + if attention_mask is not None: + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=None, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=None, causal=causal + ) + attn_output = attn_output.reshape(batch_size, query_length, self.hidden_size_per_partition).contiguous() + return attn_output + + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads_per_partition, head_dim), + indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +CORE_ATTENTION_CLASSES = { + "eager": CoreAttention, + "sdpa": SdpaAttention, + "flash_attention_2": FlashAttention2 +} + + +class SelfAttention(torch.nn.Module): + """Parallel self-attention layer abstract class. + + Self-attention layer takes input with size [s, b, h] + and returns output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(SelfAttention, self).__init__() + self.layer_number = max(1, layer_number) + + self.projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. 
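+        # hidden_size_per_attention_head below is the per-head dimension (kv_channels).
+        # With multi_query_attention, the QKV projection keeps num_attention_heads query
+        # heads but only multi_query_group_num key/value heads, so qkv_hidden_size
+        # shrinks from 3 * projection_size to
+        # projection_size + 2 * hidden_size_per_attention_head * multi_query_group_num.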
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + self.multi_query_attention = config.multi_query_attention + self.qkv_hidden_size = 3 * self.projection_size + if self.multi_query_attention: + self.num_multi_query_groups_per_partition = config.multi_query_group_num + self.qkv_hidden_size = ( + self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num + ) + self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size, + bias=config.add_bias_linear or config.add_qkv_bias, + device=device, **_config_to_kwargs(config) + ) + + self.core_attention = CORE_ATTENTION_CLASSES[config._attn_implementation](config, self.layer_number) + + # Output. + self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, + device=device, **_config_to_kwargs(config) + ) + + def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): + if self.multi_query_attention: + num_attention_heads = self.num_multi_query_groups_per_partition + else: + num_attention_heads = self.num_attention_heads_per_partition + return torch.empty( + inference_max_sequence_len, + batch_size, + num_attention_heads, + self.hidden_size_per_attention_head, + dtype=dtype, + device=device, + ) + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True + ): + # hidden_states: [b, sq, h] + + # ================================================= + # Pre-allocate memory for key-values for inference. + # ================================================= + # ===================== + # Query, Key, and Value + # ===================== + + # Attention heads [b, sq, h] --> [b, sq, (np * 3 * hn)] + mixed_x_layer = self.query_key_value(hidden_states) + + if self.multi_query_attention: + (query_layer, key_layer, value_layer) = mixed_x_layer.split( + [ + self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + ], + dim=-1, + ) + query_layer = query_layer.view( + query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + key_layer = key_layer.view( + key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + value_layer = value_layer.view( + value_layer.size()[:-1] + + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + else: + new_tensor_shape = mixed_x_layer.size()[:-1] + \ + (self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + # [b, sq, np, 3 * hn] --> 3 [b, sq, np, hn] + (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) + + # [b, sq, np, hn] -> [b, np, sq, hn] + query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]] + + # apply relative positional encoding (rotary embedding) + if rotary_pos_emb is not None: + query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) + key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) + + # adjust key and value for inference + if kv_cache is not None: + cache_k, cache_v = kv_cache + key_layer = torch.cat((cache_k, key_layer), dim=2) + value_layer = 
torch.cat((cache_v, value_layer), dim=2) + if use_cache: + if kv_cache is None: + kv_cache = torch.cat((key_layer.unsqueeze(0).unsqueeze(0), value_layer.unsqueeze(0).unsqueeze(0)), + dim=1) + else: + kv_cache = (key_layer, value_layer) + else: + kv_cache = None + + if self.multi_query_attention: + key_layer = key_layer.unsqueeze(2) + key_layer = key_layer.expand( + -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1 + ) + key_layer = key_layer.contiguous().view( + key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:] + ) + value_layer = value_layer.unsqueeze(2) + value_layer = value_layer.expand( + -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1 + ) + value_layer = value_layer.contiguous().view( + value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:] + ) + + # ================================== + # core attention computation + # ================================== + + context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) + + # ================= + # Output. [sq, b, h] + # ================= + + output = self.dense(context_layer) + + return output, kv_cache + + +def _config_to_kwargs(args): + common_kwargs = { + "dtype": args.torch_dtype, + } + return common_kwargs + + +class MLP(torch.nn.Module): + """MLP. + + MLP will take the input with h hidden state, project it to 4*h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. + """ + + def __init__(self, config: ChatGLMConfig, device=None): + super(MLP, self).__init__() + + self.add_bias = config.add_bias_linear + + # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + self.dense_h_to_4h = nn.Linear( + config.hidden_size, + config.ffn_hidden_size * 2, + bias=self.add_bias, + device=device, + **_config_to_kwargs(config) + ) + + def swiglu(x): + x = torch.chunk(x, 2, dim=-1) + return F.silu(x[0]) * x[1] + + self.activation_func = swiglu + + # Project back to h. + self.dense_4h_to_h = nn.Linear( + config.ffn_hidden_size, + config.hidden_size, + bias=self.add_bias, + device=device, + **_config_to_kwargs(config) + ) + + def forward(self, hidden_states): + # [s, b, 4hp] + intermediate_parallel = self.dense_h_to_4h(hidden_states) + intermediate_parallel = self.activation_func(intermediate_parallel) + # [s, b, h] + output = self.dense_4h_to_h(intermediate_parallel) + return output + + +class GLMBlock(torch.nn.Module): + """A single transformer layer. + + Transformer layer takes input with size [s, b, h] and returns an + output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(GLMBlock, self).__init__() + self.layer_number = layer_number + + self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm + + self.fp32_residual_connection = config.fp32_residual_connection + + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Layernorm on the input data. + self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + # Self attention. 
+ self.self_attention = SelfAttention(config, layer_number, device=device) + self.hidden_dropout = config.hidden_dropout + + # Layernorm on the attention output + self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + # MLP + self.mlp = MLP(config, device=device) + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, + ): + # hidden_states: [s, b, h] + + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Self attention. + attention_output, kv_cache = self.self_attention( + layernorm_output, + attention_mask, + rotary_pos_emb, + kv_cache=kv_cache, + use_cache=use_cache + ) + + # Residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) + layernorm_input = residual + layernorm_input + + # Layer norm post the self attention. + layernorm_output = self.post_attention_layernorm(layernorm_input) + + # MLP. + mlp_output = self.mlp(layernorm_output) + + # Second residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) + output = residual + output + + return output, kv_cache + + +class GLMTransformer(torch.nn.Module): + """Transformer class.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(GLMTransformer, self).__init__() + + self.fp32_residual_connection = config.fp32_residual_connection + self.post_layer_norm = config.post_layer_norm + + # Number of layers. + self.num_layers = config.num_layers + + # Transformer layers. + def build_layer(layer_number): + return GLMBlock(config, layer_number, device=device) + + self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) + + if self.post_layer_norm: + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Final layer norm before output. + self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + self.gradient_checkpointing = False + + def _get_layer(self, layer_number): + return self.layers[layer_number] + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, + use_cache: Optional[bool] = True, + output_hidden_states: Optional[bool] = False, + ): + if not kv_caches: + kv_caches = [None for _ in range(self.num_layers)] + presents = () if use_cache else None + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + all_self_attentions = None + all_hidden_states = () if output_hidden_states else None + for index in range(self.num_layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer = self._get_layer(index) + if self.gradient_checkpointing and self.training: + layer_ret = torch.utils.checkpoint.checkpoint( + layer, + hidden_states, + attention_mask, + rotary_pos_emb, + kv_caches[index], + use_cache, + use_reentrant=False + ) + else: + layer_ret = layer( + hidden_states, + attention_mask, + rotary_pos_emb, + kv_cache=kv_caches[index], + use_cache=use_cache + ) + hidden_states, kv_cache = layer_ret + if use_cache: + # token by token decoding, use tuple format + if kv_caches[0] is not None: + presents = presents + (kv_cache,) + # prefilling in decoding, use tensor format to save cuda memory + else: + if len(presents) == 0: + presents = kv_cache + else: + presents = torch.cat((presents, kv_cache.to(presents.device)), dim=0) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # Final layer norm. + if self.post_layer_norm: + hidden_states = self.final_layernorm(hidden_states) + + return hidden_states, presents, all_hidden_states, all_self_attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. + """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + _supports_flash_attn_2 = True + _supports_sdpa = True + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, past_key_values, padding_mask=None): + if self.config._attn_implementation == "flash_attention_2": + if padding_mask is not None and not padding_mask.all(): + return padding_mask + return None + batch_size, seq_length = input_ids.shape + full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) + full_attention_mask.tril_() + past_length = 0 + if past_key_values: + past_length = past_key_values[0][0].shape[2] + if past_length: + full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, + device=input_ids.device), full_attention_mask), dim=-1) + if padding_mask is not None: + full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) + if not past_length and padding_mask is not None: + full_attention_mask -= padding_mask.unsqueeze(-1) - 1 + full_attention_mask = (full_attention_mask < 0.5).bool() + full_attention_mask.unsqueeze_(1) + return full_attention_mask + + def get_position_ids(self, input_ids, device): + batch_size, seq_length = input_ids.shape + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + return position_ids + +class Embedding(torch.nn.Module): + """Language model embeddings.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(Embedding, self).__init__() + + self.hidden_size = config.hidden_size + # Word embeddings (parallel). + self.word_embeddings = nn.Embedding( + config.padded_vocab_size, + self.hidden_size, + dtype=config.torch_dtype, + device=device + ) + self.fp32_residual_connection = config.fp32_residual_connection + + def forward(self, input_ids): + # Embeddings. 
+ words_embeddings = self.word_embeddings(input_ids) + embeddings = words_embeddings + # If the input flag for fp32 residual connection is set, convert for float. + if self.fp32_residual_connection: + embeddings = embeddings.float() + return embeddings + + +class ChatGLMModel(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, device=None, empty_init=True): + super().__init__(config) + if empty_init: + init_method = skip_init + else: + init_method = default_init + init_kwargs = {} + if device is not None: + init_kwargs["device"] = device + self.embedding = init_method(Embedding, config, **init_kwargs) + self.num_layers = config.num_layers + self.multi_query_group_num = config.multi_query_group_num + self.kv_channels = config.kv_channels + + # Rotary positional embeddings + self.seq_length = config.seq_length + rotary_dim = ( + config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels + ) + + self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio, + original_impl=config.original_rope, + device=device, dtype=config.torch_dtype) + self.encoder = init_method(GLMTransformer, config, **init_kwargs) + self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False, + dtype=config.torch_dtype, **init_kwargs) + + def get_input_embeddings(self): + return self.embedding.word_embeddings + + def set_input_embeddings(self, value): + self.embedding.word_embeddings = value + + def forward( + self, + input_ids, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + full_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, seq_length = input_ids.shape + + if inputs_embeds is None: + inputs_embeds = self.embedding(input_ids) + + if full_attention_mask is None: + if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): + full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) + + # Rotary positional embeddings + rotary_pos_emb = self.rotary_pos_emb(self.seq_length) + if position_ids is not None: + rotary_pos_emb = rotary_pos_emb[position_ids] + else: + rotary_pos_emb = rotary_pos_emb[None, :seq_length] + + # Run encoder. 
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( + inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, + kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states + ) + if presents is not None and type(presents) is torch.Tensor: + presents = presents.split(1, dim=0) + presents = list(presents) + presents = [list(x.squeeze(0).split(1, dim=0)) for x in presents] + presents = [tuple([x.squeeze(0) for x in y]) for y in presents] + presents = tuple(presents) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, empty_init=True, device=None): + super().__init__(config) + + self.max_sequence_length = config.max_length + self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device) + self.config = config + + def _update_model_kwargs_for_generation( + self, + outputs: ModelOutput, + model_kwargs: Dict[str, Any], + is_encoder_decoder: bool = False, + ) -> Dict[str, Any]: + # update past_key_values + cache_name, cache = self._extract_past_from_model_output(outputs) + model_kwargs[cache_name] = cache + + # update attention mask + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = torch.cat( + [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 + ) + + # update position ids + if "position_ids" in model_kwargs: + position_ids = model_kwargs["position_ids"] + new_position_id = position_ids[..., -1:].clone() + new_position_id += 1 + model_kwargs["position_ids"] = torch.cat( + [position_ids, new_position_id], dim=-1 + ) + + model_kwargs["is_first_forward"] = False + return model_kwargs + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + past_key_values: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + is_first_forward: bool = True, + **kwargs + ) -> dict: + # only last token for input_ids if past is not None + if position_ids is None: + position_ids = self.get_position_ids(input_ids, device=input_ids.device) + if not is_first_forward: + if past_key_values is not None: + position_ids = position_ids[..., -1:] + input_ids = input_ids[:, -1:] + return { + "input_ids": input_ids, + "past_key_values": past_key_values, + "position_ids": position_ids, + "attention_mask": attention_mask, + "return_last_logit": True, + "use_cache": use_cache + } + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + return_last_logit: Optional[bool] = False, + ): + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict 
+ + transformer_outputs = self.transformer( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + if return_last_logit: + hidden_states = hidden_states[:, -1:] + lm_logits = self.transformer.output_layer(hidden_states) + + loss = None + if labels is not None: + lm_logits = lm_logits.to(torch.float32) + + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(ignore_index=-100) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + lm_logits = lm_logits.to(hidden_states.dtype) + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + + Output shares the same memory storage as `past`. + """ + return tuple( + ( + layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)), + layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)), + ) + for layer_past in past + ) + + +class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, empty_init=True, device=None): + super().__init__(config) + + self.num_labels = config.num_labels + self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device) + + self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=config.torch_dtype) + if config.classifier_dropout is not None: + self.dropout = nn.Dropout(config.classifier_dropout) + else: + self.dropout = None + self.config = config + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + full_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + full_attention_mask=full_attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, 
+ output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + pooled_hidden_states = hidden_states[:, -1] + if self.dropout is not None: + pooled_hidden_states = self.dropout(pooled_hidden_states) + logits = self.classifier_head(pooled_hidden_states) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze().float(), labels.squeeze()) + else: + loss = loss_fct(logits.float(), labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits.float(), labels.view(-1, self.num_labels)) + + if not return_dict: + output = (logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..582da4a --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,32 @@ +{ + "additional_special_tokens": [ + "<|endoftext|>", + "[MASK]", + "[gMASK]", + "[sMASK]", + "", + "", + "<|system|>", + "<|user|>", + "<|assistant|>", + "<|observation|>", + "<|begin_of_image|>", + "<|end_of_image|>", + "<|begin_of_video|>", + "<|end_of_video|>" + ], + "eos_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenization_chatglm.py b/tokenization_chatglm.py new file mode 100644 index 0000000..90ed04a --- /dev/null +++ b/tokenization_chatglm.py @@ -0,0 +1,224 @@ +import regex as re +import base64 +import os +import tiktoken +from typing import List, Optional, Union, Dict +from transformers import PreTrainedTokenizer +from transformers.utils import PaddingStrategy +from transformers.tokenization_utils_base import EncodedInput, BatchEncoding + + +class ChatGLM4Tokenizer(PreTrainedTokenizer): + vocab_files_names = {"vocab_file": "tokenizer.model"} + model_input_names = ["input_ids", "attention_mask", "position_ids"] + + def __init__( + self, + vocab_file, + clean_up_tokenization_spaces=False, + **kwargs + ): + self.name = "GLM4Tokenizer" + self.vocab_file = vocab_file + pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" + self.pat_str = re.compile(pat_str) + + mergeable_ranks = {} + with open(vocab_file) as f: + for line in f: + token, rank = line.strip().split() + rank = int(rank) + token = base64.b64decode(token) + mergeable_ranks[token] = rank + + self.mergeable_ranks = mergeable_ranks + + self.tokenizer = tiktoken.Encoding( + 
name="my_tokenizer", + pat_str=pat_str, + mergeable_ranks=mergeable_ranks, + special_tokens={} + ) + self.decoder = {rank: token for token, rank in mergeable_ranks.items()} + self.n_words = len(self.decoder) + + super().__init__( + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs + ) + + @property + def vocab_size(self): + return self.n_words + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str: + """ + Converts a sequence of tokens in a single string. + """ + text = "" + temp = b"" + for t in tokens: + if isinstance(t, int): + t = chr(t) + if isinstance(t, str): + if temp: + text += temp.decode("utf-8", errors="replace") + elif isinstance(t, bytes): + temp += t + else: + raise TypeError("token should only be of type int, bytes or str") + if temp: + text += temp.decode("utf-8", errors="replace") + return text + + def _tokenize(self, text, **kwargs): + tokens = [] + ids = self.tokenizer.encode(text) + for t in ids: + tokens.append(self.decoder[t]) + return tokens + + def _convert_token_to_id(self, token): + """ Converts a token (str) in an id using the vocab. """ + return self.mergeable_ranks[token] + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, "") + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, self.vocab_files_names["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def get_prefix_tokens(self): + prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("")] + return prefix_tokens + + def build_single_message(self, role, metadata, message, tokenize=True): + assert role in ["system", "user", "assistant", "observation"], role + if tokenize: + role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n", + disallowed_special=()) + message_tokens = self.tokenizer.encode(message, disallowed_special=()) + tokens = role_tokens + message_tokens + return tokens + else: + return str(f"<|{role}|>{metadata}\n{message}") + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + prefix_tokens = self.get_prefix_tokens() + token_ids_0 = prefix_tokens + token_ids_0 + if token_ids_1 is not None: + token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("")] + return token_ids_0 + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_side: str = "left", + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + + required_input = encoded_inputs[self.model_input_names[0]] + seq_length = len(required_input) + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. 
+ if "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * seq_length + + if "position_ids" not in encoded_inputs: + encoded_inputs["position_ids"] = list(range(seq_length)) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + + return encoded_inputs diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000..8650fd9 --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716 +size 2623634 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..52053c5 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,148 @@ +{ + "added_tokens_decoder": { + "151329": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151330": { + "content": "[MASK]", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151331": { + "content": "[gMASK]", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151332": { + "content": "[sMASK]", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151333": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151334": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151335": { + "content": "<|system|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151336": { + "content": "<|user|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151337": { + "content": "<|assistant|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151338": { + "content": "<|observation|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151339": { + "content": "<|begin_of_image|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151340": { + "content": "<|end_of_image|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151341": { + "content": "<|begin_of_video|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151342": { + "content": "<|end_of_video|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|endoftext|>", + "[MASK]", + "[gMASK]", + "[sMASK]", + "", + "", + "<|system|>", + "<|user|>", + "<|assistant|>", + "<|observation|>", + "<|begin_of_image|>", + "<|end_of_image|>", + "<|begin_of_video|>", + "<|end_of_video|>" + ], + "auto_map": { + "AutoTokenizer": [ + 
"tokenization_chatglm.ChatGLM4Tokenizer", + null + ] + }, + "chat_template": "[gMASK]{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 GLM-4 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n在调用上述函数时,请使用 Json 格式表示调用的参数。{% elif tool['type'] == 'python' %}\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。{% elif tool['type'] == 'simple_browser' %}\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。{% elif tool['type'] == 'cogview' %}\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}", + "clean_up_tokenization_spaces": false, + "do_lower_case": false, + "eos_token": "<|endoftext|>", + "extra_special_tokens": {}, + "model_max_length": 128000, + "pad_token": "<|endoftext|>", + "padding_side": "left", + "remove_space": false, + "tokenizer_class": "ChatGLM4Tokenizer" +}