Initialize project; model provided by the ModelHub XC community
Model: McClain/PlasmidGPT-RL
Source: Original Platform
.gitattributes (vendored, Normal file, 35 lines)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md (Normal file, 76 lines)
@@ -0,0 +1,76 @@
---
base_model: UCL-CSSB/PlasmidGPT-SFT
library_name: transformers
model_name: PlasmidGPT-RL
tags:
- generated_from_trainer
- grpo
- trl
- plasmid
- biology
- dna
license: mit
---

# PlasmidGPT-RL

This model is a fine-tuned version of [UCL-CSSB/PlasmidGPT-SFT](https://huggingface.co/UCL-CSSB/PlasmidGPT-SFT) using Group Relative Policy Optimization (GRPO).

## Model Description

PlasmidGPT-RL is trained to generate functional plasmid DNA sequences. It was fine-tuned using reinforcement learning with a reward model that evaluates:

- Presence of valid origins of replication (OriV)
- Presence of antibiotic resistance genes (ARGs)
- Absence of problematic repeat sequences
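The reward code itself is not included in this repository. Purely as a minimal sketch, a TRL-style reward function covering these three checks might look like the following; the motif fragments, score weights, and repeat threshold are hypothetical placeholders, not the values used in training.

```python
# Hypothetical reference motifs -- NOT the reward model's actual references.
ORIV_MOTIFS = ["TTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAA"]  # pUC/ColE1 ori fragment (assumed)
ARG_MOTIFS = ["ATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCC"]    # start of bla (AmpR) gene (assumed)

def has_long_repeat(seq: str, k: int = 20) -> bool:
    """Crude repeat check: does any k-mer occur more than once?"""
    seen = set()
    for i in range(len(seq) - k + 1):
        kmer = seq[i:i + k]
        if kmer in seen:
            return True
        seen.add(kmer)
    return False

def plasmid_reward(completions, **kwargs):
    """TRL-style reward function: returns one float per generated sequence."""
    rewards = []
    for seq in completions:
        score = 0.0
        if any(m in seq for m in ORIV_MOTIFS):
            score += 1.0  # a recognizable origin of replication is present
        if any(m in seq for m in ARG_MOTIFS):
            score += 1.0  # an antibiotic resistance gene is present
        if has_long_repeat(seq):
            score -= 1.0  # penalize problematic repeats
        rewards.append(score)
    return rewards
```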

## Training

This model was trained with GRPO using the [TRL library](https://github.com/huggingface/trl).

**Training run**: [Weights & Biases](https://wandb.ai/ucl-cssb/PlasmidRL/runs/4e783zua)

### Training Details

- **Base model**: UCL-CSSB/PlasmidGPT-SFT
- **Method**: GRPO (Group Relative Policy Optimization)
- **Checkpoint**: 800 steps
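The training script is not part of this commit. As an illustration only, wiring a reward function like the sketch above into TRL's `GRPOTrainer` looks roughly like this; the dataset path and hyperparameters are assumptions, not the settings of the actual run.

```python
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# Hypothetical prompt file; the real training data is not published here.
# GRPOTrainer expects a dataset with a "prompt" column.
dataset = load_dataset("json", data_files="plasmid_prompts.jsonl", split="train")

config = GRPOConfig(
    output_dir="PlasmidGPT-RL",
    num_generations=8,           # completions compared per prompt group (assumed)
    max_completion_length=1024,  # assumed; plasmid sequences are long
    max_steps=800,               # matches the released checkpoint
)

trainer = GRPOTrainer(
    model="UCL-CSSB/PlasmidGPT-SFT",  # the base model named in this card
    reward_funcs=plasmid_reward,      # e.g. the sketch in Model Description
    args=config,
    train_dataset=dataset,
)
trainer.train()
```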

## Usage

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("McClain/PlasmidGPT-RL")
model = AutoModelForCausalLM.from_pretrained("McClain/PlasmidGPT-RL")

# Generate a plasmid sequence
prompt = "ATG"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    inputs.input_ids,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.95,
    top_p=0.9
)
sequence = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(sequence)
```

## Framework Versions

- TRL: 0.23.1
- Transformers: 4.57.0
- PyTorch: 2.8.0

## Citation

If you use this model, please cite the GRPO paper:

```bibtex
@article{shao2024deepseekmath,
  title={{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
  author={Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
  year={2024},
  eprint={arXiv:2402.03300},
}
```
config.json (Normal file, 39 lines)
@@ -0,0 +1,39 @@
{
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 30000,
  "dtype": "float32",
  "embd_pdrop": 0.1,
  "eos_token_id": 30001,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 2048,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 2048,
  "pad_token_id": 3,
  "reorder_and_upcast_attn": false,
  "resid_pdrop": 0.1,
  "scale_attn_by_inverse_layer_idx": false,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 50
    }
  },
  "transformers_version": "4.57.0",
  "use_cache": true,
  "vocab_size": 30002
}
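Note the special-token layout: pad is id 3 while bos/eos sit at 30000/30001, the last two slots of the 30002-token vocabulary. A quick sanity check (not part of the commit) that this config and the tokenizer agree:

```python
from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained("McClain/PlasmidGPT-RL")
tokenizer = AutoTokenizer.from_pretrained("McClain/PlasmidGPT-RL")

# bos=30000 (<s>), eos=30001 (</s>), pad=3 ([PAD]) per this config.json
assert config.bos_token_id == tokenizer.bos_token_id == 30000
assert config.eos_token_id == tokenizer.eos_token_id == 30001
assert config.pad_token_id == tokenizer.pad_token_id == 3
assert config.vocab_size == 30002
```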
generation_config.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 30000,
  "eos_token_id": [
    30001
  ],
  "pad_token_id": 3,
  "transformers_version": "4.57.0"
}
model.safetensors (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3cde5d397ae479478770b59033fcb8c7bcf72569bb3e324b67572e47617ba0d
size 438696576
special_tokens_map.json (Normal file, 5 lines)
@@ -0,0 +1,5 @@
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "[PAD]"
}
tokenizer.json (Normal file, 150057 lines)
File diff suppressed because one or more lines are too long
tokenizer_config.json (Normal file, 71 lines)
@@ -0,0 +1,71 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30000": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30001": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "max_length": null,
  "model_max_length": 1000000000000000019884624838656,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "left",
  "tokenizer_class": "PreTrainedTokenizerFast"
}
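`padding_side` is `left`, which is what batched generation with a decoder-only model needs: padding on the left keeps each prompt flush against the tokens being generated. A small usage sketch (the prompts are illustrative):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("McClain/PlasmidGPT-RL")
model = AutoModelForCausalLM.from_pretrained("McClain/PlasmidGPT-RL")

# With left padding, shorter prompts are padded at the front, so every
# sequence ends at its true last prompt token when generation begins.
prompts = ["ATG", "ATGGCTAGC"]  # illustrative prompts
batch = tokenizer(prompts, return_tensors="pt", padding=True)
outputs = model.generate(**batch, max_new_tokens=64, do_sample=True, top_p=0.9)
for seq in tokenizer.batch_decode(outputs, skip_special_tokens=True):
    print(seq)
```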