Initialize the project; model provided by the ModelHub XC community
Model: URajinda/Qwen-1.5B-Burmese-SFT-v2
Source: Original Platform
.gitattributes (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md (new file, 63 lines)
@@ -0,0 +1,63 @@
---
# This top block is used by Hugging Face for metadata
library_name: transformers
tags:
- myanmar
- burmese
- instruction-tuned
- qlora
- qwen
- qwen-1.5
- llm
language: "my"
license: apache-2.0
model_name: "Qwen-1.5B-Burmese-SFT-v2"
model_creator: "URajinda"
base_model: "Qwen/Qwen-1.5-1.8B"
---

# 🚀 URajinda/Qwen-1.5B-Burmese-SFT-v2 (Burmese Fine-Tuned LLM)

This is a language model (LLM) produced by further **instruction tuning (SFT)** the **Qwen-1.5-1.8B base model from Alibaba Cloud** on Burmese-language data. It was trained specifically to do well at question answering (QA) and instruction following in Burmese.

## 💡 How to Use

Because the model was trained in a conversational (chat) format, prompt it using the `User:` and `Assistant:` tags.

### 🐍 Trying It with the Hugging Face Pipeline

```python
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

model_id = "URajinda/Qwen-1.5B-Burmese-SFT-v2"

# 1. Load the tokenizer and model
# Note: recent transformers versions accept `dtype` in place of `torch_dtype`
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype=torch.float16,
    device_map="auto"
)

# 2. Build the inference pipeline
# device_map="auto" has already placed the model, so no `device=` argument
# is passed here (transformers rejects it for accelerate-dispatched models).
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)

# 3. Ask a question in Burmese (using the instruction format)
# The prompt says: "Describe three famous places in Mandalay."
prompt = "User: မန္တလေးမြို့ရဲ့ အထင်ကရနေရာ သုံးခုကို ဖော်ပြပေးပါ။\nAssistant:"

output = generator(
    prompt,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    eos_token_id=tokenizer.eos_token_id
)

print(output[0]['generated_text'])
```
added_tokens.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
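These three IDs are the ChatML control tokens Qwen2 ships with. A quick round-trip check that the tokenizer agrees with this file (a minimal sketch; it assumes `transformers` is installed and the repo is downloadable):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("URajinda/Qwen-1.5B-Burmese-SFT-v2")

# Each special token should map back to the ID recorded in added_tokens.json.
for token, expected_id in [("<|endoftext|>", 151643),
                           ("<|im_start|>", 151644),
                           ("<|im_end|>", 151645)]:
    assert tok.convert_tokens_to_ids(token) == expected_id, token
print("added_tokens.json matches the tokenizer")
```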
chat_template.jinja (new file, 6 lines)
@@ -0,0 +1,6 @@
{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
You are a helpful assistant.<|im_end|>
' }}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %}
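This is the standard Qwen ChatML template: when the first message is not a system turn it injects a default `You are a helpful assistant.` system prompt, then wraps every turn in `<|im_start|>role ... <|im_end|>`. A sketch of how `apply_chat_template` renders it (assuming the same repo id as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("URajinda/Qwen-1.5B-Burmese-SFT-v2")

messages = [{"role": "user", "content": "မင်္ဂလာပါ"}]  # "Hello" in Burmese

# tokenize=False returns the rendered string instead of token IDs.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Expected shape, per the template above:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# မင်္ဂလာပါ<|im_end|>
# <|im_start|>assistant
```

Note that this ChatML layout differs from the plain `User:`/`Assistant:` prompt format shown in the README; the commit does not state which format the SFT data used, so both may be worth trying.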
config.json (new file, 58 lines)
@@ -0,0 +1,58 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "dtype": "float16",
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "layer_types": [
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention"
  ],
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "transformers_version": "4.57.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
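Two derived quantities are worth reading off this config: the per-head dimension and the grouped-query-attention (GQA) ratio, since the 12 query heads share only 2 key/value heads. A sanity-check sketch in plain Python, no downloads required:

```python
# Values copied from config.json above.
hidden_size = 1536
num_attention_heads = 12
num_key_value_heads = 2

head_dim = hidden_size // num_attention_heads            # 1536 / 12 = 128
gqa_groups = num_attention_heads // num_key_value_heads  # 6 query heads per KV head
kv_proj_dim = num_key_value_heads * head_dim             # 256-dim K and V per layer

print(head_dim, gqa_groups, kv_proj_dim)  # -> 128 6 256
```

`tie_word_embeddings: true` means the 151936 x 1536 input embedding matrix doubles as the output head, which accounts for a large share of the model's roughly 1.5B parameters.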
generation_config.json (new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.57.3"
}
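These sampling defaults (temperature 0.7, top-p 0.8, top-k 20, repetition penalty 1.1) are applied automatically by `generate()`; keyword arguments passed at call time override them. A minimal sketch, assuming the model and tokenizer from the README:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "URajinda/Qwen-1.5B-Burmese-SFT-v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# `dtype=` follows the README's note; older transformers versions used `torch_dtype=`.
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, device_map="auto")

inputs = tokenizer("User: မင်္ဂလာပါ\nAssistant:", return_tensors="pt").to(model.device)

# No sampling kwargs: the defaults from generation_config.json apply.
sampled = model.generate(**inputs, max_new_tokens=64)

# Explicit kwargs override the stored defaults, e.g. greedy decoding:
greedy = model.generate(**inputs, max_new_tokens=64, do_sample=False)

print(tokenizer.decode(sampled[0], skip_special_tokens=True))
```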
merges.txt (new file, 151388 lines)
File diff suppressed because it is too large
model.safetensors (new file, 3 lines, Git LFS pointer)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33cd656a4bd8195c022a4dd5f63a725dc4abafc98d9bd6cacd1f552ec0cd7814
size 3087466808
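The committed file is only a Git LFS pointer; the real ~3.1 GB safetensors blob lives in LFS storage, addressed by the sha256 above. Once fetched, the blob can be verified against the pointer. A minimal sketch (the local path is hypothetical):

```python
import hashlib

EXPECTED_OID = "33cd656a4bd8195c022a4dd5f63a725dc4abafc98d9bd6cacd1f552ec0cd7814"
EXPECTED_SIZE = 3087466808

def verify(path: str) -> bool:
    # Stream in 1 MiB chunks so the 3 GB file never sits fully in memory.
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify("model.safetensors"))  # True if the LFS object matches the pointer
```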
special_tokens_map.json (new file, 20 lines)
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json (new file, 3 lines, Git LFS pointer)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcfe42da0a4497e8b2b172c1f9f4ec423a46dc12907f4349c55025f670422ba9
size 11418266
tokenizer_config.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
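Two practical details here: `pad_token` (`<|endoftext|>`) differs from `eos_token` (`<|im_end|>`), and `bos_token` is null, so nothing is prepended automatically. For batched generation with a decoder-only model, left padding is the usual choice; a hedged sketch (the second Burmese prompt, "How are you?", is an illustrative example):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("URajinda/Qwen-1.5B-Burmese-SFT-v2")
tok.padding_side = "left"  # decoder-only models should be left-padded for generation

batch = tok(
    ["User: မင်္ဂလာပါ\nAssistant:", "User: နေကောင်းလား\nAssistant:"],
    return_tensors="pt",
    padding=True,  # pads with <|endoftext|> (ID 151643), per this config
)
print(batch["input_ids"].shape, batch["attention_mask"][0])
```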
1
vocab.json
Normal file
1
vocab.json
Normal file
File diff suppressed because one or more lines are too long