From 6d11cffea947b8fbbefbbde473d2aa35452dcb4e Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sun, 12 Apr 2026 05:04:55 +0800
Subject: [PATCH] Initialize the project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: CharlieGreenman/email-qwen3-0.6b
Source: Original Platform
---
 .gitattributes              |  37 ++++++
 README.md                   | 149 ++++++++++++++++++++++++++++++++++
 chat_template.jinja         |  99 +++++++++++++++++++
 config.json                 |  64 ++++++++++++
 email-qwen3-06b-q4_k_m.gguf |   3 +
 model.safetensors           |   3 +
 tokenizer.json              |   3 +
 tokenizer_config.json       |  18 +++
 8 files changed, 376 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 chat_template.jinja
 create mode 100644 config.json
 create mode 100644 email-qwen3-06b-q4_k_m.gguf
 create mode 100644 model.safetensors
 create mode 100644 tokenizer.json
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..359d19e
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,37 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+email-qwen3-06b-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..559476b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,149 @@
+---
+license: apache-2.0
+language:
+- en
+tags:
+- email
+- cold-outreach
+- text-generation
+- qwen3
+- fine-tuned
+base_model: Qwen/Qwen3-0.6B
+pipeline_tag: text-generation
+---
+
+# Email-Qwen3-0.6B — Fine-tuned for Email Generation
+
+A fine-tuned Qwen3 0.6B model specialized in generating professional emails from simple prompts. Trained on 130k curated email examples with 5 rounds of rejection-sampling alignment.
+
+## Model Details
+
+- **Base model:** Qwen/Qwen3-0.6B
+- **Training:** SFT on 130k prompt-email pairs + 5 rounds of rejection-sampling fine-tuning
+- **Quantized version:** Q4_K_M (378MB) available for local inference via llama.cpp
+- **Use case:** Cold outreach, thank-you, request, apology, invitation, congratulations, and 10+ other email types
+
+## Usage
+
+### With llama.cpp (recommended)
+
+```bash
+# Download the GGUF quantized version (assumes the huggingface_hub CLI is installed)
+huggingface-cli download CharlieGreenman/email-qwen3-0.6b email-qwen3-06b-q4_k_m.gguf --local-dir .
+
+# Start the server
+llama-server -m email-qwen3-06b-q4_k_m.gguf --host 127.0.0.1 --port 8081 -c 2048
+
+# Generate an email
+curl http://127.0.0.1:8081/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "messages": [
+      {"role": "system", "content": "You are an email writing assistant. Write a polished email body for the given request."},
+      {"role": "user", "content": "Cold outreach to the CTO at Stripe about our developer tools platform"}
+    ],
+    "max_tokens": 256,
+    "temperature": 0.7
+  }'
+```
+
+### With Transformers
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained("CharlieGreenman/email-qwen3-0.6b")
+tokenizer = AutoTokenizer.from_pretrained("CharlieGreenman/email-qwen3-0.6b")
+
+messages = [
+    {"role": "system", "content": "You are an email writing assistant. Write a polished email body for the given request."},
+    {"role": "user", "content": "Thank Sarah for helping with the presentation last week"},
+]
+
+text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+inputs = tokenizer(text, return_tensors="pt")
+outputs = model.generate(**inputs, max_new_tokens=256, temperature=0.7, do_sample=True)
+print(tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True))
+```
+
+## Tips for Best Results
+
+- **One paragraph at a time:** This model performs best when asked to generate individual paragraphs rather than full multi-paragraph emails. Generate each paragraph with a focused prompt, then assemble.
+- **Keep prompts specific:** Include the recipient's name, company, role, and topic for better personalization.
+- **Use best-of-N:** Generate 3-5 variants and pick the best one; small models benefit significantly from selection (see the examples after the email-type list below).
+- **Temperature 0.7-0.8** works well for email generation.
+
+## Training Data
+
+- 50,000 diverse email prompts across 17 email types
+- 34,055 high-quality prompt-email pairs (scored 80+ by our quality scorer)
+- 96,034 section-level examples (individual email paragraphs)
+- 5 rounds of rejection sampling using best-of-5 selection with quality scoring
+
+## Supported Email Types
+
+Cold outreach, follow-up, newsletter, transactional, welcome, personal, request, meeting, FYI, thank-you, confirmation, apology, introduction, invitation, deadline, congratulations, and freeform.
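+
+## Example: Best-of-N Selection
+
+The sketch below makes the best-of-N tip concrete: sample several candidate emails in one `generate` call, then keep the highest-scoring one. The `score_email` heuristic and the example prompt are illustrative placeholders, not the quality scorer used during training.
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained("CharlieGreenman/email-qwen3-0.6b")
+tokenizer = AutoTokenizer.from_pretrained("CharlieGreenman/email-qwen3-0.6b")
+
+def score_email(text: str) -> float:
+    # Hypothetical heuristic: prefer drafts near 120 words that open with a greeting.
+    words = len(text.split())
+    return -abs(words - 120) + (10 if text.lstrip().lower().startswith(("hi", "hello", "dear")) else 0)
+
+messages = [
+    {"role": "system", "content": "You are an email writing assistant. Write a polished email body for the given request."},
+    {"role": "user", "content": "Follow up with Alex about the Q3 roadmap review"},
+]
+text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+inputs = tokenizer(text, return_tensors="pt")
+
+# Sample five candidates in one call, then keep the best-scoring one.
+outputs = model.generate(
+    **inputs,
+    max_new_tokens=256,
+    do_sample=True,
+    temperature=0.7,
+    num_return_sequences=5,
+)
+candidates = [
+    tokenizer.decode(out[inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+    for out in outputs
+]
+print(max(candidates, key=score_email))
+```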
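+
+## Example: Rejection-Sampling Data Loop
+
+As context for the training recipe above, here is a minimal sketch of one rejection-sampling round: draw best-of-5 candidates per prompt, score them, and keep only pairs that clear the quality threshold for the next SFT round. `generate_candidates` and `score_email` are hypothetical stand-ins for the project's actual generator and quality scorer.
+
+```python
+def rejection_sample(prompts, generate_candidates, score_email, threshold=80):
+    """Collect (prompt, email) pairs whose best candidate clears the threshold."""
+    kept = []
+    for prompt in prompts:
+        candidates = generate_candidates(prompt, n=5)  # best-of-5 sampling
+        best = max(candidates, key=score_email)
+        if score_email(best) >= threshold:
+            kept.append({"prompt": prompt, "email": best})
+    return kept  # feed into the next SFT round
+```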
+
+## Limitations
+
+- Best for common email types; may struggle with unusual or highly creative prompts
+- Generates email body text; subject lines should be handled separately
+- Small model (0.6B): quality improves significantly with best-of-N selection and post-processing
+- May occasionally hallucinate company names or statistics
+
+## License
+
+Apache 2.0
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000..370b963
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,99 @@
+{%- if tools %}
+    {{- '<|im_start|>system\n' }}
+    {%- if messages[0].role == 'system' %}
+        {{- messages[0].content + '\n\n' }}
+    {%- endif %}
+    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+    {%- for tool in tools %}
+        {{- "\n" }}
+        {{- tool | tojson }}
+    {%- endfor %}
+    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+{%- else %}
+    {%- if messages[0].role == 'system' %}
+        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+    {%- endif %}
+{%- endif %}
+{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+{%- for forward_message in messages %}
+    {%- set index = (messages|length - 1) - loop.index0 %}
+    {%- set message = messages[index] %}
+    {%- set current_content = message.content if message.content is defined and message.content is not none else '' %}
+    {%- set tool_start = '<tool_response>' %}
+    {%- set tool_start_length = tool_start|length %}
+    {%- set start_of_message = current_content[:tool_start_length] %}
+    {%- set tool_end = '</tool_response>' %}
+    {%- set tool_end_length = tool_end|length %}
+    {%- set start_pos = (current_content|length) - tool_end_length %}
+    {%- if start_pos < 0 %}
+        {%- set start_pos = 0 %}
+    {%- endif %}
+    {%- set end_of_message = current_content[start_pos:] %}
+    {%- if ns.multi_step_tool and message.role == "user" and not(start_of_message == tool_start and end_of_message == tool_end) %}
+        {%- set ns.multi_step_tool = false %}
+        {%- set ns.last_query_index = index %}
+    {%- endif %}
+{%- endfor %}
+{%- for message in messages %}
+    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {%- set m_content = message.content if message.content is defined and message.content is not none else '' %}
+        {%- set content = m_content %}
+        {%- set reasoning_content = '' %}
+        {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+            {%- set reasoning_content = message.reasoning_content %}
+        {%- else %}
+            {%- if '</think>' in m_content %}
+                {%- set content = (m_content.split('</think>')|last).lstrip('\n') %}
+                {%- set reasoning_content = (m_content.split('</think>')|first).rstrip('\n') %}
+                {%- set reasoning_content = (reasoning_content.split('<think>')|last).lstrip('\n') %}
+            {%- endif %}
+        {%- endif %}
+        {%- if loop.index0 > ns.last_query_index %}
+            {%- if loop.last or (not loop.last and (not reasoning_content.strip() == '')) %}
+                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+            {%- else %}
+                {{- '<|im_start|>' + message.role + '\n' + content }}
+            {%- endif %}
+        {%- else %}
+            {{- '<|im_start|>' + message.role + '\n' + content }}
+        {%- endif %}
+        {%- if message.tool_calls %}
+            {%- for tool_call in message.tool_calls %}
+                {%- if (loop.first and content) or (not loop.first) %}
+                    {{- '\n' }}
+                {%- endif %}
+                {%- if tool_call.function %}
+                    {%- set tool_call = tool_call.function %}
+                {%- endif %}
+                {{- '<tool_call>\n{"name": "' }}
+                {{- tool_call.name }}
+                {{- '", "arguments": ' }}
+                {%- if tool_call.arguments is string %}
+                    {{- tool_call.arguments }}
+                {%- else %}
+                    {{- tool_call.arguments | tojson }}
+                {%- endif %}
+                {{- '}\n</tool_call>' }}
+            {%- endfor %}
+        {%- endif %}
+        {{- '<|im_end|>\n' }}
+    {%- elif message.role == "tool" %}
+        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+            {{- '<|im_start|>user' }}
+        {%- endif %}
+        {{- '\n<tool_response>\n' }}
+        {{- message.content }}
+        {{- '\n</tool_response>' }}
+        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+            {{- '<|im_end|>\n' }}
+        {%- endif %}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+    {%- if enable_thinking is defined and enable_thinking is false %}
+        {{- '<think>\n\n</think>\n\n' }}
+    {%- endif %}
+{%- endif %}
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..3bb3514
--- /dev/null
+++ b/config.json
@@ -0,0 +1,64 @@
+{
+  "architectures": [
+    "Qwen3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": null,
+  "torch_dtype": "bfloat16",
+  "eos_token_id": 151645,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 40960,
+  "max_window_layers": 28,
+  "model_type": "qwen3",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 8,
+  "pad_token_id": 151669,
+  "rms_norm_eps": 1e-06,
+  "rope_parameters": {
+    "rope_theta": 1000000,
+    "rope_type": "default"
+  },
+  "sliding_window": null,
+  "tie_word_embeddings": true,
+  "unsloth_fixed": true,
+  "unsloth_version": "2026.4.2",
+  "use_cache": false,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
\ No newline at end of file
diff --git a/email-qwen3-06b-q4_k_m.gguf b/email-qwen3-06b-q4_k_m.gguf
new file mode 100644
index 0000000..3683b91
--- /dev/null
+++ b/email-qwen3-06b-q4_k_m.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d25c5506a93396821d07faadfe96c3b99d70aab452c3f112a967262664813bd9
+size 396705216
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..539e5ee
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9fa771421062228a6fc2f04ae0884b60657bede61bfbd1952356a487d80ee91
+size 1192135096
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..7edcf72
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7430e9138b76e93fb6f93462394d236b411111aef53cb421ba97d2691040cca
+size 11423114
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..498c24a
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,18 @@
+{
+  "add_prefix_space": false,
+  "backend": "tokenizers",
+  "bos_token": null,
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "is_local": true,
+  "max_length": null,
+  "model_max_length": 40960,
+  "pad_to_multiple_of": null,
+  "pad_token": "<|PAD_TOKEN|>",
+  "pad_token_type_id": 0,
+  "padding_side": "left",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}