From 0ff598b84b25bf6a3d5971a50bf3ab7876d9d6a3 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Mon, 4 May 2026 12:05:44 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: megabytes/gemma-3-1b-qat-int4-heretic
Source: Original Platform
---
 .gitattributes         |  36 +++
 README.md              | 482 +++++++++++++++++++++++++++++++++++++++++
 chat_template.jinja    |  47 ++++
 config.json            |  72 ++++++
 generation_config.json |  11 +
 model.safetensors      |   3 +
 tokenizer.json         |   3 +
 tokenizer_config.json  |  23 ++
 8 files changed, 677 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 chat_template.jinja
 create mode 100644 config.json
 create mode 100644 generation_config.json
 create mode 100644 model.safetensors
 create mode 100644 tokenizer.json
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..52373fe
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..20328ae
--- /dev/null
+++ b/README.md
@@ -0,0 +1,482 @@
+---
+base_model:
+- google/gemma-3-1b-it-qat-int4-unquantized
+license: gemma
+tags:
+- gemma3
+- gemma
+- google
+- heretic
+- uncensored
+- decensored
+- abliterated
+pipeline_tag: text-generation
+library_name: transformers
+extra_gated_heading: Access Gemma on Hugging Face
+extra_gated_prompt: >-
+  To access Gemma on Hugging Face, you’re required to review and agree to
+  Google’s usage license. To do this, please ensure you’re logged in to Hugging
+  Face and click below. Requests are processed immediately.
+extra_gated_button_content: Acknowledge license
+---
+# This is a decensored version of [google/gemma-3-1b-it-qat-int4-unquantized](https://huggingface.co/google/gemma-3-1b-it-qat-int4-unquantized), made using [Heretic](https://github.com/p-e-w/heretic) v1.2.0
+
+## Abliteration parameters
+
+| Parameter | Value |
+| :-------- | :---: |
+| **direction_index** | per layer |
+| **attn.o_proj.max_weight** | 1.47 |
+| **attn.o_proj.max_weight_position** | 17.99 |
+| **attn.o_proj.min_weight** | 1.34 |
+| **attn.o_proj.min_weight_distance** | 5.29 |
+| **mlp.down_proj.max_weight** | 1.31 |
+| **mlp.down_proj.max_weight_position** | 22.92 |
+| **mlp.down_proj.min_weight** | 1.15 |
+| **mlp.down_proj.min_weight_distance** | 4.07 |
+
+## Performance
+
+| Metric | This model | Original model ([google/gemma-3-1b-it-qat-int4-unquantized](https://huggingface.co/google/gemma-3-1b-it-qat-int4-unquantized)) |
+| :----- | :--------: | :---------------------------: |
+| **KL divergence** | 0.0082 | 0 *(by definition)* |
+| **Refusals** | 6/100 | 88/100 |
+
+A simple way to spot-check the refusal counts above is sketched near the end of
+this card.
+
+-----
+
+
+# Gemma 3 model card
+
+**Model Page**: [Gemma](https://ai.google.dev/gemma/docs/core)
+
+> [!Note]
+> This repository corresponds to the 1B **instruction-tuned** version of the Gemma 3 model, using Quantization Aware Training (QAT).
+>
+> **The checkpoint in this repository is unquantized; please make sure to quantize it to int4 with your favorite tool.**
+>
+> Thanks to QAT, the model preserves quality similar to `bfloat16` while significantly reducing the memory required
+> to load the model.
+
+
+**Resources and Technical Documentation**:
+
+* [Gemma 3 Technical Report][g3-tech-report]
+* [Responsible Generative AI Toolkit][rai-toolkit]
+* [Gemma on Kaggle][kaggle-gemma]
+* [Gemma on Vertex Model Garden][vertex-mg-gemma3]
+
+**Terms of Use**: [Terms][terms]
+
+**Authors**: Google DeepMind
+
+## Model Information
+
+Summary description and brief definition of inputs and outputs.
+
+### Description
+
+Gemma is a family of lightweight, state-of-the-art open models from Google,
+built from the same research and technology used to create the Gemini models.
+Gemma 3 models are multimodal, handling text and image input and generating text
+output, with open weights for both pre-trained variants and instruction-tuned
+variants. Gemma 3 has a large, 128K context window, multilingual support in over
+140 languages, and is available in more sizes than previous versions. Gemma 3
+models are well-suited for a variety of text generation and image understanding
+tasks, including question answering, summarization, and reasoning. Their
+relatively small size makes it possible to deploy them in environments with
+limited resources such as laptops, desktops, or your own cloud infrastructure,
+democratizing access to state-of-the-art AI models and helping foster innovation
+for everyone.
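+
+For a quick start, here is a minimal usage sketch. It loads this checkpoint with
+the Transformers API and quantizes it to int4 at load time, as the note above
+recommends. The repo id is taken from this repository; `bitsandbytes` is only one
+possible quantization tool among several, so substitute your preferred int4
+workflow.
+
+```python
+# Minimal usage sketch (one possible setup, not the only supported one):
+# load the unquantized QAT checkpoint and quantize it to int4 on the fly.
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+model_id = "megabytes/gemma-3-1b-qat-int4-heretic"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    quantization_config=BitsAndBytesConfig(load_in_4bit=True),  # int4, per the QAT note
+    device_map="auto",
+)
+
+# The bundled chat template wraps turns in <start_of_turn>/<end_of_turn> markers.
+messages = [{"role": "user", "content": "Explain quantization-aware training in one sentence."}]
+input_ids = tokenizer.apply_chat_template(
+    messages, add_generation_prompt=True, return_tensors="pt"
+).to(model.device)
+
+# Sampling settings mirror this repo's generation_config.json (top_k=64, top_p=0.95).
+output = model.generate(input_ids, max_new_tokens=128, do_sample=True, top_k=64, top_p=0.95)
+print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
+```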
+
+### Inputs and outputs
+
+- **Input:**
+  - Text string, such as a question, a prompt, or a document to be summarized
+  - Images, normalized to 896 x 896 resolution and encoded to 256 tokens
+    each
+  - Total input context of 128K tokens for the 4B, 12B, and 27B sizes, and
+    32K tokens for the 1B size
+
+- **Output:**
+  - Generated text in response to the input, such as an answer to a
+    question, analysis of image content, or a summary of a document
+  - Total output context of 8192 tokens
+
+### Citation
+
+```none
+@article{gemma_2025,
+    title={Gemma 3},
+    url={https://goo.gle/Gemma3Report},
+    publisher={Kaggle},
+    author={Gemma Team},
+    year={2025}
+}
+```
+
+## Model Data
+
+Data used for model training and how the data was processed.
+
+### Training Dataset
+
+These models were trained on a dataset of text data that includes a wide variety
+of sources. The 27B model was trained with 14 trillion tokens, the 12B model was
+trained with 12 trillion tokens, the 4B model was trained with 4 trillion tokens,
+and the 1B model with 2 trillion tokens. Here are the key components:
+
+- Web Documents: A diverse collection of web text ensures the model is
+  exposed to a broad range of linguistic styles, topics, and vocabulary. The
+  training dataset includes content in over 140 languages.
+- Code: Exposing the model to code helps it to learn the syntax and
+  patterns of programming languages, which improves its ability to generate
+  code and understand code-related questions.
+- Mathematics: Training on mathematical text helps the model learn logical
+  reasoning and symbolic representation, and to address mathematical queries.
+- Images: A wide range of images enables the model to perform image
+  analysis and visual data extraction tasks.
+
+The combination of these diverse data sources is crucial for training a powerful
+multimodal model that can handle a wide variety of different tasks and data
+formats.
+
+### Data Preprocessing
+
+Here are the key data cleaning and filtering methods applied to the training
+data:
+
+- CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering
+  was applied at multiple stages in the data preparation process to ensure
+  the exclusion of harmful and illegal content.
+- Sensitive Data Filtering: As part of making Gemma pre-trained models
+  safe and reliable, automated techniques were used to filter out certain
+  personal information and other sensitive data from training sets.
+- Additional methods: Filtering based on content quality and safety in
+  line with [our policies][safety-policies].
+
+## Implementation Information
+
+Details about the model internals.
+
+### Hardware
+
+Gemma was trained using [Tensor Processing Unit (TPU)][tpu] hardware (TPUv4p,
+TPUv5p, and TPUv5e). Training vision-language models (VLMs) requires significant
+computational power. TPUs, designed specifically for matrix operations common in
+machine learning, offer several advantages in this domain:
+
+- Performance: TPUs are specifically designed to handle the massive
+  computations involved in training VLMs. They can speed up training
+  considerably compared to CPUs.
+- Memory: TPUs often come with large amounts of high-bandwidth memory,
+  allowing for the handling of large models and batch sizes during training.
+  This can lead to better model quality.
+- Scalability: TPU Pods (large clusters of TPUs) provide a scalable
+  solution for handling the growing complexity of large foundation models.
+  You can distribute training across multiple TPU devices for faster and more
+  efficient processing.
+- Cost-effectiveness: In many scenarios, TPUs can provide a more
+  cost-effective solution for training large models compared to CPU-based
+  infrastructure, especially when considering the time and resources saved
+  due to faster training.
+- These advantages are aligned with
+  [Google's commitments to operate sustainably][sustainability].
+
+### Software
+
+Training was done using [JAX][jax] and [ML Pathways][ml-pathways].
+
+JAX allows researchers to take advantage of the latest generation of hardware,
+including TPUs, for faster and more efficient training of large models. ML
+Pathways is Google's latest effort to build artificially intelligent systems
+capable of generalizing across multiple tasks. This is especially suitable for
+foundation models, including large language models like these.
+
+Together, JAX and ML Pathways are used as described in the
+[paper about the Gemini family of models][gemini-2-paper]; *"the 'single
+controller' programming model of Jax and Pathways allows a single Python
+process to orchestrate the entire training run, dramatically simplifying the
+development workflow."*
+
+## Evaluation
+
+> [!Note]
+> The evaluation in this section corresponds to the original checkpoint, not the QAT checkpoint.
+>
+
+Model evaluation metrics and results.
+
+### Benchmark Results
+
+These models were evaluated against a large collection of different datasets and
+metrics to cover different aspects of text generation:
+
+#### Reasoning and factuality
+
+| Benchmark                     | Metric   | Gemma 3 PT 1B | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B |
+| ----------------------------- | -------- | :-----------: | :-----------: | :------------: | :------------: |
+| [HellaSwag][hellaswag]        | 10-shot  | 62.3          | 77.2          | 84.2           | 85.6           |
+| [BoolQ][boolq]                | 0-shot   | 63.2          | 72.3          | 78.8           | 82.4           |
+| [PIQA][piqa]                  | 0-shot   | 73.8          | 79.6          | 81.8           | 83.3           |
+| [SocialIQA][socialiqa]        | 0-shot   | 48.9          | 51.9          | 53.4           | 54.9           |
+| [TriviaQA][triviaqa]          | 5-shot   | 39.8          | 65.8          | 78.2           | 85.5           |
+| [Natural Questions][naturalq] | 5-shot   | 9.48          | 20.0          | 31.4           | 36.1           |
+| [ARC-c][arc]                  | 25-shot  | 38.4          | 56.2          | 68.9           | 70.6           |
+| [ARC-e][arc]                  | 0-shot   | 73.0          | 82.4          | 88.3           | 89.0           |
+| [WinoGrande][winogrande]      | 5-shot   | 58.2          | 64.7          | 74.3           | 78.8           |
+| [BIG-Bench Hard][bbh]         | few-shot | 28.4          | 50.9          | 72.6           | 77.7           |
+| [DROP][drop]                  | 1-shot   | 42.4          | 60.1          | 72.2           | 77.2           |
+
+[hellaswag]: https://arxiv.org/abs/1905.07830
+[boolq]: https://arxiv.org/abs/1905.10044
+[piqa]: https://arxiv.org/abs/1911.11641
+[socialiqa]: https://arxiv.org/abs/1904.09728
+[triviaqa]: https://arxiv.org/abs/1705.03551
+[naturalq]: https://github.com/google-research-datasets/natural-questions
+[arc]: https://arxiv.org/abs/1911.01547
+[winogrande]: https://arxiv.org/abs/1907.10641
+[bbh]: https://paperswithcode.com/dataset/bbh
+[drop]: https://arxiv.org/abs/1903.00161
+
+#### STEM and code
+
+| Benchmark              | Metric   | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B |
+| ---------------------- | -------- | :-----------: | :------------: | :------------: |
+| [MMLU][mmlu]           | 5-shot   | 59.6          | 74.5           | 78.6           |
+| [MMLU][mmlu] (Pro COT) | 5-shot   | 29.2          | 45.3           | 52.2           |
+| [AGIEval][agieval]     | 3-5-shot | 42.1          | 57.4           | 66.2           |
+| [MATH][math]           | 4-shot   | 24.2          | 43.3           | 50.0           |
+| [GSM8K][gsm8k]         | 8-shot   | 38.4          | 71.0           | 82.6           |
+| [GPQA][gpqa]           | 5-shot   | 15.0          | 25.4           | 24.3           |
+| [MBPP][mbpp]           | 3-shot   | 46.0          | 60.4           | 65.6           |
+| [HumanEval][humaneval] | 0-shot   | 36.0          | 45.7           | 48.8           |
+
+[mmlu]: https://arxiv.org/abs/2009.03300
+[agieval]: https://arxiv.org/abs/2304.06364
+[math]: https://arxiv.org/abs/2103.03874
+[gsm8k]: https://arxiv.org/abs/2110.14168
+[gpqa]: https://arxiv.org/abs/2311.12022
+[mbpp]: https://arxiv.org/abs/2108.07732
+[humaneval]: https://arxiv.org/abs/2107.03374
+
+#### Multilingual
+
+| Benchmark                            | Gemma 3 PT 1B | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B |
+| ------------------------------------ | :-----------: | :-----------: | :------------: | :------------: |
+| [MGSM][mgsm]                         | 2.04          | 34.7          | 64.3           | 74.3           |
+| [Global-MMLU-Lite][global-mmlu-lite] | 24.9          | 57.0          | 69.4           | 75.7           |
+| [WMT24++][wmt24pp] (ChrF)            | 36.7          | 48.4          | 53.9           | 55.7           |
+| [FloRes][flores]                     | 29.5          | 39.2          | 46.0           | 48.8           |
+| [XQuAD][xquad] (all)                 | 43.9          | 68.0          | 74.5           | 76.8           |
+| [ECLeKTic][eclektic]                 | 4.69          | 11.0          | 17.2           | 24.4           |
+| [IndicGenBench][indicgenbench]       | 41.4          | 57.2          | 61.7           | 63.4           |
+
+[mgsm]: https://arxiv.org/abs/2210.03057
+[flores]: https://arxiv.org/abs/2106.03193
+[xquad]: https://arxiv.org/abs/1910.11856v3
+[global-mmlu-lite]: https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite
+[wmt24pp]: https://arxiv.org/abs/2502.12404v1
+[eclektic]: https://arxiv.org/abs/2502.21228
+[indicgenbench]: https://arxiv.org/abs/2404.16816
+
+#### Multimodal
+
+| Benchmark                    | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B |
+| ---------------------------- | :-----------: | :------------: | :------------: |
+| [COCOcap][coco-cap]          | 102           | 111            | 116            |
+| [DocVQA][docvqa] (val)       | 72.8          | 82.3           | 85.6           |
+| [InfoVQA][info-vqa] (val)    | 44.1          | 54.8           | 59.4           |
+| [MMMU][mmmu] (pt)            | 39.2          | 50.3           | 56.1           |
+| [TextVQA][textvqa] (val)     | 58.9          | 66.5           | 68.6           |
+| [RealWorldQA][realworldqa]   | 45.5          | 52.2           | 53.9           |
+| [ReMI][remi]                 | 27.3          | 38.5           | 44.8           |
+| [AI2D][ai2d]                 | 63.2          | 75.2           | 79.0           |
+| [ChartQA][chartqa]           | 63.6          | 74.7           | 76.3           |
+| [VQAv2][vqav2]               | 63.9          | 71.2           | 72.9           |
+| [BLINK][blinkvqa]            | 38.0          | 35.9           | 39.6           |
+| [OKVQA][okvqa]               | 51.0          | 58.7           | 60.2           |
+| [TallyQA][tallyqa]           | 42.5          | 51.8           | 54.3           |
+| [SpatialSense VQA][ss-vqa]   | 50.9          | 60.0           | 59.4           |
+| [CountBenchQA][countbenchqa] | 26.1          | 17.8           | 68.0           |
+
+[coco-cap]: https://cocodataset.org/#home
+[docvqa]: https://www.docvqa.org/
+[info-vqa]: https://arxiv.org/abs/2104.12756
+[mmmu]: https://arxiv.org/abs/2311.16502
+[textvqa]: https://textvqa.org/
+[realworldqa]: https://paperswithcode.com/dataset/realworldqa
+[remi]: https://arxiv.org/html/2406.09175v1
+[ai2d]: https://allenai.org/data/diagrams
+[chartqa]: https://arxiv.org/abs/2203.10244
+[vqav2]: https://visualqa.org/index.html
+[blinkvqa]: https://arxiv.org/abs/2404.12390
+[okvqa]: https://okvqa.allenai.org/
+[tallyqa]: https://arxiv.org/abs/1810.12440
+[ss-vqa]: https://arxiv.org/abs/1908.02660
+[countbenchqa]: https://github.com/google-research/big_vision/blob/main/big_vision/datasets/countbenchqa/
+
+## Ethics and Safety
+
+Ethics and safety evaluation approach and results.
+
+### Evaluation Approach
+
+Our evaluation methods include structured evaluations and internal red-teaming
+testing of relevant content policies. Red-teaming was conducted by a number of
+different teams, each with different goals and human evaluation metrics. These
+models were evaluated against a number of different categories relevant to
+ethics and safety, including:
+
+- **Child Safety**: Evaluation of text-to-text and image-to-text prompts
+  covering child safety policies, including child sexual abuse and
+  exploitation.
+- **Content Safety**: Evaluation of text-to-text and image-to-text prompts
+  covering safety policies, including harassment, violence and gore, and hate
+  speech.
+- **Representational Harms**: Evaluation of text-to-text and image-to-text
+  prompts covering safety policies, including bias, stereotyping, and harmful
+  associations or inaccuracies.
+
+In addition to development-level evaluations, we conduct "assurance
+evaluations" which are our 'arms-length' internal evaluations for responsibility
+governance decision making. They are conducted separately from the model
+development team, to inform decision making about release. High-level findings
+are fed back to the model team, but prompt sets are held out to prevent
+overfitting and preserve the results' ability to inform decision making.
+Assurance evaluation results are reported to our Responsibility & Safety Council
+as part of release review.
+
+### Evaluation Results
+
+For all areas of safety testing, we saw major improvements in the categories of
+child safety, content safety, and representational harms relative to previous
+Gemma models. All testing was conducted without safety filters to evaluate the
+model's capabilities and behaviors. For both text-to-text and image-to-text, and
+across all model sizes, the model produced minimal policy violations, and showed
+significant improvements over previous Gemma models' performance with respect
+to ungrounded inferences. A limitation of our evaluations was that they included
+only English-language prompts.
+
+## Usage and Limitations
+
+These models have certain limitations that users should be aware of.
+
+### Intended Usage
+
+Open vision-language models (VLMs) have a wide range of applications
+across various industries and domains. The following list of potential uses is
+not comprehensive. The purpose of this list is to provide contextual information
+about the possible use cases that the model creators considered as part of model
+training and development.
+
+- Content Creation and Communication
+  - Text Generation: These models can be used to generate creative text
+    formats such as poems, scripts, code, marketing copy, and email drafts.
+  - Chatbots and Conversational AI: Power conversational interfaces
+    for customer service, virtual assistants, or interactive applications.
+  - Text Summarization: Generate concise summaries of a text corpus,
+    research papers, or reports.
+  - Image Data Extraction: These models can be used to extract,
+    interpret, and summarize visual data for text communications.
+- Research and Education
+  - Natural Language Processing (NLP) and VLM Research: These
+    models can serve as a foundation for researchers to experiment with VLM
+    and NLP techniques, develop algorithms, and contribute to the
+    advancement of the field.
+  - Language Learning Tools: Support interactive language learning
+    experiences, aiding in grammar correction or providing writing practice.
+  - Knowledge Exploration: Assist researchers in exploring large
+    bodies of text by generating summaries or answering questions about
+    specific topics.
+
+### Limitations
+
+- Training Data
+  - The quality and diversity of the training data significantly
+    influence the model's capabilities. Biases or gaps in the training data
+    can lead to limitations in the model's responses.
+  - The scope of the training dataset determines the subject areas
+    the model can handle effectively.
+- Context and Task Complexity
+  - Models are better at tasks that can be framed with clear
+    prompts and instructions. Open-ended or highly complex tasks might be
+    challenging.
+  - A model's performance can be influenced by the amount of context
+    provided (longer context generally leads to better outputs, up to a
+    certain point).
+- Language Ambiguity and Nuance
+  - Natural language is inherently complex. Models might struggle
+    to grasp subtle nuances, sarcasm, or figurative language.
+- Factual Accuracy
+  - Models generate responses based on information they learned
+    from their training datasets, but they are not knowledge bases. They
+    may generate incorrect or outdated factual statements.
+- Common Sense
+  - Models rely on statistical patterns in language. They might
+    lack the ability to apply common sense reasoning in certain situations.
+
+### Ethical Considerations and Risks
+
+The development of vision-language models (VLMs) raises several ethical
+concerns. In creating an open model, we have carefully considered the following:
+
+- Bias and Fairness
+  - VLMs trained on large-scale, real-world text and image data can
+    reflect socio-cultural biases embedded in the training material. These
+    models underwent careful scrutiny; input data pre-processing is described
+    and posterior evaluations are reported in this card.
+- Misinformation and Misuse
+  - VLMs can be misused to generate text that is false, misleading,
+    or harmful.
+  - Guidelines for responsible use are provided with the model; see the
+    [Responsible Generative AI Toolkit][rai-toolkit].
+- Transparency and Accountability
+  - This model card summarizes details on the models' architecture,
+    capabilities, limitations, and evaluation processes.
+  - A responsibly developed open model offers the opportunity to
+    share innovation by making VLM technology accessible to developers and
+    researchers across the AI ecosystem.
+
+Risks identified and mitigations:
+
+- **Perpetuation of biases**: Continuous monitoring (using evaluation
+  metrics and human review) and the exploration of de-biasing techniques are
+  encouraged during model training, fine-tuning, and other use cases.
+- **Generation of harmful content**: Mechanisms and guidelines for content
+  safety are essential. Developers are encouraged to exercise caution and
+  implement appropriate content safety safeguards based on their specific
+  product policies and application use cases.
+- **Misuse for malicious purposes**: Technical limitations and developer
+  and end-user education can help mitigate malicious applications of
+  VLMs. Educational resources and reporting mechanisms for users to flag
+  misuse are provided. Prohibited uses of Gemma models are outlined in the
+  [Gemma Prohibited Use Policy][prohibited-use].
+- **Privacy violations**: Models were trained on data filtered for removal
+  of certain personal information and other sensitive data. Developers are
+  encouraged to adhere to privacy regulations with privacy-preserving
+  techniques.
+
+### Benefits
+
+At the time of release, this family of models provides high-performance open
+vision-language model implementations designed from the ground up for
+responsible AI development, relative to similarly sized models.
+
+Using the benchmark evaluation metrics described in this document, these models
+have been shown to provide superior performance to other comparably sized open
+model alternatives.
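+
+Finally, to spot-check the refusal counts reported at the top of this card, the
+sketch below shows one crude, illustrative approach. It is not Heretic's actual
+evaluation harness: the string-matching heuristic, marker list, and prompt set
+are placeholders you would replace with your own.
+
+```python
+# Illustrative refusal spot-check (assumes `model` and `tokenizer` are loaded
+# as in the usage sketch earlier in this card). The substring heuristic below
+# is a stand-in for a real refusal classifier.
+REFUSAL_MARKERS = ("i can't", "i cannot", "i won't", "i'm sorry", "i am sorry")
+
+def looks_like_refusal(reply: str) -> bool:
+    text = reply.lower()
+    return any(marker in text for marker in REFUSAL_MARKERS)
+
+def refusal_rate(model, tokenizer, prompts, max_new_tokens=64) -> float:
+    """Greedy-decode each prompt and count replies that look like refusals."""
+    refused = 0
+    for prompt in prompts:
+        ids = tokenizer.apply_chat_template(
+            [{"role": "user", "content": prompt}],
+            add_generation_prompt=True,
+            return_tensors="pt",
+        ).to(model.device)
+        out = model.generate(ids, max_new_tokens=max_new_tokens, do_sample=False)
+        reply = tokenizer.decode(out[0][ids.shape[-1]:], skip_special_tokens=True)
+        refused += looks_like_refusal(reply)
+    return refused / len(prompts)
+```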
+
+[g3-tech-report]: https://goo.gle/Gemma3Report
+[rai-toolkit]: https://ai.google.dev/responsible
+[kaggle-gemma]: https://www.kaggle.com/models/google/gemma-3
+[vertex-mg-gemma3]: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma3
+[terms]: https://ai.google.dev/gemma/terms
+[safety-policies]: https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf
+[prohibited-use]: https://ai.google.dev/gemma/prohibited_use_policy
+[tpu]: https://cloud.google.com/tpu/docs/intro-to-tpu
+[sustainability]: https://sustainability.google/operating-sustainably/
+[jax]: https://github.com/jax-ml/jax
+[ml-pathways]: https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/
+[gemini-2-paper]: https://arxiv.org/abs/2312.11805
\ No newline at end of file
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000..c5f1365
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,47 @@
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+    {%- if messages[0]['content'] is string -%}
+        {%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+    {%- else -%}
+        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+    {%- endif -%}
+    {%- set loop_messages = messages[1:] -%}
+{%- else -%}
+    {%- set first_user_prefix = "" -%}
+    {%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+        {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+    {%- endif -%}
+    {%- if (message['role'] == 'assistant') -%}
+        {%- set role = "model" -%}
+    {%- else -%}
+        {%- set role = message['role'] -%}
+    {%- endif -%}
+    {{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+    {%- if message['content'] is string -%}
+        {{ message['content'] | trim }}
+    {%- elif message['content'] is iterable -%}
+        {%- for item in message['content'] -%}
+            {%- if item['type'] == 'image' -%}
+                {{ '<start_of_image>' }}
+            {%- elif item['type'] == 'text' -%}
+                {{ item['text'] | trim }}
+            {%- endif -%}
+        {%- endfor -%}
+    {%- else -%}
+        {{ raise_exception("Invalid content type") }}
+    {%- endif -%}
+    {{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    {{'<start_of_turn>model
+'}}
+{%- endif -%}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..c8a8ec4
--- /dev/null
+++ b/config.json
@@ -0,0 +1,72 @@
+{
+  "_sliding_window_pattern": 6,
+  "architectures": [
+    "Gemma3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "dtype": "bfloat16",
+  "eos_token_id": 1,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 1152,
+  "initializer_range": 0.02,
+  "intermediate_size": 6912,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 26,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_parameters": {
+    "full_attention": {
+      "rope_theta": 1000000,
+      "rope_type": "default"
+    },
+    "sliding_attention": {
+      "rope_theta": 10000,
+      "rope_type": "default"
+    }
+  },
+  "sliding_window": 512,
+  "sliding_window_pattern": 6,
+  "tie_word_embeddings": true,
+  "transformers_version": "5.0.0",
+  "use_bidirectional_attention": false,
+  "use_cache": true,
+  "vocab_size": 262144
+}
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..bf94d07
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,11 @@
+{
+  "cache_implementation": "hybrid",
+  "do_sample": true,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "top_k": 64,
+  "top_p": 0.95,
+  "transformers_version": "5.0.0"
+}
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..7cb5316
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f05b62939b1bd4db36eab0987eb2494c550d9a98738671b2d1a174d719c49145
+size 1999811208
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..899af07
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a74aefb1dc1340a25f29ab8370384b9ed24b2d921d7749ece7bbcfcfdf00d497
+size 33384443
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..5cb2e8c
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,23 @@
+{
+  "backend": "tokenizers",
+  "boi_token": "<start_of_image>",
+  "bos_token": "<bos>",
+  "clean_up_tokenization_spaces": false,
+  "eoi_token": "<end_of_image>",
+  "eos_token": "<eos>",
+  "image_token": "<image_soft_token>",
+  "is_local": false,
+  "mask_token": "",
+  "model_max_length": 1000000000000000019884624838656,
+  "model_specific_special_tokens": {
+    "boi_token": "<start_of_image>",
+    "eoi_token": "<end_of_image>",
+    "image_token": "<image_soft_token>"
+  },
+  "pad_token": "<pad>",
+  "sp_model_kwargs": null,
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "GemmaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}