初始化项目,由ModelHub XC社区提供模型

Model: pkshatech/GLuCoSE-base-ja-v2
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-14 18:04:47 +08:00
commit a21d22f1b8
13 changed files with 542 additions and 0 deletions

35
.gitattributes vendored Normal file
View File

@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

10
1_Pooling/config.json Normal file
View File

@@ -0,0 +1,10 @@
{
"word_embedding_dimension": 768,
"pooling_mode_cls_token": false,
"pooling_mode_mean_tokens": true,
"pooling_mode_max_tokens": false,
"pooling_mode_mean_sqrt_len_tokens": false,
"pooling_mode_weightedmean_tokens": false,
"pooling_mode_lasttoken": false,
"include_prompt": true
}

222
README.md Normal file
View File

@@ -0,0 +1,222 @@
---
language:
- ja
library_name: sentence-transformers
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
metrics:
- pearson_cosine
- spearman_cosine
- pearson_manhattan
- spearman_manhattan
- pearson_euclidean
- spearman_euclidean
- pearson_dot
- spearman_dot
- pearson_max
- spearman_max
widget: []
pipeline_tag: sentence-similarity
datasets:
- hpprc/emb
- hpprc/mqa-ja
- google-research-datasets/paws-x
base_model: pkshatech/GLuCoSE-base-ja
license: apache-2.0
---
# GLuCoSE v2
This model is a general Japanese text embedding model, excelling in retrieval tasks. It can run on CPU and is designed to measure semantic similarity between sentences, as well as to function as a retrieval system for searching passages based on queries.
Key features:
- Specialized for retrieval tasks, it demonstrates the highest performance among similar size models in MIRACL and other tasks.
- Optimized for Japanese text processing
- Can run on CPU
During inference, the prefix "query: " or "passage: " is required. Please check the Usage section for details.
## Model Description
The model is based on [GLuCoSE](https://huggingface.co/pkshatech/GLuCoSE-base-ja) and fine-tuned through distillation using several large-scale embedding models and multi-stage contrastive learning.
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
## Usage
### Direct Usage (Sentence Transformers)
You can perform inference using SentenceTransformer with the following code:
```python
from sentence_transformers import SentenceTransformer
import torch.nn.functional as F
# Download from the 🤗 Hub
model = SentenceTransformer("pkshatech/GLuCoSE-base-ja-v2")
# Each input text should start with "query: " or "passage: ".
# For tasks other than retrieval, you can simply use the "query: " prefix.
sentences = [
'query: PKSHAはどんな会社ですか',
'passage: 研究開発したアルゴリズムを、多くの企業のソフトウエア・オペレーションに導入しています。',
'query: 日本で一番高い山は?',
'passage: 富士山ふじさんは、標高3776.12 m、日本最高峰剣ヶ峰の独立峰で、その優美な風貌は日本国外でも日本の象徴として広く知られている。',
]
embeddings = model.encode(sentences, convert_to_tensor=True)
print(embeddings.shape)
# [4, 768]
# Get the similarity scores for the embeddings
similarities = F.cosine_similarity(embeddings.unsqueeze(0), embeddings.unsqueeze(1), dim=2)
print(similarities)
# [[1.0000, 0.6050, 0.4341, 0.5537],
# [0.6050, 1.0000, 0.5018, 0.6815],
# [0.4341, 0.5018, 1.0000, 0.7534],
# [0.5537, 0.6815, 0.7534, 1.0000]]
```
### Direct Usage (Transformers)
You can perform inference using Transformers with the following code:
```python
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
def mean_pooling(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
emb = last_hidden_states * attention_mask.unsqueeze(-1)
emb = emb.sum(dim=1) / attention_mask.sum(dim=1).unsqueeze(-1)
return emb
# Download from the 🤗 Hub
tokenizer = AutoTokenizer.from_pretrained("pkshatech/GLuCoSE-base-ja-v2")
model = AutoModel.from_pretrained("pkshatech/GLuCoSE-base-ja-v2")
# Each input text should start with "query: " or "passage: ".
# For tasks other than retrieval, you can simply use the "query: " prefix.
sentences = [
'query: PKSHAはどんな会社ですか',
'passage: 研究開発したアルゴリズムを、多くの企業のソフトウエア・オペレーションに導入しています。',
'query: 日本で一番高い山は?',
'passage: 富士山ふじさんは、標高3776.12 m、日本最高峰剣ヶ峰の独立峰で、その優美な風貌は日本国外でも日本の象徴として広く知られている。',
]
# Tokenize the input texts
batch_dict = tokenizer(sentences, max_length=512, padding=True, truncation=True, return_tensors='pt')
outputs = model(**batch_dict)
embeddings = mean_pooling(outputs.last_hidden_state, batch_dict['attention_mask'])
print(embeddings.shape)
# [4, 768]
# Get the similarity scores for the embeddings
similarities = F.cosine_similarity(embeddings.unsqueeze(0), embeddings.unsqueeze(1), dim=2)
print(similarities)
# [[1.0000, 0.6050, 0.4341, 0.5537],
# [0.6050, 1.0000, 0.5018, 0.6815],
# [0.4341, 0.5018, 1.0000, 0.7534],
# [0.5537, 0.6815, 0.7534, 1.0000]]
```
## Training Details
The fine-tuning of GLuCoSE v2 is carried out through the following steps:
**Step 1: Ensemble distillation**
- The embedded representation was distilled using [E5-mistral](https://huggingface.co/intfloat/e5-mistral-7b-instruct), [gte-Qwen2](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct), and [mE5-large](https://huggingface.co/intfloat/multilingual-e5-large) as teacher models.
**Step 2: Contrastive learning**
- Triplets were created from [JSNLI](https://nlp.ist.i.kyoto-u.ac.jp/?%E6%97%A5%E6%9C%AC%E8%AA%9ESNLI%28JSNLI%29%E3%83%87%E3%83%BC%E3%82%BF%E3%82%BB%E3%83%83%E3%83%88), [MNLI](https://huggingface.co/datasets/MoritzLaurer/multilingual-NLI-26lang-2mil7), [PAWS-X](https://huggingface.co/datasets/paws-x), [JSeM](https://github.com/DaisukeBekki/JSeM) and [Mr.TyDi](https://huggingface.co/datasets/castorini/mr-tydi) and used for training.
- This training aimed to improve the overall performance as a sentence embedding model.
**Step 3: Search-specific contrastive learning**
- In order to make the model more robust to the retrieval task, additional two-stage training with QA and retrieval task was conducted.
- In the first stage, the synthetic dataset [auto-wiki-qa](https://huggingface.co/datasets/cl-nagoya/auto-wiki-qa) was used for training,
while in the second stage, [JQaRA](https://huggingface.co/datasets/hotchpotch/JQaRA), [MQA](https://huggingface.co/datasets/hpprc/mqa-ja), and [Japanese Wikipedia Human Retrieval, Mr.TyDi, MIRACL, Quiz Works and Quiz No Mori](https://huggingface.co/datasets/hpprc/emb) were used.
<!--
### Downstream Usage (Sentence Transformers)
You can finetune this model on your own dataset.
<details><summary>Click to expand</summary>
</details>
-->
<!--
### Out-of-Scope Use
*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->
<!--
## Bias, Risks and Limitations
*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->
<!--
### Recommendations
*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->
## Benchmarks
### Retrieval
Evaluated with [MIRACL-ja](https://huggingface.co/datasets/miracl/miracl), [JQaRA](https://huggingface.co/datasets/hotchpotch/JQaRA), [JaCWIR](https://huggingface.co/datasets/hotchpotch/JaCWIR) and [MLDR-ja](https://huggingface.co/datasets/Shitao/MLDR).
| Model | Size | MIRACL<br>Recall@5 | JQaRA<br>nDCG@10 | JaCWIR<br>MAP@10 | MLDR<br>nDCG@10 |
| :---: | :---: | :---: | :---: | :---: | :---: |
| [intfloat/multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 0.6B | 89.2 | 55.4 | **87.6** | 29.8 |
| [cl-nagoya/ruri-large](https://huggingface.co/cl-nagoya/ruri-large) | 0.3B | 78.7 | 62.4 | 85.0 | **37.5** |
| | | | | | |
| [intfloat/multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 0.3B | 84.2 | 47.2 | **85.3** | 25.4 |
| [cl-nagoya/ruri-base](https://huggingface.co/cl-nagoya/ruri-base) | 0.1B | 74.3 | 58.1 | 84.6 | **35.3** |
| [pkshatech/GLuCoSE-base-ja](https://huggingface.co/pkshatech/GLuCoSE-base-ja) | 0.1B | 53.3 | 30.8 | 68.6 | 25.2 |
| **GLuCoSE v2** | 0.1B | **85.5** | **60.6** | **85.3** | 33.8 |
Note: Results for OpenAI small embeddings in JQaRA and JaCWIR are quoted from the [JQaRA](https://huggingface.co/datasets/hotchpotch/JQaRA) and [JaCWIR](https://huggingface.co/datasets/hotchpotch/JaCWIR) leaderboards.
### JMTEB
Evaluated with [JMTEB](https://github.com/sbintuitions/JMTEB).
The average score is macro-average.
| Model | Size | Avg. | Retrieval | STS | Classification | Reranking | Clustering | PairClassification |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| OpenAI/text-embedding-3-small | - | 69.18 | 66.39 | 79.46 | 73.06 | 92.92 | 51.06 | 62.27 |
| OpenAI/text-embedding-3-large | - | 74.05 | 74.48 | 82.52 | 77.58 | 93.58 | 53.32 | 62.35 |
| | | | | | | | | |
| [intfloat/multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 0.6B | 70.90 | 70.98 | 79.70 | 72.89 | 92.96 | 51.24 | 62.15 |
| [cl-nagoya/ruri-large](https://huggingface.co/cl-nagoya/ruri-large) | 0.3B | 73.31 | 73.02 | 83.13 | 77.43 | 92.99 | 51.82 | 62.29 |
| | | | | | | | | |
| [intfloat/multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 0.3B | 68.61 | 68.21 | 79.84 | 69.30 | **92.85** | 48.26 | 62.26 |
| [cl-nagoya/ruri-base](https://huggingface.co/cl-nagoya/ruri-base) | 0.1B | 71.91 | 69.82 | 82.87 | 75.58 | 92.91 | **54.16** | 62.38 |
| [pkshatech/GLuCoSE-base-ja](https://huggingface.co/pkshatech/GLuCoSE-base-ja) | 0.1B | 67.29 | 59.02 | 78.71 | **76.82** | 91.90 | 49.78 | **66.39** |
| **GLuCoSE v2** | 0.1B | **72.23** | **73.36** | **82.96** | 74.21 | 93.01 | 48.65 | 62.37 |
Note: Results for OpenAI embeddings and multilingual-e5 models are quoted from the [JMTEB leaderboard](https://github.com/sbintuitions/JMTEB/blob/main/leaderboard.md). Results for ruri are quoted from the [cl-nagoya/ruri-base model card](https://huggingface.co/cl-nagoya/ruri-base/blob/main/README.md).
## Authors
Chihiro Yano, Mocho Go, Hideyuki Tachibana, Hiroto Takegawa, Yotaro Watanabe
## License
This model is published under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0).

4
added_tokens.json Normal file
View File

@@ -0,0 +1,4 @@
{
"<ent2>": 32771,
"<ent>": 32770
}

32
config.json Normal file
View File

@@ -0,0 +1,32 @@
{
"_name_or_path": "/workspace/store/outputs/step3/B2048E3LR3e-05_glu-big-prefix/B2048E3LR3e-05_mir_mr-_jqa_bao_qui_qui_mqa-prefix/93",
"architectures": [
"LukeModel"
],
"attention_probs_dropout_prob": 0.1,
"bert_model_name": "models/luke-japanese/hf_xlm_roberta",
"bos_token_id": 0,
"classifier_dropout": null,
"cls_entity_prediction": false,
"entity_emb_size": 256,
"entity_vocab_size": 4,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "luke",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"torch_dtype": "float32",
"transformers_version": "4.44.0",
"type_vocab_size": 1,
"use_cache": true,
"use_entity_aware_attention": true,
"vocab_size": 32772
}

View File

@@ -0,0 +1,10 @@
{
"__version__": {
"sentence_transformers": "3.0.1",
"transformers": "4.44.0",
"pytorch": "2.3.1+cu118"
},
"prompts": {},
"default_prompt_name": null,
"similarity_fn_name": null
}

6
entity_vocab.json Normal file
View File

@@ -0,0 +1,6 @@
{
"[MASK2]": 3,
"[MASK]": 0,
"[PAD]": 2,
"[UNK]": 1
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9157b2c1939b03938604b8f175fb24acf9d403afdeabdf8b6d54be6a6bce137c
size 532299592

14
modules.json Normal file
View File

@@ -0,0 +1,14 @@
[
{
"idx": 0,
"name": "0",
"path": "",
"type": "sentence_transformers.models.Transformer"
},
{
"idx": 1,
"name": "1",
"path": "1_Pooling",
"type": "sentence_transformers.models.Pooling"
}
]

View File

@@ -0,0 +1,4 @@
{
"max_seq_length": 512,
"do_lower_case": false
}

3
sentencepiece.bpe.model Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8b73a5e054936c920cf5b7d1ec21ce9c281977078269963beb821c6c86fbff7
size 841889

83
special_tokens_map.json Normal file
View File

@@ -0,0 +1,83 @@
{
"additional_special_tokens": [
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
{
"content": "<ent>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
{
"content": "<ent2>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
],
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"cls_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"mask_token": {
"content": "<mask>",
"lstrip": true,
"normalized": true,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<pad>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"sep_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

116
tokenizer_config.json Normal file
View File

@@ -0,0 +1,116 @@
{
"added_tokens_decoder": {
"0": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<pad>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"3": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"32769": {
"content": "<mask>",
"lstrip": true,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"32770": {
"content": "<ent>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"32771": {
"content": "<ent2>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
}
},
"additional_special_tokens": [
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>",
"<ent>",
"<ent2>"
],
"bos_token": "<s>",
"clean_up_tokenization_spaces": true,
"cls_token": "<s>",
"entity_mask2_token": "[MASK2]",
"entity_mask_token": "[MASK]",
"entity_pad_token": "[PAD]",
"entity_token_1": {
"__type": "AddedToken",
"content": "<ent>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"entity_token_2": {
"__type": "AddedToken",
"content": "<ent2>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": false
},
"entity_unk_token": "[UNK]",
"eos_token": "</s>",
"mask_token": "<mask>",
"max_entity_length": 32,
"max_mention_length": 30,
"model_max_length": 512,
"pad_token": "<pad>",
"sep_token": "</s>",
"sp_model_kwargs": {},
"task": null,
"tokenizer_class": "MLukeTokenizer",
"unk_token": "<unk>"
}