Initialize the project; model provided by the ModelHub XC community
Model: Muennighoff/SGPT-125M-mean-nli-linear5 (Source: Original Platform)
.gitattributes (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false
}
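The pooling config above turns on plain mean pooling only. A minimal sketch of the computation it selects, in standard PyTorch (the helper name is illustrative, not the library's own code):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Masked mean over the sequence axis: (batch, seq, 768) -> (batch, 768)."""
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)  # (batch, seq, 1)
    summed = (token_embeddings * mask).sum(dim=1)   # sum only over real tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)        # guard against empty masks
    return summed / counts
```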
2_Dense/config.json (new file, 1 line)
@@ -0,0 +1 @@
{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU"}
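Each `*_Dense/config.json` below repeats this same shape: one 768-to-768 linear layer with bias followed by GELU. An equivalent PyTorch module, for illustration only:

```python
import torch.nn as nn

# One Dense block as configured above; the repo stacks five of these
# (2_Dense through 6_Dense) after the pooling step.
dense_block = nn.Sequential(nn.Linear(768, 768, bias=True), nn.GELU())
```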
2_Dense/pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab58ee7c0bf4ebb6a6c66b3907aef60dcc3662952df1393b01723c4dd0fb50ea
size 2363431
3_Dense/config.json (new file, 1 line)
@@ -0,0 +1 @@
{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU"}
3_Dense/pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bdcc7e501da47fb2ee1297506d5cd1a96d3bce3b91589461b3349b5cfb62f80d
size 2363431
4_Dense/config.json (new file, 1 line)
@@ -0,0 +1 @@
{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU"}
4_Dense/pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:611bfb930f316f7517bcef08cb7df35d2062feb5890533c41b0464732f979abb
size 2363431
5_Dense/config.json (new file, 1 line)
@@ -0,0 +1 @@
{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU"}
5_Dense/pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cc11027db566a4d3dc2ccf9f55e68d1930be618e219bc6e090bff3532644663a
size 2363431
6_Dense/config.json (new file, 1 line)
@@ -0,0 +1 @@
{"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU"}
6_Dense/pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:649980f85e7cc80814d98cdf98494cfd69dddf05b102e4e519a2c4efe12e1f4e
size 2363431
README.md (new file, 77 lines)
@@ -0,0 +1,77 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---

# SGPT-125M-mean-nli-linear5

## Usage

For usage instructions, refer to our codebase: https://github.com/Muennighoff/sgpt
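A minimal usage sketch with the sentence-transformers library (the model id is assumed to match this repo; the SGPT codebase linked above is the authoritative reference):

```python
from sentence_transformers import SentenceTransformer

# Loads the full pipeline: GPT-Neo backbone, mean pooling, five GELU Dense layers.
model = SentenceTransformer("Muennighoff/SGPT-125M-mean-nli-linear5")

sentences = ["A man is eating food.", "A man is eating a piece of bread."]
embeddings = model.encode(sentences)  # numpy array of shape (2, 768)
print(embeddings.shape)
```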
## Evaluation Results

For eval results, refer to our paper: https://arxiv.org/abs/2202.08904

## Training

The model was trained with the parameters:

**DataLoader**:

`sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 8807 with parameters:
```
{'batch_size': 64}
```

**Loss**:

`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```
Parameters of the fit()-Method:
```
{
    "epochs": 1,
    "evaluation_steps": 880,
    "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 881,
    "weight_decay": 0.01
}
```
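A hedged reconstruction of the training call these parameters describe (the two InputExample pairs are stand-ins for the NLI training set, which is not shipped in this repo):

```python
from sentence_transformers import InputExample, SentenceTransformer, losses
from sentence_transformers.datasets import NoDuplicatesDataLoader

model = SentenceTransformer("Muennighoff/SGPT-125M-mean-nli-linear5")

# Stand-in training pairs; the actual run used NLI data (8807 batches of 64).
train_examples = [
    InputExample(texts=["A man is eating food.", "A man eats something."]),
    InputExample(texts=["A plane is taking off.", "An air plane is taking off."]),
]
train_dataloader = NoDuplicatesDataLoader(train_examples, batch_size=2)
train_loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)  # cos_sim is the default

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=881,              # "WarmupLinear" is the default scheduler
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
    max_grad_norm=1,
)
```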

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 75, 'do_lower_case': False}) with Transformer model: GPTNeoModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
  (2): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU'})
  (3): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU'})
  (4): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU'})
  (5): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU'})
  (6): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU'})
)
```
## Citing & Authors

```bibtex
@article{muennighoff2022sgpt,
  title={SGPT: GPT Sentence Embeddings for Semantic Search},
  author={Muennighoff, Niklas},
  journal={arXiv preprint arXiv:2202.08904},
  year={2022}
}
```
config.json (new file, 54 lines)
@@ -0,0 +1,54 @@
{
  "_name_or_path": "EleutherAI/gpt-neo-125M",
  "activation_function": "gelu_new",
  "architectures": [
    "GPTNeoModel"
  ],
  "attention_dropout": 0,
  "attention_layers": [
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local"
  ],
  "attention_types": [
    [
      [
        "global",
        "local"
      ],
      6
    ]
  ],
  "bos_token_id": 50256,
  "embed_dropout": 0,
  "eos_token_id": 50256,
  "gradient_checkpointing": false,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "layer_norm_epsilon": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neo",
  "num_heads": 12,
  "num_layers": 12,
  "resid_dropout": 0,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.11.3",
  "use_cache": true,
  "vocab_size": 50257,
  "window_size": 256
}
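The backbone is EleutherAI's GPT-Neo-125M: 12 layers alternating global and local attention (window size 256), hidden size 768. A quick sketch of loading just this transformer with Hugging Face Transformers (assuming the repo id used above):

```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("Muennighoff/SGPT-125M-mean-nli-linear5")
print(config.model_type, config.num_layers, config.hidden_size)  # gpt_neo 12 768

# Loads only the GPTNeoModel backbone, not the pooling/Dense head.
backbone = AutoModel.from_pretrained("Muennighoff/SGPT-125M-mean-nli-linear5")
```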
config_sentence_transformers.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.1.0",
    "transformers": "4.11.3",
    "pytorch": "1.10.1"
  }
}
eval/similarity_evaluation_sts-dev_results.csv (new file, 12 lines)
@@ -0,0 +1,12 @@
epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
0,880,0.6793242897830243,0.6800051116275396,0.6526675304698322,0.6598914874266282,0.6523759012817913,0.6596549908221383,0.37758613161811344,0.44107892538971527
0,1760,0.7093062659759418,0.7043392262855407,0.6799424742385566,0.6871311628086488,0.679709965760185,0.686909064790608,0.4244807707330064,0.489454058418039
0,2640,0.7162510204173811,0.7108099478329052,0.6844751159621512,0.6902131311277189,0.6843642132691589,0.6900856150254838,0.4430006769070316,0.49415192642877115
0,3520,0.7210577333102874,0.7159412485270954,0.6874901591743661,0.693434015639469,0.6875126171641188,0.6936410018207312,0.45731448945063213,0.5199390563243889
0,4400,0.7279170676663442,0.7217689927624069,0.6918732703073953,0.6971102070618418,0.6917278226947479,0.6970824988576667,0.45420687165035545,0.5189615022689855
0,5280,0.7260344818186505,0.720721312324089,0.6921883942481113,0.6972724171381975,0.6921449258154431,0.6971027942453383,0.43819063915065287,0.5029727634769791
0,6160,0.7286999102843462,0.7228504098788112,0.6939166076911062,0.6994848783615378,0.6937150393058567,0.6991998971160589,0.445240349764503,0.5095797004208383
0,7040,0.7302858437234605,0.725494459746203,0.694175642980331,0.6995536927338825,0.6937733393621677,0.6991565696840228,0.439262522665688,0.5065563833877958
0,7920,0.7314337586011384,0.7268544909338852,0.6942073318019882,0.6996691145217213,0.6938620780761877,0.6992384398472841,0.44088081789557454,0.5113894614562506
0,8800,0.7316399822010149,0.7275473603861525,0.6938081719844009,0.699432422187559,0.6934701320652052,0.698900686945158,0.43536164213762274,0.5046829247995284
0,-1,0.7316436973142261,0.7275333401568148,0.6938119379020147,0.6994414880301143,0.6934733474552642,0.6989065292074504,0.4353497873382448,0.5046755102036641
merges.txt (new file, 50001 lines)
File diff suppressed because it is too large.
modules.json (new file, 44 lines)
@@ -0,0 +1,44 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Dense",
    "type": "sentence_transformers.models.Dense"
  },
  {
    "idx": 3,
    "name": "3",
    "path": "3_Dense",
    "type": "sentence_transformers.models.Dense"
  },
  {
    "idx": 4,
    "name": "4",
    "path": "4_Dense",
    "type": "sentence_transformers.models.Dense"
  },
  {
    "idx": 5,
    "name": "5",
    "path": "5_Dense",
    "type": "sentence_transformers.models.Dense"
  },
  {
    "idx": 6,
    "name": "6",
    "path": "6_Dense",
    "type": "sentence_transformers.models.Dense"
  }
]
pytorch_model.bin (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e4119de5997c833c7566dc4766cfd1ce0fcafab88f327adf0dbd85e41f03e3c
size 551190545
sentence_bert_config.json (new file, 4 lines)
@@ -0,0 +1,4 @@
{
  "max_seq_length": 75,
  "do_lower_case": false
}
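`max_seq_length` of 75 means inputs are cut to 75 tokens before encoding; a quick check through the sentence-transformers API (assuming the repo id used earlier):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Muennighoff/SGPT-125M-mean-nli-linear5")
print(model.max_seq_length)  # 75; longer inputs are truncated before encoding
```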
similarity_evaluation_sts-test_results.csv (new file, 2 lines)
@@ -0,0 +1,2 @@
epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
-1,-1,0.67010540977066,0.6281656744516964,0.6151881660970145,0.5880977577692394,0.6156838702005203,0.5883481650395049,0.3694855758671593,0.4361936325224468
special_tokens_map.json (new file, 1 line)
@@ -0,0 +1 @@
{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
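GPT-2-style tokenizers define no dedicated padding token, so `<|endoftext|>` is reused for bos, eos, unk, and pad. A quick check (assumes Hugging Face Transformers):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Muennighoff/SGPT-125M-mean-nli-linear5")
print(tok.pad_token == tok.eos_token)  # True: both are "<|endoftext|>"
```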
tokenizer.json (new file, 1 line)
File diff suppressed because one or more lines are too long.
tokenizer_config.json (new file, 1 line)
@@ -0,0 +1 @@
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-125M", "tokenizer_class": "GPT2Tokenizer"}
vocab.json (new file, 1 line)
File diff suppressed because one or more lines are too long.