Initialize the repository; model provided by the ModelHub XC community.
Model: Muennighoff/SGPT-125M-lasttoken-msmarco-specb; Source: Original Platform
.gitattributes (vendored, Normal file, 27 lines)
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": false,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": true
}
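Annotation: this pooling config selects last-token pooling, i.e., the sentence embedding is the hidden state of the final non-padding token. A minimal sketch of that operation (assuming a Hugging Face-style hidden-state tensor, an attention mask, and right-padded batches; the helper is illustrative, not the sentence-transformers implementation):

```python
import torch

def last_token_pool(hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Pick the hidden state of each sequence's last non-padding token."""
    # hidden_states: (batch, seq_len, hidden); attention_mask: (batch, seq_len)
    # Assumes right padding, so the last real token sits at mask.sum() - 1.
    last_idx = attention_mask.sum(dim=1) - 1
    batch_idx = torch.arange(hidden_states.size(0), device=hidden_states.device)
    return hidden_states[batch_idx, last_idx]  # (batch, hidden)
```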
README.md (Normal file, 72 lines)
@@ -0,0 +1,72 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---

# SGPT-125M-lasttoken-msmarco-specb

## Usage

For usage instructions, refer to our codebase: https://github.com/Muennighoff/sgpt
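For a quick start, the checkpoint can also be loaded directly with sentence-transformers. A minimal sketch (note that the specb variant additionally wraps queries and documents with the [SOS]/{SOS} marker tokens added below; the sgpt codebase handles that, and this sketch omits it):

```python
from sentence_transformers import SentenceTransformer

# Pooling is configured as last-token pooling via 1_Pooling/config.json.
model = SentenceTransformer("Muennighoff/SGPT-125M-lasttoken-msmarco-specb")

sentences = ["What is semantic search?", "Semantic search matches meaning rather than keywords."]
embeddings = model.encode(sentences)
print(embeddings.shape)  # (2, 768)
```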
## Evaluation Results

For eval results, refer to our paper: https://arxiv.org/abs/2202.08904

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 15600 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```
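For intuition, MultipleNegativesRankingLoss scores every query against every passage in the batch and trains the paired passage to win a softmax at the given scale. A small re-derivation of the idea (illustrative, not the library's code):

```python
import torch
import torch.nn.functional as F

def mnr_loss(query_emb: torch.Tensor, passage_emb: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    # Scaled cosine similarity of every query against every in-batch passage.
    scores = scale * F.normalize(query_emb, dim=-1) @ F.normalize(passage_emb, dim=-1).T
    # Query i's true passage sits on the diagonal; all others act as negatives.
    labels = torch.arange(scores.size(0), device=scores.device)
    return F.cross_entropy(scores, labels)
```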
Parameters of the fit()-Method:
```
{
    "epochs": 10,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 1000,
    "weight_decay": 0.01
}
```
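Putting the DataLoader, loss, and fit() parameters above together, a hedged sketch of the corresponding sentence-transformers training call (the two example pairs are placeholders; the actual run drew from MS MARCO, with 15600 batches of 32, i.e., roughly 499,200 pairs per epoch):

```python
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader

model = SentenceTransformer("Muennighoff/SGPT-125M-lasttoken-msmarco-specb")

# Placeholder (query, positive passage) pairs standing in for the MS MARCO data.
train_examples = [
    InputExample(texts=["what is git lfs", "Git LFS stores large files outside the repository."]),
    InputExample(texts=["define pooling", "Pooling reduces per-token states to one vector."]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=32)
train_loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=10,
    scheduler="WarmupLinear",
    warmup_steps=1000,
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
    max_grad_norm=1,
)
```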
## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 300, 'do_lower_case': False}) with Transformer model: GPTNeoModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': True})
)
```
## Citing & Authors

```bibtex
@article{muennighoff2022sgpt,
  title={SGPT: GPT Sentence Embeddings for Semantic Search},
  author={Muennighoff, Niklas},
  journal={arXiv preprint arXiv:2202.08904},
  year={2022}
}
```
added_tokens.json (Normal file, 1 line)
@@ -0,0 +1 @@
{"[SOS]": 50257, "{SOS}": 50258}
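These two added tokens implement the "specb" (special brackets) scheme from the SGPT paper: queries and documents receive distinct start-of-sequence markers so the asymmetric-search encoder can tell them apart. A hedged sketch of attaching them (the sgpt codebase linked above is authoritative for the exact insertion logic):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Muennighoff/SGPT-125M-lasttoken-msmarco-specb")

# '[SOS]' (id 50257) is assumed here to mark queries, '{SOS}' (id 50258) documents.
query_ids = [tokenizer.convert_tokens_to_ids("[SOS]")] + tokenizer.encode("what is semantic search")
doc_ids = [tokenizer.convert_tokens_to_ids("{SOS}")] + tokenizer.encode("Semantic search matches meaning.")
```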
config.json (Normal file, 54 lines)
@@ -0,0 +1,54 @@
{
  "_name_or_path": "EleutherAI/gpt-neo-125M",
  "activation_function": "gelu_new",
  "architectures": [
    "GPTNeoModel"
  ],
  "attention_dropout": 0,
  "attention_layers": [
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local"
  ],
  "attention_types": [
    [
      [
        "global",
        "local"
      ],
      6
    ]
  ],
  "bos_token_id": 50256,
  "embed_dropout": 0,
  "eos_token_id": 50256,
  "gradient_checkpointing": false,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "layer_norm_epsilon": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neo",
  "num_heads": 12,
  "num_layers": 12,
  "resid_dropout": 0,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.11.3",
  "use_cache": true,
  "vocab_size": 50259,
  "window_size": 256
}
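Two details worth noting in this config: attention_types is shorthand for the attention_layers list (the pattern ["global", "local"] repeated 6 times yields the 12 alternating layers), and vocab_size is 50259, i.e., GPT-2's 50257 entries plus the two [SOS]/{SOS} tokens added above. A small expansion of the shorthand:

```python
# Expand the attention_types shorthand [[["global", "local"], 6]] into 12 layers.
attention_types = [[["global", "local"], 6]]
attention_layers = [layer for pattern, repeat in attention_types for layer in pattern * repeat]
assert len(attention_layers) == 12  # matches the attention_layers list in the config
```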
config_sentence_transformers.json (Normal file, 7 lines)
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.1.0",
    "transformers": "4.11.3",
    "pytorch": "1.10.1"
  }
}
merges.txt (Normal file, 50001 lines)
File diff suppressed because it is too large.
modules.json (Normal file, 14 lines)
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
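modules.json wires the two-stage pipeline: module 0 is the GPT-Neo transformer at the repo root, module 1 the last-token pooling layer in 1_Pooling/. A hedged sketch of rebuilding the same pipeline by hand (assuming a sentence-transformers version that supports the pooling_mode_lasttoken flag):

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the transformer backbone (path "" = repo root), capped at 300 tokens.
transformer = models.Transformer("Muennighoff/SGPT-125M-lasttoken-msmarco-specb", max_seq_length=300)
# Module 1: last-token pooling over the 768-dim word embeddings.
pooling = models.Pooling(
    transformer.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=False,
    pooling_mode_lasttoken=True,
)
model = SentenceTransformer(modules=[transformer, pooling])
```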
pytorch_model.bin (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a361b78aee540249bd43948b02324eb953ffec1255a80a4b1443b397aa82b7d9
size 551196689
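This file is not the weights themselves but a Git LFS pointer: oid names the SHA-256 of the real blob and size its byte count (roughly 551 MB). A small check for whether a checkout still holds the pointer (illustrative):

```python
def is_lfs_pointer(path: str) -> bool:
    """True if the file on disk is still a Git LFS pointer, not the real blob."""
    with open(path, "rb") as f:
        return f.read(100).startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("pytorch_model.bin"))  # True until the blob is fetched (e.g. via `git lfs pull`)
```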
sentence_bert_config.json (Normal file, 4 lines)
@@ -0,0 +1,4 @@
{
  "max_seq_length": 300,
  "do_lower_case": false
}
special_tokens_map.json (Normal file, 1 line)
@@ -0,0 +1 @@
{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
tokenizer.json (Normal file, 1 line)
File diff suppressed because one or more lines are too long.
tokenizer_config.json (Normal file, 1 line)
@@ -0,0 +1 @@
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-125M", "tokenizer_class": "GPT2Tokenizer"}
vocab.json (Normal file, 1 line)
File diff suppressed because one or more lines are too long.