Initialize the project; model provided by the ModelHub XC community
Model: numind/NuExtract-tiny Source: Original Platform
.gitattributes (vendored) · 36 lines · Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
model.safetensors filter=lfs diff=lfs merge=lfs -text
README.md · 84 lines · Normal file
@@ -0,0 +1,84 @@
---
license: mit
language:
- en
base_model: Qwen/Qwen1.5-0.5B
new_version: numind/NuExtract-v1.5
---

> ⚠️ **_NOTE:_** This model is outdated. Find the updated version [here](https://huggingface.co/numind/NuExtract-tiny-v1.5).

# Structure Extraction Model by NuMind 🔥

NuExtract-tiny is a version of [Qwen1.5-0.5B](https://huggingface.co/Qwen/Qwen1.5-0.5B), fine-tuned on a private, high-quality synthetic dataset for information extraction. To use the model, provide an input text (less than 2000 tokens) and a JSON template describing the information you need to extract.

Note: this model is purely extractive, so all text output by the model is present verbatim in the original text. You can also provide an example of output formatting to help the model understand your task more precisely.

Note: while this model provides good zero-shot performance, it is intended to be fine-tuned on a specific task (>=30 examples).

We also provide base (3.8B) and large (7B) versions of this model: [NuExtract](https://huggingface.co/numind/NuExtract) and [NuExtract-large](https://huggingface.co/numind/NuExtract-large).

**Check out other models by NuMind:**

* SOTA zero-shot NER model: [NuNER Zero](https://huggingface.co/numind/NuNER_Zero)
* SOTA multilingual entity recognition foundation model: [link](https://huggingface.co/numind/entity-recognition-multilingual-general-sota-v1)
* SOTA sentiment analysis foundation model: [English](https://huggingface.co/numind/generic-sentiment-v1), [Multilingual](https://huggingface.co/numind/generic-sentiment-multi-v1)

## Usage

To use the model:

```python
import json

from transformers import AutoModelForCausalLM, AutoTokenizer


def predict_NuExtract(model, tokenizer, text, schema, example=["", "", ""]):
    # Normalize the schema so the prompt always carries the same indentation.
    schema = json.dumps(json.loads(schema), indent=4)
    input_llm = "<|input|>\n### Template:\n" + schema + "\n"
    # Non-empty entries in `example` are optional output-formatting examples
    # appended to the prompt.
    for i in example:
        if i != "":
            input_llm += "### Example:\n" + json.dumps(json.loads(i), indent=4) + "\n"

    input_llm += "### Text:\n" + text + "\n<|output|>\n"
    input_ids = tokenizer(input_llm, return_tensors="pt", truncation=True, max_length=4000).to("cuda")

    output = tokenizer.decode(model.generate(**input_ids)[0], skip_special_tokens=True)
    # The extraction sits between the <|output|> and <|end-output|> markers.
    return output.split("<|output|>")[1].split("<|end-output|>")[0]


model = AutoModelForCausalLM.from_pretrained("numind/NuExtract-tiny", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("numind/NuExtract-tiny", trust_remote_code=True)

model.to("cuda")
model.eval()

text = """We introduce Mistral 7B, a 7–billion-parameter language model engineered for
superior performance and efficiency. Mistral 7B outperforms the best open 13B
model (Llama 2) across all evaluated benchmarks, and the best released 34B
model (Llama 1) in reasoning, mathematics, and code generation. Our model
leverages grouped-query attention (GQA) for faster inference, coupled with sliding
window attention (SWA) to effectively handle sequences of arbitrary length with a
reduced inference cost. We also provide a model fine-tuned to follow instructions,
Mistral 7B – Instruct, that surpasses Llama 2 13B – chat model both on human and
automated benchmarks. Our models are released under the Apache 2.0 license.
Code: https://github.com/mistralai/mistral-src
Webpage: https://mistral.ai/news/announcing-mistral-7b/"""

schema = """{
    "Model": {
        "Name": "",
        "Number of parameters": "",
        "Number of max token": "",
        "Architecture": []
    },
    "Usage": {
        "Use case": [],
        "Licence": ""
    }
}"""

prediction = predict_NuExtract(model, tokenizer, text, schema, example=["", "", ""])
print(prediction)
```
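
The `example` argument above accepts up to three JSON strings illustrating the desired output, which the README notes can help the model understand the task more precisely. A minimal sketch of passing one such formatting example, reusing the `model`, `tokenizer`, `text`, and `schema` defined above; the field values are purely illustrative, not real model output:

```python
# Hypothetical formatting example; values are illustrative only.
example_output = json.dumps({
    "Model": {
        "Name": "Mistral 7B",
        "Number of parameters": "7 billion",
        "Number of max token": "",
        "Architecture": ["GQA", "SWA"]
    },
    "Usage": {
        "Use case": [],
        "Licence": "Apache 2.0"
    }
})

# Unused example slots stay as empty strings and are skipped by predict_NuExtract.
prediction = predict_NuExtract(model, tokenizer, text, schema, example=[example_output, "", ""])
print(prediction)
```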
added_tokens.json · 6 lines · Normal file
@@ -0,0 +1,6 @@
{
  "<|end-output|>": 151646,
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
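
These four entries extend the base Qwen1.5 vocabulary; `<|end-output|>` (151646) is the marker the README code splits on. A quick sanity check of the mapping, assuming the repository is reachable under the `numind/NuExtract-tiny` ID used above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("numind/NuExtract-tiny", trust_remote_code=True)
# Each printed ID should match the mapping in added_tokens.json.
for token in ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end-output|>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
```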
config.json · 28 lines · Normal file
@@ -0,0 +1,28 @@
{
  "_name_or_path": "Qwen/Qwen1.5-0.5B",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 2816,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "num_key_value_heads": 16,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": true,
  "torch_dtype": "float32",
  "transformers_version": "4.42.0.dev0",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
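
These hyperparameters can be inspected without loading the full weights; a minimal sketch using the standard transformers config loader:

```python
from transformers import AutoConfig

# Fetches and parses only config.json, not the ~1.86 GB weights file.
config = AutoConfig.from_pretrained("numind/NuExtract-tiny", trust_remote_code=True)
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # qwen2 1024 24
```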
configuration.json · 1 line · Normal file
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
generation_config.json · 6 lines · Normal file
@@ -0,0 +1,6 @@
{
  "bos_token_id": 151643,
  "eos_token_id": 151646,
  "max_new_tokens": 2048,
  "transformers_version": "4.42.0.dev0"
}
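
Note that `eos_token_id` here is 151646, i.e. `<|end-output|>` from added_tokens.json, so `model.generate` stops at the end-of-output marker rather than at `<|endoftext|>` (151643). A small sketch to confirm the defaults that `generate` will pick up:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("numind/NuExtract-tiny")
# 151646 == <|end-output|>, so decoding halts once the extraction is complete.
print(gen_config.eos_token_id, gen_config.max_new_tokens)  # 151646 2048
```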
merges.txt · 151388 lines · Normal file
File diff suppressed because it is too large
model.safetensors · 3 lines · Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31109b1a75bd232a1f6882cd1910a17492425531de9b7ffb60f39ccfc9fd2f79
size 1855983640
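
Only this Git LFS pointer is versioned in the commit; the 1,855,983,640-byte weights file lives in LFS storage. After downloading, the file can be checked against the pointer's SHA-256. A minimal sketch, assuming `model.safetensors` sits in the current directory:

```python
import hashlib

# Stream the ~1.86 GB file in 1 MiB chunks to keep memory use flat.
sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
# Should match the oid in the LFS pointer above.
print(sha.hexdigest() == "31109b1a75bd232a1f6882cd1910a17492425531de9b7ffb60f39ccfc9fd2f79")
```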
special_tokens_map.json · 14 lines · Normal file
@@ -0,0 +1,14 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|endoftext|>"
}
tokenizer.json · 303126 lines · Normal file
File diff suppressed because it is too large
tokenizer_config.json · 51 lines · Normal file
@@ -0,0 +1,51 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|end-output|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": null,
  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end-output|>",
  "errors": "replace",
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
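
The `chat_template` above is carried over from the Qwen1.5 base model; NuExtract's own prompting uses the manual `<|input|>`/`<|output|>` format shown in the README instead. A minimal sketch of rendering the inherited template anyway, should you want to inspect it:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("numind/NuExtract-tiny", trust_remote_code=True)
# Per the template, a default system turn is injected when the first message
# is not a system message.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
```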
vocab.json · 1 line · Normal file
File diff suppressed because one or more lines are too long