Update README.md
.gitattributes (vendored, 31 lines changed)
```diff
@@ -1,47 +1,36 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
```
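The change deduplicates the rules and routes every weight, archive, and tensor format through Git LFS. To check whether a given path falls under one of these rules, `git check-attr` can be queried; a minimal Python wrapper as a sketch (running `git` via `subprocess` assumes a local checkout and is not part of this commit):

```python
import subprocess

def uses_lfs(path):
    """Return True if .gitattributes routes `path` through the LFS filter."""
    out = subprocess.run(
        ["git", "check-attr", "filter", "--", path],
        capture_output=True, text=True, check=True,
    ).stdout
    # git prints e.g. "pytorch_model.bin: filter: lfs"
    return out.strip().endswith("filter: lfs")

print(uses_lfs("pytorch_model.bin"))  # True under the rules above
```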
README.md (239 lines changed)
@@ -1,47 +1,204 @@
The auto-generated ModelScope placeholder card was removed (its Chinese comments are translated here for reference):

````diff
----
-license: Apache License 2.0
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-#language:
-## language code list: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-#metrics:
-## e.g. CIDEr, Blue, ROUGE, etc.
-#- CIDEr
-#tags:
-## free-form tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
-#- pretrained
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
----
-### The contributors of this model have not yet provided a more detailed description. Model files and weights are available on the "Model files" page.
-#### You can download the model with the git clone command below, or through the ModelScope SDK
-SDK download
-```bash
-# install ModelScope
-pip install modelscope
-```
-```python
-# download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('AI-ModelScope/ShowUI-2B')
-```
-Git download
-```
-# download the model via git
-git clone https://www.modelscope.cn/AI-ModelScope/ShowUI-2B.git
-```
-<p style="color: lightgrey;">If you are a contributor to this model, we invite you to complete the model card according to the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution guide</a>.</p>
````

It was replaced with the ShowUI model card:

---
tags:
- GUI agents
- vision-language-action model
- computer use
base_model:
- Qwen/Qwen2-VL-2B-Instruct
license: mit
---

[Github](https://github.com/showlab/ShowUI/tree/main) | [arXiv](https://arxiv.org/abs/2411.17465) | [HF Paper](https://huggingface.co/papers/2411.17465) | [Spaces](https://huggingface.co/spaces/showlab/ShowUI) | [Datasets](https://huggingface.co/datasets/showlab/ShowUI-desktop-8K) | [Quick Start](https://huggingface.co/showlab/ShowUI-2B)

<img src="examples/showui.jpg" alt="ShowUI" width="640">

ShowUI is a lightweight (2B) vision-language-action model designed for GUI agents.

## 🤗 Try our HF Space Demo
https://huggingface.co/spaces/showlab/ShowUI

## ⭐ Quick Start

1. Load model

```python
import ast
from io import BytesIO  # needed by draw_point for remote images

import requests  # needed by draw_point for remote images
import torch
from IPython.display import display  # draw_point assumes a notebook environment
from PIL import Image, ImageDraw
from qwen_vl_utils import process_vision_info
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor


def draw_point(image_input, point=None, radius=5):
    """Draw a red dot at a relative [x, y] point and display the image."""
    if isinstance(image_input, str):
        image = Image.open(BytesIO(requests.get(image_input).content)) if image_input.startswith('http') else Image.open(image_input)
    else:
        image = image_input

    if point:
        x, y = point[0] * image.width, point[1] * image.height
        ImageDraw.Draw(image).ellipse((x - radius, y - radius, x + radius, y + radius), fill='red')
    display(image)


model = Qwen2VLForConditionalGeneration.from_pretrained(
    "showlab/ShowUI-2B",
    torch_dtype=torch.bfloat16,
    device_map="auto"
)

min_pixels = 256 * 28 * 28
max_pixels = 1344 * 28 * 28

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
```
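The `min_pixels`/`max_pixels` overrides bound how many visual tokens each screenshot consumes. A rough sketch of what those budgets mean (the 28-pixel tile follows from the processor's `patch_size` of 14 and `merge_size` of 2 in `preprocessor_config.json` below; the exact resizing is left to the processor):

```python
# Each visual token corresponds to one 28x28 pixel tile after 2x2 patch merging.
MIN_TOKENS, MAX_TOKENS = 256, 1344
print(MIN_TOKENS * 28 * 28)  # 200704  -> smallest allowed image area
print(MAX_TOKENS * 28 * 28)  # 1053696 -> largest allowed image area (~1026x1026 if square)
```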
2. **UI Grounding**

```python
img_url = 'examples/web_dbd7514b-9ca3-40cd-b09a-990f7b955da1.png'
query = "Nahant"

_SYSTEM = "Based on the screenshot of the page, I give a text description and you give its corresponding location. The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 0 to 1."
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": _SYSTEM},
            {"type": "image", "image": img_url, "min_pixels": min_pixels, "max_pixels": max_pixels},
            {"type": "text", "text": query}
        ],
    }
]

text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True,
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]

click_xy = ast.literal_eval(output_text)
# [0.73, 0.21]

draw_point(img_url, click_xy, 10)
```

This visualizes the grounding result as below (the red point marks the predicted [x, y]):


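To act on a prediction outside a notebook, the relative coordinate has to be scaled to the live screen before clicking. A minimal sketch of that step (`pyautogui` is an assumed third-party dependency, not part of the quick start above):

```python
# Hypothetical helper: map ShowUI's relative [x, y] onto the current
# screen and click there. pyautogui is an assumed dependency.
import pyautogui

def click_relative(point):
    width, height = pyautogui.size()  # screen resolution in pixels
    pyautogui.click(point[0] * width, point[1] * height)

click_relative(click_xy)  # e.g. [0.73, 0.21] from the grounding step above
```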
3. **UI Navigation**
- Set up system prompt.

```python
_NAV_SYSTEM = """You are an assistant trained to navigate the {_APP} screen.
Given a task instruction, a screen observation, and an action history sequence,
output the next action and wait for the next observation.
Here is the action space:
{_ACTION_SPACE}
"""

_NAV_FORMAT = """
Format the action as a dictionary with the following keys:
{'action': 'ACTION_TYPE', 'value': 'element', 'position': [x,y]}

If value or position is not applicable, set it as `None`.
Position might be [[x1,y1], [x2,y2]] if the action requires a start and end position.
Position represents the relative coordinates on the screenshot and should be scaled to a range of 0-1.
"""

action_map = {
    'web': """
1. `CLICK`: Click on an element, value is not applicable and the position [x,y] is required.
2. `INPUT`: Type a string into an element, value is a string to type and the position [x,y] is required.
3. `SELECT`: Select a value for an element, value is not applicable and the position [x,y] is required.
4. `HOVER`: Hover on an element, value is not applicable and the position [x,y] is required.
5. `ANSWER`: Answer the question, value is the answer and the position is not applicable.
6. `ENTER`: Enter operation, value and position are not applicable.
7. `SCROLL`: Scroll the screen, value is the direction to scroll and the position is not applicable.
8. `SELECT_TEXT`: Select some text content, value is not applicable and position [[x1,y1], [x2,y2]] is the start and end position of the select operation.
9. `COPY`: Copy the text, value is the text to copy and the position is not applicable.
""",

    'phone': """
1. `INPUT`: Type a string into an element, value is not applicable and the position [x,y] is required.
2. `SWIPE`: Swipe the screen, value is not applicable and the position [[x1,y1], [x2,y2]] is the start and end position of the swipe operation.
3. `TAP`: Tap on an element, value is not applicable and the position [x,y] is required.
4. `ANSWER`: Answer the question, value is the status (e.g., 'task complete') and the position is not applicable.
5. `ENTER`: Enter operation, value and position are not applicable.
"""
}
```
```python
img_url = 'examples/chrome.png'
split = 'web'
system_prompt = _NAV_SYSTEM.format(_APP=split, _ACTION_SPACE=action_map[split])
query = "Search the weather for the New York city."

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": system_prompt},
            {"type": "text", "text": f'Task: {query}'},
            # {"type": "text", "text": PAST_ACTION},
            {"type": "image", "image": img_url, "min_pixels": min_pixels, "max_pixels": max_pixels},
        ],
    }
]

text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True,
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]

print(output_text)
# {'action': 'CLICK', 'value': None, 'position': [0.49, 0.42]},
# {'action': 'INPUT', 'value': 'weather for New York city', 'position': [0.49, 0.42]},
# {'action': 'ENTER', 'value': None, 'position': None}
```


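As the commented output above suggests, the model can emit one action dictionary or several comma-separated ones. A small hedged parser (wrapping the text in brackets so `ast.literal_eval` accepts both cases is an assumption based on that example output, not a documented output contract):

```python
import ast

def parse_actions(output_text):
    """Parse one or more action dicts such as {'action': 'CLICK', ...}."""
    text = output_text.strip().rstrip(',')
    return ast.literal_eval(f'[{text}]')  # a lone dict also parses as a one-item list

for act in parse_actions(output_text):
    print(act['action'], act.get('value'), act.get('position'))
```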
If you find our work helpful, please consider citing our paper.

```
@misc{lin2024showui,
      title={ShowUI: One Vision-Language-Action Model for GUI Visual Agent},
      author={Kevin Qinghong Lin and Linjie Li and Difei Gao and Zhengyuan Yang and Shiwei Wu and Zechen Bai and Weixian Lei and Lijuan Wang and Mike Zheng Shou},
      year={2024},
      eprint={2411.17465},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2411.17465},
}
```
added_tokens.json (new file, 16 lines)

```json
{
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
```
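These IDs should agree with what the tokenizer reports at runtime; a quick sanity check (assuming the repository loads through `AutoProcessor` as in the README):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("showlab/ShowUI-2B")
tok = processor.tokenizer
assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645  # also the eos token
```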
config.json (new file, 48 lines)

```json
{
  "_name_or_path": "Qwen/Qwen2-VL-2B-Instruct",
  "architectures": ["Qwen2VLForConditionalGeneration"],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2_vl",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [16, 24, 24],
    "type": "mrope"
  },
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": true,
  "tokenizer_model_max_length": 4096,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.0.dev0",
  "use_cache": false,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "hidden_size": 1536,
    "in_chans": 3,
    "model_type": "qwen2_vl",
    "spatial_patch_size": 14
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 151936
}
```
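The architecture can be inspected without downloading the 4.4 GB weights; `AutoConfig` is standard transformers API, and the repo id mirrors the README's:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("showlab/ShowUI-2B")
print(cfg.model_type)    # qwen2_vl
print(cfg.hidden_size)   # 1536
print(cfg.rope_scaling)  # {'mrope_section': [16, 24, 24], 'type': 'mrope'}
```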
configuration.json (new file, 1 line)

```json
{"framework": "pytorch", "task": "image-to-text", "allow_remote": true}
```
examples/0730d43001da36204b8cb9495b61308.png (new binary file, 66 KiB, not shown)
examples/chrome.png (new binary file, 66 KiB, not shown)
examples/showui.jpg (new binary file, 54 KiB, not shown)
generation_config.json (new file, 14 lines)

```json
{
  "_attn_implementation": "eager",
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [151645, 151643],
  "pad_token_id": 151643,
  "temperature": 0.01,
  "top_k": 1,
  "top_p": 0.001,
  "transformers_version": "4.45.0.dev0"
}
```
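With `top_k` of 1 and a near-zero temperature, this default is effectively greedy decoding even though `do_sample` is true, which suits deterministic coordinate prediction. It can be overridden per call with standard `generate()` kwargs, e.g.:

```python
# Explicitly deterministic decoding; do_sample=False makes generate()
# ignore temperature/top_k/top_p entirely.
generated_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)
```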
merges.txt (new file, 151,388 lines; diff suppressed because it is too large)
preprocessor_config.json (new file, 29 lines)

```json
{
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [0.48145466, 0.4578275, 0.40821073],
  "image_processor_type": "Qwen2VLImageProcessor",
  "image_std": [0.26862954, 0.26130258, 0.27577711],
  "max_pixels": 12845056,
  "merge_size": 2,
  "min_pixels": 3136,
  "patch_size": 14,
  "processor_class": "Qwen2VLProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "max_pixels": 12845056,
    "min_pixels": 3136
  },
  "temporal_patch_size": 2
}
```
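The README's quick start tightens these defaults (`min_pixels=256*28*28`, `max_pixels=1344*28*28`). Conceptually, the processor clamps each image's area into `[min_pixels, max_pixels]` while keeping both sides divisible by `patch_size * merge_size = 28`. A rough sketch of that rule (the exact rounding in `Qwen2VLImageProcessor` may differ; this is an illustration, not the implementation):

```python
def approx_target_size(w, h, min_pixels=3136, max_pixels=12845056, unit=28):
    """Approximate the processor's resize target for a w x h image."""
    area = w * h
    scale = 1.0
    if area > max_pixels:
        scale = (max_pixels / area) ** 0.5
    elif area < min_pixels:
        scale = (min_pixels / area) ** 0.5
    # snap both sides to multiples of 28 (patch_size * merge_size)
    return (max(unit, round(w * scale / unit) * unit),
            max(unit, round(h * scale / unit) * unit))

print(approx_target_size(1920, 1080))  # already within the default budget
```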
pytorch_model.bin (new file, 3 lines; Git LFS pointer)

```
version https://git-lfs.github.com/spec/v1
oid sha256:68080df785764e98976eb9cc93a07c6c69cf8a6933738496e02aef55b53d2aa3
size 4418202778
```
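This is only a pointer: the actual checkpoint is about 4.4 GB (the `size` field is in bytes), so a plain `git clone` without LFS leaves just this stub. The (removed) ModelScope card's SDK route fetches the real file:

```python
# From the removed ModelScope card; downloads the whole repo,
# including the 4.4 GB pytorch_model.bin.
from modelscope import snapshot_download

model_dir = snapshot_download('AI-ModelScope/ShowUI-2B')
print(model_dir)
```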
special_tokens_map.json (new file, 24 lines)

```json
{
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>",
    "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>",
    "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenizer.json (new file, 303,211 lines; diff suppressed because it is too large)
tokenizer_config.json (new file, 144 lines; the repetitive `added_tokens_decoder` entries are compacted to one line each, all with the same flags)

```json
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
  "bos_token": null,
  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "model_max_length": 4096,
  "pad_token": null,
  "padding_side": "right",
  "processor_class": "Qwen2VLProcessor",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
```
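The `chat_template` above is what `processor.apply_chat_template` renders in the README examples: each image becomes `<|vision_start|><|image_pad|><|vision_end|>`, and a default system turn is injected when none is supplied. A minimal sketch of the rendered prompt (message layout borrowed from the grounding example; `processor` as loaded there):

```python
messages = [{"role": "user", "content": [
    {"type": "image", "image": "examples/chrome.png"},
    {"type": "text", "text": "Nahant"},
]}]
print(processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>Nahant<|im_end|>
# <|im_start|>assistant
```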
vocab.json (new file, 1 line; diff suppressed because the line is too long)