Update README.md
README.md
````diff
@@ -96,22 +96,25 @@ Here we show a code snippet to show you how to use the chat model with `transfor
 ```python
 from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
 from qwen_vl_utils import process_vision_info
+from modelscope import snapshot_download
 
+model_dir = snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct-AWQ")
+
 # default: Load the model on the available device(s)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    "Qwen/Qwen2.5-VL-7B-Instruct-AWQ", torch_dtype="auto", device_map="auto"
+    model_dir, torch_dtype="auto", device_map="auto"
 )
 
 # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
 # model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-#     "Qwen/Qwen2.5-VL-7B-Instruct-AWQ",
+#     model_dir,
 #     torch_dtype=torch.bfloat16,
 #     attn_implementation="flash_attention_2",
 #     device_map="auto",
 # )
 
 # default processor
-processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct-AWQ")
+processor = AutoProcessor.from_pretrained(model_dir)
 
 # The default range for the number of visual tokens per image in the model is 4-16384.
 # You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
@@ -207,7 +210,7 @@ The model supports a wide range of resolution inputs. By default, it uses the na
 min_pixels = 256 * 28 * 28
 max_pixels = 1280 * 28 * 28
 processor = AutoProcessor.from_pretrained(
-    "Qwen/Qwen2.5-VL-7B-Instruct-AWQ", min_pixels=min_pixels, max_pixels=max_pixels
+    model_dir, min_pixels=min_pixels, max_pixels=max_pixels
 )
 ```
````
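The change itself is small: instead of passing the Hub id straight to `from_pretrained`, the snippet now downloads the checkpoint through ModelScope's `snapshot_download` and loads from the returned local directory, which helps in regions where the ModelScope mirror is faster. For readers who would rather stay on the Hugging Face Hub, `huggingface_hub` ships a `snapshot_download` that plays the same role; a sketch of that alternative (not part of this commit):

```python
# Alternative (not in this commit): download the same weights from the
# Hugging Face Hub and point from_pretrained at the local directory.
from huggingface_hub import snapshot_download

model_dir = snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct-AWQ")
```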
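For context, in the full README this snippet continues by building a chat message and running generation with the freshly loaded `model` and `processor`. A minimal sketch of that continuation, following the standard Qwen2.5-VL usage pattern (the image URL is a placeholder; `model`, `processor`, and `process_vision_info` come from the snippet above):

```python
# Build a multimodal chat message; the image URL is a placeholder.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://example.com/demo.jpeg"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Render the chat template, extract vision inputs, and tokenize everything together.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt",
).to(model.device)

# Generate, then decode only the newly produced tokens.
generated_ids = model.generate(**inputs, max_new_tokens=128)
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True))
```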
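A note on the `min_pixels` / `max_pixels` arithmetic: the README's comment pairs a token range of 256-1280 with pixel budgets of `256 * 28 * 28` and `1280 * 28 * 28`, i.e. roughly one visual token per 28x28 pixel patch. The helper below is hypothetical, illustrating the budget arithmetic only, not a library API:

```python
import math

def approx_visual_tokens(height: int, width: int,
                         min_pixels: int = 256 * 28 * 28,
                         max_pixels: int = 1280 * 28 * 28) -> int:
    """Hypothetical helper: estimate the visual token count for an image,
    assuming one token per 28x28 pixel patch, clamped to the pixel budget."""
    pixels = min(max(height * width, min_pixels), max_pixels)
    return math.ceil(pixels / (28 * 28))

print(approx_visual_tokens(480, 640))    # small image: ~392 tokens
print(approx_visual_tokens(2160, 3840))  # large image: clamped to 1280 tokens
```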