diff --git a/docs/source/tutorials/single_npu_audio.md b/docs/source/tutorials/single_npu_audio.md
index 734e6dc..137d761 100644
--- a/docs/source/tutorials/single_npu_audio.md
+++ b/docs/source/tutorials/single_npu_audio.md
@@ -54,6 +54,7 @@
 from vllm import LLM, SamplingParams
 from vllm.assets.audio import AudioAsset
 from vllm.utils import FlexibleArgumentParser
+# If network issues prevent AudioAsset from fetching remote audio files, retry or check your network.
 audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
 question_per_audio_count = {
     1: "What is recited in the audio?",
diff --git a/docs/source/tutorials/single_npu_multimodal.md b/docs/source/tutorials/single_npu_multimodal.md
index 8c19651..a678ec7 100644
--- a/docs/source/tutorials/single_npu_multimodal.md
+++ b/docs/source/tutorials/single_npu_multimodal.md
@@ -43,7 +43,7 @@ export PYTORCH_NPU_ALLOC_CONF=max_split_size_mb:256
 Run the following script to execute offline inference on a single NPU:

 ```bash
-pip install torchvision==0.20.1 qwen_vl_utils --extra-index-url https://download.pytorch.org/whl/cpu/
+pip install qwen_vl_utils --extra-index-url https://download.pytorch.org/whl/cpu/
 ```

 ```python