diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md
index 3c4e89f..db447b5 100644
--- a/docs/source/tutorials/index.md
+++ b/docs/source/tutorials/index.md
@@ -5,6 +5,7 @@
 :maxdepth: 1
 single_npu
 single_npu_multimodal
+single_npu_audio
 multi_npu
 multi_npu_quantization
 single_node_300i
diff --git a/docs/source/tutorials/single_npu_audio.md b/docs/source/tutorials/single_npu_audio.md
new file mode 100644
index 0000000..130ed00
--- /dev/null
+++ b/docs/source/tutorials/single_npu_audio.md
@@ -0,0 +1,125 @@
+# Single NPU (Qwen2-Audio 7B)
+
+## Run vllm-ascend on Single NPU
+
+### Offline Inference on Single NPU
+
+Run the docker container:
+
+```{code-block} bash
+   :substitutions:
+# Update the vllm-ascend image
+export IMAGE=quay.io/ascend/vllm-ascend:|vllm_ascend_version|
+docker run --rm \
+--name vllm-ascend \
+--device /dev/davinci0 \
+--device /dev/davinci_manager \
+--device /dev/devmm_svm \
+--device /dev/hisi_hdc \
+-v /usr/local/dcmi:/usr/local/dcmi \
+-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
+-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
+-v /etc/ascend_install.info:/etc/ascend_install.info \
+-v /root/.cache:/root/.cache \
+-p 8000:8000 \
+-it $IMAGE bash
+```
+
+Set up the environment variables:
+
+```bash
+# Use the vllm v1 engine
+export VLLM_USE_V1=1
+
+# Load the model from ModelScope to speed up the download
+export VLLM_USE_MODELSCOPE=True
+
+# Set `max_split_size_mb` to reduce memory fragmentation and avoid out-of-memory errors
+export PYTORCH_NPU_ALLOC_CONF=max_split_size_mb:256
+```
+
+:::{note}
+`max_split_size_mb` prevents the native allocator from splitting blocks larger than this size (in MB). This can reduce fragmentation and may allow some borderline workloads to complete without running out of memory. You can find more details [here](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/800alpha003/apiref/envref/envref_07_0061.html).
+:::
+
+Install the packages required for audio processing:
+
+```bash
+pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
+pip install librosa soundfile
+```
+
+Run the following script to execute offline inference on a single NPU:
+
+```python
+from vllm import LLM, SamplingParams
+from vllm.assets.audio import AudioAsset
+
+audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
+question_per_audio_count = {
+    1: "What is recited in the audio?",
+    2: "What sport and what nursery rhyme are referenced?"
+}
+
+
+def prepare_inputs(audio_count: int):
+    audio_in_prompt = "".join([
+        f"Audio {idx+1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
+        for idx in range(audio_count)
+    ])
+    question = question_per_audio_count[audio_count]
+    prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
+              "<|im_start|>user\n"
+              f"{audio_in_prompt}{question}<|im_end|>\n"
+              "<|im_start|>assistant\n")
+
+    mm_data = {
+        "audio":
+        [asset.audio_and_sample_rate for asset in audio_assets[:audio_count]]
+    }
+
+    # Merge the text prompt and audio data into a single inputs dict
+    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
+    return inputs
+
+
+def main(audio_count: int):
+    # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM
+    # on NPUs with less device memory.
+    # `limit_mm_per_prompt`: the max number of items allowed for each modality
+    # per prompt.
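+    # `enforce_eager=True` runs the model in eager mode instead of capturing
+    # a graph, trading some throughput for broader compatibility.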
+    llm = LLM(model="Qwen/Qwen2-Audio-7B-Instruct",
+              max_model_len=4096,
+              max_num_seqs=5,
+              limit_mm_per_prompt={"audio": audio_count},
+              enforce_eager=True)
+
+    inputs = prepare_inputs(audio_count)
+
+    sampling_params = SamplingParams(temperature=0.2,
+                                     max_tokens=64,
+                                     stop_token_ids=None)
+
+    outputs = llm.generate(inputs, sampling_params=sampling_params)
+
+    for o in outputs:
+        generated_text = o.outputs[0].text
+        print(generated_text)
+
+
+if __name__ == "__main__":
+    audio_count = 2
+    main(audio_count)
+```
+
+If the script runs successfully, you should see output like the following:
+
+```bash
+The sport referenced is baseball, and the nursery rhyme is 'Mary Had a Little Lamb'.
+```
+
+### Online Serving on Single NPU
+
+Currently, vLLM's OpenAI-compatible server does not support audio inputs; see [this issue](https://github.com/vllm-project/vllm/issues/19977) for more details.
diff --git a/examples/offline_inference_audio_language.py b/examples/offline_inference_audio_language.py
index deb8105..7392283 100644
--- a/examples/offline_inference_audio_language.py
+++ b/examples/offline_inference_audio_language.py
@@ -26,74 +26,51 @@ on HuggingFace model repository.
 from vllm import LLM, SamplingParams
 from vllm.assets.audio import AudioAsset
-from vllm.utils import FlexibleArgumentParser
 
 audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
 question_per_audio_count = {
-    0: "What is 1+1?",
     1: "What is recited in the audio?",
     2: "What sport and what nursery rhyme are referenced?"
 }
 
-# NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on
-# lower-end GPUs.
-# Unless specified, these settings have been tested to work on a single L4.
-
-
-# Qwen2-Audio
-def run_qwen2_audio(question: str, audio_count: int):
-    model_name = "Qwen/Qwen2-Audio-7B-Instruct"
-
-    llm = LLM(model=model_name,
-              max_model_len=4096,
-              max_num_seqs=5,
-              limit_mm_per_prompt={"audio": audio_count})
 
+def prepare_inputs(audio_count: int):
     audio_in_prompt = "".join([
-        f"Audio {idx+1}: "
-        f"<|audio_bos|><|AUDIO|><|audio_eos|>\n" for idx in range(audio_count)
+        f"Audio {idx+1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
+        for idx in range(audio_count)
     ])
-
+    question = question_per_audio_count[audio_count]
     prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
               "<|im_start|>user\n"
               f"{audio_in_prompt}{question}<|im_end|>\n"
               "<|im_start|>assistant\n")
-    stop_token_ids = None
-    return llm, prompt, stop_token_ids
+
+    mm_data = {
+        "audio":
+        [asset.audio_and_sample_rate for asset in audio_assets[:audio_count]]
+    }
+
+    # Merge the text prompt and audio data into a single inputs dict
+    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
+    return inputs
 
 
-model_example_map = {"qwen2_audio": run_qwen2_audio}
 
+def main(audio_count: int):
+    # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM
+    # on NPUs with less device memory.
+    # `limit_mm_per_prompt`: the max number of items allowed for each modality
+    # per prompt.
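+    # `max_num_seqs` caps how many sequences are batched per step, which in
+    # turn bounds peak activation memory on the device.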
+    llm = LLM(model="Qwen/Qwen2-Audio-7B-Instruct",
+              max_model_len=4096,
+              max_num_seqs=5,
+              limit_mm_per_prompt={"audio": audio_count},
+              enforce_eager=True)
+
+    inputs = prepare_inputs(audio_count)
 
-def main(args):
-    model = args.model_type
-    if model not in model_example_map:
-        raise ValueError(f"Model type {model} is not supported.")
-
-    audio_count = args.num_audios
-    llm, prompt, stop_token_ids = model_example_map[model](
-        question_per_audio_count[audio_count], audio_count)
-
-    # We set temperature to 0.2 so that outputs can be different
-    # even when all prompts are identical when running batch inference.
     sampling_params = SamplingParams(temperature=0.2,
                                      max_tokens=64,
-                                     stop_token_ids=stop_token_ids)
-
-    mm_data = {}
-    if audio_count > 0:
-        mm_data = {
-            "audio": [
-                asset.audio_and_sample_rate
-                for asset in audio_assets[:audio_count]
-            ]
-        }
-
-    assert args.num_prompts > 0
-    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
-    if args.num_prompts > 1:
-        # Batch inference
-        inputs = [inputs] * args.num_prompts  # type: ignore
+                                     stop_token_ids=None)
 
     outputs = llm.generate(inputs, sampling_params=sampling_params)
@@ -103,24 +80,5 @@ def main(args):
 
 
 if __name__ == "__main__":
-    parser = FlexibleArgumentParser(
-        description='Demo on using vLLM for offline inference with '
-        'audio language models')
-    parser.add_argument('--model-type',
-                        '-m',
-                        type=str,
-                        default="qwen2_audio",
-                        choices=model_example_map.keys(),
-                        help='Huggingface "model_type".')
-    parser.add_argument('--num-prompts',
-                        type=int,
-                        default=1,
-                        help='Number of prompts to run.')
-    parser.add_argument("--num-audios",
-                        type=int,
-                        default=1,
-                        choices=[0, 1, 2],
-                        help="Number of audio items per prompt.")
-
-    args = parser.parse_args()
-    main(args)
+    audio_count = 2
+    main(audio_count)
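
With this change applied, the example no longer takes command-line flags. Assuming the audio dependencies from the tutorial above (`librosa`, `soundfile`) are installed, a quick smoke test of the refactored script is:

```bash
# Run the refactored example; audio_count is now hard-coded to 2
python examples/offline_inference_audio_language.py
```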