From b2ef04d792ea4cf703602f8a0e8781b364bfe11a Mon Sep 17 00:00:00 2001 From: luopingyi Date: Fri, 9 Jan 2026 13:34:11 +0800 Subject: [PATCH] init --- CMakeLists.txt | 366 +++ CONTRIBUTING.md | 56 + Dockerfile | 164 +- Dockerfile.cpu | 20 + Dockerfile.neuron | 36 + Dockerfile.rocm | 107 + LICENSE | 846 +++++++ MANIFEST.in | 10 + README.md | 125 +- README_vllm_musa.md | 66 + benchmarks/README.md | 8 + benchmarks/backend_request_func.py | 389 +++ benchmarks/benchmark_latency.py | 195 ++ benchmarks/benchmark_prefix_caching.py | 62 + benchmarks/benchmark_serving.py | 596 +++++ benchmarks/benchmark_throughput.py | 387 +++ benchmarks/kernels/benchmark_aqlm.py | 302 +++ benchmarks/kernels/benchmark_mixtral_moe.py | 215 ++ .../kernels/benchmark_paged_attention.py | 211 ++ benchmarks/kernels/benchmark_rope.py | 121 + benchmarks/launch_tgi_server.sh | 16 + benchmarks/sonnet.txt | 518 ++++ build_musa.sh | 20 + cmake/cpu_extension.cmake | 90 + cmake/hipify.py | 73 + cmake/utils.cmake | 354 +++ collect_env.py | 721 ++++++ csrc_musa/activation_kernels.mu | 161 ++ csrc_musa/attention/attention_dtypes.h | 7 + csrc_musa/attention/attention_generic.muh | 65 + csrc_musa/attention/attention_kernels.mu | 981 ++++++++ csrc_musa/attention/attention_utils.muh | 57 + csrc_musa/attention/dtype_bfloat16.muh | 452 ++++ csrc_musa/attention/dtype_float16.muh | 503 ++++ csrc_musa/attention/dtype_float32.muh | 274 +++ csrc_musa/attention/dtype_fp8.muh | 35 + csrc_musa/cache.h | 38 + csrc_musa/cache_kernels.mu | 419 ++++ csrc_musa/cpu/activation.cpp | 148 ++ csrc_musa/cpu/attention.cpp | 746 ++++++ csrc_musa/cpu/cache.cpp | 141 ++ csrc_musa/cpu/cpu_types.hpp | 352 +++ csrc_musa/cpu/layernorm.cpp | 117 + csrc_musa/cpu/pos_encoding.cpp | 199 ++ csrc_musa/cpu/pybind.cpp | 73 + csrc_musa/custom_all_reduce.mu | 148 ++ csrc_musa/custom_all_reduce.muh | 485 ++++ csrc_musa/custom_all_reduce_test.mu | 316 +++ csrc_musa/dispatch_utils.h | 37 + csrc_musa/layernorm_kernels.mu | 352 +++ 
csrc_musa/moe/moe_ops.cpp | 7 + csrc_musa/moe/moe_ops.h | 9 + csrc_musa/moe/topk_softmax_kernels.mu | 500 ++++ csrc_musa/moe_align_block_size_kernels.mu | 125 + csrc_musa/musa_compat.h | 38 + csrc_musa/musa_utils.h | 10 + csrc_musa/musa_utils_kernels.mu | 35 + csrc_musa/ops.h | 206 ++ csrc_musa/pos_encoding_kernels.mu | 226 ++ csrc_musa/punica/.LICENSE | 217 ++ csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu | 5 + csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu | 5 + csrc_musa/punica/bgmv/bgmv_config.h | 162 ++ csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu | 5 + csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu | 5 + csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu | 5 + csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu | 5 + csrc_musa/punica/bgmv/bgmv_impl.muh | 297 +++ csrc_musa/punica/bgmv/generator.py | 48 + csrc_musa/punica/bgmv/vec_dtypes.muh | 1324 +++++++++++ csrc_musa/punica/punica_ops.cc | 582 +++++ csrc_musa/pybind.cpp | 136 ++ csrc_musa/quantization/aqlm/gemm_kernels.mu | 712 ++++++ csrc_musa/quantization/awq/dequantize.muh | 87 + csrc_musa/quantization/awq/gemm_kernels.mu | 446 ++++ .../quantization/fp8/amd_detail/hip_float8.h | 167 ++ .../fp8/amd_detail/hip_float8_impl.h | 316 +++ .../fp8/amd_detail/quant_utils.muh | 517 ++++ .../quantization/fp8/fp8_cuda_kernels.mu | 126 + .../fp8_e5m2_kvcache/quant_utils.muh | 277 +++ csrc_musa/quantization/gptq/compat.muh | 64 + csrc_musa/quantization/gptq/matrix_view.muh | 274 +++ csrc_musa/quantization/gptq/q_gemm.mu | 2075 +++++++++++++++++ csrc_musa/quantization/gptq/qdq_2.muh | 87 + csrc_musa/quantization/gptq/qdq_3.muh | 141 ++ csrc_musa/quantization/gptq/qdq_4.muh | 147 ++ csrc_musa/quantization/gptq/qdq_8.muh | 40 + csrc_musa/quantization/gptq/qdq_util.muh | 60 + .../quantization/gptq_marlin/gptq_marlin.mu | 1722 ++++++++++++++ .../quantization/gptq_marlin/gptq_marlin.muh | 70 + .../gptq_marlin/gptq_marlin_repack.mu | 352 +++ csrc_musa/quantization/marlin/.LICENSE | 209 ++ .../quantization/marlin/marlin_cuda_kernel.mu | 1138 
+++++++++ .../squeezellm/quant_cuda_kernel.mu | 225 ++ csrc_musa/reduction_utils.muh | 66 + docs/Makefile | 20 + docs/README.md | 19 + docs/make.bat | 35 + docs/requirements-docs.txt | 12 + .../dev/dockerfile-stages-dependency.png | Bin 0 -> 118207 bytes docs/source/assets/kernel/k_vecs.png | Bin 0 -> 27676 bytes docs/source/assets/kernel/key.png | Bin 0 -> 111314 bytes docs/source/assets/kernel/logits_vec.png | Bin 0 -> 17475 bytes docs/source/assets/kernel/q_vecs.png | Bin 0 -> 42065 bytes docs/source/assets/kernel/query.png | Bin 0 -> 32710 bytes docs/source/assets/kernel/v_vec.png | Bin 0 -> 42452 bytes docs/source/assets/kernel/value.png | Bin 0 -> 171134 bytes .../assets/logos/vllm-logo-only-light.png | Bin 0 -> 54209 bytes .../assets/logos/vllm-logo-text-dark.png | Bin 0 -> 88342 bytes .../assets/logos/vllm-logo-text-light.png | Bin 0 -> 89976 bytes docs/source/conf.py | 118 + docs/source/dev/dockerfile/dockerfile.rst | 50 + docs/source/dev/engine/async_llm_engine.rst | 6 + docs/source/dev/engine/engine_index.rst | 13 + docs/source/dev/engine/llm_engine.rst | 6 + docs/source/dev/kernel/paged_attention.rst | 525 +++++ docs/source/dev/sampling_params.rst | 5 + docs/source/generate_examples.py | 61 + .../getting_started/amd-installation.rst | 137 ++ .../getting_started/cpu-installation.rst | 87 + .../examples/examples_index.template.rst | 8 + docs/source/getting_started/installation.rst | 88 + .../getting_started/neuron-installation.rst | 136 ++ docs/source/getting_started/quickstart.rst | 176 ++ docs/source/index.rst | 113 + docs/source/models/adding_model.rst | 123 + docs/source/models/engine_args.rst | 23 + docs/source/models/lora.rst | 104 + docs/source/models/performance.rst | 38 + docs/source/models/supported_models.rst | 200 ++ docs/source/quantization/auto_awq.rst | 75 + docs/source/quantization/fp8_e4m3_kvcache.rst | 49 + docs/source/quantization/fp8_e5m2_kvcache.rst | 36 + .../source/serving/deploying_with_bentoml.rst | 8 + 
docs/source/serving/deploying_with_docker.rst | 54 + docs/source/serving/deploying_with_kserve.rst | 8 + docs/source/serving/deploying_with_triton.rst | 6 + docs/source/serving/distributed_serving.rst | 38 + docs/source/serving/env_vars.rst | 9 + docs/source/serving/integrations.rst | 11 + docs/source/serving/metrics.rst | 13 + .../serving/openai_compatible_server.md | 112 + docs/source/serving/run_on_sky.rst | 310 +++ .../source/serving/serving_with_langchain.rst | 31 + docs/source/serving/usage_stats.md | 57 + examples/api_client.py | 77 + examples/aqlm_example.py | 46 + examples/fp8/README.md | 96 + examples/fp8/extract_scales.py | 367 +++ examples/fp8/quantizer/README.md | 32 + examples/fp8/quantizer/quantize.py | 368 +++ examples/gradio_openai_chatbot_webserver.py | 82 + examples/gradio_webserver.py | 52 + examples/llava_example.py | 90 + examples/llm_engine_example.py | 62 + examples/logging_configuration.md | 178 ++ examples/multilora_inference.py | 124 + examples/offline_inference.py | 22 + examples/offline_inference_distributed.py | 72 + examples/offline_inference_neuron.py | 36 + examples/offline_inference_with_prefix.py | 53 + examples/openai_chat_completion_client.py | 36 + examples/openai_completion_client.py | 31 + examples/production_monitoring/README.md | 54 + .../production_monitoring/docker-compose.yaml | 19 + examples/production_monitoring/grafana.json | 1206 ++++++++++ .../production_monitoring/prometheus.yaml | 10 + examples/template_alpaca.jinja | 29 + examples/template_baichuan.jinja | 13 + examples/template_chatglm.jinja | 18 + examples/template_chatglm2.jinja | 18 + examples/template_chatml.jinja | 2 + examples/template_falcon.jinja | 15 + examples/template_falcon_180b.jinja | 17 + examples/template_inkbot.jinja | 30 + examples/tensorize_vllm_model.py | 282 +++ format.sh | 244 ++ musa_porting.py | 36 + pyproject.toml | 67 + requirements-build.txt | 8 + requirements-common.txt | 20 + requirements-cpu.txt | 6 + requirements-cuda.txt | 9 + 
requirements-dev.txt | 33 + requirements-musa.txt | 7 + requirements-neuron.txt | 7 + requirements-rocm.txt | 5 + rocm_patch/rocm_bf16.patch | 15 + setup.py | 447 ++++ tests/__init__.py | 0 tests/async_engine/api_server_async_engine.py | 50 + tests/async_engine/test_api_server.py | 108 + tests/async_engine/test_async_llm_engine.py | 96 + tests/async_engine/test_chat_template.py | 134 ++ .../test_merge_async_iterators.py | 41 + tests/async_engine/test_openapi_server_ray.py | 157 ++ tests/async_engine/test_request_tracker.py | 67 + .../test_basic_correctness.py | 50 + .../basic_correctness/test_chunked_prefill.py | 65 + tests/basic_correctness/test_preemption.py | 223 ++ tests/conftest.py | 417 ++++ tests/core/__init__.py | 0 tests/core/block/__init__.py | 0 tests/core/block/conftest.py | 12 + tests/core/block/e2e/conftest.py | 41 + tests/core/block/e2e/test_correctness.py | 455 ++++ tests/core/block/test_block_manager_v2.py | 103 + tests/core/block/test_block_table.py | 575 +++++ tests/core/block/test_common.py | 42 + .../block/test_cpu_gpu_block_allocator.py | 93 + tests/core/block/test_naive_block.py | 102 + tests/core/block/test_prefix_caching_block.py | 509 ++++ tests/core/test_block_manager.py | 367 +++ tests/core/test_chunked_prefill_scheduler.py | 564 +++++ tests/core/test_scheduler.py | 900 +++++++ tests/core/utils.py | 74 + .../test_basic_distributed_correctness.py | 59 + .../test_chunked_prefill_distributed.py | 66 + tests/distributed/test_comm_ops.py | 110 + tests/distributed/test_custom_all_reduce.py | 84 + tests/distributed/test_pynccl.py | 159 ++ tests/distributed/test_pynccl_library.py | 43 + .../output_processor/test_multi_step.py | 270 +++ tests/engine/test_computed_prefix_blocks.py | 34 + tests/engine/test_detokenization.py | 32 + tests/engine/test_multiproc_workers.py | 176 ++ tests/engine/test_skip_tokenizer_init.py | 23 + tests/engine/test_stop_reason.py | 59 + tests/engine/test_stop_strings.py | 111 + 
tests/entrypoints/openai/test_serving_chat.py | 37 + tests/entrypoints/test_guided_processors.py | 113 + tests/entrypoints/test_llm_generate.py | 41 + tests/entrypoints/test_openai_server.py | 894 +++++++ .../test_server_oot_registration.py | 66 + .../llama2-70b-fp8-kv/kv_cache_scales.json | 90 + .../llama2-7b-fp8-kv/kv_cache_scales.json | 42 + tests/kernels/allclose_default.py | 18 + tests/kernels/conftest.py | 14 + tests/kernels/test_activation.py | 78 + tests/kernels/test_attention.py | 376 +++ tests/kernels/test_cache.py | 375 +++ tests/kernels/test_layernorm.py | 54 + tests/kernels/test_moe.py | 101 + tests/kernels/test_pos_encoding.py | 208 ++ tests/kernels/test_prefix_prefill.py | 209 ++ tests/kernels/test_rand.py | 52 + tests/kernels/test_sampler.py | 196 ++ tests/lora/__init__.py | 0 tests/lora/conftest.py | 179 ++ tests/lora/test_baichuan.py | 108 + tests/lora/test_chatglm3.py | 57 + tests/lora/test_gemma.py | 46 + tests/lora/test_layer_variation.py | 106 + tests/lora/test_layers.py | 773 ++++++ tests/lora/test_llama.py | 148 ++ tests/lora/test_lora.py | 224 ++ tests/lora/test_lora_checkpoints.py | 58 + tests/lora/test_lora_manager.py | 487 ++++ tests/lora/test_mixtral.py | 53 + tests/lora/test_punica.py | 231 ++ tests/lora/test_quant_model.py | 179 ++ tests/lora/test_tokenizer_group.py | 55 + tests/lora/test_utils.py | 172 ++ tests/lora/test_worker.py | 69 + tests/lora/utils.py | 88 + tests/metrics/test_metrics.py | 194 ++ tests/model_executor/weight_utils.py | 54 + tests/models/test_aqlm.py | 95 + tests/models/test_big_models.py | 60 + tests/models/test_fp8.py | 90 + tests/models/test_gptq_marlin.py | 98 + tests/models/test_llava.py | 107 + tests/models/test_marlin.py | 78 + tests/models/test_mistral.py | 40 + tests/models/test_models.py | 66 + tests/models/test_oot_registration.py | 32 + tests/models/utils.py | 29 + tests/prefix_caching/test_prefix_caching.py | 75 + tests/prompts/example.txt | 8 + tests/prompts/summary.txt | 1 + 
tests/quantization/test_configs.py | 73 + tests/quantization/test_fp8.py | 24 + tests/samplers/test_beam_search.py | 54 + tests/samplers/test_ignore_eos.py | 31 + tests/samplers/test_logits_processor.py | 62 + tests/samplers/test_logprobs.py | 124 + tests/samplers/test_ranks.py | 50 + tests/samplers/test_rejection_sampler.py | 385 +++ tests/samplers/test_sampler.py | 661 ++++++ tests/samplers/test_seeded_generate.py | 82 + tests/spec_decode/__init__.py | 0 tests/spec_decode/e2e/__init__.py | 0 tests/spec_decode/e2e/conftest.py | 305 +++ tests/spec_decode/e2e/test_compatibility.py | 176 ++ tests/spec_decode/e2e/test_logprobs.py | 335 +++ .../e2e/test_multistep_correctness.py | 579 +++++ .../spec_decode/e2e/test_ngram_correctness.py | 172 ++ tests/spec_decode/test_batch_expansion.py | 98 + tests/spec_decode/test_metrics.py | 159 ++ tests/spec_decode/test_multi_step_worker.py | 431 ++++ tests/spec_decode/test_ngram_worker.py | 206 ++ tests/spec_decode/test_spec_decode_worker.py | 620 +++++ tests/spec_decode/test_utils.py | 111 + tests/spec_decode/utils.py | 223 ++ tests/tensorizer_loader/__init__.py | 0 .../tensorize_vllm_model_for_testing.py | 245 ++ tests/tensorizer_loader/test_tensorizer.py | 327 +++ tests/test_cache_block_hashing.py | 93 + tests/test_config.py | 39 + tests/test_logger.py | 214 ++ tests/test_logits_processor.py | 103 + tests/test_regression.py | 58 + tests/test_sampling_params.py | 13 + tests/test_sequence.py | 124 + tests/tokenization/__init__.py | 0 tests/tokenization/test_cached_tokenizer.py | 22 + tests/tokenization/test_detokenize.py | 208 ++ tests/tokenization/test_tokenizer.py | 20 + tests/tokenization/test_tokenizer_group.py | 102 + tests/worker/__init__.py | 0 tests/worker/test_model_runner.py | 357 +++ tests/worker/test_swap.py | 89 + vllm/__init__.py | 25 + vllm/_custom_ops.py | 251 ++ vllm/attention/__init__.py | 13 + vllm/attention/backends/__init__.py | 0 vllm/attention/backends/abstract.py | 127 + 
vllm/attention/backends/flash_attn.py | 283 +++ vllm/attention/backends/flashinfer.py | 220 ++ vllm/attention/backends/rocm_flash_attn.py | 374 +++ vllm/attention/backends/torch_sdpa.py | 253 ++ vllm/attention/backends/xformers.py | 393 ++++ vllm/attention/layer.py | 56 + vllm/attention/ops/__init__.py | 0 vllm/attention/ops/paged_attn.py | 216 ++ vllm/attention/ops/prefix_prefill.py | 792 +++++++ vllm/attention/ops/triton_flash_attention.py | 810 +++++++ vllm/attention/selector.py | 94 + vllm/block.py | 84 + vllm/config.py | 1225 ++++++++++ vllm/core/__init__.py | 0 vllm/core/block/__init__.py | 0 vllm/core/block/block_table.py | 295 +++ vllm/core/block/common.py | 199 ++ vllm/core/block/cpu_gpu_block_allocator.py | 228 ++ vllm/core/block/interfaces.py | 205 ++ vllm/core/block/naive_block.py | 318 +++ vllm/core/block/prefix_caching_block.py | 606 +++++ vllm/core/block_manager_v1.py | 625 +++++ vllm/core/block_manager_v2.py | 258 ++ vllm/core/evictor_v1.py | 105 + vllm/core/evictor_v2.py | 127 + vllm/core/interfaces.py | 113 + vllm/core/policy.py | 45 + vllm/core/scheduler.py | 1163 +++++++++ vllm/distributed/__init__.py | 3 + vllm/distributed/communication_op.py | 237 ++ .../device_communicators/__init__.py | 0 .../device_communicators/custom_all_reduce.py | 274 +++ .../device_communicators/pymccl.py | 284 +++ .../device_communicators/pymccl_utils.py | 66 + .../device_communicators/pynccl.py | 287 +++ vllm/distributed/parallel_state.py | 341 +++ vllm/distributed/utils.py | 137 ++ vllm/engine/__init__.py | 0 vllm/engine/arg_utils.py | 649 ++++++ vllm/engine/async_llm_engine.py | 737 ++++++ vllm/engine/llm_engine.py | 784 +++++++ vllm/engine/metrics.py | 368 +++ vllm/engine/output_processor/__init__.py | 0 vllm/engine/output_processor/interfaces.py | 76 + vllm/engine/output_processor/multi_step.py | 142 ++ vllm/engine/output_processor/single_step.py | 284 +++ vllm/engine/output_processor/stop_checker.py | 101 + vllm/engine/output_processor/util.py | 19 + 
vllm/entrypoints/__init__.py | 0 vllm/entrypoints/api_server.py | 119 + vllm/entrypoints/llm.py | 259 ++ vllm/entrypoints/openai/__init__.py | 0 vllm/entrypoints/openai/api_server.py | 186 ++ vllm/entrypoints/openai/cli_args.py | 115 + vllm/entrypoints/openai/protocol.py | 460 ++++ vllm/entrypoints/openai/serving_chat.py | 392 ++++ vllm/entrypoints/openai/serving_completion.py | 347 +++ vllm/entrypoints/openai/serving_engine.py | 234 ++ vllm/envs.py | 217 ++ vllm/executor/__init__.py | 0 vllm/executor/cpu_executor.py | 152 ++ vllm/executor/distributed_gpu_executor.py | 115 + vllm/executor/executor_base.py | 115 + vllm/executor/gpu_executor.py | 150 ++ vllm/executor/multiproc_worker_utils.py | 263 +++ vllm/executor/neuron_executor.py | 91 + vllm/executor/ray_gpu_executor.py | 327 +++ vllm/executor/ray_utils.py | 119 + vllm/logger.py | 153 ++ vllm/logging/__init__.py | 5 + vllm/logging/formatter.py | 15 + vllm/lora/__init__.py | 0 vllm/lora/fully_sharded_layers.py | 262 +++ vllm/lora/layers.py | 1181 ++++++++++ vllm/lora/lora.py | 167 ++ vllm/lora/models.py | 645 +++++ vllm/lora/punica.py | 213 ++ vllm/lora/request.py | 32 + vllm/lora/utils.py | 98 + vllm/lora/worker_manager.py | 251 ++ vllm/model_executor/__init__.py | 7 + .../guided_decoding/__init__.py | 25 + .../lm_format_enforcer_decoding.py | 70 + .../guided_decoding/outlines_decoding.py | 130 ++ .../outlines_logits_processors.py | 184 ++ vllm/model_executor/layers/__init__.py | 0 vllm/model_executor/layers/activation.py | 173 ++ .../layers/fused_moe/__init__.py | 7 + ...344,device_name=NVIDIA_A100-SXM4-40GB.json | 146 ++ ...344,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...344,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ ...688,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...688,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ ...792,device_name=NVIDIA_A100-SXM4-40GB.json | 146 ++ ...792,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...792,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ 
...048,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...048,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ ...584,device_name=NVIDIA_A100-SXM4-40GB.json | 146 ++ ...584,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...me=NVIDIA_H100_80GB_HBM3,dtype=float8.json | 140 ++ ...584,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ ...096,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...096,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ ...168,device_name=NVIDIA_A100-SXM4-80GB.json | 146 ++ ...me=NVIDIA_H100_80GB_HBM3,dtype=float8.json | 146 ++ ...168,device_name=NVIDIA_H100_80GB_HBM3.json | 146 ++ .../layers/fused_moe/configs/README | 10 + .../layers/fused_moe/fused_moe.py | 479 ++++ vllm/model_executor/layers/layernorm.py | 71 + vllm/model_executor/layers/linear.py | 709 ++++++ .../model_executor/layers/logits_processor.py | 115 + vllm/model_executor/layers/ops/__init__.py | 0 vllm/model_executor/layers/ops/rand.py | 157 ++ vllm/model_executor/layers/ops/sample.py | 406 ++++ .../layers/quantization/__init__.py | 35 + .../layers/quantization/aqlm.py | 376 +++ .../model_executor/layers/quantization/awq.py | 175 ++ .../layers/quantization/base_config.py | 97 + .../model_executor/layers/quantization/fp8.py | 265 +++ .../layers/quantization/gptq.py | 224 ++ .../layers/quantization/gptq_marlin.py | 438 ++++ .../layers/quantization/marlin.py | 227 ++ .../layers/quantization/schema.py | 84 + .../layers/quantization/squeezellm.py | 137 ++ .../layers/rejection_sampler.py | 405 ++++ .../model_executor/layers/rotary_embedding.py | 531 +++++ vllm/model_executor/layers/sampler.py | 1051 +++++++++ .../layers/vocab_parallel_embedding.py | 155 ++ vllm/model_executor/model_loader/__init__.py | 30 + vllm/model_executor/model_loader/loader.py | 362 +++ vllm/model_executor/model_loader/neuron.py | 136 ++ .../model_executor/model_loader/tensorizer.py | 368 +++ vllm/model_executor/model_loader/utils.py | 41 + .../model_loader/weight_utils.py | 372 +++ 
vllm/model_executor/models/__init__.py | 119 + vllm/model_executor/models/baichuan.py | 410 ++++ vllm/model_executor/models/bloom.py | 327 +++ vllm/model_executor/models/chatglm.py | 386 +++ vllm/model_executor/models/commandr.py | 373 +++ vllm/model_executor/models/dbrx.py | 413 ++++ vllm/model_executor/models/decilm.py | 122 + vllm/model_executor/models/deepseek.py | 438 ++++ vllm/model_executor/models/falcon.py | 444 ++++ vllm/model_executor/models/gemma.py | 394 ++++ vllm/model_executor/models/gpt2.py | 267 +++ vllm/model_executor/models/gpt_bigcode.py | 275 +++ vllm/model_executor/models/gpt_j.py | 281 +++ vllm/model_executor/models/gpt_neox.py | 295 +++ vllm/model_executor/models/internlm2.py | 323 +++ vllm/model_executor/models/jais.py | 333 +++ vllm/model_executor/models/llama.py | 442 ++++ vllm/model_executor/models/llava.py | 239 ++ vllm/model_executor/models/minicpm.py | 531 +++++ vllm/model_executor/models/mixtral.py | 583 +++++ vllm/model_executor/models/mixtral_quant.py | 404 ++++ vllm/model_executor/models/mpt.py | 295 +++ vllm/model_executor/models/olmo.py | 356 +++ vllm/model_executor/models/opt.py | 349 +++ vllm/model_executor/models/orion.py | 320 +++ vllm/model_executor/models/phi.py | 301 +++ vllm/model_executor/models/qwen.py | 285 +++ vllm/model_executor/models/qwen2.py | 367 +++ vllm/model_executor/models/qwen2_moe.py | 447 ++++ vllm/model_executor/models/stablelm.py | 301 +++ vllm/model_executor/models/starcoder2.py | 302 +++ vllm/model_executor/models/xverse.py | 366 +++ vllm/model_executor/sampling_metadata.py | 588 +++++ vllm/model_executor/utils.py | 37 + vllm/outputs.py | 150 ++ vllm/py.typed | 2 + vllm/sampling_params.py | 340 +++ vllm/sequence.py | 766 ++++++ vllm/spec_decode/__init__.py | 0 vllm/spec_decode/batch_expansion.py | 397 ++++ vllm/spec_decode/interfaces.py | 73 + vllm/spec_decode/metrics.py | 191 ++ vllm/spec_decode/multi_step_worker.py | 203 ++ vllm/spec_decode/ngram_worker.py | 176 ++ 
vllm/spec_decode/spec_decode_worker.py | 472 ++++ vllm/spec_decode/top1_proposer.py | 200 ++ vllm/spec_decode/util.py | 228 ++ vllm/test_utils.py | 41 + vllm/transformers_utils/__init__.py | 0 vllm/transformers_utils/config.py | 58 + vllm/transformers_utils/configs/__init__.py | 16 + vllm/transformers_utils/configs/chatglm.py | 68 + vllm/transformers_utils/configs/dbrx.py | 278 +++ vllm/transformers_utils/configs/falcon.py | 87 + vllm/transformers_utils/configs/jais.py | 237 ++ vllm/transformers_utils/configs/mpt.py | 178 ++ vllm/transformers_utils/detokenizer.py | 313 +++ vllm/transformers_utils/tokenizer.py | 149 ++ .../tokenizer_group/__init__.py | 33 + .../tokenizer_group/base_tokenizer_group.py | 55 + .../tokenizer_group/ray_tokenizer_group.py | 169 ++ .../tokenizer_group/tokenizer_group.py | 78 + .../transformers_utils/tokenizers/__init__.py | 5 + .../transformers_utils/tokenizers/baichuan.py | 256 ++ vllm/usage/__init__.py | 0 vllm/usage/usage_lib.py | 209 ++ vllm/utils.py | 728 ++++++ vllm/worker/__init__.py | 0 vllm/worker/cache_engine.py | 105 + vllm/worker/cpu_model_runner.py | 346 +++ vllm/worker/cpu_worker.py | 321 +++ vllm/worker/model_runner.py | 1172 ++++++++++ vllm/worker/neuron_model_runner.py | 196 ++ vllm/worker/neuron_worker.py | 98 + vllm/worker/worker.py | 366 +++ vllm/worker/worker_base.py | 146 ++ 538 files changed, 105693 insertions(+), 2 deletions(-) create mode 100644 CMakeLists.txt create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile.cpu create mode 100644 Dockerfile.neuron create mode 100644 Dockerfile.rocm create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README_vllm_musa.md create mode 100644 benchmarks/README.md create mode 100644 benchmarks/backend_request_func.py create mode 100644 benchmarks/benchmark_latency.py create mode 100644 benchmarks/benchmark_prefix_caching.py create mode 100644 benchmarks/benchmark_serving.py create mode 100644 benchmarks/benchmark_throughput.py create mode 100644 
benchmarks/kernels/benchmark_aqlm.py create mode 100644 benchmarks/kernels/benchmark_mixtral_moe.py create mode 100644 benchmarks/kernels/benchmark_paged_attention.py create mode 100644 benchmarks/kernels/benchmark_rope.py create mode 100755 benchmarks/launch_tgi_server.sh create mode 100644 benchmarks/sonnet.txt create mode 100644 build_musa.sh create mode 100644 cmake/cpu_extension.cmake create mode 100755 cmake/hipify.py create mode 100644 cmake/utils.cmake create mode 100644 collect_env.py create mode 100644 csrc_musa/activation_kernels.mu create mode 100644 csrc_musa/attention/attention_dtypes.h create mode 100644 csrc_musa/attention/attention_generic.muh create mode 100644 csrc_musa/attention/attention_kernels.mu create mode 100644 csrc_musa/attention/attention_utils.muh create mode 100644 csrc_musa/attention/dtype_bfloat16.muh create mode 100644 csrc_musa/attention/dtype_float16.muh create mode 100644 csrc_musa/attention/dtype_float32.muh create mode 100644 csrc_musa/attention/dtype_fp8.muh create mode 100644 csrc_musa/cache.h create mode 100644 csrc_musa/cache_kernels.mu create mode 100644 csrc_musa/cpu/activation.cpp create mode 100644 csrc_musa/cpu/attention.cpp create mode 100644 csrc_musa/cpu/cache.cpp create mode 100644 csrc_musa/cpu/cpu_types.hpp create mode 100644 csrc_musa/cpu/layernorm.cpp create mode 100644 csrc_musa/cpu/pos_encoding.cpp create mode 100644 csrc_musa/cpu/pybind.cpp create mode 100644 csrc_musa/custom_all_reduce.mu create mode 100644 csrc_musa/custom_all_reduce.muh create mode 100644 csrc_musa/custom_all_reduce_test.mu create mode 100644 csrc_musa/dispatch_utils.h create mode 100644 csrc_musa/layernorm_kernels.mu create mode 100644 csrc_musa/moe/moe_ops.cpp create mode 100644 csrc_musa/moe/moe_ops.h create mode 100644 csrc_musa/moe/topk_softmax_kernels.mu create mode 100644 csrc_musa/moe_align_block_size_kernels.mu create mode 100644 csrc_musa/musa_compat.h create mode 100644 csrc_musa/musa_utils.h create mode 100644 
csrc_musa/musa_utils_kernels.mu create mode 100644 csrc_musa/ops.h create mode 100644 csrc_musa/pos_encoding_kernels.mu create mode 100644 csrc_musa/punica/.LICENSE create mode 100644 csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_config.h create mode 100644 csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu create mode 100644 csrc_musa/punica/bgmv/bgmv_impl.muh create mode 100644 csrc_musa/punica/bgmv/generator.py create mode 100644 csrc_musa/punica/bgmv/vec_dtypes.muh create mode 100644 csrc_musa/punica/punica_ops.cc create mode 100644 csrc_musa/pybind.cpp create mode 100644 csrc_musa/quantization/aqlm/gemm_kernels.mu create mode 100644 csrc_musa/quantization/awq/dequantize.muh create mode 100644 csrc_musa/quantization/awq/gemm_kernels.mu create mode 100644 csrc_musa/quantization/fp8/amd_detail/hip_float8.h create mode 100644 csrc_musa/quantization/fp8/amd_detail/hip_float8_impl.h create mode 100644 csrc_musa/quantization/fp8/amd_detail/quant_utils.muh create mode 100644 csrc_musa/quantization/fp8/fp8_cuda_kernels.mu create mode 100644 csrc_musa/quantization/fp8_e5m2_kvcache/quant_utils.muh create mode 100644 csrc_musa/quantization/gptq/compat.muh create mode 100644 csrc_musa/quantization/gptq/matrix_view.muh create mode 100644 csrc_musa/quantization/gptq/q_gemm.mu create mode 100644 csrc_musa/quantization/gptq/qdq_2.muh create mode 100644 csrc_musa/quantization/gptq/qdq_3.muh create mode 100644 csrc_musa/quantization/gptq/qdq_4.muh create mode 100644 csrc_musa/quantization/gptq/qdq_8.muh create mode 100644 csrc_musa/quantization/gptq/qdq_util.muh create mode 100644 csrc_musa/quantization/gptq_marlin/gptq_marlin.mu create mode 100644 
csrc_musa/quantization/gptq_marlin/gptq_marlin.muh create mode 100644 csrc_musa/quantization/gptq_marlin/gptq_marlin_repack.mu create mode 100644 csrc_musa/quantization/marlin/.LICENSE create mode 100644 csrc_musa/quantization/marlin/marlin_cuda_kernel.mu create mode 100644 csrc_musa/quantization/squeezellm/quant_cuda_kernel.mu create mode 100644 csrc_musa/reduction_utils.muh create mode 100644 docs/Makefile create mode 100644 docs/README.md create mode 100644 docs/make.bat create mode 100644 docs/requirements-docs.txt create mode 100644 docs/source/assets/dev/dockerfile-stages-dependency.png create mode 100644 docs/source/assets/kernel/k_vecs.png create mode 100644 docs/source/assets/kernel/key.png create mode 100644 docs/source/assets/kernel/logits_vec.png create mode 100644 docs/source/assets/kernel/q_vecs.png create mode 100644 docs/source/assets/kernel/query.png create mode 100644 docs/source/assets/kernel/v_vec.png create mode 100644 docs/source/assets/kernel/value.png create mode 100644 docs/source/assets/logos/vllm-logo-only-light.png create mode 100644 docs/source/assets/logos/vllm-logo-text-dark.png create mode 100644 docs/source/assets/logos/vllm-logo-text-light.png create mode 100644 docs/source/conf.py create mode 100644 docs/source/dev/dockerfile/dockerfile.rst create mode 100644 docs/source/dev/engine/async_llm_engine.rst create mode 100644 docs/source/dev/engine/engine_index.rst create mode 100644 docs/source/dev/engine/llm_engine.rst create mode 100644 docs/source/dev/kernel/paged_attention.rst create mode 100644 docs/source/dev/sampling_params.rst create mode 100644 docs/source/generate_examples.py create mode 100644 docs/source/getting_started/amd-installation.rst create mode 100644 docs/source/getting_started/cpu-installation.rst create mode 100644 docs/source/getting_started/examples/examples_index.template.rst create mode 100644 docs/source/getting_started/installation.rst create mode 100644 docs/source/getting_started/neuron-installation.rst 
create mode 100644 docs/source/getting_started/quickstart.rst create mode 100644 docs/source/index.rst create mode 100644 docs/source/models/adding_model.rst create mode 100644 docs/source/models/engine_args.rst create mode 100644 docs/source/models/lora.rst create mode 100644 docs/source/models/performance.rst create mode 100644 docs/source/models/supported_models.rst create mode 100644 docs/source/quantization/auto_awq.rst create mode 100644 docs/source/quantization/fp8_e4m3_kvcache.rst create mode 100644 docs/source/quantization/fp8_e5m2_kvcache.rst create mode 100644 docs/source/serving/deploying_with_bentoml.rst create mode 100644 docs/source/serving/deploying_with_docker.rst create mode 100644 docs/source/serving/deploying_with_kserve.rst create mode 100644 docs/source/serving/deploying_with_triton.rst create mode 100644 docs/source/serving/distributed_serving.rst create mode 100644 docs/source/serving/env_vars.rst create mode 100644 docs/source/serving/integrations.rst create mode 100644 docs/source/serving/metrics.rst create mode 100644 docs/source/serving/openai_compatible_server.md create mode 100644 docs/source/serving/run_on_sky.rst create mode 100644 docs/source/serving/serving_with_langchain.rst create mode 100644 docs/source/serving/usage_stats.md create mode 100644 examples/api_client.py create mode 100644 examples/aqlm_example.py create mode 100644 examples/fp8/README.md create mode 100644 examples/fp8/extract_scales.py create mode 100644 examples/fp8/quantizer/README.md create mode 100644 examples/fp8/quantizer/quantize.py create mode 100644 examples/gradio_openai_chatbot_webserver.py create mode 100644 examples/gradio_webserver.py create mode 100644 examples/llava_example.py create mode 100644 examples/llm_engine_example.py create mode 100644 examples/logging_configuration.md create mode 100644 examples/multilora_inference.py create mode 100644 examples/offline_inference.py create mode 100644 examples/offline_inference_distributed.py create mode 
100755 examples/offline_inference_neuron.py create mode 100644 examples/offline_inference_with_prefix.py create mode 100644 examples/openai_chat_completion_client.py create mode 100644 examples/openai_completion_client.py create mode 100644 examples/production_monitoring/README.md create mode 100644 examples/production_monitoring/docker-compose.yaml create mode 100644 examples/production_monitoring/grafana.json create mode 100644 examples/production_monitoring/prometheus.yaml create mode 100644 examples/template_alpaca.jinja create mode 100644 examples/template_baichuan.jinja create mode 100644 examples/template_chatglm.jinja create mode 100644 examples/template_chatglm2.jinja create mode 100644 examples/template_chatml.jinja create mode 100644 examples/template_falcon.jinja create mode 100644 examples/template_falcon_180b.jinja create mode 100644 examples/template_inkbot.jinja create mode 100644 examples/tensorize_vllm_model.py create mode 100755 format.sh create mode 100644 musa_porting.py create mode 100644 pyproject.toml create mode 100644 requirements-build.txt create mode 100644 requirements-common.txt create mode 100644 requirements-cpu.txt create mode 100644 requirements-cuda.txt create mode 100644 requirements-dev.txt create mode 100644 requirements-musa.txt create mode 100644 requirements-neuron.txt create mode 100644 requirements-rocm.txt create mode 100644 rocm_patch/rocm_bf16.patch create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/async_engine/api_server_async_engine.py create mode 100644 tests/async_engine/test_api_server.py create mode 100644 tests/async_engine/test_async_llm_engine.py create mode 100644 tests/async_engine/test_chat_template.py create mode 100644 tests/async_engine/test_merge_async_iterators.py create mode 100644 tests/async_engine/test_openapi_server_ray.py create mode 100644 tests/async_engine/test_request_tracker.py create mode 100644 tests/basic_correctness/test_basic_correctness.py create 
mode 100644 tests/basic_correctness/test_chunked_prefill.py create mode 100644 tests/basic_correctness/test_preemption.py create mode 100644 tests/conftest.py create mode 100644 tests/core/__init__.py create mode 100644 tests/core/block/__init__.py create mode 100644 tests/core/block/conftest.py create mode 100644 tests/core/block/e2e/conftest.py create mode 100644 tests/core/block/e2e/test_correctness.py create mode 100644 tests/core/block/test_block_manager_v2.py create mode 100644 tests/core/block/test_block_table.py create mode 100644 tests/core/block/test_common.py create mode 100644 tests/core/block/test_cpu_gpu_block_allocator.py create mode 100644 tests/core/block/test_naive_block.py create mode 100644 tests/core/block/test_prefix_caching_block.py create mode 100644 tests/core/test_block_manager.py create mode 100644 tests/core/test_chunked_prefill_scheduler.py create mode 100644 tests/core/test_scheduler.py create mode 100644 tests/core/utils.py create mode 100644 tests/distributed/test_basic_distributed_correctness.py create mode 100644 tests/distributed/test_chunked_prefill_distributed.py create mode 100644 tests/distributed/test_comm_ops.py create mode 100644 tests/distributed/test_custom_all_reduce.py create mode 100644 tests/distributed/test_pynccl.py create mode 100644 tests/distributed/test_pynccl_library.py create mode 100644 tests/engine/output_processor/test_multi_step.py create mode 100644 tests/engine/test_computed_prefix_blocks.py create mode 100644 tests/engine/test_detokenization.py create mode 100644 tests/engine/test_multiproc_workers.py create mode 100644 tests/engine/test_skip_tokenizer_init.py create mode 100644 tests/engine/test_stop_reason.py create mode 100644 tests/engine/test_stop_strings.py create mode 100644 tests/entrypoints/openai/test_serving_chat.py create mode 100644 tests/entrypoints/test_guided_processors.py create mode 100644 tests/entrypoints/test_llm_generate.py create mode 100644 tests/entrypoints/test_openai_server.py 
create mode 100644 tests/entrypoints/test_server_oot_registration.py create mode 100644 tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json create mode 100644 tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json create mode 100644 tests/kernels/allclose_default.py create mode 100644 tests/kernels/conftest.py create mode 100644 tests/kernels/test_activation.py create mode 100644 tests/kernels/test_attention.py create mode 100644 tests/kernels/test_cache.py create mode 100644 tests/kernels/test_layernorm.py create mode 100644 tests/kernels/test_moe.py create mode 100644 tests/kernels/test_pos_encoding.py create mode 100644 tests/kernels/test_prefix_prefill.py create mode 100644 tests/kernels/test_rand.py create mode 100644 tests/kernels/test_sampler.py create mode 100644 tests/lora/__init__.py create mode 100644 tests/lora/conftest.py create mode 100644 tests/lora/test_baichuan.py create mode 100644 tests/lora/test_chatglm3.py create mode 100644 tests/lora/test_gemma.py create mode 100644 tests/lora/test_layer_variation.py create mode 100644 tests/lora/test_layers.py create mode 100644 tests/lora/test_llama.py create mode 100644 tests/lora/test_lora.py create mode 100644 tests/lora/test_lora_checkpoints.py create mode 100644 tests/lora/test_lora_manager.py create mode 100644 tests/lora/test_mixtral.py create mode 100644 tests/lora/test_punica.py create mode 100644 tests/lora/test_quant_model.py create mode 100644 tests/lora/test_tokenizer_group.py create mode 100644 tests/lora/test_utils.py create mode 100644 tests/lora/test_worker.py create mode 100644 tests/lora/utils.py create mode 100644 tests/metrics/test_metrics.py create mode 100644 tests/model_executor/weight_utils.py create mode 100644 tests/models/test_aqlm.py create mode 100644 tests/models/test_big_models.py create mode 100644 tests/models/test_fp8.py create mode 100644 tests/models/test_gptq_marlin.py create mode 100644 tests/models/test_llava.py create mode 100644 tests/models/test_marlin.py create mode 
100644 tests/models/test_mistral.py create mode 100644 tests/models/test_models.py create mode 100644 tests/models/test_oot_registration.py create mode 100644 tests/models/utils.py create mode 100644 tests/prefix_caching/test_prefix_caching.py create mode 100644 tests/prompts/example.txt create mode 100644 tests/prompts/summary.txt create mode 100644 tests/quantization/test_configs.py create mode 100644 tests/quantization/test_fp8.py create mode 100644 tests/samplers/test_beam_search.py create mode 100644 tests/samplers/test_ignore_eos.py create mode 100644 tests/samplers/test_logits_processor.py create mode 100644 tests/samplers/test_logprobs.py create mode 100644 tests/samplers/test_ranks.py create mode 100644 tests/samplers/test_rejection_sampler.py create mode 100644 tests/samplers/test_sampler.py create mode 100644 tests/samplers/test_seeded_generate.py create mode 100644 tests/spec_decode/__init__.py create mode 100644 tests/spec_decode/e2e/__init__.py create mode 100644 tests/spec_decode/e2e/conftest.py create mode 100644 tests/spec_decode/e2e/test_compatibility.py create mode 100644 tests/spec_decode/e2e/test_logprobs.py create mode 100644 tests/spec_decode/e2e/test_multistep_correctness.py create mode 100644 tests/spec_decode/e2e/test_ngram_correctness.py create mode 100644 tests/spec_decode/test_batch_expansion.py create mode 100644 tests/spec_decode/test_metrics.py create mode 100644 tests/spec_decode/test_multi_step_worker.py create mode 100644 tests/spec_decode/test_ngram_worker.py create mode 100644 tests/spec_decode/test_spec_decode_worker.py create mode 100644 tests/spec_decode/test_utils.py create mode 100644 tests/spec_decode/utils.py create mode 100644 tests/tensorizer_loader/__init__.py create mode 100644 tests/tensorizer_loader/tensorize_vllm_model_for_testing.py create mode 100644 tests/tensorizer_loader/test_tensorizer.py create mode 100644 tests/test_cache_block_hashing.py create mode 100644 tests/test_config.py create mode 100644 
tests/test_logger.py create mode 100644 tests/test_logits_processor.py create mode 100644 tests/test_regression.py create mode 100644 tests/test_sampling_params.py create mode 100644 tests/test_sequence.py create mode 100644 tests/tokenization/__init__.py create mode 100644 tests/tokenization/test_cached_tokenizer.py create mode 100644 tests/tokenization/test_detokenize.py create mode 100644 tests/tokenization/test_tokenizer.py create mode 100644 tests/tokenization/test_tokenizer_group.py create mode 100644 tests/worker/__init__.py create mode 100644 tests/worker/test_model_runner.py create mode 100644 tests/worker/test_swap.py create mode 100644 vllm/__init__.py create mode 100644 vllm/_custom_ops.py create mode 100644 vllm/attention/__init__.py create mode 100644 vllm/attention/backends/__init__.py create mode 100644 vllm/attention/backends/abstract.py create mode 100644 vllm/attention/backends/flash_attn.py create mode 100644 vllm/attention/backends/flashinfer.py create mode 100644 vllm/attention/backends/rocm_flash_attn.py create mode 100644 vllm/attention/backends/torch_sdpa.py create mode 100644 vllm/attention/backends/xformers.py create mode 100644 vllm/attention/layer.py create mode 100644 vllm/attention/ops/__init__.py create mode 100644 vllm/attention/ops/paged_attn.py create mode 100644 vllm/attention/ops/prefix_prefill.py create mode 100644 vllm/attention/ops/triton_flash_attention.py create mode 100644 vllm/attention/selector.py create mode 100644 vllm/block.py create mode 100644 vllm/config.py create mode 100644 vllm/core/__init__.py create mode 100644 vllm/core/block/__init__.py create mode 100644 vllm/core/block/block_table.py create mode 100644 vllm/core/block/common.py create mode 100644 vllm/core/block/cpu_gpu_block_allocator.py create mode 100644 vllm/core/block/interfaces.py create mode 100644 vllm/core/block/naive_block.py create mode 100644 vllm/core/block/prefix_caching_block.py create mode 100644 vllm/core/block_manager_v1.py create mode 
100644 vllm/core/block_manager_v2.py create mode 100644 vllm/core/evictor_v1.py create mode 100644 vllm/core/evictor_v2.py create mode 100644 vllm/core/interfaces.py create mode 100644 vllm/core/policy.py create mode 100644 vllm/core/scheduler.py create mode 100644 vllm/distributed/__init__.py create mode 100644 vllm/distributed/communication_op.py create mode 100644 vllm/distributed/device_communicators/__init__.py create mode 100644 vllm/distributed/device_communicators/custom_all_reduce.py create mode 100644 vllm/distributed/device_communicators/pymccl.py create mode 100644 vllm/distributed/device_communicators/pymccl_utils.py create mode 100644 vllm/distributed/device_communicators/pynccl.py create mode 100644 vllm/distributed/parallel_state.py create mode 100644 vllm/distributed/utils.py create mode 100644 vllm/engine/__init__.py create mode 100644 vllm/engine/arg_utils.py create mode 100644 vllm/engine/async_llm_engine.py create mode 100644 vllm/engine/llm_engine.py create mode 100644 vllm/engine/metrics.py create mode 100644 vllm/engine/output_processor/__init__.py create mode 100644 vllm/engine/output_processor/interfaces.py create mode 100644 vllm/engine/output_processor/multi_step.py create mode 100644 vllm/engine/output_processor/single_step.py create mode 100644 vllm/engine/output_processor/stop_checker.py create mode 100644 vllm/engine/output_processor/util.py create mode 100644 vllm/entrypoints/__init__.py create mode 100644 vllm/entrypoints/api_server.py create mode 100644 vllm/entrypoints/llm.py create mode 100644 vllm/entrypoints/openai/__init__.py create mode 100644 vllm/entrypoints/openai/api_server.py create mode 100644 vllm/entrypoints/openai/cli_args.py create mode 100644 vllm/entrypoints/openai/protocol.py create mode 100644 vllm/entrypoints/openai/serving_chat.py create mode 100644 vllm/entrypoints/openai/serving_completion.py create mode 100644 vllm/entrypoints/openai/serving_engine.py create mode 100644 vllm/envs.py create mode 100644 
vllm/executor/__init__.py create mode 100644 vllm/executor/cpu_executor.py create mode 100644 vllm/executor/distributed_gpu_executor.py create mode 100644 vllm/executor/executor_base.py create mode 100644 vllm/executor/gpu_executor.py create mode 100644 vllm/executor/multiproc_worker_utils.py create mode 100644 vllm/executor/neuron_executor.py create mode 100644 vllm/executor/ray_gpu_executor.py create mode 100644 vllm/executor/ray_utils.py create mode 100644 vllm/logger.py create mode 100644 vllm/logging/__init__.py create mode 100644 vllm/logging/formatter.py create mode 100644 vllm/lora/__init__.py create mode 100644 vllm/lora/fully_sharded_layers.py create mode 100644 vllm/lora/layers.py create mode 100644 vllm/lora/lora.py create mode 100644 vllm/lora/models.py create mode 100644 vllm/lora/punica.py create mode 100644 vllm/lora/request.py create mode 100644 vllm/lora/utils.py create mode 100644 vllm/lora/worker_manager.py create mode 100644 vllm/model_executor/__init__.py create mode 100644 vllm/model_executor/guided_decoding/__init__.py create mode 100644 vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py create mode 100644 vllm/model_executor/guided_decoding/outlines_decoding.py create mode 100644 vllm/model_executor/guided_decoding/outlines_logits_processors.py create mode 100644 vllm/model_executor/layers/__init__.py create mode 100644 vllm/model_executor/layers/activation.py create mode 100644 vllm/model_executor/layers/fused_moe/__init__.py create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 
vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json create mode 100644 vllm/model_executor/layers/fused_moe/configs/README create mode 100644 vllm/model_executor/layers/fused_moe/fused_moe.py create mode 100644 vllm/model_executor/layers/layernorm.py create mode 100644 vllm/model_executor/layers/linear.py create mode 100644 
vllm/model_executor/layers/logits_processor.py create mode 100644 vllm/model_executor/layers/ops/__init__.py create mode 100644 vllm/model_executor/layers/ops/rand.py create mode 100644 vllm/model_executor/layers/ops/sample.py create mode 100644 vllm/model_executor/layers/quantization/__init__.py create mode 100644 vllm/model_executor/layers/quantization/aqlm.py create mode 100644 vllm/model_executor/layers/quantization/awq.py create mode 100644 vllm/model_executor/layers/quantization/base_config.py create mode 100644 vllm/model_executor/layers/quantization/fp8.py create mode 100644 vllm/model_executor/layers/quantization/gptq.py create mode 100644 vllm/model_executor/layers/quantization/gptq_marlin.py create mode 100644 vllm/model_executor/layers/quantization/marlin.py create mode 100644 vllm/model_executor/layers/quantization/schema.py create mode 100644 vllm/model_executor/layers/quantization/squeezellm.py create mode 100644 vllm/model_executor/layers/rejection_sampler.py create mode 100644 vllm/model_executor/layers/rotary_embedding.py create mode 100644 vllm/model_executor/layers/sampler.py create mode 100644 vllm/model_executor/layers/vocab_parallel_embedding.py create mode 100644 vllm/model_executor/model_loader/__init__.py create mode 100644 vllm/model_executor/model_loader/loader.py create mode 100644 vllm/model_executor/model_loader/neuron.py create mode 100644 vllm/model_executor/model_loader/tensorizer.py create mode 100644 vllm/model_executor/model_loader/utils.py create mode 100644 vllm/model_executor/model_loader/weight_utils.py create mode 100755 vllm/model_executor/models/__init__.py create mode 100644 vllm/model_executor/models/baichuan.py create mode 100644 vllm/model_executor/models/bloom.py create mode 100644 vllm/model_executor/models/chatglm.py create mode 100644 vllm/model_executor/models/commandr.py create mode 100644 vllm/model_executor/models/dbrx.py create mode 100644 vllm/model_executor/models/decilm.py create mode 100644 
vllm/model_executor/models/deepseek.py create mode 100644 vllm/model_executor/models/falcon.py create mode 100644 vllm/model_executor/models/gemma.py create mode 100644 vllm/model_executor/models/gpt2.py create mode 100644 vllm/model_executor/models/gpt_bigcode.py create mode 100644 vllm/model_executor/models/gpt_j.py create mode 100644 vllm/model_executor/models/gpt_neox.py create mode 100644 vllm/model_executor/models/internlm2.py create mode 100644 vllm/model_executor/models/jais.py create mode 100644 vllm/model_executor/models/llama.py create mode 100644 vllm/model_executor/models/llava.py create mode 100644 vllm/model_executor/models/minicpm.py create mode 100644 vllm/model_executor/models/mixtral.py create mode 100644 vllm/model_executor/models/mixtral_quant.py create mode 100644 vllm/model_executor/models/mpt.py create mode 100644 vllm/model_executor/models/olmo.py create mode 100644 vllm/model_executor/models/opt.py create mode 100644 vllm/model_executor/models/orion.py create mode 100644 vllm/model_executor/models/phi.py create mode 100644 vllm/model_executor/models/qwen.py create mode 100644 vllm/model_executor/models/qwen2.py create mode 100644 vllm/model_executor/models/qwen2_moe.py create mode 100644 vllm/model_executor/models/stablelm.py create mode 100644 vllm/model_executor/models/starcoder2.py create mode 100644 vllm/model_executor/models/xverse.py create mode 100644 vllm/model_executor/sampling_metadata.py create mode 100644 vllm/model_executor/utils.py create mode 100644 vllm/outputs.py create mode 100644 vllm/py.typed create mode 100644 vllm/sampling_params.py create mode 100644 vllm/sequence.py create mode 100644 vllm/spec_decode/__init__.py create mode 100644 vllm/spec_decode/batch_expansion.py create mode 100644 vllm/spec_decode/interfaces.py create mode 100644 vllm/spec_decode/metrics.py create mode 100644 vllm/spec_decode/multi_step_worker.py create mode 100644 vllm/spec_decode/ngram_worker.py create mode 100644 
vllm/spec_decode/spec_decode_worker.py create mode 100644 vllm/spec_decode/top1_proposer.py create mode 100644 vllm/spec_decode/util.py create mode 100644 vllm/test_utils.py create mode 100644 vllm/transformers_utils/__init__.py create mode 100644 vllm/transformers_utils/config.py create mode 100644 vllm/transformers_utils/configs/__init__.py create mode 100644 vllm/transformers_utils/configs/chatglm.py create mode 100644 vllm/transformers_utils/configs/dbrx.py create mode 100644 vllm/transformers_utils/configs/falcon.py create mode 100644 vllm/transformers_utils/configs/jais.py create mode 100644 vllm/transformers_utils/configs/mpt.py create mode 100644 vllm/transformers_utils/detokenizer.py create mode 100644 vllm/transformers_utils/tokenizer.py create mode 100644 vllm/transformers_utils/tokenizer_group/__init__.py create mode 100644 vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py create mode 100644 vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py create mode 100644 vllm/transformers_utils/tokenizer_group/tokenizer_group.py create mode 100644 vllm/transformers_utils/tokenizers/__init__.py create mode 100644 vllm/transformers_utils/tokenizers/baichuan.py create mode 100644 vllm/usage/__init__.py create mode 100644 vllm/usage/usage_lib.py create mode 100644 vllm/utils.py create mode 100644 vllm/worker/__init__.py create mode 100644 vllm/worker/cache_engine.py create mode 100644 vllm/worker/cpu_model_runner.py create mode 100644 vllm/worker/cpu_worker.py create mode 100644 vllm/worker/model_runner.py create mode 100644 vllm/worker/neuron_model_runner.py create mode 100644 vllm/worker/neuron_worker.py create mode 100644 vllm/worker/worker.py create mode 100644 vllm/worker/worker_base.py diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..44359c0 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,366 @@ +cmake_minimum_required(VERSION 3.21) + +project(vllm_extensions LANGUAGES CXX) + +option(VLLM_TARGET_DEVICE 
"Target device backend for vLLM" "musa") + +message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +message(STATUS "Target device: ${VLLM_TARGET_DEVICE}") + +include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake) + +# +# Supported python versions. These versions will be searched in order, the +# first match will be selected. These should be kept in sync with setup.py. +# +set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11") + +# Supported NVIDIA architectures. +# set(CUDA_SUPPORTED_ARCHS "7.0;7.5;8.0;8.6;8.9;9.0") + +# Supported MUSA architectures. +set(MUSA_SUPPORTED_ARCHS "220") + +# Supported AMD GPU architectures. +# set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100") + +# +# Supported/expected torch versions for CUDA/ROCm. +# +# Currently, having an incorrect pytorch version results in a warning +# rather than an error. +# +# Note: the CUDA torch version is derived from pyproject.toml and various +# requirements.txt files and should be kept consistent. The ROCm torch +# versions are derived from Dockerfile.rocm +# +set(TORCH_SUPPORTED_VERSION_CUDA "2.2.0") +set(TORCH_SUPPORTED_VERSION_ROCM_5X "2.0.1") +set(TORCH_SUPPORTED_VERSION_ROCM_6X "2.1.1") + +# +# Try to find python package with an executable that exactly matches +# `VLLM_PYTHON_EXECUTABLE` and is one of the supported versions. +# +if (VLLM_PYTHON_EXECUTABLE) + find_python_from_executable(${VLLM_PYTHON_EXECUTABLE} "${PYTHON_SUPPORTED_VERSIONS}") +else() + message(FATAL_ERROR + "Please set VLLM_PYTHON_EXECUTABLE to the path of the desired python version" + " before running cmake configure.") +endif() + +# +# Update cmake's `CMAKE_PREFIX_PATH` with torch location. 
+# +append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path") + +include(/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/share/cmake/utils.cmake) + +add_definitions(-DTORCH_MUSA_ARCH=220) +set(MUSA_CSRCS) +set(CMAKE_MODULE_PATH /opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/share/cmake/modules) +set(DEPENDENT_LIBRARIES "") +set(DEPENDENT_INCLUDE_DIRS "") +find_package(MUDNN) + +if(MUDNN_FOUND) + list(APPEND DEPENDENT_INCLUDE_DIRS ${MUDNN_INCLUDE_DIRS}) + list(APPEND DEPENDENT_LIBRARIES ${MUDNN_LIBRARIES}) +else() + message(WARNING " The environment variable MUSA_HOME may be not specified." + "Using default MUDNN PATH: /usr/local/musa") + + list(APPEND DEPENDENT_INCLUDE_DIRS "/usr/local/musa/include") + list(APPEND DEPENDENT_LIBRARIES "/usr/local/musa/lib/libmudnn.so") + set(MUDNN_PATH "/usr/local/musa") + set(MUDNN_LIBRARIES "/usr/local/musa/lib/libmudnn.so") +endif() + +find_package(MUSAToolkits) + +if(MUSAToolkits_FOUND) + list(APPEND DEPENDENT_INCLUDE_DIRS ${MUSAToolkits_INCLUDE_DIRS}) + list(APPEND DEPENDENT_LIBRARIES ${MUSAToolkits_LIBRARIES}) +else() + message(WARNING " The environment variable MUSA_HOME may be not specified." + "Using default MUSATOOLKITS PATH: /usr/local/musa") + + list(APPEND DEPENDENT_INCLUDE_DIRS "/usr/local/musa/include/") + list(APPEND DEPENDENT_LIBRARIES "/usr/local/musa/lib/libmusart.so") + set(ENV{MUSA_HOME} "/usr/local/musa") + set(MUSATOOLKITS_PATH "/usr/local/musa") + set(MUSAToolkits_LIBRARIES "/usr/local/musa/lib/") +endif() + +if(DEFINED PYTHON_INCLUDE_DIR) + include_directories(${PYTHON_INCLUDE_DIR}) +else() + message(FATAL_ERROR, "Cannot find installed Python head file directory") +endif() + +list(APPEND CMAKE_MODULE_PATH $ENV{MUSA_HOME}/cmake) +find_package(MUSA REQUIRED) + +# +# Import torch cmake configuration. +# Torch also imports CUDA (and partially HIP) languages with some customizations, +# so there is no need to do this explicitly with check_language/enable_language, +# etc. 
+# +find_package(Torch REQUIRED) + +# +# Normally `torch.utils.cpp_extension.CUDAExtension` would add +# `libtorch_python.so` for linking against an extension. Torch's cmake +# configuration does not include this library (presumably since the cmake +# config is used for standalone C++ binaries that link against torch). +# The `libtorch_python.so` library defines some of the glue code between +# torch/python via pybind and is required by VLLM extensions for this +# reason. So, add it by manually with `find_library` using torch's +# installed library path. +# +find_library(torch_python_LIBRARY torch_python PATHS + "${TORCH_INSTALL_PREFIX}/lib") + +# +# Forward the non-CUDA device extensions to external CMake scripts. +# +if (NOT VLLM_TARGET_DEVICE STREQUAL "cuda" AND + NOT VLLM_TARGET_DEVICE STREQUAL "musa" AND + NOT VLLM_TARGET_DEVICE STREQUAL "rocm") + if (VLLM_TARGET_DEVICE STREQUAL "cpu") + include(${CMAKE_CURRENT_LIST_DIR}/cmake/cpu_extension.cmake) + else() + message(FATAL_ERROR "Unsupported vLLM target device: ${VLLM_TARGET_DEVICE}") + endif() + return() +endif() + +# +# Set up GPU language and check the torch version and warn if it isn't +# what is expected. +# +if (NOT HIP_FOUND AND MUSA_FOUND) + set(VLLM_GPU_LANG "MUSA") + + if (NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_CUDA}) + message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_CUDA} " + "expected for CUDA build, saw ${Torch_VERSION} instead.") + endif() +elseif(HIP_FOUND) + set(VLLM_GPU_LANG "HIP") + + # Importing torch recognizes and sets up some HIP/ROCm configuration but does + # not let cmake recognize .hip files. In order to get cmake to understand the + # .hip extension automatically, HIP must be enabled explicitly. 
+ enable_language(HIP) + + # ROCm 5.x + if (ROCM_VERSION_DEV_MAJOR EQUAL 5 AND + NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM_5X}) + message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_ROCM_5X} " + "expected for ROCMm 5.x build, saw ${Torch_VERSION} instead.") + endif() + + # ROCm 6.x + if (ROCM_VERSION_DEV_MAJOR EQUAL 6 AND + NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM_6X}) + message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_ROCM_6X} " + "expected for ROCMm 6.x build, saw ${Torch_VERSION} instead.") + endif() +else() + message(FATAL_ERROR "Can't find CUDA or HIP installation.") +endif() + +# +# Override the GPU architectures detected by cmake/torch and filter them by +# the supported versions for the current language. +# The final set of arches is stored in `VLLM_GPU_ARCHES`. +# +# override_gpu_arches(VLLM_GPU_ARCHES +# ${VLLM_GPU_LANG} +# "${${VLLM_GPU_LANG}_SUPPORTED_ARCHS}") + +# +# Query torch for additional GPU compilation flags for the given +# `VLLM_GPU_LANG`. +# The final set of arches is stored in `VLLM_GPU_FLAGS`. +# +get_torch_gpu_compiler_flags(VLLM_GPU_FLAGS ${VLLM_GPU_LANG}) + +# +# Set nvcc parallelism. 
+# +if(NVCC_THREADS AND VLLM_GPU_LANG STREQUAL "CUDA") + list(APPEND VLLM_GPU_FLAGS "--threads=${NVCC_THREADS}") +endif() + +# +# Define extension targets +# + +# +# _C extension +# + +set(VLLM_EXT_SRC + "csrc_musa/cache_kernels.mu" + "csrc_musa/attention/attention_kernels.mu" + "csrc_musa/pos_encoding_kernels.mu" + "csrc_musa/activation_kernels.mu" + "csrc_musa/layernorm_kernels.mu" + "csrc_musa/quantization/squeezellm/quant_cuda_kernel.mu" + "csrc_musa/quantization/gptq/q_gemm.mu" + "csrc_musa/quantization/fp8/fp8_cuda_kernels.mu" + "csrc_musa/musa_utils_kernels.mu" + "csrc_musa/moe_align_block_size_kernels.mu" + "csrc_musa/pybind.cpp") + +if(VLLM_GPU_LANG STREQUAL "MUSA") + list(APPEND VLLM_EXT_SRC + "csrc_musa/quantization/aqlm/gemm_kernels.mu" + "csrc_musa/quantization/awq/gemm_kernels.mu" + "csrc_musa/quantization/marlin/marlin_cuda_kernel.mu" + "csrc_musa/quantization/gptq_marlin/gptq_marlin.mu" + "csrc_musa/quantization/gptq_marlin/gptq_marlin_repack.mu" + "csrc_musa/custom_all_reduce.mu") +endif() + +string(APPEND MUSA_MCC_FLAGS + +) +string(APPEND MUSA_MCC_FLAGS " -U__CUDA__") + +set(MUSA_VERBOSE_BUILD ON) + + +musa_include_directories( +/opt/conda/envs/py39/include/python3.9 +/usr/local/musa/include +/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/share/generated_cuda_compatible/aten/src +/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/share/generated_cuda_compatible/include +/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/share/generated_cuda_compatible/include/torch/csrc/api/include +/opt/conda/envs/py39/lib/python3.9/site-packages +/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa +) + +musa_add_library(vllm_C SHARED ${VLLM_EXT_SRC}) +set(INSTALL_BIN_DIR "bin") +set(INSTALL_LIB_DIR "lib64") +set(INSTALL_INC_DIR "include") +set(INSTALL_SHARE_DIR "share") +set(INSTALL_DOC_DIR "docs") + +define_gpu_extension_target( + vllm_C + DESTINATION vllm + LANGUAGE ${VLLM_GPU_LANG} + SOURCES ${VLLM_EXT_SRC} + 
COMPILE_FLAGS ${VLLM_GPU_FLAGS} + ARCHITECTURES ${VLLM_GPU_ARCHES} + WITH_SOABI) + +target_link_libraries(vllm_C ${DEPENDENT_LIBRARIES}) +target_link_libraries(vllm_C "/opt/conda/envs/py39/lib/python3.9/site-packages/torch_musa/lib/libmusa_python.so") +# +# _moe_C extension +# + +set(VLLM_MOE_EXT_SRC + "csrc_musa/moe/moe_ops.cpp" + "csrc_musa/moe/topk_softmax_kernels.mu") + +define_gpu_extension_target( + _moe_C + DESTINATION vllm + LANGUAGE ${VLLM_GPU_LANG} + SOURCES ${VLLM_MOE_EXT_SRC} + COMPILE_FLAGS ${VLLM_GPU_FLAGS} + ARCHITECTURES ${VLLM_GPU_ARCHES} + WITH_SOABI) + +# +# _punica_C extension +# + +set(VLLM_PUNICA_EXT_SRC + "csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu" + "csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu" + "csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu" + "csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu" + "csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu" + "csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu" + "csrc_musa/punica/punica_ops.cc") + +# +# Copy GPU compilation flags+update for punica +# +set(VLLM_PUNICA_GPU_FLAGS ${VLLM_GPU_FLAGS}) +list(REMOVE_ITEM VLLM_PUNICA_GPU_FLAGS + "-D__MUSA_NO_HALF_OPERATORS__" + "-D__MUSA_NO_HALF_CONVERSIONS__" + "-D__MUSA_NO_BFLOAT16_CONVERSIONS__" + "-D__MUSA_NO_HALF2_OPERATORS__") + +# +# Filter out CUDA architectures < 8.0 for punica. 
+# +# if (${VLLM_GPU_LANG} STREQUAL "CUDA") +# set(VLLM_PUNICA_GPU_ARCHES) +# foreach(ARCH ${VLLM_GPU_ARCHES}) +# string_to_ver(CODE_VER ${ARCH}) +# if (CODE_VER GREATER_EQUAL 8.0) +# list(APPEND VLLM_PUNICA_GPU_ARCHES ${ARCH}) +# endif() +# endforeach() +# message(STATUS "Punica target arches: ${VLLM_PUNICA_GPU_ARCHES}") +# endif() + +if (VLLM_PUNICA_GPU_ARCHES) + define_gpu_extension_target( + _punica_C + DESTINATION vllm + LANGUAGE ${VLLM_GPU_LANG} + SOURCES ${VLLM_PUNICA_EXT_SRC} + COMPILE_FLAGS ${VLLM_PUNICA_GPU_FLAGS} + ARCHITECTURES ${VLLM_PUNICA_GPU_ARCHES} + WITH_SOABI) +else() + message(WARNING "Unable to create _punica_C target because none of the " + "requested architectures (${VLLM_GPU_ARCHES}) are supported, i.e. >= 8.0") +endif() + +# +# Add the `default` target which detects which extensions should be +# built based on platform/architecture. This is the same logic that +# setup.py uses to select which extensions should be built and should +# be kept in sync. +# +# The `default` target makes direct use of cmake easier since knowledge +# of which extensions are supported has been factored in, e.g. +# +# mkdir build && cd build +# cmake -G Ninja -DVLLM_PYTHON_EXECUTABLE=`which python3` -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=../vllm .. +# cmake --build . --target default +# +add_custom_target(default) + +if(VLLM_GPU_LANG STREQUAL "CUDA" OR VLLM_GPU_LANG STREQUAL "MUSA" OR VLLM_GPU_LANG STREQUAL "HIP") + message(STATUS "Enabling C extension.") + add_dependencies(default _C) +endif() + +if(VLLM_GPU_LANG STREQUAL "CUDA" OR VLLM_GPU_LANG STREQUAL "MUSA") + message(STATUS "Enabling moe extension.") + add_dependencies(default _moe_C) + + # Enable punica if -DVLLM_INSTALL_PUNICA_KERNELS=ON or + # VLLM_INSTALL_PUNICA_KERNELS is set in the environment and + # there are supported target arches. 
+ if (VLLM_PUNICA_GPU_ARCHES AND + (ENV{VLLM_INSTALL_PUNICA_KERNELS} OR VLLM_INSTALL_PUNICA_KERNELS)) + message(STATUS "Enabling punica extension.") + add_dependencies(default _punica_C) + endif() +endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..81a8db2 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,56 @@ +# Contributing to vLLM + +Thank you for your interest in contributing to vLLM! +Our community is open to everyone and welcomes all kinds of contributions, no matter how small or large. +There are several ways you can contribute to the project: + +- Identify and report any issues or bugs. +- Request or add a new model. +- Suggest or implement new features. + +However, remember that contributions aren't just about code. +We believe in the power of community support; thus, answering queries, assisting others, and enhancing the documentation are highly regarded and beneficial contributions. + +Finally, one of the most impactful ways to support us is by raising awareness about vLLM. +Talk about it in your blog posts, highlighting how it's driving your incredible projects. +Express your support on Twitter if vLLM aids you, or simply offer your appreciation by starring our repository. + + +## Setup for development + +### Build from source + +```bash +pip install -e . # This may take several minutes. +``` + +### Testing + +```bash +pip install -r requirements-dev.txt + +# linting and formatting +bash format.sh +# Static type checking +mypy +# Unit tests +pytest tests/ +``` +**Note:** Currently, the repository does not pass the mypy tests. + + +## Contributing Guidelines + +### Issue Reporting + +If you encounter a bug or have a feature request, please check our issues page first to see if someone else has already reported it. +If not, please file a new issue, providing as much relevant information as possible. 
+ +### Pull Requests & Code Reviews + +Please check the PR checklist in the [PR template](.github/PULL_REQUEST_TEMPLATE.md) for detailed guide for contribution. + +### Thank You + +Finally, thank you for taking the time to read these guidelines and for your interest in contributing to vLLM. +Your contributions make vLLM a great tool for everyone! diff --git a/Dockerfile b/Dockerfile index 1c0a8e4..90be3a3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1 +1,163 @@ -FROM registry.mthreads.com/mcconline/vllm-musa-qy2-py310:v0.8.4-release \ No newline at end of file +# The vLLM Dockerfile is used to construct vLLM image that can be directly used +# to run the OpenAI compatible server. + +# Please update any changes made here to +# docs/source/dev/dockerfile/dockerfile.rst and +# docs/source/assets/dev/dockerfile-stages-dependency.png + +#################### BASE BUILD IMAGE #################### +# prepare basic build environment +FROM nvidia/cuda:12.4.1-devel-ubuntu22.04 AS dev + +RUN apt-get update -y \ + && apt-get install -y python3-pip git + +# Workaround for https://github.com/openai/triton/issues/2507 and +# https://github.com/pytorch/pytorch/issues/107960 -- hopefully +# this won't be needed for future versions of this docker image +# or future versions of triton. 
+RUN ldconfig /usr/local/cuda-12.4/compat/ + +WORKDIR /workspace + +# install build and runtime dependencies +COPY requirements-common.txt requirements-common.txt +COPY requirements-cuda.txt requirements-cuda.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -r requirements-cuda.txt + +# install development dependencies +COPY requirements-dev.txt requirements-dev.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -r requirements-dev.txt + +# cuda arch list used by torch +# can be useful for both `dev` and `test` +# explicitly set the list to avoid issues with torch 2.2 +# see https://github.com/pytorch/pytorch/pull/123243 +ARG torch_cuda_arch_list='7.0 7.5 8.0 8.6 8.9 9.0+PTX' +ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} +#################### BASE BUILD IMAGE #################### + + +#################### WHEEL BUILD IMAGE #################### +FROM dev AS build + +# install build dependencies +COPY requirements-build.txt requirements-build.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -r requirements-build.txt + +# install compiler cache to speed up compilation leveraging local or remote caching +RUN apt-get update -y && apt-get install -y ccache + +# files and directories related to build wheels +COPY csrc csrc +COPY setup.py setup.py +COPY cmake cmake +COPY CMakeLists.txt CMakeLists.txt +COPY requirements-common.txt requirements-common.txt +COPY requirements-cuda.txt requirements-cuda.txt +COPY pyproject.toml pyproject.toml +COPY vllm vllm + +# max jobs used by Ninja to build extensions +ARG max_jobs=2 +ENV MAX_JOBS=${max_jobs} +# number of threads used by nvcc +ARG nvcc_threads=8 +ENV NVCC_THREADS=$nvcc_threads +# make sure punica kernels are built (for LoRA) +ENV VLLM_INSTALL_PUNICA_KERNELS=1 + +ENV CCACHE_DIR=/root/.cache/ccache +RUN --mount=type=cache,target=/root/.cache/ccache \ + --mount=type=cache,target=/root/.cache/pip \ + python3 setup.py bdist_wheel --dist-dir=dist + +# check the size of 
the wheel, we cannot upload wheels larger than 100MB +COPY .buildkite/check-wheel-size.py check-wheel-size.py +RUN python3 check-wheel-size.py dist + +# the `vllm_nccl` package must be installed from source distribution +# pip is too smart to store a wheel in the cache, and other CI jobs +# will directly use the wheel from the cache, which is not what we want. +# we need to remove it manually +RUN --mount=type=cache,target=/root/.cache/pip \ + pip cache remove vllm_nccl* +#################### EXTENSION Build IMAGE #################### + +#################### FLASH_ATTENTION Build IMAGE #################### +FROM dev as flash-attn-builder +# max jobs used for build +ARG max_jobs=2 +ENV MAX_JOBS=${max_jobs} +# flash attention version +ARG flash_attn_version=v2.5.8 +ENV FLASH_ATTN_VERSION=${flash_attn_version} + +WORKDIR /usr/src/flash-attention-v2 + +# Download the wheel or build it if a pre-compiled release doesn't exist +RUN pip --verbose wheel flash-attn==${FLASH_ATTN_VERSION} \ + --no-build-isolation --no-deps --no-cache-dir + +#################### FLASH_ATTENTION Build IMAGE #################### + +#################### vLLM installation IMAGE #################### +# image with vLLM installed +FROM nvidia/cuda:12.4.1-base-ubuntu22.04 AS vllm-base +WORKDIR /vllm-workspace + +RUN apt-get update -y \ + && apt-get install -y python3-pip git vim + +# Workaround for https://github.com/openai/triton/issues/2507 and +# https://github.com/pytorch/pytorch/issues/107960 -- hopefully +# this won't be needed for future versions of this docker image +# or future versions of triton. 
+RUN ldconfig /usr/local/cuda-12.4/compat/ + +# install vllm wheel first, so that torch etc will be installed +RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \ + --mount=type=cache,target=/root/.cache/pip \ + pip install dist/*.whl --verbose + +RUN --mount=type=bind,from=flash-attn-builder,src=/usr/src/flash-attention-v2,target=/usr/src/flash-attention-v2 \ + --mount=type=cache,target=/root/.cache/pip \ + pip install /usr/src/flash-attention-v2/*.whl --no-cache-dir +#################### vLLM installation IMAGE #################### + + +#################### TEST IMAGE #################### +# image to run unit testing suite +# note that this uses vllm installed by `pip` +FROM vllm-base AS test + +ADD . /vllm-workspace/ + +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -r requirements-dev.txt + +# doc requires source code +# we hide them inside `test_docs/` , so that this source code +# will not be imported by other tests +RUN mkdir test_docs +RUN mv docs test_docs/ +RUN mv vllm test_docs/ + +#################### TEST IMAGE #################### + +#################### OPENAI API SERVER #################### +# openai api server alternative +FROM vllm-base AS vllm-openai + +# install additional dependencies for openai api server +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install accelerate hf_transfer modelscope + +ENV VLLM_USAGE_SOURCE production-docker-image + +ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] +#################### OPENAI API SERVER #################### diff --git a/Dockerfile.cpu b/Dockerfile.cpu new file mode 100644 index 0000000..4251fdd --- /dev/null +++ b/Dockerfile.cpu @@ -0,0 +1,20 @@ +# This vLLM Dockerfile is used to construct image that can build and run vLLM on x86 CPU platform. 
+ +FROM ubuntu:22.04 + +RUN apt-get update -y \ + && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip \ + && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 + +RUN pip install --upgrade pip \ + && pip install wheel packaging ninja setuptools>=49.4.0 numpy + +COPY ./ /workspace/vllm + +WORKDIR /workspace/vllm + +RUN pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu + +RUN VLLM_TARGET_DEVICE=cpu python3 setup.py install + +CMD ["/bin/bash"] diff --git a/Dockerfile.neuron b/Dockerfile.neuron new file mode 100644 index 0000000..fe42b4e --- /dev/null +++ b/Dockerfile.neuron @@ -0,0 +1,36 @@ +# default base image +ARG BASE_IMAGE="763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-inference-neuronx:2.1.1-neuronx-py310-sdk2.17.0-ubuntu20.04" + +FROM $BASE_IMAGE + +RUN echo "Base image is $BASE_IMAGE" + +# Install some basic utilities +RUN apt-get update && apt-get install python3 python3-pip -y + +### Mount Point ### +# When launching the container, mount the code directory to /app +ARG APP_MOUNT=/app +VOLUME [ ${APP_MOUNT} ] +WORKDIR ${APP_MOUNT} + +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --no-cache-dir fastapi ninja tokenizers pandas +RUN python3 -m pip install sentencepiece transformers==4.36.2 -U +RUN python3 -m pip install transformers-neuronx --extra-index-url=https://pip.repos.neuron.amazonaws.com -U +RUN python3 -m pip install --pre neuronx-cc==2.12.* --extra-index-url=https://pip.repos.neuron.amazonaws.com -U + +COPY ./vllm /app/vllm/vllm +COPY ./setup.py /app/vllm/setup.py +COPY ./requirements-common.txt /app/vllm/requirements-common.txt +COPY ./requirements-neuron.txt /app/vllm/requirements-neuron.txt + +RUN cd /app/vllm \ + && python3 -m pip install -U -r requirements-neuron.txt + +ENV VLLM_BUILD_WITH_NEURON 1 +RUN cd /app/vllm \ + && pip install -e . \ + && cd .. 
+ +CMD ["/bin/bash"] diff --git a/Dockerfile.rocm b/Dockerfile.rocm new file mode 100644 index 0000000..d04bb99 --- /dev/null +++ b/Dockerfile.rocm @@ -0,0 +1,107 @@ +# default base image +ARG BASE_IMAGE="rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" + +FROM $BASE_IMAGE + +ARG BASE_IMAGE="rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" + +RUN echo "Base image is $BASE_IMAGE" + +# BASE_IMAGE for ROCm_5.7: "rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1" +# BASE_IMAGE for ROCm_6.0: "rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" + + +ARG FA_GFX_ARCHS="gfx90a;gfx942" +RUN echo "FA_GFX_ARCHS is $FA_GFX_ARCHS" + +ARG FA_BRANCH="ae7928c" +RUN echo "FA_BRANCH is $FA_BRANCH" + +# whether to build flash-attention +# if 0, will not build flash attention +# this is useful for gfx target where flash-attention is not supported +# In that case, we need to use the python reference attention implementation in vllm +ARG BUILD_FA="1" + +# whether to build triton on rocm +ARG BUILD_TRITON="1" + +# Install some basic utilities +RUN apt-get update && apt-get install python3 python3-pip -y + +# Install some basic utilities +RUN apt-get update && apt-get install -y \ + curl \ + ca-certificates \ + sudo \ + git \ + bzip2 \ + libx11-6 \ + build-essential \ + wget \ + unzip \ + nvidia-cuda-toolkit \ + tmux \ + && rm -rf /var/lib/apt/lists/* + +### Mount Point ### +# When launching the container, mount the code directory to /app +ARG APP_MOUNT=/vllm-workspace +VOLUME [ ${APP_MOUNT} ] +WORKDIR ${APP_MOUNT} + +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --no-cache-dir fastapi ninja tokenizers pandas + +ENV LLVM_SYMBOLIZER_PATH=/opt/rocm/llvm/bin/llvm-symbolizer +ENV PATH=$PATH:/opt/rocm/bin:/libtorch/bin: +ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm/lib/:/libtorch/lib: +ENV CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/libtorch/include:/libtorch/include/torch/csrc/api/include/:/opt/rocm/include/: + +# Install ROCm flash-attention +RUN if [ 
"$BUILD_FA" = "1" ]; then \ + mkdir libs \ + && cd libs \ + && git clone https://github.com/ROCm/flash-attention.git \ + && cd flash-attention \ + && git checkout ${FA_BRANCH} \ + && git submodule update --init \ + && export GPU_ARCHS=${FA_GFX_ARCHS} \ + && if [ "$BASE_IMAGE" = "rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1" ]; then \ + patch /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py hipify_patch.patch; fi \ + && python3 setup.py install \ + && cd ..; \ + fi + +# Error related to odd state for numpy 1.20.3 where there is no METADATA etc, but an extra LICENSES_bundled.txt. +# Manually removed it so that later steps of numpy upgrade can continue +RUN if [ "$BASE_IMAGE" = "rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" ]; then \ + rm -rf /opt/conda/envs/py_3.9/lib/python3.9/site-packages/numpy-1.20.3.dist-info/; fi + +# build triton +RUN if [ "$BUILD_TRITON" = "1" ]; then \ + mkdir -p libs \ + && cd libs \ + && pip uninstall -y triton \ + && git clone https://github.com/ROCm/triton.git \ + && cd triton/python \ + && pip3 install . \ + && cd ../..; \ + fi + +WORKDIR /vllm-workspace +COPY . . + +RUN python3 -m pip install --upgrade pip numba + +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -U -r requirements-rocm.txt \ + && patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h ./rocm_patch/rocm_bf16.patch \ + && python3 setup.py install \ + && cp build/lib.linux-x86_64-cpython-39/vllm/_C.cpython-39-x86_64-linux-gnu.so vllm/ \ + && cd .. + +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --no-cache-dir ray[all]==2.9.3 + +CMD ["/bin/bash"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..da695f2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,846 @@ +The vllm_musa from Moore Threads is licensed under the Apache License 2.0 listed below. +Copyright (c) 2022-2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. 
+Terms of the Apache License 2.0 +------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------- +The following copyright statements and licenses apply to various open source software/model +packages (or portions thereof) that are distributed with this vllm_musa. vllm_musa that +includes this file does not necessarily use all the open source software packages referred +to below and may also only use portions of a given package. Some open source software +packages referred to below may have been modified by Moore Threads Technology Co., Ltd + +------------------------------------------------------------------------- +vllm + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ +Contains code from https://github.com/punica-ai/punica + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ + +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. + + +Apache-2.0 +* third_party/nvbench (with LLVM exception) +* third_party/flashinfer + +BSD-3-Clause: +* third_party/cutlass + +------------------------------------------------------------------------------------ +Contains code from https://github.com/IST-DASLab/marlin + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ + +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..82be639 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,10 @@ +include LICENSE +include requirements-common.txt +include requirements-cuda.txt +include requirements-rocm.txt +include requirements-neuron.txt +include requirements-cpu.txt +include CMakeLists.txt + +recursive-include cmake * +recursive-include csrc * diff --git a/README.md b/README.md index 8a67ab1..ab02cb4 100644 --- a/README.md +++ b/README.md @@ -8,4 +8,127 @@ vllm 版本:v0.8.4 源码地址:https://github.com/MooreThreads/vllm_musa -原始镜像:registry.mthreads.com/mcconline/vllm-musa-qy2-py310:v0.8.4-release \ No newline at end of file +镜像:git.modelhub.org.cn:9443/enginex-mthreads/vllm-musa-qy2-py310:v0.8.4-release + +

+ + + vLLM + +

+ +

+Easy, fast, and cheap LLM serving for everyone +

+ +

+| Documentation | Blog | Paper | Discord | + +

+ +*Latest News* 🔥 +- [2024/04] We hosted [the third vLLM meetup](https://robloxandvllmmeetup2024.splashthat.com/) with Roblox! Please find the meetup slides [here](https://docs.google.com/presentation/d/1A--47JAK4BJ39t954HyTkvtfwn0fkqtsL8NGFuslReM/edit?usp=sharing). +- [2024/01] We hosted [the second vLLM meetup](https://lu.ma/ygxbpzhl) in SF! Please find the meetup slides [here](https://docs.google.com/presentation/d/12mI2sKABnUw5RBWXDYY-HtHth4iMSNcEoQ10jDQbxgA/edit?usp=sharing). +- [2024/01] Added ROCm 6.0 support to vLLM. +- [2023/12] Added ROCm 5.7 support to vLLM. +- [2023/10] We hosted [the first vLLM meetup](https://lu.ma/first-vllm-meetup) in SF! Please find the meetup slides [here](https://docs.google.com/presentation/d/1QL-XPFXiFpDBh86DbEegFXBXFXjix4v032GhShbKf3s/edit?usp=sharing). +- [2023/09] We created our [Discord server](https://discord.gg/jz7wjKhh6g)! Join us to discuss vLLM and LLM serving! We will also post the latest announcements and updates there. +- [2023/09] We released our [PagedAttention paper](https://arxiv.org/abs/2309.06180) on arXiv! +- [2023/08] We would like to express our sincere gratitude to [Andreessen Horowitz](https://a16z.com/2023/08/30/supporting-the-open-source-ai-community/) (a16z) for providing a generous grant to support the open-source development and research of vLLM. +- [2023/07] Added support for LLaMA-2! You can run and serve 7B/13B/70B LLaMA-2s on vLLM with a single command! +- [2023/06] Serving vLLM On any Cloud with SkyPilot. Check out a 1-click [example](https://github.com/skypilot-org/skypilot/blob/master/llm/vllm) to start the vLLM demo, and the [blog post](https://blog.skypilot.co/serving-llm-24x-faster-on-the-cloud-with-vllm-and-skypilot/) for the story behind vLLM development on the clouds. +- [2023/06] We officially released vLLM! FastChat-vLLM integration has powered [LMSYS Vicuna and Chatbot Arena](https://chat.lmsys.org) since mid-April. Check out our [blog post](https://vllm.ai). 
+ +--- +## About +vLLM is a fast and easy-to-use library for LLM inference and serving. + +vLLM is fast with: + +- State-of-the-art serving throughput +- Efficient management of attention key and value memory with **PagedAttention** +- Continuous batching of incoming requests +- Fast model execution with CUDA/HIP graph +- Quantization: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [SqueezeLLM](https://arxiv.org/abs/2306.07629), FP8 KV Cache +- Optimized CUDA kernels + +vLLM is flexible and easy to use with: + +- Seamless integration with popular Hugging Face models +- High-throughput serving with various decoding algorithms, including *parallel sampling*, *beam search*, and more +- Tensor parallelism support for distributed inference +- Streaming outputs +- OpenAI-compatible API server +- Support NVIDIA GPUs and AMD GPUs +- (Experimental) Prefix caching support +- (Experimental) Multi-lora support + +vLLM seamlessly supports many Hugging Face models, including the following architectures: + +- Aquila & Aquila2 (`BAAI/AquilaChat2-7B`, `BAAI/AquilaChat2-34B`, `BAAI/Aquila-7B`, `BAAI/AquilaChat-7B`, etc.) +- Baichuan & Baichuan2 (`baichuan-inc/Baichuan2-13B-Chat`, `baichuan-inc/Baichuan-7B`, etc.) +- BLOOM (`bigscience/bloom`, `bigscience/bloomz`, etc.) +- ChatGLM (`THUDM/chatglm2-6b`, `THUDM/chatglm3-6b`, etc.) +- Command-R (`CohereForAI/c4ai-command-r-v01`, etc.) +- DBRX (`databricks/dbrx-base`, `databricks/dbrx-instruct` etc.) +- DeciLM (`Deci/DeciLM-7B`, `Deci/DeciLM-7B-instruct`, etc.) +- Falcon (`tiiuae/falcon-7b`, `tiiuae/falcon-40b`, `tiiuae/falcon-rw-7b`, etc.) +- Gemma (`google/gemma-2b`, `google/gemma-7b`, etc.) +- GPT-2 (`gpt2`, `gpt2-xl`, etc.) +- GPT BigCode (`bigcode/starcoder`, `bigcode/gpt_bigcode-santacoder`, etc.) +- GPT-J (`EleutherAI/gpt-j-6b`, `nomic-ai/gpt4all-j`, etc.) +- GPT-NeoX (`EleutherAI/gpt-neox-20b`, `databricks/dolly-v2-12b`, `stabilityai/stablelm-tuned-alpha-7b`, etc.) 
+- InternLM (`internlm/internlm-7b`, `internlm/internlm-chat-7b`, etc.) +- InternLM2 (`internlm/internlm2-7b`, `internlm/internlm2-chat-7b`, etc.) +- Jais (`core42/jais-13b`, `core42/jais-13b-chat`, `core42/jais-30b-v3`, `core42/jais-30b-chat-v3`, etc.) +- LLaMA, Llama 2, and Meta Llama 3 (`meta-llama/Meta-Llama-3-8B-Instruct`, `meta-llama/Meta-Llama-3-70B-Instruct`, `meta-llama/Llama-2-70b-hf`, `lmsys/vicuna-13b-v1.3`, `young-geng/koala`, `openlm-research/open_llama_13b`, etc.) +- MiniCPM (`openbmb/MiniCPM-2B-sft-bf16`, `openbmb/MiniCPM-2B-dpo-bf16`, etc.) +- Mistral (`mistralai/Mistral-7B-v0.1`, `mistralai/Mistral-7B-Instruct-v0.1`, etc.) +- Mixtral (`mistralai/Mixtral-8x7B-v0.1`, `mistralai/Mixtral-8x7B-Instruct-v0.1`, `mistral-community/Mixtral-8x22B-v0.1`, etc.) +- MPT (`mosaicml/mpt-7b`, `mosaicml/mpt-30b`, etc.) +- OLMo (`allenai/OLMo-1B-hf`, `allenai/OLMo-7B-hf`, etc.) +- OPT (`facebook/opt-66b`, `facebook/opt-iml-max-30b`, etc.) +- Orion (`OrionStarAI/Orion-14B-Base`, `OrionStarAI/Orion-14B-Chat`, etc.) +- Phi (`microsoft/phi-1_5`, `microsoft/phi-2`, etc.) +- Phi-3 (`microsoft/Phi-3-mini-4k-instruct`, `microsoft/Phi-3-mini-128k-instruct`, etc.) +- Qwen (`Qwen/Qwen-7B`, `Qwen/Qwen-7B-Chat`, etc.) +- Qwen2 (`Qwen/Qwen1.5-7B`, `Qwen/Qwen1.5-7B-Chat`, etc.) +- Qwen2MoE (`Qwen/Qwen1.5-MoE-A2.7B`, `Qwen/Qwen1.5-MoE-A2.7B-Chat`, etc.) +- StableLM(`stabilityai/stablelm-3b-4e1t`, `stabilityai/stablelm-base-alpha-7b-v2`, etc.) +- Starcoder2(`bigcode/starcoder2-3b`, `bigcode/starcoder2-7b`, `bigcode/starcoder2-15b`, etc.) +- Xverse (`xverse/XVERSE-7B-Chat`, `xverse/XVERSE-13B-Chat`, `xverse/XVERSE-65B-Chat`, etc.) +- Yi (`01-ai/Yi-6B`, `01-ai/Yi-34B`, etc.) + +Install vLLM with pip or [from source](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source): + +```bash +pip install vllm +``` + +## Getting Started + +Visit our [documentation](https://vllm.readthedocs.io/en/latest/) to get started. 
+- [Installation](https://vllm.readthedocs.io/en/latest/getting_started/installation.html) +- [Quickstart](https://vllm.readthedocs.io/en/latest/getting_started/quickstart.html) +- [Supported Models](https://vllm.readthedocs.io/en/latest/models/supported_models.html) + +## Contributing + +We welcome and value any contributions and collaborations. +Please check out [CONTRIBUTING.md](./CONTRIBUTING.md) for how to get involved. + +## Citation + +If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs/2309.06180): +```bibtex +@inproceedings{kwon2023efficient, + title={Efficient Memory Management for Large Language Model Serving with PagedAttention}, + author={Woosuk Kwon and Zhuohan Li and Siyuan Zhuang and Ying Sheng and Lianmin Zheng and Cody Hao Yu and Joseph E. Gonzalez and Hao Zhang and Ion Stoica}, + booktitle={Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles}, + year={2023} +} +``` + +## vllm with MUSA + +Please refer to [README_vllm_musa](./README_vllm_musa.md). 
diff --git a/README_vllm_musa.md b/README_vllm_musa.md new file mode 100644 index 0000000..ad6e687 --- /dev/null +++ b/README_vllm_musa.md @@ -0,0 +1,66 @@ +# vllm_musa + +摩尔线程致力于构建完善好用的国产GPU应用生态,自主研发了MUSA架构及软件平台。vllm项目是业界广泛使用的大语言模型的推理和服务引擎,使用CUDA/ROCm提供GPU加速能力。为了方便摩尔线程GPU用户使用vllm框架,我们发起vllm_musa开源项目为vllm提供MUSA加速,让用户可释放摩尔线程GPU的澎湃算力。 + +现有的vllm代码不支持摩尔线程GPU作为后端,因此我们新增了MUSA设备后端。vllm_musa接口与官方接口一致,用户无需改动业务代码,开箱即用。 + +MUSA的一大优势是CUDA兼容,通过musify工具,我们可以快速将官方代码porting至MUSA软件栈,用户可以根据文档自行升级vllm版本并适配MUSA软件栈。 + +## 依赖 + +- musa_toolkit >= dev3.0.0 +- pytorch >= v2.2.0 +- [torch_musa](https://github.com/MooreThreads/torch_musa) >= v1.3.0 +- triton >= v2.2.0 +- ray >= 2.9 +- vllm v0.4.2 + +## 使用 +### 编译 +运行 `bash build_musa.sh` +### 测试示例 +``` +from vllm import LLM, SamplingParams +from transformers import AutoTokenizer, LlamaForCausalLM +import transformers +import time +import torch +import torch_musa + + +model_path = + +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] + +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) +llm = LLM(model=model_path, trust_remote_code=True, device="musa") + +outputs = llm.generate(prompts, sampling_params) + +# Print the outputs. +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +``` + +## Porting + +当前仓库porting自vllm v0.4.2版本。如果用户希望使用更高版本的vllm,只需要运行`musa_porting.py`将原生CUDA代码适配到MUSA代码即可。当然随着vllm的迭代可能会有些代码成为漏网之鱼,没有porting成功,用户可自行修改`musa_porting.py`文件中的文本替换规则。从而发挥MUSA强大的CUDA兼容能力。 + +### 步骤 +1. 运行 `python musa_porting.py` +2. 将`CMakeLists.txt`中需要编译的文件后缀从`.cu`修改为`.mu` +3. 
编译运行vllm_musa + +## 贡献 + +欢迎广大用户及开发者使用、反馈,助力vllm_musa功能及性能持续完善。 + +社区共建,期待广大开发者与我们一道,共同打造MUSA软件生态。我们将陆续推出一系列开源软件MUSA加速项目。 \ No newline at end of file diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000..192d6c4 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,8 @@ +# Benchmarking vLLM + +## Downloading the ShareGPT dataset + +You can download the dataset by running: +```bash +wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json +``` diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py new file mode 100644 index 0000000..f9d1675 --- /dev/null +++ b/benchmarks/backend_request_func.py @@ -0,0 +1,389 @@ +import json +import os +import sys +import time +import traceback +from dataclasses import dataclass, field +from typing import List, Optional + +import aiohttp +from tqdm.asyncio import tqdm + +AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60) + + +@dataclass +class RequestFuncInput: + prompt: str + api_url: str + prompt_len: int + output_len: int + model: str + best_of: int = 1 + use_beam_search: bool = False + + +@dataclass +class RequestFuncOutput: + generated_text: str = "" + success: bool = False + latency: float = 0.0 + ttft: float = 0.0 # Time to first token + itl: List[float] = field( + default_factory=list) # List of inter-token latencies + prompt_len: int = 0 + error: str = "" + + +async def async_request_tgi( + request_func_input: RequestFuncInput, + pbar: Optional[tqdm] = None, +) -> RequestFuncOutput: + api_url = request_func_input.api_url + assert api_url.endswith("generate_stream") + + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + assert not request_func_input.use_beam_search + params = { + "best_of": request_func_input.best_of, + "max_new_tokens": request_func_input.output_len, + "do_sample": True, + "temperature": 0.01, # TGI does not accept 0.0 temperature. 
+ "top_p": 0.99, # TGI does not accept 1.0 top_p. + } + payload = { + "inputs": request_func_input.prompt, + "parameters": params, + } + output = RequestFuncOutput() + output.prompt_len = request_func_input.prompt_len + + ttft = 0.0 + st = time.perf_counter() + most_recent_timestamp = st + try: + async with session.post(url=api_url, json=payload) as response: + if response.status == 200: + async for chunk_bytes in response.content: + chunk_bytes = chunk_bytes.strip() + if not chunk_bytes: + continue + + chunk = remove_prefix(chunk_bytes.decode("utf-8"), + "data:") + + data = json.loads(chunk) + timestamp = time.perf_counter() + # First token + if ttft == 0.0: + ttft = time.perf_counter() - st + output.ttft = ttft + + # Decoding phase + else: + output.itl.append(timestamp - + most_recent_timestamp) + + most_recent_timestamp = timestamp + + output.latency = most_recent_timestamp - st + output.success = True + output.generated_text = data["generated_text"] + except Exception: + output.success = False + exc_info = sys.exc_info() + output.error = "".join(traceback.format_exception(*exc_info)) + + if pbar: + pbar.update(1) + return output + + +async def async_request_trt_llm( + request_func_input: RequestFuncInput, + pbar: Optional[tqdm] = None, +) -> RequestFuncOutput: + api_url = request_func_input.api_url + assert api_url.endswith("generate_stream") + + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + assert not request_func_input.use_beam_search + assert request_func_input.best_of == 1 + payload = { + "accumulate_tokens": True, + "text_input": request_func_input.prompt, + "temperature": 0.0, + "top_p": 1.0, + "max_tokens": request_func_input.output_len, + "stream": True, + } + output = RequestFuncOutput() + output.prompt_len = request_func_input.prompt_len + + ttft = 0.0 + st = time.perf_counter() + most_recent_timestamp = st + try: + async with session.post(url=api_url, json=payload) as response: + if response.status == 200: + async for 
chunk_bytes in response.content: + chunk_bytes = chunk_bytes.strip() + if not chunk_bytes: + continue + + chunk = remove_prefix(chunk_bytes.decode("utf-8"), + "data:") + + data = json.loads(chunk) + output.generated_text += data["text_output"] + timestamp = time.perf_counter() + # First token + if ttft == 0.0: + ttft = time.perf_counter() - st + output.ttft = ttft + + # Decoding phase + else: + output.itl.append(timestamp - + most_recent_timestamp) + + most_recent_timestamp = timestamp + + output.latency = most_recent_timestamp - st + output.success = True + + else: + output.error = response.reason or "" + output.success = False + except Exception: + output.success = False + exc_info = sys.exc_info() + output.error = "".join(traceback.format_exception(*exc_info)) + + if pbar: + pbar.update(1) + return output + + +async def async_request_deepspeed_mii( + request_func_input: RequestFuncInput, + pbar: Optional[tqdm] = None, +) -> RequestFuncOutput: + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + assert request_func_input.best_of == 1 + assert not request_func_input.use_beam_search + + payload = { + "prompt": request_func_input.prompt, + "max_tokens": request_func_input.output_len, + "temperature": 0.01, # deepspeed-mii does not accept 0.0 temp. + "top_p": 1.0, + } + output = RequestFuncOutput() + output.prompt_len = request_func_input.prompt_len + + # NOTE: DeepSpeed-MII doesn't support streaming as of Jan 28 2024, + # will use 0 as placeholder. 
+ # See https://github.com/microsoft/DeepSpeed-MII/pull/311 + output.ttft = 0 + + st = time.perf_counter() + try: + async with session.post(url=request_func_input.api_url, + json=payload) as response: + if response.status == 200: + parsed_resp = await response.json() + output.latency = time.perf_counter() - st + output.generated_text = parsed_resp["text"][0] + output.success = True + else: + output.error = response.reason or "" + output.success = False + except Exception: + output.success = False + exc_info = sys.exc_info() + output.error = "".join(traceback.format_exception(*exc_info)) + + if pbar: + pbar.update(1) + return output + + +async def async_request_openai_completions( + request_func_input: RequestFuncInput, + pbar: Optional[tqdm] = None, +) -> RequestFuncOutput: + api_url = request_func_input.api_url + assert api_url.endswith( + "v1/completions" + ), "OpenAI Completions API URL must end with 'v1/completions'." + + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + assert not request_func_input.use_beam_search + payload = { + "model": request_func_input.model, + "prompt": request_func_input.prompt, + "temperature": 0.0, + "best_of": request_func_input.best_of, + "max_tokens": request_func_input.output_len, + "stream": True, + } + headers = { + "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}" + } + + output = RequestFuncOutput() + output.prompt_len = request_func_input.prompt_len + + generated_text = "" + ttft = 0.0 + st = time.perf_counter() + most_recent_timestamp = st + try: + async with session.post(url=api_url, json=payload, + headers=headers) as response: + if response.status == 200: + async for chunk_bytes in response.content: + chunk_bytes = chunk_bytes.strip() + if not chunk_bytes: + continue + + chunk = remove_prefix(chunk_bytes.decode("utf-8"), + "data: ") + if chunk == "[DONE]": + latency = time.perf_counter() - st + else: + data = json.loads(chunk) + + if data["choices"][0]["text"]: + timestamp = 
time.perf_counter() + # First token + if ttft == 0.0: + ttft = time.perf_counter() - st + output.ttft = ttft + + # Decoding phase + # NOTE: Some completion API might have a last + # usage summary response without a token so we + # do not want to include as inter-token-latency + elif data.get("usage", None) is None: + output.itl.append(timestamp - + most_recent_timestamp) + + most_recent_timestamp = timestamp + generated_text += data["choices"][0]["text"] + + output.generated_text = generated_text + output.success = True + output.latency = latency + except Exception: + output.success = False + exc_info = sys.exc_info() + output.error = "".join(traceback.format_exception(*exc_info)) + + if pbar: + pbar.update(1) + return output + + +async def async_request_openai_chat_completions( + request_func_input: RequestFuncInput, + pbar: Optional[tqdm] = None, +) -> RequestFuncOutput: + api_url = request_func_input.api_url + assert api_url.endswith( + "v1/chat/completions" + ), "OpenAI Chat Completions API URL must end with 'v1/chat/completions'." 
+ + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + assert not request_func_input.use_beam_search + payload = { + "model": request_func_input.model, + "messages": [ + { + "role": "user", + "content": request_func_input.prompt, + }, + ], + "temperature": 0.0, + "max_tokens": request_func_input.output_len, + "stream": True, + } + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}", + } + + output = RequestFuncOutput() + output.prompt_len = request_func_input.prompt_len + + generated_text = "" + ttft = 0.0 + st = time.perf_counter() + most_recent_timestamp = st + try: + async with session.post(url=api_url, json=payload, + headers=headers) as response: + if response.status == 200: + async for chunk_bytes in response.content: + chunk_bytes = chunk_bytes.strip() + if not chunk_bytes: + continue + + chunk = remove_prefix(chunk_bytes.decode("utf-8"), + "data: ") + if chunk == "[DONE]": + latency = time.perf_counter() - st + else: + timestamp = time.perf_counter() + data = json.loads(chunk) + + delta = data["choices"][0]["delta"] + if delta.get("content", None): + # First token + if ttft == 0.0: + ttft = time.perf_counter() - st + output.ttft = ttft + + # Decoding phase + else: + output.itl.append(timestamp - + most_recent_timestamp) + + generated_text += delta["content"] + + most_recent_timestamp = timestamp + + output.generated_text = generated_text + output.success = True + output.latency = latency + else: + output.error = response.reason or "" + output.success = False + except Exception: + output.success = False + exc_info = sys.exc_info() + output.error = "".join(traceback.format_exception(*exc_info)) + + if pbar: + pbar.update(1) + return output + + +# Since vllm must support Python 3.8, we can't use str.removeprefix(prefix) +# introduced in Python 3.9 +def remove_prefix(text: str, prefix: str) -> str: + if text.startswith(prefix): + return text[len(prefix):] + return text + + 
+ASYNC_REQUEST_FUNCS = { + "tgi": async_request_tgi, + "vllm": async_request_openai_completions, + "lmdeploy": async_request_openai_completions, + "deepspeed-mii": async_request_deepspeed_mii, + "openai": async_request_openai_completions, + "openai-chat": async_request_openai_chat_completions, + "tensorrt-llm": async_request_trt_llm, +} diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py new file mode 100644 index 0000000..44da3ba --- /dev/null +++ b/benchmarks/benchmark_latency.py @@ -0,0 +1,195 @@ +"""Benchmark the latency of processing a single batch of requests.""" +import argparse +import time +from pathlib import Path +from typing import Optional + +import numpy as np +import torch +from tqdm import tqdm + +from vllm import LLM, SamplingParams +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + + +def main(args: argparse.Namespace): + print(args) + + # NOTE(woosuk): If the request cannot be processed in a single batch, + # the engine will automatically process the request in multiple batches. 
+ llm = LLM(model=args.model, + tokenizer=args.tokenizer, + quantization=args.quantization, + tensor_parallel_size=args.tensor_parallel_size, + trust_remote_code=args.trust_remote_code, + dtype=args.dtype, + enforce_eager=args.enforce_eager, + kv_cache_dtype=args.kv_cache_dtype, + quantization_param_path=args.quantization_param_path, + device=args.device, + ray_workers_use_nsight=args.ray_workers_use_nsight, + enable_chunked_prefill=args.enable_chunked_prefill, + download_dir=args.download_dir, + block_size=args.block_size) + + sampling_params = SamplingParams( + n=args.n, + temperature=0.0 if args.use_beam_search else 1.0, + top_p=1.0, + use_beam_search=args.use_beam_search, + ignore_eos=True, + max_tokens=args.output_len, + ) + print(sampling_params) + dummy_prompt_token_ids = np.random.randint(10000, + size=(args.batch_size, + args.input_len)) + dummy_prompt_token_ids = dummy_prompt_token_ids.tolist() + + def run_to_completion(profile_dir: Optional[str] = None): + if profile_dir: + with torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + on_trace_ready=torch.profiler.tensorboard_trace_handler( + str(profile_dir))) as p: + llm.generate(prompt_token_ids=dummy_prompt_token_ids, + sampling_params=sampling_params, + use_tqdm=False) + print(p.key_averages()) + else: + start_time = time.perf_counter() + llm.generate(prompt_token_ids=dummy_prompt_token_ids, + sampling_params=sampling_params, + use_tqdm=False) + end_time = time.perf_counter() + latency = end_time - start_time + return latency + + print("Warming up...") + for _ in tqdm(range(args.num_iters_warmup), desc="Warmup iterations"): + run_to_completion(profile_dir=None) + + if args.profile: + profile_dir = args.profile_result_dir + if not profile_dir: + profile_dir = Path( + "." 
+ ) / "vllm_benchmark_result" / f"latency_result_{time.time()}" + print(f"Profiling (results will be saved to '{profile_dir}')...") + run_to_completion(profile_dir=profile_dir) + return + + # Benchmark. + latencies = [] + for _ in tqdm(range(args.num_iters), desc="Profiling iterations"): + latencies.append(run_to_completion(profile_dir=None)) + latencies = np.array(latencies) + percentages = [10, 25, 50, 75, 90] + percentiles = np.percentile(latencies, percentages) + print(f'Avg latency: {np.mean(latencies)} seconds') + for percentage, percentile in zip(percentages, percentiles): + print(f'{percentage}% percentile latency: {percentile} seconds') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Benchmark the latency of processing a single batch of ' + 'requests till completion.') + parser.add_argument('--model', type=str, default='facebook/opt-125m') + parser.add_argument('--tokenizer', type=str, default=None) + parser.add_argument('--quantization', + '-q', + choices=[*QUANTIZATION_METHODS, None], + default=None) + parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1) + parser.add_argument('--input-len', type=int, default=32) + parser.add_argument('--output-len', type=int, default=128) + parser.add_argument('--batch-size', type=int, default=8) + parser.add_argument('--n', + type=int, + default=1, + help='Number of generated sequences per prompt.') + parser.add_argument('--use-beam-search', action='store_true') + parser.add_argument('--num-iters-warmup', + type=int, + default=10, + help='Number of iterations to run for warmup.') + parser.add_argument('--num-iters', + type=int, + default=30, + help='Number of iterations to run.') + parser.add_argument('--trust-remote-code', + action='store_true', + help='trust remote code from huggingface') + parser.add_argument( + '--dtype', + type=str, + default='auto', + choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'], + help='data type for model weights and 
activations. ' + 'The "auto" option will use FP16 precision ' + 'for FP32 and FP16 models, and BF16 precision ' + 'for BF16 models.') + parser.add_argument('--enforce-eager', + action='store_true', + help='enforce eager mode and disable CUDA graph') + parser.add_argument( + "--kv-cache-dtype", + type=str, + choices=['auto', 'fp8'], + default='auto', + help= + 'Data type for kv cache storage. If "auto", will use model data type. ' + 'FP8_E5M2 (without scaling) is only supported on cuda version greater ' + 'than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead supported for ' + 'common inference criteria.') + parser.add_argument( + '--quantization-param-path', + type=str, + default=None, + help='Path to the JSON file containing the KV cache scaling factors. ' + 'This should generally be supplied, when KV cache dtype is FP8. ' + 'Otherwise, KV cache scaling factors default to 1.0, which may cause ' + 'accuracy issues. FP8_E5M2 (without scaling) is only supported on ' + 'cuda version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is ' + 'instead supported for common inference criteria.') + parser.add_argument( + '--profile', + action='store_true', + help='profile the generation process of a single batch') + parser.add_argument( + '--profile-result-dir', + type=str, + default=None, + help=('path to save the pytorch profiler output. 
Can be visualized ' + 'with ui.perfetto.dev or Tensorboard.')) + parser.add_argument( + "--device", + type=str, + default="cuda", + choices=["cuda", "cpu"], + help='device type for vLLM execution, supporting CUDA and CPU.') + parser.add_argument('--block-size', + type=int, + default=16, + help='block size of key/value cache') + parser.add_argument( + '--enable-chunked-prefill', + action='store_true', + help='If True, the prefill requests can be chunked based on the ' + 'max_num_batched_tokens') + parser.add_argument( + "--ray-workers-use-nsight", + action='store_true', + help="If specified, use nsight to profile ray workers", + ) + parser.add_argument('--download-dir', + type=str, + default=None, + help='directory to download and load the weights, ' + 'default to the default cache dir of huggingface') + args = parser.parse_args() + main(args) diff --git a/benchmarks/benchmark_prefix_caching.py b/benchmarks/benchmark_prefix_caching.py new file mode 100644 index 0000000..0899669 --- /dev/null +++ b/benchmarks/benchmark_prefix_caching.py @@ -0,0 +1,62 @@ +import argparse +import time + +from vllm import LLM, SamplingParams + +PROMPT = "You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as fellows. You need to answer my question about the table.\n# Table\n|Opening|Opening|Sl. No.|Film|Cast|Director|Music Director|Notes|\n|----|----|----|----|----|----|----|----|\n|J A N|9|1|Agni Pushpam|Jayabharathi, Kamalahasan|Jeassy|M. K. Arjunan||\n|J A N|16|2|Priyamvada|Mohan Sharma, Lakshmi, KPAC Lalitha|K. S. Sethumadhavan|V. Dakshinamoorthy||\n|J A N|23|3|Yakshagaanam|Madhu, Sheela|Sheela|M. S. Viswanathan||\n|J A N|30|4|Paalkkadal|Sheela, Sharada|T. K. Prasad|A. T. Ummer||\n|F E B|5|5|Amma|Madhu, Srividya|M. Krishnan Nair|M. K. Arjunan||\n|F E B|13|6|Appooppan|Thikkurissi Sukumaran Nair, Kamal Haasan|P. Bhaskaran|M. S. Baburaj||\n|F E B|20|7|Srishti|Chowalloor Krishnankutty, Ravi Alummoodu|K. T. Muhammad|M. S. 
Baburaj||\n|F E B|20|8|Vanadevatha|Prem Nazir, Madhubala|Yusufali Kechery|G. Devarajan||\n|F E B|27|9|Samasya|Madhu, Kamalahaasan|K. Thankappan|Shyam||\n|F E B|27|10|Yudhabhoomi|K. P. Ummer, Vidhubala|Crossbelt Mani|R. K. Shekhar||\n|M A R|5|11|Seemantha Puthran|Prem Nazir, Jayabharathi|A. B. Raj|M. K. Arjunan||\n|M A R|12|12|Swapnadanam|Rani Chandra, Dr. Mohandas|K. G. George|Bhaskar Chandavarkar||\n|M A R|19|13|Thulavarsham|Prem Nazir, sreedevi, Sudheer|N. Sankaran Nair|V. Dakshinamoorthy||\n|M A R|20|14|Aruthu|Kaviyoor Ponnamma, Kamalahasan|Ravi|G. Devarajan||\n|M A R|26|15|Swimming Pool|Kamal Haasan, M. G. Soman|J. Sasikumar|M. K. Arjunan||\n\n# Question\nWhat' s the content in the (1,1) cells\n" # noqa: E501 + + +def test_prefix(llm=None, sampling_params=None, prompts=None): + start_time = time.time() + + llm.generate(prompts, sampling_params=sampling_params) + + end_time = time.time() + print(f"cost time {end_time - start_time}") + + +def main(args): + llm = LLM(model=args.model, + tokenizer_mode='auto', + trust_remote_code=True, + enforce_eager=True, + use_v2_block_manager=args.use_v2_block_manager, + tensor_parallel_size=args.tensor_parallel_size, + enable_prefix_caching=args.enable_prefix_caching) + + num_prompts = 100 + prompts = [PROMPT] * num_prompts + sampling_params = SamplingParams(temperature=0, max_tokens=args.output_len) + + print("------warm up------") + test_prefix( + llm=llm, + prompts=prompts, + sampling_params=sampling_params, + ) + + print("------start generating------") + test_prefix( + llm=llm, + prompts=prompts, + sampling_params=sampling_params, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Benchmark the performance with or without automatic ' + 'prefix caching.') + parser.add_argument('--model', + type=str, + default='baichuan-inc/Baichuan2-13B-Chat') + parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1) + parser.add_argument('--output-len', type=int, default=10) + 
parser.add_argument('--enable-prefix-caching', + action='store_true', + help='enable prefix caching') + parser.add_argument('--use-v2-block-manager', + action='store_true', + help='Use BlockSpaceMangerV2') + args = parser.parse_args() + main(args) diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py new file mode 100644 index 0000000..2c2d69d --- /dev/null +++ b/benchmarks/benchmark_serving.py @@ -0,0 +1,596 @@ +"""Benchmark online serving throughput. + +On the server side, run one of the following commands: + vLLM OpenAI API server + python -m vllm.entrypoints.openai.api_server \ + --model <your_model> --swap-space 16 \ + --disable-log-requests + + (TGI backend) + ./launch_tgi_server.sh <your_model> <max_batch_total_tokens> + +On the client side, run: + python benchmarks/benchmark_serving.py \ + --backend <backend> \ + --model <your_model> \ + --dataset-name sharegpt \ + --dataset-path <path to dataset> \ + --request-rate <request_rate> \ # By default <request_rate> is inf + --num-prompts <num_prompts> # By default <num_prompts> is 1000 +""" +import argparse +import asyncio +import json +import os +import random +import time +import warnings +from dataclasses import dataclass +from datetime import datetime +from typing import AsyncGenerator, List, Optional, Tuple + +import numpy as np +from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput, + RequestFuncOutput) +from tqdm.asyncio import tqdm +from transformers import PreTrainedTokenizerBase + +from vllm.transformers_utils.tokenizer import get_tokenizer + + +@dataclass +class BenchmarkMetrics: + completed: int + total_input: int + total_output: int + request_throughput: float + input_throughput: float + output_throughput: float + mean_ttft_ms: float + median_ttft_ms: float + p99_ttft_ms: float + mean_tpot_ms: float + median_tpot_ms: float + p99_tpot_ms: float + + +def sample_sharegpt_requests( + dataset_path: str, + num_requests: int, + tokenizer: PreTrainedTokenizerBase, + fixed_output_len: Optional[int] = None, +) -> List[Tuple[str, int, int]]: + if fixed_output_len is not None and fixed_output_len < 4: + raise 
ValueError("output_len too small") + + # Load the dataset. + with open(dataset_path) as f: + dataset = json.load(f) + # Filter out the conversations with less than 2 turns. + dataset = [data for data in dataset if len(data["conversations"]) >= 2] + # Only keep the first two turns of each conversation. + dataset = [(data["conversations"][0]["value"], + data["conversations"][1]["value"]) for data in dataset] + + # Shuffle the dataset. + random.shuffle(dataset) + + # Filter out sequences that are too long or too short + filtered_dataset: List[Tuple[str, int, int]] = [] + for i in range(len(dataset)): + if len(filtered_dataset) == num_requests: + break + + # Tokenize the prompts and completions. + prompt = dataset[i][0] + prompt_token_ids = tokenizer(prompt).input_ids + completion = dataset[i][1] + completion_token_ids = tokenizer(completion).input_ids + prompt_len = len(prompt_token_ids) + output_len = len(completion_token_ids + ) if fixed_output_len is None else fixed_output_len + if prompt_len < 4 or output_len < 4: + # Prune too short sequences. + continue + if prompt_len > 1024 or prompt_len + output_len > 2048: + # Prune too long sequences. + continue + filtered_dataset.append((prompt, prompt_len, output_len)) + + return filtered_dataset + + +def sample_sonnet_requests( + dataset_path: str, + num_requests: int, + input_len: int, + output_len: int, + prefix_len: int, + tokenizer: PreTrainedTokenizerBase, +) -> List[Tuple[str, str, int, int]]: + assert ( + input_len > prefix_len + ), "'args.sonnet-input-len' must be greater than 'args.prefix-input-len'." + + # Load the dataset. + with open(dataset_path) as f: + poem_lines = f.readlines() + + # Tokenize the poem lines. + poem_token_ids = tokenizer(poem_lines).input_ids + average_poem_len = sum( + len(token_ids) for token_ids in poem_token_ids) / len(poem_token_ids) + + # Base prefix for all requests. 
+ base_prompt = "Pick as many lines as you can from these poem lines:\n" + base_message = [{ + "role": "user", + "content": base_prompt, + }] + base_prompt_formatted = tokenizer.apply_chat_template( + base_message, add_generation_prompt=True, tokenize=False) + base_prompt_offset = len(tokenizer(base_prompt_formatted).input_ids) + + assert ( + input_len > base_prompt_offset + ), f"Please set 'args.sonnet-input-len' higher than {base_prompt_offset}." + num_input_lines = round( + (input_len - base_prompt_offset) / average_poem_len) + + # First approximately `prefix_len` number of tokens in the + # prompt are fixed poem lines. + assert ( + prefix_len > base_prompt_offset + ), f"Please set 'args.sonnet-prefix-len' higher than {base_prompt_offset}." + + num_prefix_lines = round( + (prefix_len - base_prompt_offset) / average_poem_len) + prefix_lines = poem_lines[:num_prefix_lines] + + # Sample the rest of lines per request. + sampled_requests: List[Tuple[str, int, int]] = [] + for _ in range(num_requests): + sampled_lines = "".join( + prefix_lines + + random.sample(poem_lines, num_input_lines - num_prefix_lines)) + + prompt = f"{base_prompt}{sampled_lines}" + message = [ + { + "role": "user", + "content": prompt, + }, + ] + prompt_formatted = tokenizer.apply_chat_template( + message, add_generation_prompt=True, tokenize=False) + prompt_len = len(tokenizer(prompt_formatted).input_ids) + sampled_requests.append( + (prompt, prompt_formatted, prompt_len, output_len)) + + return sampled_requests + + +async def get_request( + input_requests: List[Tuple[str, int, int]], + request_rate: float, +) -> AsyncGenerator[Tuple[str, int, int], None]: + input_requests = iter(input_requests) + for request in input_requests: + yield request + + if request_rate == float("inf"): + # If the request rate is infinity, then we don't need to wait. + continue + # Sample the request interval from the exponential distribution. 
+ interval = np.random.exponential(1.0 / request_rate) + # The next request will be sent after the interval. + await asyncio.sleep(interval) + + +def calculate_metrics( + input_requests: List[Tuple[str, int, int]], + outputs: List[RequestFuncOutput], + dur_s: float, + tokenizer: PreTrainedTokenizerBase, +) -> Tuple[BenchmarkMetrics, List[int]]: + actual_output_lens = [] + total_input = 0 + completed = 0 + tpots = [] + ttfts = [] + for i in range(len(outputs)): + if outputs[i].success: + output_len = len(tokenizer(outputs[i].generated_text).input_ids) + actual_output_lens.append(output_len) + total_input += input_requests[i][1] + if output_len > 1: + tpots.append( + (outputs[i].latency - outputs[i].ttft) / (output_len - 1)) + ttfts.append(outputs[i].ttft) + completed += 1 + else: + actual_output_lens.append(0) + + metrics = BenchmarkMetrics( + completed=completed, + total_input=total_input, + total_output=sum(actual_output_lens), + request_throughput=completed / dur_s, + input_throughput=total_input / dur_s, + output_throughput=sum(actual_output_lens) / dur_s, + mean_ttft_ms=np.mean(ttfts or 0) * + 1000, # ttfts is empty if streaming is not supported by backend + median_ttft_ms=np.median(ttfts or 0) * 1000, + p99_ttft_ms=np.percentile(ttfts or 0, 99) * 1000, + mean_tpot_ms=np.mean(tpots) * 1000, + median_tpot_ms=np.median(tpots) * 1000, + p99_tpot_ms=np.percentile(tpots, 99) * 1000, + ) + + return metrics, actual_output_lens + + +async def benchmark( + backend: str, + api_url: str, + model_id: str, + tokenizer: PreTrainedTokenizerBase, + input_requests: List[Tuple[str, int, int]], + best_of: int, + use_beam_search: bool, + request_rate: float, + disable_tqdm: bool, +): + if backend in ASYNC_REQUEST_FUNCS: + request_func = ASYNC_REQUEST_FUNCS.get(backend) + else: + raise ValueError(f"Unknown backend: {backend}") + + print(f"Traffic request rate: {request_rate}") + + pbar = None if disable_tqdm else tqdm(total=len(input_requests)) + + benchmark_start_time = 
time.perf_counter() + tasks = [] + async for request in get_request(input_requests, request_rate): + prompt, prompt_len, output_len = request + request_func_input = RequestFuncInput( + model=model_id, + prompt=prompt, + api_url=api_url, + prompt_len=prompt_len, + output_len=output_len, + best_of=best_of, + use_beam_search=use_beam_search, + ) + tasks.append( + asyncio.create_task( + request_func(request_func_input=request_func_input, + pbar=pbar))) + outputs: List[RequestFuncOutput] = await asyncio.gather(*tasks) + + if not disable_tqdm: + pbar.close() + + benchmark_duration = time.perf_counter() - benchmark_start_time + + metrics, actual_output_lens = calculate_metrics( + input_requests=input_requests, + outputs=outputs, + dur_s=benchmark_duration, + tokenizer=tokenizer, + ) + + print("{s:{c}^{n}}".format(s=' Serving Benchmark Result ', n=50, c='=')) + print("{:<40} {:<10}".format("Successful requests:", metrics.completed)) + print("{:<40} {:<10.2f}".format("Benchmark duration (s):", + benchmark_duration)) + print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input)) + print("{:<40} {:<10}".format("Total generated tokens:", + metrics.total_output)) + print("{:<40} {:<10.2f}".format("Request throughput (req/s):", + metrics.request_throughput)) + print("{:<40} {:<10.2f}".format("Input token throughput (tok/s):", + metrics.input_throughput)) + print("{:<40} {:<10.2f}".format("Output token throughput (tok/s):", + metrics.output_throughput)) + print("{s:{c}^{n}}".format(s='Time to First Token', n=50, c='-')) + print("{:<40} {:<10.2f}".format("Mean TTFT (ms):", metrics.mean_ttft_ms)) + print("{:<40} {:<10.2f}".format("Median TTFT (ms):", + metrics.median_ttft_ms)) + print("{:<40} {:<10.2f}".format("P99 TTFT (ms):", metrics.p99_ttft_ms)) + print("{s:{c}^{n}}".format(s='Time per Output Token (excl. 
def main(args: argparse.Namespace):
    """Run the serving benchmark end-to-end.

    Builds the request workload from the selected dataset, runs the async
    ``benchmark`` coroutine against the target API server, and optionally
    saves the collected metrics to a JSON file.
    """
    print(args)
    random.seed(args.seed)
    np.random.seed(args.seed)

    backend = args.backend
    model_id = args.model
    tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model

    if args.base_url is not None:
        api_url = f"{args.base_url}{args.endpoint}"
    else:
        api_url = f"http://{args.host}:{args.port}{args.endpoint}"

    tokenizer = get_tokenizer(tokenizer_id,
                              trust_remote_code=args.trust_remote_code)

    if args.dataset is not None:
        warnings.warn(
            "The '--dataset' argument will be deprecated in the next "
            "release. Please use '--dataset-name' and "
            "'--dataset-path' in the future runs.",
            stacklevel=2)
        input_requests = sample_sharegpt_requests(
            dataset_path=args.dataset,
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            fixed_output_len=args.sharegpt_output_len,
        )

    elif args.dataset_name == "sharegpt":
        input_requests = sample_sharegpt_requests(
            dataset_path=args.dataset_path,
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            fixed_output_len=args.sharegpt_output_len,
        )

    elif args.dataset_name == "sonnet":
        # Do not format the prompt, pass to message directly
        if args.backend == "openai-chat":
            input_requests = sample_sonnet_requests(
                dataset_path=args.dataset_path,
                num_requests=args.num_prompts,
                input_len=args.sonnet_input_len,
                output_len=args.sonnet_output_len,
                prefix_len=args.sonnet_prefix_len,
                tokenizer=tokenizer,
            )
            # Chat backends apply their own template: keep the raw prompt.
            input_requests = [(prompt, prompt_len, output_len)
                              for prompt, prompt_formatted, prompt_len,
                              output_len in input_requests]
        else:
            assert (
                tokenizer.chat_template or tokenizer.default_chat_template
            ), "Tokenizer/model must have chat template for sonnet dataset."
            input_requests = sample_sonnet_requests(
                dataset_path=args.dataset_path,
                num_requests=args.num_prompts,
                input_len=args.sonnet_input_len,
                output_len=args.sonnet_output_len,
                prefix_len=args.sonnet_prefix_len,
                tokenizer=tokenizer,
            )
            # Completion backends get the chat-template-formatted prompt.
            input_requests = [(prompt_formatted, prompt_len, output_len)
                              for prompt, prompt_formatted, prompt_len,
                              output_len in input_requests]

    else:
        raise ValueError(f"Unknown dataset: {args.dataset_name}")

    benchmark_result = asyncio.run(
        benchmark(
            backend=backend,
            api_url=api_url,
            model_id=model_id,
            tokenizer=tokenizer,
            input_requests=input_requests,
            best_of=args.best_of,
            use_beam_search=args.use_beam_search,
            request_rate=args.request_rate,
            disable_tqdm=args.disable_tqdm,
        ))

    # Save config and results to json
    if args.save_result:
        result_json = {}

        # Setup
        current_dt = datetime.now().strftime("%Y%m%d-%H%M%S")
        result_json["date"] = current_dt
        result_json["backend"] = backend
        result_json["model_id"] = model_id
        result_json["tokenizer_id"] = tokenizer_id
        result_json["best_of"] = args.best_of
        result_json["use_beam_search"] = args.use_beam_search
        result_json["num_prompts"] = args.num_prompts

        # Metadata
        if args.metadata:
            for item in args.metadata:
                if "=" in item:
                    # Split on the FIRST '=' only, so values may themselves
                    # contain '=' (e.g. --metadata note=a=b).
                    key, _, value = item.partition("=")
                    result_json[key.strip()] = value.strip()
                else:
                    raise ValueError(
                        "Invalid metadata format. Please use KEY=VALUE format."
                    )

        # Traffic
        result_json["request_rate"] = (
            args.request_rate if args.request_rate < float("inf") else "inf")

        # Merge with benchmark result
        result_json = {**result_json, **benchmark_result}

        # Save to file
        base_model_id = model_id.split("/")[-1]
        file_name = f"{backend}-{args.request_rate}qps-{base_model_id}-{current_dt}.json"  #noqa
        if args.result_dir:
            file_name = os.path.join(args.result_dir, file_name)
        with open(file_name, "w") as outfile:
            json.dump(result_json, outfile)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Benchmark the online serving throughput.")
    parser.add_argument(
        "--backend",
        type=str,
        default="vllm",
        choices=list(ASYNC_REQUEST_FUNCS.keys()),
    )
    parser.add_argument(
        "--base-url",
        type=str,
        default=None,
        help="Server or API base url if not using http host and port.",
    )
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument(
        "--endpoint",
        type=str,
        default="/v1/completions",
        help="API endpoint.",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default=None,
        help="Path to the ShareGPT dataset, will be deprecated in the "
        "next release.",
    )
    parser.add_argument(
        "--dataset-name",
        type=str,
        default="sharegpt",
        choices=["sharegpt", "sonnet"],
        help="Name of the dataset to benchmark on.",
    )
    parser.add_argument("--dataset-path",
                        type=str,
                        default=None,
                        help="Path to the dataset.")
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="Name of the model.",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        help=
        "Name or path of the tokenizer, if not using the default tokenizer.",
    )
    parser.add_argument(
        "--best-of",
        type=int,
        default=1,
        help="Generates `best_of` sequences per prompt and "
        "returns the best one.",
    )
    parser.add_argument("--use-beam-search", action="store_true")
    parser.add_argument(
        "--num-prompts",
        type=int,
        default=1000,
        help="Number of prompts to process.",
    )
    parser.add_argument(
        "--sharegpt-output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the output length "
        "from the ShareGPT dataset.")
    parser.add_argument(
        "--sonnet-input-len",
        type=int,
        default=550,
        help=
        "Number of input tokens per request, used only for sonnet dataset.",
    )
    parser.add_argument(
        "--sonnet-output-len",
        type=int,
        default=150,
        help=
        "Number of output tokens per request, used only for sonnet dataset.",
    )
    parser.add_argument(
        "--sonnet-prefix-len",
        type=int,
        default=200,
        help=
        "Number of prefix tokens per request, used only for sonnet dataset.",
    )
    parser.add_argument(
        "--request-rate",
        type=float,
        default=float("inf"),
        help="Number of requests per second. If this is inf, "
        "then all the requests are sent at time 0. "
        "Otherwise, we use Poisson process to synthesize "
        "the request arrival times.",
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        help="Trust remote code from huggingface",
    )
    parser.add_argument(
        "--disable-tqdm",
        action="store_true",
        help="Specify to disable tqdm progress bar.",
    )
    parser.add_argument(
        "--save-result",
        action="store_true",
        help="Specify to save benchmark results to a json file",
    )
    parser.add_argument(
        "--metadata",
        metavar="KEY=VALUE",
        nargs="*",
        help="Key-value pairs (e.g, --metadata version=0.3.3 tp=1) "
        "for metadata of this run to be saved in the result JSON file "
        "for record keeping purposes.",
    )
    parser.add_argument(
        "--result-dir",
        type=str,
        default=None,
        help="Specify directory to save benchmark json results."
        "If not specified, results are saved in the current directory.",
    )

    args = parser.parse_args()
    main(args)
"""Benchmark offline inference throughput."""
import argparse
import json
import random
import time
from typing import List, Optional, Tuple

import torch
from tqdm import tqdm
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          PreTrainedTokenizerBase)

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS


def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: PreTrainedTokenizerBase,
    fixed_output_len: Optional[int],
) -> List[Tuple[str, int, int]]:
    """Sample (prompt, prompt_len, output_len) tuples from a ShareGPT dump.

    Conversations with fewer than two turns, prompts shorter than 4 tokens,
    outputs shorter than 4 tokens, prompts longer than 1024 tokens, and
    prompt+output longer than 2048 tokens are discarded.
    """
    if fixed_output_len is not None and fixed_output_len < 4:
        raise ValueError("output_len too small")

    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data["conversations"]) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [(data["conversations"][0]["value"],
                data["conversations"][1]["value"]) for data in dataset]

    # Shuffle the dataset.
    random.shuffle(dataset)

    # Filter out sequences that are too long or too short
    filtered_dataset: List[Tuple[str, int, int]] = []
    for i in range(len(dataset)):
        if len(filtered_dataset) == num_requests:
            break

        # Tokenize the prompts and completions.
        prompt = dataset[i][0]
        prompt_token_ids = tokenizer(prompt).input_ids
        completion = dataset[i][1]
        completion_token_ids = tokenizer(completion).input_ids
        prompt_len = len(prompt_token_ids)
        output_len = len(completion_token_ids
                         ) if fixed_output_len is None else fixed_output_len
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))

    return filtered_dataset


def run_vllm(
    requests: List[Tuple[str, int, int]],
    model: str,
    tokenizer: str,
    quantization: Optional[str],
    tensor_parallel_size: int,
    seed: int,
    n: int,
    use_beam_search: bool,
    trust_remote_code: bool,
    dtype: str,
    max_model_len: Optional[int],
    enforce_eager: bool,
    kv_cache_dtype: str,
    quantization_param_path: Optional[str],
    device: str,
    enable_prefix_caching: bool,
    enable_chunked_prefill: bool,
    max_num_batched_tokens: int,
    gpu_memory_utilization: float = 0.9,
    download_dir: Optional[str] = None,
) -> float:
    """Run all requests through a vLLM engine and return the elapsed seconds."""
    from vllm import LLM, SamplingParams
    llm = LLM(
        model=model,
        tokenizer=tokenizer,
        quantization=quantization,
        tensor_parallel_size=tensor_parallel_size,
        seed=seed,
        trust_remote_code=trust_remote_code,
        dtype=dtype,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        enforce_eager=enforce_eager,
        kv_cache_dtype=kv_cache_dtype,
        quantization_param_path=quantization_param_path,
        device=device,
        enable_prefix_caching=enable_prefix_caching,
        download_dir=download_dir,
        enable_chunked_prefill=enable_chunked_prefill,
        max_num_batched_tokens=max_num_batched_tokens,
    )

    # Add the requests to the engine.
    prompts = []
    sampling_params = []
    for prompt, _, output_len in requests:
        prompts.append(prompt)
        sampling_params.append(
            SamplingParams(
                n=n,
                # Greedy-equivalent temperature under beam search.
                temperature=0.0 if use_beam_search else 1.0,
                top_p=1.0,
                use_beam_search=use_beam_search,
                ignore_eos=True,
                max_tokens=output_len,
            ))

    start = time.perf_counter()
    llm.generate(prompts, sampling_params, use_tqdm=True)
    end = time.perf_counter()
    return end - start


def run_hf(
    requests: List[Tuple[str, int, int]],
    model: str,
    tokenizer: PreTrainedTokenizerBase,
    n: int,
    use_beam_search: bool,
    max_batch_size: int,
    trust_remote_code: bool,
) -> float:
    """Run all requests through a HuggingFace model and return elapsed seconds.

    Requests are greedily packed into batches subject to ``max_batch_size``
    and a 2048-token (max prompt + max output) budget.
    """
    assert not use_beam_search
    llm = AutoModelForCausalLM.from_pretrained(
        model, torch_dtype=torch.float16, trust_remote_code=trust_remote_code)
    if llm.config.model_type == "llama":
        # To enable padding in the HF backend.
        tokenizer.pad_token = tokenizer.eos_token
    llm = llm.cuda()

    pbar = tqdm(total=len(requests))
    start = time.perf_counter()
    batch: List[str] = []
    max_prompt_len = 0
    max_output_len = 0
    for i in range(len(requests)):
        prompt, prompt_len, output_len = requests[i]
        # Add the prompt to the batch.
        batch.append(prompt)
        max_prompt_len = max(max_prompt_len, prompt_len)
        max_output_len = max(max_output_len, output_len)
        if len(batch) < max_batch_size and i != len(requests) - 1:
            # Check if we can add more requests to the batch.
            _, next_prompt_len, next_output_len = requests[i + 1]
            if (max(max_prompt_len, next_prompt_len) +
                    max(max_output_len, next_output_len)) <= 2048:
                # We can add more requests to the batch.
                continue

        # Generate the sequences.
        # NOTE(review): only input_ids are passed to generate(); the padding
        # attention_mask from the tokenizer is dropped, so padded positions
        # are attended to — confirm this is the intended benchmark behavior.
        input_ids = tokenizer(batch, return_tensors="pt",
                              padding=True).input_ids
        llm_outputs = llm.generate(
            input_ids=input_ids.cuda(),
            do_sample=not use_beam_search,
            num_return_sequences=n,
            temperature=1.0,
            top_p=1.0,
            use_cache=True,
            max_new_tokens=max_output_len,
        )
        # Include the decoding time.
        tokenizer.batch_decode(llm_outputs, skip_special_tokens=True)
        pbar.update(len(batch))

        # Clear the batch.
        batch = []
        max_prompt_len = 0
        max_output_len = 0
    end = time.perf_counter()
    return end - start


def run_mii(
    requests: List[Tuple[str, int, int]],
    model: str,
    tensor_parallel_size: int,
    output_len: int,
) -> float:
    """Run all requests through a DeepSpeed-MII server; return elapsed seconds."""
    from mii import client, serve
    llm = serve(model, tensor_parallel=tensor_parallel_size)
    prompts = [prompt for prompt, _, _ in requests]

    start = time.perf_counter()
    llm.generate(prompts, max_new_tokens=output_len)
    end = time.perf_counter()
    # Use a distinct local name: rebinding `client` would shadow the
    # imported mii.client factory function.
    mii_client = client(model)
    mii_client.terminate_server()
    return end - start


def main(args: argparse.Namespace):
    """Dispatch the benchmark to the selected backend and print throughput."""
    print(args)
    random.seed(args.seed)

    # Sample the requests.
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer, trust_remote_code=args.trust_remote_code)
    if args.dataset is None:
        # Synthesize a prompt with the given input length.
        prompt = "hi" * (args.input_len - 1)
        requests = [(prompt, args.input_len, args.output_len)
                    for _ in range(args.num_prompts)]
    else:
        requests = sample_requests(args.dataset, args.num_prompts, tokenizer,
                                   args.output_len)

    if args.backend == "vllm":
        elapsed_time = run_vllm(
            requests, args.model, args.tokenizer, args.quantization,
            args.tensor_parallel_size, args.seed, args.n, args.use_beam_search,
            args.trust_remote_code, args.dtype, args.max_model_len,
            args.enforce_eager, args.kv_cache_dtype,
            args.quantization_param_path, args.device,
            args.enable_prefix_caching, args.enable_chunked_prefill,
            args.max_num_batched_tokens, args.gpu_memory_utilization,
            args.download_dir)
    elif args.backend == "hf":
        assert args.tensor_parallel_size == 1
        elapsed_time = run_hf(requests, args.model, tokenizer, args.n,
                              args.use_beam_search, args.hf_max_batch_size,
                              args.trust_remote_code)
    elif args.backend == "mii":
        elapsed_time = run_mii(requests, args.model, args.tensor_parallel_size,
                               args.output_len)
    else:
        raise ValueError(f"Unknown backend: {args.backend}")
    total_num_tokens = sum(prompt_len + output_len
                           for _, prompt_len, output_len in requests)
    print(f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
          f"{total_num_tokens / elapsed_time:.2f} tokens/s")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Benchmark the throughput.")
    parser.add_argument("--backend",
                        type=str,
                        choices=["vllm", "hf", "mii"],
                        default="vllm")
    parser.add_argument("--dataset",
                        type=str,
                        default=None,
                        help="Path to the dataset.")
    parser.add_argument("--input-len",
                        type=int,
                        default=None,
                        help="Input prompt length for each request")
    parser.add_argument("--output-len",
                        type=int,
                        default=None,
                        help="Output length for each request. Overrides the "
                        "output length from the dataset.")
    parser.add_argument("--model", type=str, default="facebook/opt-125m")
    parser.add_argument("--tokenizer", type=str, default=None)
    parser.add_argument('--quantization',
                        '-q',
                        choices=[*QUANTIZATION_METHODS, None],
                        default=None)
    parser.add_argument("--tensor-parallel-size", "-tp", type=int, default=1)
    parser.add_argument("--n",
                        type=int,
                        default=1,
                        help="Number of generated sequences per prompt.")
    parser.add_argument("--use-beam-search", action="store_true")
    parser.add_argument("--num-prompts",
                        type=int,
                        default=1000,
                        help="Number of prompts to process.")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--hf-max-batch-size",
                        type=int,
                        default=None,
                        help="Maximum batch size for HF backend.")
    parser.add_argument('--trust-remote-code',
                        action='store_true',
                        help='trust remote code from huggingface')
    parser.add_argument(
        '--max-model-len',
        type=int,
        default=None,
        help='Maximum length of a sequence (including prompt and output). '
        'If None, will be derived from the model.')
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
        help='data type for model weights and activations. '
        'The "auto" option will use FP16 precision '
        'for FP32 and FP16 models, and BF16 precision '
        'for BF16 models.')
    parser.add_argument('--gpu-memory-utilization',
                        type=float,
                        default=0.9,
                        help='the fraction of GPU memory to be used for '
                        'the model executor, which can range from 0 to 1.'
                        'If unspecified, will use the default value of 0.9.')
    parser.add_argument("--enforce-eager",
                        action="store_true",
                        help="enforce eager execution")
    parser.add_argument(
        "--kv-cache-dtype",
        type=str,
        choices=["auto", "fp8"],
        default="auto",
        help=
        'Data type for kv cache storage. If "auto", will use model data type. '
        'FP8_E5M2 (without scaling) is only supported on cuda version greater '
        'than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead supported for '
        'common inference criteria.')
    parser.add_argument(
        '--quantization-param-path',
        type=str,
        default=None,
        help='Path to the JSON file containing the KV cache scaling factors. '
        'This should generally be supplied, when KV cache dtype is FP8. '
        'Otherwise, KV cache scaling factors default to 1.0, which may cause '
        'accuracy issues. FP8_E5M2 (without scaling) is only supported on '
        'cuda version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is '
        'instead supported for common inference criteria.')
    parser.add_argument(
        "--device",
        type=str,
        default="cuda",
        choices=["cuda", "cpu"],
        help='device type for vLLM execution, supporting CUDA and CPU.')
    parser.add_argument(
        "--enable-prefix-caching",
        action='store_true',
        help="enable automatic prefix caching for vLLM backend.")
    parser.add_argument("--enable-chunked-prefill",
                        action='store_true',
                        help="enable chunked prefill for vLLM backend.")
    parser.add_argument('--max-num-batched-tokens',
                        type=int,
                        default=None,
                        help='maximum number of batched tokens per '
                        'iteration')
    parser.add_argument('--download-dir',
                        type=str,
                        default=None,
                        help='directory to download and load the weights, '
                        'default to the default cache dir of huggingface')
    args = parser.parse_args()
    if args.tokenizer is None:
        args.tokenizer = args.model
    if args.dataset is None:
        assert args.input_len is not None
        assert args.output_len is not None
    else:
        assert args.input_len is None

    if args.backend == "vllm":
        if args.hf_max_batch_size is not None:
            raise ValueError("HF max batch size is only for HF backend.")
    elif args.backend == "hf":
        if args.hf_max_batch_size is None:
            raise ValueError("HF max batch size is required for HF backend.")
        if args.quantization is not None:
            raise ValueError("Quantization is only for vLLM backend.")
    elif args.backend == "mii":
        if args.dtype != "auto":
            raise ValueError("dtype must be auto for MII backend.")
        if args.n != 1:
            raise ValueError("n must be 1 for MII backend.")
        if args.use_beam_search:
            raise ValueError("Beam search is not supported for MII backend.")
        if args.quantization is not None:
            raise ValueError("Quantization is only for vLLM backend.")
        if args.hf_max_batch_size is not None:
            raise ValueError("HF max batch size is only for HF backend.")
        if args.tokenizer != args.model:
            raise ValueError("Tokenizer must be the same as the model for MII "
                             "backend.")
    main(args)
import argparse
import contextlib
import os
import sys
from typing import Optional

import torch
import torch.nn.functional as F

from vllm import _custom_ops as ops
from vllm.model_executor.layers.quantization.aqlm import (
    dequantize_weight, generic_dequantize_gemm, get_int_dtype,
    optimized_dequantize_gemm)

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def torch_mult(
    input: torch.Tensor,  # [..., in_features]
    weights: torch.Tensor,
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
) -> torch.Tensor:
    """Plain (non-quantized) linear multiply, used as a baseline."""
    output = F.linear(input, weights)
    return output


def dequant_out_scale(
    input: torch.Tensor,  # [..., in_features]
    codes: torch.IntTensor,  # [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  # [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
    output_partition_sizes: torch.IntTensor,
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    """Dequantize then multiply, applying scales to the GEMM output
    (or to the weights when a bias is present)."""

    weights = ops.aqlm_dequant(codes, codebooks, output_partition_sizes)

    if bias is None:
        output = F.linear(input, weights, bias)
        orig_shape = output.shape
        flattened_output = output.view(-1, output.size(-1))
        f_scales = scales.view(-1, scales.shape[0])
        b_scales = f_scales.expand(flattened_output.shape[0], -1)
        flattened_output *= b_scales
        return flattened_output.view(orig_shape)
    else:
        b_scales = scales.view(scales.shape[:-3] + (-1, )).expand(
            -1, weights.shape[1])
        weights *= b_scales
        return F.linear(input, weights, bias)


def dequant_weight_scale(
    input: torch.Tensor,  # [..., in_features]
    codes: torch.IntTensor,  # [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  # [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
    output_partition_sizes: torch.IntTensor,
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    """Dequantize then multiply, applying scales to the weights first."""

    weights = ops.aqlm_dequant(codes, codebooks, output_partition_sizes)

    b_scales = scales.view(scales.shape[:-3] + (-1, )).expand(
        -1, weights.shape[1])
    weights *= b_scales
    return F.linear(input, weights, bias)


def dequant_no_scale(
    input: torch.Tensor,  # [..., in_features]
    codes: torch.IntTensor,  # [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  # [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
    output_partition_sizes: torch.IntTensor,
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    """Dequantize then multiply without applying any scales (lower bound)."""

    weights = ops.aqlm_dequant(codes, codebooks, output_partition_sizes)

    return F.linear(input, weights, bias)


# Compare the optimized 1x16 and 2x8 cuda decompression/dequant kernels against
# the generic pytorch version.
# Just visual comparison.
def dequant_test(k: int, parts: torch.Tensor, nbooks: int, bits: int) -> None:
    """Fill codes/codebooks with recognizable patterns and print both the
    PyTorch and CUDA dequantization results for eyeball comparison."""

    n = parts.sum().item()

    device = torch.device('cuda:0')

    code_range = (1 << bits) // 2
    ingroups = 8

    codes = torch.randint(-code_range,
                          code_range,
                          size=(n, k // ingroups, nbooks),
                          dtype=get_int_dtype(bits),
                          device=device)

    codebooks = torch.randn(size=(parts.shape[0] * nbooks, 1 << bits, 1, 8),
                            dtype=torch.float16,
                            device=device)

    # Encode position information into the codebook entries so the decoded
    # weights are visually traceable (digit place per book).
    count = 0
    for index in range(16):
        for i in range(8):
            for book in range(nbooks):
                codebooks[book, index, 0, i] = count * (10**book)
                count += 1

    print("codes shape", codes.shape)

    for i in range(16):
        for book in range(nbooks):
            codes[0, i, book] = i
            codes[0, -i, book] = i

    weights = dequantize_weight(codes, codebooks, None)
    weights2 = ops.aqlm_dequant(codes, codebooks, parts)

    print("weights shape:", weights.shape)
    print("weights2 shape:", weights2.shape)

    print("weights are:", weights)
    print("weights2 are:", weights2)

    print("first 128 weights are", weights[0, 0:128].to(torch.int32))
    print("first 128 weights2 are:", weights2[0, 0:128].to(torch.int32))

    print("last 128 weights are", weights[0, -128:])
    print("last 128 weights2 are:", weights2[0, -128:])


def main():
    """Parse CLI args and either run the dequant tester or sweep the
    benchmark grid, writing a CSV of per-method timings."""

    parser = argparse.ArgumentParser(description="Benchmark aqlm performance.")

    # Add arguments
    parser.add_argument("--nbooks",
                        type=int,
                        default=1,
                        help="Number of codebooks (default: 1)")
    parser.add_argument("--bits",
                        type=int,
                        default=16,
                        help="Number of bits per code element (default: 16)")
    parser.add_argument(
        "--test",
        type=bool,
        default=False,
        help="Run the decompression/dequant tester rather than benchmarking "
        "(default: False)")

    # Parse the arguments
    args = parser.parse_args()

    # Extract values
    nbooks = args.nbooks
    bits = args.bits

    if args.test:
        dequant_test(4096, torch.tensor((4096, )), nbooks, bits)
        return

    # Otherwise, benchmark.
    methods = [
        ops.aqlm_gemm,
        dequant_out_scale,
        generic_dequantize_gemm,
        optimized_dequantize_gemm,
        dequant_weight_scale,
        torch_mult,
        dequant_no_scale,
    ]

    filename = f"./aqlm_benchmark_{nbooks}x{bits}.csv"
    print(f"writing benchmarks to file {filename}")
    # redirect_stdout restores sys.stdout even if a benchmark raises,
    # unlike a bare `sys.stdout = f` assignment.
    with open(filename, "w") as f, contextlib.redirect_stdout(f):

        print('m | k | n | n parts', end='')
        for method in methods:
            print(f" | {method.__name__.replace('_', ' ')} (µs)", end='')
        print('')

        # These are reasonable prefill sizes.
        ksandpartions = ((4096, (4096, 4096, 4096)), (4096, (4096, )),
                         (4096, (11008, 11008)), (11008, (4096, )))

        # reasonable ranges for m.
        for m in [
                1, 2, 4, 8, 10, 12, 14, 16, 24, 32, 48, 52, 56, 64, 96, 112,
                128, 256, 512, 1024, 1536, 2048, 3072, 4096
        ]:
            # Progress indicator goes to the real stdout, not the CSV.
            print(f'{m}', file=sys.__stdout__)
            for ksp in ksandpartions:
                run_grid(m, ksp[0], torch.tensor(ksp[1]), nbooks, bits,
                         methods)


def run_grid(m: int, k: int, parts: torch.Tensor, nbooks: int, bits: int,
             methods):
    """Time every method at one (m, k, parts) point and print a CSV row
    with the best time (µs) observed per method."""

    # I didn't see visible improvements from increasing these, but feel free :)
    num_warmup_trials = 1
    num_trials = 1

    num_calls = 100

    # warmup.
    for method in methods:
        for _ in range(num_warmup_trials):
            run_timing(
                num_calls=num_calls,
                m=m,
                k=k,
                parts=parts,
                nbooks=nbooks,
                bits=bits,
                method=method,
            )

    n = parts.sum().item()
    print(f'{m} | {k} | {n} | {parts.tolist()}', end='')

    for method in methods:
        best_time_us = 1e20
        for _ in range(num_trials):
            kernel_dur_ms = run_timing(
                num_calls=num_calls,
                m=m,
                k=k,
                parts=parts,
                nbooks=nbooks,
                bits=bits,
                method=method,
            )

            kernel_dur_us = 1000 * kernel_dur_ms

            if kernel_dur_us < best_time_us:
                best_time_us = kernel_dur_us

        # Report the best trial, not whichever trial happened to run last.
        print(f' | {best_time_us:.0f}', end='')

    print('')


def run_timing(num_calls: int, m: int, k: int, parts: torch.Tensor,
               nbooks: int, bits: int, method) -> float:
    """Build random AQLM inputs and return the mean per-call duration (ms)
    of `method` over `num_calls` CUDA-event-timed invocations."""

    n = parts.sum().item()

    device = torch.device('cuda:0')

    input = torch.randn((1, m, k), dtype=torch.float16, device=device)

    code_range = (1 << bits) // 2
    ingroups = 8

    codes = torch.randint(-code_range,
                          code_range,
                          size=(n, k // ingroups, nbooks),
                          dtype=get_int_dtype(bits),
                          device=device)

    codebooks = torch.randn(size=(parts.shape[0] * nbooks, 1 << bits, 1, 8),
                            dtype=torch.float16,
                            device=device)

    scales = torch.randn(size=(n, 1, 1, 1), dtype=torch.float16, device=device)

    # for comparison to just a pytorch mult.
    weights = torch.randn((n, k), dtype=torch.float16, device=device)

    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    start_event.record()

    if method is torch_mult:
        for i in range(num_calls):
            torch_mult(input, weights, scales)
    else:
        for i in range(num_calls):
            method(input, codes, codebooks, scales, parts, None)

    end_event.record()
    end_event.synchronize()

    dur_ms = start_event.elapsed_time(end_event) / num_calls
    return dur_ms


if __name__ == "__main__":
    sys.exit(main())
import argparse
import json
import os
import sys

import torch
import torch.nn.functional as F
import triton
from tqdm import tqdm

from vllm.model_executor.layers.fused_moe import (fused_moe,
                                                  get_config_file_name)

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def main(dtype: str):
    """Tune the fused_moe kernel over a range of batch sizes."""
    method = fused_moe
    for bs in [
            1, 2, 4, 8, 16, 24, 32, 48, 64, 96, 128, 256, 512, 1024, 1536,
            2048, 3072, 4096
    ]:
        run_grid(bs, method=method, dtype=dtype)


def run_grid(bs, method, dtype: str):
    """Exhaustively time Triton tile configs for one batch size and persist
    the fastest config into the fused_moe config file for this shape."""
    # Mixtral-8x7B geometry (d_model / experts / intermediate size).
    d_model = 4096
    num_total_experts = 8
    top_k = 2
    tp_size = 2
    model_intermediate_size = 14336
    num_layers = 32
    num_calls = 100

    num_warmup_trials = 1
    num_trials = 1

    configs = []

    for block_size_n in [32, 64, 128, 256]:
        for block_size_m in [16, 32, 64, 128, 256]:
            for block_size_k in [64, 128, 256]:
                for group_size_m in [1, 16, 32, 64]:
                    for num_warps in [4, 8]:
                        for num_stages in [2, 3, 4, 5]:
                            configs.append({
                                "BLOCK_SIZE_M": block_size_m,
                                "BLOCK_SIZE_N": block_size_n,
                                "BLOCK_SIZE_K": block_size_k,
                                "GROUP_SIZE_M": group_size_m,
                                "num_warps": num_warps,
                                "num_stages": num_stages,
                            })

    best_config = None
    best_time_us = 1e20

    print(f'{tp_size=} {bs=}')

    for config in tqdm(configs):
        # warmup
        try:
            for _ in range(num_warmup_trials):
                run_timing(
                    num_calls=num_calls,
                    bs=bs,
                    d_model=d_model,
                    num_total_experts=num_total_experts,
                    top_k=top_k,
                    tp_size=tp_size,
                    model_intermediate_size=model_intermediate_size,
                    method=method,
                    config=config,
                    dtype=dtype,
                )
        except triton.runtime.autotuner.OutOfResources:
            # Config does not fit on this GPU; skip it.
            continue

        # trial
        for _ in range(num_trials):
            kernel_dur_ms = run_timing(
                num_calls=num_calls,
                bs=bs,
                d_model=d_model,
                num_total_experts=num_total_experts,
                top_k=top_k,
                tp_size=tp_size,
                model_intermediate_size=model_intermediate_size,
                method=method,
                config=config,
                dtype=dtype,
            )

            kernel_dur_us = 1000 * kernel_dur_ms
            model_dur_ms = kernel_dur_ms * num_layers

            if kernel_dur_us < best_time_us:
                best_config = config
                best_time_us = kernel_dur_us

            tqdm.write(
                f'{kernel_dur_us=:.1f} {model_dur_ms=:.1f}'
                f' {bs=} {tp_size=} {top_k=} {num_total_experts=} '
                f'{d_model=} {model_intermediate_size=} {num_layers=}')

    print("best_time_us", best_time_us)
    print("best_config", best_config)

    # holds Dict[str, Dict[str, int]]
    filename = get_config_file_name(num_total_experts,
                                    model_intermediate_size // tp_size,
                                    "float8" if dtype == "float8" else None)
    print(f"writing config to file {filename}")
    # Merge with any configs tuned for other batch sizes in a prior run.
    existing_content = {}
    if os.path.exists(filename):
        with open(filename, "r") as f:
            existing_content = json.load(f)
    existing_content[str(bs)] = best_config
    with open(filename, "w") as f:
        json.dump(existing_content, f, indent=4)
        f.write("\n")


def run_timing(num_calls: int, bs: int, d_model: int, num_total_experts: int,
               top_k: int, tp_size: int, model_intermediate_size: int, method,
               config, dtype: str) -> float:
    """Run `method` num_calls times with random MoE inputs and return the
    mean per-call duration in milliseconds (CUDA-event timed)."""
    shard_intermediate_size = model_intermediate_size // tp_size

    hidden_states = torch.rand(
        (bs, d_model),
        device="cuda:0",
        dtype=torch.float16,
    )

    w1 = torch.rand(
        (num_total_experts, 2 * shard_intermediate_size, d_model),
        device=hidden_states.device,
        dtype=hidden_states.dtype,
    )

    w2 = torch.rand(
        (num_total_experts, d_model, shard_intermediate_size),
        device=hidden_states.device,
        dtype=hidden_states.dtype,
    )

    w1_scale = None
    w2_scale = None
    a1_scale = None
    a2_scale = None

    if dtype == "float8":
        w1 = w1.to(torch.float8_e4m3fn)
        w2 = w2.to(torch.float8_e4m3fn)
        w1_scale = torch.ones(num_total_experts,
                              device=hidden_states.device,
                              dtype=torch.float32)
        w2_scale = torch.ones(num_total_experts,
                              device=hidden_states.device,
                              dtype=torch.float32)
        a1_scale = torch.ones(1,
                              device=hidden_states.device,
                              dtype=torch.float32)
        a2_scale = torch.ones(1,
                              device=hidden_states.device,
                              dtype=torch.float32)

    gating_output = F.softmax(torch.rand(
        (num_calls, bs, num_total_experts),
        device=hidden_states.device,
        dtype=torch.float32,
    ),
                              dim=-1)

    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    start_event.record()
    for i in range(num_calls):
        hidden_states = method(
            hidden_states=hidden_states,
            w1=w1,
            w2=w2,
            w1_scale=w1_scale,
            w2_scale=w2_scale,
            a1_scale=a1_scale,
            a2_scale=a2_scale,
            gating_output=gating_output[i],
            # Use the parameter instead of a hard-coded literal so callers
            # tuning a different top_k are actually honored.
            topk=top_k,
            renormalize=True,
            inplace=True,
            override_config=config,
            use_fp8=dtype == "float8",
        )
    end_event.record()
    end_event.synchronize()

    dur_ms = start_event.elapsed_time(end_event) / num_calls
    return dur_ms


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog='benchmark_mixtral_moe',
        description='Benchmark and tune the fused_moe kernel',
    )
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['float8', 'float16'],
        help='Data type used for fused_moe kernel computations',
    )
    args = parser.parse_args()
    sys.exit(main(args.dtype))
mode 100644 index 0000000..ca7967c --- /dev/null +++ b/benchmarks/kernels/benchmark_paged_attention.py @@ -0,0 +1,211 @@ +import argparse +import random +import time +from typing import Optional + +import torch + +from vllm import _custom_ops as ops +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, create_kv_caches_with_random + +NUM_BLOCKS = 1024 +PARTITION_SIZE = 512 + + +@torch.inference_mode() +def main( + version: str, + num_seqs: int, + seq_len: int, + num_query_heads: int, + num_kv_heads: int, + head_size: int, + use_alibi: bool, + block_size: int, + dtype: torch.dtype, + seed: int, + do_profile: bool, + device: str = "cuda", + kv_cache_dtype: Optional[str] = None, +) -> None: + random.seed(seed) + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + + scale = float(1.0 / (head_size**0.5)) + query = torch.empty(num_seqs, + num_query_heads, + head_size, + dtype=dtype, + device=device) + query.uniform_(-scale, scale) + + assert num_query_heads % num_kv_heads == 0 + alibi_slopes = None + if use_alibi: + alibi_slopes = torch.randn(num_query_heads, + dtype=torch.float, + device=device) + + seq_lens = [seq_len for _ in range(num_seqs)] + max_seq_len = max(seq_lens) + seq_lens = torch.tensor(seq_lens, dtype=torch.int, device=device) + + # Create the block tables. + max_num_blocks_per_seq = (max_seq_len + block_size - 1) // block_size + block_tables = [] + for _ in range(num_seqs): + block_table = [ + random.randint(0, NUM_BLOCKS - 1) + for _ in range(max_num_blocks_per_seq) + ] + block_tables.append(block_table) + block_tables = torch.tensor(block_tables, dtype=torch.int, device=device) + + # Create the KV cache. + key_caches, value_caches = create_kv_caches_with_random(NUM_BLOCKS, + block_size, + 1, + num_kv_heads, + head_size, + kv_cache_dtype, + dtype, + device=device) + key_cache, value_cache = key_caches[0], value_caches[0] + + # Prepare for the paged attention kernel. 
+ output = torch.empty_like(query) + if version == "v2": + num_partitions = ((max_seq_len + PARTITION_SIZE - 1) // PARTITION_SIZE) + tmp_output = torch.empty( + size=(num_seqs, num_query_heads, num_partitions, head_size), + dtype=output.dtype, + device=output.device, + ) + exp_sums = torch.empty( + size=(num_seqs, num_query_heads, num_partitions), + dtype=torch.float32, + device=output.device, + ) + max_logits = torch.empty_like(exp_sums) + + def run_cuda_benchmark(num_iters: int, profile: bool = False) -> float: + torch.cuda.synchronize() + if profile: + torch.cuda.cudart().cudaProfilerStart() + start_time = time.perf_counter() + + # Using default kv_scale + kv_scale = 1.0 + + for _ in range(num_iters): + if version == "v1": + ops.paged_attention_v1( + output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + seq_lens, + block_size, + max_seq_len, + alibi_slopes, + kv_cache_dtype, + kv_scale, + ) + elif version == "v2": + ops.paged_attention_v2( + output, + exp_sums, + max_logits, + tmp_output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + seq_lens, + block_size, + max_seq_len, + alibi_slopes, + kv_cache_dtype, + kv_scale, + ) + else: + raise ValueError(f"Invalid version: {version}") + torch.cuda.synchronize() + + end_time = time.perf_counter() + if profile: + torch.cuda.cudart().cudaProfilerStart() + return (end_time - start_time) / num_iters + + # Warmup. + print("Warming up...") + run_benchmark = run_cuda_benchmark + run_benchmark(num_iters=3, profile=False) + + # Benchmark. 
+ if do_profile: + latency = run_benchmark(num_iters=1, profile=True) + else: + latency = run_benchmark(num_iters=100, profile=False) + print(f"Kernel running time: {latency * 1000000:.3f} us") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="Benchmark the paged attention kernel.") + parser.add_argument("--version", + type=str, + choices=["v1", "v2"], + default="v2") + parser.add_argument("--batch-size", type=int, default=8) + parser.add_argument("--seq_len", type=int, default=4096) + parser.add_argument("--num-query-heads", type=int, default=64) + parser.add_argument("--num-kv-heads", type=int, default=8) + parser.add_argument("--head-size", + type=int, + choices=[64, 80, 96, 112, 128, 256], + default=128) + parser.add_argument("--block-size", type=int, choices=[16, 32], default=16) + parser.add_argument("--use-alibi", action="store_true") + parser.add_argument("--dtype", + type=str, + choices=["half", "bfloat16", "float"], + default="half") + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--profile", action="store_true") + parser.add_argument( + "--kv-cache-dtype", + type=str, + choices=["auto", "fp8"], + default="auto", + help= + 'Data type for kv cache storage. If "auto", will use model data type. ' + 'FP8_E5M2 (without scaling) is only supported on cuda version greater ' + 'than 11.8. 
On ROCm (AMD GPU), FP8_E4M3 is instead supported for ' + 'common inference criteria.') + args = parser.parse_args() + print(args) + + if args.num_query_heads % args.num_kv_heads != 0: + raise ValueError("num_query_heads must be divisible by num_kv_heads") + main( + version=args.version, + num_seqs=args.batch_size, + seq_len=args.seq_len, + num_query_heads=args.num_query_heads, + num_kv_heads=args.num_kv_heads, + head_size=args.head_size, + block_size=args.block_size, + use_alibi=args.use_alibi, + dtype=STR_DTYPE_TO_TORCH_DTYPE[args.dtype], + seed=args.seed, + do_profile=args.profile, + kv_cache_dtype=args.kv_cache_dtype, + ) diff --git a/benchmarks/kernels/benchmark_rope.py b/benchmarks/kernels/benchmark_rope.py new file mode 100644 index 0000000..9188e81 --- /dev/null +++ b/benchmarks/kernels/benchmark_rope.py @@ -0,0 +1,121 @@ +import argparse +from itertools import accumulate +from typing import Optional + +import nvtx +import torch + +from vllm.model_executor.layers.rotary_embedding import get_rope + + +def benchmark_rope_kernels_multi_lora( + is_neox_style: bool, + batch_size: int, + seq_len: int, + num_heads: int, + head_size: int, + rotary_dim: Optional[int], + dtype: torch.dtype, + seed: int, + device: str, + max_position: int = 8192, + base: int = 10000, +) -> None: + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + if rotary_dim is None: + rotary_dim = head_size + # silulating serving 4 LoRAs + scaling_factors = [1, 2, 4, 8] + # batched RoPE can take multiple scaling factors + batched_rope = get_rope(head_size, rotary_dim, max_position, base, + is_neox_style, { + "type": "linear", + "factor": tuple(scaling_factors) + }) + # non-batched RoPE takes only one scaling factor, we create multiple + # instances to simulate the same behavior + non_batched_ropes = [] + for scaling_factor in scaling_factors: + non_batched_ropes.append( + get_rope(head_size, rotary_dim, max_position, 
base, is_neox_style, + { + "type": "linear", + "factor": (scaling_factor, ) + })) + + positions = torch.randint(0, max_position, (batch_size, seq_len)) + query = torch.randn(batch_size, + seq_len, + num_heads * head_size, + dtype=dtype) + key = torch.randn_like(query) + + # create query offsets for batched RoPE, we concat multiple kv cache + # together and each query needs to find the right kv cache of its type + offset_map = torch.tensor( + list( + accumulate([0] + [ + max_position * scaling_factor * 2 + for scaling_factor in scaling_factors[:-1] + ]))) + query_types = torch.randint(0, + len(scaling_factors), (batch_size, seq_len), + device=device) + # map query types to offsets + query_offsets = offset_map[query_types] + # the kernel takes flattened offsets + flatten_offsets = query_offsets.flatten() + + # batched queries of the same type together for non-batched RoPE + queries = [query[query_types == i] for i in range(len(scaling_factors))] + keys = [key[query_types == i] for i in range(len(scaling_factors))] + packed_qkr = zip(queries, keys, non_batched_ropes) + # synchronize before start timing + torch.cuda.synchronize() + with nvtx.annotate("non-batched", color="yellow"): + for q, k, r in packed_qkr: + r.forward(positions, q, k) + torch.cuda.synchronize() + with nvtx.annotate("batched", color="green"): + batched_rope.forward(positions, query, key, flatten_offsets) + torch.cuda.synchronize() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="Benchmark the rotary embedding kernels.") + parser.add_argument("--is-neox-style", type=bool, default=True) + parser.add_argument("--batch-size", type=int, default=16) + parser.add_argument("--seq-len", type=int, default=512) + parser.add_argument("--num-heads", type=int, default=8) + parser.add_argument("--head-size", + type=int, + choices=[64, 80, 96, 112, 128, 256], + default=128) + parser.add_argument("--rotary-dim", type=int, choices=[16, 32], default=32) + 
parser.add_argument("--dtype", + type=str, + choices=["bfloat16", "float"], + default="float") + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--device", + type=str, + choices=["cuda:0", "cuda:1"], + default="cuda:0") + args = parser.parse_args() + print(args) + + benchmark_rope_kernels_multi_lora( + is_neox_style=args.is_neox_style, + batch_size=args.batch_size, + seq_len=args.seq_len, + num_heads=args.num_heads, + head_size=args.head_size, + rotary_dim=args.rotary_dim, + dtype=getattr(torch, args.dtype), + seed=args.seed, + device=args.device, + ) diff --git a/benchmarks/launch_tgi_server.sh b/benchmarks/launch_tgi_server.sh new file mode 100755 index 0000000..64d3c4f --- /dev/null +++ b/benchmarks/launch_tgi_server.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +PORT=8000 +MODEL=$1 +TOKENS=$2 + +docker run --gpus all --shm-size 1g -p $PORT:80 \ + -v $PWD/data:/data \ + ghcr.io/huggingface/text-generation-inference:1.4.0 \ + --model-id $MODEL \ + --sharded false \ + --max-input-length 1024 \ + --max-total-tokens 2048 \ + --max-best-of 5 \ + --max-concurrent-requests 5000 \ + --max-batch-total-tokens $TOKENS diff --git a/benchmarks/sonnet.txt b/benchmarks/sonnet.txt new file mode 100644 index 0000000..34c444e --- /dev/null +++ b/benchmarks/sonnet.txt @@ -0,0 +1,518 @@ +FROM fairest creatures we desire increase, +That thereby beauty's rose might never die, +But as the riper should by time decease, +His tender heir might bear his memory: +But thou, contracted to thine own bright eyes, +Feed'st thy light'st flame with self-substantial fuel, +Making a famine where abundance lies, +Thyself thy foe, to thy sweet self too cruel. +Thou that art now the world's fresh ornament +And only herald to the gaudy spring, +Within thine own bud buriest thy content +And, tender churl, makest waste in niggarding. +Pity the world, or else this glutton be, +To eat the world's due, by the grave and thee. 
+When forty winters shall beseige thy brow, +And dig deep trenches in thy beauty's field, +Thy youth's proud livery, so gazed on now, +Will be a tatter'd weed, of small worth held: +Then being ask'd where all thy beauty lies, +Where all the treasure of thy lusty days, +To say, within thine own deep-sunken eyes, +Were an all-eating shame and thriftless praise. +How much more praise deserved thy beauty's use, +If thou couldst answer 'This fair child of mine +Shall sum my count and make my old excuse,' +Proving his beauty by succession thine! +This were to be new made when thou art old, +And see thy blood warm when thou feel'st it cold. +Look in thy glass, and tell the face thou viewest +Now is the time that face should form another; +Whose fresh repair if now thou not renewest, +Thou dost beguile the world, unbless some mother. +For where is she so fair whose unear'd womb +Disdains the tillage of thy husbandry? +Or who is he so fond will be the tomb +Of his self-love, to stop posterity? +Thou art thy mother's glass, and she in thee +Calls back the lovely April of her prime: +So thou through windows of thine age shall see +Despite of wrinkles this thy golden time. +But if thou live, remember'd not to be, +Die single, and thine image dies with thee. +Unthrifty loveliness, why dost thou spend +Upon thyself thy beauty's legacy? +Nature's bequest gives nothing but doth lend, +And being frank she lends to those are free. +Then, beauteous niggard, why dost thou abuse +The bounteous largess given thee to give? +Profitless usurer, why dost thou use +So great a sum of sums, yet canst not live? +For having traffic with thyself alone, +Thou of thyself thy sweet self dost deceive. +Then how, when nature calls thee to be gone, +What acceptable audit canst thou leave? +Thy unused beauty must be tomb'd with thee, +Which, used, lives th' executor to be. 
+Those hours, that with gentle work did frame +The lovely gaze where every eye doth dwell, +Will play the tyrants to the very same +And that unfair which fairly doth excel: +For never-resting time leads summer on +To hideous winter and confounds him there; +Sap cheque'd with frost and lusty leaves quite gone, +Beauty o'ersnow'd and bareness every where: +Then, were not summer's distillation left, +A liquid prisoner pent in walls of glass, +Beauty's effect with beauty were bereft, +Nor it nor no remembrance what it was: +But flowers distill'd though they with winter meet, +Leese but their show; their substance still lives sweet. +Then let not winter's ragged hand deface +In thee thy summer, ere thou be distill'd: +Make sweet some vial; treasure thou some place +With beauty's treasure, ere it be self-kill'd. +That use is not forbidden usury, +Which happies those that pay the willing loan; +That's for thyself to breed another thee, +Or ten times happier, be it ten for one; +Ten times thyself were happier than thou art, +If ten of thine ten times refigured thee: +Then what could death do, if thou shouldst depart, +Leaving thee living in posterity? +Be not self-will'd, for thou art much too fair +To be death's conquest and make worms thine heir. +Lo! in the orient when the gracious light +Lifts up his burning head, each under eye +Doth homage to his new-appearing sight, +Serving with looks his sacred majesty; +And having climb'd the steep-up heavenly hill, +Resembling strong youth in his middle age, +yet mortal looks adore his beauty still, +Attending on his golden pilgrimage; +But when from highmost pitch, with weary car, +Like feeble age, he reeleth from the day, +The eyes, 'fore duteous, now converted are +From his low tract and look another way: +So thou, thyself out-going in thy noon, +Unlook'd on diest, unless thou get a son. +Music to hear, why hear'st thou music sadly? +Sweets with sweets war not, joy delights in joy. 
+Why lovest thou that which thou receivest not gladly, +Or else receivest with pleasure thine annoy? +If the true concord of well-tuned sounds, +By unions married, do offend thine ear, +They do but sweetly chide thee, who confounds +In singleness the parts that thou shouldst bear. +Mark how one string, sweet husband to another, +Strikes each in each by mutual ordering, +Resembling sire and child and happy mother +Who all in one, one pleasing note do sing: +Whose speechless song, being many, seeming one, +Sings this to thee: 'thou single wilt prove none.' +Is it for fear to wet a widow's eye +That thou consumest thyself in single life? +Ah! if thou issueless shalt hap to die. +The world will wail thee, like a makeless wife; +The world will be thy widow and still weep +That thou no form of thee hast left behind, +When every private widow well may keep +By children's eyes her husband's shape in mind. +Look, what an unthrift in the world doth spend +Shifts but his place, for still the world enjoys it; +But beauty's waste hath in the world an end, +And kept unused, the user so destroys it. +No love toward others in that bosom sits +That on himself such murderous shame commits. +For shame! deny that thou bear'st love to any, +Who for thyself art so unprovident. +Grant, if thou wilt, thou art beloved of many, +But that thou none lovest is most evident; +For thou art so possess'd with murderous hate +That 'gainst thyself thou stick'st not to conspire. +Seeking that beauteous roof to ruinate +Which to repair should be thy chief desire. +O, change thy thought, that I may change my mind! +Shall hate be fairer lodged than gentle love? +Be, as thy presence is, gracious and kind, +Or to thyself at least kind-hearted prove: +Make thee another self, for love of me, +That beauty still may live in thine or thee. 
+As fast as thou shalt wane, so fast thou growest +In one of thine, from that which thou departest; +And that fresh blood which youngly thou bestowest +Thou mayst call thine when thou from youth convertest. +Herein lives wisdom, beauty and increase: +Without this, folly, age and cold decay: +If all were minded so, the times should cease +And threescore year would make the world away. +Let those whom Nature hath not made for store, +Harsh featureless and rude, barrenly perish: +Look, whom she best endow'd she gave the more; +Which bounteous gift thou shouldst in bounty cherish: +She carved thee for her seal, and meant thereby +Thou shouldst print more, not let that copy die. +When I do count the clock that tells the time, +And see the brave day sunk in hideous night; +When I behold the violet past prime, +And sable curls all silver'd o'er with white; +When lofty trees I see barren of leaves +Which erst from heat did canopy the herd, +And summer's green all girded up in sheaves +Borne on the bier with white and bristly beard, +Then of thy beauty do I question make, +That thou among the wastes of time must go, +Since sweets and beauties do themselves forsake +And die as fast as they see others grow; +And nothing 'gainst Time's scythe can make defence +Save breed, to brave him when he takes thee hence. +O, that you were yourself! but, love, you are +No longer yours than you yourself here live: +Against this coming end you should prepare, +And your sweet semblance to some other give. +So should that beauty which you hold in lease +Find no determination: then you were +Yourself again after yourself's decease, +When your sweet issue your sweet form should bear. +Who lets so fair a house fall to decay, +Which husbandry in honour might uphold +Against the stormy gusts of winter's day +And barren rage of death's eternal cold? +O, none but unthrifts! Dear my love, you know +You had a father: let your son say so. 
+Not from the stars do I my judgment pluck; +And yet methinks I have astronomy, +But not to tell of good or evil luck, +Of plagues, of dearths, or seasons' quality; +Nor can I fortune to brief minutes tell, +Pointing to each his thunder, rain and wind, +Or say with princes if it shall go well, +By oft predict that I in heaven find: +But from thine eyes my knowledge I derive, +And, constant stars, in them I read such art +As truth and beauty shall together thrive, +If from thyself to store thou wouldst convert; +Or else of thee this I prognosticate: +Thy end is truth's and beauty's doom and date. +When I consider every thing that grows +Holds in perfection but a little moment, +That this huge stage presenteth nought but shows +Whereon the stars in secret influence comment; +When I perceive that men as plants increase, +Cheered and cheque'd even by the self-same sky, +Vaunt in their youthful sap, at height decrease, +And wear their brave state out of memory; +Then the conceit of this inconstant stay +Sets you most rich in youth before my sight, +Where wasteful Time debateth with Decay, +To change your day of youth to sullied night; +And all in war with Time for love of you, +As he takes from you, I engraft you new. +But wherefore do not you a mightier way +Make war upon this bloody tyrant, Time? +And fortify yourself in your decay +With means more blessed than my barren rhyme? +Now stand you on the top of happy hours, +And many maiden gardens yet unset +With virtuous wish would bear your living flowers, +Much liker than your painted counterfeit: +So should the lines of life that life repair, +Which this, Time's pencil, or my pupil pen, +Neither in inward worth nor outward fair, +Can make you live yourself in eyes of men. +To give away yourself keeps yourself still, +And you must live, drawn by your own sweet skill. +Who will believe my verse in time to come, +If it were fill'd with your most high deserts? 
+Though yet, heaven knows, it is but as a tomb +Which hides your life and shows not half your parts. +If I could write the beauty of your eyes +And in fresh numbers number all your graces, +The age to come would say 'This poet lies: +Such heavenly touches ne'er touch'd earthly faces.' +So should my papers yellow'd with their age +Be scorn'd like old men of less truth than tongue, +And your true rights be term'd a poet's rage +And stretched metre of an antique song: +But were some child of yours alive that time, +You should live twice; in it and in my rhyme. +Shall I compare thee to a summer's day? +Thou art more lovely and more temperate: +Rough winds do shake the darling buds of May, +And summer's lease hath all too short a date: +Sometime too hot the eye of heaven shines, +And often is his gold complexion dimm'd; +And every fair from fair sometime declines, +By chance or nature's changing course untrimm'd; +But thy eternal summer shall not fade +Nor lose possession of that fair thou owest; +Nor shall Death brag thou wander'st in his shade, +When in eternal lines to time thou growest: +So long as men can breathe or eyes can see, +So long lives this and this gives life to thee. +Devouring Time, blunt thou the lion's paws, +And make the earth devour her own sweet brood; +Pluck the keen teeth from the fierce tiger's jaws, +And burn the long-lived phoenix in her blood; +Make glad and sorry seasons as thou fleets, +And do whate'er thou wilt, swift-footed Time, +To the wide world and all her fading sweets; +But I forbid thee one most heinous crime: +O, carve not with thy hours my love's fair brow, +Nor draw no lines there with thine antique pen; +Him in thy course untainted do allow +For beauty's pattern to succeeding men. +Yet, do thy worst, old Time: despite thy wrong, +My love shall in my verse ever live young. 
+A woman's face with Nature's own hand painted +Hast thou, the master-mistress of my passion; +A woman's gentle heart, but not acquainted +With shifting change, as is false women's fashion; +An eye more bright than theirs, less false in rolling, +Gilding the object whereupon it gazeth; +A man in hue, all 'hues' in his controlling, +Much steals men's eyes and women's souls amazeth. +And for a woman wert thou first created; +Till Nature, as she wrought thee, fell a-doting, +And by addition me of thee defeated, +By adding one thing to my purpose nothing. +But since she prick'd thee out for women's pleasure, +Mine be thy love and thy love's use their treasure. +So is it not with me as with that Muse +Stirr'd by a painted beauty to his verse, +Who heaven itself for ornament doth use +And every fair with his fair doth rehearse +Making a couplement of proud compare, +With sun and moon, with earth and sea's rich gems, +With April's first-born flowers, and all things rare +That heaven's air in this huge rondure hems. +O' let me, true in love, but truly write, +And then believe me, my love is as fair +As any mother's child, though not so bright +As those gold candles fix'd in heaven's air: +Let them say more than like of hearsay well; +I will not praise that purpose not to sell. +My glass shall not persuade me I am old, +So long as youth and thou are of one date; +But when in thee time's furrows I behold, +Then look I death my days should expiate. +For all that beauty that doth cover thee +Is but the seemly raiment of my heart, +Which in thy breast doth live, as thine in me: +How can I then be elder than thou art? +O, therefore, love, be of thyself so wary +As I, not for myself, but for thee will; +Bearing thy heart, which I will keep so chary +As tender nurse her babe from faring ill. +Presume not on thy heart when mine is slain; +Thou gavest me thine, not to give back again. 
+As an unperfect actor on the stage +Who with his fear is put besides his part, +Or some fierce thing replete with too much rage, +Whose strength's abundance weakens his own heart. +So I, for fear of trust, forget to say +The perfect ceremony of love's rite, +And in mine own love's strength seem to decay, +O'ercharged with burden of mine own love's might. +O, let my books be then the eloquence +And dumb presagers of my speaking breast, +Who plead for love and look for recompense +More than that tongue that more hath more express'd. +O, learn to read what silent love hath writ: +To hear with eyes belongs to love's fine wit. +Mine eye hath play'd the painter and hath stell'd +Thy beauty's form in table of my heart; +My body is the frame wherein 'tis held, +And perspective it is the painter's art. +For through the painter must you see his skill, +To find where your true image pictured lies; +Which in my bosom's shop is hanging still, +That hath his windows glazed with thine eyes. +Now see what good turns eyes for eyes have done: +Mine eyes have drawn thy shape, and thine for me +Are windows to my breast, where-through the sun +Delights to peep, to gaze therein on thee; +Yet eyes this cunning want to grace their art; +They draw but what they see, know not the heart. +Let those who are in favour with their stars +Of public honour and proud titles boast, +Whilst I, whom fortune of such triumph bars, +Unlook'd for joy in that I honour most. +Great princes' favourites their fair leaves spread +But as the marigold at the sun's eye, +And in themselves their pride lies buried, +For at a frown they in their glory die. +The painful warrior famoused for fight, +After a thousand victories once foil'd, +Is from the book of honour razed quite, +And all the rest forgot for which he toil'd: +Then happy I, that love and am beloved +Where I may not remove nor be removed. 
+Lord of my love, to whom in vassalage +Thy merit hath my duty strongly knit, +To thee I send this written embassage, +To witness duty, not to show my wit: +Duty so great, which wit so poor as mine +May make seem bare, in wanting words to show it, +But that I hope some good conceit of thine +In thy soul's thought, all naked, will bestow it; +Till whatsoever star that guides my moving +Points on me graciously with fair aspect +And puts apparel on my tatter'd loving, +To show me worthy of thy sweet respect: +Then may I dare to boast how I do love thee; +Till then not show my head where thou mayst prove me. +Weary with toil, I haste me to my bed, +The dear repose for limbs with travel tired; +But then begins a journey in my head, +To work my mind, when body's work's expired: +For then my thoughts, from far where I abide, +Intend a zealous pilgrimage to thee, +And keep my drooping eyelids open wide, +Looking on darkness which the blind do see +Save that my soul's imaginary sight +Presents thy shadow to my sightless view, +Which, like a jewel hung in ghastly night, +Makes black night beauteous and her old face new. +Lo! thus, by day my limbs, by night my mind, +For thee and for myself no quiet find. +How can I then return in happy plight, +That am debarr'd the benefit of rest? +When day's oppression is not eased by night, +But day by night, and night by day, oppress'd? +And each, though enemies to either's reign, +Do in consent shake hands to torture me; +The one by toil, the other to complain +How far I toil, still farther off from thee. +I tell the day, to please them thou art bright +And dost him grace when clouds do blot the heaven: +So flatter I the swart-complexion'd night, +When sparkling stars twire not thou gild'st the even. +But day doth daily draw my sorrows longer +And night doth nightly make grief's strength seem stronger. 
+When, in disgrace with fortune and men's eyes, +I all alone beweep my outcast state +And trouble deal heaven with my bootless cries +And look upon myself and curse my fate, +Wishing me like to one more rich in hope, +Featured like him, like him with friends possess'd, +Desiring this man's art and that man's scope, +With what I most enjoy contented least; +Yet in these thoughts myself almost despising, +Haply I think on thee, and then my state, +Like to the lark at break of day arising +From sullen earth, sings hymns at heaven's gate; +For thy sweet love remember'd such wealth brings +That then I scorn to change my state with kings. +When to the sessions of sweet silent thought +I summon up remembrance of things past, +I sigh the lack of many a thing I sought, +And with old woes new wail my dear time's waste: +Then can I drown an eye, unused to flow, +For precious friends hid in death's dateless night, +And weep afresh love's long since cancell'd woe, +And moan the expense of many a vanish'd sight: +Then can I grieve at grievances foregone, +And heavily from woe to woe tell o'er +The sad account of fore-bemoaned moan, +Which I new pay as if not paid before. +But if the while I think on thee, dear friend, +All losses are restored and sorrows end. +Thy bosom is endeared with all hearts, +Which I by lacking have supposed dead, +And there reigns love and all love's loving parts, +And all those friends which I thought buried. +How many a holy and obsequious tear +Hath dear religious love stol'n from mine eye +As interest of the dead, which now appear +But things removed that hidden in thee lie! +Thou art the grave where buried love doth live, +Hung with the trophies of my lovers gone, +Who all their parts of me to thee did give; +That due of many now is thine alone: +Their images I loved I view in thee, +And thou, all they, hast all the all of me. 
+If thou survive my well-contented day, +When that churl Death my bones with dust shall cover, +And shalt by fortune once more re-survey +These poor rude lines of thy deceased lover, +Compare them with the bettering of the time, +And though they be outstripp'd by every pen, +Reserve them for my love, not for their rhyme, +Exceeded by the height of happier men. +O, then vouchsafe me but this loving thought: +'Had my friend's Muse grown with this growing age, +A dearer birth than this his love had brought, +To march in ranks of better equipage: +But since he died and poets better prove, +Theirs for their style I'll read, his for his love.' +Full many a glorious morning have I seen +Flatter the mountain-tops with sovereign eye, +Kissing with golden face the meadows green, +Gilding pale streams with heavenly alchemy; +Anon permit the basest clouds to ride +With ugly rack on his celestial face, +And from the forlorn world his visage hide, +Stealing unseen to west with this disgrace: +Even so my sun one early morn did shine +With all triumphant splendor on my brow; +But out, alack! he was but one hour mine; +The region cloud hath mask'd him from me now. +Yet him for this my love no whit disdaineth; +Suns of the world may stain when heaven's sun staineth. +Why didst thou promise such a beauteous day, +And make me travel forth without my cloak, +To let base clouds o'ertake me in my way, +Hiding thy bravery in their rotten smoke? +'Tis not enough that through the cloud thou break, +To dry the rain on my storm-beaten face, +For no man well of such a salve can speak +That heals the wound and cures not the disgrace: +Nor can thy shame give physic to my grief; +Though thou repent, yet I have still the loss: +The offender's sorrow lends but weak relief +To him that bears the strong offence's cross. +Ah! but those tears are pearl which thy love sheds, +And they are rich and ransom all ill deeds. 
+No more be grieved at that which thou hast done: +Roses have thorns, and silver fountains mud; +Clouds and eclipses stain both moon and sun, +And loathsome canker lives in sweetest bud. +All men make faults, and even I in this, +Authorizing thy trespass with compare, +Myself corrupting, salving thy amiss, +Excusing thy sins more than thy sins are; +For to thy sensual fault I bring in sense-- +Thy adverse party is thy advocate-- +And 'gainst myself a lawful plea commence: +Such civil war is in my love and hate +That I an accessary needs must be +To that sweet thief which sourly robs from me. +Let me confess that we two must be twain, +Although our undivided loves are one: +So shall those blots that do with me remain +Without thy help by me be borne alone. +In our two loves there is but one respect, +Though in our lives a separable spite, +Which though it alter not love's sole effect, +Yet doth it steal sweet hours from love's delight. +I may not evermore acknowledge thee, +Lest my bewailed guilt should do thee shame, +Nor thou with public kindness honour me, +Unless thou take that honour from thy name: +But do not so; I love thee in such sort +As, thou being mine, mine is thy good report. +As a decrepit father takes delight +To see his active child do deeds of youth, +So I, made lame by fortune's dearest spite, +Take all my comfort of thy worth and truth. +For whether beauty, birth, or wealth, or wit, +Or any of these all, or all, or more, +Entitled in thy parts do crowned sit, +I make my love engrafted to this store: +So then I am not lame, poor, nor despised, +Whilst that this shadow doth such substance give +That I in thy abundance am sufficed +And by a part of all thy glory live. +Look, what is best, that best I wish in thee: +This wish I have; then ten times happy me! 
\ No newline at end of file diff --git a/build_musa.sh b/build_musa.sh new file mode 100644 index 0000000..b831c83 --- /dev/null +++ b/build_musa.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -x +set -e + +pip install -r requirements-build.txt +pip install -r requirements-musa.txt + +export VLLM_TARGET_DEVICE=musa +export CMAKE_BUILD_TYPE=Debug +export VERBOSE=1 +export VLLM_ATTENTION_BACKEND=FLASH_ATTN + +rm -rf build +rm -rf dist +rm -rf vllm.egg-info +pip uninstall -y vllm + +python setup.py bdist_wheel +pip install dist/* \ No newline at end of file diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake new file mode 100644 index 0000000..0cf3776 --- /dev/null +++ b/cmake/cpu_extension.cmake @@ -0,0 +1,90 @@ +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# +# Define environment variables for special configurations +# +if(DEFINED ENV{VLLM_CPU_AVX512BF16}) + set(ENABLE_AVX512BF16 ON) +endif() + +include_directories("${CMAKE_SOURCE_DIR}/csrc") + +# +# Check the compile flags +# +list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-DVLLM_CPU_EXTENSION") + +execute_process(COMMAND cat /proc/cpuinfo + RESULT_VARIABLE CPUINFO_RET + OUTPUT_VARIABLE CPUINFO) + +if (NOT CPUINFO_RET EQUAL 0) + message(FATAL_ERROR "Failed to check CPU features via /proc/cpuinfo") +endif() + +function (find_isa CPUINFO TARGET OUT) + string(FIND ${CPUINFO} ${TARGET} ISA_FOUND) + if(NOT ISA_FOUND EQUAL -1) + set(${OUT} ON PARENT_SCOPE) + else() + set(${OUT} OFF PARENT_SCOPE) + endif() +endfunction() + +find_isa(${CPUINFO} "avx512f" AVX512_FOUND) + +if (AVX512_FOUND) + list(APPEND CXX_COMPILE_FLAGS + "-mavx512f" + "-mavx512vl" + "-mavx512bw" + "-mavx512dq") + + find_isa(${CPUINFO} "avx512_bf16" AVX512BF16_FOUND) + if (AVX512BF16_FOUND OR ENABLE_AVX512BF16) + if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND + CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.3) + list(APPEND CXX_COMPILE_FLAGS "-mavx512bf16") + else() + message(WARNING "Disable AVX512-BF16 ISA support, requires gcc/g++ >= 12.3") + 
endif() + else() + message(WARNING "Disable AVX512-BF16 ISA support, no avx512_bf16 found in local CPU flags." " If cross-compilation is required, please set env VLLM_CPU_AVX512BF16=1.") + endif() +else() + message(FATAL_ERROR "vLLM CPU backend requires AVX512 ISA support.") +endif() + +message(STATUS "CPU extension compile flags: ${CXX_COMPILE_FLAGS}") + + +# +# Define extension targets +# + +# +# _C extension +# +set(VLLM_EXT_SRC + "csrc/cpu/activation.cpp" + "csrc/cpu/attention.cpp" + "csrc/cpu/cache.cpp" + "csrc/cpu/layernorm.cpp" + "csrc/cpu/pos_encoding.cpp" + "csrc/cpu/pybind.cpp") + +define_gpu_extension_target( + _C + DESTINATION vllm + LANGUAGE CXX + SOURCES ${VLLM_EXT_SRC} + COMPILE_FLAGS ${CXX_COMPILE_FLAGS} + WITH_SOABI +) + +add_custom_target(default) +message(STATUS "Enabling C extension.") +add_dependencies(default _C) + diff --git a/cmake/hipify.py b/cmake/hipify.py new file mode 100755 index 0000000..340e41c --- /dev/null +++ b/cmake/hipify.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 + +# +# A command line tool for running pytorch's hipify preprocessor on CUDA +# source files. +# +# See https://github.com/ROCm/hipify_torch +# and /utils/hipify/hipify_python.py +# + +import argparse +import os +import shutil + +from torch.utils.hipify.hipify_python import hipify + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + # Project directory where all the source + include files live. + parser.add_argument( + "-p", + "--project_dir", + help="The project directory.", + ) + + # Directory where hipified files are written. + parser.add_argument( + "-o", + "--output_dir", + help="The output directory.", + ) + + # Source files to convert. + parser.add_argument("sources", + help="Source files to hipify.", + nargs="*", + default=[]) + + args = parser.parse_args() + + # Limit include scope to project_dir only + includes = [os.path.join(args.project_dir, '*')] + + # Get absolute path for all source files. 
+ extra_files = [os.path.abspath(s) for s in args.sources] + + # Copy sources from project directory to output directory. + # The directory might already exist to hold object files so we ignore that. + shutil.copytree(args.project_dir, args.output_dir, dirs_exist_ok=True) + + hipify_result = hipify(project_directory=args.project_dir, + output_directory=args.output_dir, + header_include_dirs=[], + includes=includes, + extra_files=extra_files, + show_detailed=True, + is_pytorch_extension=True, + hipify_extra_files_only=True) + + hipified_sources = [] + for source in args.sources: + s_abs = os.path.abspath(source) + hipified_s_abs = (hipify_result[s_abs].hipified_path if + (s_abs in hipify_result + and hipify_result[s_abs].hipified_path is not None) + else s_abs) + hipified_sources.append(hipified_s_abs) + + assert (len(hipified_sources) == len(args.sources)) + + # Print hipified source files. + print("\n".join(hipified_sources)) diff --git a/cmake/utils.cmake b/cmake/utils.cmake new file mode 100644 index 0000000..7c71673 --- /dev/null +++ b/cmake/utils.cmake @@ -0,0 +1,354 @@ +# +# Attempt to find the python package that uses the same python executable as +# `EXECUTABLE` and is one of the `SUPPORTED_VERSIONS`. +# +macro (find_python_from_executable EXECUTABLE SUPPORTED_VERSIONS) + file(REAL_PATH ${EXECUTABLE} EXECUTABLE) + set(Python_EXECUTABLE ${EXECUTABLE}) + find_package(Python COMPONENTS Interpreter Development.Module) + if (NOT Python_FOUND) + message(FATAL_ERROR "Unable to find python matching: ${EXECUTABLE}.") + endif() + set(_VER "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}") + set(_SUPPORTED_VERSIONS_LIST ${SUPPORTED_VERSIONS} ${ARGN}) + if (NOT _VER IN_LIST _SUPPORTED_VERSIONS_LIST) + message(FATAL_ERROR + "Python version (${_VER}) is not one of the supported versions: " + "${_SUPPORTED_VERSIONS_LIST}.") + endif() + message(STATUS "Found python matching: ${EXECUTABLE}.") +endmacro() + +# +# Run `EXPR` in python. 
The standard output of python is stored in `OUT` and +# has trailing whitespace stripped. If an error is encountered when running +# python, a fatal message `ERR_MSG` is issued. +# +function (run_python OUT EXPR ERR_MSG) + execute_process( + COMMAND + "${Python_EXECUTABLE}" "-c" "${EXPR}" + OUTPUT_VARIABLE PYTHON_OUT + RESULT_VARIABLE PYTHON_ERROR_CODE + ERROR_VARIABLE PYTHON_STDERR + OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(NOT PYTHON_ERROR_CODE EQUAL 0) + message(FATAL_ERROR "${ERR_MSG}: ${PYTHON_STDERR}") + endif() + set(${OUT} ${PYTHON_OUT} PARENT_SCOPE) +endfunction() + +# Run `EXPR` in python after importing `PKG`. Use the result of this to extend +# `CMAKE_PREFIX_PATH` so the torch cmake configuration can be imported. +macro (append_cmake_prefix_path PKG EXPR) + run_python(_PREFIX_PATH + "import ${PKG}; print(${EXPR})" "Failed to locate ${PKG} path") + list(APPEND CMAKE_PREFIX_PATH ${_PREFIX_PATH}) +endmacro() + +# +# Add a target named `hipify${NAME}` that runs the hipify preprocessor on a set +# of CUDA source files. The names of the corresponding "hipified" sources are +# stored in `OUT_SRCS`. +# +function (hipify_sources_target OUT_SRCS NAME ORIG_SRCS) + # + # Split into C++ and non-C++ (i.e. CUDA) sources. + # + set(SRCS ${ORIG_SRCS}) + set(CXX_SRCS ${ORIG_SRCS}) + list(FILTER SRCS EXCLUDE REGEX "\.(cc)|(cpp)$") + list(FILTER CXX_SRCS INCLUDE REGEX "\.(cc)|(cpp)$") + + # + # Generate ROCm/HIP source file names from CUDA file names. + # Since HIP files are generated code, they will appear in the build area + # `CMAKE_CURRENT_BINARY_DIR` directory rather than the original csrc dir. 
+ # + set(HIP_SRCS) + foreach (SRC ${SRCS}) + string(REGEX REPLACE "\.cu$" "\.hip" SRC ${SRC}) + string(REGEX REPLACE "cuda" "hip" SRC ${SRC}) + list(APPEND HIP_SRCS "${CMAKE_CURRENT_BINARY_DIR}/${SRC}") + endforeach() + + set(CSRC_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/csrc) + add_custom_target( + hipify${NAME} + COMMAND ${CMAKE_SOURCE_DIR}/cmake/hipify.py -p ${CMAKE_SOURCE_DIR}/csrc -o ${CSRC_BUILD_DIR} ${SRCS} + DEPENDS ${CMAKE_SOURCE_DIR}/cmake/hipify.py ${SRCS} + BYPRODUCTS ${HIP_SRCS} + COMMENT "Running hipify on ${NAME} extension source files.") + + # Swap out original extension sources with hipified sources. + list(APPEND HIP_SRCS ${CXX_SRCS}) + set(${OUT_SRCS} ${HIP_SRCS} PARENT_SCOPE) +endfunction() + +# +# Get additional GPU compiler flags from torch. +# +function (get_torch_gpu_compiler_flags OUT_GPU_FLAGS GPU_LANG) + if (${GPU_LANG} STREQUAL "CUDA") + # + # Get common NVCC flags from torch. + # + run_python(GPU_FLAGS + "from torch.utils.cpp_extension import COMMON_NVCC_FLAGS; print(';'.join(COMMON_NVCC_FLAGS))" + "Failed to determine torch nvcc compiler flags") + + if (CUDA_VERSION VERSION_GREATER_EQUAL 11.8) + list(APPEND GPU_FLAGS "-DENABLE_FP8_E5M2") + endif() + if (CUDA_VERSION VERSION_GREATER_EQUAL 12.0) + list(REMOVE_ITEM GPU_FLAGS + "-D__CUDA_NO_HALF_OPERATORS__" + "-D__CUDA_NO_HALF_CONVERSIONS__" + "-D__CUDA_NO_BFLOAT16_CONVERSIONS__" + "-D__CUDA_NO_HALF2_OPERATORS__") + endif() + + elseif(${GPU_LANG} STREQUAL "HIP") + # + # Get common HIP/HIPCC flags from torch. + # + run_python(GPU_FLAGS + "import torch.utils.cpp_extension as t; print(';'.join(t.COMMON_HIP_FLAGS + t.COMMON_HIPCC_FLAGS))" + "Failed to determine torch hipcc compiler flags") + + list(APPEND GPU_FLAGS + "-DUSE_ROCM" + "-DENABLE_FP8_E4M3" + "-U__HIP_NO_HALF_CONVERSIONS__" + "-U__HIP_NO_HALF_OPERATORS__" + "-fno-gpu-rdc") + + endif() + set(${OUT_GPU_FLAGS} ${GPU_FLAGS} PARENT_SCOPE) +endfunction() + +# Macro for converting a `gencode` version number to a cmake version number. 
+macro(string_to_ver OUT_VER IN_STR) + string(REGEX REPLACE "\([0-9]+\)\([0-9]\)" "\\1.\\2" ${OUT_VER} ${IN_STR}) +endmacro() + +# +# Override the GPU architectures detected by cmake/torch and filter them by +# `GPU_SUPPORTED_ARCHES`. Sets the final set of architectures in +# `GPU_ARCHES`. +# +# Note: this is defined as a macro since it updates `CMAKE_CUDA_FLAGS`. +# +macro(override_gpu_arches GPU_ARCHES GPU_LANG GPU_SUPPORTED_ARCHES) + set(_GPU_SUPPORTED_ARCHES_LIST ${GPU_SUPPORTED_ARCHES} ${ARGN}) + message(STATUS "${GPU_LANG} supported arches: ${_GPU_SUPPORTED_ARCHES_LIST}") + + if (${GPU_LANG} STREQUAL "HIP") + # + # `GPU_ARCHES` controls the `--offload-arch` flags. + # `CMAKE_HIP_ARCHITECTURES` is set up by torch and can be controlled + # via the `PYTORCH_ROCM_ARCH` env variable. + # + + # + # Find the intersection of the supported + detected architectures to + # set the module architecture flags. + # + set(${GPU_ARCHES}) + foreach (_ARCH ${CMAKE_HIP_ARCHITECTURES}) + if (_ARCH IN_LIST _GPU_SUPPORTED_ARCHES_LIST) + list(APPEND ${GPU_ARCHES} ${_ARCH}) + endif() + endforeach() + + if(NOT ${GPU_ARCHES}) + message(FATAL_ERROR + "None of the detected ROCm architectures: ${CMAKE_HIP_ARCHITECTURES} is" + " supported. Supported ROCm architectures are: ${_GPU_SUPPORTED_ARCHES_LIST}.") + endif() + + elseif(${GPU_LANG} STREQUAL "CUDA") + # + # Setup/process CUDA arch flags. + # + # The torch cmake setup hardcodes the detected architecture flags in + # `CMAKE_CUDA_FLAGS`. Since `CMAKE_CUDA_FLAGS` is a "global" variable, it + # can't modified on a per-target basis, e.g. for the `punica` extension. + # So, all the `-gencode` flags need to be extracted and removed from + # `CMAKE_CUDA_FLAGS` for processing so they can be passed by another method. + # Since it's not possible to use `target_compiler_options` for adding target + # specific `-gencode` arguments, the target's `CUDA_ARCHITECTURES` property + # must be used instead. 
This requires repackaging the architecture flags + # into a format that cmake expects for `CUDA_ARCHITECTURES`. + # + # This is a bit fragile in that it depends on torch using `-gencode` as opposed + # to one of the other nvcc options to specify architectures. + # + # Note: torch uses the `TORCH_CUDA_ARCH_LIST` environment variable to override + # detected architectures. + # + message(DEBUG "initial CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}") + + # Extract all `-gencode` flags from `CMAKE_CUDA_FLAGS` + string(REGEX MATCHALL "-gencode arch=[^ ]+" _CUDA_ARCH_FLAGS + ${CMAKE_CUDA_FLAGS}) + + # Remove all `-gencode` flags from `CMAKE_CUDA_FLAGS` since they will be modified + # and passed back via the `CUDA_ARCHITECTURES` property. + string(REGEX REPLACE "-gencode arch=[^ ]+ *" "" CMAKE_CUDA_FLAGS + ${CMAKE_CUDA_FLAGS}) + + # If this error is triggered, it might mean that torch has changed how it sets + # up nvcc architecture code generation flags. + if (NOT _CUDA_ARCH_FLAGS) + message(FATAL_ERROR + "Could not find any architecture related code generation flags in " + "CMAKE_CUDA_FLAGS. (${CMAKE_CUDA_FLAGS})") + endif() + + message(DEBUG "final CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}") + message(DEBUG "arch flags: ${_CUDA_ARCH_FLAGS}") + + # Initialize the architecture lists to empty. + set(${GPU_ARCHES}) + + # Process each `gencode` flag. + foreach(_ARCH ${_CUDA_ARCH_FLAGS}) + # For each flag, extract the version number and whether it refers to PTX + # or native code. + # Note: if a regex matches then `CMAKE_MATCH_1` holds the binding + # for that match. + + string(REGEX MATCH "arch=compute_\([0-9]+a?\)" _COMPUTE ${_ARCH}) + if (_COMPUTE) + set(_COMPUTE ${CMAKE_MATCH_1}) + endif() + + string(REGEX MATCH "code=sm_\([0-9]+a?\)" _SM ${_ARCH}) + if (_SM) + set(_SM ${CMAKE_MATCH_1}) + endif() + + string(REGEX MATCH "code=compute_\([0-9]+a?\)" _CODE ${_ARCH}) + if (_CODE) + set(_CODE ${CMAKE_MATCH_1}) + endif() + + # Make sure the virtual architecture can be matched. 
+ if (NOT _COMPUTE) + message(FATAL_ERROR + "Could not determine virtual architecture from: ${_ARCH}.") + endif() + + # One of sm_ or compute_ must exist. + if ((NOT _SM) AND (NOT _CODE)) + message(FATAL_ERROR + "Could not determine a codegen architecture from: ${_ARCH}.") + endif() + + if (_SM) + # -real suffix let CMake to only generate elf code for the kernels. + # we want this, otherwise the added ptx (default) will increase binary size. + set(_VIRT "-real") + set(_CODE_ARCH ${_SM}) + else() + # -virtual suffix let CMake to generate ptx code for the kernels. + set(_VIRT "-virtual") + set(_CODE_ARCH ${_CODE}) + endif() + + # Check if the current version is in the supported arch list. + string_to_ver(_CODE_VER ${_CODE_ARCH}) + if (NOT _CODE_VER IN_LIST _GPU_SUPPORTED_ARCHES_LIST) + message(STATUS "discarding unsupported CUDA arch ${_CODE_VER}.") + continue() + endif() + + # Add it to the arch list. + list(APPEND ${GPU_ARCHES} "${_CODE_ARCH}${_VIRT}") + endforeach() + endif() + message(STATUS "${GPU_LANG} target arches: ${${GPU_ARCHES}}") +endmacro() + +# +# Define a target named `GPU_MOD_NAME` for a single extension. The +# arguments are: +# +# DESTINATION - Module destination directory. +# LANGUAGE - The GPU language for this module, e.g CUDA, HIP, +# etc. +# SOURCES - List of source files relative to CMakeLists.txt +# directory. +# +# Optional arguments: +# +# ARCHITECTURES - A list of target GPU architectures in cmake +# format. +# Refer `CMAKE_CUDA_ARCHITECTURES` documentation +# and `CMAKE_HIP_ARCHITECTURES` for more info. +# ARCHITECTURES will use cmake's defaults if +# not provided. +# COMPILE_FLAGS - Extra compiler flags passed to NVCC/hip. +# INCLUDE_DIRECTORIES - Extra include directories. +# LIBRARIES - Extra link libraries. +# WITH_SOABI - Generate library with python SOABI suffix name. +# +# Note: optimization level/debug info is set via cmake build type. 
+# +function (define_gpu_extension_target GPU_MOD_NAME) + cmake_parse_arguments(PARSE_ARGV 1 + GPU + "WITH_SOABI" + "DESTINATION;LANGUAGE" + "SOURCES;ARCHITECTURES;COMPILE_FLAGS;INCLUDE_DIRECTORIES;LIBRARIES") + + # Add hipify preprocessing step when building with HIP/ROCm. + if (GPU_LANGUAGE STREQUAL "HIP") + hipify_sources_target(GPU_SOURCES ${GPU_MOD_NAME} "${GPU_SOURCES}") + endif() + + if (GPU_WITH_SOABI) + set(GPU_WITH_SOABI WITH_SOABI) + else() + set(GPU_WITH_SOABI) + endif() + + Python_add_library(${GPU_MOD_NAME} MODULE "${GPU_SOURCES}" ${GPU_WITH_SOABI}) + + if (GPU_LANGUAGE STREQUAL "HIP") + # Make this target dependent on the hipify preprocessor step. + add_dependencies(${GPU_MOD_NAME} hipify${GPU_MOD_NAME}) + endif() + + if (GPU_ARCHITECTURES) + set_target_properties(${GPU_MOD_NAME} PROPERTIES + ${GPU_LANGUAGE}_ARCHITECTURES "${GPU_ARCHITECTURES}") + endif() + + set_property(TARGET ${GPU_MOD_NAME} PROPERTY CXX_STANDARD 17) + + target_compile_options(${GPU_MOD_NAME} PRIVATE + $<$<COMPILE_LANGUAGE:${GPU_LANGUAGE}>:${GPU_COMPILE_FLAGS}>) + + target_compile_definitions(${GPU_MOD_NAME} PRIVATE + "-DTORCH_EXTENSION_NAME=${GPU_MOD_NAME}") + + target_include_directories(${GPU_MOD_NAME} PRIVATE csrc + ${GPU_INCLUDE_DIRECTORIES}) + + target_link_libraries(${GPU_MOD_NAME} PRIVATE torch ${torch_python_LIBRARY} + ${GPU_LIBRARIES}) + + # Don't use `TORCH_LIBRARIES` for CUDA since it pulls in a bunch of + # dependencies that are not necessary and may not be installed. 
+ if (GPU_LANGUAGE STREQUAL "CUDA") + target_link_libraries(${GPU_MOD_NAME} PRIVATE ${CUDA_CUDA_LIB} + ${CUDA_LIBRARIES}) + else() + target_link_libraries(${GPU_MOD_NAME} PRIVATE ${TORCH_LIBRARIES}) + endif() + + install(TARGETS ${GPU_MOD_NAME} LIBRARY DESTINATION ${GPU_DESTINATION}) +endfunction() diff --git a/collect_env.py b/collect_env.py new file mode 100644 index 0000000..1ecfeb8 --- /dev/null +++ b/collect_env.py @@ -0,0 +1,721 @@ +# ruff: noqa +# code borrowed from https://github.com/pytorch/pytorch/blob/main/torch/utils/collect_env.py + +# Unlike the rest of the PyTorch this file must be python2 compliant. +# This script outputs relevant system environment info +# Run it with `python collect_env.py` or `python -m torch.utils.collect_env` +import datetime +import locale +import os +import re +import subprocess +import sys +from collections import namedtuple + +try: + import torch + TORCH_AVAILABLE = True +except (ImportError, NameError, AttributeError, OSError): + TORCH_AVAILABLE = False + +# System Environment Information +SystemEnv = namedtuple( + 'SystemEnv', + [ + 'torch_version', + 'is_debug_build', + 'cuda_compiled_version', + 'gcc_version', + 'clang_version', + 'cmake_version', + 'os', + 'libc_version', + 'python_version', + 'python_platform', + 'is_cuda_available', + 'cuda_runtime_version', + 'cuda_module_loading', + 'nvidia_driver_version', + 'nvidia_gpu_models', + 'cudnn_version', + 'pip_version', # 'pip' or 'pip3' + 'pip_packages', + 'conda_packages', + 'hip_compiled_version', + 'hip_runtime_version', + 'miopen_runtime_version', + 'caching_allocator_config', + 'is_xnnpack_available', + 'cpu_info', + 'rocm_version', # vllm specific field + 'neuron_sdk_version', # vllm specific field + 'vllm_version', # vllm specific field + 'vllm_build_flags', # vllm specific field + 'gpu_topo', # vllm specific field + ]) + +DEFAULT_CONDA_PATTERNS = { + "torch", + "numpy", + "cudatoolkit", + "soumith", + "mkl", + "magma", + "triton", + "optree", + "nccl", +} + 
+DEFAULT_PIP_PATTERNS = { + "torch", + "numpy", + "mypy", + "flake8", + "triton", + "optree", + "onnx", + "nccl", +} + + +def run(command): + """Return (return-code, stdout, stderr).""" + shell = True if type(command) is str else False + p = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=shell) + raw_output, raw_err = p.communicate() + rc = p.returncode + if get_platform() == 'win32': + enc = 'oem' + else: + enc = locale.getpreferredencoding() + output = raw_output.decode(enc) + err = raw_err.decode(enc) + return rc, output.strip(), err.strip() + + +def run_and_read_all(run_lambda, command): + """Run command using run_lambda; reads and returns entire output if rc is 0.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out + + +def run_and_parse_first_match(run_lambda, command, regex): + """Run command using run_lambda, returns the first regex match if it exists.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + match = re.search(regex, out) + if match is None: + return None + return match.group(1) + + +def run_and_return_first_line(run_lambda, command): + """Run command using run_lambda and returns first line if output is not empty.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out.split('\n')[0] + + +def get_conda_packages(run_lambda, patterns=None): + if patterns is None: + patterns = DEFAULT_CONDA_PATTERNS + conda = os.environ.get('CONDA_EXE', 'conda') + out = run_and_read_all(run_lambda, "{} list".format(conda)) + if out is None: + return out + + return "\n".join(line for line in out.splitlines() + if not line.startswith("#") and any(name in line + for name in patterns)) + + +def get_gcc_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)') + + +def get_clang_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'clang --version', + r'clang version (.*)') + + +def get_cmake_version(run_lambda): 
+ return run_and_parse_first_match(run_lambda, 'cmake --version', + r'cmake (.*)') + + +def get_nvidia_driver_version(run_lambda): + if get_platform() == 'darwin': + cmd = 'kextstat | grep -i cuda' + return run_and_parse_first_match(run_lambda, cmd, + r'com[.]nvidia[.]CUDA [(](.*?)[)]') + smi = get_nvidia_smi() + return run_and_parse_first_match(run_lambda, smi, + r'Driver Version: (.*?) ') + + +def get_gpu_info(run_lambda): + if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr( + torch.version, 'hip') and torch.version.hip is not None): + if TORCH_AVAILABLE and torch.cuda.is_available(): + if torch.version.hip is not None: + prop = torch.cuda.get_device_properties(0) + if hasattr(prop, "gcnArchName"): + gcnArch = " ({})".format(prop.gcnArchName) + else: + gcnArch = "NoGCNArchNameOnOldPyTorch" + else: + gcnArch = "" + return torch.cuda.get_device_name(None) + gcnArch + return None + smi = get_nvidia_smi() + uuid_regex = re.compile(r' \(UUID: .+?\)') + rc, out, _ = run_lambda(smi + ' -L') + if rc != 0: + return None + # Anonymize GPUs by removing their UUID + return re.sub(uuid_regex, '', out) + + +def get_running_cuda_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'nvcc --version', + r'release .+ V(.*)') + + +def get_cudnn_version(run_lambda): + """Return a list of libcudnn.so; it's hard to tell which one is being used.""" + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%") + where_cmd = os.path.join(system_root, 'System32', 'where') + cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path) + elif get_platform() == 'darwin': + # CUDA libraries and drivers can be found in /usr/local/cuda/. See + # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install + # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac + # Use CUDNN_LIBRARY when cudnn library is installed elsewhere. 
+ cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*' + else: + cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev' + rc, out, _ = run_lambda(cudnn_cmd) + # find will return 1 if there are permission errors or if not found + if len(out) == 0 or (rc != 1 and rc != 0): + l = os.environ.get('CUDNN_LIBRARY') + if l is not None and os.path.isfile(l): + return os.path.realpath(l) + return None + files_set = set() + for fn in out.split('\n'): + fn = os.path.realpath(fn) # eliminate symbolic links + if os.path.isfile(fn): + files_set.add(fn) + if not files_set: + return None + # Alphabetize the result because the order is non-deterministic otherwise + files = sorted(files_set) + if len(files) == 1: + return files[0] + result = '\n'.join(files) + return 'Probably one of the following:\n{}'.format(result) + + +def get_nvidia_smi(): + # Note: nvidia-smi is currently available only on Windows and Linux + smi = 'nvidia-smi' + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + program_files_root = os.environ.get('PROGRAMFILES', + 'C:\\Program Files') + legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', + 'NVSMI', smi) + new_path = os.path.join(system_root, 'System32', smi) + smis = [new_path, legacy_path] + for candidate_smi in smis: + if os.path.exists(candidate_smi): + smi = '"{}"'.format(candidate_smi) + break + return smi + + +def get_rocm_version(run_lambda): + """Returns the ROCm version if available, otherwise 'N/A'.""" + return run_and_parse_first_match(run_lambda, 'hipcc --version', + r'HIP version: (\S+)') + + +def get_neuron_sdk_version(run_lambda): + # Adapted from your install script + try: + result = run_lambda(["neuron-ls"]) + return result if result[0] == 0 else 'N/A' + except Exception: + return 'N/A' + + +def get_vllm_version(): + try: + import vllm + return vllm.__version__ + except ImportError: + return 'N/A' + + +def summarize_vllm_build_flags(): + # This could be a static method if the 
flags are constant, or dynamic if you need to check environment variables, etc. + return 'CUDA Archs: {}; ROCm: {}; Neuron: {}'.format( + os.environ.get('TORCH_CUDA_ARCH_LIST', 'Not Set'), + 'Enabled' if os.environ.get('ROCM_HOME') else 'Disabled', + 'Enabled' if os.environ.get('NEURON_CORES') else 'Disabled', + ) + + +def get_gpu_topo(run_lambda): + if get_platform() == 'linux': + return run_and_read_all(run_lambda, 'nvidia-smi topo -m') + return None + + +# example outputs of CPU infos +# * linux +# Architecture: x86_64 +# CPU op-mode(s): 32-bit, 64-bit +# Address sizes: 46 bits physical, 48 bits virtual +# Byte Order: Little Endian +# CPU(s): 128 +# On-line CPU(s) list: 0-127 +# Vendor ID: GenuineIntel +# Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# CPU family: 6 +# Model: 106 +# Thread(s) per core: 2 +# Core(s) per socket: 32 +# Socket(s): 2 +# Stepping: 6 +# BogoMIPS: 5799.78 +# Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr +# sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl +# xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16 +# pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand +# hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced +# fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap +# avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 +# xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq +# avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities +# Virtualization features: +# Hypervisor vendor: KVM +# Virtualization type: full +# Caches (sum of all): +# L1d: 3 MiB (64 instances) +# L1i: 2 MiB (64 instances) +# L2: 80 MiB (64 instances) +# L3: 108 MiB (2 instances) +# NUMA: +# NUMA node(s): 2 +# NUMA node0 CPU(s): 
0-31,64-95 +# NUMA node1 CPU(s): 32-63,96-127 +# Vulnerabilities: +# Itlb multihit: Not affected +# L1tf: Not affected +# Mds: Not affected +# Meltdown: Not affected +# Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown +# Retbleed: Not affected +# Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp +# Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization +# Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence +# Srbds: Not affected +# Tsx async abort: Not affected +# * win32 +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU0 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 +# +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU1 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 + + +def get_cpu_info(run_lambda): + rc, out, err = 0, '', '' + if get_platform() == 'linux': + rc, out, err = run_lambda('lscpu') + elif get_platform() == 'win32': + rc, out, err = run_lambda( + 'wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \ + CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE' + ) + elif get_platform() == 'darwin': + rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string") + cpu_info = 'None' + if rc == 0: + cpu_info = out + else: + cpu_info = err + return cpu_info + + +def get_platform(): + if sys.platform.startswith('linux'): + return 'linux' + elif sys.platform.startswith('win32'): + return 'win32' + elif sys.platform.startswith('cygwin'): + return 'cygwin' + elif sys.platform.startswith('darwin'): + return 'darwin' + else: + return sys.platform + + +def 
get_mac_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', + r'(.*)') + + +def get_windows_version(run_lambda): + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic') + findstr_cmd = os.path.join(system_root, 'System32', 'findstr') + return run_and_read_all( + run_lambda, + '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd)) + + +def get_lsb_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'lsb_release -a', + r'Description:\t(.*)') + + +def check_release_file(run_lambda): + return run_and_parse_first_match(run_lambda, 'cat /etc/*-release', + r'PRETTY_NAME="(.*)"') + + +def get_os(run_lambda): + from platform import machine + platform = get_platform() + + if platform == 'win32' or platform == 'cygwin': + return get_windows_version(run_lambda) + + if platform == 'darwin': + version = get_mac_version(run_lambda) + if version is None: + return None + return 'macOS {} ({})'.format(version, machine()) + + if platform == 'linux': + # Ubuntu/Debian based + desc = get_lsb_version(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + # Try reading /etc/*-release + desc = check_release_file(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + return '{} ({})'.format(platform, machine()) + + # Unknown platform + return platform + + +def get_python_platform(): + import platform + return platform.platform() + + +def get_libc_version(): + import platform + if get_platform() != 'linux': + return 'N/A' + return '-'.join(platform.libc_ver()) + + +def get_pip_packages(run_lambda, patterns=None): + """Return `pip list` output. 
Note: will also find conda-installed pytorch and numpy packages.""" + if patterns is None: + patterns = DEFAULT_PIP_PATTERNS + + # People generally have `pip` as `pip` or `pip3` + # But here it is invoked as `python -mpip` + def run_with_pip(pip): + out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"]) + return "\n".join(line for line in out.splitlines() + if any(name in line for name in patterns)) + + pip_version = 'pip3' if sys.version[0] == '3' else 'pip' + out = run_with_pip([sys.executable, '-mpip']) + + return pip_version, out + + +def get_cachingallocator_config(): + ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '') + return ca_config + + +def get_cuda_module_loading_config(): + if TORCH_AVAILABLE and torch.cuda.is_available(): + torch.cuda.init() + config = os.environ.get('CUDA_MODULE_LOADING', '') + return config + else: + return "N/A" + + +def is_xnnpack_available(): + if TORCH_AVAILABLE: + import torch.backends.xnnpack + return str( + torch.backends.xnnpack.enabled) # type: ignore[attr-defined] + else: + return "N/A" + + +def get_env_info(): + run_lambda = run + pip_version, pip_list_output = get_pip_packages(run_lambda) + + if TORCH_AVAILABLE: + version_str = torch.__version__ + debug_mode_str = str(torch.version.debug) + cuda_available_str = str(torch.cuda.is_available()) + cuda_version_str = torch.version.cuda + if not hasattr(torch.version, + 'hip') or torch.version.hip is None: # cuda version + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + else: # HIP version + + def get_version_or_na(cfg, prefix): + _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s] + return _lst[0] if _lst else 'N/A' + + cfg = torch._C._show_config().split('\n') + hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime') + miopen_runtime_version = get_version_or_na(cfg, 'MIOpen') + cuda_version_str = 'N/A' + hip_compiled_version = torch.version.hip + else: + version_str = debug_mode_str = cuda_available_str = 
cuda_version_str = 'N/A' + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + + sys_version = sys.version.replace("\n", " ") + + conda_packages = get_conda_packages(run_lambda) + + rocm_version = get_rocm_version(run_lambda) + neuron_sdk_version = get_neuron_sdk_version(run_lambda) + vllm_version = get_vllm_version() + vllm_build_flags = summarize_vllm_build_flags() + gpu_topo = get_gpu_topo(run_lambda) + + return SystemEnv( + torch_version=version_str, + is_debug_build=debug_mode_str, + python_version='{} ({}-bit runtime)'.format( + sys_version, + sys.maxsize.bit_length() + 1), + python_platform=get_python_platform(), + is_cuda_available=cuda_available_str, + cuda_compiled_version=cuda_version_str, + cuda_runtime_version=get_running_cuda_version(run_lambda), + cuda_module_loading=get_cuda_module_loading_config(), + nvidia_gpu_models=get_gpu_info(run_lambda), + nvidia_driver_version=get_nvidia_driver_version(run_lambda), + cudnn_version=get_cudnn_version(run_lambda), + hip_compiled_version=hip_compiled_version, + hip_runtime_version=hip_runtime_version, + miopen_runtime_version=miopen_runtime_version, + pip_version=pip_version, + pip_packages=pip_list_output, + conda_packages=conda_packages, + os=get_os(run_lambda), + libc_version=get_libc_version(), + gcc_version=get_gcc_version(run_lambda), + clang_version=get_clang_version(run_lambda), + cmake_version=get_cmake_version(run_lambda), + caching_allocator_config=get_cachingallocator_config(), + is_xnnpack_available=is_xnnpack_available(), + cpu_info=get_cpu_info(run_lambda), + rocm_version=rocm_version, + neuron_sdk_version=neuron_sdk_version, + vllm_version=vllm_version, + vllm_build_flags=vllm_build_flags, + gpu_topo=gpu_topo, + ) + + +env_info_fmt = """ +PyTorch version: {torch_version} +Is debug build: {is_debug_build} +CUDA used to build PyTorch: {cuda_compiled_version} +ROCM used to build PyTorch: {hip_compiled_version} + +OS: {os} +GCC version: {gcc_version} +Clang version: 
{clang_version} +CMake version: {cmake_version} +Libc version: {libc_version} + +Python version: {python_version} +Python platform: {python_platform} +Is CUDA available: {is_cuda_available} +CUDA runtime version: {cuda_runtime_version} +CUDA_MODULE_LOADING set to: {cuda_module_loading} +GPU models and configuration: {nvidia_gpu_models} +Nvidia driver version: {nvidia_driver_version} +cuDNN version: {cudnn_version} +HIP runtime version: {hip_runtime_version} +MIOpen runtime version: {miopen_runtime_version} +Is XNNPACK available: {is_xnnpack_available} + +CPU: +{cpu_info} + +Versions of relevant libraries: +{pip_packages} +{conda_packages} +""".strip() + +env_info_fmt += """ +ROCM Version: {rocm_version} +Neuron SDK Version: {neuron_sdk_version} +vLLM Version: {vllm_version} +vLLM Build Flags: +{vllm_build_flags} +GPU Topology: +{gpu_topo} +""".strip() + + +def pretty_str(envinfo): + + def replace_nones(dct, replacement='Could not collect'): + for key in dct.keys(): + if dct[key] is not None: + continue + dct[key] = replacement + return dct + + def replace_bools(dct, true='Yes', false='No'): + for key in dct.keys(): + if dct[key] is True: + dct[key] = true + elif dct[key] is False: + dct[key] = false + return dct + + def prepend(text, tag='[prepend]'): + lines = text.split('\n') + updated_lines = [tag + line for line in lines] + return '\n'.join(updated_lines) + + def replace_if_empty(text, replacement='No relevant packages'): + if text is not None and len(text) == 0: + return replacement + return text + + def maybe_start_on_next_line(string): + # If `string` is multiline, prepend a \n to it. 
+ if string is not None and len(string.split('\n')) > 1: + return '\n{}\n'.format(string) + return string + + mutable_dict = envinfo._asdict() + + # If nvidia_gpu_models is multiline, start on the next line + mutable_dict['nvidia_gpu_models'] = \ + maybe_start_on_next_line(envinfo.nvidia_gpu_models) + + # If the machine doesn't have CUDA, report some fields as 'No CUDA' + dynamic_cuda_fields = [ + 'cuda_runtime_version', + 'nvidia_gpu_models', + 'nvidia_driver_version', + ] + all_cuda_fields = dynamic_cuda_fields + ['cudnn_version'] + all_dynamic_cuda_fields_missing = all(mutable_dict[field] is None + for field in dynamic_cuda_fields) + if TORCH_AVAILABLE and not torch.cuda.is_available( + ) and all_dynamic_cuda_fields_missing: + for field in all_cuda_fields: + mutable_dict[field] = 'No CUDA' + if envinfo.cuda_compiled_version is None: + mutable_dict['cuda_compiled_version'] = 'None' + + # Replace True with Yes, False with No + mutable_dict = replace_bools(mutable_dict) + + # Replace all None objects with 'Could not collect' + mutable_dict = replace_nones(mutable_dict) + + # If either of these are '', replace with 'No relevant packages' + mutable_dict['pip_packages'] = replace_if_empty( + mutable_dict['pip_packages']) + mutable_dict['conda_packages'] = replace_if_empty( + mutable_dict['conda_packages']) + + # Tag conda and pip packages with a prefix + # If they were previously None, they'll show up as ie '[conda] Could not collect' + if mutable_dict['pip_packages']: + mutable_dict['pip_packages'] = prepend( + mutable_dict['pip_packages'], '[{}] '.format(envinfo.pip_version)) + if mutable_dict['conda_packages']: + mutable_dict['conda_packages'] = prepend( + mutable_dict['conda_packages'], '[conda] ') + mutable_dict['cpu_info'] = envinfo.cpu_info + return env_info_fmt.format(**mutable_dict) + + +def get_pretty_env_info(): + return pretty_str(get_env_info()) + + +def main(): + print("Collecting environment information...") + output = get_pretty_env_info() + 
print(output) + + if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr( + torch.utils, '_crash_handler'): + minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR + if sys.platform == "linux" and os.path.exists(minidump_dir): + dumps = [ + os.path.join(minidump_dir, dump) + for dump in os.listdir(minidump_dir) + ] + latest = max(dumps, key=os.path.getctime) + ctime = os.path.getctime(latest) + creation_time = datetime.datetime.fromtimestamp(ctime).strftime( + '%Y-%m-%d %H:%M:%S') + msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \ + "if this is related to your bug please include it when you file a report ***" + print(msg, file=sys.stderr) + + +if __name__ == '__main__': + main() diff --git a/csrc_musa/activation_kernels.mu b/csrc_musa/activation_kernels.mu new file mode 100644 index 0000000..c21a02d --- /dev/null +++ b/csrc_musa/activation_kernels.mu @@ -0,0 +1,161 @@ +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include + +#include "musa_compat.h" +#include "dispatch_utils.h" + +namespace vllm { + +// Activation and gating kernel template. +template +__global__ void act_and_mul_kernel( + scalar_t* __restrict__ out, // [..., d] + const scalar_t* __restrict__ input, // [..., 2, d] + const int d) { + const int64_t token_idx = blockIdx.x; + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]); + const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]); + out[token_idx * d + idx] = ACT_FN(x) * y; + } +} + +template +__device__ __forceinline__ T silu_kernel(const T& x) { + // x * sigmoid(x) + return (T) (((float) x) / (1.0f + expf((float) -x))); +} + +template +__device__ __forceinline__ T gelu_kernel(const T& x) { + // Equivalent to PyTorch GELU with 'none' approximation. 
+ // Refer to: + // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L36-L38 + const float f = (float) x; + constexpr float ALPHA = M_SQRT1_2; + return (T) (f * 0.5f * (1.0f + ::erf(f * ALPHA))); +} + +template +__device__ __forceinline__ T gelu_tanh_kernel(const T& x) { + // Equivalent to PyTorch GELU with 'tanh' approximation. + // Refer to: + // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L25-L30 + const float f = (float) x; + constexpr float BETA = M_SQRT2 * M_2_SQRTPI * 0.5f; + constexpr float KAPPA = 0.044715; + float x_cube = f * f * f; + float inner = BETA * (f + KAPPA * x_cube); + return (T) (0.5f * f * (1.0f + ::tanhf(inner))); +} + +} // namespace vllm + +// Launch activation and gating kernel. +#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL) \ + int d = input.size(-1) / 2; \ + int64_t num_tokens = input.numel() / input.size(-1); \ + dim3 grid(num_tokens); \ + dim3 block(std::min(d, 1024)); \ + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); \ + const musaStream_t stream = at::musa::getCurrentMUSAStream(); \ + VLLM_DISPATCH_FLOATING_TYPES( \ + input.scalar_type(), \ + "act_and_mul_kernel", \ + [&] { \ + vllm::act_and_mul_kernel><<>>( \ + out.data_ptr(), \ + input.data_ptr(), \ + d); \ + }); + +void silu_and_mul( + torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel); +} + +void gelu_and_mul( + torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel); +} + +void gelu_tanh_and_mul( + torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel); +} + +namespace vllm { + +// Element-wise activation kernel template. 
+template +__global__ void activation_kernel( + scalar_t* __restrict__ out, // [..., d] + const scalar_t* __restrict__ input, // [..., d] + const int d) { + const int64_t token_idx = blockIdx.x; + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]); + out[token_idx * d + idx] = ACT_FN(x); + } +} + +} // namespace vllm + +// Launch element-wise activation kernel. +#define LAUNCH_ACTIVATION_KERNEL(KERNEL) \ + int d = input.size(-1); \ + int64_t num_tokens = input.numel() / d; \ + dim3 grid(num_tokens); \ + dim3 block(std::min(d, 1024)); \ + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); \ + const musaStream_t stream = at::musa::getCurrentMUSAStream(); \ + VLLM_DISPATCH_FLOATING_TYPES( \ + input.scalar_type(), \ + "activation_kernel", \ + [&] { \ + vllm::activation_kernel><<>>( \ + out.data_ptr(), \ + input.data_ptr(), \ + d); \ + }); + +namespace vllm { + +template +__device__ __forceinline__ T gelu_new_kernel(const T& x) { + const float x3 = (float) (x * x * x); + const T t = (T) tanhf((T) (0.79788456f * (float) (x + (T) (0.044715f * x3)))); + return ((T) 0.5) * x * (((T) 1.0) + t); +} + +template +__device__ __forceinline__ T gelu_fast_kernel(const T& x) { + const float f = (float) x; + const T t = (T) tanhf(((T) (f * 0.79788456f)) * (((T) 1.0) + (T) (0.044715f * f) * x)); + return ((T) 0.5) * x * (((T) 1.0) + t); +} + +} // namespace vllm + +void gelu_new( + torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., d] +{ + LAUNCH_ACTIVATION_KERNEL(vllm::gelu_new_kernel); +} + +void gelu_fast( + torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., d] +{ + LAUNCH_ACTIVATION_KERNEL(vllm::gelu_fast_kernel); +} diff --git a/csrc_musa/attention/attention_dtypes.h b/csrc_musa/attention/attention_dtypes.h new file mode 100644 index 0000000..02cb8f1 --- /dev/null +++ b/csrc_musa/attention/attention_dtypes.h @@ -0,0 +1,7 @@ +#pragma once + +#include 
"attention_generic.muh" +#include "dtype_float16.muh" +#include "dtype_float32.muh" +#include "dtype_bfloat16.muh" +#include "dtype_fp8.muh" diff --git a/csrc_musa/attention/attention_generic.muh b/csrc_musa/attention/attention_generic.muh new file mode 100644 index 0000000..dfec157 --- /dev/null +++ b/csrc_musa/attention/attention_generic.muh @@ -0,0 +1,65 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include + +namespace vllm { + +// A vector type to store Q, K, V elements. +template +struct Vec {}; + +// A vector type to store FP32 accumulators. +template +struct FloatVec {}; + +// Template vector operations. 
+template +inline __device__ Acc mul(A a, B b); + +template +inline __device__ float sum(T v); + +template +inline __device__ float dot(T a, T b) { + return sum(mul(a, b)); +} + +template +inline __device__ float dot(T a, T b) { + return sum(mul(a, b)); +} + +template +inline __device__ void zero(T& dst) { + constexpr int WORDS = sizeof(T) / 4; + union { + T raw; + uint32_t words[WORDS]; + } tmp; + +#pragma unroll + for (int ii = 0; ii < WORDS; ++ii) { + tmp.words[ii] = 0u; + } + dst = tmp.raw; +} + +} // namespace vllm diff --git a/csrc_musa/attention/attention_kernels.mu b/csrc_musa/attention/attention_kernels.mu new file mode 100644 index 0000000..6bade47 --- /dev/null +++ b/csrc_musa/attention/attention_kernels.mu @@ -0,0 +1,981 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include "attention_dtypes.h" +#include "attention_utils.muh" + +#if defined(ENABLE_FP8_E5M2) +#include "../quantization/fp8_e5m2_kvcache/quant_utils.cuh" +#elif defined(ENABLE_FP8_E4M3) +#include "../quantization/fp8/amd_detail/quant_utils.cuh" +#endif + +#include + +#ifdef USE_ROCM + #include + typedef __hip_bfloat16 __mt_bfloat16; +#endif + +#ifndef USE_ROCM +#define WARP_SIZE 32 +#else +#define WARP_SIZE warpSize +#endif + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) + +namespace vllm { + +// Utility function for attention softmax. +template +inline __device__ float block_sum(float* red_smem, float sum) { + // Decompose the thread index into warp / lane. + int warp = threadIdx.x / WARP_SIZE; + int lane = threadIdx.x % WARP_SIZE; + + // Compute the sum per warp. +#pragma unroll + for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) { + sum += VLLM_SHFL_XOR_SYNC(sum, mask); + } + + // Warp leaders store the data to shared memory. + if (lane == 0) { + red_smem[warp] = sum; + } + + // Make sure the data is in shared memory. + __syncthreads(); + + // The warps compute the final sums. + if (lane < NUM_WARPS) { + sum = red_smem[lane]; + } + + // Parallel reduction inside the warp. +#pragma unroll + for (int mask = NUM_WARPS / 2; mask >= 1; mask /= 2) { + sum += VLLM_SHFL_XOR_SYNC(sum, mask); + } + + // Broadcast to other threads. + return VLLM_SHFL_SYNC(sum, 0); +} + +// TODO(woosuk): Merge the last two dimensions of the grid. +// Grid: (num_heads, num_seqs, max_num_partitions). +template< + typename scalar_t, + typename cache_t, + int HEAD_SIZE, + int BLOCK_SIZE, + int NUM_THREADS, + bool IS_FP8_KV_CACHE, + int PARTITION_SIZE = 0> // Zero means no partitioning. 
+__device__ void paged_attention_kernel( + float* __restrict__ exp_sums, // [num_seqs, num_heads, max_num_partitions] + float* __restrict__ max_logits, // [num_seqs, num_heads, max_num_partitions] + scalar_t* __restrict__ out, // [num_seqs, num_heads, max_num_partitions, head_size] + const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size] + const cache_t* __restrict__ k_cache, // [num_blocks, num_kv_heads, head_size/x, block_size, x] + const cache_t* __restrict__ v_cache, // [num_blocks, num_kv_heads, head_size, block_size] + const int num_kv_heads, // [num_heads] + const float scale, + const int* __restrict__ block_tables, // [num_seqs, max_num_blocks_per_seq] + const int* __restrict__ seq_lens, // [num_seqs] + const int max_num_blocks_per_seq, + const float* __restrict__ alibi_slopes, // [num_heads] + const int q_stride, + const int kv_block_stride, + const int kv_head_stride, + const float kv_scale) { + const int seq_idx = blockIdx.y; + const int partition_idx = blockIdx.z; + const int max_num_partitions = gridDim.z; + constexpr bool USE_PARTITIONING = PARTITION_SIZE > 0; + const int seq_len = seq_lens[seq_idx]; + if (USE_PARTITIONING && partition_idx * PARTITION_SIZE >= seq_len) { + // No work to do. Terminate the thread block. + return; + } + + const int num_seq_blocks = DIVIDE_ROUND_UP(seq_len, BLOCK_SIZE); + const int num_blocks_per_partition = USE_PARTITIONING ? PARTITION_SIZE / BLOCK_SIZE : num_seq_blocks; + + // [start_block_idx, end_block_idx) is the range of blocks to process. + const int start_block_idx = USE_PARTITIONING ? partition_idx * num_blocks_per_partition : 0; + const int end_block_idx = MIN(start_block_idx + num_blocks_per_partition, num_seq_blocks); + const int num_blocks = end_block_idx - start_block_idx; + + // [start_token_idx, end_token_idx) is the range of tokens to process. 
+ const int start_token_idx = start_block_idx * BLOCK_SIZE; + const int end_token_idx = MIN(start_token_idx + num_blocks * BLOCK_SIZE, seq_len); + const int num_tokens = end_token_idx - start_token_idx; + + constexpr int THREAD_GROUP_SIZE = MAX(WARP_SIZE / BLOCK_SIZE, 1); + constexpr int NUM_THREAD_GROUPS = NUM_THREADS / THREAD_GROUP_SIZE; // Note: This assumes THREAD_GROUP_SIZE divides NUM_THREADS + assert(NUM_THREADS % THREAD_GROUP_SIZE == 0); + constexpr int NUM_TOKENS_PER_THREAD_GROUP = DIVIDE_ROUND_UP(BLOCK_SIZE, WARP_SIZE); + constexpr int NUM_WARPS = NUM_THREADS / WARP_SIZE; + const int thread_idx = threadIdx.x; + const int warp_idx = thread_idx / WARP_SIZE; + const int lane = thread_idx % WARP_SIZE; + + const int head_idx = blockIdx.x; + const int num_heads = gridDim.x; + const int num_queries_per_kv = num_heads / num_kv_heads; + const int kv_head_idx = head_idx / num_queries_per_kv; + const float alibi_slope = alibi_slopes == nullptr ? 0.f : alibi_slopes[head_idx]; + + // A vector type to store a part of a key or a query. + // The vector size is configured in such a way that the threads in a thread group + // fetch or compute 16 bytes at a time. + // For example, if the size of a thread group is 4 and the data type is half, + // then the vector size is 16 / (4 * sizeof(half)) == 2. + constexpr int VEC_SIZE = MAX(16 / (THREAD_GROUP_SIZE * sizeof(scalar_t)), 1); + using K_vec = typename Vec::Type; + using Q_vec = typename Vec::Type; +#if defined(ENABLE_FP8_E5M2) || defined(ENABLE_FP8_E4M3) + using Quant_vec = typename Vec::Type; +#endif + + constexpr int NUM_ELEMS_PER_THREAD = HEAD_SIZE / THREAD_GROUP_SIZE; + constexpr int NUM_VECS_PER_THREAD = NUM_ELEMS_PER_THREAD / VEC_SIZE; + + const int thread_group_idx = thread_idx / THREAD_GROUP_SIZE; + const int thread_group_offset = thread_idx % THREAD_GROUP_SIZE; + + // Load the query to registers. + // Each thread in a thread group has a different part of the query. 
+ // For example, if the the thread group size is 4, then the first thread in the group + // has 0, 4, 8, ... th vectors of the query, and the second thread has 1, 5, 9, ... + // th vectors of the query, and so on. + // NOTE(woosuk): Because q is split from a qkv tensor, it may not be contiguous. + const scalar_t* q_ptr = q + seq_idx * q_stride + head_idx * HEAD_SIZE; + __shared__ Q_vec q_vecs[THREAD_GROUP_SIZE][NUM_VECS_PER_THREAD]; +#pragma unroll + for (int i = thread_group_idx; i < NUM_VECS_PER_THREAD; i += NUM_THREAD_GROUPS) { + const int vec_idx = thread_group_offset + i * THREAD_GROUP_SIZE; + q_vecs[thread_group_offset][i] = *reinterpret_cast(q_ptr + vec_idx * VEC_SIZE); + } + __syncthreads(); // TODO(naed90): possible speedup if this is replaced with a memory wall right before we use q_vecs + + // Memory planning. + extern __shared__ char shared_mem[]; + // NOTE(woosuk): We use FP32 for the softmax logits for better accuracy. + float* logits = reinterpret_cast(shared_mem); + // Workspace for reduction. + __shared__ float red_smem[2 * NUM_WARPS]; + + // x == THREAD_GROUP_SIZE * VEC_SIZE + // Each thread group fetches x elements from the key at a time. + constexpr int x = 16 / sizeof(cache_t); + float qk_max = -FLT_MAX; + + // Iterate over the key blocks. + // Each warp fetches a block of keys for each iteration. + // Each thread group in a warp fetches a key from the block, and computes + // dot product with the query. + const int* block_table = block_tables + seq_idx * max_num_blocks_per_seq; + for (int block_idx = start_block_idx + warp_idx; block_idx < end_block_idx; block_idx += NUM_WARPS) { + // NOTE(woosuk): The block number is stored in int32. However, we cast it to int64 + // because int32 can lead to overflow when this variable is multiplied by large numbers + // (e.g., kv_block_stride). + const int64_t physical_block_number = static_cast(block_table[block_idx]); + + // Load a key to registers. 
+ // Each thread in a thread group has a different part of the key. + // For example, if the the thread group size is 4, then the first thread in the group + // has 0, 4, 8, ... th vectors of the key, and the second thread has 1, 5, 9, ... th + // vectors of the key, and so on. + for (int i = 0; i < NUM_TOKENS_PER_THREAD_GROUP; i++) { + const int physical_block_offset = (thread_group_idx + i * WARP_SIZE) % BLOCK_SIZE; + const int token_idx = block_idx * BLOCK_SIZE + physical_block_offset; + K_vec k_vecs[NUM_VECS_PER_THREAD]; + +#pragma unroll + for (int j = 0; j < NUM_VECS_PER_THREAD; j++) { + const cache_t* k_ptr = k_cache + physical_block_number * kv_block_stride + + kv_head_idx * kv_head_stride + + physical_block_offset * x; + const int vec_idx = thread_group_offset + j * THREAD_GROUP_SIZE; + const int offset1 = (vec_idx * VEC_SIZE) / x; + const int offset2 = (vec_idx * VEC_SIZE) % x; + if constexpr (IS_FP8_KV_CACHE) { +#if defined(ENABLE_FP8_E5M2) + Quant_vec k_vec_quant = *reinterpret_cast(k_ptr + offset1 * BLOCK_SIZE * x + offset2); + // Vector conversion from Quant_vec to K_vec. + k_vecs[j] = fp8_e5m2_unscaled::vec_conversion(k_vec_quant); +#elif defined(ENABLE_FP8_E4M3) + Quant_vec k_vec_quant = *reinterpret_cast(k_ptr + offset1 * BLOCK_SIZE * x + offset2); + // Vector conversion from Quant_vec to K_vec. Use scaled_vec_conversion to convert FP8_E4M3 quantized k + // cache vec to k vec in higher precision (FP16, BFloat16, etc.) + k_vecs[j] = fp8_e4m3::scaled_vec_conversion(k_vec_quant, kv_scale); +#else + assert(false); +#endif + } else { + k_vecs[j] = *reinterpret_cast(k_ptr + offset1 * BLOCK_SIZE * x + offset2); + } + } + + // Compute dot product. + // This includes a reduction across the threads in the same thread group. + float qk = scale * Qk_dot::dot(q_vecs[thread_group_offset], k_vecs); + // Add the ALiBi bias if slopes are given. + qk += (alibi_slope != 0) ? 
alibi_slope * (token_idx - seq_len + 1) : 0; + + if (thread_group_offset == 0) { + // Store the partial reductions to shared memory. + // NOTE(woosuk): It is required to zero out the masked logits. + const bool mask = token_idx >= seq_len; + logits[token_idx - start_token_idx] = mask ? 0.f : qk; + // Update the max value. + qk_max = mask ? qk_max : fmaxf(qk_max, qk); + } + } + } + + // Perform reduction across the threads in the same warp to get the + // max qk value for each "warp" (not across the thread block yet). + // The 0-th thread of each thread group already has its max qk value. +#pragma unroll + for (int mask = WARP_SIZE / 2; mask >= THREAD_GROUP_SIZE; mask /= 2) { + qk_max = fmaxf(qk_max, VLLM_SHFL_XOR_SYNC(qk_max, mask)); + } + if (lane == 0) { + red_smem[warp_idx] = qk_max; + } + __syncthreads(); + + // TODO(woosuk): Refactor this part. + // Get the max qk value for the sequence. + qk_max = lane < NUM_WARPS ? red_smem[lane] : -FLT_MAX; +#pragma unroll + for (int mask = NUM_WARPS / 2; mask >= 1; mask /= 2) { + qk_max = fmaxf(qk_max, VLLM_SHFL_XOR_SYNC(qk_max, mask)); + } + // Broadcast the max qk value to all threads. + qk_max = VLLM_SHFL_SYNC(qk_max, 0); + + // Get the sum of the exp values. + float exp_sum = 0.f; + for (int i = thread_idx; i < num_tokens; i += NUM_THREADS) { + float val = __expf(logits[i] - qk_max); + logits[i] = val; + exp_sum += val; + } + exp_sum = block_sum(&red_smem[NUM_WARPS], exp_sum); + + // Compute softmax. + const float inv_sum = __fdividef(1.f, exp_sum + 1e-6f); + for (int i = thread_idx; i < num_tokens; i += NUM_THREADS) { + logits[i] *= inv_sum; + } + __syncthreads(); + + // If partitioning is enabled, store the max logit and exp_sum. 
+ if (USE_PARTITIONING && thread_idx == 0) { + float* max_logits_ptr = max_logits + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions + + partition_idx; + *max_logits_ptr = qk_max; + float* exp_sums_ptr = exp_sums + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions + + partition_idx; + *exp_sums_ptr = exp_sum; + } + + // Each thread will fetch 16 bytes from the value cache at a time. + constexpr int V_VEC_SIZE = MIN(16 / sizeof(scalar_t), BLOCK_SIZE); + using V_vec = typename Vec::Type; + using L_vec = typename Vec::Type; +#if defined(ENABLE_FP8_E5M2) || defined(ENABLE_FP8_E4M3) + using V_quant_vec = typename Vec::Type; +#endif + using Float_L_vec = typename FloatVec::Type; + + constexpr int NUM_V_VECS_PER_ROW = BLOCK_SIZE / V_VEC_SIZE; + constexpr int NUM_ROWS_PER_ITER = WARP_SIZE / NUM_V_VECS_PER_ROW; + constexpr int NUM_ROWS_PER_THREAD = DIVIDE_ROUND_UP(HEAD_SIZE, NUM_ROWS_PER_ITER); + + // NOTE(woosuk): We use FP32 for the accumulator for better accuracy. + float accs[NUM_ROWS_PER_THREAD]; +#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + accs[i] = 0.f; + } + + scalar_t zero_value; + zero(zero_value); + for (int block_idx = start_block_idx + warp_idx; block_idx < end_block_idx; block_idx += NUM_WARPS) { + // NOTE(woosuk): The block number is stored in int32. However, we cast it to int64 + // because int32 can lead to overflow when this variable is multiplied by large numbers + // (e.g., kv_block_stride). 
+ const int64_t physical_block_number = static_cast(block_table[block_idx]); + const int physical_block_offset = (lane % NUM_V_VECS_PER_ROW) * V_VEC_SIZE; + const int token_idx = block_idx * BLOCK_SIZE + physical_block_offset; + L_vec logits_vec; + from_float(logits_vec, *reinterpret_cast(logits + token_idx - start_token_idx)); + + const cache_t* v_ptr = v_cache + physical_block_number * kv_block_stride + + kv_head_idx * kv_head_stride; +#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + const int row_idx = lane / NUM_V_VECS_PER_ROW + i * NUM_ROWS_PER_ITER; + if (row_idx < HEAD_SIZE) { + const int offset = row_idx * BLOCK_SIZE + physical_block_offset; + V_vec v_vec; + if constexpr (IS_FP8_KV_CACHE) { +#if defined(ENABLE_FP8_E5M2) + V_quant_vec v_quant_vec = *reinterpret_cast(v_ptr + offset); + // Vector conversion from V_quant_vec to V_vec. + v_vec = fp8_e5m2_unscaled::vec_conversion(v_quant_vec); +#elif defined(ENABLE_FP8_E4M3) + V_quant_vec v_quant_vec = *reinterpret_cast(v_ptr + offset); + // Vector conversion from V_quant_vec to V_vec. Use scaled_vec_conversion to convert + // FP8_E4M3 quantized v cache vec to v vec in higher precision (FP16, BFloat16, etc.) + v_vec = fp8_e4m3::scaled_vec_conversion(v_quant_vec, kv_scale); +#else + assert(false); +#endif + } else { + v_vec = *reinterpret_cast(v_ptr + offset); + } + if (block_idx == num_seq_blocks - 1) { + // NOTE(woosuk): When v_vec contains the tokens that are out of the context, + // we should explicitly zero out the values since they may contain NaNs. + // See https://github.com/vllm-project/vllm/issues/641#issuecomment-1682544472 + scalar_t* v_vec_ptr = reinterpret_cast(&v_vec); +#pragma unroll + for (int j = 0; j < V_VEC_SIZE; j++) { + v_vec_ptr[j] = token_idx + j < seq_len ? v_vec_ptr[j] : zero_value; + } + } + accs[i] += dot(logits_vec, v_vec); + } + } + } + + // Perform reduction within each warp. 
+#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + float acc = accs[i]; +#pragma unroll + for (int mask = NUM_V_VECS_PER_ROW / 2; mask >= 1; mask /= 2) { + acc += VLLM_SHFL_XOR_SYNC(acc, mask); + } + accs[i] = acc; + } + + // NOTE(woosuk): A barrier is required because the shared memory space for logits + // is reused for the output. + __syncthreads(); + + // Perform reduction across warps. + float* out_smem = reinterpret_cast(shared_mem); +#pragma unroll + for (int i = NUM_WARPS; i > 1; i /= 2) { + int mid = i / 2; + // Upper warps write to shared memory. + if (warp_idx >= mid && warp_idx < i) { + float* dst = &out_smem[(warp_idx - mid) * HEAD_SIZE]; +#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + const int row_idx = lane / NUM_V_VECS_PER_ROW + i * NUM_ROWS_PER_ITER; + if (row_idx < HEAD_SIZE && lane % NUM_V_VECS_PER_ROW == 0) { + dst[row_idx] = accs[i]; + } + } + } + __syncthreads(); + + // Lower warps update the output. + if (warp_idx < mid) { + const float* src = &out_smem[warp_idx * HEAD_SIZE]; +#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + const int row_idx = lane / NUM_V_VECS_PER_ROW + i * NUM_ROWS_PER_ITER; + if (row_idx < HEAD_SIZE && lane % NUM_V_VECS_PER_ROW == 0) { + accs[i] += src[row_idx]; + } + } + } + __syncthreads(); + } + + // Write the final output. + if (warp_idx == 0) { + scalar_t* out_ptr = out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE + + partition_idx * HEAD_SIZE; +#pragma unroll + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + const int row_idx = lane / NUM_V_VECS_PER_ROW + i * NUM_ROWS_PER_ITER; + if (row_idx < HEAD_SIZE && lane % NUM_V_VECS_PER_ROW == 0) { + from_float(*(out_ptr + row_idx), accs[i]); + } + } + } +} + +// Grid: (num_heads, num_seqs, 1). 
+template< + typename scalar_t, + typename cache_t, + int HEAD_SIZE, + int BLOCK_SIZE, + int NUM_THREADS, + bool IS_FP8_KV_CACHE> +__global__ void paged_attention_v1_kernel( + scalar_t* __restrict__ out, // [num_seqs, num_heads, head_size] + const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size] + const cache_t* __restrict__ k_cache, // [num_blocks, num_kv_heads, head_size/x, block_size, x] + const cache_t* __restrict__ v_cache, // [num_blocks, num_kv_heads, head_size, block_size] + const int num_kv_heads, // [num_heads] + const float scale, + const int* __restrict__ block_tables, // [num_seqs, max_num_blocks_per_seq] + const int* __restrict__ seq_lens, // [num_seqs] + const int max_num_blocks_per_seq, + const float* __restrict__ alibi_slopes, // [num_heads] + const int q_stride, + const int kv_block_stride, + const int kv_head_stride, + const float kv_scale) { + paged_attention_kernel( + /* exp_sums */ nullptr, /* max_logits */ nullptr, + out, q, k_cache, v_cache, num_kv_heads, scale, block_tables, seq_lens, + max_num_blocks_per_seq, alibi_slopes, q_stride, kv_block_stride, kv_head_stride, kv_scale); +} + +// Grid: (num_heads, num_seqs, max_num_partitions). 
+template< + typename scalar_t, + typename cache_t, + int HEAD_SIZE, + int BLOCK_SIZE, + int NUM_THREADS, + bool IS_FP8_KV_CACHE, + int PARTITION_SIZE> +__global__ void paged_attention_v2_kernel( + float* __restrict__ exp_sums, // [num_seqs, num_heads, max_num_partitions] + float* __restrict__ max_logits, // [num_seqs, num_heads, max_num_partitions] + scalar_t* __restrict__ tmp_out, // [num_seqs, num_heads, max_num_partitions, head_size] + const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size] + const cache_t* __restrict__ k_cache, // [num_blocks, num_kv_heads, head_size/x, block_size, x] + const cache_t* __restrict__ v_cache, // [num_blocks, num_kv_heads, head_size, block_size] + const int num_kv_heads, // [num_heads] + const float scale, + const int* __restrict__ block_tables, // [num_seqs, max_num_blocks_per_seq] + const int* __restrict__ seq_lens, // [num_seqs] + const int max_num_blocks_per_seq, + const float* __restrict__ alibi_slopes, // [num_heads] + const int q_stride, + const int kv_block_stride, + const int kv_head_stride, + const float kv_scale) { + paged_attention_kernel( + exp_sums, max_logits, tmp_out, q, k_cache, v_cache, num_kv_heads, scale, + block_tables, seq_lens, max_num_blocks_per_seq, alibi_slopes, + q_stride, kv_block_stride, kv_head_stride, kv_scale); +} + +// Grid: (num_heads, num_seqs). 
+template< + typename scalar_t, + int HEAD_SIZE, + int NUM_THREADS, + int PARTITION_SIZE> +__global__ void paged_attention_v2_reduce_kernel( + scalar_t* __restrict__ out, // [num_seqs, num_heads, head_size] + const float* __restrict__ exp_sums, // [num_seqs, num_heads, max_num_partitions] + const float* __restrict__ max_logits, // [num_seqs, num_heads, max_num_partitions] + const scalar_t* __restrict__ tmp_out, // [num_seqs, num_heads, max_num_partitions, head_size] + const int* __restrict__ seq_lens, // [num_seqs] + const int max_num_partitions) { + const int num_heads = gridDim.x; + const int head_idx = blockIdx.x; + const int seq_idx = blockIdx.y; + const int seq_len = seq_lens[seq_idx]; + const int num_partitions = DIVIDE_ROUND_UP(seq_len, PARTITION_SIZE); + if (num_partitions == 1) { + // No need to reduce. Only copy tmp_out to out. + scalar_t* out_ptr = out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE; + const scalar_t* tmp_out_ptr = tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE; + for (int i = threadIdx.x; i < HEAD_SIZE; i += blockDim.x) { + out_ptr[i] = tmp_out_ptr[i]; + } + // Terminate the thread block. + return; + } + + constexpr int NUM_WARPS = NUM_THREADS / WARP_SIZE; + const int warp_idx = threadIdx.x / WARP_SIZE; + const int lane = threadIdx.x % WARP_SIZE; + + // Size: 2 * num_partitions. + extern __shared__ char shared_mem[]; + // Workspace for reduction. + __shared__ float red_smem[2 * NUM_WARPS]; + + // Load max logits to shared memory. + float* shared_max_logits = reinterpret_cast(shared_mem); + const float* max_logits_ptr = max_logits + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions; + float max_logit = -FLT_MAX; + for (int i = threadIdx.x; i < num_partitions; i += blockDim.x) { + const float l = max_logits_ptr[i]; + shared_max_logits[i] = l; + max_logit = fmaxf(max_logit, l); + } + __syncthreads(); + + // Get the global max logit. 
+ // Reduce within the warp. +#pragma unroll + for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) { + max_logit = fmaxf(max_logit, VLLM_SHFL_XOR_SYNC(max_logit, mask)); + } + if (lane == 0) { + red_smem[warp_idx] = max_logit; + } + __syncthreads(); + // Reduce across warps. + max_logit = lane < NUM_WARPS ? red_smem[lane] : -FLT_MAX; +#pragma unroll + for (int mask = NUM_WARPS / 2; mask >= 1; mask /= 2) { + max_logit = fmaxf(max_logit, VLLM_SHFL_XOR_SYNC(max_logit, mask)); + } + // Broadcast the max value to all threads. + max_logit = VLLM_SHFL_SYNC(max_logit, 0); + + // Load rescaled exp sums to shared memory. + float* shared_exp_sums = reinterpret_cast(shared_mem + sizeof(float) * num_partitions); + const float* exp_sums_ptr = exp_sums + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions; + float global_exp_sum = 0.0f; + for (int i = threadIdx.x; i < num_partitions; i += blockDim.x) { + float l = shared_max_logits[i]; + float rescaled_exp_sum = exp_sums_ptr[i] * expf(l - max_logit); + global_exp_sum += rescaled_exp_sum; + shared_exp_sums[i] = rescaled_exp_sum; + } + __syncthreads(); + global_exp_sum = block_sum(&red_smem[NUM_WARPS], global_exp_sum); + const float inv_global_exp_sum = __fdividef(1.0f, global_exp_sum + 1e-6f); + + // Aggregate tmp_out to out. 
+ const scalar_t* tmp_out_ptr = tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE; + scalar_t* out_ptr = out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE; +#pragma unroll + for (int i = threadIdx.x; i < HEAD_SIZE; i += NUM_THREADS) { + float acc = 0.0f; + for (int j = 0; j < num_partitions; ++j) { + acc += to_float(tmp_out_ptr[j * HEAD_SIZE + i]) * shared_exp_sums[j] * inv_global_exp_sum; + } + from_float(out_ptr[i], acc); + } +} + +} // namespace vllm + +#define LAUNCH_PAGED_ATTENTION_V1(HEAD_SIZE) \ + VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize( \ + ((void*)vllm::paged_attention_v1_kernel), shared_mem_size); \ + vllm::paged_attention_v1_kernel<<>>( \ + out_ptr, \ + query_ptr, \ + key_cache_ptr, \ + value_cache_ptr, \ + num_kv_heads, \ + scale, \ + block_tables_ptr, \ + seq_lens_ptr, \ + max_num_blocks_per_seq, \ + alibi_slopes_ptr, \ + q_stride, \ + kv_block_stride, \ + kv_head_stride, \ + kv_scale); + +// TODO(woosuk): Tune NUM_THREADS. +template< + typename T, + typename CACHE_T, + int BLOCK_SIZE, + bool IS_FP8_KV_CACHE, + int NUM_THREADS = 128> +void paged_attention_v1_launcher( + torch::Tensor& out, + torch::Tensor& query, + torch::Tensor& key_cache, + torch::Tensor& value_cache, + int num_kv_heads, + float scale, + torch::Tensor& block_tables, + torch::Tensor& seq_lens, + int max_seq_len, + const c10::optional& alibi_slopes, + float kv_scale) { + int num_seqs = query.size(0); + int num_heads = query.size(1); + int head_size = query.size(2); + int max_num_blocks_per_seq = block_tables.size(1); + int q_stride = query.stride(0); + int kv_block_stride = key_cache.stride(0); + int kv_head_stride = key_cache.stride(1); + + int thread_group_size = MAX(WARP_SIZE / BLOCK_SIZE, 1); + assert(head_size % thread_group_size == 0); + + // NOTE: alibi_slopes is optional. + const float* alibi_slopes_ptr = alibi_slopes ? 
+ reinterpret_cast(alibi_slopes.value().data_ptr()) + : nullptr; + + T* out_ptr = reinterpret_cast(out.data_ptr()); + T* query_ptr = reinterpret_cast(query.data_ptr()); + CACHE_T* key_cache_ptr = reinterpret_cast(key_cache.data_ptr()); + CACHE_T* value_cache_ptr = reinterpret_cast(value_cache.data_ptr()); + int* block_tables_ptr = block_tables.data_ptr(); + int* seq_lens_ptr = seq_lens.data_ptr(); + + constexpr int NUM_WARPS = NUM_THREADS / WARP_SIZE; + int padded_max_seq_len = DIVIDE_ROUND_UP(max_seq_len, BLOCK_SIZE) * BLOCK_SIZE; + int logits_size = padded_max_seq_len * sizeof(float); + int outputs_size = (NUM_WARPS / 2) * head_size * sizeof(float); + // Python-side check in vllm.worker.worker._check_if_can_support_max_seq_len + // Keep that in sync with the logic here! + int shared_mem_size = std::max(logits_size, outputs_size); + + dim3 grid(num_heads, num_seqs, 1); + dim3 block(NUM_THREADS); + const at::musa::OptionalMUSAGuard device_guard(device_of(query)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + switch (head_size) { + // NOTE(woosuk): To reduce the compilation time, we only compile for the + // head sizes that we use in the model. However, we can easily extend this + // to support any head size which is a multiple of 16. 
+ case 64: + LAUNCH_PAGED_ATTENTION_V1(64); + break; + case 80: + LAUNCH_PAGED_ATTENTION_V1(80); + break; + case 96: + LAUNCH_PAGED_ATTENTION_V1(96); + break; + case 112: + LAUNCH_PAGED_ATTENTION_V1(112); + break; + case 128: + LAUNCH_PAGED_ATTENTION_V1(128); + break; + case 256: + LAUNCH_PAGED_ATTENTION_V1(256); + break; + default: + TORCH_CHECK(false, "Unsupported head size: ", head_size); + break; + } +} + +#define CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ + paged_attention_v1_launcher( \ + out, \ + query, \ + key_cache, \ + value_cache, \ + num_kv_heads, \ + scale, \ + block_tables, \ + seq_lens, \ + max_seq_len, \ + alibi_slopes, \ + kv_scale); + +// NOTE(woosuk): To reduce the compilation time, we omitted block sizes +// 1, 2, 4, 64, 128, 256. +#define CALL_V1_LAUNCHER_BLOCK_SIZE(T, CACHE_T, IS_FP8_KV_CACHE) \ + switch (block_size) { \ + case 8: \ + CALL_V1_LAUNCHER(T, CACHE_T, 8, IS_FP8_KV_CACHE); \ + break; \ + case 16: \ + CALL_V1_LAUNCHER(T, CACHE_T, 16, IS_FP8_KV_CACHE); \ + break; \ + case 32: \ + CALL_V1_LAUNCHER(T, CACHE_T, 32, IS_FP8_KV_CACHE); \ + break; \ + default: \ + TORCH_CHECK(false, "Unsupported block size: ", block_size); \ + break; \ + } + +void paged_attention_v1( + torch::Tensor& out, // [num_seqs, num_heads, head_size] + torch::Tensor& query, // [num_seqs, num_heads, head_size] + torch::Tensor& key_cache, // [num_blocks, num_heads, head_size/x, block_size, x] + torch::Tensor& value_cache, // [num_blocks, num_heads, head_size, block_size] + int num_kv_heads, // [num_heads] + float scale, + torch::Tensor& block_tables, // [num_seqs, max_num_blocks_per_seq] + torch::Tensor& seq_lens, // [num_seqs] + int block_size, + int max_seq_len, + const c10::optional& alibi_slopes, + const std::string& kv_cache_dtype, + float kv_scale) { + if (kv_cache_dtype == "auto") { + if (query.dtype() == at::ScalarType::Float) { + CALL_V1_LAUNCHER_BLOCK_SIZE(float, float, false); + } else if (query.dtype() == at::ScalarType::Half) { + 
CALL_V1_LAUNCHER_BLOCK_SIZE(uint16_t, uint16_t, false); + } else if (query.dtype() == at::ScalarType::BFloat16) { + CALL_V1_LAUNCHER_BLOCK_SIZE(__mt_bfloat16, __mt_bfloat16, false); + } else { + TORCH_CHECK(false, "Unsupported data type: ", query.dtype()); + } + } else if (kv_cache_dtype == "fp8") { + if (query.dtype() == at::ScalarType::Float) { + CALL_V1_LAUNCHER_BLOCK_SIZE(float, uint8_t, true); + } else if (query.dtype() == at::ScalarType::Half) { + CALL_V1_LAUNCHER_BLOCK_SIZE(uint16_t, uint8_t, true); + } else if (query.dtype() == at::ScalarType::BFloat16) { + CALL_V1_LAUNCHER_BLOCK_SIZE(__mt_bfloat16, uint8_t, true); + } else { + TORCH_CHECK(false, "Unsupported data type: ", query.dtype()); + } + } else { + TORCH_CHECK(false, "Unsupported data type of kv cache: ", kv_cache_dtype); + } +} + +#define LAUNCH_PAGED_ATTENTION_V2(HEAD_SIZE) \ + vllm::paged_attention_v2_kernel \ + <<>>( \ + exp_sums_ptr, \ + max_logits_ptr, \ + tmp_out_ptr, \ + query_ptr, \ + key_cache_ptr, \ + value_cache_ptr, \ + num_kv_heads, \ + scale, \ + block_tables_ptr, \ + seq_lens_ptr, \ + max_num_blocks_per_seq, \ + alibi_slopes_ptr, \ + q_stride, \ + kv_block_stride, \ + kv_head_stride, \ + kv_scale); \ + vllm::paged_attention_v2_reduce_kernel \ + <<>>( \ + out_ptr, \ + exp_sums_ptr, \ + max_logits_ptr, \ + tmp_out_ptr, \ + seq_lens_ptr, \ + max_num_partitions); + +template< + typename T, + typename CACHE_T, + int BLOCK_SIZE, + bool IS_FP8_KV_CACHE, + int NUM_THREADS = 128, + int PARTITION_SIZE = 512> +void paged_attention_v2_launcher( + torch::Tensor& out, + torch::Tensor& exp_sums, + torch::Tensor& max_logits, + torch::Tensor& tmp_out, + torch::Tensor& query, + torch::Tensor& key_cache, + torch::Tensor& value_cache, + int num_kv_heads, + float scale, + torch::Tensor& block_tables, + torch::Tensor& seq_lens, + int max_seq_len, + const c10::optional& alibi_slopes, + float kv_scale) { + int num_seqs = query.size(0); + int num_heads = query.size(1); + int head_size = query.size(2); + int 
max_num_blocks_per_seq = block_tables.size(1); + int q_stride = query.stride(0); + int kv_block_stride = key_cache.stride(0); + int kv_head_stride = key_cache.stride(1); + + int thread_group_size = MAX(WARP_SIZE / BLOCK_SIZE, 1); + assert(head_size % thread_group_size == 0); + + // NOTE: alibi_slopes is optional. + const float* alibi_slopes_ptr = alibi_slopes ? + reinterpret_cast(alibi_slopes.value().data_ptr()) + : nullptr; + + T* out_ptr = reinterpret_cast(out.data_ptr()); + float* exp_sums_ptr = reinterpret_cast(exp_sums.data_ptr()); + float* max_logits_ptr = reinterpret_cast(max_logits.data_ptr()); + T* tmp_out_ptr = reinterpret_cast(tmp_out.data_ptr()); + T* query_ptr = reinterpret_cast(query.data_ptr()); + CACHE_T* key_cache_ptr = reinterpret_cast(key_cache.data_ptr()); + CACHE_T* value_cache_ptr = reinterpret_cast(value_cache.data_ptr()); + int* block_tables_ptr = block_tables.data_ptr(); + int* seq_lens_ptr = seq_lens.data_ptr(); + + constexpr int NUM_WARPS = NUM_THREADS / WARP_SIZE; + int max_num_partitions = DIVIDE_ROUND_UP(max_seq_len, PARTITION_SIZE); + int logits_size = PARTITION_SIZE * sizeof(float); + int outputs_size = (NUM_WARPS / 2) * head_size * sizeof(float); + + // For paged attention v2 kernel. + dim3 grid(num_heads, num_seqs, max_num_partitions); + int shared_mem_size = std::max(logits_size, outputs_size); + // For paged attention v2 reduce kernel. + dim3 reduce_grid(num_heads, num_seqs); + int reduce_shared_mem_size = 2 * max_num_partitions * sizeof(float); + + dim3 block(NUM_THREADS); + const at::musa::OptionalMUSAGuard device_guard(device_of(query)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + switch (head_size) { + // NOTE(woosuk): To reduce the compilation time, we only compile for the + // head sizes that we use in the model. However, we can easily extend this + // to support any head size which is a multiple of 16. 
+ case 64: + LAUNCH_PAGED_ATTENTION_V2(64); + break; + case 80: + LAUNCH_PAGED_ATTENTION_V2(80); + break; + case 96: + LAUNCH_PAGED_ATTENTION_V2(96); + break; + case 112: + LAUNCH_PAGED_ATTENTION_V2(112); + break; + case 128: + LAUNCH_PAGED_ATTENTION_V2(128); + break; + case 256: + LAUNCH_PAGED_ATTENTION_V2(256); + break; + default: + TORCH_CHECK(false, "Unsupported head size: ", head_size); + break; + } +} + +#define CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ + paged_attention_v2_launcher( \ + out, \ + exp_sums, \ + max_logits, \ + tmp_out, \ + query, \ + key_cache, \ + value_cache, \ + num_kv_heads, \ + scale, \ + block_tables, \ + seq_lens, \ + max_seq_len, \ + alibi_slopes, \ + kv_scale); + +// NOTE(woosuk): To reduce the compilation time, we omitted block sizes +// 1, 2, 4, 64, 128, 256. +#define CALL_V2_LAUNCHER_BLOCK_SIZE(T, CACHE_T, IS_FP8_KV_CACHE) \ + switch (block_size) { \ + case 8: \ + CALL_V2_LAUNCHER(T, CACHE_T, 8, IS_FP8_KV_CACHE); \ + break; \ + case 16: \ + CALL_V2_LAUNCHER(T, CACHE_T, 16, IS_FP8_KV_CACHE); \ + break; \ + case 32: \ + CALL_V2_LAUNCHER(T, CACHE_T, 32, IS_FP8_KV_CACHE); \ + break; \ + default: \ + TORCH_CHECK(false, "Unsupported block size: ", block_size); \ + break; \ + } + +void paged_attention_v2( + torch::Tensor& out, // [num_seqs, num_heads, head_size] + torch::Tensor& exp_sums, // [num_seqs, num_heads, max_num_partitions] + torch::Tensor& max_logits, // [num_seqs, num_heads, max_num_partitions] + torch::Tensor& tmp_out, // [num_seqs, num_heads, max_num_partitions, head_size] + torch::Tensor& query, // [num_seqs, num_heads, head_size] + torch::Tensor& key_cache, // [num_blocks, num_heads, head_size/x, block_size, x] + torch::Tensor& value_cache, // [num_blocks, num_heads, head_size, block_size] + int num_kv_heads, // [num_heads] + float scale, + torch::Tensor& block_tables, // [num_seqs, max_num_blocks_per_seq] + torch::Tensor& seq_lens, // [num_seqs] + int block_size, + int max_seq_len, + const c10::optional& 
alibi_slopes, + const std::string& kv_cache_dtype, + float kv_scale) { + if (kv_cache_dtype == "auto") { + if (query.dtype() == at::ScalarType::Float) { + CALL_V2_LAUNCHER_BLOCK_SIZE(float, float, false); + } else if (query.dtype() == at::ScalarType::Half) { + CALL_V2_LAUNCHER_BLOCK_SIZE(uint16_t, uint16_t, false); + } else if (query.dtype() == at::ScalarType::BFloat16) { + CALL_V2_LAUNCHER_BLOCK_SIZE(__mt_bfloat16, __mt_bfloat16, false); + } else { + TORCH_CHECK(false, "Unsupported data type: ", query.dtype()); + } + } else if (kv_cache_dtype == "fp8") { + if (query.dtype() == at::ScalarType::Float) { + CALL_V2_LAUNCHER_BLOCK_SIZE(float, uint8_t, true); + } else if (query.dtype() == at::ScalarType::Half) { + CALL_V2_LAUNCHER_BLOCK_SIZE(uint16_t, uint8_t, true); + } else if (query.dtype() == at::ScalarType::BFloat16) { + CALL_V2_LAUNCHER_BLOCK_SIZE(__mt_bfloat16, uint8_t, true); + } else { + TORCH_CHECK(false, "Unsupported data type: ", query.dtype()); + } + } else { + TORCH_CHECK(false, "Unsupported data type of kv cache: ", kv_cache_dtype); + } +} + +#undef WARP_SIZE +#undef MAX +#undef MIN +#undef DIVIDE_ROUND_UP diff --git a/csrc_musa/attention/attention_utils.muh b/csrc_musa/attention/attention_utils.muh new file mode 100644 index 0000000..8993a64 --- /dev/null +++ b/csrc_musa/attention/attention_utils.muh @@ -0,0 +1,57 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "../musa_compat.h" +#include "attention_dtypes.h" + +#include +#include + +namespace vllm { + +// Q*K^T operation. +template +inline __device__ float qk_dot_(const Vec (&q)[N], const Vec (&k)[N]) { + using A_vec = typename FloatVec::Type; + // Compute the parallel products for Q*K^T (treat vector lanes separately). + A_vec qk_vec = mul(q[0], k[0]); +#pragma unroll + for (int ii = 1; ii < N; ++ii) { + qk_vec = fma(q[ii], k[ii], qk_vec); + } + + // Finalize the reduction across lanes. + float qk = sum(qk_vec); +#pragma unroll + for (int mask = THREAD_GROUP_SIZE / 2; mask >= 1; mask /= 2) { + qk += VLLM_SHFL_XOR_SYNC(qk, mask); + } + return qk; +} + +template +struct Qk_dot { + template + static inline __device__ float dot(const Vec (&q)[N], const Vec (&k)[N]) { + return qk_dot_(q, k); + } +}; + +} // namespace vllm diff --git a/csrc_musa/attention/dtype_bfloat16.muh b/csrc_musa/attention/dtype_bfloat16.muh new file mode 100644 index 0000000..5526476 --- /dev/null +++ b/csrc_musa/attention/dtype_bfloat16.muh @@ -0,0 +1,452 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp + * and https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. 
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "attention_generic.muh" +#include "dtype_float32.muh" + +#ifndef USE_ROCM + #include + #include +#else + #include + #include + + typedef __hip_bfloat162 __mt_bfloat162; + typedef __hip_bfloat16 __mt_bfloat16; +#endif + +#include + +namespace vllm { + +// Define custom BF16 vector data types. +struct bf16_4_t { + __mt_bfloat162 x; + __mt_bfloat162 y; +}; + +struct bf16_8_t { + __mt_bfloat162 x; + __mt_bfloat162 y; + __mt_bfloat162 z; + __mt_bfloat162 w; +}; + +// BF16 vector types for Q, K, V. +template<> +struct Vec<__mt_bfloat16, 1> { + using Type = __mt_bfloat16; +}; +template<> +struct Vec<__mt_bfloat16, 2> { + using Type = __mt_bfloat162; +}; +template<> +struct Vec<__mt_bfloat16, 4> { + using Type = bf16_4_t; +}; +template<> +struct Vec<__mt_bfloat16, 8> { + using Type = bf16_8_t; +}; + +// FP32 accumulator vector types corresponding to Vec. +template<> +struct FloatVec<__mt_bfloat16> { + using Type = float; +}; +template<> +struct FloatVec<__mt_bfloat162> { + using Type = float2; +}; +template<> +struct FloatVec { + using Type = Float4_; +}; +template<> +struct FloatVec { + using Type = Float8_; +}; + +// Utility functions for type conversions. 
// Converts a packed pair of bf16 values to a float2.
// NOTE(review): the __MUSA_ARCH__ < 800 guards below assert(false) — the
// bf16 intrinsics are presumably unavailable on older architectures; the
// exact arch cutoff mirrors the CUDA original — TODO confirm for MUSA.
inline __device__ float2 bf1622float2(const __mt_bfloat162 val) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __bfloat1622float2(val);
#endif
}

// Broadcasts one bf16 value into both halves of a bf16x2.
inline __device__ __mt_bfloat162 bf162bf162(const __mt_bfloat16 val) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __bfloat162bfloat162(val);
#endif
}

// Vector addition.
// Scalar bf16 + bf16. ROCm lacks the native operator+, so it uses __hadd.
inline __device__ __mt_bfloat16 add(__mt_bfloat16 a, __mt_bfloat16 b) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  #ifndef USE_ROCM
    return a + b;
  #else
    return __hadd(a, b);
  #endif
#endif
}

// Packed bf16x2 addition.
inline __device__ __mt_bfloat162 add(__mt_bfloat162 a, __mt_bfloat162 b) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __hadd2(a, b);
#endif
}

// 4-wide bf16 addition, componentwise over the two bf16x2 halves.
inline __device__ bf16_4_t add(bf16_4_t a, bf16_4_t b) {
  bf16_4_t c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  return c;
}

// 8-wide bf16 addition, componentwise over the four bf16x2 quarters.
inline __device__ bf16_8_t add(bf16_8_t a, bf16_8_t b) {
  bf16_8_t c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  c.z = add(a.z, b.z);
  c.w = add(a.w, b.w);
  return c;
}

// Mixed-precision adds: bf16 operand is widened to float before the add,
// accumulating in fp32.
inline __device__ float2 add(__mt_bfloat162 a, float2 fb) {
  float2 fa = bf1622float2(a);
  return add(fa, fb);
}

inline __device__ Float4_ add(bf16_4_t a, Float4_ fb) {
  Float4_ fc;
  fc.x = add(a.x, fb.x);
  fc.y = add(a.y, fb.y);
  return fc;
}

inline __device__ Float8_ add(bf16_8_t a, Float8_ fb) {
  Float8_ fc;
  fc.x = add(a.x, fb.x);
  fc.y = add(a.y, fb.y);
  fc.z = add(a.z, fb.z);
  fc.w = add(a.w, fb.w);
  return fc;
}

// Vector multiplication.
// Explicit specializations of the generic mul<Acc, A, B> template for bf16
// operand/accumulator combinations. The template arguments are deduced from
// each specialization's signature (including the return type).
// Scalar bf16 * bf16 -> bf16.
template<>
inline __device__ __mt_bfloat16 mul(__mt_bfloat16 a, __mt_bfloat16 b) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __hmul(a, b);
#endif
}

// Packed bf16x2 * bf16x2 -> bf16x2.
template<>
inline __device__ __mt_bfloat162 mul(__mt_bfloat162 a, __mt_bfloat162 b) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __hmul2(a, b);
#endif
}

// Scalar broadcast * bf16x2 -> bf16x2.
template<>
inline __device__ __mt_bfloat162 mul(__mt_bfloat16 a, __mt_bfloat162 b) {
  return mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(bf162bf162(a), b);
}

// 4-wide bf16 products, componentwise.
template<>
inline __device__ bf16_4_t mul(bf16_4_t a, bf16_4_t b) {
  bf16_4_t c;
  c.x = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.x, b.x);
  c.y = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.y, b.y);
  return c;
}

// Scalar broadcast * 4-wide bf16.
template<>
inline __device__ bf16_4_t mul(__mt_bfloat16 a, bf16_4_t b) {
  __mt_bfloat162 s = bf162bf162(a);
  bf16_4_t c;
  c.x = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.x);
  c.y = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.y);
  return c;
}

// 8-wide bf16 products, componentwise.
template<>
inline __device__ bf16_8_t mul(bf16_8_t a, bf16_8_t b) {
  bf16_8_t c;
  c.x = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.x, b.x);
  c.y = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.y, b.y);
  c.z = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.z, b.z);
  c.w = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(a.w, b.w);
  return c;
}

// Scalar broadcast * 8-wide bf16.
template<>
inline __device__ bf16_8_t mul(__mt_bfloat16 a, bf16_8_t b) {
  __mt_bfloat162 s = bf162bf162(a);
  bf16_8_t c;
  c.x = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.x);
  c.y = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.y);
  c.z = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.z);
  c.w = mul<__mt_bfloat162, __mt_bfloat162, __mt_bfloat162>(s, b.w);
  return c;
}

// The float-returning variants widen to fp32 before multiplying (accumulate
// in fp32 to preserve precision).
template<>
inline __device__ float mul(__mt_bfloat16 a, __mt_bfloat16 b) {
  float fa = __bfloat162float(a);
  float fb = __bfloat162float(b);
  return fa * fb;
}

template<>
inline __device__ float2 mul(__mt_bfloat162 a, __mt_bfloat162 b) {
  float2 fa = bf1622float2(a);
  float2 fb = bf1622float2(b);
  return mul(fa, fb);
}

template<>
inline __device__ float2 mul(__mt_bfloat16 a, __mt_bfloat162 b) {
  return mul(bf162bf162(a), b);
}

template<>
inline __device__ Float4_ mul(bf16_4_t a, bf16_4_t b) {
  Float4_ fc;
  fc.x = mul(a.x, b.x);
  fc.y = mul(a.y, b.y);
  return fc;
}

template<>
inline __device__ Float4_ mul(__mt_bfloat16 a, bf16_4_t b) {
  __mt_bfloat162 s = bf162bf162(a);
  Float4_ fc;
  fc.x = mul(s, b.x);
  fc.y = mul(s, b.y);
  return fc;
}

template<>
inline __device__ Float8_ mul(bf16_8_t a, bf16_8_t b) {
  Float8_ fc;
  fc.x = mul(a.x, b.x);
  fc.y = mul(a.y, b.y);
  fc.z = mul(a.z, b.z);
  fc.w = mul(a.w, b.w);
  return fc;
}

template<>
inline __device__ Float8_ mul(__mt_bfloat16 a, bf16_8_t b) {
  __mt_bfloat162 s = bf162bf162(a);
  Float8_ fc;
  fc.x = mul(s, b.x);
  fc.y = mul(s, b.y);
  fc.z = mul(s, b.z);
  fc.w = mul(s, b.w);
  return fc;
}

// Vector fused multiply-add.
// Fused multiply-add overloads: d = a * b + c for the bf16 vector widths,
// plus fp32-accumulator variants used by the attention dot products.
// Packed bf16x2 FMA via the hardware intrinsic.
inline __device__ __mt_bfloat162 fma(__mt_bfloat162 a, __mt_bfloat162 b, __mt_bfloat162 c) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __hfma2(a, b, c);
#endif
}

// Scalar a broadcast across both lanes.
inline __device__ __mt_bfloat162 fma(__mt_bfloat16 a, __mt_bfloat162 b, __mt_bfloat162 c) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  return __hfma2(bf162bf162(a), b, c);
#endif
}

// 4-wide FMA, componentwise.
inline __device__ bf16_4_t fma(bf16_4_t a, bf16_4_t b, bf16_4_t c) {
  bf16_4_t d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  return d;
}

inline __device__ bf16_4_t fma(__mt_bfloat16 a, bf16_4_t b, bf16_4_t c) {
  __mt_bfloat162 s = bf162bf162(a);
  bf16_4_t d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  return d;
}

// 8-wide FMA, componentwise.
inline __device__ bf16_8_t fma(bf16_8_t a, bf16_8_t b, bf16_8_t c) {
  bf16_8_t d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  d.z = fma(a.z, b.z, c.z);
  d.w = fma(a.w, b.w, c.w);
  return d;
}

inline __device__ bf16_8_t fma(__mt_bfloat16 a, bf16_8_t b, bf16_8_t c) {
  __mt_bfloat162 s = bf162bf162(a);
  bf16_8_t d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  d.z = fma(s, b.z, c.z);
  d.w = fma(s, b.w, c.w);
  return d;
}

// FMA variants accumulating in fp32: bf16 inputs are widened before the math.
inline __device__ float fma(__mt_bfloat16 a, __mt_bfloat16 b, float fc) {
  return __bfloat162float(a) * __bfloat162float(b) + fc;
}

inline __device__ float2 fma(__mt_bfloat162 a, __mt_bfloat162 b, float2 fc) {
  float2 fa = bf1622float2(a);
  float2 fb = bf1622float2(b);
  return fma(fa, fb, fc);
}

inline __device__ float2 fma(__mt_bfloat16 a, __mt_bfloat162 b, float2 fc) {
  return fma(bf162bf162(a), b, fc);
}

inline __device__ Float4_ fma(bf16_4_t a, bf16_4_t b, Float4_ fc) {
  Float4_ fd;
  fd.x = fma(a.x, b.x, fc.x);
  fd.y = fma(a.y, b.y, fc.y);
  return fd;
}

inline __device__ Float4_ fma(__mt_bfloat16 a, bf16_4_t b, Float4_ fc) {
  __mt_bfloat162 s = bf162bf162(a);
  Float4_ fd;
  fd.x = fma(s, b.x, fc.x);
  fd.y = fma(s, b.y, fc.y);
  return fd;
}

inline __device__ Float8_ fma(bf16_8_t a, bf16_8_t b, Float8_ fc) {
  Float8_ fd;
  fd.x = fma(a.x, b.x, fc.x);
  fd.y = fma(a.y, b.y, fc.y);
  fd.z = fma(a.z, b.z, fc.z);
  fd.w = fma(a.w, b.w, fc.w);
  return fd;
}

inline __device__ Float8_ fma(__mt_bfloat16 a, bf16_8_t b, Float8_ fc) {
  __mt_bfloat162 s = bf162bf162(a);
  Float8_ fd;
  fd.x = fma(s, b.x, fc.x);
  fd.y = fma(s, b.y, fc.y);
  fd.z = fma(s, b.z, fc.z);
  fd.w = fma(s, b.w, fc.w);
  return fd;
}

// Vector sum: horizontal reduction of each bf16 vector width to one float.
template<>
inline __device__ float sum(__mt_bfloat16 v) {
  return __bfloat162float(v);
}

template<>
inline __device__ float sum(__mt_bfloat162 v) {
  float2 vf = bf1622float2(v);
  return vf.x + vf.y;
}

template<>
inline __device__ float sum(bf16_4_t v) {
  return sum(v.x) + sum(v.y);
}

template<>
inline __device__ float sum(bf16_8_t v) {
  return sum(v.x) + sum(v.y) + sum(v.z) + sum(v.w);
}

// From float32 to bfloat16 (round-to-nearest conversions).
inline __device__ void from_float(__mt_bfloat16& dst, float src) {
  dst = __float2bfloat16(src);
}

inline __device__ void from_float(__mt_bfloat162& dst, float2 src) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  dst = __float22bfloat162_rn(src);
#endif
}

inline __device__ void from_float(bf16_4_t& dst, Float4_ src) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  dst.x = __float22bfloat162_rn(src.x);
  dst.y = __float22bfloat162_rn(src.y);
#endif
}

inline __device__ void from_float(bf16_8_t& dst, Float8_ src) {
#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800
  assert(false);
#else
  dst.x = __float22bfloat162_rn(src.x);
  dst.y = __float22bfloat162_rn(src.y);
  dst.z = __float22bfloat162_rn(src.z);
  dst.w = __float22bfloat162_rn(src.w);
#endif
}

// From bfloat16 to float32.
inline __device__ float to_float(__mt_bfloat16 u) {
  return __bfloat162float(u);
}

// Zero-out a variable.
+inline __device__ void zero(__mt_bfloat16& dst) { +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800 + assert(false); +#else + // Same as CUDART_ZERO_BF16 introduced in CUDA 12.2. + dst = __ushort_as_bfloat16((unsigned short)0x0000U); +#endif +} + +} // namespace vllm diff --git a/csrc_musa/attention/dtype_float16.muh b/csrc_musa/attention/dtype_float16.muh new file mode 100644 index 0000000..ba4de2b --- /dev/null +++ b/csrc_musa/attention/dtype_float16.muh @@ -0,0 +1,503 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp + * and https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "attention_generic.muh" +#include "dtype_float32.muh" + +#ifdef USE_ROCM + #include +#endif + +#include + +namespace vllm { + +// FP16 vector types for Q, K, V. 
// Map an (fp16 element, element count) pair to the packed integer register
// type used to move Q/K/V fragments.
// NOTE(review): explicit template arguments were stripped by the patch
// extraction; restored from the FasterTransformer upstream this file is
// adapted from — confirm against the original .muh.
template<>
struct Vec<uint16_t, 1> {
  using Type = uint16_t;
};
template<>
struct Vec<uint16_t, 2> {
  using Type = uint32_t;
};
template<>
struct Vec<uint16_t, 4> {
  using Type = uint2;
};
template<>
struct Vec<uint16_t, 8> {
  using Type = uint4;
};

// FP32 accumulator vector types corresponding to Vec.
template<>
struct FloatVec<uint16_t> {
  using Type = float;
};
template<>
struct FloatVec<uint32_t> {
  using Type = float2;
};
template<>
struct FloatVec<uint2> {
  using Type = Float4_;
};
template<>
struct FloatVec<uint4> {
  using Type = Float8_;
};

// Utility functions for type conversions.

// Duplicate one fp16 lane into both halves of a packed f16x2 register.
inline __device__ uint32_t h0_h0(uint16_t a) {
#ifndef USE_ROCM
  uint32_t b;
  asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a));
  return b;
#else
  union {
    uint32_t u32;
    uint16_t u16[2];
  } tmp;
  tmp.u16[0] = a;
  tmp.u16[1] = a;
  return tmp.u32;
#endif
}

inline __device__ float half_to_float(uint16_t h) {
  float f;
#ifndef USE_ROCM
  asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h));
#else
  asm volatile("v_cvt_f32_f16 %0, %1;" : "=v"(f) : "v"(h));
#endif
  return f;
}

inline __device__ float2 half2_to_float2(uint32_t v) {
#ifndef USE_ROCM
  uint16_t lo, hi;
  asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v));
  return make_float2(half_to_float(lo), half_to_float(hi));
#else
  union {
    uint32_t u32;
    uint16_t u16[2];
  } tmp;
  tmp.u32 = v;
  float2 ret;
  ret.x = half_to_float(tmp.u16[0]);
  ret.y = half_to_float(tmp.u16[1]);
  return ret;
#endif
}

// Round-to-nearest fp32 -> fp16 conversion.
inline __device__ uint16_t float_to_half(float f) {
  union {
    uint32_t u32;
    uint16_t u16[2];
  } tmp;
#ifndef USE_ROCM
  asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f));
#else
  asm volatile("v_cvt_f16_f32 %0, %1;\n" : "=v"(tmp.u32) : "v"(f));
#endif
  return tmp.u16[0];
}

inline __device__ uint32_t float2_to_half2(float2 f) {
  union {
    uint32_t u32;
    uint16_t u16[2];
  } tmp;
#ifndef USE_ROCM
  #if defined(__MUSA_ARCH__) && __MUSA_ARCH__ >= 800
  // Single f16x2 convert; the (y, x) operand order packs x into the low half.
  asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x));
  #else
  asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x));
  asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y));
  #endif
#else
  tmp.u16[0] = float_to_half(f.x);
  tmp.u16[1] = float_to_half(f.y);
#endif
  return tmp.u32;
}

// Vector addition.
inline __device__ uint16_t add(uint16_t a, uint16_t b) {
  uint16_t c;
#ifndef USE_ROCM
  asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
#else
  asm volatile("v_add_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
  return c;
}

inline __device__ uint32_t add(uint32_t a, uint32_t b) {
  uint32_t c;
#ifndef USE_ROCM
  asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
#else
  asm volatile("v_pk_add_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
  return c;
}

inline __device__ uint2 add(uint2 a, uint2 b) {
  uint2 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  return c;
}

inline __device__ uint4 add(uint4 a, uint4 b) {
  uint4 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  c.z = add(a.z, b.z);
  c.w = add(a.w, b.w);
  return c;
}

// Mixed fp16/fp32 adds: widen the packed-half operand and accumulate in fp32.
inline __device__ float2 add(uint32_t a, float2 fb) {
  float2 fa = half2_to_float2(a);
  return add(fa, fb);
}

inline __device__ Float4_ add(uint2 a, Float4_ fb) {
  Float4_ fc;
  fc.x = add(a.x, fb.x);
  fc.y = add(a.y, fb.y);
  return fc;
}

inline __device__ Float8_ add(uint4 a, Float8_ fb) {
  Float8_ fc;
  fc.x = add(a.x, fb.x);
  fc.y = add(a.y, fb.y);
  fc.z = add(a.z, fb.z);
  fc.w = add(a.w, fb.w);
  return fc;
}

// Vector multiplication.
// Specializations of the generic mul<Acc, A, B> declared in
// attention_generic.muh.
// NOTE(review): explicit template arguments were stripped by the patch
// extraction; restored from the FasterTransformer upstream — confirm.
template<>
inline __device__ uint16_t mul<uint16_t, uint16_t, uint16_t>(uint16_t a, uint16_t b) {
  uint16_t c;
#ifndef USE_ROCM
  asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
#else
  asm volatile("v_mul_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
  return c;
}

template<>
inline __device__ uint32_t mul<uint32_t, uint32_t, uint32_t>(uint32_t a, uint32_t b) {
  uint32_t c;
#ifndef USE_ROCM
  asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
#else
  asm volatile("v_pk_mul_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
  return c;
}

template<>
inline __device__ uint32_t mul<uint32_t, uint16_t, uint32_t>(uint16_t a, uint32_t b) {
  // Broadcast the scalar half into both lanes, then packed multiply.
  return mul<uint32_t, uint32_t, uint32_t>(h0_h0(a), b);
}

template<>
inline __device__ uint2 mul<uint2, uint2, uint2>(uint2 a, uint2 b) {
  uint2 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
  return c;
}

template<>
inline __device__ uint2 mul<uint2, uint16_t, uint2>(uint16_t a, uint2 b) {
  uint32_t s = h0_h0(a);
  uint2 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
  return c;
}

template<>
inline __device__ uint4 mul<uint4, uint4, uint4>(uint4 a, uint4 b) {
  uint4 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
  c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z);
  c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w);
  return c;
}

template<>
inline __device__ uint4 mul<uint4, uint16_t, uint4>(uint16_t a, uint4 b) {
  uint32_t s = h0_h0(a);
  uint4 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
  c.z = mul<uint32_t, uint32_t, uint32_t>(s, b.z);
  c.w = mul<uint32_t, uint32_t, uint32_t>(s, b.w);
  return c;
}

// fp16 inputs, fp32 accumulator result.
template<>
inline __device__ float mul<float, uint16_t, uint16_t>(uint16_t a, uint16_t b) {
  float fa = half_to_float(a);
  float fb = half_to_float(b);
  return fa * fb;
}

template<>
inline __device__ float2 mul<float2, uint32_t, uint32_t>(uint32_t a, uint32_t b) {
  float2 fa = half2_to_float2(a);
  float2 fb = half2_to_float2(b);
  return mul<float2, float2>(fa, fb);
}

template<>
inline __device__ float2 mul<float2, uint16_t, uint32_t>(uint16_t a, uint32_t b) {
  return mul<float2, uint32_t, uint32_t>(h0_h0(a), b);
}

template<>
inline __device__ Float4_ mul<Float4_, uint2, uint2>(uint2 a, uint2 b) {
  Float4_ fc;
  fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
  fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
  return fc;
}

template<>
inline __device__ Float4_ mul<Float4_, uint16_t, uint2>(uint16_t a, uint2 b) {
  uint32_t s = h0_h0(a);
  Float4_ fc;
  fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
  fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
  return fc;
}

template<>
inline __device__ Float8_ mul<Float8_, uint4, uint4>(uint4 a, uint4 b) {
  Float8_ fc;
  fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
  fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
  fc.z = mul<float2, uint32_t, uint32_t>(a.z, b.z);
  fc.w = mul<float2, uint32_t, uint32_t>(a.w, b.w);
  return fc;
}

template<>
inline __device__ Float8_ mul<Float8_, uint16_t, uint4>(uint16_t a, uint4 b) {
  uint32_t s = h0_h0(a);
  Float8_ fc;
  fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
  fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
  fc.z = mul<float2, uint32_t, uint32_t>(s, b.z);
  fc.w = mul<float2, uint32_t, uint32_t>(s, b.w);
  return fc;
}

// Vector fused multiply-add: d = a * b + c on packed halves.
inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t d;
#ifndef USE_ROCM
  asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c));
#else
  asm volatile("v_pk_fma_f16 %0, %1, %2, %3;\n" : "=v"(d) : "v"(a), "v"(b), "v"(c));
#endif
  return d;
}

inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c) {
  return fma(h0_h0(a), b, c);
}

inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) {
  uint2 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  return d;
}

inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) {
  uint32_t s = h0_h0(a);
  uint2 d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  return d;
}

inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) {
  uint4 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  d.z = fma(a.z, b.z, c.z);
  d.w = fma(a.w, b.w, c.w);
  return d;
}

inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) {
  uint32_t s = h0_h0(a);
  uint4 d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  d.z = fma(s, b.z, c.z);
  d.w = fma(s, b.w, c.w);
  return d;
}

// fp16 inputs with fp32 accumulator.
inline __device__ float fma(uint16_t a, uint16_t b, float fc) {
  float fa = half_to_float(a);
  float fb = half_to_float(b);
  return fa * fb + fc;
}

inline __device__ float2 fma(uint32_t a, uint32_t b, float2 fc) {
  float2 fa = half2_to_float2(a);
  float2 fb = half2_to_float2(b);
  return fma(fa, fb, fc);
}

inline __device__ float2 fma(uint16_t a, uint32_t b, float2 fc) {
  return fma(h0_h0(a), b, fc);
}

inline __device__ Float4_ fma(uint2 a, uint2 b, Float4_ fc) {
  Float4_ fd;
  fd.x = fma(a.x, b.x, fc.x);
  fd.y = fma(a.y, b.y, fc.y);
  return fd;
}

inline __device__ Float4_ fma(uint16_t a, uint2 b, Float4_ fc) {
  uint32_t s = h0_h0(a);
  Float4_ fd;
  fd.x = fma(s, b.x, fc.x);
  fd.y = fma(s, b.y, fc.y);
  return fd;
}

inline __device__ Float8_ fma(uint4 a, uint4 b, Float8_ fc) {
  Float8_ fd;
  fd.x = fma(a.x, b.x, fc.x);
  fd.y = fma(a.y, b.y, fc.y);
  fd.z = fma(a.z, b.z, fc.z);
  fd.w = fma(a.w, b.w, fc.w);
  return fd;
}

inline __device__ Float8_ fma(uint16_t a, uint4 b, Float8_ fc) {
  uint32_t s = h0_h0(a);
  Float8_ fd;
  fd.x = fma(s, b.x, fc.x);
  fd.y = fma(s, b.y, fc.y);
  fd.z = fma(s, b.z, fc.z);
  fd.w = fma(s, b.w, fc.w);
  return fd;
}

// Vector sum (horizontal reduction to a single fp32).
template<>
inline __device__ float sum<uint16_t>(uint16_t v) {
  return half_to_float(v);
}

template<>
inline __device__ float sum<uint32_t>(uint32_t v) {
  float2 tmp = half2_to_float2(v);
  return tmp.x + tmp.y;
}

template<>
inline __device__ float sum<uint2>(uint2 v) {
  uint32_t c = add(v.x, v.y);
  return sum(c);
}

template<>
inline __device__ float sum<uint4>(uint4 v) {
  uint32_t c = add(v.x, v.y);
  c = add(c, v.z);
  c = add(c, v.w);
  return sum(c);
}

// From float32 to float16.
inline __device__ void from_float(uint16_t& dst, float src) {
  dst = float_to_half(src);
}

inline __device__ void from_float(uint32_t& dst, float2 src) {
  dst = float2_to_half2(src);
}

inline __device__ void from_float(uint2& dst, Float4_ src) {
  dst.x = float2_to_half2(src.x);
  dst.y = float2_to_half2(src.y);
}

inline __device__ void from_float(uint4& dst, Float8_ src) {
  dst.x = float2_to_half2(src.x);
  dst.y = float2_to_half2(src.y);
  dst.z = float2_to_half2(src.z);
  dst.w = float2_to_half2(src.w);
}

// From float16 to float32.
inline __device__ float to_float(uint16_t u) {
  return half_to_float(u);
}

inline __device__ float2 to_float(uint32_t u) {
  return half2_to_float2(u);
}

inline __device__ Float4_ to_float(uint2 u) {
  Float4_ tmp;
  tmp.x = half2_to_float2(u.x);
  tmp.y = half2_to_float2(u.y);
  return tmp;
}

inline __device__ Float8_ to_float(uint4 u) {
  Float8_ tmp;
  tmp.x = half2_to_float2(u.x);
  tmp.y = half2_to_float2(u.y);
  tmp.z = half2_to_float2(u.z);
  tmp.w = half2_to_float2(u.w);
  return tmp;
}

// Zero-out a variable.
inline __device__ void zero(uint16_t& dst) {
  dst = uint16_t(0);
}

}  // namespace vllm
diff --git a/csrc_musa/attention/dtype_float32.muh b/csrc_musa/attention/dtype_float32.muh
new file mode 100644
index 0000000..7eaffc3
--- /dev/null
+++ b/csrc_musa/attention/dtype_float32.muh
@@ -0,0 +1,274 @@
/*
 * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
 * and https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
 * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved.
 * Copyright (c) 2023, The vLLM team.
 * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ +#pragma once + +#include "attention_generic.muh" + +#include + +namespace vllm { + +// Define custom FP32 vector data types. +struct Float4_ { + float2 x; + float2 y; +}; + +struct Float8_ { + float2 x; + float2 y; + float2 z; + float2 w; +}; + +// FP32 vector types for Q, K, V. +template<> +struct Vec { + using Type = float; +}; +template<> +struct Vec { + using Type = float2; +}; +template<> +struct Vec { + using Type = float4; +}; + +// FP32 accumulator vector types corresponding to Vec. +template<> +struct FloatVec { + using Type = float; +}; +template<> +struct FloatVec { + using Type = float2; +}; +template<> +struct FloatVec { + using Type = float4; +}; + +// Vector addition. +inline __device__ float add(float a, float b) { + return a + b; +} + +inline __device__ float2 add(float2 a, float2 b) { + float2 c; + c.x = add(a.x, b.x); + c.y = add(a.y, b.y); + return c; +} + +inline __device__ float4 add(float4 a, float4 b) { + float4 c; + c.x = add(a.x, b.x); + c.y = add(a.y, b.y); + c.z = add(a.z, b.z); + c.w = add(a.w, b.w); + return c; +} + +// Vector multiplication. +template<> +inline __device__ float mul(float a, float b) { + return a * b; +} + +template<> +inline __device__ float2 mul(float2 a, float2 b) { + float2 c; + c.x = a.x * b.x; + c.y = a.y * b.y; + return c; +} + +template<> +inline __device__ float2 mul(float a, float2 b) { + float2 c; + c.x = a * b.x; + c.y = a * b.y; + return c; +} + +template<> +inline __device__ float4 mul(float4 a, float4 b) { + float4 c; + c.x = a.x * b.x; + c.y = a.y * b.y; + c.z = a.z * b.z; + c.w = a.w * b.w; + return c; +} + +template<> +inline __device__ float4 mul(float a, float4 b) { + float4 c; + c.x = a * b.x; + c.y = a * b.y; + c.z = a * b.z; + c.w = a * b.w; + return c; +} + +// Vector fused multiply-add. 
// Vector fused multiply-add: d = a * b + c, elementwise.
inline __device__ float fma(float a, float b, float c) {
  return a * b + c;
}

inline __device__ float2 fma(float2 a, float2 b, float2 c) {
  float2 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  return d;
}

inline __device__ float2 fma(float a, float2 b, float2 c) {
  float2 d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  return d;
}

inline __device__ float4 fma(float4 a, float4 b, float4 c) {
  float4 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  d.z = fma(a.z, b.z, c.z);
  d.w = fma(a.w, b.w, c.w);
  return d;
}

inline __device__ float4 fma(float a, float4 b, float4 c) {
  float4 d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  d.z = fma(a, b.z, c.z);
  d.w = fma(a, b.w, c.w);
  return d;
}

inline __device__ Float4_ fma(float a, Float4_ b, Float4_ c) {
  Float4_ d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  return d;
}

inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) {
  Float8_ d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  d.z = fma(a, b.z, c.z);
  d.w = fma(a, b.w, c.w);
  return d;
}

// Vector sum (horizontal reduction; specializations of sum<T> from attention_generic.muh).
template<>
inline __device__ float sum<float>(float v) {
  return v;
}

template<>
inline __device__ float sum<float2>(float2 v) {
  return v.x + v.y;
}

template<>
inline __device__ float sum<float4>(float4 v) {
  return v.x + v.y + v.z + v.w;
}

template<>
inline __device__ float sum<Float4_>(Float4_ v) {
  return v.x.x + v.x.y + v.y.x + v.y.y;
}

template<>
inline __device__ float sum<Float8_>(Float8_ v) {
  return v.x.x + v.x.y + v.y.x + v.y.y + v.z.x + v.z.y + v.w.x + v.w.y;
}

// Vector dot product.
// Vector dot product: elementwise multiply, then horizontal sum.
inline __device__ float dot(float a, float b) {
  return a * b;
}

inline __device__ float dot(float2 a, float2 b) {
  float2 c = mul<float2, float2>(a, b);
  return c.x + c.y;
}

inline __device__ float dot(Float4_ a, Float4_ b) {
  float2 acc = mul<float2, float2>(a.x, b.x);
  acc = fma(a.y, b.y, acc);
  return acc.x + acc.y;
}

inline __device__ float dot(Float8_ a, Float8_ b) {
  float2 acc = mul<float2, float2>(a.x, b.x);
  acc = fma(a.y, b.y, acc);
  acc = fma(a.z, b.z, acc);
  acc = fma(a.w, b.w, acc);
  return acc.x + acc.y;
}

// From float to float (identity; mirrors the fp16/bf16 conversion API).
inline __device__ void from_float(float& dst, float src) {
  dst = src;
}

inline __device__ void from_float(float2& dst, float2 src) {
  dst = src;
}

inline __device__ void from_float(float4& dst, float4 src) {
  dst = src;
}

// To float (identity: the data is already fp32).
inline __device__ float to_float(float u) {
  return u;
}

inline __device__ float2 to_float(float2 u) {
  return u;
}

inline __device__ float4 to_float(float4 u) {
  return u;
}

inline __device__ Float4_ to_float(Float4_ u) {
  return u;
}

inline __device__ Float8_ to_float(Float8_ u) {
  return u;
}

// Zero-out a variable.
inline __device__ void zero(float& dst) {
  dst = 0.f;
}

}  // namespace vllm
diff --git a/csrc_musa/attention/dtype_fp8.muh b/csrc_musa/attention/dtype_fp8.muh
new file mode 100644
index 0000000..845ad85
--- /dev/null
+++ b/csrc_musa/attention/dtype_fp8.muh
@@ -0,0 +1,35 @@
#pragma once

#include "attention_generic.muh"

#include <stdint.h>  // NOTE(review): include target stripped by patch extraction — confirm
#ifdef ENABLE_FP8_E5M2
#include <musa_fp8.h>  // NOTE(review): target stripped; CUDA original uses <cuda_fp8.h> — confirm MUSA equivalent
#endif

namespace vllm {
#if defined(ENABLE_FP8_E5M2) || defined(ENABLE_FP8_E4M3)
// fp8 vector types for quantization of kv cache
// NOTE(review): template arguments stripped by patch extraction; restored
// by analogy with the fp16 specializations — confirm.

template<>
struct Vec<uint8_t, 1> {
  using Type = uint8_t;
};

template<>
struct Vec<uint8_t, 2> {
  using Type = uint16_t;
};

template<>
struct Vec<uint8_t, 4> {
  using Type = uint32_t;
};

template<>
struct Vec<uint8_t, 8> {
  using Type = uint2;
};
#endif  // ENABLE_FP8_E5M2 || ENABLE_FP8_E4M3 (fixed: comment previously named only E5M2)

}  // namespace vllm
diff --git a/csrc_musa/cache.h b/csrc_musa/cache.h
new file mode 100644
index 0000000..4c142ce
--- /dev/null
+++ b/csrc_musa/cache.h
@@ -0,0 +1,38 @@
#pragma once

#include <torch/extension.h>  // NOTE(review): include target stripped by patch extraction — confirm

#include <map>
#include <vector>

// Copy whole KV-cache blocks between devices (or device<->host) per mapping.
void swap_blocks(
  torch::Tensor& src,
  torch::Tensor& dst,
  const std::map<int64_t, int64_t>& block_mapping);

// Duplicate cache blocks (src block -> one or more dst blocks) across layers.
void copy_blocks(
  std::vector<torch::Tensor>& key_caches,
  std::vector<torch::Tensor>& value_caches,
  const std::map<int64_t, std::vector<int64_t>>& block_mapping);

// Scatter new key/value tokens into the paged KV cache.
void reshape_and_cache(
  torch::Tensor& key,
  torch::Tensor& value,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  torch::Tensor& slot_mapping,
  const std::string& kv_cache_dtype,
  const float kv_scale);

void reshape_and_cache_flash(
  torch::Tensor& key,
  torch::Tensor& value,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  torch::Tensor& slot_mapping,
  const std::string& kv_cache_dtype);

// Just for unittest
void convert_fp8(
  torch::Tensor& src_cache,
  torch::Tensor& dst_cache);
diff --git a/csrc_musa/cache_kernels.mu b/csrc_musa/cache_kernels.mu
new file mode 100644
index 0000000..727d7a0
--- /dev/null
+++ b/csrc_musa/cache_kernels.mu
@@ -0,0 +1,419 @@
#include <torch/extension.h>  // NOTE(review): include target stripped by patch extraction — confirm
#include "torch_musa/csrc/aten/musa/MUSAContext.h"
#include "torch_musa/csrc/core/MUSAGuard.h"
#include "musa_compat.h"
#include "dispatch_utils.h"
#if defined(ENABLE_FP8_E5M2)
#include "quantization/fp8_e5m2_kvcache/quant_utils.muh"  // fixed: was .cuh; the file list ships .muh
#elif defined(ENABLE_FP8_E4M3)
#include "quantization/fp8/amd_detail/quant_utils.muh"  // fixed: was .cuh; the file list ships .muh
#endif

#include <algorithm>  // NOTE(review): include targets stripped by patch extraction — confirm
#include <cassert>
#include <map>
#include <vector>

#ifdef USE_ROCM
  #include <hip/hip_bf16.h>  // NOTE(review): include target stripped — confirm
  typedef __hip_bfloat16 __mt_bfloat16;
#endif

// Copy whole cache blocks between src and dst tensors according to
// block_mapping (src block number -> dst block number). Handles D2D, D2H and
// H2D; both tensors on GPU must be on the same device.
void swap_blocks(
  torch::Tensor& src,
  torch::Tensor& dst,
  const std::map<int64_t, int64_t>& block_mapping) {
  torch::Device src_device = src.device();
  torch::Device dst_device = dst.device();
  musaMemcpyKind memcpy_type;
  if (src_device.is_cuda() && dst_device.is_cuda()) {
    TORCH_CHECK(
      src_device.index() == dst_device.index(),
      "src and dst must be on the same GPU");
    memcpy_type = musaMemcpyDeviceToDevice;
  } else if (src_device.is_cuda() && dst_device.is_cpu()) {
    memcpy_type = musaMemcpyDeviceToHost;
  } else if (src_device.is_cpu() && dst_device.is_cuda()) {
    memcpy_type = musaMemcpyHostToDevice;
  } else {
    TORCH_CHECK(false, "Invalid device combination");
  }

  char* src_ptr = static_cast<char*>(src.data_ptr());
  char* dst_ptr = static_cast<char*>(dst.data_ptr());

  // One block = one slice along dim 0.
  const int64_t block_size_in_bytes = src.element_size() * src[0].numel();
  const at::musa::OptionalMUSAGuard device_guard(src_device.is_cuda() ? src_device : dst_device);
  const musaStream_t stream = at::musa::getCurrentMUSAStream();
  // NOTE(woosuk): This can be slow if the number of blocks is large.
  for (const auto& pair : block_mapping) {
    int64_t src_block_number = pair.first;
    int64_t dst_block_number = pair.second;
    int64_t src_offset = src_block_number * block_size_in_bytes;
    int64_t dst_offset = dst_block_number * block_size_in_bytes;
    musaMemcpyAsync(
      dst_ptr + dst_offset,
      src_ptr + src_offset,
      block_size_in_bytes,
      memcpy_type,
      stream);
  }
}

namespace vllm {

// Grid: (num_layers, num_pairs)
// Each thread block copies one (layer, src->dst) block pair for both the key
// and value caches; block_mapping holds flattened (src, dst) pairs.
template<typename scalar_t>
__global__ void copy_blocks_kernel(
  int64_t* key_cache_ptrs,
  int64_t* value_cache_ptrs,
  const int64_t* __restrict__ block_mapping,
  const int numel_per_block) {
  const int layer_idx = blockIdx.x;
  const int pair_idx = blockIdx.y;

  scalar_t* key_cache = reinterpret_cast<scalar_t*>(key_cache_ptrs[layer_idx]);
  scalar_t* value_cache = reinterpret_cast<scalar_t*>(value_cache_ptrs[layer_idx]);
  int64_t src_block_number = block_mapping[2 * pair_idx];
  int64_t dst_block_number = block_mapping[2 * pair_idx + 1];

  const int64_t src_block_offset = src_block_number * numel_per_block;
  const int64_t dst_block_offset = dst_block_number * numel_per_block;
  for (int i = threadIdx.x; i < numel_per_block; i += blockDim.x) {
    int64_t src_offset = src_block_offset + i;
    int64_t dst_offset = dst_block_offset + i;
    key_cache[dst_offset] = key_cache[src_offset];
  }
  for (int i = threadIdx.x; i < numel_per_block; i += blockDim.x) {
    int64_t src_offset = src_block_offset + i;
    int64_t dst_offset = dst_block_offset + i;
    value_cache[dst_offset] = value_cache[src_offset];
  }
}

}  // namespace vllm

// Duplicate cache blocks across all layers; each src block may fan out to
// several dst blocks.
void copy_blocks(
  std::vector<torch::Tensor>& key_caches,
  std::vector<torch::Tensor>& value_caches,
  const std::map<int64_t, std::vector<int64_t>>& block_mapping) {
  int num_layers = key_caches.size();
  TORCH_CHECK(num_layers == value_caches.size());
  if (num_layers == 0) {
    return;
  }
  torch::Device cache_device = key_caches[0].device();
  TORCH_CHECK(cache_device.is_cuda());

  // Create data structures for the kernel.
  // Create an array of pointers to the key and value caches.
+ int64_t key_cache_ptrs[num_layers]; + int64_t value_cache_ptrs[num_layers]; + for (int layer_idx = 0; layer_idx < num_layers; ++layer_idx) { + key_cache_ptrs[layer_idx] = reinterpret_cast(key_caches[layer_idx].data_ptr()); + value_cache_ptrs[layer_idx] = reinterpret_cast(value_caches[layer_idx].data_ptr()); + } + // Create block mapping array. + std::vector block_mapping_vec; + for (const auto& pair : block_mapping) { + int64_t src_block_number = pair.first; + for (int64_t dst_block_number : pair.second) { + block_mapping_vec.push_back(src_block_number); + block_mapping_vec.push_back(dst_block_number); + } + } + int64_t* block_mapping_array = block_mapping_vec.data(); + int num_pairs = block_mapping_vec.size() / 2; + + // Move the data structures to the GPU. + // NOTE: This synchronizes the CPU and GPU. + torch::Tensor key_cache_ptrs_tensor = torch::from_blob( + key_cache_ptrs, {num_layers}, torch::kInt64).to(cache_device); + torch::Tensor value_cache_ptrs_tensor = torch::from_blob( + value_cache_ptrs, {num_layers}, torch::kInt64).to(cache_device); + torch::Tensor block_mapping_tensor = torch::from_blob( + block_mapping_array, {2 * num_pairs}, torch::kInt64).to(cache_device); + + // Launch the kernel. 
+ const int numel_per_block = key_caches[0][0].numel(); + dim3 grid(num_layers, num_pairs); + dim3 block(std::min(1024, numel_per_block)); + const at::musa::OptionalMUSAGuard device_guard(cache_device); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES( + key_caches[0].scalar_type(), "copy_blocks_kernel", ([&] { + vllm::copy_blocks_kernel<<>>( + key_cache_ptrs_tensor.data_ptr(), + value_cache_ptrs_tensor.data_ptr(), + block_mapping_tensor.data_ptr(), + numel_per_block); + })); +} + +namespace vllm { + +template +__global__ void reshape_and_cache_kernel( + const scalar_t* __restrict__ key, // [num_tokens, num_heads, head_size] + const scalar_t* __restrict__ value, // [num_tokens, num_heads, head_size] + cache_t* __restrict__ key_cache, // [num_blocks, num_heads, head_size/x, block_size, x] + cache_t* __restrict__ value_cache, // [num_blocks, num_heads, head_size, block_size] + const int64_t* __restrict__ slot_mapping, // [num_tokens] + const int key_stride, + const int value_stride, + const int num_heads, + const int head_size, + const int block_size, + const int x, + const float kv_scale) { + const int64_t token_idx = blockIdx.x; + const int64_t slot_idx = slot_mapping[token_idx]; + if (slot_idx < 0) { + // Padding token that should be ignored. 
+ return; + } + + const int64_t block_idx = slot_idx / block_size; + const int64_t block_offset = slot_idx % block_size; + + const int n = num_heads * head_size; + for (int i = threadIdx.x; i < n; i += blockDim.x) { + const int64_t src_key_idx = token_idx * key_stride + i; + const int64_t src_value_idx = token_idx * value_stride + i; + + const int head_idx = i / head_size; + const int head_offset = i % head_size; + const int x_idx = head_offset / x; + const int x_offset = head_offset % x; + + const int64_t tgt_key_idx = block_idx * num_heads * (head_size / x) * block_size * x + + head_idx * (head_size / x) * block_size * x + + x_idx * block_size * x + + block_offset * x + + x_offset; + const int64_t tgt_value_idx = block_idx * num_heads * head_size * block_size + + head_idx * head_size * block_size + + head_offset * block_size + + block_offset; + scalar_t tgt_key = key[src_key_idx]; + scalar_t tgt_value = value[src_value_idx]; + if constexpr (is_fp8_kv_cache) { +#if defined(ENABLE_FP8_E5M2) + key_cache[tgt_key_idx] = fp8_e5m2_unscaled::vec_conversion(tgt_key); + value_cache[tgt_value_idx] = fp8_e5m2_unscaled::vec_conversion(tgt_value); +#elif defined(ENABLE_FP8_E4M3) + key_cache[tgt_key_idx] = fp8_e4m3::scaled_vec_conversion(tgt_key, kv_scale); + value_cache[tgt_value_idx] = fp8_e4m3::scaled_vec_conversion(tgt_value, kv_scale); +#else + assert(false); +#endif + } else { + key_cache[tgt_key_idx] = tgt_key; + value_cache[tgt_value_idx] = tgt_value; + } + } +} + +template +__global__ void reshape_and_cache_flash_kernel( + const scalar_t* __restrict__ key, // [num_tokens, num_heads, head_size] + const scalar_t* __restrict__ value, // [num_tokens, num_heads, head_size] + scalar_t* __restrict__ k_cache, // [num_blocks, block_size, num_heads, head_size] + scalar_t* __restrict__ v_cache, // [num_blocks, block_size, num_heads, head_size] + const int64_t* __restrict__ slot_mapping, // [num_tokens] + const int block_stride, + const int key_stride, + const int value_stride, + 
const int num_heads, + const int head_size, + const int block_size) { + const int64_t token_idx = blockIdx.x; + const int64_t slot_idx = slot_mapping[token_idx]; + // NOTE: slot_idx can be -1 if the token is padded + if (slot_idx < 0) { + return; + } + const int64_t block_idx = slot_idx / block_size; + const int64_t block_offset = slot_idx % block_size; + const int n = num_heads * head_size; + for (int i = threadIdx.x; i < n; i += blockDim.x) { + const int64_t src_key_idx = token_idx * key_stride + i; + const int64_t src_value_idx = token_idx * value_stride + i; + const int head_idx = i / head_size; + const int head_offset = i % head_size; + const int64_t tgt_value_idx = block_idx * block_stride + + block_offset * num_heads * head_size + + head_idx * head_size + + head_offset; + k_cache[tgt_value_idx] = key[src_key_idx]; + v_cache[tgt_value_idx] = value[src_value_idx]; + } +} +} // namespace vllm + +#define CALL_RESHAPE_AND_CACHE(KV_T, CACHE_T, IS_FP8_KV_CACHE) \ + vllm::reshape_and_cache_kernel<<>>( \ + reinterpret_cast(key.data_ptr()), \ + reinterpret_cast(value.data_ptr()), \ + reinterpret_cast(key_cache.data_ptr()), \ + reinterpret_cast(value_cache.data_ptr()), \ + slot_mapping.data_ptr(), \ + key_stride, \ + value_stride, \ + num_heads, \ + head_size, \ + block_size, \ + x, \ + kv_scale); + +void reshape_and_cache( + torch::Tensor& key, // [num_tokens, num_heads, head_size] + torch::Tensor& value, // [num_tokens, num_heads, head_size] + torch::Tensor& key_cache, // [num_blocks, num_heads, head_size/x, block_size, x] + torch::Tensor& value_cache, // [num_blocks, num_heads, head_size, block_size] + torch::Tensor& slot_mapping, // [num_tokens] + const std::string& kv_cache_dtype, + const float kv_scale) +{ + int num_tokens = key.size(0); + int num_heads = key.size(1); + int head_size = key.size(2); + int block_size = key_cache.size(3); + int x = key_cache.size(4); + + int key_stride = key.stride(0); + int value_stride = value.stride(0); + + dim3 grid(num_tokens); 
+ dim3 block(std::min(num_heads * head_size, 512)); + const at::musa::OptionalMUSAGuard device_guard(device_of(key)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + if (kv_cache_dtype == "auto") { + if (key.dtype() == at::ScalarType::Float) { + CALL_RESHAPE_AND_CACHE(float, float, false); + } else if (key.dtype() == at::ScalarType::Half) { + CALL_RESHAPE_AND_CACHE(uint16_t, uint16_t, false); + } else if (key.dtype() == at::ScalarType::BFloat16) { + CALL_RESHAPE_AND_CACHE(__mt_bfloat16, __mt_bfloat16, false); + } + } else if (kv_cache_dtype == "fp8") { + if (key.dtype() == at::ScalarType::Float) { + CALL_RESHAPE_AND_CACHE(float, uint8_t, true); + } else if (key.dtype() == at::ScalarType::Half) { + CALL_RESHAPE_AND_CACHE(uint16_t, uint8_t, true); + } else if (key.dtype() == at::ScalarType::BFloat16) { + CALL_RESHAPE_AND_CACHE(__mt_bfloat16, uint8_t, true); + } + } else { + TORCH_CHECK(false, "Unsupported data type of kv cache: ", kv_cache_dtype); + } +} + +void reshape_and_cache_flash( + torch::Tensor& key, // [num_tokens, num_heads, head_size] + torch::Tensor& value, // [num_tokens, num_heads, head_size] + torch::Tensor& k_cache, // [num_blocks, block_size, num_heads, head_size] + torch::Tensor& v_cache, // [num_blocks, block_size, num_heads, head_size] + torch::Tensor& slot_mapping, // [num_tokens] + const std::string& kv_cache_dtype) +{ + // FIXME: only support auto datatype, does not support fp8 + if (kv_cache_dtype != "auto") { + TORCH_CHECK(false, "Unsupported data type of kv cache: ", kv_cache_dtype); + } + int num_tokens = key.size(0); + int num_heads = key.size(1); + int head_size = key.size(2); + int block_size = k_cache.size(1); + + int key_stride = key.stride(0); + int value_stride = value.stride(0); + int block_stride = k_cache.stride(0); + TORCH_CHECK(k_cache.stride(0) == v_cache.stride(0)); + + dim3 grid(num_tokens); + dim3 block(std::min(num_heads * head_size, 512)); + const at::musa::OptionalMUSAGuard device_guard(device_of(key)); 
+ const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + key.scalar_type(), + "reshape_and_cache_flash", + [&] { + vllm::reshape_and_cache_flash_kernel<<>>( + key.data_ptr(), + value.data_ptr(), + k_cache.data_ptr(), + v_cache.data_ptr(), + slot_mapping.data_ptr(), + block_stride, + key_stride, + value_stride, + num_heads, + head_size, + block_size); + }); +} + +namespace vllm { + +template +__global__ void convert_fp8_kernel( + const Tin* __restrict__ src_cache, + Tout* __restrict__ dst_cache, + const int64_t block_stride) { + const int64_t block_idx = blockIdx.x; + for (int i = threadIdx.x; i < block_stride; i += blockDim.x) { + int64_t idx = block_idx * block_stride + i; +#if defined(ENABLE_FP8_E5M2) + dst_cache[idx] = fp8_e5m2_unscaled::vec_conversion(src_cache[idx]); +#elif defined(ENABLE_FP8_E4M3) + dst_cache[idx] = fp8_e4m3::vec_conversion(src_cache[idx]); +#else + assert(false); +#endif + } +} + +} // namespace vllm + +#define CALL_CONVERT_FP8(Tout, Tin) \ + vllm::convert_fp8_kernel<<>>( \ + reinterpret_cast(src_cache.data_ptr()), \ + reinterpret_cast(dst_cache.data_ptr()), \ + block_stride); + +void convert_fp8( + torch::Tensor& src_cache, + torch::Tensor& dst_cache) +{ + torch::Device src_device = src_cache.device(); + torch::Device dst_device = dst_cache.device(); + TORCH_CHECK(src_device.is_cuda(), "src must be on a GPU") + TORCH_CHECK(dst_device.is_cuda(), "dst must be on a GPU") + TORCH_CHECK( + src_device.index() == dst_device.index(), + "src and dst must be on the same GPU"); + at::musa::OptionalMUSAGuard device_guard(src_device); + + int64_t num_blocks = src_cache.size(0); + int64_t block_stride = src_cache.stride(0); + + dim3 grid(num_blocks); + dim3 block(std::min(block_stride, int64_t(512))); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + + if (src_cache.dtype() == at::ScalarType::Float) { + CALL_CONVERT_FP8(uint8_t, float); + } else if (src_cache.dtype() == at::ScalarType::Half) { + 
CALL_CONVERT_FP8(uint8_t, uint16_t); + } else if (src_cache.dtype() == at::ScalarType::BFloat16) { + CALL_CONVERT_FP8(uint8_t, __mt_bfloat16); + } else if (dst_cache.dtype() == at::ScalarType::Float) { + CALL_CONVERT_FP8(float, uint8_t); + } else if (dst_cache.dtype() == at::ScalarType::Half) { + CALL_CONVERT_FP8(uint16_t, uint8_t); + } else if (dst_cache.dtype() == at::ScalarType::BFloat16) { + CALL_CONVERT_FP8(__mt_bfloat16, uint8_t); + } +} diff --git a/csrc_musa/cpu/activation.cpp b/csrc_musa/cpu/activation.cpp new file mode 100644 index 0000000..1bd24eb --- /dev/null +++ b/csrc_musa/cpu/activation.cpp @@ -0,0 +1,148 @@ +#include "cpu_types.hpp" + +namespace { +template +void activation_kernel(int num_tokens, int d, scalar_t *__restrict__ input, + scalar_t *__restrict__ output) { + using scalar_vec_t = vec_op::vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + + TORCH_CHECK(d % VEC_ELEM_NUM == 0); + +#pragma omp parallel for + for (int i = 0; i < num_tokens; ++i) { + for (int j = 0; j < d; j += VEC_ELEM_NUM) { + int start = i * d; + if constexpr (is_gated) { + start *= 2; + } + + const scalar_vec_t x(input + start + j); + const vec_op::FP32Vec8 f32_x(x); + vec_op::FP32Vec8 f32_ans = func(f32_x); + + if constexpr (is_gated) { + const scalar_vec_t y(input + start + d + j); + const vec_op::FP32Vec8 f32_y(y); + f32_ans = f32_y * f32_ans; + } + + const scalar_vec_t result(f32_ans); + result.save(output + i * d + j); + } + } +} + +FORCE_INLINE vec_op::FP32Vec8 silu_act(const vec_op::FP32Vec8 &x) { + const vec_op::FP32Vec8 zeros(0.0); + const vec_op::FP32Vec8 ones(1.0); + return x / (ones + (zeros - x).exp()); +} + +FORCE_INLINE vec_op::FP32Vec8 gelu_new_act(const vec_op::FP32Vec8 &x) { + const vec_op::FP32Vec8 ones(1.0); + const vec_op::FP32Vec8 w1(0.79788456f); + const vec_op::FP32Vec8 w2(0.044715f); + const vec_op::FP32Vec8 w3(0.5); + const vec_op::FP32Vec8 x3 = x * x * x; + const vec_op::FP32Vec8 t = (w1 * (x + w2 * x3)).tanh(); + return w3 * x 
* (ones + t); +} + +FORCE_INLINE vec_op::FP32Vec8 gelu_fast_act(const vec_op::FP32Vec8 &x) { + const vec_op::FP32Vec8 ones(1.0); + const vec_op::FP32Vec8 w1(0.79788456f); + const vec_op::FP32Vec8 w2(0.044715f); + const vec_op::FP32Vec8 w3(0.5); + const vec_op::FP32Vec8 t = (x * w1 * (ones + x * w2 * x)).tanh(); + return w3 * x * (ones + t); +} + +FORCE_INLINE vec_op::FP32Vec8 gelu_act(const vec_op::FP32Vec8 &x) { + const vec_op::FP32Vec8 ones(1.0); + const vec_op::FP32Vec8 w1(M_SQRT1_2); + const vec_op::FP32Vec8 w2(0.5); + return x * w2 * (ones + (x * w1).er()); +} + +FORCE_INLINE vec_op::FP32Vec8 gelu_tanh_act(const vec_op::FP32Vec8 &x) { + const vec_op::FP32Vec8 ones(1.0); + const vec_op::FP32Vec8 w1(M_SQRT2 * M_2_SQRTPI * 0.5); + const vec_op::FP32Vec8 w2(0.5); + const vec_op::FP32Vec8 w3(0.044715); + const vec_op::FP32Vec8 x_3 = x * x * x; + const vec_op::FP32Vec8 inner = w1 * (x + x_3 * w3); + return x * w2 * (ones + inner.tanh()); +} +}; // namespace + +void silu_and_mul(torch::Tensor &out, torch::Tensor &input) { + int num_tokens = input.numel() / input.size(-1); + int d = input.size(-1) / 2; + + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "silu_and_mul_impl", [&] { + CPU_KERNEL_GUARD_IN(silu_and_mul_impl) + activation_kernel(num_tokens, d, + input.data_ptr(), + out.data_ptr()); + CPU_KERNEL_GUARD_OUT(silu_and_mul_impl) + }); +} + +void gelu_and_mul(torch::Tensor &out, // [..., d] + torch::Tensor &input) // [..., 2 * d] +{ + int num_tokens = input.numel() / input.size(-1); + int d = input.size(-1) / 2; + + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "gelu_and_mul_impl", [&] { + CPU_KERNEL_GUARD_IN(gelu_and_mul_impl) + activation_kernel(num_tokens, d, + input.data_ptr(), + out.data_ptr()); + CPU_KERNEL_GUARD_OUT(gelu_and_mul_impl) + }); +} + +void gelu_tanh_and_mul(torch::Tensor &out, // [..., d] + torch::Tensor &input) // [..., 2 * d] +{ + int num_tokens = input.numel() / input.size(-1); + int d = input.size(-1) / 2; + + 
VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "gelu_tanh_and_mul_impl", [&] { + CPU_KERNEL_GUARD_IN(gelu_tanh_and_mul_impl) + activation_kernel( + num_tokens, d, input.data_ptr(), + out.data_ptr()); + CPU_KERNEL_GUARD_OUT(gelu_tanh_and_mul_impl) + }); +} + +void gelu_new(torch::Tensor &out, torch::Tensor &input) { + int num_tokens = input.numel() / input.size(-1); + int d = input.size(-1); + + VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "gelu_new_impl", [&] { + CPU_KERNEL_GUARD_IN(gelu_new_impl) + activation_kernel( + num_tokens, d, input.data_ptr(), out.data_ptr()); + CPU_KERNEL_GUARD_OUT(gelu_new_impl) + }); +} + +void gelu_fast(torch::Tensor &out, torch::Tensor &input) { + int num_tokens = input.numel() / input.size(-1); + int d = input.size(-1); + + VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "gelu_fast_impl", [&] { + CPU_KERNEL_GUARD_IN(gelu_fast_impl) + activation_kernel( + num_tokens, d, input.data_ptr(), out.data_ptr()); + CPU_KERNEL_GUARD_OUT(gelu_fast_impl) + }); +} diff --git a/csrc_musa/cpu/attention.cpp b/csrc_musa/cpu/attention.cpp new file mode 100644 index 0000000..c1d765b --- /dev/null +++ b/csrc_musa/cpu/attention.cpp @@ -0,0 +1,746 @@ +#include "cpu_types.hpp" + +namespace { + +template struct KernelVecType { + using q_load_vec_type = void; + using q_vec_type = void; + using k_load_vec_type = void; + using k_vec_type = void; + using qk_acc_vec_type = void; + using v_load_vec_type = void; +}; + +template <> struct KernelVecType { + using q_load_vec_type = vec_op::FP32Vec4; + using q_vec_type = vec_op::FP32Vec16; + using k_load_vec_type = vec_op::FP32Vec16; + using k_vec_type = vec_op::FP32Vec16; + using qk_acc_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::FP32Vec16; +}; + +#ifdef __AVX512BF16__ +template <> struct KernelVecType { + using q_load_vec_type = vec_op::BF16Vec8; + using q_vec_type = vec_op::BF16Vec32; + using k_load_vec_type = vec_op::BF16Vec32; + using k_vec_type = vec_op::BF16Vec32; + using 
qk_acc_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::BF16Vec16; +}; +#else +template <> struct KernelVecType { + using q_load_vec_type = vec_op::BF16Vec8; + using q_vec_type = vec_op::FP32Vec16; + using k_load_vec_type = vec_op::BF16Vec16; + using k_vec_type = vec_op::FP32Vec16; + using qk_acc_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::BF16Vec16; +}; +#endif + +template +FORCE_INLINE std::pair reduceSoftmax(T *data, const int size, + const int capacity) { + T max = data[0]; + for (int i = 1; i < size; ++i) { + max = max >= data[i] ? max : data[i]; + } + + T sum = 0; + for (int i = 0; i < size; ++i) { + data[i] = std::exp(data[i] - max); + sum += data[i]; + } + + int i = 0; + for (; i < size; ++i) { + data[i] /= sum; + } + + for (; i < capacity; ++i) { + data[i] = 0; + } + + return {max, sum}; +} + +template +FORCE_INLINE std::pair +reduceSoftmaxAlibi(T *data, const int size, const int capacity, + const float alibi_slope, const int start_index, + const int seq_len) { + data[0] += alibi_slope * (start_index - seq_len + 1); + T max = data[0]; + for (int i = 1; i < size; ++i) { + T qk = data[i] + alibi_slope * (start_index + i - seq_len + 1); + data[i] = qk; + max = max >= qk ? max : qk; + } + + T sum = 0; + for (int i = 0; i < size; ++i) { + data[i] = std::exp(data[i] - max); + sum += data[i]; + } + + int i = 0; + for (; i < size; ++i) { + data[i] /= sum; + } + + for (; i < capacity; ++i) { + data[i] = 0; + } + + return {max, sum}; +} + +template +FORCE_INLINE void reducePartitonSoftmax(const T *max_data, T *sum_data, + const int size) { + T max = max_data[0]; + for (int i = 1; i < size; ++i) { + max = max >= max_data[i] ? 
max : max_data[i]; + } + + T rescaled_sum = 0; + for (int i = 0; i < size; ++i) { + T rescale_factor = std::exp(max_data[i] - max); + rescaled_sum += rescale_factor * sum_data[i]; + sum_data[i] *= rescale_factor; + } + for (int i = 0; i < size; ++i) { + sum_data[i] /= rescaled_sum + 1e-8; + } +} + +template +struct reduceQKBlockKernel { + using q_load_vec_type = typename KernelVecType::q_load_vec_type; + using q_vec_type = typename KernelVecType::q_vec_type; + using k_load_vec_type = typename KernelVecType::k_load_vec_type; + using k_vec_type = typename KernelVecType::k_vec_type; + using qk_acc_vec_type = typename KernelVecType::qk_acc_vec_type; + + constexpr static int TOKEN_PER_GROUP = k_load_vec_type::get_elem_num() / x; + constexpr static int MAX_GROUP_NUM = 16 / TOKEN_PER_GROUP; + constexpr static int UNROLL_GROUP_NUM = MAX_GROUP_NUM / 4; + + static_assert(MAX_GROUP_NUM == 8 || MAX_GROUP_NUM == 4); + static_assert(k_load_vec_type::get_elem_num() % x == 0); + static_assert(q_load_vec_type::get_elem_num() * sizeof(scalar_t) == 16); + + FORCE_INLINE static void call(const scalar_t *__restrict__ q, + const scalar_t *__restrict__ k_block, + float *__restrict__ logits, float scale, + const int token_num) { + const int group_num = (token_num + TOKEN_PER_GROUP - 1) / TOKEN_PER_GROUP; + + qk_acc_vec_type group_accums[MAX_GROUP_NUM]; + if (token_num == BLOCK_SIZE) { + for (int q_offset = 0; q_offset < HEAD_SIZE; + q_offset += x, k_block += x * BLOCK_SIZE) { + q_load_vec_type q_load_group_vec(q + q_offset); + q_vec_type q_group_vec(q_load_group_vec); + + vec_op::unroll_loop( + [k_block, &q_group_vec, &group_accums](int token_group_idx) { + k_load_vec_type k_load_group_vec(k_block + token_group_idx * x * + TOKEN_PER_GROUP); + k_vec_type k_group_vec(k_load_group_vec); + vec_op::fma(group_accums[token_group_idx], q_group_vec, + k_group_vec); + vec_op::prefetch(k_block + x * BLOCK_SIZE + + token_group_idx * x * TOKEN_PER_GROUP); + }); + } + } else { + for (int q_offset = 0; 
q_offset < HEAD_SIZE; + q_offset += x, k_block += x * BLOCK_SIZE) { + q_load_vec_type q_load_group_vec(q + q_offset); + q_vec_type q_group_vec(q_load_group_vec); + for (int token_group_start = 0; token_group_start < group_num; + token_group_start += UNROLL_GROUP_NUM) { + vec_op::unroll_loop( + [token_group_start, k_block, &q_group_vec, + &group_accums](int token_group_idx) { + token_group_idx += token_group_start; + k_load_vec_type k_load_group_vec(k_block + token_group_idx * x * + TOKEN_PER_GROUP); + k_vec_type k_group_vec(k_load_group_vec); + vec_op::fma(group_accums[token_group_idx], q_group_vec, + k_group_vec); + vec_op::prefetch(k_block + x * BLOCK_SIZE + + token_group_idx * x * TOKEN_PER_GROUP); + }); + } + } + } + + for (int token_group_idx = 0; token_group_idx < group_num; + ++token_group_idx) { + vec_op::unroll_loop( + [&group_accums, logits, scale, token_group_idx](int token_idx) { + float dot_v = + group_accums[token_group_idx] + .template reduce_sub_sum(token_idx); + logits[token_group_idx * TOKEN_PER_GROUP + token_idx] = + dot_v * scale; + }); + } + } +}; + +template +FORCE_INLINE void reduceValueBlock(const float *prob, const scalar_t *v_block, + acc_t &&acc) { + using v_load_vec_type = typename KernelVecType::v_load_vec_type; + constexpr int ELEM_NUM = v_load_vec_type::get_elem_num(); + static_assert(BLOCK_SIZE == ELEM_NUM); + vec_op::FP32Vec16 prob_vec(prob); + + vec_op::unroll_loop([&](int head_elem_idx) { + v_load_vec_type v_vec(v_block + BLOCK_SIZE * head_elem_idx); + vec_op::FP32Vec16 fp32_v_vec(v_vec); + acc[head_elem_idx] = acc[head_elem_idx] + prob_vec * fp32_v_vec; + }); +} +}; // namespace + +// Paged attention v1 +namespace { +template +struct paged_attention_v1_impl { + static void + call(scalar_t *__restrict__ out, // [num_seqs, num_heads, head_size] + const scalar_t *__restrict__ q, // [num_seqs, num_heads, head_size] + const scalar_t *__restrict__ k_cache, // [num_blocks, num_kv_heads, + // head_size/x, block_size, x] + const scalar_t 
*__restrict__ v_cache, // [num_blocks, num_kv_heads, + // head_size, block_size] + const int num_kv_heads, const float scale, + const int + *__restrict__ block_tables, // [num_seqs, max_num_blocks_per_seq] + const int *__restrict__ seq_lens, // [num_seqs] + const int max_num_blocks_per_seq, + const float *__restrict__ alibi_slopes, // [num_heads] + const int q_stride, const int kv_block_stride, const int kv_head_stride, + const int num_seqs, const int num_heads) { + constexpr int x = 16 / sizeof(scalar_t); + const int num_queries_per_kv = num_heads / num_kv_heads; + + static_assert(BLOCK_SIZE == 16); + + int max_seq_len = max_num_blocks_per_seq * BLOCK_SIZE; + int max_seq_len_padded = (max_seq_len + 15) & 0xFFFFFFF0; + TORCH_CHECK((max_seq_len_padded * sizeof(float)) % 64 == 0); + + const int parallel_work_item_num = omp_get_max_threads(); + + size_t logits_bytes = + parallel_work_item_num * max_seq_len_padded * sizeof(float); + float *logits = (float *)std::aligned_alloc( + 64, logits_bytes); // Cacheline alignment for each context token. 
+ // [parallel_work_item_num, max_seq_len_padded] + +#pragma omp parallel for collapse(2) schedule(dynamic, 1) + for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) { + for (int head_idx = 0; head_idx < num_heads; ++head_idx) { + int seq_len = seq_lens[seq_idx]; + const int *seq_block_table = + block_tables + max_num_blocks_per_seq * seq_idx; + const int block_num = (seq_len + BLOCK_SIZE - 1) / BLOCK_SIZE; + const int64_t kv_head_idx = head_idx / num_queries_per_kv; + const scalar_t *__restrict__ q_vec_ptr = + q + seq_idx * q_stride + head_idx * HEAD_SIZE; + const int last_block_token_num = + seq_len - (block_num - 1) * BLOCK_SIZE; + float *__restrict__ thread_block_logits = + logits + omp_get_thread_num() * max_seq_len_padded; + + // Compute logits + for (int block_idx = 0; block_idx < block_num; ++block_idx) { + const int64_t physical_block_idx = seq_block_table[block_idx]; + const scalar_t *__restrict__ k_block_cache_ptr = + k_cache + physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride; + float *__restrict__ head_block_logits = + thread_block_logits + block_idx * BLOCK_SIZE; + + reduceQKBlockKernel::call( + q_vec_ptr, k_block_cache_ptr, head_block_logits, scale, + block_idx == block_num - 1 ? 
last_block_token_num : BLOCK_SIZE); + } + + // Compute softmax + if (alibi_slopes) { + reduceSoftmaxAlibi(thread_block_logits, seq_len, + block_num * BLOCK_SIZE, alibi_slopes[head_idx], 0, + seq_len); + } else { + reduceSoftmax(thread_block_logits, seq_len, + block_num * BLOCK_SIZE); + } + + // Compute value + constexpr int head_elem_num_per_partition = 16; + constexpr int head_partition_num = + HEAD_SIZE / head_elem_num_per_partition; + for (int head_part_idx = 0; head_part_idx < head_partition_num; + ++head_part_idx) { + vec_op::FP32Vec16 accums[head_elem_num_per_partition]; + scalar_t *__restrict__ out_ptr = + out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE + + head_part_idx * head_elem_num_per_partition; + for (int block_idx = 0; block_idx < block_num; ++block_idx) { + const int64_t physical_block_idx = seq_block_table[block_idx]; + const float *__restrict__ prob_vec_ptr = + thread_block_logits + block_idx * BLOCK_SIZE; + const scalar_t *__restrict__ v_block_cache_ptr = + v_cache + physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride + + BLOCK_SIZE * head_part_idx * head_elem_num_per_partition; + reduceValueBlock( + prob_vec_ptr, v_block_cache_ptr, accums); + + if (block_idx != block_num - 1) { + const int64_t next_physical_block_idx = + seq_block_table[block_idx + 1]; + const scalar_t *__restrict__ next_v_block_cache_ptr = + v_cache + next_physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride + + BLOCK_SIZE * head_part_idx * head_elem_num_per_partition; + vec_op::unroll_loop( + [&](int head_elem_idx) { + if (head_elem_idx % 2 == 0) { + vec_op::prefetch(next_v_block_cache_ptr + + BLOCK_SIZE * head_elem_idx); + } + }); + } + } + + vec_op::unroll_loop( + [&](int head_elem_idx) { + float value = accums[head_elem_idx].reduce_sum(); + vec_op::storeFP32(value, out_ptr + head_elem_idx); + }); + } + } + } + std::free(logits); + } +}; + +#define LAUNCH_V1_ATTENTION_KERNEL(T, HEAD_SIZE, BLOCK_SIZE) \ + 
paged_attention_v1_impl::call( \ + out_ptr, query_ptr, key_cache_ptr, value_cache_ptr, num_kv_heads, scale, \ + block_tables_ptr, seq_lens_ptr, max_num_blocks_per_seq, \ + alibi_slopes_ptr, q_stride, kv_block_stride, kv_head_stride, num_seqs, \ + num_heads); + +template +void paged_attention_v1_impl_launcher( + torch::Tensor &out, torch::Tensor &query, torch::Tensor &key_cache, + torch::Tensor &value_cache, int num_kv_heads, float scale, + torch::Tensor &block_tables, torch::Tensor &seq_lens, + int max_seq_len, const c10::optional &alibi_slopes) { + int num_seqs = query.size(0); + int num_heads = query.size(1); + int head_size = query.size(2); + int max_num_blocks_per_seq = block_tables.size(1); + int q_stride = query.stride(0); + int kv_block_stride = key_cache.stride(0); + int kv_head_stride = key_cache.stride(1); + + // NOTE: alibi_slopes is optional. + const float *alibi_slopes_ptr = + alibi_slopes + ? reinterpret_cast(alibi_slopes.value().data_ptr()) + : nullptr; + + T *out_ptr = reinterpret_cast(out.data_ptr()); + T *query_ptr = reinterpret_cast(query.data_ptr()); + T *key_cache_ptr = reinterpret_cast(key_cache.data_ptr()); + T *value_cache_ptr = reinterpret_cast(value_cache.data_ptr()); + int *block_tables_ptr = block_tables.data_ptr(); + int *seq_lens_ptr = seq_lens.data_ptr(); + + switch (head_size) { + case 64: + LAUNCH_V1_ATTENTION_KERNEL(T, 64, BLOCK_SIZE); + break; + case 80: + LAUNCH_V1_ATTENTION_KERNEL(T, 80, BLOCK_SIZE); + break; + case 96: + LAUNCH_V1_ATTENTION_KERNEL(T, 96, BLOCK_SIZE); + break; + case 112: + LAUNCH_V1_ATTENTION_KERNEL(T, 112, BLOCK_SIZE); + break; + case 128: + LAUNCH_V1_ATTENTION_KERNEL(T, 128, BLOCK_SIZE); + break; + case 256: + LAUNCH_V1_ATTENTION_KERNEL(T, 256, BLOCK_SIZE); + break; + default: + TORCH_CHECK(false, "Unsupported head size: ", head_size); + break; + } +} + +#define CALL_V1_KERNEL_LAUNCHER(T, BLOCK_SIZE) \ + paged_attention_v1_impl_launcher( \ + out, query, key_cache, value_cache, num_kv_heads, scale, 
block_tables, \ + seq_lens, max_seq_len, alibi_slopes); + +#define CALL_V1_KERNEL_LAUNCHER_BLOCK_SIZE(T) \ + switch (block_size) { \ + case 16: \ + CALL_V1_KERNEL_LAUNCHER(T, 16); \ + break; \ + default: \ + TORCH_CHECK(false, "Unsupported block size: ", block_size); \ + break; \ + } +} // namespace + +void paged_attention_v1(torch::Tensor &out, torch::Tensor &query, + torch::Tensor &key_cache, torch::Tensor &value_cache, + int num_kv_heads, float scale, + torch::Tensor &block_tables, + torch::Tensor &seq_lens, int block_size, + int max_seq_len, + const c10::optional &alibi_slopes, + const std::string &kv_cache_dtype, float kv_scale) { + TORCH_CHECK(kv_scale == 1.0f); + VLLM_DISPATCH_FLOATING_TYPES(query.scalar_type(), "paged_attention_v1_impl", + [&] { + CPU_KERNEL_GUARD_IN(paged_attention_v1_impl) + CALL_V1_KERNEL_LAUNCHER_BLOCK_SIZE(scalar_t); + CPU_KERNEL_GUARD_OUT(paged_attention_v1_impl) + }); +} + +// Paged attention v2 +namespace { +template +struct paged_attention_v2_impl { + static void call( + scalar_t *__restrict__ out, // [num_seqs, num_heads, head_size] + float *__restrict__ exp_sums, // [num_seqs, num_heads, max_num_partitions] + float + *__restrict__ max_logits, // [num_seqs, num_heads, max_num_partitions] + scalar_t *__restrict__ tmp_out, // [num_seqs, num_heads, + // max_num_partitions, head_size] + const scalar_t *__restrict__ q, // [num_seqs, num_heads, head_size] + const scalar_t *__restrict__ k_cache, // [num_blocks, num_kv_heads, + // head_size/x, block_size, x] + const scalar_t *__restrict__ v_cache, // [num_blocks, num_kv_heads, + // head_size, block_size] + const int num_kv_heads, const float scale, + const int + *__restrict__ block_tables, // [num_seqs, max_num_blocks_per_seq] + const int *__restrict__ seq_lens, // [num_seqs] + const int max_num_blocks_per_seq, + const float *__restrict__ alibi_slopes, // [num_heads] + const int q_stride, const int kv_block_stride, const int kv_head_stride, + const int num_seqs, const int num_heads, const 
int max_num_partitions) { + constexpr int x = 16 / sizeof(scalar_t); + const int num_queries_per_kv = num_heads / num_kv_heads; + + static_assert(BLOCK_SIZE == 16); + static_assert(PARTITION_SIZE * sizeof(float) % 64 == 0); + static_assert(PARTITION_SIZE % BLOCK_SIZE == 0); + +#pragma omp parallel for collapse(3) schedule(static, 1) + for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) { + for (int partition_idx = 0; partition_idx < max_num_partitions; + ++partition_idx) { + for (int head_idx = 0; head_idx < num_heads; ++head_idx) { + const int seq_len = seq_lens[seq_idx]; + const int start_token_idx = partition_idx * PARTITION_SIZE; + + if (start_token_idx >= seq_len) + continue; + + const int partition_num = + (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE; + const bool no_reduce = (partition_num == 1); + const int token_num = + (std::min(seq_len, start_token_idx + PARTITION_SIZE) - + start_token_idx); + const int block_num = + (token_num + BLOCK_SIZE - 1) / BLOCK_SIZE; + const int last_block_token_num = + token_num - (block_num - 1) * BLOCK_SIZE; + const int *seq_block_table = block_tables + + max_num_blocks_per_seq * seq_idx + + start_token_idx / BLOCK_SIZE; + const int64_t kv_head_idx = head_idx / num_queries_per_kv; + const scalar_t *__restrict__ q_vec_ptr = + q + seq_idx * q_stride + head_idx * HEAD_SIZE; + + float logits[PARTITION_SIZE] __attribute__((aligned(64))) = {0}; + + // Compute logits + for (int block_idx = 0; block_idx < block_num; ++block_idx) { + const int64_t physical_block_idx = seq_block_table[block_idx]; + const scalar_t *__restrict__ k_block_cache_ptr = + k_cache + physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride; + float *__restrict__ head_block_logits = + logits + block_idx * BLOCK_SIZE; + + reduceQKBlockKernel::call( + q_vec_ptr, k_block_cache_ptr, head_block_logits, scale, + block_idx == block_num - 1 ? 
last_block_token_num : BLOCK_SIZE); + } + + std::pair max_and_sum; + if (alibi_slopes) { + max_and_sum = reduceSoftmaxAlibi( + logits, token_num, block_num * BLOCK_SIZE, + alibi_slopes[head_idx], start_token_idx, seq_len); + } else { + max_and_sum = reduceSoftmax(logits, token_num, + block_num * BLOCK_SIZE); + } + + auto &&[max_logit, exp_sum] = max_and_sum; + + scalar_t *__restrict__ output_buffer = nullptr; + if (!no_reduce) { + auto idx = seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions + partition_idx; + max_logits[idx] = max_logit; + exp_sums[idx] = exp_sum; + output_buffer = + tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE + + partition_idx * HEAD_SIZE; + } else { + output_buffer = + out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE; + } + + // Compute value + constexpr int head_elem_num_per_partition = 16; + constexpr int head_partition_num = + HEAD_SIZE / head_elem_num_per_partition; + for (int head_part_idx = 0; head_part_idx < head_partition_num; + ++head_part_idx) { + vec_op::FP32Vec16 accums[head_elem_num_per_partition]; + scalar_t *__restrict__ out_ptr = + output_buffer + head_part_idx * head_elem_num_per_partition; + for (int block_idx = 0; block_idx < block_num; ++block_idx) { + const int64_t physical_block_idx = seq_block_table[block_idx]; + const float *__restrict__ prob_vec_ptr = + logits + block_idx * BLOCK_SIZE; + const scalar_t *__restrict__ v_block_cache_ptr = + v_cache + physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride + + BLOCK_SIZE * head_part_idx * head_elem_num_per_partition; + reduceValueBlock( + prob_vec_ptr, v_block_cache_ptr, accums); + + if (block_idx != block_num - 1) { + const int64_t next_physical_block_idx = + seq_block_table[block_idx + 1]; + const scalar_t *__restrict__ next_v_block_cache_ptr = + v_cache + next_physical_block_idx * kv_block_stride + + kv_head_idx * kv_head_stride + + BLOCK_SIZE * head_part_idx 
* head_elem_num_per_partition; + vec_op::unroll_loop( + [&](int head_elem_idx) { + if (head_elem_idx % 2 == 0) { + vec_op::prefetch(next_v_block_cache_ptr + + BLOCK_SIZE * head_elem_idx); + } + }); + } + } + + vec_op::unroll_loop( + [&](int head_elem_idx) { + float value = accums[head_elem_idx].reduce_sum(); + vec_op::storeFP32(value, out_ptr + head_elem_idx); + }); + } + } + } + } + + // Rescale partition softmax and store the factors to exp_sums +#pragma omp parallel for collapse(2) schedule(static, 1) + for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) { + for (int head_idx = 0; head_idx < num_heads; ++head_idx) { + const int seq_len = seq_lens[seq_idx]; + const int partition_num = + (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE; + + if (partition_num == 1) + continue; + + reducePartitonSoftmax( + max_logits + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions, + exp_sums + seq_idx * num_heads * max_num_partitions + + head_idx * max_num_partitions, + partition_num); + } + } + + // Reduce values + using v_load_vec_type = typename KernelVecType::v_load_vec_type; + static_assert(v_load_vec_type::get_elem_num() == BLOCK_SIZE); + constexpr int head_elem_num_per_group = + 16; // Note: didn't align with the cacheline size, due to some HEAD_SIZE + // didn't align with 64 bytes + static_assert(HEAD_SIZE % head_elem_num_per_group == 0); + constexpr int head_group_num = HEAD_SIZE / head_elem_num_per_group; + const float *__restrict__ rescale_factors = exp_sums; +#pragma omp parallel for collapse(3) schedule(static, 1) + for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) { + for (int head_idx = 0; head_idx < num_heads; ++head_idx) { + for (int group_idx = 0; group_idx < head_group_num; ++group_idx) { + const int seq_len = seq_lens[seq_idx]; + const int partition_num = + (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE; + + if (partition_num == 1) + continue; + + const float *__restrict__ seq_head_rescale_factors = + rescale_factors + seq_idx * 
num_heads * max_num_partitions + + head_idx * max_num_partitions; + const scalar_t *__restrict__ seq_head_tmp_out = + tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE + + group_idx * head_elem_num_per_group; + scalar_t *__restrict__ seq_head_output = + out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE + + group_idx * head_elem_num_per_group; + + vec_op::FP32Vec16 acc; + for (int i = 0; i < partition_num; ++i) { + vec_op::FP32Vec16 rescale_factor(seq_head_rescale_factors[i]); + v_load_vec_type value(seq_head_tmp_out + i * HEAD_SIZE); + vec_op::FP32Vec16 fp32_value(value); + acc = acc + fp32_value * rescale_factor; + } + v_load_vec_type cast_acc(acc); + cast_acc.save(seq_head_output); + } + } + } + } +}; + +#define LAUNCH_V2_ATTENTION_KERNEL(T, HEAD_SIZE, BLOCK_SIZE) \ + paged_attention_v2_impl::call( \ + out_ptr, exp_sums_ptr, max_logits_ptr, tmp_out_ptr, query_ptr, \ + key_cache_ptr, value_cache_ptr, num_kv_heads, scale, block_tables_ptr, \ + seq_lens_ptr, max_num_blocks_per_seq, alibi_slopes_ptr, q_stride, \ + kv_block_stride, kv_head_stride, num_seqs, num_heads, \ + max_num_partitions); + +template +void paged_attention_v2_impl_launcher( + torch::Tensor &out, torch::Tensor &exp_sums, torch::Tensor &max_logits, + torch::Tensor &tmp_out, torch::Tensor &query, torch::Tensor &key_cache, + torch::Tensor &value_cache, int num_kv_heads, float scale, + torch::Tensor &block_tables, torch::Tensor &seq_lens, int block_size, + int max_seq_len, const c10::optional &alibi_slopes) { + int num_seqs = query.size(0); + int num_heads = query.size(1); + int head_size = query.size(2); + int max_num_blocks_per_seq = block_tables.size(1); + int q_stride = query.stride(0); + int kv_block_stride = key_cache.stride(0); + int kv_head_stride = key_cache.stride(1); + int max_num_partitions = exp_sums.size(-1); + + // NOTE: alibi_slopes is optional. + const float *alibi_slopes_ptr = + alibi_slopes + ? 
reinterpret_cast(alibi_slopes.value().data_ptr()) + : nullptr; + + T *out_ptr = reinterpret_cast(out.data_ptr()); + float *exp_sums_ptr = reinterpret_cast(exp_sums.data_ptr()); + float *max_logits_ptr = reinterpret_cast(max_logits.data_ptr()); + T *tmp_out_ptr = reinterpret_cast(tmp_out.data_ptr()); + T *query_ptr = reinterpret_cast(query.data_ptr()); + T *key_cache_ptr = reinterpret_cast(key_cache.data_ptr()); + T *value_cache_ptr = reinterpret_cast(value_cache.data_ptr()); + int *block_tables_ptr = block_tables.data_ptr(); + int *seq_lens_ptr = seq_lens.data_ptr(); + + switch (head_size) { + case 64: + LAUNCH_V2_ATTENTION_KERNEL(T, 64, BLOCK_SIZE); + break; + case 80: + LAUNCH_V2_ATTENTION_KERNEL(T, 80, BLOCK_SIZE); + break; + case 96: + LAUNCH_V2_ATTENTION_KERNEL(T, 96, BLOCK_SIZE); + break; + case 112: + LAUNCH_V2_ATTENTION_KERNEL(T, 112, BLOCK_SIZE); + break; + case 128: + LAUNCH_V2_ATTENTION_KERNEL(T, 128, BLOCK_SIZE); + break; + case 256: + LAUNCH_V2_ATTENTION_KERNEL(T, 256, BLOCK_SIZE); + break; + default: + TORCH_CHECK(false, "Unsupported head size: ", head_size); + break; + } +} + +#define CALL_V2_KERNEL_LAUNCHER(T, BLOCK_SIZE) \ + paged_attention_v2_impl_launcher( \ + out, exp_sums, max_logits, tmp_out, query, key_cache, value_cache, \ + num_kv_heads, scale, block_tables, seq_lens, block_size, \ + max_seq_len, alibi_slopes); + +#define CALL_V2_KERNEL_LAUNCHER_BLOCK_SIZE(T) \ + switch (block_size) { \ + case 16: \ + CALL_V2_KERNEL_LAUNCHER(T, 16); \ + break; \ + default: \ + TORCH_CHECK(false, "Unsupported block size: ", block_size); \ + break; \ + } +} // namespace + +void paged_attention_v2(torch::Tensor &out, torch::Tensor &exp_sums, + torch::Tensor &max_logits, torch::Tensor &tmp_out, + torch::Tensor &query, torch::Tensor &key_cache, + torch::Tensor &value_cache, int num_kv_heads, + float scale, torch::Tensor &block_tables, + torch::Tensor &seq_lens, int block_size, + int max_seq_len, + const c10::optional &alibi_slopes, + const std::string 
&kv_cache_dtype, float kv_scale) { + TORCH_CHECK(kv_scale == 1.0f); + VLLM_DISPATCH_FLOATING_TYPES(query.scalar_type(), "paged_attention_v2_impl", + [&] { + CPU_KERNEL_GUARD_IN(paged_attention_v2_impl) + CALL_V2_KERNEL_LAUNCHER_BLOCK_SIZE(scalar_t); + CPU_KERNEL_GUARD_OUT(paged_attention_v2_impl) + }); +} diff --git a/csrc_musa/cpu/cache.cpp b/csrc_musa/cpu/cache.cpp new file mode 100644 index 0000000..7849a5d --- /dev/null +++ b/csrc_musa/cpu/cache.cpp @@ -0,0 +1,141 @@ +#include +#include + +#include "cpu_types.hpp" + +namespace { +template +void copy_blocks_cpu_impl( + std::vector &key_caches, + std::vector &value_caches, + const std::vector> mapping_pairs, + const int element_num_per_block, const int layer_num) { + const size_t pair_num = mapping_pairs.size(); + const size_t block_bytes = sizeof(scalar_t) * element_num_per_block; +#pragma omp parallel for collapse(2) + for (int layer = 0; layer < layer_num; ++layer) { + for (size_t pair = 0; pair < pair_num; ++pair) { + int64_t source_offset = element_num_per_block * mapping_pairs[pair].first; + int64_t target_offset = + element_num_per_block * mapping_pairs[pair].second; + scalar_t *key_cache_ptr = key_caches[layer].data_ptr(); + scalar_t *source_ptr = key_cache_ptr + source_offset; + scalar_t *target_ptr = key_cache_ptr + target_offset; + std::memcpy(target_ptr, source_ptr, block_bytes); + + scalar_t *value_cache_ptr = value_caches[layer].data_ptr(); + source_ptr = value_cache_ptr + source_offset; + target_ptr = value_cache_ptr + target_offset; + std::memcpy(target_ptr, source_ptr, block_bytes); + } + } +} + +template +void reshape_and_cache_cpu_impl( + const scalar_t *__restrict__ key, const scalar_t *__restrict__ value, + scalar_t *__restrict__ key_cache, scalar_t *__restrict__ value_cache, + const int64_t *__restrict__ slot_mapping, const int num_tokens, + const int key_stride, const int value_stride, const int num_heads, + const int head_size, const int block_size, const int x) { + const int 
block_elem_num = num_heads * head_size * block_size; + +#pragma omp parallel for collapse(2) + for (int token_idx = 0; token_idx < num_tokens; ++token_idx) { + for (int head_idx = 0; head_idx < num_heads; ++head_idx) { + const int64_t slot_idx = slot_mapping[token_idx]; + if (slot_idx >= 0) { + int src_key_head_idx = token_idx * key_stride + head_idx * head_size; + int src_value_head_idx = + token_idx * value_stride + head_idx * head_size; + const scalar_t *src_key_head_ptr = key + src_key_head_idx; + const scalar_t *src_value_head_ptr = value + src_value_head_idx; + const int64_t block_index = slot_idx / block_size; + const int64_t block_offset = slot_idx % block_size; + scalar_t *target_key_head_ptr = key_cache + + block_elem_num * block_index + + head_idx * block_size * head_size; + scalar_t *target_value_head_ptr = value_cache + + block_elem_num * block_index + + head_idx * block_size * head_size; + + for (int src_key_idx = 0; src_key_idx < head_size; src_key_idx += x) { + const int64_t target_offset = + src_key_idx * block_size + block_offset * x; + for (int i = 0; i < x; ++i) { + target_key_head_ptr[target_offset + i] = + src_key_head_ptr[src_key_idx + i]; + } + } + + for (int src_value_idx = 0; src_value_idx < head_size; + ++src_value_idx) { + const int64_t target_offset = + src_value_idx * block_size + block_offset; + target_value_head_ptr[target_offset] = + src_value_head_ptr[src_value_idx]; + } + } + } + } +} +}; // namespace + +void copy_blocks(std::vector &key_caches, + std::vector &value_caches, + const std::map> &block_mapping) { + int num_layers = key_caches.size(); + TORCH_CHECK(num_layers == value_caches.size()); + if (num_layers == 0) { + return; + } + + std::vector> mapping_pairs; + mapping_pairs.reserve(block_mapping.size()); + for (const auto &pair : block_mapping) { + for (const auto &dst : pair.second) { + mapping_pairs.emplace_back(pair.first, dst); + } + } + + const int element_num_per_block = key_caches[0][0].numel(); + 
VLLM_DISPATCH_FLOATING_TYPES( + key_caches[0].scalar_type(), "copy_blocks_cpu_impl", [&] { + CPU_KERNEL_GUARD_IN(copy_blocks_cpu_impl) + copy_blocks_cpu_impl(key_caches, value_caches, mapping_pairs, + element_num_per_block, num_layers); + CPU_KERNEL_GUARD_OUT(copy_blocks_cpu_impl) + }); +} + +void reshape_and_cache(torch::Tensor &key, torch::Tensor &value, + torch::Tensor &key_cache, torch::Tensor &value_cache, + torch::Tensor &slot_mapping, + const std::string &kv_cache_dtype, float kv_scale) { + TORCH_CHECK(kv_scale == 1.0f); + + int num_tokens = key.size(0); + int num_heads = key.size(1); + int head_size = key.size(2); + int block_size = key_cache.size(3); + int x = key_cache.size(4); + + int key_stride = key.stride(0); + int value_stride = value.stride(0); + + VLLM_DISPATCH_FLOATING_TYPES( + key.scalar_type(), "reshape_and_cache_cpu_impl", [&] { + CPU_KERNEL_GUARD_IN(reshape_and_cache_cpu_impl) + reshape_and_cache_cpu_impl( + key.data_ptr(), value.data_ptr(), + key_cache.data_ptr(), value_cache.data_ptr(), + slot_mapping.data_ptr(), num_tokens, key_stride, + value_stride, num_heads, head_size, block_size, x); + CPU_KERNEL_GUARD_OUT(reshape_and_cache_cpu_impl) + }); +} + +void swap_blocks(torch::Tensor &src, torch::Tensor &dst, + const std::map &block_mapping) { + TORCH_CHECK(false, "swap_blocks is unsupported on CPU.") +} diff --git a/csrc_musa/cpu/cpu_types.hpp b/csrc_musa/cpu/cpu_types.hpp new file mode 100644 index 0000000..c1d3ec0 --- /dev/null +++ b/csrc_musa/cpu/cpu_types.hpp @@ -0,0 +1,352 @@ + +#ifndef CPU_TYPES_HPP +#define CPU_TYPES_HPP + +#include +#include + +namespace vec_op { + +// FIXME: FP16 is not fully supported in Torch-CPU +#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) + +#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) 
\ + AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) + +#ifndef CPU_OP_GUARD +#define CPU_KERNEL_GUARD_IN(NAME) +#define CPU_KERNEL_GUARD_OUT(NAME) +#else +#define CPU_KERNEL_GUARD_IN(NAME) \ + std::cout << #NAME << " invoked." << std::endl; +#define CPU_KERNEL_GUARD_OUT(NAME) std::cout << #NAME << " exit." << std::endl; +#endif + +#define FORCE_INLINE __attribute__((always_inline)) inline + +namespace { +template +constexpr void unroll_loop_item(std::integer_sequence, F &&f) { + (f(std::integral_constant{}), ...); +} +}; // namespace + +template >> +constexpr void unroll_loop(F &&f) { + unroll_loop_item(std::make_integer_sequence{}, std::forward(f)); +} + +template struct Vec { + constexpr static int get_elem_num() { return T::VEC_ELEM_NUM; } +}; + +struct FP32Vec8; +struct FP32Vec16; + +#ifdef __AVX512FP16__ +struct FP16Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + + __m128h reg; + + explicit FP16Vec8(_Float16 v) : reg(_mm_set1_ph(v)) {} + + explicit FP16Vec8(const void *ptr) : reg(_mm_loadu_ph(ptr)) {} + + explicit FP16Vec8(__m128h data) : reg(data) {} + + FP16Vec8 operator*(const FP16Vec8 &b) const { + return FP16Vec8(_mm_mul_ph(reg, b.reg)); + } + + FP16Vec8 operator+(const FP16Vec8 &b) const { + return FP16Vec8(_mm_add_ph(reg, b.reg)); + } + + FP16Vec8 operator-(const FP16Vec8 &b) const { + return FP16Vec8(_mm_sub_ph(reg, b.reg)); + } + + FP16Vec8 operator/(const FP16Vec8 &b) const { + return FP16Vec8(_mm_div_ph(reg, b.reg)); + } + + void save(void *ptr) const { _mm_storeu_ph(ptr, reg); } +}; +#endif + +struct BF16Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + + __m128i reg; + + explicit BF16Vec8(const void *ptr) + : reg((__m128i)_mm_loadu_si128((__m128i *)ptr)) {} + + explicit BF16Vec8(const FP32Vec8 &); + + void save(void *ptr) const { *reinterpret_cast<__m128i *>(ptr) = reg; } +}; + +struct BF16Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + + __m256i reg; + + explicit 
BF16Vec16(const void *ptr) + : reg((__m256i)_mm256_loadu_si256((__m256i *)ptr)) {} + + explicit BF16Vec16(const FP32Vec16 &); + + void save(void *ptr) const { *reinterpret_cast<__m256i *>(ptr) = reg; } +}; + +struct BF16Vec32 : public Vec { + constexpr static int VEC_ELEM_NUM = 32; + + __m512i reg; + + explicit BF16Vec32(const void *ptr) : reg((__m512i)_mm512_loadu_si512(ptr)) {} + + explicit BF16Vec32(__m512i data) : reg(data) {} + + explicit BF16Vec32(BF16Vec8 &vec8_data) + : reg((__m512i)_mm512_inserti32x4( + _mm512_inserti32x4(_mm512_inserti32x4(_mm512_castsi128_si512( + (__m128i)vec8_data.reg), + (__m128i)vec8_data.reg, 1), + (__m128i)vec8_data.reg, 2), + (__m128i)vec8_data.reg, 3)) {} + + void save(void *ptr) const { *reinterpret_cast<__m512i *>(ptr) = reg; } +}; + +struct FP32Vec4 : public Vec { + constexpr static int VEC_ELEM_NUM = 4; + union AliasReg { + __m128 reg; + float values[VEC_ELEM_NUM]; + }; + + __m128 reg; + + explicit FP32Vec4(float v) : reg(_mm_set1_ps(v)) {} + + explicit FP32Vec4() : reg(_mm_set1_ps(0.0)) {} + + explicit FP32Vec4(const float *ptr) : reg(_mm_loadu_ps(ptr)) {} + + explicit FP32Vec4(__m128 data) : reg(data) {} + + explicit FP32Vec4(const FP32Vec4 &data) : reg(data.reg) {} +}; + +struct FP32Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + union AliasReg { + __m256 reg; + float values[VEC_ELEM_NUM]; + }; + + __m256 reg; + + explicit FP32Vec8(float v) : reg(_mm256_set1_ps(v)) {} + + explicit FP32Vec8() : reg(_mm256_set1_ps(0.0)) {} + + explicit FP32Vec8(const float *ptr) : reg(_mm256_loadu_ps(ptr)) {} + + explicit FP32Vec8(__m256 data) : reg(data) {} + + explicit FP32Vec8(const FP32Vec8 &data) : reg(data.reg) {} + +#ifdef __AVX512FP16__ + explicit FP32Vec8(__m128h v) : reg(_mm256_cvtph_ps(_mm_castph_si128(v))) {} +#endif + + explicit FP32Vec8(const BF16Vec8 &v) + : reg(_mm256_castsi256_ps( + _mm256_bslli_epi128(_mm256_cvtepu16_epi32(v.reg), 2))) {} + + float reduce_sum() const { + AliasReg ar; + ar.reg = reg; + float 
result = 0; + unroll_loop([&result, &ar](int i) { result += ar.values[i]; }); + + return result; + } + + FP32Vec8 exp() const { + AliasReg ar; + ar.reg = reg; + return FP32Vec8(_mm256_set_ps(expf(ar.values[7]), expf(ar.values[6]), + expf(ar.values[5]), expf(ar.values[4]), + expf(ar.values[3]), expf(ar.values[2]), + expf(ar.values[1]), expf(ar.values[0]))); + } + + FP32Vec8 tanh() const { + AliasReg ar; + ar.reg = reg; + return FP32Vec8(_mm256_set_ps(tanhf(ar.values[7]), tanhf(ar.values[6]), + tanhf(ar.values[5]), tanhf(ar.values[4]), + tanhf(ar.values[3]), tanhf(ar.values[2]), + tanhf(ar.values[1]), tanhf(ar.values[0]))); + } + + FP32Vec8 er() const { + AliasReg ar; + ar.reg = reg; + return FP32Vec8(_mm256_set_ps(erf(ar.values[7]), erf(ar.values[6]), + erf(ar.values[5]), erf(ar.values[4]), + erf(ar.values[3]), erf(ar.values[2]), + erf(ar.values[1]), erf(ar.values[0]))); + } + + FP32Vec8 operator*(const FP32Vec8 &b) const { + return FP32Vec8(_mm256_mul_ps(reg, b.reg)); + } + + FP32Vec8 operator+(const FP32Vec8 &b) const { + return FP32Vec8(_mm256_add_ps(reg, b.reg)); + } + + FP32Vec8 operator-(const FP32Vec8 &b) const { + return FP32Vec8(_mm256_sub_ps(reg, b.reg)); + } + + FP32Vec8 operator/(const FP32Vec8 &b) const { + return FP32Vec8(_mm256_div_ps(reg, b.reg)); + } + + void save(float *ptr) const { _mm256_storeu_ps(ptr, reg); } +}; + +struct FP32Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + union AliasReg { + __m512 reg; + float values[VEC_ELEM_NUM]; + }; + + __m512 reg; + + explicit FP32Vec16(float v) : reg(_mm512_set1_ps(v)) {} + + explicit FP32Vec16() : reg(_mm512_set1_ps(0.0)) {} + + explicit FP32Vec16(const float *ptr) : reg(_mm512_loadu_ps(ptr)) {} + + explicit FP32Vec16(__m512 data) : reg(data) {} + + explicit FP32Vec16(const FP32Vec16 &data) : reg(data.reg) {} + + explicit FP32Vec16(const FP32Vec4 &data) + : reg((__m512)_mm512_inserti32x4( + _mm512_inserti32x4( + _mm512_inserti32x4(_mm512_castsi128_si512((__m128i)data.reg), + 
(__m128i)data.reg, 1), + (__m128i)data.reg, 2), + (__m128i)data.reg, 3)) {} + + explicit FP32Vec16(const FP32Vec8 &data) + : reg((__m512)_mm512_inserti32x8( + _mm512_castsi256_si512((__m256i)data.reg), (__m256i)data.reg, 1)) {} + + explicit FP32Vec16(const BF16Vec16 &v) + : reg(_mm512_castsi512_ps( + _mm512_bslli_epi128(_mm512_cvtepu16_epi32(v.reg), 2))) {} + + explicit FP32Vec16(const BF16Vec8 &v) : FP32Vec16(FP32Vec8(v)) {} + + FP32Vec16 operator*(const FP32Vec16 &b) const { + return FP32Vec16(_mm512_mul_ps(reg, b.reg)); + } + + FP32Vec16 operator+(const FP32Vec16 &b) const { + return FP32Vec16(_mm512_add_ps(reg, b.reg)); + } + + FP32Vec16 operator-(const FP32Vec16 &b) const { + return FP32Vec16(_mm512_sub_ps(reg, b.reg)); + } + + FP32Vec16 operator/(const FP32Vec16 &b) const { + return FP32Vec16(_mm512_div_ps(reg, b.reg)); + } + + float reduce_sum() const { return _mm512_reduce_add_ps(reg); } + + template float reduce_sub_sum(int idx) { + static_assert(VEC_ELEM_NUM % group_size == 0); + constexpr uint32_t base_mask = (0xFFFF >> (16 - group_size)); + __mmask16 mask = _cvtu32_mask16(base_mask << (idx * group_size)); + return _mm512_mask_reduce_add_ps(mask, reg); + } + + void save(float *ptr) const { _mm512_storeu_ps(ptr, reg); } +}; + +template struct VecType { using vec_type = void; }; + +template using vec_t = typename VecType::vec_type; + +template <> struct VecType { using vec_type = FP32Vec8; }; + +#ifdef __AVX512FP16__ +template <> struct VecType { using vec_type = FP16Vec16; }; +#endif + +template <> struct VecType { using vec_type = BF16Vec8; }; + +template void storeFP32(float v, T *ptr) { *ptr = v; } + +#ifdef __AVX512FP16__ +template <> inline void storeFP32(float v, c10::Half *ptr) { + *reinterpret_cast<_Float16 *>(ptr) = v; +} +#endif + +inline void fma(FP32Vec16 &acc, FP32Vec16 &a, FP32Vec16 &b) { + acc = acc + a * b; +} + +#ifdef __AVX512BF16__ +template <> inline void storeFP32(float v, c10::BFloat16 *ptr) { + *reinterpret_cast<__bfloat16 *>(ptr) = 
_mm_cvtness_sbh(v); +} + +inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) + : reg((__m128i)_mm256_cvtneps_pbh(v.reg)) {} + +inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) + : reg((__m256i)_mm512_cvtneps_pbh(v.reg)) {} + +inline void fma(FP32Vec16 &acc, BF16Vec32 &a, BF16Vec32 &b) { + acc.reg = _mm512_dpbf16_ps(acc.reg, (__m512bh)a.reg, (__m512bh)b.reg); +} +#else +template <> inline void storeFP32(float v, c10::BFloat16 *ptr) { + c10::BFloat16 __attribute__((__may_alias__)) *v_ptr = + reinterpret_cast(&v); + *ptr = *(v_ptr + 1); +} + +inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) + : reg(_mm256_cvtepi32_epi16( + _mm256_bsrli_epi128(_mm256_castps_si256(v.reg), 2))) {} + +inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) + : reg(_mm512_cvtepi32_epi16( + _mm512_bsrli_epi128(_mm512_castps_si512(v.reg), 2))) {} +#endif + +inline void prefetch(const void *addr) { _mm_prefetch(addr, _MM_HINT_T1); } + +}; // namespace vec_op + +#endif diff --git a/csrc_musa/cpu/layernorm.cpp b/csrc_musa/cpu/layernorm.cpp new file mode 100644 index 0000000..467f0dc --- /dev/null +++ b/csrc_musa/cpu/layernorm.cpp @@ -0,0 +1,117 @@ +#include "cpu_types.hpp" + +namespace { +template +void rms_norm_impl(scalar_t *__restrict__ out, + const scalar_t *__restrict__ input, + const scalar_t *__restrict__ weight, const float epsilon, + const int num_tokens, const int hidden_size) { + using scalar_vec_t = vec_op::vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + TORCH_CHECK(hidden_size % VEC_ELEM_NUM == 0); + +#pragma omp parallel for + for (int i = 0; i < num_tokens; ++i) { + vec_op::FP32Vec8 variance(0.0); + auto input_p = input + i * hidden_size; + auto output_p = out + i * hidden_size; + for (int j = 0; j < hidden_size; j += VEC_ELEM_NUM) { + scalar_vec_t x(input_p + j); + vec_op::FP32Vec8 fp32_x(x); + variance = variance + fp32_x * fp32_x; + } + + float s_variance = + 1.0f / sqrtf(variance.reduce_sum() / (float)hidden_size + epsilon); + vec_op::FP32Vec8 fp32_s_variance(s_variance); 
+ + for (int j = 0; j < hidden_size; j += VEC_ELEM_NUM) { + scalar_vec_t x(input_p + j); + scalar_vec_t w(weight + j); + + vec_op::FP32Vec8 fp32_x(x); + vec_op::FP32Vec8 fp32_w(w); + + vec_op::FP32Vec8 fp32_out = fp32_x * fp32_s_variance * fp32_w; + + scalar_vec_t out(fp32_out); + out.save(output_p + j); + } + } +} + +template +void fused_add_rms_norm_impl(scalar_t *__restrict__ input, + scalar_t *__restrict__ residual, + const scalar_t *__restrict__ weight, + const float epsilon, const int num_tokens, + const int hidden_size) { + using scalar_vec_t = vec_op::vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + TORCH_CHECK(hidden_size % VEC_ELEM_NUM == 0); + +#pragma omp parallel for + for (int i = 0; i < num_tokens; ++i) { + vec_op::FP32Vec8 variance(0.0); + auto input_p = input + i * hidden_size; + auto residual_p = residual + i * hidden_size; + for (int j = 0; j < hidden_size; j += VEC_ELEM_NUM) { + scalar_vec_t x(input_p + j); + scalar_vec_t res(residual_p + j); + vec_op::FP32Vec8 fp32_x(x); + vec_op::FP32Vec8 fp32_res(res); + + fp32_x = fp32_x + fp32_res; + variance = variance + fp32_x * fp32_x; + scalar_vec_t out(fp32_x); + out.save(residual_p + j); + } + + float s_variance = + 1.0f / sqrtf(variance.reduce_sum() / (float)hidden_size + epsilon); + vec_op::FP32Vec8 fp32_s_variance(s_variance); + + for (int j = 0; j < hidden_size; j += VEC_ELEM_NUM) { + scalar_vec_t w(weight + j); + scalar_vec_t res(residual_p + j); + + vec_op::FP32Vec8 fp32_w(w); + vec_op::FP32Vec8 fp32_res(res); + + vec_op::FP32Vec8 fp32_out = fp32_res * fp32_s_variance * fp32_w; + + scalar_vec_t out(fp32_out); + out.save(input_p + j); + } + } +} +} // namespace + +void rms_norm(torch::Tensor &out, torch::Tensor &input, + torch::Tensor &weight, float epsilon) { + int hidden_size = input.size(-1); + int num_tokens = input.numel() / hidden_size; + + VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rms_norm_impl", [&] { + CPU_KERNEL_GUARD_IN(rms_norm_impl) + 
rms_norm_impl(out.data_ptr(), input.data_ptr(), + weight.data_ptr(), epsilon, num_tokens, + hidden_size); + CPU_KERNEL_GUARD_OUT(rms_norm_impl) + }); +} + +void fused_add_rms_norm(torch::Tensor &input, torch::Tensor &residual, + torch::Tensor &weight, float epsilon) { + int hidden_size = input.size(-1); + int num_tokens = input.numel() / hidden_size; + + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "fused_add_rms_norm_impl", [&] { + CPU_KERNEL_GUARD_IN(fused_add_rms_norm_impl) + fused_add_rms_norm_impl( + input.data_ptr(), residual.data_ptr(), + weight.data_ptr(), epsilon, num_tokens, hidden_size); + CPU_KERNEL_GUARD_OUT(fused_add_rms_norm_impl) + }); +} diff --git a/csrc_musa/cpu/pos_encoding.cpp b/csrc_musa/cpu/pos_encoding.cpp new file mode 100644 index 0000000..e9b3992 --- /dev/null +++ b/csrc_musa/cpu/pos_encoding.cpp @@ -0,0 +1,199 @@ + +#include "cpu_types.hpp" + +namespace { +template +void rotary_embedding_impl( + const int64_t + *__restrict__ positions, // [batch_size, seq_len] or [num_tokens] + scalar_t + *__restrict__ query, /// [batch_size, seq_len, num_heads, head_size] or + /// [num_tokens, num_heads, head_size] + scalar_t + *__restrict__ key, // [batch_size, seq_len, num_kv_heads, head_size] or + // [num_tokens, num_kv_heads, head_size] + const scalar_t + *__restrict__ cos_sin_cache, // [max_position, 2, rot_dim // 2] + const int rot_dim, const int64_t query_stride, const int64_t key_stride, + const int num_heads, const int num_kv_heads, const int head_size, + const int num_tokens) { + using scalar_vec_t = vec_op::vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + constexpr int ELEM_SIZE = sizeof(scalar_t); + + const int embed_dim = rot_dim / 2; + TORCH_CHECK(embed_dim % VEC_ELEM_NUM == 0); + +#pragma omp parallel for + for (int token_idx = 0; token_idx < num_tokens; ++token_idx) { + int64_t pos = positions[token_idx]; + const scalar_t *cache_ptr = cos_sin_cache + pos * rot_dim; + + for (int i = 0; i < num_heads; ++i) { + 
const int head_idx = i; + const int64_t token_head = + token_idx * query_stride + head_idx * head_size; + for (int j = 0; j < embed_dim; j += VEC_ELEM_NUM) { + const int rot_offset = j; + const int x_index = rot_offset; + const int y_index = embed_dim + rot_offset; + + const int64_t out_x = token_head + x_index; + const int64_t out_y = token_head + y_index; + + const scalar_vec_t cos(cache_ptr + x_index); + const scalar_vec_t sin(cache_ptr + y_index); + + const scalar_vec_t q_x(query + out_x); + const scalar_vec_t q_y(query + out_y); + + vec_op::FP32Vec8 fp32_cos(cos); + vec_op::FP32Vec8 fp32_sin(sin); + + vec_op::FP32Vec8 fp32_q_x(q_x); + vec_op::FP32Vec8 fp32_q_y(q_y); + + auto out1 = fp32_q_x * fp32_cos - fp32_q_y * fp32_sin; + scalar_vec_t(out1).save(query + out_x); + + auto out2 = fp32_q_y * fp32_cos + fp32_q_x * fp32_sin; + scalar_vec_t(out2).save(query + out_y); + } + } + + for (int i = 0; i < num_kv_heads; ++i) { + const int head_idx = i; + const int64_t token_head = token_idx * key_stride + head_idx * head_size; + for (int j = 0; j < embed_dim; j += VEC_ELEM_NUM) { + const int rot_offset = j; + const int x_index = rot_offset; + const int y_index = embed_dim + rot_offset; + + const int64_t out_x = token_head + x_index; + const int64_t out_y = token_head + y_index; + + const scalar_vec_t cos(cache_ptr + x_index); + const scalar_vec_t sin(cache_ptr + y_index); + + const scalar_vec_t k_x(key + out_x); + const scalar_vec_t k_y(key + out_y); + + vec_op::FP32Vec8 fp32_cos(cos); + vec_op::FP32Vec8 fp32_sin(sin); + + vec_op::FP32Vec8 fp32_k_x(k_x); + vec_op::FP32Vec8 fp32_k_y(k_y); + + auto out1 = fp32_k_x * fp32_cos - fp32_k_y * fp32_sin; + scalar_vec_t(out1).save(key + out_x); + auto out2 = fp32_k_y * fp32_cos + fp32_k_x * fp32_sin; + scalar_vec_t(out2).save(key + out_y); + } + } + } +} + +template +void rotary_embedding_gptj_impl( + const int64_t + *__restrict__ positions, // [batch_size, seq_len] or [num_tokens] + scalar_t + *__restrict__ query, /// 
[batch_size, seq_len, num_heads, head_size] or + /// [num_tokens, num_heads, head_size] + scalar_t + *__restrict__ key, // [batch_size, seq_len, num_kv_heads, head_size] or + // [num_tokens, num_kv_heads, head_size] + const scalar_t + *__restrict__ cos_sin_cache, // [max_position, 2, rot_dim // 2] + const int rot_dim, const int64_t query_stride, const int64_t key_stride, + const int num_heads, const int num_kv_heads, const int head_size, + const int num_tokens) { + const int embed_dim = rot_dim / 2; + +#pragma omp parallel for collapse(2) + for (int token_idx = 0; token_idx < num_tokens; ++token_idx) { + for (int i = 0; i < num_heads; ++i) { + int64_t pos = positions[token_idx]; + const scalar_t *cache_ptr = cos_sin_cache + pos * rot_dim; + const scalar_t *cos_cache_ptr = cache_ptr; + const scalar_t *sin_cache_ptr = cache_ptr + embed_dim; + const int head_idx = i; + const int64_t token_head = + token_idx * query_stride + head_idx * head_size; + scalar_t *head_query = token_head + query; + for (int j = 0; j < embed_dim; j += 1) { + const int rot_offset = j; + const int x_index = 2 * rot_offset; + const int y_index = 2 * rot_offset + 1; + + const float cos = cos_cache_ptr[rot_offset]; + const float sin = sin_cache_ptr[rot_offset]; + + const float x = head_query[x_index]; + const float y = head_query[y_index]; + + head_query[x_index] = x * cos - y * sin; + head_query[y_index] = y * cos + x * sin; + } + } + } + +#pragma omp parallel for collapse(2) + for (int token_idx = 0; token_idx < num_tokens; ++token_idx) { + for (int i = 0; i < num_kv_heads; ++i) { + int64_t pos = positions[token_idx]; + const scalar_t *cache_ptr = cos_sin_cache + pos * rot_dim; + const scalar_t *cos_cache_ptr = cache_ptr; + const scalar_t *sin_cache_ptr = cache_ptr + embed_dim; + const int head_idx = i; + const int64_t token_head = token_idx * key_stride + head_idx * head_size; + scalar_t *head_key = key + token_head; + for (int j = 0; j < embed_dim; j += 1) { + const int rot_offset = j; + const 
int x_index = 2 * rot_offset; + const int y_index = 2 * rot_offset + 1; + + const float cos = cos_cache_ptr[rot_offset]; + const float sin = sin_cache_ptr[rot_offset]; + + const float x = head_key[x_index]; + const float y = head_key[y_index]; + + head_key[x_index] = x * cos - y * sin; + head_key[y_index] = y * cos + x * sin; + } + } + } +} +}; // namespace + +void rotary_embedding(torch::Tensor &positions, torch::Tensor &query, + torch::Tensor &key, int head_size, + torch::Tensor &cos_sin_cache, bool is_neox) { + int num_tokens = query.numel() / query.size(-1); + int rot_dim = cos_sin_cache.size(1); + int num_heads = query.size(-1) / head_size; + int num_kv_heads = key.size(-1) / head_size; + int64_t key_stride = key.stride(-2); + int64_t query_stride = query.stride(-2); + + VLLM_DISPATCH_FLOATING_TYPES( + query.scalar_type(), "rotary_embedding_impl", [&] { + CPU_KERNEL_GUARD_IN(rotary_embedding_impl) + if (is_neox) { + rotary_embedding_impl( + positions.data_ptr(), query.data_ptr(), + key.data_ptr(), cos_sin_cache.data_ptr(), + rot_dim, query_stride, key_stride, num_heads, num_kv_heads, + head_size, num_tokens); + } else { + rotary_embedding_gptj_impl( + positions.data_ptr(), query.data_ptr(), + key.data_ptr(), cos_sin_cache.data_ptr(), + rot_dim, query_stride, key_stride, num_heads, num_kv_heads, + head_size, num_tokens); + } + + CPU_KERNEL_GUARD_OUT(rotary_embedding_impl) + }); +} diff --git a/csrc_musa/cpu/pybind.cpp b/csrc_musa/cpu/pybind.cpp new file mode 100644 index 0000000..bba0440 --- /dev/null +++ b/csrc_musa/cpu/pybind.cpp @@ -0,0 +1,73 @@ +#include "cache.h" +#include "cuda_utils.h" +#include "ops.h" +#include + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + // vLLM custom ops + pybind11::module ops = m.def_submodule("ops", "vLLM custom operators"); + + // Attention ops + ops.def( + "paged_attention_v1", + &paged_attention_v1, + "Compute the attention between an input query and the cached keys/values using PagedAttention."); + ops.def( + 
"paged_attention_v2", + &paged_attention_v2, + "PagedAttention V2."); + + // Activation ops + ops.def( + "silu_and_mul", + &silu_and_mul, + "Activation function used in SwiGLU."); + ops.def( + "gelu_and_mul", + &gelu_and_mul, + "Activation function used in GeGLU with `none` approximation."); + ops.def( + "gelu_tanh_and_mul", + &gelu_tanh_and_mul, + "Activation function used in GeGLU with `tanh` approximation."); + ops.def( + "gelu_new", + &gelu_new, + "GELU implementation used in GPT-2."); + ops.def( + "gelu_fast", + &gelu_fast, + "Approximate GELU implementation."); + + // Layernorm + ops.def( + "rms_norm", + &rms_norm, + "Apply Root Mean Square (RMS) Normalization to the input tensor."); + + ops.def( + "fused_add_rms_norm", + &fused_add_rms_norm, + "In-place fused Add and RMS Normalization"); + + // Rotary embedding + ops.def( + "rotary_embedding", + &rotary_embedding, + "Apply GPT-NeoX or GPT-J style rotary embedding to query and key"); + + // Cache ops + pybind11::module cache_ops = m.def_submodule("cache_ops", "vLLM cache ops"); + cache_ops.def( + "swap_blocks", + &swap_blocks, + "Swap in (out) the cache blocks from src to dst"); + cache_ops.def( + "copy_blocks", + ©_blocks, + "Copy the cache blocks from src to dst"); + cache_ops.def( + "reshape_and_cache", + &reshape_and_cache, + "Reshape the key and value tensors and cache them"); +} diff --git a/csrc_musa/custom_all_reduce.mu b/csrc_musa/custom_all_reduce.mu new file mode 100644 index 0000000..bb0ef5c --- /dev/null +++ b/csrc_musa/custom_all_reduce.mu @@ -0,0 +1,148 @@ +#include "torch_musa/csrc/core/MUSAException.h" +#include "torch_musa/csrc/core/MUSAGuard.h" +#include "torch_musa/csrc/core/MUSAStream.h" +#include + +#include "custom_all_reduce.muh" + +// fake pointer type +using fptr_t = uint64_t; +static_assert(sizeof(void *) == sizeof(fptr_t)); + +fptr_t init_custom_ar(torch::Tensor &meta, torch::Tensor &rank_data, + const std::vector &handles, + const std::vector &offsets, int rank, + bool 
full_nvlink) { + int world_size = offsets.size(); + if (world_size > 8) + throw std::invalid_argument("world size > 8 is not supported"); + if (world_size % 2 != 0) + throw std::invalid_argument("Odd num gpus is not supported for now"); + if (world_size != handles.size()) + throw std::invalid_argument( + "handles length should equal to offsets length"); + if (rank < 0 || rank >= world_size) + throw std::invalid_argument("invalid rank passed in"); + + musaIpcMemHandle_t ipc_handles[8]; + for (int i = 0; i < world_size; i++) { + std::memcpy(&ipc_handles[i], handles[i].data(), sizeof(musaIpcMemHandle_t)); + } + return (fptr_t) new vllm::CustomAllreduce( + reinterpret_cast(meta.data_ptr()), rank_data.data_ptr(), + rank_data.numel(), ipc_handles, offsets, rank, full_nvlink); +} + +/** + * Make sure tensor t's data lies completely within ((char)t.data_ptr()) + + * t.numel() * t.element_size(). This is slightly weaker than t.is_contiguous() + * because it allows transpose of contiguous slice (i.e. slicing the first + * dimension). Currently, we require this because stride information is not + * passed into the kernels and we treat input tensors as flat. + * + * Examples + * A = torch.zeros(3, 3, 3) + * 1. A: OK + * 2. A[1:]: OK + * 3. A.permute(2, 0, 1): OK + * 4. A[1:].permute(2, 0, 1): OK + * 5. A[None].expand(2, -1, -1, -1): Not OK + * 6. 
A[:, 1:, 1:]: Not OK + */ +bool _is_weak_contiguous(torch::Tensor &t) { + return t.is_contiguous() || + (t.storage().nbytes() - t.storage_offset() * t.element_size() == + t.numel() * t.element_size()); +} + +bool should_custom_ar(torch::Tensor &inp, int max_size, int world_size, + bool full_nvlink) { + auto inp_size = inp.numel() * inp.element_size(); + // custom allreduce requires input byte size to be multiples of 16 + if (inp_size % 16 != 0) return false; + if (!_is_weak_contiguous(inp)) return false; + if (world_size == 2 || full_nvlink) return inp_size <= max_size; + // for 4 or more non NVLink-capable GPUs, custom allreduce provides little + // performance improvement over NCCL. + return false; +} + +void _all_reduce(fptr_t _fa, torch::Tensor &inp, torch::Tensor &out, + musaStream_t stream) { + auto fa = reinterpret_cast(_fa); + TORCH_CHECK(_is_weak_contiguous(out)); + switch (out.scalar_type()) { + case at::ScalarType::Float: { + fa->allreduce(stream, reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), + out.numel()); + break; + } + case at::ScalarType::Half: { + fa->allreduce(stream, reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), + out.numel()); + break; + } +#if (__MUSA_ARCH__ >= 800 || !defined(__MUSA_ARCH__)) + case at::ScalarType::BFloat16: { + fa->allreduce( + stream, reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), out.numel()); + break; + } +#endif + default: + throw std::runtime_error( + "custom allreduce only supports float32, float16 and bfloat16"); + } +} + +void all_reduce_reg(fptr_t _fa, torch::Tensor &inp, torch::Tensor &out) { + const at::musa::OptionalMUSAGuard device_guard(device_of(inp)); + auto stream = c10::musa::getCurrentMUSAStream().stream(); + TORCH_CHECK_EQ(inp.scalar_type(), out.scalar_type()); + TORCH_CHECK_EQ(inp.numel(), out.numel()); + _all_reduce(_fa, inp, out, stream); +} + +void all_reduce_unreg(fptr_t _fa, torch::Tensor &inp, torch::Tensor ®_buffer, + 
torch::Tensor &out) { + const at::musa::OptionalMUSAGuard device_guard(device_of(inp)); + auto stream = c10::musa::getCurrentMUSAStream().stream(); + + auto input_size = inp.numel() * inp.element_size(); + TORCH_CHECK_EQ(inp.scalar_type(), out.scalar_type()); + TORCH_CHECK_EQ(inp.numel(), out.numel()); + TORCH_CHECK(input_size <= reg_buffer.numel() * reg_buffer.element_size(), + "registered buffer is too small to contain the input"); + C10_MUSA_CHECK(musaMemcpyAsync(reg_buffer.data_ptr(), inp.data_ptr(), + input_size, musaMemcpyDeviceToDevice, stream)); + _all_reduce(_fa, reg_buffer, out, stream); +} + +void dispose(fptr_t _fa) { + auto fa = reinterpret_cast(_fa); + delete fa; +} + +int meta_size() { return sizeof(vllm::Signal); } + +void register_buffer(fptr_t _fa, torch::Tensor &t, + const std::vector &handles, + const std::vector &offsets) { + auto fa = reinterpret_cast(_fa); + fa->register_buffer(handles, offsets, t.data_ptr()); +} + +std::pair, std::vector> get_graph_buffer_ipc_meta( + fptr_t _fa) { + auto fa = reinterpret_cast(_fa); + return fa->get_graph_buffer_ipc_meta(); +} + +void register_graph_buffers(fptr_t _fa, const std::vector &handles, + const std::vector> &offsets) { + auto fa = reinterpret_cast(_fa); + fa->register_graph_buffers(handles, offsets); +} diff --git a/csrc_musa/custom_all_reduce.muh b/csrc_musa/custom_all_reduce.muh new file mode 100644 index 0000000..b04d0ae --- /dev/null +++ b/csrc_musa/custom_all_reduce.muh @@ -0,0 +1,485 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define CUDACHECK(cmd) \ + do { \ + musaError_t e = cmd; \ + if (e != musaSuccess) { \ + printf("Failed: Cuda error %s:%d '%s'\n", __FILE__, __LINE__, \ + musaGetErrorString(e)); \ + exit(EXIT_FAILURE); \ + } \ + } while (0) + +namespace vllm { + +constexpr int kMaxBlocks = 64; +// note: we don't want to use atomics for signals because peer atomics are no +// supported on PCIe links +struct Signal { + 
alignas(128) uint32_t start[kMaxBlocks][8]; + alignas(128) uint32_t end[kMaxBlocks][8]; +}; + +struct __align__(16) RankData { const void *__restrict__ ptrs[8]; RankData& operator=(const RankData& ){return *this;} }; + +struct __align__(16) RankSignals { volatile Signal *signals[8]; }; + +// like std::array, but aligned +template +struct __align__(alignof(T) * sz) array_t { + T data[sz]; + using type = T; + static constexpr int size = sz; +}; + +// use packed type to maximize memory efficiency +// goal: generate ld.128 and st.128 instructions +template +struct packed_t { + // the (P)acked type for load/store + using P = array_t; + // the (A)ccumulator type for reduction + using A = array_t; +}; + +#define DINLINE __device__ __forceinline__ + +// scalar cast functions +DINLINE float upcast_s(half val) { return __half2float(val); } + +template +DINLINE T downcast_s(float val); +template <> +DINLINE half downcast_s(float val) { + return __float2half(val); +} + +// scalar add functions +// for some reason when compiling with Pytorch, the + operator for half and +// bfloat is disabled so we call the intrinsics directly +DINLINE half &assign_add(half &a, half b) { + a = __hadd(a, b); + return a; +} +DINLINE float &assign_add(float &a, float b) { return a += b; } + +#if (__MUSA_ARCH__ >= 800 || !defined(__MUSA_ARCH__)) +DINLINE float upcast_s(mt_bfloat16 val) { return __bfloat162float(val); } +template <> +DINLINE mt_bfloat16 downcast_s(float val) { + return __float2bfloat16(val); +} +DINLINE mt_bfloat16 &assign_add(mt_bfloat16 &a, mt_bfloat16 b) { + a = __hadd(a, b); + return a; +} +#endif + +template +DINLINE array_t &packed_assign_add(array_t &a, array_t b) { +#pragma unroll + for (int i = 0; i < N; i++) { + assign_add(a.data[i], b.data[i]); + } + return a; +} + +template +DINLINE array_t upcast(array_t val) { + if constexpr (std::is_same::value) { + return val; + } else { + array_t out; +#pragma unroll + for (int i = 0; i < N; i++) { + out.data[i] = 
upcast_s(val.data[i]); + } + return out; + } +} + +template +DINLINE O downcast(array_t val) { + if constexpr (std::is_same::value) { + return val; + } else { + O out; +#pragma unroll + for (int i = 0; i < O::size; i++) { + out.data[i] = downcast_s(val.data[i]); + } + return out; + } +} + +// This function is meant to be used as the first synchronization in the all +// reduce kernel. Thus, it doesn't need to make any visibility guarantees for +// prior memory accesses. Note: volatile writes will not be reordered against +// other volatile writes. +template +DINLINE void start_sync(const RankSignals &sg, volatile Signal *self_sg, + int rank) { + if (threadIdx.x < ngpus) { + // reset flag for next time + self_sg->end[blockIdx.x][threadIdx.x] = 0; + // simultaneously write to the corresponding flag of all ranks. + // Latency = 1 p2p write + sg.signals[threadIdx.x]->start[blockIdx.x][rank] = 1; + // wait until we got true from all ranks + while (!self_sg->start[blockIdx.x][threadIdx.x]) + ; + } + __syncthreads(); +} + +// This function is meant to be used as the second or the final synchronization +// barrier in the all reduce kernel. If it's the final synchronization barrier, +// we don't need to make any visibility guarantees for prior memory accesses. +template +DINLINE void end_sync(const RankSignals &sg, volatile Signal *self_sg, + int rank) { + __syncthreads(); + // eliminate the case that prior writes are not visible after signals become + // visible. Note that I did not managed to make this happen through a lot of + // testing. Might be the case that hardware provides stronger guarantee than + // the memory model. + if constexpr (!final_sync) __threadfence_system(); + if (threadIdx.x < ngpus) { + // reset flag for next time + self_sg->start[blockIdx.x][threadIdx.x] = 0; + // simultaneously write to the corresponding flag of all ranks. 
+ // Latency = 1 p2p write + sg.signals[threadIdx.x]->end[blockIdx.x][rank] = 1; + // wait until we got true from all ranks + while (!self_sg->end[blockIdx.x][threadIdx.x]) + ; + } + if constexpr (!final_sync) __syncthreads(); +} + +template +DINLINE P packed_reduce(const P *ptrs[], int idx) { + A tmp = upcast(ptrs[0][idx]); +#pragma unroll + for (int i = 1; i < ngpus; i++) { + packed_assign_add(tmp, upcast(ptrs[i][idx])); + } + return downcast

(tmp); +} + +template +__global__ void __launch_bounds__(512, 1) + cross_device_reduce_1stage(RankData *_dp, RankSignals sg, + volatile Signal *self_sg, T *__restrict__ result, + int rank, int size) { + using P = typename packed_t::P; + using A = typename packed_t::A; + // note: we don't reorder the address so the accumulation order is the same + // for all ranks, ensuring bitwise identical results + auto dp = *_dp; + start_sync(sg, self_sg, rank); + // do the actual reduction + for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; + idx += gridDim.x * blockDim.x) { + ((P *)result)[idx] = + packed_reduce((const P **)&dp.ptrs[0], idx); + } + end_sync(sg, self_sg, rank); +} + +template +DINLINE P *get_tmp_buf(volatile Signal *sg) { + return (P *)(((Signal *)sg) + 1); +} + +template +__global__ void __launch_bounds__(512, 1) + cross_device_reduce_2stage(RankData *_dp, RankSignals sg, + volatile Signal *self_sg, T *__restrict__ result, + int rank, int size) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int stride = gridDim.x * blockDim.x; + using P = typename packed_t::P; + using A = typename packed_t::A; + int part = size / ngpus; + int start = rank * part; + int end = rank == ngpus - 1 ? size : start + part; + int largest_part = part + size % ngpus; + const P *ptrs[ngpus]; + P *tmps[ngpus]; +#pragma unroll + for (int i = 0; i < ngpus; i++) { + int target = (rank + i) % ngpus; + ptrs[i] = (const P *)_dp->ptrs[target]; + tmps[i] = get_tmp_buf

(sg.signals[target]); + } + auto tmp_out = tmps[0]; + start_sync(sg, self_sg, rank); + // stage 1: reduce scatter + for (int idx = start + tid; idx < end; idx += stride) { + tmp_out[idx - start] = packed_reduce(ptrs, idx); + } + end_sync(sg, self_sg, rank); + + // stage 2: allgather. Note: it's important to match the tid between + // the two stages, because visibility across devices is only guaranteed + // between threads that have the same tid. If thread i computes the sum of + // start + i in the first stage, then thread i also gathers start + i from all + // ranks. + for (int idx = tid; idx < largest_part; idx += stride) { +#pragma unroll + for (int i = 0; i < ngpus; i++) { + int gather_from_rank = ((rank + i) % ngpus); + if (gather_from_rank == ngpus - 1 || idx < part) { + int dst_idx = gather_from_rank * part + idx; + ((P *)result)[dst_idx] = tmps[i][idx]; + } + } + } +} + +using IPC_KEY = std::array; +static_assert(sizeof(IPC_KEY) == sizeof(musaIpcMemHandle_t)); +static_assert(alignof(IPC_KEY) == alignof(musaIpcMemHandle_t)); + +class CustomAllreduce { + public: + int rank_; + int world_size_; + bool full_nvlink_; + + // below are device pointers + RankSignals sg_; + std::unordered_map buffers_; + Signal *self_sg_; + + // stores the registered device pointers from all ranks + RankData *d_rank_data_base_, *d_rank_data_end_; + std::vector graph_unreg_buffers_; + // a map from IPC handles to opened IPC pointers + std::map ipc_handles_; + + /** + * meta is a pointer to device metadata and temporary buffer for allreduce. + * + * There's a total of sizeof(Signal) of prefix before the actual data, + * so meta + 1 points to actual temporary buffer. + * + * note: this class does not own any device memory. 
Any required buffers + * are passed in from the constructor + */ + CustomAllreduce(Signal *meta, void *rank_data, size_t rank_data_sz, + const musaIpcMemHandle_t *handles, + const std::vector &offsets, int rank, + bool full_nvlink = true) + : rank_(rank), + world_size_(offsets.size()), + full_nvlink_(full_nvlink), + self_sg_(meta), + d_rank_data_base_(reinterpret_cast(rank_data)), + d_rank_data_end_(d_rank_data_base_ + rank_data_sz / sizeof(RankData)) { + for (int i = 0; i < world_size_; i++) { + Signal *rank_sg; + if (i != rank_) { + char *handle = open_ipc_handle(&handles[i]); + handle += offsets[i]; + rank_sg = (Signal *)handle; + } else { + rank_sg = self_sg_; + } + sg_.signals[i] = rank_sg; + } + } + + char *open_ipc_handle(const void *ipc_handle) { + auto [it, new_handle] = + ipc_handles_.insert({*((IPC_KEY *)ipc_handle), nullptr}); + if (new_handle) { + char *ipc_ptr; + CUDACHECK(musaIpcOpenMemHandle((void **)&ipc_ptr, + *((const musaIpcMemHandle_t *)ipc_handle), + musaIpcMemLazyEnablePeerAccess)); + it->second = ipc_ptr; + } + return it->second; + } + + std::pair, std::vector> + get_graph_buffer_ipc_meta() { + auto num_buffers = graph_unreg_buffers_.size(); + auto handle_sz = sizeof(musaIpcMemHandle_t); + std::vector handles(handle_sz * num_buffers, 0); + std::vector offsets(num_buffers); + for (int i = 0; i < num_buffers; i++) { + auto ptr = graph_unreg_buffers_[i]; + void *base_ptr; + // note: must share the base address of each allocation, or we get wrong + // address + if (muPointerGetAttribute(&base_ptr, + MU_POINTER_ATTRIBUTE_RANGE_START_ADDR, + (MUdeviceptr)ptr) != MUSA_SUCCESS) + throw std::runtime_error("failed to get pointer attr"); + CUDACHECK(musaIpcGetMemHandle( + (musaIpcMemHandle_t *)&handles[i * handle_sz], base_ptr)); + offsets[i] = ((char *)ptr) - ((char *)base_ptr); + } + return std::make_pair(handles, offsets); + } + + void check_rank_data_capacity(size_t num = 1) { + if (d_rank_data_base_ + num > d_rank_data_end_) + throw 
std::runtime_error( + "Rank data buffer is overflowed by " + + std::to_string(d_rank_data_base_ + num - d_rank_data_end_)); + } + + void register_buffer(const std::vector &handles, + const std::vector &offsets, void *self) { + check_rank_data_capacity(); + RankData data; + for (int i = 0; i < world_size_; i++) { + if (i != rank_) { + char *handle = open_ipc_handle(handles[i].data()); + handle += offsets[i]; + data.ptrs[i] = handle; + } else { + data.ptrs[i] = self; + } + } + auto d_data = d_rank_data_base_++; + CUDACHECK( + musaMemcpy(d_data, &data, sizeof(RankData), musaMemcpyHostToDevice)); + buffers_[self] = d_data; + } + + // note: when registering graph buffers, we intentionally choose to not + // deduplicate the addresses. That means if the allocator reuses some + // addresses, they will be registered again. This is to account for the remote + // possibility of different allocation patterns between ranks. For example, + // rank 1 may get the same input address for the second allreduce, but rank 2 + // got a different address. IPC handles have internal reference counting + // mechanism so overhead should be small. + void register_graph_buffers( + const std::vector &handles, + const std::vector> &offsets) { + auto num_buffers = graph_unreg_buffers_.size(); + check_rank_data_capacity(num_buffers); + std::vector rank_data(num_buffers); + for (int i = 0; i < num_buffers; i++) { + auto self_ptr = graph_unreg_buffers_[i]; + auto &rd = rank_data[i]; + for (int j = 0; j < world_size_; j++) { + if (j != rank_) { + char *handle = + open_ipc_handle(&handles[j][i * sizeof(musaIpcMemHandle_t)]); + handle += offsets[j][i]; + rd.ptrs[j] = handle; + } else { + rd.ptrs[j] = self_ptr; + } + } + } + CUDACHECK(musaMemcpy(d_rank_data_base_, rank_data.data(), + sizeof(RankData) * num_buffers, + musaMemcpyHostToDevice)); + d_rank_data_base_ += num_buffers; + graph_unreg_buffers_.clear(); + } + + /** + * This is the result after careful grid search. 
Using 36 blocks give the best + * or close to the best runtime on the devices I tried: A100, A10, A30, T4, + * V100. You'll notice that NCCL kernels also only take a small amount of SMs. + * Not quite sure the underlying reason, but my guess is that too many SMs + * will cause contention on NVLink bus. + */ + template + void allreduce(musaStream_t stream, T *input, T *output, int size, + int threads = 512, int block_limit = 36) { + auto d = packed_t::P::size; + if (size % d != 0) + throw std::runtime_error( + "custom allreduce currently requires input length to be multiple " + "of " + + std::to_string(d)); + if (block_limit > kMaxBlocks) + throw std::runtime_error("max supported block limit is " + + std::to_string(kMaxBlocks) + ". Got " + + std::to_string(block_limit)); + + RankData *ptrs; + musaStreamCaptureStatus status; + CUDACHECK(at::musa::musaStreamIsCapturing(stream, &status)); + if (status == musaStreamCaptureStatusActive) { + ptrs = d_rank_data_base_ + graph_unreg_buffers_.size(); + graph_unreg_buffers_.push_back(input); + } else { + auto it = buffers_.find(input); + if (it == buffers_.end()) + throw std::runtime_error( + "buffer address " + + std::to_string(reinterpret_cast(input)) + + " is not registered!"); + ptrs = it->second; + } + + size /= d; + auto bytes = size * sizeof(typename packed_t::P); + int blocks = std::min(block_limit, (size + threads - 1) / threads); +#define KL(ngpus, name) \ + name<<>>(ptrs, sg_, self_sg_, output, \ + rank_, size); +#define REDUCE_CASE(ngpus) \ + case ngpus: { \ + if (world_size_ == 2) { \ + KL(ngpus, cross_device_reduce_1stage); \ + } else if (full_nvlink_) { \ + if ((world_size_ <= 4 && bytes < 512 * 1024) || \ + (world_size_ <= 8 && bytes < 256 * 1024)) { \ + KL(ngpus, cross_device_reduce_1stage); \ + } else { \ + KL(ngpus, cross_device_reduce_2stage); \ + } \ + } \ + break; \ + } + + switch (world_size_) { + REDUCE_CASE(2) + REDUCE_CASE(4) + REDUCE_CASE(6) + REDUCE_CASE(8) + default: + throw std::runtime_error( + 
"custom allreduce only supports num gpus in (2,4,6,8). Actual num " + "gpus = " + + std::to_string(world_size_)); + } +#undef REDUCE_CASE +#undef KL + } + + ~CustomAllreduce() { + for (auto [_, ptr] : ipc_handles_) { + CUDACHECK(musaIpcCloseMemHandle(ptr)); + } + } +}; +/** + * To inspect PTX/SASS, copy paste this header file to compiler explorer and add + a template instantiation: + * template void vllm::CustomAllreduce::allreduce(musaStream_t, half *, + half *, int, int, int); +*/ +} // namespace vllm diff --git a/csrc_musa/custom_all_reduce_test.mu b/csrc_musa/custom_all_reduce_test.mu new file mode 100644 index 0000000..db59e6b --- /dev/null +++ b/csrc_musa/custom_all_reduce_test.mu @@ -0,0 +1,316 @@ +/** + * This is a standalone test for custom allreduce. + * To compile, make sure you have MPI and NCCL installed in your system. + * export MPI_HOME=XXX + * nvcc -O2 -arch=native -std=c++17 custom_all_reduce_test.cu -o + * custom_all_reduce_test -lnccl -I${MPI_HOME}/include -lmpi + * + * Warning: this C++ test is not designed to be very readable and was used + * during the rapid prototyping process. 
+ * + * To run: + * mpirun -np 8 ./custom_all_reduce_test + */ +#include +#include +#include +#include + +#include +#include + +#include "musa_profiler_api.h" +#include "custom_all_reduce.muh" +#include "mpi.h" +#include "nccl.h" + +#define MPICHECK(cmd) \ + do { \ + int e = cmd; \ + if (e != MPI_SUCCESS) { \ + printf("Failed: MPI error %s:%d '%d'\n", __FILE__, __LINE__, e); \ + exit(EXIT_FAILURE); \ + } \ + } while (0) + +#define NCCLCHECK(cmd) \ + do { \ + ncclResult_t r = cmd; \ + if (r != ncclSuccess) { \ + printf("Failed, NCCL error %s:%d '%s'\n", __FILE__, __LINE__, \ + ncclGetErrorString(r)); \ + exit(EXIT_FAILURE); \ + } \ + } while (0) + +__global__ void dummy_kernel() { + for (int i = 0; i < 100; i++) __nanosleep(1000000); // 100ms +} + +template +__global__ void set_data(T *data, int size, int myRank) { + for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; + idx += gridDim.x * blockDim.x) { + data[idx] = myRank * 0.11f; + } +} + +template +__global__ void convert_data(const T *data1, const T *data2, double *fdata1, + double *fdata2, int size) { + for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; + idx += gridDim.x * blockDim.x) { + fdata1[idx] = data1[idx]; + fdata2[idx] = data2[idx]; + } +} + +__global__ void init_rand(curandState_t *state, int size, int nRanks) { + for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; + idx += gridDim.x * blockDim.x) { + for (int i = 0; i < nRanks; i++) { + curand_init(i + 1, idx, 0, &state[idx * nRanks + i]); + } + } +} + +template +__global__ void gen_data(curandState_t *state, T *data, double *ground_truth, + int myRank, int nRanks, int size) { + for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; + idx += gridDim.x * blockDim.x) { + double sum = 0.0; + for (int i = 0; i < nRanks; i++) { + double val = curand_uniform_double(&state[idx * nRanks + i]) * 4; + T hval = val; // downcast first + sum += static_cast(hval); + if (i == myRank) data[idx] = hval; + } + 
ground_truth[idx] = sum; + } +} + +template +void run(int myRank, int nRanks, ncclComm_t &comm, int threads, int block_limit, + int data_size, bool performance_test) { + T *result; + musaStream_t stream; + CUDACHECK(musaStreamCreateWithFlags(&stream, musaStreamNonBlocking)); + CUDACHECK(musaMalloc(&result, data_size * sizeof(T))); + CUDACHECK(musaMemset(result, 0, data_size * sizeof(T))); + + musaIpcMemHandle_t self_data_handle; + musaIpcMemHandle_t data_handles[8]; + vllm::Signal *buffer; + T *self_data_copy; + /** + * Allocate IPC buffer + * + * The first section is a temporary buffer for storing intermediate allreduce + * results, if a particular algorithm requires it. The second section is for + * the input to the allreduce. The actual API takes the input pointer as an + * argument (that is, they can and usually should be allocated separately). + * But since the input pointers and the temporary buffer all require IPC + * registration, they are allocated and registered together in the test for + * convenience. 
+ */ + CUDACHECK( + musaMalloc(&buffer, 2 * data_size * sizeof(T) + sizeof(vllm::Signal))); + CUDACHECK( + musaMemset(buffer, 0, 2 * data_size * sizeof(T) + sizeof(vllm::Signal))); + CUDACHECK(musaMalloc(&self_data_copy, data_size * sizeof(T))); + CUDACHECK(musaIpcGetMemHandle(&self_data_handle, buffer)); + + MPICHECK(MPI_Allgather(&self_data_handle, sizeof(musaIpcMemHandle_t), + MPI_BYTE, data_handles, sizeof(musaIpcMemHandle_t), + MPI_BYTE, MPI_COMM_WORLD)); + + void *rank_data; + size_t rank_data_sz = 16 * 1024 * 1024; + CUDACHECK(musaMalloc(&rank_data, rank_data_sz)); + std::vector offsets(nRanks, 0); + vllm::CustomAllreduce fa(buffer, rank_data, rank_data_sz, data_handles, + offsets, myRank); + auto *self_data = + reinterpret_cast(reinterpret_cast(buffer) + + sizeof(vllm::Signal) + data_size * sizeof(T)); + // hack buffer registration + { + std::vector handles; + handles.reserve(nRanks); + for (int i = 0; i < nRanks; i++) { + char *begin = (char *)&data_handles[i]; + char *end = (char *)&data_handles[i + 1]; + handles.emplace_back(begin, end); + } + std::vector offsets(nRanks, + sizeof(vllm::Signal) + data_size * sizeof(T)); + fa.register_buffer(handles, offsets, self_data); + } + + double *ground_truth; + CUDACHECK(musaMallocHost(&ground_truth, data_size * sizeof(double))); + curandState_t *states; + CUDACHECK(musaMalloc(&states, sizeof(curandState_t) * nRanks * data_size)); + init_rand<<<108, 1024, 0, stream>>>(states, data_size, nRanks); + gen_data<<<108, 1024, 0, stream>>>(states, self_data, ground_truth, myRank, + nRanks, data_size); + CUDACHECK(musaMemcpyAsync(self_data_copy, self_data, data_size * sizeof(T), + musaMemcpyDeviceToDevice, stream)); + musaEvent_t start, stop; + CUDACHECK(musaEventCreate(&start)); + CUDACHECK(musaEventCreate(&stop)); + + ncclDataType_t ncclDtype; + if (std::is_same::value) { + ncclDtype = ncclFloat16; + } else if (std::is_same::value) { + ncclDtype = ncclBfloat16; + } else { + ncclDtype = ncclFloat; + } + double 
*nccl_result, *my_result; + CUDACHECK(musaMallocHost(&nccl_result, data_size * sizeof(double))); + CUDACHECK(musaMallocHost(&my_result, data_size * sizeof(double))); + if (performance_test) { + dummy_kernel<<<1, 1, 0, stream>>>(); + constexpr int warmup_iters = 5; + constexpr int num_iters = 100; + // warmup + for (int i = 0; i < warmup_iters; i++) { + NCCLCHECK(ncclAllReduce(result, result, data_size, ncclDtype, ncclSum, + comm, stream)); + } + CUDACHECK(musaEventRecord(start, stream)); + for (int i = 0; i < num_iters; i++) { + NCCLCHECK(ncclAllReduce(result, result, data_size, ncclDtype, ncclSum, + comm, stream)); + } + CUDACHECK(musaEventRecord(stop, stream)); + CUDACHECK(musaStreamSynchronize(stream)); + float allreduce_ms = 0; + musaEventElapsedTime(&allreduce_ms, start, stop); + + dummy_kernel<<<1, 1, 0, stream>>>(); + // warm up + for (int i = 0; i < warmup_iters; i++) { + fa.allreduce(stream, self_data, result, data_size, threads, + block_limit); + } + CUDACHECK(musaEventRecord(start, stream)); + for (int i = 0; i < num_iters; i++) { + fa.allreduce(stream, self_data, result, data_size, threads, + block_limit); + } + CUDACHECK(musaEventRecord(stop, stream)); + CUDACHECK(musaStreamSynchronize(stream)); + + float duration_ms = 0; + musaEventElapsedTime(&duration_ms, start, stop); + if (myRank == 0) + printf( + "Rank %d done, nGPUs:%d, sz (kb): %d, %d, %d, my time:%.2fus, nccl " + "time:%.2fus\n", + myRank, nRanks, data_size * sizeof(T) / 1024, threads, block_limit, + duration_ms * 1e3 / num_iters, allreduce_ms * 1e3 / num_iters); + + // And wait for all the queued up work to complete + CUDACHECK(musaStreamSynchronize(stream)); + + NCCLCHECK(ncclAllReduce(self_data_copy, self_data, data_size, ncclDtype, + ncclSum, comm, stream)); + + convert_data<<<108, 1024, 0, stream>>>(self_data, result, nccl_result, + my_result, data_size); + CUDACHECK(musaStreamSynchronize(stream)); + + for (unsigned long j = 0; j < data_size; j++) { + auto diff = abs(nccl_result[j] - 
my_result[j]); + if (diff >= 4e-2) { + printf("Rank %d: Verification mismatch at %lld: %f != (my) %f, gt=%f\n", + myRank, j, nccl_result[j], my_result[j], ground_truth[j]); + break; + } + } + long double nccl_diffs = 0.0; + long double my_diffs = 0.0; + for (int j = 0; j < data_size; j++) { + nccl_diffs += abs(nccl_result[j] - ground_truth[j]); + my_diffs += abs(my_result[j] - ground_truth[j]); + } + if (myRank == 0) + std::cout << "average abs diffs: nccl: " << nccl_diffs / data_size + << " me: " << my_diffs / data_size << std::endl; + } else { + for (int i = 0; i < 100; i++) { + fa.allreduce(stream, self_data, result, data_size, threads, + block_limit); + CUDACHECK(musaStreamSynchronize(stream)); + NCCLCHECK(ncclAllReduce(self_data, self_data_copy, data_size, ncclDtype, + ncclSum, comm, stream)); + convert_data<<<108, 1024, 0, stream>>>( + self_data_copy, result, nccl_result, my_result, data_size); + CUDACHECK(musaStreamSynchronize(stream)); + + for (unsigned long j = 0; j < data_size; j++) { + auto diff = abs(nccl_result[j] - my_result[j]); + if (diff >= 4e-2) { + printf( + "Rank %d: Verification mismatch at %lld: %f != (my) %f, gt=%f\n", + myRank, j, nccl_result[j], my_result[j], ground_truth[j]); + break; + } + } + } + if (myRank == 0) + printf("Test passed: nGPUs:%d, sz (kb): %d, %d, %d\n", nRanks, + data_size * sizeof(T) / 1024, threads, block_limit); + // long double nccl_diffs = 0.0; + // long double my_diffs = 0.0; + // for (int j = 0; j < data_size; j++) { + // nccl_diffs += abs(nccl_result[j] - ground_truth[j]); + // my_diffs += abs(my_result[j] - ground_truth[j]); + // } + // if (myRank == 0) + // std::cout << "average abs diffs: nccl: " << nccl_diffs / data_size + // << " me: " << my_diffs / data_size << std::endl; + } + + CUDACHECK(musaFree(result)); + CUDACHECK(musaFree(self_data_copy)); + CUDACHECK(musaFree(rank_data)); + CUDACHECK(musaFree(buffer)); + CUDACHECK(musaFree(states)); + CUDACHECK(musaFreeHost(ground_truth)); + 
CUDACHECK(musaFreeHost(nccl_result)); + CUDACHECK(musaFreeHost(my_result)); + CUDACHECK(musaStreamDestroy(stream)); +} + +int main(int argc, char **argv) { + int nRanks, myRank; + MPICHECK(MPI_Init(&argc, &argv)); + MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &myRank)); + MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &nRanks)); + CUDACHECK(musaSetDevice(myRank)); + ncclUniqueId id; + ncclComm_t comm; + if (myRank == 0) ncclGetUniqueId(&id); + MPICHECK(MPI_Bcast(static_cast(&id), sizeof(id), MPI_BYTE, 0, + MPI_COMM_WORLD)); + NCCLCHECK(ncclCommInitRank(&comm, nRanks, id, myRank)); + + bool performance_test = true; + cudaProfilerStart(); + // for (int threads : {256, 512}) { + // for (int block_limit = 16; block_limit < 112; block_limit += 4) { + // run(myRank, nRanks, comm, threads, block_limit, 4096 * 1024); + // } + // } + for (int sz = 512; sz <= (8 << 20); sz *= 2) { + run(myRank, nRanks, comm, 512, 36, sz + 8 * 47, performance_test); + } + + cudaProfilerStop(); + return EXIT_SUCCESS; +} diff --git a/csrc_musa/dispatch_utils.h b/csrc_musa/dispatch_utils.h new file mode 100644 index 0000000..91abd9e --- /dev/null +++ b/csrc_musa/dispatch_utils.h @@ -0,0 +1,37 @@ +/* + * Adapted from + * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h + */ +#pragma once + +#include + +#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) + +#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) + +#define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...) 
\ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) + +#define VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(__VA_ARGS__)) + +#define VLLM_DISPATCH_CASE_INTEGRAL_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) + +#define VLLM_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) diff --git a/csrc_musa/layernorm_kernels.mu b/csrc_musa/layernorm_kernels.mu new file mode 100644 index 0000000..13a0b33 --- /dev/null +++ b/csrc_musa/layernorm_kernels.mu @@ -0,0 +1,352 @@ +#include +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include "dispatch_utils.h" +#include "reduction_utils.muh" +#ifndef USE_ROCM + #include + #include +#else + #include + #include + + using __mt_bfloat16 = __hip_bfloat16; + using __mt_bfloat162 = __hip_bfloat162; +#endif + +namespace vllm { + +// TODO(woosuk): Further optimize this kernel. 
+template +__global__ void rms_norm_kernel( + scalar_t* __restrict__ out, // [..., hidden_size] + const scalar_t* __restrict__ input, // [..., hidden_size] + const scalar_t* __restrict__ weight, // [hidden_size] + const float epsilon, + const int num_tokens, + const int hidden_size) { + __shared__ float s_variance; + float variance = 0.0f; + + for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + const float x = (float) input[blockIdx.x * hidden_size + idx]; + variance += x * x; + } + variance = blockReduceSum(variance); + if (threadIdx.x == 0) { + s_variance = rsqrtf(variance / hidden_size + epsilon); + } + __syncthreads(); + + for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + float x = (float) input[blockIdx.x * hidden_size + idx]; + out[blockIdx.x * hidden_size + idx] = ((scalar_t) (x * s_variance)) * weight[idx]; + } +} + + +/* Converter structs for the conversion from torch types to HIP/CUDA types, + and the associated type conversions within HIP/CUDA. These helpers need + to be implemented for now because the relevant type conversion + operators/constructors are not consistently implemented by HIP/CUDA, so + a generic conversion via type casts cannot be implemented. + + Each struct should have the member static constexpr bool `exists`: + If false, the optimized kernel is not used for the corresponding torch type. + If true, the struct should be fully defined as shown in the examples below. 
+ */ +template +struct _typeConvert { static constexpr bool exists = false; }; + +#if defined(USE_ROCM) || (defined(CUDA_VERSION) && (CUDA_VERSION >= 12000)) +// CUDA < 12.0 runs into issues with packed type conversion +template<> +struct _typeConvert { + static constexpr bool exists = true; + using hip_type = __half; + using packed_hip_type = __half2; + + __device__ static inline float convert(hip_type x) { return __half2float(x); } + __device__ static inline float2 convert(packed_hip_type x) { return __half22float2(x); } + __device__ static inline hip_type convert(float x) { return __float2half_rn(x); } + __device__ static inline packed_hip_type convert(float2 x) { return __float22half2_rn(x); } +}; + +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ >= 800 +// CUDA_ARCH < 800 does not have BF16 support +// TODO: Add in ROCm support once public headers handle bf16 maturely +template<> +struct _typeConvert { + static constexpr bool exists = true; + using hip_type = __mt_bfloat16; + using packed_hip_type = __mt_bfloat162; + + __device__ static inline float convert(hip_type x) { return __bfloat162float(x); } + __device__ static inline float2 convert(packed_hip_type x) { return __bfloat1622float2(x); } + __device__ static inline hip_type convert(float x) { return __float2bfloat16(x); } + __device__ static inline packed_hip_type convert(float2 x) { return __float22bfloat162_rn(x); } +}; +#endif // defined(__MUSA_ARCH__) && __MUSA_ARCH__ >= 800 +#endif // defined(USE_ROCM) || (defined(CUDA_VERSION) && (CUDA_VERSION >= 12000)) + +/* Vector POD struct to generate vectorized and packed FP16/BF16 ops + for appropriate specializations of fused_add_rms_norm_kernel. + Only functions that are necessary in that kernel are implemented. + Alignment to 16 bytes is required to use 128-bit global memory ops. 
+ */ +template +struct alignas(16) _f16Vec { + /* Not theoretically necessary that width is a power of 2 but should + almost always be the case for optimization purposes */ + static_assert(width > 0 && (width & (width - 1)) == 0, + "Width is not a positive power of 2!"); + using Converter = _typeConvert; + using T1 = typename Converter::hip_type; + using T2 = typename Converter::packed_hip_type; + T1 data[width]; + + __device__ _f16Vec& operator+=(const _f16Vec& other) { + if constexpr (width % 2 == 0) { + #pragma unroll + for (int i = 0; i < width; i += 2) { + T2 temp{data[i], data[i+1]}; + temp += T2{other.data[i], other.data[i+1]}; + data[i] = temp.x; + data[i+1] = temp.y; + } + } else { + #pragma unroll + for (int i = 0; i < width; ++i) + data[i] += other.data[i]; + } + return *this; + } + + __device__ _f16Vec& operator*=(const _f16Vec& other) { + if constexpr (width % 2 == 0) { + #pragma unroll + for (int i = 0; i < width; i += 2) { + T2 temp{data[i], data[i+1]}; + temp *= T2{other.data[i], other.data[i+1]}; + data[i] = temp.x; + data[i+1] = temp.y; + } + } else { + #pragma unroll + for (int i = 0; i < width; ++i) + data[i] *= other.data[i]; + } + return *this; + } + + __device__ _f16Vec& operator*=(const float scale) { + if constexpr (width % 2 == 0) { + #pragma unroll + for (int i = 0; i < width; i += 2) { + float2 temp_f = Converter::convert(T2{data[i], data[i+1]}); + temp_f.x *= scale; + temp_f.y *= scale; + T2 temp = Converter::convert(temp_f); + data[i] = temp.x; + data[i+1] = temp.y; + } + } else { + #pragma unroll + for (int i = 0; i < width; ++i) { + float temp = Converter::convert(data[i]) * scale; + data[i] = Converter::convert(temp); + } + } + return *this; + } + + __device__ float sum_squares() const { + float result = 0.0f; + if constexpr (width % 2 == 0) { + #pragma unroll + for (int i = 0; i < width; i += 2) { + float2 z = Converter::convert(T2{data[i], data[i+1]}); + result += z.x * z.x + z.y * z.y; + } + } else { + #pragma unroll + for (int i 
= 0; i < width; ++i) { + float x = Converter::convert(data[i]); + result += x * x; + } + } + return result; + } +}; + +/* Function specialization in the case of FP16/BF16 tensors. + Additional optimizations we can make in this case are + packed and vectorized operations, which help with the + memory latency bottleneck. */ +template +__global__ std::enable_if_t< + (width > 0) && _typeConvert::exists> fused_add_rms_norm_kernel( + scalar_t* __restrict__ input, // [..., hidden_size] + scalar_t* __restrict__ residual, // [..., hidden_size] + const scalar_t* __restrict__ weight, // [hidden_size] + const float epsilon, + const int num_tokens, + const int hidden_size) { + // Sanity checks on our vector struct and type-punned pointer arithmetic + static_assert(std::is_pod_v<_f16Vec>); + static_assert(sizeof(_f16Vec) == sizeof(scalar_t) * width); + + const int vec_hidden_size = hidden_size / width; + __shared__ float s_variance; + float variance = 0.0f; + /* These and the argument pointers are all declared `restrict` as they are + not aliased in practice. 
Argument pointers should not be dereferenced + in this kernel as that would be undefined behavior */ + auto* __restrict__ input_v = reinterpret_cast<_f16Vec*>(input); + auto* __restrict__ residual_v = reinterpret_cast<_f16Vec*>(residual); + auto* __restrict__ weight_v = reinterpret_cast*>(weight); + + for (int idx = threadIdx.x; idx < vec_hidden_size; idx += blockDim.x) { + int id = blockIdx.x * vec_hidden_size + idx; + _f16Vec temp = input_v[id]; + temp += residual_v[id]; + variance += temp.sum_squares(); + residual_v[id] = temp; + } + /* Keep the following if-else block in sync with the + calculation of max_block_size in fused_add_rms_norm */ + if (num_tokens < 256) { + variance = blockReduceSum(variance); + } else variance = blockReduceSum(variance); + if (threadIdx.x == 0) { + s_variance = rsqrtf(variance / hidden_size + epsilon); + } + __syncthreads(); + + for (int idx = threadIdx.x; idx < vec_hidden_size; idx += blockDim.x) { + int id = blockIdx.x * vec_hidden_size + idx; + _f16Vec temp = residual_v[id]; + temp *= s_variance; + temp *= weight_v[idx]; + input_v[id] = temp; + } +} + + +/* Generic fused_add_rms_norm_kernel + The width field is not used here but necessary for other specializations. 
+ */ +template +__global__ std::enable_if_t< + (width == 0) || !_typeConvert::exists> fused_add_rms_norm_kernel( + scalar_t* __restrict__ input, // [..., hidden_size] + scalar_t* __restrict__ residual, // [..., hidden_size] + const scalar_t* __restrict__ weight, // [hidden_size] + const float epsilon, + const int num_tokens, + const int hidden_size) { + __shared__ float s_variance; + float variance = 0.0f; + + for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + scalar_t z = input[blockIdx.x * hidden_size + idx]; + z += residual[blockIdx.x * hidden_size + idx]; + float x = (float) z; + variance += x * x; + residual[blockIdx.x * hidden_size + idx] = z; + } + /* Keep the following if-else block in sync with the + calculation of max_block_size in fused_add_rms_norm */ + if (num_tokens < 256) { + variance = blockReduceSum(variance); + } else variance = blockReduceSum(variance); + if (threadIdx.x == 0) { + s_variance = rsqrtf(variance / hidden_size + epsilon); + } + __syncthreads(); + + for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + float x = (float) residual[blockIdx.x * hidden_size + idx]; + input[blockIdx.x * hidden_size + idx] = ((scalar_t) (x * s_variance)) * weight[idx]; + } +} + +} // namespace vllm + +void rms_norm( + torch::Tensor& out, // [..., hidden_size] + torch::Tensor& input, // [..., hidden_size] + torch::Tensor& weight, // [hidden_size] + float epsilon) { + int hidden_size = input.size(-1); + int num_tokens = input.numel() / hidden_size; + + dim3 grid(num_tokens); + dim3 block(std::min(hidden_size, 1024)); + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), + "rms_norm_kernel", + [&] { + vllm::rms_norm_kernel<<>>( + out.data_ptr(), + input.data_ptr(), + weight.data_ptr(), + epsilon, + num_tokens, + hidden_size); + }); +} + +#define LAUNCH_FUSED_ADD_RMS_NORM(width) \ + 
VLLM_DISPATCH_FLOATING_TYPES( \ + input.scalar_type(), \ + "fused_add_rms_norm_kernel", \ + [&] { \ + vllm::fused_add_rms_norm_kernel \ + <<>>( \ + input.data_ptr(), \ + residual.data_ptr(), \ + weight.data_ptr(), \ + epsilon, \ + num_tokens, \ + hidden_size); \ + }); + +void fused_add_rms_norm( + torch::Tensor& input, // [..., hidden_size] + torch::Tensor& residual, // [..., hidden_size] + torch::Tensor& weight, // [hidden_size] + float epsilon) { + int hidden_size = input.size(-1); + int num_tokens = input.numel() / hidden_size; + + dim3 grid(num_tokens); + /* This kernel is memory-latency bound in many scenarios. + When num_tokens is large, a smaller block size allows + for increased block occupancy on CUs and better latency + hiding on global mem ops. */ + const int max_block_size = (num_tokens < 256) ? 1024 : 256; + dim3 block(std::min(hidden_size, max_block_size)); + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + /*If the tensor types are FP16/BF16, try to use the optimized kernel + with packed + vectorized ops. + Max optimization is achieved with a width-8 vector of FP16/BF16s + since we can load at most 128 bits at once in a global memory op. + However, this requires each tensor's data to be aligned to 16 + bytes. 
+ */ + auto inp_ptr = reinterpret_cast(input.data_ptr()); + auto res_ptr = reinterpret_cast(residual.data_ptr()); + auto wt_ptr = reinterpret_cast(weight.data_ptr()); + bool ptrs_are_aligned = inp_ptr % 16 == 0 && res_ptr % 16 == 0 \ + && wt_ptr % 16 == 0; + if (ptrs_are_aligned && hidden_size % 8 == 0) { + LAUNCH_FUSED_ADD_RMS_NORM(8); + } else { + LAUNCH_FUSED_ADD_RMS_NORM(0); + } +} diff --git a/csrc_musa/moe/moe_ops.cpp b/csrc_musa/moe/moe_ops.cpp new file mode 100644 index 0000000..35c3284 --- /dev/null +++ b/csrc_musa/moe/moe_ops.cpp @@ -0,0 +1,7 @@ +#include "moe_ops.h" + +#include + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("topk_softmax", &topk_softmax, "Apply topk softmax to the gating outputs."); +} diff --git a/csrc_musa/moe/moe_ops.h b/csrc_musa/moe/moe_ops.h new file mode 100644 index 0000000..a01be3e --- /dev/null +++ b/csrc_musa/moe/moe_ops.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +void topk_softmax( + torch::Tensor& topk_weights, + torch::Tensor& topk_indices, + torch::Tensor& token_expert_indices, + torch::Tensor& gating_output); diff --git a/csrc_musa/moe/topk_softmax_kernels.mu b/csrc_musa/moe/topk_softmax_kernels.mu new file mode 100644 index 0000000..b42b6b4 --- /dev/null +++ b/csrc_musa/moe/topk_softmax_kernels.mu @@ -0,0 +1,500 @@ +/* + * Adapted from https://github.com/NVIDIA/TensorRT-LLM/blob/v0.7.1/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2024, The vLLM team. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include +#include + +namespace vllm { +namespace moe { + +static constexpr int WARP_SIZE = 32; + +/// Aligned array type +template < + typename T, + /// Number of elements in the array + int N, + /// Alignment requirement in bytes + int Alignment = sizeof(T) * N +> +class alignas(Alignment) AlignedArray { + float data[N]; +}; + +// ====================== Softmax things =============================== +// We have our own implementation of softmax here so we can support transposing the output +// in the softmax kernel when we extend this module to support expert-choice routing. +template +__launch_bounds__(TPB) __global__ + void moeSoftmax(const float* input, const bool* finished, float* output, const int num_cols) +{ + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + __shared__ float normalizing_factor; + __shared__ float float_max; + + const int thread_row_offset = blockIdx.x * num_cols; + + cub::Sum sum; + float threadData(-FLT_MAX); + + // Don't touch finished rows. 
+ if ((finished != nullptr) && finished[blockIdx.x]) + { + return; + } + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) + { + const int idx = thread_row_offset + ii; + threadData = max(static_cast(input[idx]), threadData); + } + + const float maxElem = BlockReduce(tmpStorage).Reduce(threadData, cub::Max()); + if (threadIdx.x == 0) + { + float_max = maxElem; + } + __syncthreads(); + + threadData = 0; + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) + { + const int idx = thread_row_offset + ii; + threadData += exp((static_cast(input[idx]) - float_max)); + } + + const auto Z = BlockReduce(tmpStorage).Reduce(threadData, sum); + + if (threadIdx.x == 0) + { + normalizing_factor = 1.f / Z; + } + __syncthreads(); + + for (int ii = threadIdx.x; ii < num_cols; ii += TPB) + { + const int idx = thread_row_offset + ii; + const float val = exp((static_cast(input[idx]) - float_max)) * normalizing_factor; + output[idx] = val; + } +} + +template +__launch_bounds__(TPB) __global__ void moeTopK(const float* inputs_after_softmax, const bool* finished, float* output, + int* indices, int* source_rows, const int num_experts, const int k, const int start_expert, const int end_expert) +{ + + using cub_kvp = cub::KeyValuePair; + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + cub_kvp thread_kvp; + cub::ArgMax arg_max; + + const int num_rows = gridDim.x; + const int block_row = blockIdx.x; + + const bool row_is_active = finished ? 
!finished[block_row] : true; + const int thread_read_offset = blockIdx.x * num_experts; + for (int k_idx = 0; k_idx < k; ++k_idx) + { + thread_kvp.key = 0; + thread_kvp.value = -1.f; // This is OK because inputs are probabilities + + cub_kvp inp_kvp; + for (int expert = threadIdx.x; expert < num_experts; expert += TPB) + { + const int idx = thread_read_offset + expert; + inp_kvp.key = expert; + inp_kvp.value = inputs_after_softmax[idx]; + + for (int prior_k = 0; prior_k < k_idx; ++prior_k) + { + const int prior_winning_expert = indices[k * block_row + prior_k]; + + if (prior_winning_expert == expert) + { + inp_kvp = thread_kvp; + } + } + + thread_kvp = arg_max(inp_kvp, thread_kvp); + } + + const cub_kvp result_kvp = BlockReduce(tmpStorage).Reduce(thread_kvp, arg_max); + if (threadIdx.x == 0) + { + // Ignore experts the node isn't responsible for with expert parallelism + const int expert = result_kvp.key; + const bool node_uses_expert = expert >= start_expert && expert < end_expert; + const bool should_process_row = row_is_active && node_uses_expert; + + const int idx = k * block_row + k_idx; + output[idx] = result_kvp.value; + indices[idx] = should_process_row ? (expert - start_expert) : num_experts; + assert(indices[idx] >= 0); + source_rows[idx] = k_idx * num_rows + block_row; + } + __syncthreads(); + } +} + +// ====================== TopK softmax things =============================== + +/* + A Top-K gating softmax written to exploit when the number of experts in the MoE layers + are a small power of 2. This allows us to cleanly share the rows among the threads in + a single warp and eliminate communication between warps (so no need to use shared mem). + + It fuses the softmax, max and argmax into a single kernel. + + Limitations: + 1) This implementation is intended for when the number of experts is a small power of 2. + 2) This implementation assumes k is small, but will work for any k. 
+*/ + +template +__launch_bounds__(WARPS_PER_CTA* WARP_SIZE) __global__ + void topkGatingSoftmax(const float* input, const bool* finished, float* output, const int num_rows, int* indices, + int* source_rows, const int k, const int start_expert, const int end_expert) +{ + // We begin by enforcing compile time assertions and setting up compile time constants. + static_assert(VPT == (VPT & -VPT), "VPT must be power of 2"); + static_assert(NUM_EXPERTS == (NUM_EXPERTS & -NUM_EXPERTS), "NUM_EXPERTS must be power of 2"); + static_assert(BYTES_PER_LDG == (BYTES_PER_LDG & -BYTES_PER_LDG), "BYTES_PER_LDG must be power of 2"); + static_assert(BYTES_PER_LDG <= 16, "BYTES_PER_LDG must be leq 16"); + + // Number of bytes each thread pulls in per load + static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(float); + static constexpr int ELTS_PER_ROW = NUM_EXPERTS; + static constexpr int THREADS_PER_ROW = ELTS_PER_ROW / VPT; + static constexpr int LDG_PER_THREAD = VPT / ELTS_PER_LDG; + + // Restrictions based on previous section. + static_assert(VPT % ELTS_PER_LDG == 0, "The elements per thread must be a multiple of the elements per ldg"); + static_assert(WARP_SIZE % THREADS_PER_ROW == 0, "The threads per row must cleanly divide the threads per warp"); + static_assert(THREADS_PER_ROW == (THREADS_PER_ROW & -THREADS_PER_ROW), "THREADS_PER_ROW must be power of 2"); + static_assert(THREADS_PER_ROW <= WARP_SIZE, "THREADS_PER_ROW can be at most warp size"); + + // We have NUM_EXPERTS elements per row. We specialize for small #experts + static constexpr int ELTS_PER_WARP = WARP_SIZE * VPT; + static constexpr int ROWS_PER_WARP = ELTS_PER_WARP / ELTS_PER_ROW; + static constexpr int ROWS_PER_CTA = WARPS_PER_CTA * ROWS_PER_WARP; + + // Restrictions for previous section. + static_assert(ELTS_PER_WARP % ELTS_PER_ROW == 0, "The elts per row must cleanly divide the total elt per warp"); + + // ===================== From this point, we finally start computing run-time variables. 
======================== + + // Compute CTA and warp rows. We pack multiple rows into a single warp, and a block contains WARPS_PER_CTA warps. + // This, each block processes a chunk of rows. We start by computing the start row for each block. + const int cta_base_row = blockIdx.x * ROWS_PER_CTA; + + // Now, using the base row per thread block, we compute the base row per warp. + const int warp_base_row = cta_base_row + threadIdx.y * ROWS_PER_WARP; + + // The threads in a warp are split into sub-groups that will work on a row. + // We compute row offset for each thread sub-group + const int thread_row_in_warp = threadIdx.x / THREADS_PER_ROW; + const int thread_row = warp_base_row + thread_row_in_warp; + + // Threads with indices out of bounds should early exit here. + if (thread_row >= num_rows) + { + return; + } + const bool row_is_active = finished ? !finished[thread_row] : true; + + // We finally start setting up the read pointers for each thread. First, each thread jumps to the start of the + // row it will read. + const float* thread_row_ptr = input + thread_row * ELTS_PER_ROW; + + // Now, we compute the group each thread belong to in order to determine the first column to start loads. + const int thread_group_idx = threadIdx.x % THREADS_PER_ROW; + const int first_elt_read_by_thread = thread_group_idx * ELTS_PER_LDG; + const float* thread_read_ptr = thread_row_ptr + first_elt_read_by_thread; + + // Determine the pointer type to use to read in the data depending on the BYTES_PER_LDG template param. In theory, + // this can support all powers of 2 up to 16. + // NOTE(woosuk): The original implementation uses CUTLASS aligned array here. + // We defined our own aligned array and use it here to avoid the dependency on CUTLASS. 
+ using AccessType = AlignedArray; + + // Finally, we pull in the data from global mem + float row_chunk[VPT]; + AccessType* row_chunk_vec_ptr = reinterpret_cast(&row_chunk); + const AccessType* vec_thread_read_ptr = reinterpret_cast(thread_read_ptr); +#pragma unroll + for (int ii = 0; ii < LDG_PER_THREAD; ++ii) + { + row_chunk_vec_ptr[ii] = vec_thread_read_ptr[ii * THREADS_PER_ROW]; + } + + // First, we perform a max reduce within the thread. We can do the max in fp16 safely (I think) and just + // convert to float afterwards for the exp + sum reduction. + float thread_max = row_chunk[0]; +#pragma unroll + for (int ii = 1; ii < VPT; ++ii) + { + thread_max = max(thread_max, row_chunk[ii]); + } + +// Now, we find the max within the thread group and distribute among the threads. We use a butterfly reduce. +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) + { + thread_max = max(thread_max, __shfl_xor_sync(0xFFFFFFFF, thread_max, mask, THREADS_PER_ROW)); + } + + // From this point, thread max in all the threads have the max within the row. + // Now, we subtract the max from each element in the thread and take the exp. We also compute the thread local sum. + float row_sum = 0; +#pragma unroll + for (int ii = 0; ii < VPT; ++ii) + { + row_chunk[ii] = expf(row_chunk[ii] - thread_max); + row_sum += row_chunk[ii]; + } + +// Now, we perform the sum reduce within each thread group. Similar to the max reduce, we use a bufferfly pattern. +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) + { + row_sum += __shfl_xor_sync(0xFFFFFFFF, row_sum, mask, THREADS_PER_ROW); + } + + // From this point, all threads have the max and the sum for their rows in the thread_max and thread_sum variables + // respectively. Finally, we can scale the rows for the softmax. Technically, for top-k gating we don't need to + // compute the entire softmax row. We can likely look at the maxes and only compute for the top-k values in the row. 
+ // However, this kernel will likely not be a bottle neck and it seems better to closer match torch and find the + // argmax after computing the softmax. + const float reciprocal_row_sum = 1.f / row_sum; + +#pragma unroll + for (int ii = 0; ii < VPT; ++ii) + { + row_chunk[ii] = row_chunk[ii] * reciprocal_row_sum; + } + + // Now, softmax_res contains the softmax of the row chunk. Now, I want to find the topk elements in each row, along + // with the max index. + int start_col = first_elt_read_by_thread; + static constexpr int COLS_PER_GROUP_LDG = ELTS_PER_LDG * THREADS_PER_ROW; + + for (int k_idx = 0; k_idx < k; ++k_idx) + { + // First, each thread does the local argmax + float max_val = row_chunk[0]; + int expert = start_col; +#pragma unroll + for (int ldg = 0, col = start_col; ldg < LDG_PER_THREAD; ++ldg, col += COLS_PER_GROUP_LDG) + { +#pragma unroll + for (int ii = 0; ii < ELTS_PER_LDG; ++ii) + { + float val = row_chunk[ldg * ELTS_PER_LDG + ii]; + + // No check on the experts here since columns with the smallest index are processed first and only + // updated if > (not >=) + if (val > max_val) + { + max_val = val; + expert = col + ii; + } + } + } + +// Now, we perform the argmax reduce. We use the butterfly pattern so threads reach consensus about the max. +// This will be useful for K > 1 so that the threads can agree on "who" had the max value. That thread can +// then blank out their max with -inf and the warp can run more iterations... +#pragma unroll + for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2) + { + float other_max = __shfl_xor_sync(0xFFFFFFFF, max_val, mask, THREADS_PER_ROW); + int other_expert = __shfl_xor_sync(0xFFFFFFFF, expert, mask, THREADS_PER_ROW); + + // We want lower indices to "win" in every thread so we break ties this way + if (other_max > max_val || (other_max == max_val && other_expert < expert)) + { + max_val = other_max; + expert = other_expert; + } + } + + // Write the max for this k iteration to global memory. 
+ if (thread_group_idx == 0) + { + // Add a guard to ignore experts not included by this node + const bool node_uses_expert = expert >= start_expert && expert < end_expert; + const bool should_process_row = row_is_active && node_uses_expert; + + // The lead thread from each sub-group will write out the final results to global memory. (This will be a + // single) thread per row of the input/output matrices. + const int idx = k * thread_row + k_idx; + output[idx] = max_val; + indices[idx] = should_process_row ? (expert - start_expert) : NUM_EXPERTS; + source_rows[idx] = k_idx * num_rows + thread_row; + } + + // Finally, we clear the value in the thread with the current max if there is another iteration to run. + if (k_idx + 1 < k) + { + const int ldg_group_for_expert = expert / COLS_PER_GROUP_LDG; + const int thread_to_clear_in_group = (expert / ELTS_PER_LDG) % THREADS_PER_ROW; + + // Only the thread in the group which produced the max will reset the "winning" value to -inf. + if (thread_group_idx == thread_to_clear_in_group) + { + const int offset_for_expert = expert % ELTS_PER_LDG; + // Safe to set to any negative value since row_chunk values must be between 0 and 1. + row_chunk[ldg_group_for_expert * ELTS_PER_LDG + offset_for_expert] = -10000.f; + } + } + } +} + +namespace detail +{ +// Constructs some constants needed to partition the work across threads at compile time. 
+template +struct TopkConstants +{ + static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(float); + static_assert(EXPERTS / (ELTS_PER_LDG * WARP_SIZE) == 0 || EXPERTS % (ELTS_PER_LDG * WARP_SIZE) == 0, ""); + static constexpr int VECs_PER_THREAD = std::max(1, EXPERTS / (ELTS_PER_LDG * WARP_SIZE)); + static constexpr int VPT = VECs_PER_THREAD * ELTS_PER_LDG; + static constexpr int THREADS_PER_ROW = EXPERTS / VPT; + static constexpr int ROWS_PER_WARP = WARP_SIZE / THREADS_PER_ROW; +}; +} // namespace detail + +template +void topkGatingSoftmaxLauncherHelper(const float* input, const bool* finished, float* output, int* indices, + int* source_row, const int num_rows, const int k, const int start_expert, const int end_expert, musaStream_t stream) +{ + static constexpr std::size_t MAX_BYTES_PER_LDG = 16; + + static constexpr int BYTES_PER_LDG = std::min(MAX_BYTES_PER_LDG, sizeof(float) * EXPERTS); + using Constants = detail::TopkConstants; + static constexpr int VPT = Constants::VPT; + static constexpr int ROWS_PER_WARP = Constants::ROWS_PER_WARP; + const int num_warps = (num_rows + ROWS_PER_WARP - 1) / ROWS_PER_WARP; + const int num_blocks = (num_warps + WARPS_PER_TB - 1) / WARPS_PER_TB; + + dim3 block_dim(WARP_SIZE, WARPS_PER_TB); + topkGatingSoftmax<<>>( + input, finished, output, num_rows, indices, source_row, k, start_expert, end_expert); +} + +#define LAUNCH_SOFTMAX(NUM_EXPERTS, WARPS_PER_TB) \ + topkGatingSoftmaxLauncherHelper( \ + gating_output, nullptr, topk_weights, topk_indicies, \ + token_expert_indices, num_tokens, topk, 0, num_experts, \ + stream); + +void topkGatingSoftmaxKernelLauncher( + const float* gating_output, + float* topk_weights, + int* topk_indicies, + int* token_expert_indices, + float* softmax_workspace, + const int num_tokens, + const int num_experts, + const int topk, + musaStream_t stream) { + static constexpr int WARPS_PER_TB = 4; + switch (num_experts) { + case 1: + LAUNCH_SOFTMAX(1, WARPS_PER_TB); + break; + case 2: + 
LAUNCH_SOFTMAX(2, WARPS_PER_TB); + break; + case 4: + LAUNCH_SOFTMAX(4, WARPS_PER_TB); + break; + case 8: + LAUNCH_SOFTMAX(8, WARPS_PER_TB); + break; + case 16: + LAUNCH_SOFTMAX(16, WARPS_PER_TB); + break; + case 32: + LAUNCH_SOFTMAX(32, WARPS_PER_TB); + break; + case 64: + LAUNCH_SOFTMAX(64, WARPS_PER_TB); + break; + case 128: + LAUNCH_SOFTMAX(128, WARPS_PER_TB); + break; + case 256: + LAUNCH_SOFTMAX(256, WARPS_PER_TB); + break; + default: { + TORCH_CHECK(softmax_workspace != nullptr, + "softmax_workspace must be provided for num_experts that are not a power of 2."); + static constexpr int TPB = 256; + moeSoftmax<<>>( + gating_output, nullptr, softmax_workspace, num_experts); + moeTopK<<>>( + softmax_workspace, nullptr, topk_weights, topk_indicies, token_expert_indices, + num_experts, topk, 0, num_experts); + } + } +} + +} // namespace moe +} // namespace vllm + +void topk_softmax( + torch::Tensor& topk_weights, // [num_tokens, topk] + torch::Tensor& topk_indices, // [num_tokens, topk] + torch::Tensor& token_expert_indices, // [num_tokens, topk] + torch::Tensor& gating_output) // [num_tokens, num_experts] +{ + const int num_experts = gating_output.size(-1); + const int num_tokens = gating_output.numel() / num_experts; + const int topk = topk_weights.size(-1); + + const bool is_pow_2 = (num_experts != 0) && ((num_experts & (num_experts - 1)) == 0); + const bool needs_workspace = !is_pow_2 || num_experts > 256; + const int64_t workspace_size = needs_workspace ? 
num_tokens * num_experts : 0; + + const at::musa::OptionalMUSAGuard device_guard(device_of(gating_output)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + torch::Tensor softmax_workspace = torch::empty({workspace_size}, gating_output.options()); + vllm::moe::topkGatingSoftmaxKernelLauncher( + gating_output.data_ptr(), + topk_weights.data_ptr(), + topk_indices.data_ptr(), + token_expert_indices.data_ptr(), + softmax_workspace.data_ptr(), + num_tokens, + num_experts, + topk, + stream); +} diff --git a/csrc_musa/moe_align_block_size_kernels.mu b/csrc_musa/moe_align_block_size_kernels.mu new file mode 100644 index 0000000..f7aef34 --- /dev/null +++ b/csrc_musa/moe_align_block_size_kernels.mu @@ -0,0 +1,125 @@ +#include +#include "torch_musa/csrc/aten/musa/MUSAContext.h" + +#include +#include + +#include "musa_compat.h" +#include "dispatch_utils.h" + +#define CEILDIV(x,y) (((x) + (y) - 1) / (y)) + +namespace vllm { + +namespace { +__device__ __forceinline__ int32_t index(int32_t total_col, int32_t row, int32_t col) { + // don't worry about overflow because num_experts is relatively small + return row * total_col + col; +} +} + +template +__global__ void moe_align_block_size_kernel(scalar_t *__restrict__ topk_ids, + int32_t *sorted_token_ids, + int32_t *expert_ids, + int32_t *total_tokens_post_pad, + int32_t num_experts, + int32_t block_size, + size_t numel) { + const size_t tokens_per_thread = CEILDIV(numel, blockDim.x); + const size_t start_idx = threadIdx.x * tokens_per_thread; + + extern __shared__ int32_t shared_mem[]; + + int32_t* tokens_cnts = shared_mem; // 2d tensor with shape (num_experts + 1, num_experts) + int32_t* cumsum = shared_mem + (num_experts + 1) * num_experts; // 1d tensor with shape (num_experts + 1) + + for (int i = 0; i < num_experts; ++i) { + tokens_cnts[index(num_experts, threadIdx.x + 1, i)] = 0; + } + + /** + * In the first step we compute token_cnts[thread_index + 1][expert_index], + * which counts how many tokens in the 
token shard of thread_index are assigned + * to expert expert_index. + */ + for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { + ++tokens_cnts[index(num_experts, threadIdx.x + 1, topk_ids[i])]; + } + + __syncthreads(); + + // For each expert we accumulate the token counts from the different threads. + tokens_cnts[index(num_experts, 0, threadIdx.x)] = 0; + for (int i = 1; i <= blockDim.x; ++i) { + tokens_cnts[index(num_experts, i, threadIdx.x)] += tokens_cnts[index(num_experts, i-1, threadIdx.x)]; + } + + __syncthreads(); + + // We accumulate the token counts of all experts in thread 0. + if (threadIdx.x == 0) { + cumsum[0] = 0; + for (int i = 1; i <= num_experts; ++i) { + cumsum[i] = cumsum[i-1] + CEILDIV(tokens_cnts[index(num_experts, blockDim.x, i - 1)], block_size) * block_size; + } + *total_tokens_post_pad = cumsum[num_experts]; + } + + __syncthreads(); + + /** + * For each expert, each thread processes the tokens of the corresponding blocks + * and stores the corresponding expert_id for each block. + */ + for (int i = cumsum[threadIdx.x];i < cumsum[threadIdx.x + 1];i += block_size) { + expert_ids[i / block_size] = threadIdx.x; + } + + /** + * Each thread processes a token shard, calculating the index of each token after + * sorting by expert number. Given the example topk_ids = [0,1,2,1,2,3,0,3,4] and + * block_size = 4, then the output would be [0, 6, *, *, 1, 3, *, *, 2, 4, *, *, 5, 7, *, *, 8, *, *, *], + * where * represents a padding value(preset in python). + */ + for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { + int32_t expert_id = topk_ids[i]; + /** The cumsum[expert_id] stores the starting index of the tokens that the + * expert with expert_id needs to process, and tokens_cnts[threadIdx.x][expert_id] + * stores the indices of the tokens processed by the expert with expert_id within + * the current thread's token shard. 
+ */ + int32_t rank_post_pad = tokens_cnts[index(num_experts, threadIdx.x, expert_id)] + cumsum[expert_id]; + sorted_token_ids[rank_post_pad] = i; + ++tokens_cnts[index(num_experts, threadIdx.x, expert_id)]; + } +} +} + +void moe_align_block_size( + torch::Tensor topk_ids, + int num_experts, + int block_size, + torch::Tensor sorted_token_ids, + torch::Tensor experts_ids, + torch::Tensor num_tokens_post_pad) { + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_INTEGRAL_TYPES( + topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] { + // calc needed amount of shared mem for `tokens_cnts` and `cumsum` tensors + const int32_t shared_mem = ((num_experts + 1) * num_experts + (num_experts + 1)) * sizeof(int32_t); + + // set dynamic shared mem + auto kernel = vllm::moe_align_block_size_kernel; + AT_MUSA_CHECK( + VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize((void *)kernel, shared_mem)); + kernel<<<1, num_experts, shared_mem, stream>>>( + topk_ids.data_ptr(), + sorted_token_ids.data_ptr(), + experts_ids.data_ptr(), + num_tokens_post_pad.data_ptr(), + num_experts, + block_size, + topk_ids.numel()); + }); +} diff --git a/csrc_musa/musa_compat.h b/csrc_musa/musa_compat.h new file mode 100644 index 0000000..3443728 --- /dev/null +++ b/csrc_musa/musa_compat.h @@ -0,0 +1,38 @@ +#pragma once + +#ifdef USE_ROCM +#include +#endif + +#ifndef USE_ROCM + #define WARP_SIZE 32 +#else + #define WARP_SIZE warpSize +#endif + +#ifndef USE_ROCM + #define VLLM_LDG(arg) __ldg(arg) +#else + #define VLLM_LDG(arg) *(arg) +#endif + +#ifndef USE_ROCM + #define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor_sync(uint32_t(-1), var, lane_mask) +#else + #define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor(var, lane_mask) +#endif + +#ifndef USE_ROCM + #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane) +#else + #define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane) +#endif + +#ifndef USE_ROCM + #define 
VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \ + musaFuncSetAttribute(FUNC, musaFuncAttributeMaxDynamicSharedMemorySize, VAL) +#else + #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \ + hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL) +#endif + diff --git a/csrc_musa/musa_utils.h b/csrc_musa/musa_utils.h new file mode 100644 index 0000000..1483484 --- /dev/null +++ b/csrc_musa/musa_utils.h @@ -0,0 +1,10 @@ +#pragma once + +#include + +int get_device_attribute( + int attribute, + int device_id); + +int get_max_shared_memory_per_block_device_attribute( + int device_id); diff --git a/csrc_musa/musa_utils_kernels.mu b/csrc_musa/musa_utils_kernels.mu new file mode 100644 index 0000000..a4dcd85 --- /dev/null +++ b/csrc_musa/musa_utils_kernels.mu @@ -0,0 +1,35 @@ +#ifdef USE_ROCM + #include + #include +#endif +int get_device_attribute( + int attribute, + int device_id) +{ + int device, value; + if (device_id < 0) { + musaGetDevice(&device); + } + else { + device = device_id; + } + musaDeviceGetAttribute(&value, static_cast(attribute), device); + return value; +} + + +int get_max_shared_memory_per_block_device_attribute( + int device_id) +{ +int attribute; +// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html +// cudaDevAttrMaxSharedMemoryPerBlockOptin = 97 if not is_hip() else 74 + +#ifdef USE_ROCM + attribute = hipDeviceAttributeMaxSharedMemoryPerBlock; +#else + attribute = musaDevAttrMaxSharedMemoryPerBlockOptin; +#endif + + return get_device_attribute(attribute, device_id); +} diff --git a/csrc_musa/ops.h b/csrc_musa/ops.h new file mode 100644 index 0000000..9541adc --- /dev/null +++ b/csrc_musa/ops.h @@ -0,0 +1,206 @@ +#pragma once + +#include + +void paged_attention_v1( + torch::Tensor& out, + torch::Tensor& query, + torch::Tensor& key_cache, + torch::Tensor& value_cache, + int num_kv_heads, + float scale, + torch::Tensor& block_tables, + torch::Tensor& seq_lens, + int 
block_size, + int max_seq_len, + const c10::optional& alibi_slopes, + const std::string& kv_cache_dtype, + float kv_scale); + +void paged_attention_v2( + torch::Tensor& out, + torch::Tensor& exp_sums, + torch::Tensor& max_logits, + torch::Tensor& tmp_out, + torch::Tensor& query, + torch::Tensor& key_cache, + torch::Tensor& value_cache, + int num_kv_heads, + float scale, + torch::Tensor& block_tables, + torch::Tensor& seq_lens, + int block_size, + int max_seq_len, + const c10::optional& alibi_slopes, + const std::string& kv_cache_dtype, + float kv_scale); + +void rms_norm( + torch::Tensor& out, + torch::Tensor& input, + torch::Tensor& weight, + float epsilon); + +void fused_add_rms_norm( + torch::Tensor& input, + torch::Tensor& residual, + torch::Tensor& weight, + float epsilon); + +void rotary_embedding( + torch::Tensor& positions, + torch::Tensor& query, + torch::Tensor& key, + int head_size, + torch::Tensor& cos_sin_cache, + bool is_neox); + +void batched_rotary_embedding( + torch::Tensor& positions, + torch::Tensor& query, + torch::Tensor& key, + int head_size, + torch::Tensor& cos_sin_cache, + bool is_neox, + int rot_dim, + torch::Tensor& cos_sin_cache_offsets); + +void silu_and_mul( + torch::Tensor& out, + torch::Tensor& input); + +void gelu_and_mul( + torch::Tensor& out, + torch::Tensor& input); + +void gelu_tanh_and_mul( + torch::Tensor& out, + torch::Tensor& input); + +void gelu_new( + torch::Tensor& out, + torch::Tensor& input); + +void gelu_fast( + torch::Tensor& out, + torch::Tensor& input); + +#ifndef USE_ROCM +torch::Tensor aqlm_gemm( + const torch::Tensor& input, + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& scales, + const torch::Tensor& codebook_partition_sizes, + const std::optional& bias +); + +torch::Tensor aqlm_dequant( + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& codebook_partition_sizes +); + +torch::Tensor awq_gemm( + torch::Tensor _in_feats, + torch::Tensor 
_kernel, + torch::Tensor _scaling_factors, + torch::Tensor _zeros, + int split_k_iters); + +torch::Tensor awq_dequantize( + torch::Tensor _kernel, + torch::Tensor _scaling_factors, + torch::Tensor _zeros, + int split_k_iters, + int thx, + int thy); + +torch::Tensor marlin_gemm( + torch::Tensor& a, + torch::Tensor& b_q_weight, + torch::Tensor& b_scales, + torch::Tensor& workspace, + int64_t size_m, + int64_t size_n, + int64_t size_k); + +torch::Tensor gptq_marlin_gemm( + torch::Tensor &a, + torch::Tensor &b_q_weight, + torch::Tensor &b_scales, + torch::Tensor &g_idx, + torch::Tensor &perm, + torch::Tensor &workspace, + int64_t num_bits, + int64_t size_m, + int64_t size_n, + int64_t size_k, + bool is_k_full); + +torch::Tensor gptq_marlin_repack( + torch::Tensor &b_q_weight, + torch::Tensor &perm, + int64_t size_k, + int64_t size_n, + int64_t num_bits); +#endif + +void squeezellm_gemm( + torch::Tensor vec, + torch::Tensor mat, + torch::Tensor mul, + torch::Tensor lookup_table); + +torch::Tensor gptq_gemm( + torch::Tensor a, + torch::Tensor b_q_weight, + torch::Tensor b_gptq_qzeros, + torch::Tensor b_gptq_scales, + torch::Tensor b_g_idx, + bool use_exllama, + int bit); + +void gptq_shuffle( + torch::Tensor q_weight, + torch::Tensor q_perm, + int bit); + +void static_scaled_fp8_quant( + torch::Tensor& out, + torch::Tensor& input, + torch::Tensor& scale); + +void dynamic_scaled_fp8_quant( + torch::Tensor& out, + torch::Tensor& input, + torch::Tensor& scale); + +void moe_align_block_size( + torch::Tensor topk_ids, + int num_experts, + int block_size, + torch::Tensor sorted_token_ids, + torch::Tensor experts_ids, + torch::Tensor num_tokens_post_pad); + +#ifndef USE_ROCM +using fptr_t = uint64_t; +fptr_t init_custom_ar(torch::Tensor &meta, torch::Tensor &rank_data, + const std::vector &handles, + const std::vector &offsets, int rank, + bool full_nvlink); +bool should_custom_ar(torch::Tensor &inp, int max_size, int world_size, + bool full_nvlink); +void all_reduce_reg(fptr_t 
_fa, torch::Tensor &inp, torch::Tensor &out); +void all_reduce_unreg(fptr_t _fa, torch::Tensor &inp, torch::Tensor ®_buffer, + torch::Tensor &out); +void dispose(fptr_t _fa); +int meta_size(); +void register_buffer(fptr_t _fa, torch::Tensor &t, + const std::vector &handles, + const std::vector &offsets); +std::pair, std::vector> get_graph_buffer_ipc_meta(fptr_t _fa); +void register_graph_buffers(fptr_t _fa, const std::vector &handles, + const std::vector> &offsets); +#endif diff --git a/csrc_musa/pos_encoding_kernels.mu b/csrc_musa/pos_encoding_kernels.mu new file mode 100644 index 0000000..e56fcf5 --- /dev/null +++ b/csrc_musa/pos_encoding_kernels.mu @@ -0,0 +1,226 @@ +#include +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include "musa_compat.h" +#include "dispatch_utils.h" + +namespace vllm { + +template +inline __device__ void apply_token_rotary_embedding( + scalar_t* __restrict__ arr, + const scalar_t* __restrict__ cos_ptr, + const scalar_t* __restrict__ sin_ptr, + int rot_offset, + int embed_dim) +{ + int x_index, y_index; + scalar_t cos, sin; + if (IS_NEOX) { + // GPT-NeoX style rotary embedding. + x_index = rot_offset; + y_index = embed_dim + rot_offset; + cos = VLLM_LDG(cos_ptr + x_index); + sin = VLLM_LDG(sin_ptr + x_index); + } else { + // GPT-J style rotary embedding. 
+ x_index = 2 * rot_offset; + y_index = 2 * rot_offset + 1; + cos = VLLM_LDG(cos_ptr + x_index / 2); + sin = VLLM_LDG(sin_ptr + x_index / 2); + } + + const scalar_t x = arr[x_index]; + const scalar_t y = arr[y_index]; + arr[x_index] = x * cos - y * sin; + arr[y_index] = y * cos + x * sin; +} + +template +inline __device__ void apply_rotary_embedding( + scalar_t* __restrict__ query, // [batch_size, seq_len, num_heads, head_size] or [num_tokens, num_heads, head_size] + scalar_t* __restrict__ key, // [batch_size, seq_len, num_kv_heads, head_size] or [num_tokens, num_kv_heads, head_size] + const scalar_t* cache_ptr, + const int head_size, + const int num_heads, + const int num_kv_heads, + const int rot_dim, + const int token_idx, + const int64_t query_stride, + const int64_t key_stride) +{ + const int embed_dim = rot_dim / 2; + const scalar_t* cos_ptr = cache_ptr; + const scalar_t* sin_ptr = cache_ptr + embed_dim; + + const int nq = num_heads * embed_dim; + for (int i = threadIdx.x; i < nq; i += blockDim.x) { + const int head_idx = i / embed_dim; + const int64_t token_head = token_idx * query_stride + head_idx * head_size; + const int rot_offset = i % embed_dim; + apply_token_rotary_embedding(query + token_head, cos_ptr, + sin_ptr, rot_offset, embed_dim); + } + + const int nk = num_kv_heads * embed_dim; + for (int i = threadIdx.x; i < nk; i += blockDim.x) { + const int head_idx = i / embed_dim; + const int64_t token_head = token_idx * key_stride + head_idx * head_size; + const int rot_offset = i % embed_dim; + apply_token_rotary_embedding(key + token_head, cos_ptr, + sin_ptr, rot_offset, embed_dim); + } +} + +template +__global__ void rotary_embedding_kernel( + const int64_t* __restrict__ positions, // [batch_size, seq_len] or [num_tokens] + scalar_t* __restrict__ query, // [batch_size, seq_len, num_heads, head_size] or [num_tokens, num_heads, head_size] + scalar_t* __restrict__ key, // [batch_size, seq_len, num_kv_heads, head_size] or [num_tokens, num_kv_heads, 
head_size] + const scalar_t* __restrict__ cos_sin_cache, // [max_position, 2, rot_dim // 2] + const int rot_dim, + const int64_t query_stride, + const int64_t key_stride, + const int num_heads, + const int num_kv_heads, + const int head_size) { + // Each thread block is responsible for one token. + const int token_idx = blockIdx.x; + int64_t pos = positions[token_idx]; + const scalar_t* cache_ptr = cos_sin_cache + pos * rot_dim; + + apply_rotary_embedding(query, key, cache_ptr, head_size, num_heads, num_kv_heads, rot_dim, token_idx, query_stride, key_stride); +} + +template +__global__ void batched_rotary_embedding_kernel( + const int64_t* __restrict__ positions, // [batch_size, seq_len] or [num_tokens] + scalar_t* __restrict__ query, // [batch_size, seq_len, num_heads, head_size] or [num_tokens, num_heads, head_size] + scalar_t* __restrict__ key, // [batch_size, seq_len, num_kv_heads, head_size] or [num_tokens, num_kv_heads, head_size] + const scalar_t* __restrict__ cos_sin_cache, // [max_position, 2, rot_dim // 2] + const int64_t* __restrict__ cos_sin_cache_offsets, // [batch_size, seq_len] or [num_tokens] + const int rot_dim, + const int64_t query_stride, + const int64_t key_stride, + const int num_heads, + const int num_kv_heads, + const int head_size) { + // Each thread block is responsible for one token. 
+ const int token_idx = blockIdx.x; + int64_t pos = positions[token_idx]; + int64_t cos_sin_cache_offset = cos_sin_cache_offsets[token_idx]; + const scalar_t* cache_ptr = cos_sin_cache + (cos_sin_cache_offset + pos) * rot_dim; + + apply_rotary_embedding(query, key, cache_ptr, head_size, num_heads, num_kv_heads, rot_dim, token_idx, query_stride, key_stride); +} + +} // namespace vllm + +void rotary_embedding( + torch::Tensor& positions, // [batch_size, seq_len] or [num_tokens] + torch::Tensor& query, // [batch_size, seq_len, num_heads * head_size] or [num_tokens, num_heads * head_size] + torch::Tensor& key, // [batch_size, seq_len, num_kv_heads * head_size] or [num_tokens, num_kv_heads * head_size] + int head_size, + torch::Tensor& cos_sin_cache, // [max_position, rot_dim] + bool is_neox) { + int64_t num_tokens = query.numel() / query.size(-1); + int rot_dim = cos_sin_cache.size(1); + int num_heads = query.size(-1) / head_size; + int num_kv_heads = key.size(-1) / head_size; + int64_t query_stride = query.stride(-2); + int64_t key_stride = key.stride(-2); + + dim3 grid(num_tokens); + dim3 block(std::min(num_heads * rot_dim / 2, 512)); + const at::musa::OptionalMUSAGuard device_guard(device_of(query)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + query.scalar_type(), + "rotary_embedding", + [&] { + if (is_neox) { + vllm::rotary_embedding_kernel<<>>( + positions.data_ptr(), + query.data_ptr(), + key.data_ptr(), + cos_sin_cache.data_ptr(), + rot_dim, + query_stride, + key_stride, + num_heads, + num_kv_heads, + head_size); + } else { + vllm::rotary_embedding_kernel<<>>( + positions.data_ptr(), + query.data_ptr(), + key.data_ptr(), + cos_sin_cache.data_ptr(), + rot_dim, + query_stride, + key_stride, + num_heads, + num_kv_heads, + head_size); + } + }); +} + +/* +Batched version of rotary embedding, pack multiple LoRAs together +and process in batched manner. 
+*/ +void batched_rotary_embedding( + torch::Tensor& positions, // [batch_size, seq_len] or [num_tokens] + torch::Tensor& query, // [batch_size, seq_len, num_heads * head_size] or [num_tokens, num_heads * head_size] + torch::Tensor& key, // [batch_size, seq_len, num_kv_heads * head_size] or [num_tokens, num_kv_heads * head_size] + int head_size, + torch::Tensor& cos_sin_cache, // [max_position, rot_dim] + bool is_neox, + int rot_dim, + torch::Tensor& cos_sin_cache_offsets // [num_tokens] +) { + int64_t num_tokens = cos_sin_cache_offsets.size(0); + int num_heads = query.size(-1) / head_size; + int num_kv_heads = key.size(-1) / head_size; + int64_t query_stride = query.stride(-2); + int64_t key_stride = key.stride(-2); + + dim3 grid(num_tokens); + dim3 block(std::min(num_heads * rot_dim / 2, 512)); + const at::musa::OptionalMUSAGuard device_guard(device_of(query)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + query.scalar_type(), + "rotary_embedding", + [&] { + if (is_neox) { + vllm::batched_rotary_embedding_kernel<<>>( + positions.data_ptr(), + query.data_ptr(), + key.data_ptr(), + cos_sin_cache.data_ptr(), + cos_sin_cache_offsets.data_ptr(), + rot_dim, + query_stride, + key_stride, + num_heads, + num_kv_heads, + head_size); + } else { + vllm::batched_rotary_embedding_kernel<<>>( + positions.data_ptr(), + query.data_ptr(), + key.data_ptr(), + cos_sin_cache.data_ptr(), + cos_sin_cache_offsets.data_ptr(), + rot_dim, + query_stride, + key_stride, + num_heads, + num_kv_heads, + head_size); + } + }); +} diff --git a/csrc_musa/punica/.LICENSE b/csrc_musa/punica/.LICENSE new file mode 100644 index 0000000..a46e2cd --- /dev/null +++ b/csrc_musa/punica/.LICENSE @@ -0,0 +1,217 @@ +Contains code from https://github.com/punica-ai/punica + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ + +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. 
+ + +Apache-2.0 +* third_party/nvbench (with LLVM exception) +* third_party/flashinfer + +BSD-3-Clause: +* third_party/cutlass \ No newline at end of file diff --git a/csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu b/csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu new file mode 100644 index 0000000..4d49139 --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_bf16_bf16_bf16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, mt_bfloat16, mt_bfloat16, mt_bfloat16) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, mt_bfloat16, mt_bfloat16, mt_bfloat16) diff --git a/csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu b/csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu new file mode 100644 index 0000000..a7bac81 --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_bf16_fp32_bf16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, mt_bfloat16, float, mt_bfloat16) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, mt_bfloat16, float, mt_bfloat16) diff --git a/csrc_musa/punica/bgmv/bgmv_config.h b/csrc_musa/punica/bgmv/bgmv_config.h new file mode 100644 index 0000000..19c058c --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_config.h @@ -0,0 +1,162 @@ +#pragma once + +template +void bgmv_kernel(out_T *__restrict__ Y, const in_T *__restrict__ X, + const W_T *__restrict__ W, + const int64_t *__restrict__ indicies, int64_t y_offset, + int64_t full_y_size, int64_t batch_size, int64_t num_layers, + int64_t layer_idx, float scale); + +// clang-format off + +#define FOR_BGMV_WIDE(f, in_T, out_T, W_T, narrow) \ + f(in_T, out_T, W_T, narrow, 128) \ + f(in_T, out_T, W_T, narrow, 256) \ + f(in_T, out_T, W_T, narrow, 512) \ + f(in_T, out_T, W_T, narrow, 640) \ + f(in_T, out_T, W_T, narrow, 768) \ + f(in_T, out_T, W_T, narrow, 1024) \ + f(in_T, out_T, W_T, narrow, 1152) \ + f(in_T, out_T, W_T, narrow, 1280) \ + f(in_T, out_T, W_T, narrow, 1536) \ + f(in_T, out_T, W_T, narrow, 1728) \ + f(in_T, out_T, 
W_T, narrow, 1792) \ + f(in_T, out_T, W_T, narrow, 2048) \ + f(in_T, out_T, W_T, narrow, 2304) \ + f(in_T, out_T, W_T, narrow, 2560) \ + f(in_T, out_T, W_T, narrow, 2752) \ + f(in_T, out_T, W_T, narrow, 2816) \ + f(in_T, out_T, W_T, narrow, 3072) \ + f(in_T, out_T, W_T, narrow, 3456) \ + f(in_T, out_T, W_T, narrow, 3584) \ + f(in_T, out_T, W_T, narrow, 4096) \ + f(in_T, out_T, W_T, narrow, 4608) \ + f(in_T, out_T, W_T, narrow, 5120) \ + f(in_T, out_T, W_T, narrow, 5504) \ + f(in_T, out_T, W_T, narrow, 5632) \ + f(in_T, out_T, W_T, narrow, 6144) \ + f(in_T, out_T, W_T, narrow, 6848) \ + f(in_T, out_T, W_T, narrow, 6912) \ + f(in_T, out_T, W_T, narrow, 7168) \ + f(in_T, out_T, W_T, narrow, 8192) \ + f(in_T, out_T, W_T, narrow, 9216) \ + f(in_T, out_T, W_T, narrow, 10240) \ + f(in_T, out_T, W_T, narrow, 11008) \ + f(in_T, out_T, W_T, narrow, 12288) \ + f(in_T, out_T, W_T, narrow, 13696) \ + f(in_T, out_T, W_T, narrow, 13824) \ + f(in_T, out_T, W_T, narrow, 14336) \ + f(in_T, out_T, W_T, narrow, 15360) \ + f(in_T, out_T, W_T, narrow, 16384) \ + f(in_T, out_T, W_T, narrow, 20480) \ + f(in_T, out_T, W_T, narrow, 22016) \ + f(in_T, out_T, W_T, narrow, 24576) \ + f(in_T, out_T, W_T, narrow, 27392) \ + f(in_T, out_T, W_T, narrow, 28672) \ + f(in_T, out_T, W_T, narrow, 32000) \ + f(in_T, out_T, W_T, narrow, 32256) \ + f(in_T, out_T, W_T, narrow, 32512) \ + f(in_T, out_T, W_T, narrow, 32768) \ + f(in_T, out_T, W_T, narrow, 33024) \ + f(in_T, out_T, W_T, narrow, 36864) \ + f(in_T, out_T, W_T, narrow, 43264) \ + f(in_T, out_T, W_T, narrow, 49152) \ + f(in_T, out_T, W_T, narrow, 64000) \ + f(in_T, out_T, W_T, narrow, 64256) \ + f(in_T, out_T, W_T, narrow, 64512) \ + f(in_T, out_T, W_T, narrow, 102400) \ + f(in_T, out_T, W_T, narrow, 102656) \ + f(in_T, out_T, W_T, narrow, 102912) \ + f(in_T, out_T, W_T, narrow, 128000) \ + f(in_T, out_T, W_T, narrow, 128256) \ + f(in_T, out_T, W_T, narrow, 128512) \ +// Keep above in sync with vllm/lora/layers::LogitsProcessorWithLoRA +// and 
vllm/tests/lora/test_punica.py + +// Used for defining kernels going from the variety of +// dim in to the narrow dim out + // Using it for the fully sharded column + // parallel LoRA A which splits the rank dim +#define FOR_INST_BGMV_NARROW(f, in_T, out_T, W_T, narrow) \ + f(in_T, out_T, W_T, 128, narrow) \ + f(in_T, out_T, W_T, 256, narrow) \ + f(in_T, out_T, W_T, 512, narrow) \ + f(in_T, out_T, W_T, 640, narrow) \ + f(in_T, out_T, W_T, 768, narrow) \ + f(in_T, out_T, W_T, 1024, narrow) \ + f(in_T, out_T, W_T, 1152, narrow) \ + f(in_T, out_T, W_T, 1280, narrow) \ + f(in_T, out_T, W_T, 1536, narrow) \ + f(in_T, out_T, W_T, 1728, narrow) \ + f(in_T, out_T, W_T, 1792, narrow) \ + f(in_T, out_T, W_T, 2048, narrow) \ + f(in_T, out_T, W_T, 2304, narrow) \ + f(in_T, out_T, W_T, 2560, narrow) \ + f(in_T, out_T, W_T, 2752, narrow) \ + f(in_T, out_T, W_T, 2816, narrow) \ + f(in_T, out_T, W_T, 3072, narrow) \ + f(in_T, out_T, W_T, 3456, narrow) \ + f(in_T, out_T, W_T, 3584, narrow) \ + f(in_T, out_T, W_T, 4096, narrow) \ + f(in_T, out_T, W_T, 4608, narrow) \ + f(in_T, out_T, W_T, 5120, narrow) \ + f(in_T, out_T, W_T, 5504, narrow) \ + f(in_T, out_T, W_T, 5632, narrow) \ + f(in_T, out_T, W_T, 6144, narrow) \ + f(in_T, out_T, W_T, 6848, narrow) \ + f(in_T, out_T, W_T, 6912, narrow) \ + f(in_T, out_T, W_T, 7168, narrow) \ + f(in_T, out_T, W_T, 8192, narrow) \ + f(in_T, out_T, W_T, 9216, narrow) \ + f(in_T, out_T, W_T, 10240, narrow) \ + f(in_T, out_T, W_T, 11008, narrow) \ + f(in_T, out_T, W_T, 12288, narrow) \ + f(in_T, out_T, W_T, 13696, narrow) \ + f(in_T, out_T, W_T, 13824, narrow) \ + f(in_T, out_T, W_T, 14336, narrow) \ + f(in_T, out_T, W_T, 15360, narrow) \ + f(in_T, out_T, W_T, 16384, narrow) \ + f(in_T, out_T, W_T, 20480, narrow) \ + f(in_T, out_T, W_T, 22016, narrow) \ + f(in_T, out_T, W_T, 24576, narrow) \ + f(in_T, out_T, W_T, 27392, narrow) \ + f(in_T, out_T, W_T, 28672, narrow) \ + f(in_T, out_T, W_T, 32000, narrow) \ + f(in_T, out_T, W_T, 32256, narrow) \ + 
f(in_T, out_T, W_T, 32512, narrow) \ + f(in_T, out_T, W_T, 32768, narrow) \ + f(in_T, out_T, W_T, 33024, narrow) \ + f(in_T, out_T, W_T, 36864, narrow) \ + f(in_T, out_T, W_T, 43264, narrow) \ + f(in_T, out_T, W_T, 49152, narrow) \ + f(in_T, out_T, W_T, 64000, narrow) \ + f(in_T, out_T, W_T, 64256, narrow) \ + f(in_T, out_T, W_T, 64512, narrow) \ + f(in_T, out_T, W_T, 102400, narrow) \ + f(in_T, out_T, W_T, 102656, narrow) \ + f(in_T, out_T, W_T, 102912, narrow) \ + f(in_T, out_T, W_T, 128000, narrow) \ + f(in_T, out_T, W_T, 128256, narrow) \ + f(in_T, out_T, W_T, 128512, narrow) \ +// Keep above in sync with vllm/lora/layers::SamplerWithLoRA + + +// Keep this in sync with vllm/config::LoRAConfig +#define FOR_BGMV_WIDE_NARROW(f, in_T, out_T, W_T) \ + FOR_BGMV_WIDE(f, in_T, out_T, W_T, 8) \ + FOR_BGMV_WIDE(f, in_T, out_T, W_T, 16) \ + FOR_BGMV_WIDE(f, in_T, out_T, W_T, 32) \ + FOR_BGMV_WIDE(f, in_T, out_T, W_T, 64) + + +#define FOR_INST_BGMV_WIDE_NARROW(f, in_T, out_T, W_T) \ + FOR_INST_BGMV_NARROW(f, in_T, out_T, W_T, 1) \ + FOR_INST_BGMV_NARROW(f, in_T, out_T, W_T, 2) \ + FOR_INST_BGMV_NARROW(f, in_T, out_T, W_T, 4) \ + f(in_T, out_T, W_T, 8, 64) \ + f(in_T, out_T, W_T, 16, 64) \ + f(in_T, out_T, W_T, 32, 64) \ + f(in_T, out_T, W_T, 64, 64) + +// clang-format on diff --git a/csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu b/csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu new file mode 100644 index 0000000..d225a1e --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_fp16_fp16_fp16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, nv_half, nv_half, nv_half) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, nv_half, nv_half, nv_half) diff --git a/csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu b/csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu new file mode 100644 index 0000000..b37d288 --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_fp16_fp32_fp16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + 
+FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, nv_half, float, nv_half) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, nv_half, float, nv_half) diff --git a/csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu b/csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu new file mode 100644 index 0000000..cc0fa0a --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_fp32_bf16_bf16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, float, mt_bfloat16, mt_bfloat16) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, float, mt_bfloat16, mt_bfloat16) diff --git a/csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu b/csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu new file mode 100644 index 0000000..0b35bf5 --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_fp32_fp16_fp16.mu @@ -0,0 +1,5 @@ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, float, nv_half, nv_half) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, float, nv_half, nv_half) diff --git a/csrc_musa/punica/bgmv/bgmv_impl.muh b/csrc_musa/punica/bgmv/bgmv_impl.muh new file mode 100644 index 0000000..c50f4f6 --- /dev/null +++ b/csrc_musa/punica/bgmv/bgmv_impl.muh @@ -0,0 +1,297 @@ +#pragma once + +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include +#include +#include +#include +#include + +#include "vec_dtypes.cuh" + +namespace cg = cooperative_groups; + +// nthrs = (32, 4) +template +__global__ void +bgmv_shrink_kernel(out_T *__restrict__ Y, const in_T *__restrict__ X, + const W_T *__restrict__ W, + const int64_t *__restrict__ indicies, int64_t y_offset, + int64_t full_y_size, int64_t num_layers, int64_t layer_idx, + float scale) { + size_t batch_idx = blockIdx.y; + int64_t idx = indicies[batch_idx] * num_layers + layer_idx; + if (idx < 0) { + return; + } + + auto block = cg::this_thread_block(); + size_t j = blockIdx.x; + constexpr size_t num_pipeline_stages = 2; + constexpr size_t tile_size = tx * ty * vec_size; + __shared__ W_T 
W_shared[num_pipeline_stages * tile_size]; + __shared__ in_T X_shared[num_pipeline_stages * tile_size]; + __shared__ float y_warpwise[ty]; + + size_t W_shared_offset[num_pipeline_stages] = {0U, 1U * tile_size}; + size_t X_shared_offset[num_pipeline_stages] = {0U, 1U * tile_size}; + auto pipe = cuda::make_pipeline(); + + // pipeline load W/X and compute WX; + pipe.producer_acquire(); + cuda::memcpy_async(W_shared + (threadIdx.y * tx + threadIdx.x) * vec_size, + W + (idx * feat_out + j) * feat_in + + (threadIdx.y * tx + threadIdx.x) * vec_size, + cuda::aligned_size_t(W_copy_size), pipe); + cuda::memcpy_async(X_shared + (threadIdx.y * tx + threadIdx.x) * vec_size, + X + (batch_idx * feat_in) + + (threadIdx.y * tx + threadIdx.x) * vec_size, + cuda::aligned_size_t(X_copy_size), pipe); + pipe.producer_commit(); + size_t copy_idx, compute_idx; + float y = 0.f; + vec_t x_vec; + vec_t w_vec; + size_t tile_idx; + +#pragma unroll + for (tile_idx = 1; tile_idx < (feat_in + tile_size - 1) / tile_size; + ++tile_idx) { + copy_idx = tile_idx % num_pipeline_stages; + // pipeline stage: async copy W fragment + pipe.producer_acquire(); + if (tile_idx * tile_size + threadIdx.y * tx * vec_size < feat_in) { + cuda::memcpy_async(W_shared + W_shared_offset[copy_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size, + W + (idx * feat_out + j) * feat_in + + tile_idx * tile_size + + (threadIdx.y * tx + threadIdx.x) * vec_size, + cuda::aligned_size_t(W_copy_size), pipe); + cuda::memcpy_async(X_shared + X_shared_offset[copy_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size, + X + (batch_idx * feat_in) + tile_idx * tile_size + + (threadIdx.y * tx + threadIdx.x) * vec_size, + cuda::aligned_size_t(X_copy_size), pipe); + } + pipe.producer_commit(); + + compute_idx = (tile_idx - 1) % num_pipeline_stages; + // pipeline stage: compute WX + pipe.consumer_wait(); + block.sync(); + x_vec.load(X_shared + X_shared_offset[compute_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size); + 
w_vec.load(W_shared + W_shared_offset[compute_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size); + float sum = 0.f; +#pragma unroll + for (size_t i = 0; i < vec_size; ++i) { + sum += float(w_vec[i]) * float(x_vec[i]) * scale; + } +#pragma unroll + for (size_t offset = tx / 2; offset > 0; offset /= 2) { + sum += __shfl_down_sync(0xffffffff, sum, offset); + } + y_warpwise[threadIdx.y] = sum; + block.sync(); +#pragma unroll + for (size_t i = 0; i < ty; ++i) { + y += y_warpwise[i]; + } + + block.sync(); + pipe.consumer_release(); + } + + compute_idx = (tile_idx - 1) % num_pipeline_stages; + // final pipeline stage + pipe.consumer_wait(); + block.sync(); + x_vec.load(X_shared + X_shared_offset[compute_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size); + w_vec.load(W_shared + W_shared_offset[compute_idx] + + (threadIdx.y * tx + threadIdx.x) * vec_size); + float sum = 0.f; +#pragma unroll + for (size_t i = 0; i < vec_size; ++i) { + sum += float(w_vec[i]) * float(x_vec[i]) * scale; + } +#pragma unroll + for (size_t offset = tx / 2; offset > 0; offset /= 2) { + sum += __shfl_down_sync(0xffffffff, sum, offset); + } + y_warpwise[threadIdx.y] = + ((tile_idx - 1) * tile_size + threadIdx.y * tx * vec_size < feat_in) + ? 
sum + : 0.f; + block.sync(); +#pragma unroll + for (size_t i = 0; i < ty; ++i) { + y += y_warpwise[i]; + } + + block.sync(); + pipe.consumer_release(); + + // write Y; + if (block.thread_rank() == 0) { + Y[batch_idx * full_y_size + y_offset + j] += static_cast(y); + } +} + +// nthrs = (2, 16, 4) +template +__global__ void +bgmv_expand_kernel(out_T *__restrict__ Y, const in_T *__restrict__ X, + const W_T *__restrict__ W, + const int64_t *__restrict__ indicies, int64_t y_offset, + int64_t full_y_size, int64_t num_layers, int64_t layer_idx, + float scale) { + size_t batch_idx = blockIdx.y; + int64_t idx = indicies[batch_idx] * num_layers + layer_idx; + + if (idx < 0) { + return; + } + + auto block = cg::this_thread_block(); + size_t tile_idx = blockIdx.x; + + // load X; + vec_t x_vec; + x_vec.load(X + batch_idx * feat_in + threadIdx.x * vec_size); + + // load W; + vec_t w_vec; + w_vec.load(W + (idx * feat_out + tile_idx * tz * ty) * feat_in + + block.thread_rank() * vec_size); + + float sum = 0.f; +#pragma unroll + for (size_t i = 0; i < vec_size; ++i) { + sum += float(w_vec[i]) * float(x_vec[i]) * scale; + } + + cg::thread_block_tile g = cg::tiled_partition(block); +#pragma unroll + for (size_t offset = tx / 2; offset > 0; offset /= 2) { + sum += g.shfl_down(sum, offset); + } + sum = g.shfl(sum, 0); + + if (threadIdx.x == 0) { + Y[batch_idx * full_y_size + y_offset + tile_idx * (tz * ty) + + threadIdx.z * ty + threadIdx.y] += static_cast(sum); + } +} + +template +void bgmv_kernel(out_T *__restrict__ Y, const in_T *__restrict__ X, + const W_T *__restrict__ W, + const int64_t *__restrict__ indicies, int64_t y_offset, + int64_t full_y_size, int64_t batch_size, int64_t num_layers, + int64_t layer_idx, float scale) { + constexpr size_t vec_size = 8; + constexpr int tz = 4; + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + + if constexpr (feat_in <= feat_out) { + static_assert(feat_in % vec_size == 0); + constexpr int tx = feat_in / vec_size; + + 
static_assert((32 % tx == 0 && feat_out % (32 / tx * tz) == 0) || + (16 % tx == 0 && feat_out % (16 / tx * tz) == 0) || + (8 % tx == 0 && feat_out % (8 / tx * tz) == 0)); + + if constexpr (32 % tx == 0 && feat_out % (32 / tx * tz) == 0) { + constexpr int ty = 32 / tx; + dim3 nblks(feat_out / (ty * tz), batch_size); + dim3 nthrs(tx, ty, tz); + + bgmv_expand_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } else if (16 % tx == 0 && feat_out % (16 / tx * tz) == 0) { + constexpr int ty = 16 / tx; + dim3 nblks(feat_out / (ty * tz), batch_size); + dim3 nthrs(tx, ty, tz); + + bgmv_expand_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } else { + constexpr int ty = 8 / tx; + dim3 nblks(feat_out / (ty * tz), batch_size); + dim3 nthrs(tx, ty, tz); + + bgmv_expand_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } + } else { + static_assert(feat_in % (vec_size * 32) == 0 || + feat_in % (vec_size * 16) == 0 || + feat_in % (vec_size * 8) == 0); + + if constexpr (feat_in % (vec_size * 32) == 0) { + constexpr int tx = 32; + constexpr int ty = 4; + + dim3 nblks(feat_out, batch_size); + dim3 nthrs(tx, ty); + + bgmv_shrink_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } else if constexpr (feat_in % (vec_size / 2 * 32) == 0) { + constexpr int tx = 32; + constexpr int ty = 4; + + dim3 nblks(feat_out, batch_size); + dim3 nthrs(tx, ty); + + bgmv_shrink_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } else if constexpr (feat_in % (vec_size / 2 * 16) == 0) { + constexpr int tx = 16; + constexpr int ty = 4; + + dim3 nblks(feat_out, batch_size); + dim3 nthrs(tx, ty); + + bgmv_shrink_kernel + <<>>(Y, X, W, indicies, y_offset, + full_y_size, num_layers, layer_idx, + scale); + } + } +} + +#define INST_BGMV(feat_in, feat_out, in_T, out_T, W_T) \ + template void 
bgmv_kernel( \ + out_T * __restrict__ Y, const in_T *__restrict__ X, \ + const W_T *__restrict__ W, const int64_t *__restrict__ indicies, \ + int64_t y_offset, int64_t full_y_size, int64_t batch_size, \ + int64_t num_layers, int64_t layer_idx, float scale); + +#define INST_BGMV_ONESIDE(in_T, out_T, W_T, feat_in, feat_out) \ + INST_BGMV(feat_in, feat_out, in_T, out_T, W_T) + +#define INST_BGMV_TWOSIDE(in_T, out_T, W_T, narrow, wide) \ + INST_BGMV(narrow, wide, in_T, out_T, W_T) \ + INST_BGMV(wide, narrow, in_T, out_T, W_T) diff --git a/csrc_musa/punica/bgmv/generator.py b/csrc_musa/punica/bgmv/generator.py new file mode 100644 index 0000000..9b46afb --- /dev/null +++ b/csrc_musa/punica/bgmv/generator.py @@ -0,0 +1,48 @@ +DTYPES = ["fp16", "bf16", "fp32"] +DTYPE_MAP = { + "fp16": "nv_half", + "bf16": "mt_bfloat16", + "fp32": "float", +} + +TEMPLATE = """ +#include "bgmv_config.h" +#include "bgmv_impl.cuh" + +FOR_BGMV_WIDE_NARROW(INST_BGMV_TWOSIDE, {input_dtype}, {output_dtype}, {weight_dtype}) +FOR_INST_BGMV_WIDE_NARROW(INST_BGMV_ONESIDE, {input_dtype}, {output_dtype}, {weight_dtype}) +""".lstrip() # noqa: E501 + +for input_dtype in DTYPES: + for output_dtype in DTYPES: + for weight_dtype in DTYPES: + if weight_dtype == "fp32": + # FP32 weights are not supported. + continue + if output_dtype == "fp32": + # LoRA A matrix. + if input_dtype != weight_dtype: + # NOTE(woosuk): While Punica supports the case where the + # input and weight dtypes are different, we only generate + # the kernels the same dtypes to reduce the binary size. + continue + elif input_dtype == "fp32": + # LoRA B matrix. + if output_dtype != weight_dtype: + # NOTE(woosuk): While Punica supports the case where the + # output and weight dtypes are different, we only generate + # the kernels the same dtypes to reduce the binary size. 
+ continue + elif not (input_dtype == output_dtype == weight_dtype): + # NOTE(woosuk): While Punica supports mixed data types for + # input, output, and weight, we only generate the kernels with + # the same data types to reduce the binary size. + continue + + kernel_definition = TEMPLATE.format( + input_dtype=DTYPE_MAP[input_dtype], + output_dtype=DTYPE_MAP[output_dtype], + weight_dtype=DTYPE_MAP[weight_dtype]) + filename = f"bgmv_{input_dtype}_{output_dtype}_{weight_dtype}.cu" + with open(filename, "w") as f: + f.write(kernel_definition) diff --git a/csrc_musa/punica/bgmv/vec_dtypes.muh b/csrc_musa/punica/bgmv/vec_dtypes.muh new file mode 100644 index 0000000..fa3da2b --- /dev/null +++ b/csrc_musa/punica/bgmv/vec_dtypes.muh @@ -0,0 +1,1324 @@ +#ifndef VEC_DTYPES_CUH_ +#define VEC_DTYPES_CUH_ + +#include +#include +#ifdef FLASHINFER_USE_FP8 +#include +#endif +#include + +#include + +#define FLASHINFER_INLINE \ + inline __attribute__((always_inline)) __device__ __host__ + +template +struct vec_t { + FLASHINFER_INLINE float_t &operator[](size_t i); + FLASHINFER_INLINE const float_t &operator[](size_t i) const; + FLASHINFER_INLINE void fill(float_t val); + FLASHINFER_INLINE void load(const float_t *ptr); + FLASHINFER_INLINE void store(float_t *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src); + template + FLASHINFER_INLINE void cast_load(const T *ptr); + template + FLASHINFER_INLINE void cast_store(T *ptr) const; + FLASHINFER_INLINE static void memcpy(float_t *dst, const float_t *src); +}; + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t &dst) { +#pragma unroll + for (size_t i = 0; i < vec_size; ++i) { + dst[i] = tgt_float_t(src[i]); + } +} + +template +FLASHINFER_INLINE void cast_load_impl(const src_float_t *src_ptr, + vec_t &dst) { + if constexpr (std::is_same::value) { + dst.load(src_ptr); + } else { + vec_t tmp; + tmp.load(src_ptr); + dst.cast_from(tmp); + } +} + +template +FLASHINFER_INLINE void 
cast_store_impl(const vec_t &src, + tgt_float_t *dst_ptr) { + if constexpr (std::is_same::value) { + src.store(dst_ptr); + } else { + vec_t tmp; + tmp.cast_from(src); + tmp.store(dst_ptr); + } +} + +#ifdef FLASHINFER_USE_FP8 +/******************* vec_t<__nv_fp8_e4m3> *******************/ + +// __nv_fp8_e4m3 x 1 +template <> +struct vec_t<__nv_fp8_e4m3, 1> { + __nv_fp8_e4m3 data; + + FLASHINFER_INLINE __nv_fp8_e4m3 &operator[](size_t i) { + return ((__nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e4m3 &operator[](size_t i) const { + return ((const __nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e4m3 val); + FLASHINFER_INLINE void load(const __nv_fp8_e4m3 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e4m3 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e4m3 *dst, + const __nv_fp8_e4m3 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 1>::fill(__nv_fp8_e4m3 val) { + data = val; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 1>::load(const __nv_fp8_e4m3 *ptr) { + data = *ptr; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 1>::store( + __nv_fp8_e4m3 *ptr) const { + *ptr = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 1>::memcpy( + __nv_fp8_e4m3 *dst, const __nv_fp8_e4m3 *src) { + *dst = *src; +} + +// __nv_fp8_e4m3 x 2 +template <> +struct vec_t<__nv_fp8_e4m3, 2> { + __nv_fp8x2_e4m3 data; + + FLASHINFER_INLINE __nv_fp8_e4m3 &operator[](size_t i) { + return ((__nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e4m3 &operator[](size_t i) const { + return ((const __nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e4m3 val); + FLASHINFER_INLINE void load(const 
__nv_fp8_e4m3 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e4m3 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e4m3 *dst, + const __nv_fp8_e4m3 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 2>::fill(__nv_fp8_e4m3 val) { + data.__x = + (__nv_fp8x2_storage_t(val.__x) << 8) | __nv_fp8x2_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 2>::load(const __nv_fp8_e4m3 *ptr) { + data = *((__nv_fp8x2_e4m3 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 2>::store( + __nv_fp8_e4m3 *ptr) const { + *((__nv_fp8x2_e4m3 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 2>::memcpy( + __nv_fp8_e4m3 *dst, const __nv_fp8_e4m3 *src) { + *((__nv_fp8x2_e4m3 *)dst) = *((__nv_fp8x2_e4m3 *)src); +} + +// __nv_fp8_e4m3 x 4 + +template <> +struct vec_t<__nv_fp8_e4m3, 4> { + __nv_fp8x4_e4m3 data; + + FLASHINFER_INLINE __nv_fp8_e4m3 &operator[](size_t i) { + return ((__nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e4m3 &operator[](size_t i) const { + return ((const __nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e4m3 val); + FLASHINFER_INLINE void load(const __nv_fp8_e4m3 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e4m3 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e4m3 *dst, + const __nv_fp8_e4m3 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 4>::fill(__nv_fp8_e4m3 val) 
{ + data.__x = (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 4>::load(const __nv_fp8_e4m3 *ptr) { + data = *((__nv_fp8x4_e4m3 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 4>::store( + __nv_fp8_e4m3 *ptr) const { + *((__nv_fp8x4_e4m3 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 4>::memcpy( + __nv_fp8_e4m3 *dst, const __nv_fp8_e4m3 *src) { + *((__nv_fp8x4_e4m3 *)dst) = *((__nv_fp8x4_e4m3 *)src); +} + +// __nv_fp8_e4m3 x 8 + +template <> +struct vec_t<__nv_fp8_e4m3, 8> { + uint2 data; + + FLASHINFER_INLINE __nv_fp8_e4m3 &operator[](size_t i) { + return ((__nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e4m3 &operator[](size_t i) const { + return ((const __nv_fp8_e4m3 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e4m3 val); + FLASHINFER_INLINE void load(const __nv_fp8_e4m3 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e4m3 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e4m3 *dst, + const __nv_fp8_e4m3 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 8>::fill(__nv_fp8_e4m3 val) { + ((__nv_fp8x4_e4m3 *)(&data.x))->__x = (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e4m3 *)(&data.y))->__x = (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 8>::load(const 
__nv_fp8_e4m3 *ptr) { + data = *((uint2 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 8>::store( + __nv_fp8_e4m3 *ptr) const { + *((uint2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e4m3, 8>::memcpy( + __nv_fp8_e4m3 *dst, const __nv_fp8_e4m3 *src) { + *((__nv_fp8_e4m3 *)dst) = *((__nv_fp8_e4m3 *)src); +} + +// __nv_fp8_e4m3 x 16 or more +template +struct vec_t<__nv_fp8_e4m3, vec_size> { + uint4 data[vec_size / 16]; + + FLASHINFER_INLINE __nv_fp8_e4m3 &operator[](size_t i) { + return ((__nv_fp8_e4m3 *)data)[i]; + } + FLASHINFER_INLINE const __nv_fp8_e4m3 &operator[](size_t i) const { + return ((const __nv_fp8_e4m3 *)data)[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e4m3 val) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((__nv_fp8x4_e4m3 *)(&(data[i].x)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e4m3 *)(&(data[i].y)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e4m3 *)(&(data[i].z)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e4m3 *)(&(data[i].w)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + } + } + FLASHINFER_INLINE void load(const __nv_fp8_e4m3 *ptr) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + data[i] = ((uint4 *)ptr)[i]; + } + } + FLASHINFER_INLINE void store(__nv_fp8_e4m3 *ptr) const { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((uint4 *)ptr)[i] = data[i]; + } + } + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, 
*this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e4m3 *dst, + const __nv_fp8_e4m3 *src) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((uint4 *)dst)[i] = ((uint4 *)src)[i]; + } + } +}; + +/******************* vec_t<__nv_fp8_e5m2> *******************/ + +// __nv_fp8_e5m2 x 1 +template <> +struct vec_t<__nv_fp8_e5m2, 1> { + __nv_fp8_e5m2 data; + + FLASHINFER_INLINE __nv_fp8_e5m2 &operator[](size_t i) { + return ((__nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e5m2 &operator[](size_t i) const { + return ((const __nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e5m2 val); + FLASHINFER_INLINE void load(const __nv_fp8_e5m2 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e5m2 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e5m2 *dst, + const __nv_fp8_e5m2 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 1>::fill(__nv_fp8_e5m2 val) { + data = val; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 1>::load(const __nv_fp8_e5m2 *ptr) { + data = *ptr; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 1>::store( + __nv_fp8_e5m2 *ptr) const { + *ptr = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 1>::memcpy( + __nv_fp8_e5m2 *dst, const __nv_fp8_e5m2 *src) { + *dst = *src; +} + +// __nv_fp8_e5m2 x 2 +template <> +struct vec_t<__nv_fp8_e5m2, 2> { + __nv_fp8x2_e5m2 data; + + FLASHINFER_INLINE __nv_fp8_e5m2 &operator[](size_t i) { + return ((__nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE 
const __nv_fp8_e5m2 &operator[](size_t i) const { + return ((const __nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e5m2 val); + FLASHINFER_INLINE void load(const __nv_fp8_e5m2 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e5m2 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e5m2 *dst, + const __nv_fp8_e5m2 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 2>::fill(__nv_fp8_e5m2 val) { + data.__x = + (__nv_fp8x2_storage_t(val.__x) << 8) | __nv_fp8x2_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 2>::load(const __nv_fp8_e5m2 *ptr) { + data = *((__nv_fp8x2_e5m2 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 2>::store( + __nv_fp8_e5m2 *ptr) const { + *((__nv_fp8x2_e5m2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 2>::memcpy( + __nv_fp8_e5m2 *dst, const __nv_fp8_e5m2 *src) { + *((__nv_fp8x2_e5m2 *)dst) = *((__nv_fp8x2_e5m2 *)src); +} + +// __nv_fp8_e5m2 x 4 + +template <> +struct vec_t<__nv_fp8_e5m2, 4> { + __nv_fp8x4_e5m2 data; + + FLASHINFER_INLINE __nv_fp8_e5m2 &operator[](size_t i) { + return ((__nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e5m2 &operator[](size_t i) const { + return ((const __nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e5m2 val); + FLASHINFER_INLINE void load(const __nv_fp8_e5m2 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e5m2 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + 
cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e5m2 *dst, + const __nv_fp8_e5m2 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 4>::fill(__nv_fp8_e5m2 val) { + data.__x = (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 4>::load(const __nv_fp8_e5m2 *ptr) { + data = *((__nv_fp8x4_e5m2 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 4>::store( + __nv_fp8_e5m2 *ptr) const { + *((__nv_fp8x4_e5m2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 4>::memcpy( + __nv_fp8_e5m2 *dst, const __nv_fp8_e5m2 *src) { + *((__nv_fp8x4_e5m2 *)dst) = *((__nv_fp8x4_e5m2 *)src); +} + +// __nv_fp8_e5m2 x 8 + +template <> +struct vec_t<__nv_fp8_e5m2, 8> { + uint2 data; + + FLASHINFER_INLINE __nv_fp8_e5m2 &operator[](size_t i) { + return ((__nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE const __nv_fp8_e5m2 &operator[](size_t i) const { + return ((const __nv_fp8_e5m2 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e5m2 val); + FLASHINFER_INLINE void load(const __nv_fp8_e5m2 *ptr); + FLASHINFER_INLINE void store(__nv_fp8_e5m2 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e5m2 *dst, + const __nv_fp8_e5m2 *src); +}; + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 8>::fill(__nv_fp8_e5m2 val) { + ((__nv_fp8x4_e5m2 *)(&data.x))->__x = (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e5m2 *)(&data.y))->__x = 
(__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | + __nv_fp8x4_storage_t(val.__x); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 8>::load(const __nv_fp8_e5m2 *ptr) { + data = *((uint2 *)ptr); +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 8>::store( + __nv_fp8_e5m2 *ptr) const { + *((uint2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t<__nv_fp8_e5m2, 8>::memcpy( + __nv_fp8_e5m2 *dst, const __nv_fp8_e5m2 *src) { + *((__nv_fp8_e5m2 *)dst) = *((__nv_fp8_e5m2 *)src); +} + +// __nv_fp8_e5m2 x 16 or more + +template +struct vec_t<__nv_fp8_e5m2, vec_size> { + uint4 data[vec_size / 16]; + + FLASHINFER_INLINE __nv_fp8_e5m2 &operator[](size_t i) { + return ((__nv_fp8_e5m2 *)data)[i]; + } + FLASHINFER_INLINE const __nv_fp8_e5m2 &operator[](size_t i) const { + return ((const __nv_fp8_e5m2 *)data)[i]; + } + FLASHINFER_INLINE void fill(__nv_fp8_e5m2 val) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((__nv_fp8x4_e5m2 *)(&(data[i].x)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e5m2 *)(&(data[i].y)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e5m2 *)(&(data[i].z)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + ((__nv_fp8x4_e5m2 *)(&(data[i].w)))->__x = + (__nv_fp8x4_storage_t(val.__x) << 24) | + (__nv_fp8x4_storage_t(val.__x) << 16) | + (__nv_fp8x4_storage_t(val.__x) << 8) | __nv_fp8x4_storage_t(val.__x); + } + } + FLASHINFER_INLINE void load(const __nv_fp8_e5m2 *ptr) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + data[i] = ((uint4 *)ptr)[i]; + } + } + FLASHINFER_INLINE void 
store(__nv_fp8_e5m2 *ptr) const { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((uint4 *)ptr)[i] = data[i]; + } + } + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(__nv_fp8_e5m2 *dst, + const __nv_fp8_e5m2 *src) { +#pragma unroll + for (size_t i = 0; i < vec_size / 16; ++i) { + ((uint4 *)dst)[i] = ((uint4 *)src)[i]; + } + } +}; +#endif + +/******************* vec_t *******************/ + +// half x 1 +template <> +struct vec_t { + half data; + + FLASHINFER_INLINE half &operator[](size_t i) { return ((half *)(&data))[i]; } + FLASHINFER_INLINE const half &operator[](size_t i) const { + return ((const half *)(&data))[i]; + } + FLASHINFER_INLINE void fill(half val); + FLASHINFER_INLINE void load(const half *ptr); + FLASHINFER_INLINE void store(half *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(half *dst, const half *src); +}; + +FLASHINFER_INLINE void vec_t::fill(half val) { data = val; } + +FLASHINFER_INLINE void vec_t::load(const half *ptr) { data = *ptr; } + +FLASHINFER_INLINE void vec_t::store(half *ptr) const { *ptr = data; } + +FLASHINFER_INLINE void vec_t::memcpy(half *dst, const half *src) { + *dst = *src; +} + +// half x 2 +template <> +struct vec_t { + half2 data; + + FLASHINFER_INLINE half &operator[](size_t i) { return ((half *)(&data))[i]; } + FLASHINFER_INLINE const half &operator[](size_t i) const { + return ((const half *)(&data))[i]; + } + 
FLASHINFER_INLINE void fill(half val); + FLASHINFER_INLINE void load(const half *ptr); + FLASHINFER_INLINE void store(half *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(half *dst, const half *src); +}; + +FLASHINFER_INLINE void vec_t::fill(half val) { + data = make_half2(val, val); +} + +FLASHINFER_INLINE void vec_t::load(const half *ptr) { + data = *((half2 *)ptr); +} + +FLASHINFER_INLINE void vec_t::store(half *ptr) const { + *((half2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(half *dst, const half *src) { + *((half2 *)dst) = *((half2 *)src); +} + +// half x 4 + +template <> +struct vec_t { + uint2 data; + + FLASHINFER_INLINE half &operator[](size_t i) { return ((half *)(&data))[i]; } + FLASHINFER_INLINE const half &operator[](size_t i) const { + return ((const half *)(&data))[i]; + } + FLASHINFER_INLINE void fill(half val); + FLASHINFER_INLINE void load(const half *ptr); + FLASHINFER_INLINE void store(half *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(half *dst, const half *src); +}; + +FLASHINFER_INLINE void vec_t::fill(half val) { + *(half2 *)(&data.x) = make_half2(val, val); + *(half2 *)(&data.y) = make_half2(val, val); +} + +FLASHINFER_INLINE void vec_t::load(const half *ptr) { + data = *((uint2 *)ptr); +} + +FLASHINFER_INLINE void vec_t::store(half *ptr) const { + *((uint2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(half *dst, const 
half *src) { + *((uint2 *)dst) = *((uint2 *)src); +} + +// half x 8 or more + +template +struct vec_t { + uint4 data[vec_size / 8]; + FLASHINFER_INLINE half &operator[](size_t i) { return ((half *)data)[i]; } + FLASHINFER_INLINE const half &operator[](size_t i) const { + return ((const half *)data)[i]; + } + FLASHINFER_INLINE void fill(half val) { +#pragma unroll + for (size_t i = 0; i < vec_size; ++i) { + *(half2 *)(&(data[i].x)) = make_half2(val, val); + *(half2 *)(&(data[i].y)) = make_half2(val, val); + *(half2 *)(&(data[i].z)) = make_half2(val, val); + *(half2 *)(&(data[i].w)) = make_half2(val, val); + } + } + FLASHINFER_INLINE void load(const half *ptr) { +#pragma unroll + for (size_t i = 0; i < vec_size / 8; ++i) { + data[i] = ((uint4 *)ptr)[i]; + } + } + FLASHINFER_INLINE void store(half *ptr) const { +#pragma unroll + for (size_t i = 0; i < vec_size / 8; ++i) { + ((uint4 *)ptr)[i] = data[i]; + } + } + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(half *dst, const half *src) { +#pragma unroll + for (size_t i = 0; i < vec_size / 8; ++i) { + ((uint4 *)dst)[i] = ((uint4 *)src)[i]; + } + } +}; + +/******************* vec_t *******************/ + +// nv_bfloat16 x 1 +template <> +struct vec_t { + mt_bfloat16 data; + + FLASHINFER_INLINE mt_bfloat16 &operator[](size_t i) { + return ((mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE const mt_bfloat16 &operator[](size_t i) const { + return ((const mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(mt_bfloat16 val); + FLASHINFER_INLINE void load(const mt_bfloat16 *ptr); + FLASHINFER_INLINE void store(mt_bfloat16 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + 
template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src); +}; + +FLASHINFER_INLINE void vec_t::fill(mt_bfloat16 val) { + data = val; +} + +FLASHINFER_INLINE void vec_t::load(const mt_bfloat16 *ptr) { + data = *ptr; +} + +FLASHINFER_INLINE void vec_t::store(mt_bfloat16 *ptr) const { + *ptr = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src) { + *dst = *src; +} + +// nv_bfloat16 x 2 +template <> +struct vec_t { + mt_bfloat162 data; + + FLASHINFER_INLINE mt_bfloat16 &operator[](size_t i) { + return ((mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE const mt_bfloat16 &operator[](size_t i) const { + return ((const mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(mt_bfloat16 val); + FLASHINFER_INLINE void load(const mt_bfloat16 *ptr); + FLASHINFER_INLINE void store(mt_bfloat16 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src); +}; + +FLASHINFER_INLINE void vec_t::fill(mt_bfloat16 val) { + data = make_bfloat162(val, val); +} + +FLASHINFER_INLINE void vec_t::load(const mt_bfloat16 *ptr) { + data = *((mt_bfloat162 *)ptr); +} + +FLASHINFER_INLINE void vec_t::store(mt_bfloat16 *ptr) const { + *((mt_bfloat162 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src) { + *((mt_bfloat162 *)dst) = *((mt_bfloat162 *)src); +} + +// nv_bfloat16 x 4 + +template <> +struct vec_t { + uint2 data; + + FLASHINFER_INLINE mt_bfloat16 
&operator[](size_t i) { + return ((mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE const mt_bfloat16 &operator[](size_t i) const { + return ((const mt_bfloat16 *)(&data))[i]; + } + FLASHINFER_INLINE void fill(mt_bfloat16 val); + FLASHINFER_INLINE void load(const mt_bfloat16 *ptr); + FLASHINFER_INLINE void store(mt_bfloat16 *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src); +}; + +FLASHINFER_INLINE void vec_t::fill(mt_bfloat16 val) { + *(mt_bfloat162 *)(&data.x) = make_bfloat162(val, val); + *(mt_bfloat162 *)(&data.y) = make_bfloat162(val, val); +} + +FLASHINFER_INLINE void vec_t::load(const mt_bfloat16 *ptr) { + data = *((uint2 *)ptr); +} + +FLASHINFER_INLINE void vec_t::store(mt_bfloat16 *ptr) const { + *((uint2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src) { + *((uint2 *)dst) = *((uint2 *)src); +} + +// nv_bfloat16 x 8 or more + +template +struct vec_t { + uint4 data[vec_size / 8]; + + FLASHINFER_INLINE mt_bfloat16 &operator[](size_t i) { + return ((mt_bfloat16 *)data)[i]; + } + FLASHINFER_INLINE const mt_bfloat16 &operator[](size_t i) const { + return ((const mt_bfloat16 *)data)[i]; + } + FLASHINFER_INLINE void fill(mt_bfloat16 val) { +#pragma unoll + for (size_t i = 0; i < vec_size / 8; ++i) { + *(mt_bfloat162 *)(&(data[i].x)) = make_bfloat162(val, val); + *(mt_bfloat162 *)(&(data[i].y)) = make_bfloat162(val, val); + *(mt_bfloat162 *)(&(data[i].z)) = make_bfloat162(val, val); + *(mt_bfloat162 *)(&(data[i].w)) = make_bfloat162(val, val); + } + } + FLASHINFER_INLINE void load(const mt_bfloat16 *ptr) { +#pragma unoll + for (size_t i = 0; i < vec_size / 8; ++i) { + 
data[i] = ((uint4 *)ptr)[i]; + } + } + FLASHINFER_INLINE void store(mt_bfloat16 *ptr) const { +#pragma unoll + for (size_t i = 0; i < vec_size / 8; ++i) { + ((uint4 *)ptr)[i] = data[i]; + } + } + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(mt_bfloat16 *dst, + const mt_bfloat16 *src) { +#pragma unoll + for (size_t i = 0; i < vec_size / 8; ++i) { + ((uint4 *)dst)[i] = ((uint4 *)src)[i]; + } + } +}; + +/******************* vec_t *******************/ + +// float x 1 + +template <> +struct vec_t { + float data; + + FLASHINFER_INLINE float &operator[](size_t i) { + return ((float *)(&data))[i]; + } + FLASHINFER_INLINE const float &operator[](size_t i) const { + return ((const float *)(&data))[i]; + } + FLASHINFER_INLINE void fill(float val); + FLASHINFER_INLINE void load(const float *ptr); + FLASHINFER_INLINE void store(float *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + + FLASHINFER_INLINE static void memcpy(float *dst, const float *src); +}; + +FLASHINFER_INLINE void vec_t::fill(float val) { data = val; } + +FLASHINFER_INLINE void vec_t::load(const float *ptr) { data = *ptr; } + +FLASHINFER_INLINE void vec_t::store(float *ptr) const { *ptr = data; } + +FLASHINFER_INLINE void vec_t::memcpy(float *dst, const float *src) { + *dst = *src; +} + +// float x 2 + +template <> +struct vec_t { + float2 data; + + FLASHINFER_INLINE float &operator[](size_t i) { + return ((float *)(&data))[i]; + } + FLASHINFER_INLINE const float 
&operator[](size_t i) const { + return ((const float *)(&data))[i]; + } + FLASHINFER_INLINE void fill(float val); + FLASHINFER_INLINE void load(const float *ptr); + FLASHINFER_INLINE void store(float *ptr) const; + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + FLASHINFER_INLINE static void memcpy(float *dst, const float *src); +}; + +FLASHINFER_INLINE void vec_t::fill(float val) { + data = make_float2(val, val); +} + +FLASHINFER_INLINE void vec_t::load(const float *ptr) { + data = *((float2 *)ptr); +} + +FLASHINFER_INLINE void vec_t::store(float *ptr) const { + *((float2 *)ptr) = data; +} + +FLASHINFER_INLINE void vec_t::memcpy(float *dst, const float *src) { + *((float2 *)dst) = *((float2 *)src); +} + +// float x 4 or more +template +struct vec_t { + float4 data[vec_size / 4]; + + FLASHINFER_INLINE float &operator[](size_t i) { return ((float *)(data))[i]; } + FLASHINFER_INLINE const float &operator[](size_t i) const { + return ((const float *)(data))[i]; + } + FLASHINFER_INLINE void fill(float val) { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + data[i] = make_float4(val, val, val, val); + } + } + FLASHINFER_INLINE void load(const float *ptr) { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + data[i] = ((float4 *)ptr)[i]; + } + } + FLASHINFER_INLINE void store(float *ptr) const { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((float4 *)ptr)[i] = data[i]; + } + } + template + FLASHINFER_INLINE void cast_from(const vec_t &src) { + cast_from_impl(src, *this); + } + template + FLASHINFER_INLINE void cast_load(const T *ptr) { + cast_load_impl(ptr, *this); + } + template + FLASHINFER_INLINE void cast_store(T *ptr) const { + cast_store_impl(*this, ptr); + } + FLASHINFER_INLINE 
static void memcpy(float *dst, const float *src) { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((float4 *)dst)[i] = ((float4 *)src)[i]; + } + } +}; + +/******************* vec_t type cast *******************/ + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 2; ++i) { + ((float2 *)(&dst.data))[i] = __half22float2(((half2 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = half(src.data); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 2; ++i) { + ((half2 *)(&dst.data))[i] = __float22half2_rn(((float2 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 2; ++i) { + ((float2 *)(&dst.data))[i] = + __bfloat1622float2(((mt_bfloat162 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = mt_bfloat16(src.data); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 2; ++i) { + ((mt_bfloat162 *)(&dst.data))[i] = + __float22bfloat162_rn(((float2 *)(&src.data))[i]); + } + } +} + +#ifdef FLASHINFER_USE_FP8 + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t<__nv_fp8_e4m3, vec_size> &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else if constexpr (vec_size == 2) { + *(float2 *)(&dst.data) = float2(*(__nv_fp8x2_e4m3 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((float4 *)(&dst.data))[i] = float4(((__nv_fp8x4_e4m3 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void 
cast_from_impl(const vec_t<__nv_fp8_e4m3, vec_size> &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 2; ++i) { + ((half2 *)(&dst.data))[i] = half2(((__nv_fp8x2_e4m3 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t<__nv_fp8_e4m3, vec_size> &dst) { + if constexpr (vec_size == 1) { + dst.data = __nv_fp8_e4m3(src.data); + } else if constexpr (vec_size == 2) { + *(__nv_fp8x2_e4m3 *)(&dst.data) = __nv_fp8x2_e4m3(*(float2 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((__nv_fp8x4_e4m3 *)(&dst.data))[i] = + __nv_fp8x4_e4m3(((float4 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t<__nv_fp8_e4m3, vec_size> &dst) { + if constexpr (vec_size == 1) { + dst.data = __nv_fp8_e4m3(src.data); + } else if constexpr (vec_size == 2) { + *(__nv_fp8x2_e4m3 *)(&dst.data) = __nv_fp8x2_e4m3(*(half2 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + // NOTE(Zihao): need to double check if we properly handle flo and fhi + ((__nv_fp8x4_e4m3 *)(&dst.data))[i] = __nv_fp8x4_e4m3( + ((half2 *)(&src.data))[i * 2], ((half2 *)(&src.data))[i * 2 + 1]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t<__nv_fp8_e5m2, vec_size> &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else if constexpr (vec_size == 2) { + *(float2 *)(&dst.data) = float2(*(__nv_fp8x2_e5m2 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((float4 *)(&dst.data))[i] = float4(((__nv_fp8x4_e5m2 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t<__nv_fp8_e5m2, vec_size> &src, + vec_t &dst) { + if constexpr (vec_size == 1) { + dst.data = float(src.data); + } else { +#pragma unroll + for (size_t 
i = 0; i < vec_size / 2; ++i) { + ((half2 *)(&dst.data))[i] = half2(((__nv_fp8x2_e5m2 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t<__nv_fp8_e5m2, vec_size> &dst) { + if constexpr (vec_size == 1) { + dst.data = __nv_fp8_e5m2(src.data); + } else if constexpr (vec_size == 2) { + *(__nv_fp8x2_e5m2 *)(&dst.data) = __nv_fp8x2_e5m2(*(float2 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + ((__nv_fp8x4_e5m2 *)(&dst.data))[i] = + __nv_fp8x4_e5m2(((float4 *)(&src.data))[i]); + } + } +} + +template +FLASHINFER_INLINE void cast_from_impl(const vec_t &src, + vec_t<__nv_fp8_e5m2, vec_size> &dst) { + if constexpr (vec_size == 1) { + dst.data = __nv_fp8_e4m3(src.data); + } else if constexpr (vec_size == 2) { + *(__nv_fp8x2_e5m2 *)(&dst.data) = __nv_fp8x2_e5m2(*(half2 *)(&src.data)); + } else { +#pragma unroll + for (size_t i = 0; i < vec_size / 4; ++i) { + // NOTE(Zihao): need to double check if we properly handle flo and fhi + ((__nv_fp8x4_e5m2 *)(&dst.data))[i] = __nv_fp8x4_e5m2( + ((half2 *)(&src.data))[i * 2], ((half2 *)(&src.data))[i * 2 + 1]); + } + } +} + +#endif // FLASHINFER_USE_FP8 + +#endif // VEC_DTYPES_CUH_ diff --git a/csrc_musa/punica/punica_ops.cc b/csrc_musa/punica/punica_ops.cc new file mode 100644 index 0000000..8527835 --- /dev/null +++ b/csrc_musa/punica/punica_ops.cc @@ -0,0 +1,582 @@ +#include +#include +#include +#include "torch_musa/csrc/core/MUSAGuard.h" +#include + +#include "bgmv/bgmv_config.h" + +namespace { + +//====== utils ====== + +inline void check_shape(const torch::Tensor &a, const torch::Tensor &b, + const char *a_name, const char *b_name) { + TORCH_CHECK(a.dim() == b.dim(), a_name, ".dim() != ", b_name, ".dim(). 
", + a.dim(), " vs ", b.dim()); + for (int i = 0; i < a.dim(); ++i) { + TORCH_CHECK(a.size(i) == b.size(i), a_name, ".size(", i, ") != ", b_name, + ".size(", i, ")"); + } +} + +inline constexpr uint64_t pack_u32(uint32_t a, uint32_t b) { + return (uint64_t(a) << 32) | uint64_t(b); +} + +#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") + +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") + +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +#define CHECK_DIM(d, x) \ + TORCH_CHECK(x.dim() == d, #x " must be a " #d "D tensor") + +#define CHECK_SHAPE(a, b) check_shape(a, b, #a, #b) + +#define CHECK_EQ(a, b) \ + TORCH_CHECK(a == b, "CHECK_EQ(" #a ", " #b ") failed. ", a, " vs ", b) + +//====== bgmv ====== + +template +inline bool launch_bgmv_kernel(out_T *Y, const in_T *X, const W_T *W, + const int64_t *lora_indices, + uint32_t in_features, uint32_t out_features, + int64_t y_offset, int64_t full_y_size, + int64_t batch_size, int64_t num_layers, + int64_t layer_idx, float scale) { + // NOTE(woosuk): While Punica supports various combinations of input/output + // data types, we limit the supported data types to reduce the binary size. 
+ constexpr bool is_input_float = std::is_same::value; + constexpr bool is_output_float = std::is_same::value; + if (is_input_float) { + if (!std::is_same::value) { + return false; + } + } else if (is_output_float) { + if (!std::is_same::value) { + return false; + } + } else if (!(std::is_same::value && + std::is_same::value)) { + return false; + } + + switch (pack_u32(in_features, out_features)) { +#define CASE_ONESIDE(_in_T, _out_T, _W_T, feat_in, feat_out) \ + case pack_u32(feat_in, feat_out): \ + bgmv_kernel(Y, X, W, lora_indices, y_offset, \ + full_y_size, batch_size, num_layers, \ + layer_idx, scale); \ + break; +#define CASE(_in_T, _out_T, _W_T, narrow, wide) \ + CASE_ONESIDE(in_T, out_T, W_T, narrow, wide) \ + CASE_ONESIDE(in_T, out_T, W_T, wide, narrow) + + FOR_BGMV_WIDE_NARROW(CASE, _, _, _) + FOR_INST_BGMV_WIDE_NARROW(CASE_ONESIDE, _, _, _) +#undef CASE +#undef CASE_ONESIDE + default: + return false; + } + return true; +} + +void dispatch_bgmv(torch::Tensor y, torch::Tensor x, torch::Tensor w, + torch::Tensor indicies, int64_t layer_idx, float scale) { + CHECK_INPUT(y); + CHECK_INPUT(x); + CHECK_INPUT(w); + CHECK_INPUT(indicies); + + CHECK_DIM(2, y); + CHECK_DIM(2, x); + CHECK_DIM(4, w); + CHECK_DIM(1, indicies); + + int64_t B = x.size(0); + int64_t h_in = x.size(1); + int64_t h_out = y.size(1); + int64_t num_layers = w.size(1); + CHECK_EQ(w.size(3), h_in); + CHECK_EQ(w.size(2), h_out); + CHECK_EQ(indicies.size(0), x.size(0)); + CHECK_EQ(y.size(0), x.size(0)); + const at::musa::OptionalMUSAGuard device_guard(device_of(x)); + bool ok = false; + if (h_in <= 128512 && h_out <= 128512) { + // TODO: See if we can get rid of this massive nested switch + switch (x.scalar_type()) { + case at::ScalarType::Half: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, 
h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), 
h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = 
launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, 0, + h_out, B, num_layers, layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + default: + break; + } + } + TORCH_CHECK(ok, "No suitable kernel.", " h_in=", h_in, " h_out=", h_out, + " dtype=", x.scalar_type(), " out_dtype=", y.scalar_type()); +} + +void dispatch_bgmv_low_level(torch::Tensor y, torch::Tensor x, torch::Tensor w, + torch::Tensor indicies, int64_t layer_idx, + float scale, int64_t h_in, int64_t h_out, + int64_t y_offset) { + CHECK_INPUT(y); + CHECK_INPUT(x); + CHECK_INPUT(w); + CHECK_INPUT(indicies); + + CHECK_DIM(2, y); + CHECK_DIM(2, x); + CHECK_DIM(4, w); + CHECK_DIM(1, indicies); + + int64_t B = x.size(0); + int64_t num_layers = w.size(1); + int64_t full_y_size = y.size(1); + CHECK_EQ(w.size(3), h_in); + CHECK_EQ(w.size(2), h_out); + CHECK_EQ(indicies.size(0), x.size(0)); + CHECK_EQ(y.size(0), x.size(0)); + const at::musa::OptionalMUSAGuard device_guard(device_of(x)); + bool ok = false; + if (h_in <= 128512 && h_out <= 128512) { + // TODO: See if we can get rid of this massive nested switch 
+ switch (x.scalar_type()) { + case at::ScalarType::Half: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch 
(w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (y.scalar_type()) { + case at::ScalarType::Half: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + 
static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::BFloat16: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + case at::ScalarType::Float: + switch (w.scalar_type()) { + case at::ScalarType::Half: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + case at::ScalarType::BFloat16: + ok = launch_bgmv_kernel(static_cast(y.data_ptr()), + static_cast(x.data_ptr()), + static_cast(w.data_ptr()), + indicies.data_ptr(), h_in, h_out, + y_offset, full_y_size, B, num_layers, + layer_idx, scale); + break; + default: + break; + } + break; + default: + break; + } + break; + default: + break; + } + } + TORCH_CHECK(ok, "No suitable kernel.", " h_in=", h_in, " h_out=", h_out, + " dtype=", x.scalar_type(), " out_dtype=", y.scalar_type()); +} + +} // namespace + +//====== pybind ====== + +#define DEFINE_pybind(name) m.def(#name, &name, #name); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("dispatch_bgmv", &dispatch_bgmv, 
"dispatch_bgmv"); + m.def("dispatch_bgmv_low_level", &dispatch_bgmv_low_level, + "dispatch_bgmv_low_level"); +} diff --git a/csrc_musa/pybind.cpp b/csrc_musa/pybind.cpp new file mode 100644 index 0000000..908fc16 --- /dev/null +++ b/csrc_musa/pybind.cpp @@ -0,0 +1,136 @@ +#include "cache.h" +#include "musa_utils.h" +#include "ops.h" +#include + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + // vLLM custom ops + pybind11::module ops = m.def_submodule("ops", "vLLM custom operators"); + + // Attention ops + ops.def( + "paged_attention_v1", + &paged_attention_v1, + "Compute the attention between an input query and the cached keys/values using PagedAttention."); + ops.def( + "paged_attention_v2", + &paged_attention_v2, + "PagedAttention V2."); + + // Activation ops + ops.def( + "silu_and_mul", + &silu_and_mul, + "Activation function used in SwiGLU."); + ops.def( + "gelu_and_mul", + &gelu_and_mul, + "Activation function used in GeGLU with `none` approximation."); + ops.def( + "gelu_tanh_and_mul", + &gelu_tanh_and_mul, + "Activation function used in GeGLU with `tanh` approximation."); + ops.def( + "gelu_new", + &gelu_new, + "GELU implementation used in GPT-2."); + ops.def( + "gelu_fast", + &gelu_fast, + "Approximate GELU implementation."); + + // Layernorm + ops.def( + "rms_norm", + &rms_norm, + "Apply Root Mean Square (RMS) Normalization to the input tensor."); + + ops.def( + "fused_add_rms_norm", + &fused_add_rms_norm, + "In-place fused Add and RMS Normalization"); + + // Rotary embedding + ops.def( + "rotary_embedding", + &rotary_embedding, + "Apply GPT-NeoX or GPT-J style rotary embedding to query and key"); + + ops.def( + "batched_rotary_embedding", + &batched_rotary_embedding, + "Apply GPT-NeoX or GPT-J style rotary embedding to query and key (supports multiple loras)"); + +// Quantization ops +#ifndef USE_ROCM +// ops.def("aqlm_gemm", &aqlm_gemm, "Quantized GEMM for AQLM"); +// ops.def("aqlm_dequant", &aqlm_dequant, "Decompression method for AQLM"); +// 
ops.def("awq_gemm", &awq_gemm, "Quantized GEMM for AWQ"); +// ops.def("marlin_gemm", &marlin_gemm, "Marlin Optimized Quantized GEMM for GPTQ"); +// ops.def("gptq_marlin_gemm", &gptq_marlin_gemm, "gptq_marlin Optimized Quantized GEMM for GPTQ"); +// ops.def("gptq_marlin_repack", &gptq_marlin_repack, "gptq_marlin repack from GPTQ"); +// ops.def("awq_dequantize", &awq_dequantize, "Dequantization for AWQ"); +#endif + +// ops.def("gptq_gemm", &gptq_gemm, "Quantized GEMM for GPTQ"); +// ops.def("gptq_shuffle", &gptq_shuffle, "Post processing for GPTQ"); +// ops.def("squeezellm_gemm", &squeezellm_gemm, "Quantized GEMM for SqueezeLLM"); +// ops.def("static_scaled_fp8_quant", &static_scaled_fp8_quant, "Compute FP8 quantized tensor for given scaling factor"); +// ops.def("dynamic_scaled_fp8_quant", &dynamic_scaled_fp8_quant, "Compute FP8 quantized tensor and scaling factor"); +// ops.def( +// "moe_align_block_size", +// &moe_align_block_size, +// "Aligning the number of tokens to be processed by each expert such that it is divisible by the block size."); + + // Cache ops + pybind11::module cache_ops = m.def_submodule("cache_ops", "vLLM cache ops"); + cache_ops.def( + "swap_blocks", + &swap_blocks, + "Swap in (out) the cache blocks from src to dst"); + cache_ops.def( + "copy_blocks", + ©_blocks, + "Copy the cache blocks from src to dst"); + cache_ops.def( + "reshape_and_cache", + &reshape_and_cache, + "Reshape the key and value tensors and cache them"); + cache_ops.def( + "reshape_and_cache_flash", + &reshape_and_cache_flash, + "Reshape the key and value tensors and cache them"); + cache_ops.def( + "convert_fp8", + &convert_fp8, + "Convert the key and value cache to fp8 data type"); + + // Cuda utils + pybind11::module cuda_utils = m.def_submodule("cuda_utils", "vLLM cuda utils"); + cuda_utils.def( + "get_device_attribute", + &get_device_attribute, + "Gets the specified device attribute."); + + cuda_utils.def( + "get_max_shared_memory_per_block_device_attribute", + 
&get_max_shared_memory_per_block_device_attribute, + "Gets the maximum shared memory per block device attribute."); + +#ifndef USE_ROCM + // Custom all-reduce kernels + pybind11::module custom_ar = m.def_submodule("custom_ar", "custom allreduce"); + custom_ar.def("init_custom_ar", &init_custom_ar, "init_custom_ar"); + custom_ar.def("should_custom_ar", &should_custom_ar, "should_custom_ar"); + custom_ar.def("all_reduce_reg", &all_reduce_reg, "all_reduce_reg"); + custom_ar.def("all_reduce_unreg", &all_reduce_unreg, "all_reduce_unreg"); + custom_ar.def("dispose", &dispose, "dispose"); + custom_ar.def("meta_size", &meta_size, "meta_size"); + custom_ar.def("register_buffer", ®ister_buffer, "register_buffer"); + custom_ar.def("get_graph_buffer_ipc_meta", &get_graph_buffer_ipc_meta, + "get_graph_buffer_ipc_meta"); + custom_ar.def("register_graph_buffers", ®ister_graph_buffers, + "register_graph_buffers"); +#endif + +} diff --git a/csrc_musa/quantization/aqlm/gemm_kernels.mu b/csrc_musa/quantization/aqlm/gemm_kernels.mu new file mode 100644 index 0000000..679f7dc --- /dev/null +++ b/csrc_musa/quantization/aqlm/gemm_kernels.mu @@ -0,0 +1,712 @@ +/* + * Modified by Neural Magic + * Adapted from https://github.com/Vahe1994/AQLM + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "torch_musa/csrc/core/MUSAStream.h" +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include +#include + + +namespace vllm { +namespace aqlm { + +__global__ void Code1x16MatVec( + const int4* __restrict__ A, + const int4* __restrict__ B, + int4* __restrict__ C, + const int4* __restrict__ codebook, + const int prob_m, + const int prob_k, + const int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long. + const int codebook_stride // as int4. +) { + int a_gl_stride = prob_k / 8 / 8; + int a_gl_rd = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + bool pred = a_gl_rd < prob_m; + + if (pred) + { + // advance to the correct codebook, this easy because we only multiply one column of the codebook. + auto codebook_size = &codebook_a_sizes.x; + while (a_gl_rd >= *codebook_size) + { + codebook += codebook_stride; + ++codebook_size; + } + } + + int b_gl_rd = 0; + int c_gl_wr = a_gl_rd; + a_gl_rd = a_gl_stride * a_gl_rd + threadIdx.x % 32; + int a_gl_end = a_gl_rd + a_gl_stride - threadIdx.x % 32; + + __shared__ int4 sh_b[32 * 9]; + float res = 0; + + int iters = (prob_k / 8 + 8 * 32 - 1) / (8 * 32); + while (iters--) { + // We pad shared memory to avoid bank conflicts during reads + __syncthreads(); + for (int i = threadIdx.x; i < 32 * 8; i += blockDim.x) { + if (b_gl_rd + i < prob_k / 8) + sh_b[9 * (i / 8) + i % 8] = B[b_gl_rd + i]; + } + __syncthreads(); + b_gl_rd += 32 * 8; + + int b_sh_rd = 9 * (threadIdx.x % 32); + if (pred && a_gl_rd < a_gl_end) { + const uint16_t* enc = reinterpret_cast(&A[a_gl_rd]); + #pragma unroll + for (int i = 0; i < 8; i++) { + uint32_t dec[4]; + // We bypass the L1 cache to avoid massive amounts of memory streaming that doesn't + // actually help us; this brings > 2x speedup. 
+ asm volatile ( + "ld.cg.global.v4.u32 {%0, %1, %2, %3}, [%4];" + : "=r"(dec[0]), "=r"(dec[1]), "=r"(dec[2]), "=r"(dec[3]) + : "l"((void*) &codebook[enc[i]]) + ); + half2* a = reinterpret_cast(&dec); + half2* b = reinterpret_cast(&sh_b[b_sh_rd]); + half2 res2 = {}; + #pragma unroll + for (int j = 0; j < 4; j++) + res2 = __hfma2(a[j], b[j], res2); + res += __half2float(res2.x) + __half2float(res2.y); + b_sh_rd++; + } + a_gl_rd += 32; + } + } + + if (pred) { + #pragma unroll + for (int i = 16; i > 0; i /= 2) + res += __shfl_down_sync(0xffffffff, res, i); + if (threadIdx.x % 32 == 0) + reinterpret_cast<__half*>(C)[c_gl_wr] = __float2half(res); + } +} + +__global__ void Code2x8MatVec( + const int4* __restrict__ A, + const int4* __restrict__ B, + int4* __restrict__ C, + const int4* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long. + const int codebook_stride // as int4. + +) { + int a_gl_stride = prob_k / 8 / 8; + int a_gl_rd = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + bool pred = a_gl_rd < prob_m; + + if (pred) + { + // advance to the correct codebook, this easy because we only multiply one column of the codebook. 
+ auto codebook_size = &codebook_a_sizes.x; + while (a_gl_rd >= *codebook_size) + { + codebook += codebook_stride; + ++codebook_size; + } + } + + int b_gl_rd = 0; + int c_gl_wr = a_gl_rd; + a_gl_rd = a_gl_stride * a_gl_rd + threadIdx.x % 32; + int a_gl_end = a_gl_rd + a_gl_stride - threadIdx.x % 32; + int lane = threadIdx.x % 8; + + extern __shared__ int4 sh[]; + int4* sh_b = sh; + int4* sh_code = sh_b + 32 * 9; + int4* sh_code0 = sh_code; + int4* sh_code1 = sh_code + 256 * 8; + + for (int i = threadIdx.x; i < 2 * 256; i += blockDim.x) { + int4 dec = codebook[i]; + #pragma unroll + for (int j = 0; j < 8; j++) + sh_code[8 * i + (j + lane) % 8] = dec; + } + __syncthreads(); + + float res = 0; + + int iters = (prob_k / 8 + 8 * 32 - 1) / (8 * 32); + while (iters--) { + // We pad shared memory to avoid bank conflicts during reads + __syncthreads(); + for (int i = threadIdx.x; i < 32 * 8; i += blockDim.x) { + if (b_gl_rd + i < prob_k / 8) + sh_b[9 * (i / 8) + i % 8] = B[b_gl_rd + i]; + } + __syncthreads(); + b_gl_rd += 32 * 8; + + int b_sh_rd = 9 * (threadIdx.x % 32); + if (pred && a_gl_rd < a_gl_end) { + const uint8_t* enc = reinterpret_cast(&A[a_gl_rd]); + #pragma unroll + for (int i = 0; i < 8; i++) { + half2* a0 = reinterpret_cast(&sh_code0[8 * enc[2 * i + 0] + lane]); + half2* a1 = reinterpret_cast(&sh_code1[8 * enc[2 * i + 1] + lane]); + half2* b = reinterpret_cast(&sh_b[b_sh_rd]); + half2 res2 = {}; + #pragma unroll + for (int j = 0; j < 4; j++) + res2 = __hfma2(__hadd2(a0[j], a1[j]), b[j], res2); + res += __half2float(res2.x) + __half2float(res2.y); + b_sh_rd++; + } + a_gl_rd += 32; + } + } + + if (pred) { + #pragma unroll + for (int i = 16; i > 0; i /= 2) + res += __shfl_down_sync(0xffffffff, res, i); + if (threadIdx.x % 32 == 0) + reinterpret_cast<__half*>(C)[c_gl_wr] = __float2half(res); + } +} + + +__global__ void Code1x16Dequant( + const int4* __restrict__ A, + int4* __restrict__ C, + const int4* __restrict__ codebook, + int prob_m, + int prob_k, + const 
int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long, sums to m. + const int codebook_stride // as int4 +) { + int a_gl_stride = prob_k / 8 / 8; + int a_gl_rd = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + bool pred = a_gl_rd < prob_m; + + if (pred) + { + // advance to the correct codebook, this easy because we only multiply one column of the codebook. + auto codebook_size = &codebook_a_sizes.x; + while (a_gl_rd >= *codebook_size) + { + codebook += codebook_stride; + ++codebook_size; + } + } + + a_gl_rd = a_gl_stride * a_gl_rd + threadIdx.x % 32; + int a_gl_end = a_gl_rd + a_gl_stride - threadIdx.x % 32; + + int c_gl_stride = prob_k / 8; + int c_gl_wr = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + c_gl_wr = c_gl_stride * c_gl_wr + (threadIdx.x % 32) * 8; + + int iters = (prob_k / 8 - 1) / (8 * 32) + 1; + while (iters--) { + if (pred && a_gl_rd < a_gl_end) { + const uint16_t* enc = reinterpret_cast(&A[a_gl_rd]); + #pragma unroll + for (int i = 0; i < 8; i++) { + int4 chunk; + auto dec = reinterpret_cast(&chunk); + // We bypass the L1 cache to avoid massive amounts of memory streaming that doesn't + // actually help us; this brings > 2x speedup. + asm volatile ( + "ld.cg.global.v4.u32 {%0, %1, %2, %3}, [%4];" + : "=r"(dec[0]), "=r"(dec[1]), "=r"(dec[2]), "=r"(dec[3]) + : "l"((void*) &codebook[enc[i]]) + ); + + C[a_gl_rd * 8 + i] = chunk; + } + } + a_gl_rd += 32; + } +} + + +__global__ void Code2x8Dequant( + const int4* __restrict__ A, + int4* __restrict__ C, + const int4* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long, corresponds to cols. 
+ const int codebook_stride // as int4 +) { + int a_gl_stride = prob_k / 8 / 8; + int a_gl_rd = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + bool pred = a_gl_rd < prob_m; + + if (pred) + { + // advance to the correct codebook, this easy because we only multiply one column of the codebook. + auto codebook_size = &codebook_a_sizes.x; + while (a_gl_rd >= *codebook_size) + { + codebook += codebook_stride; + ++codebook_size; + } + } + + a_gl_rd = a_gl_stride * a_gl_rd + threadIdx.x % 32; + int a_gl_end = a_gl_rd + a_gl_stride - threadIdx.x % 32; + int lane = threadIdx.x % 8; + + int c_gl_stride = prob_k / 8; + int c_gl_wr = (blockDim.x / 32) * blockIdx.x + (threadIdx.x / 32); + c_gl_wr = c_gl_stride * c_gl_wr + (threadIdx.x % 32) * 8; + + extern __shared__ int4 sh[]; + int4* sh_code = sh; + int4* sh_code0 = sh_code; + int4* sh_code1 = sh_code + 256 * 8; + + for (int i = threadIdx.x; i < 2 * 256; i += blockDim.x) { + int4 dec = codebook[i]; + #pragma unroll + for (int j = 0; j < 8; j++) + sh_code[8 * i + (j + lane) % 8] = dec; + } + __syncthreads(); + + float res = 0; + + int iters = (prob_k / 8 - 1) / (8 * 32) + 1; + while (iters--) { + if (pred && a_gl_rd < a_gl_end) { + const uint8_t* enc = reinterpret_cast(&A[a_gl_rd]); + #pragma unroll + for (int i = 0; i < 8; i++) { + int4 chunk; + half2* a0 = reinterpret_cast(&sh_code0[8 * enc[2 * i + 0] + lane]); + half2* a1 = reinterpret_cast(&sh_code1[8 * enc[2 * i + 1] + lane]); + #pragma unroll + for (int j = 0; j < 4; j++) + reinterpret_cast(&chunk)[j] = __hadd2(a0[j], a1[j]); + C[a_gl_rd * 8 + i] = chunk; + } + } + a_gl_rd += 32; + } +} + +inline int ceildiv(int a, int b) { + return (a + b - 1) / b; +} + +const int THREAD_M = 16; + +void code1x16_matvec_cuda( + const void* __restrict__ A, + const void* __restrict__ B, + void* __restrict__ C, + const void* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, + const int codebook_stride +) { + int sms; + musaDeviceGetAttribute(&sms, 
musaDevAttrMultiProcessorCount, 0); + int waves = 0; + int thread_m; + do { + waves++; + thread_m = ceildiv(prob_m, waves * sms); + } while (thread_m > THREAD_M); + + int blocks = ceildiv(prob_m, thread_m); + int threads = 32 * thread_m; + musaStream_t stream = at::musa::getCurrentMUSAStream().stream(); + Code1x16MatVec<<>>( + (const int4*) A, + (const int4*) B, + (int4*) C, + (const int4*) codebook, + prob_m, + prob_k, + codebook_a_sizes, + codebook_stride + ); +} + +void code2x8_matvec_cuda( + const void* __restrict__ A, + const void* __restrict__ B, + void* __restrict__ C, + const void* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, + const int codebook_stride +) { + int sms; + musaDeviceGetAttribute(&sms, musaDevAttrMultiProcessorCount, 0); + int waves = 0; + int thread_m; + do { + waves++; + thread_m = ceildiv(prob_m, waves * sms); + } while (thread_m > THREAD_M); + + int blocks = ceildiv(prob_m, thread_m); + int threads = 32 * thread_m; + int shared = 16 * (2 * 256 * 8 + 32 * 9); + musaFuncSetAttribute( + Code2x8MatVec, musaFuncAttributeMaxDynamicSharedMemorySize, shared + ); + musaStream_t stream = at::musa::getCurrentMUSAStream().stream(); + Code2x8MatVec<<>>( + (const int4*) A, + (const int4*) B, + (int4*) C, + (const int4*) codebook, + prob_m, + prob_k, + codebook_a_sizes, + codebook_stride + ); +} + +void code1x16_dequant_cuda( + const void* __restrict__ A, + void* __restrict__ C, + const void* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long. + const int codebook_stride // as int4. 
+) { + int sms; + musaDeviceGetAttribute(&sms, musaDevAttrMultiProcessorCount, 0); + int waves = 0; + int thread_m; + do { + waves++; + thread_m = ceildiv(prob_m, waves * sms); + } while (thread_m > THREAD_M); + + int blocks = ceildiv(prob_m, thread_m); + int threads = 32 * thread_m; + musaStream_t stream = at::musa::getCurrentMUSAStream().stream(); + Code1x16Dequant<<>>( + (const int4*) A, + (int4*) C, + (const int4*) codebook, + prob_m, + prob_k, + codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long. + codebook_stride // as int4. + ); +} + +// Dequantizes the code and codebook into weights. +void code2x8_dequant_cuda( + const void* __restrict__ A, + void* __restrict__ C, + const void* __restrict__ codebook, + int prob_m, + int prob_k, + const int4 codebook_a_sizes, // cumulative sizes of A spanning each codebook, at most 3 long, corresponds to cols. + const int codebook_stride // as int4 +) { + int sms; + musaDeviceGetAttribute(&sms, musaDevAttrMultiProcessorCount, 0); + int waves = 0; + int thread_m; + do { + waves++; + thread_m = ceildiv(prob_m, waves * sms); + } while (thread_m > THREAD_M); + + int blocks = ceildiv(prob_m, thread_m); + int threads = 32 * thread_m; + int shared = 16 * (2 * 256 * 8 + 32 * 9); + musaStream_t stream = at::musa::getCurrentMUSAStream().stream(); + + musaFuncSetAttribute( + Code2x8Dequant, musaFuncAttributeMaxDynamicSharedMemorySize, shared + ); + Code2x8Dequant<<>>( + (const int4*) A, + (int4*) C, + (const int4*) codebook, + prob_m, + prob_k, + codebook_a_sizes, + codebook_stride + ); +} + +int codebook_stride(const torch::Tensor& codebooks) +{ + return codebooks.stride(0) * codebooks.element_size() / sizeof(int4); +} + +void code1x16_matvec( + const torch::Tensor& A, + const torch::Tensor& B, + torch::Tensor& C, + const torch::Tensor& codebook, + const int4 codebook_a_sizes // cumulative sizes of A spanning each codebook, at most 3 long. 
+) { + const at::musa::OptionalMUSAGuard device_guard(device_of(A)); + int prob_m = C.size(0); + int prob_k = B.size(0); + + code1x16_matvec_cuda( + A.data_ptr(), + B.data_ptr(), + C.data_ptr(), + codebook.data_ptr(), + prob_m, + prob_k, + codebook_a_sizes, + codebook_stride(codebook) + ); +} + +torch::Tensor code1x16_matmat( + const torch::Tensor& input, + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& scales, + const int4 codebook_a_sizes, + const std::optional& bias) { + auto input_sizes = input.sizes(); + auto out_features = codes.size(0) * codebooks.size(2); + auto flat_input = input.reshape({-1, input.size(-1)}); + auto flat_output = torch::empty({flat_input.size(0), out_features}, + torch::TensorOptions() + .dtype(input.dtype()) + .device(input.device()) + ); + + for (int i = 0; i < flat_input.size(0); ++i) { + auto input_vec = flat_input.index({i}); + auto output_vec = flat_output.index({i}); + code1x16_matvec( + codes.squeeze(2), + input_vec, + output_vec, + codebooks, + codebook_a_sizes + ); + } + flat_output *= scales.flatten().unsqueeze(0); + + if (bias.has_value()) { + flat_output += bias->unsqueeze(0); + } + + auto output_sizes = input_sizes.vec(); + output_sizes.pop_back(); + output_sizes.push_back(-1); + auto output = flat_output.reshape(output_sizes); + return output; +} + +void code2x8_matvec( + const torch::Tensor& A, + const torch::Tensor& B, + torch::Tensor& C, + const torch::Tensor& codebook, + const int4 codebook_a_sizes +) { + const at::musa::OptionalMUSAGuard device_guard(device_of(A)); + int prob_m = C.size(0); + int prob_k = B.size(0); + code2x8_matvec_cuda( + A.data_ptr(), + B.data_ptr(), + C.data_ptr(), + codebook.data_ptr(), + prob_m, + prob_k, + codebook_a_sizes, + 2 * codebook_stride(codebook) + ); +} + +torch::Tensor code2x8_matmat( + const torch::Tensor& input, + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& scales, + const int4 codebook_a_sizes, + const 
std::optional& bias +) { + auto input_sizes = input.sizes(); + auto out_features = codes.size(0) * codebooks.size(2); + auto flat_input = input.reshape({-1, input.size(-1)}); + auto flat_output = torch::empty({flat_input.size(0), out_features}, + torch::TensorOptions() + .dtype(input.dtype()) + .device(input.device()) + ); + + for (int i = 0; i < flat_input.size(0); ++i) { + auto input_vec = flat_input.index({i}); + auto output_vec = flat_output.index({i}); + code2x8_matvec( + codes.squeeze(2), + input_vec, + output_vec, + codebooks, + codebook_a_sizes + ); + } + flat_output *= scales.flatten().unsqueeze(0); + if (bias.has_value()) { + flat_output += bias->unsqueeze(0); + } + + auto output_sizes = input_sizes.vec(); + output_sizes.pop_back(); + output_sizes.push_back(-1); + auto output = flat_output.reshape(output_sizes); + return output; +} + +// Accumulate the partition sizes. +int4 accumulate_sizes(const torch::Tensor& codebook_partition_sizes) +{ + int4 cumulative_sizes; + auto cumulative_size = &cumulative_sizes.x; + int i = 0; + int last = 0; + assert(codebook_partition_sizes.size(0) <= 4); + for (; i < codebook_partition_sizes.size(0); ++i, ++cumulative_size) + { + *cumulative_size = codebook_partition_sizes[i].item() + last; + last = *cumulative_size; + } + // fill in the rest with unreachable. 
+ for (; i < 4; ++i, ++cumulative_size) + { + *cumulative_size = last*10; + } + return cumulative_sizes; +} + +} // namespace aqlm +} // namespace vllm + + +torch::Tensor aqlm_gemm( + const torch::Tensor& input, + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& scales, + const torch::Tensor& codebook_partition_sizes, + const std::optional& bias +) +{ + int4 cumulative_sizes = vllm::aqlm::accumulate_sizes(codebook_partition_sizes); + + int const nbooks = codebooks.size(0) / codebook_partition_sizes.size(0); + int const entries = codebooks.size(1); + + if (nbooks == 1 && entries == (1 << 16)) + { + return vllm::aqlm::code1x16_matmat(input, codes, codebooks, scales, cumulative_sizes, bias); + } + if (nbooks == 2 && entries == (1 << 8)) + { + return vllm::aqlm::code2x8_matmat(input, codes, codebooks, scales, cumulative_sizes, bias); + } + + TORCH_CHECK(false, "AQLM with ", nbooks, " codebooks and ", entries, " entries is not currently supported.") + return {}; +} + +torch::Tensor aqlm_dequant( + const torch::Tensor& codes, + const torch::Tensor& codebooks, + const torch::Tensor& codebook_partition_sizes +) +{ + int4 cumulative_sizes = vllm::aqlm::accumulate_sizes(codebook_partition_sizes); + + int const nbooks = codebooks.size(0) / codebook_partition_sizes.size(0); + int const entries = codebooks.size(1); + + const at::musa::OptionalMUSAGuard device_guard(device_of(codes)); + int rows = codes.size(1); + int cols = codes.size(0); + + auto in_features = codes.size(1) * 8; + auto out_features = codes.size(0); + + assert(out_features = codebook_partition_sizes.sum().item()); + + auto weights = torch::empty({out_features, in_features}, + torch::TensorOptions() + .dtype(codebooks.dtype()) + .device(codebooks.device()) + ); + + if (nbooks == 1 && entries == (1 << 16)) + { + vllm::aqlm::code1x16_dequant_cuda( + codes.data_ptr(), + weights.data_ptr(), + codebooks.data_ptr(), + out_features, + in_features, + cumulative_sizes, + 
vllm::aqlm::codebook_stride(codebooks)); + + // if you wanted to flip to scaling the weights, (though it's 30%-ish slower and not consistent with gemv implementation.) + // weights *= scales.index({"...", 0, 0}); + + return weights; + } + + if (nbooks == 2 && entries == (1 << 8)) + { + vllm::aqlm::code2x8_dequant_cuda( + codes.data_ptr(), + weights.data_ptr(), + codebooks.data_ptr(), + out_features, + in_features, + cumulative_sizes, + vllm::aqlm::codebook_stride(codebooks)); + + // if you wanted to flip to scaling the weights, (though it's 30%-ish slower and not consistent with gemv implementation) + // weights *= scales.index({"...", 0, 0}); + + return weights; + } + + TORCH_CHECK(false, "AQLM with ", nbooks, " codebooks and ", entries, " entries is not currently supported.") + return {}; +} diff --git a/csrc_musa/quantization/awq/dequantize.muh b/csrc_musa/quantization/awq/dequantize.muh new file mode 100644 index 0000000..7b7f846 --- /dev/null +++ b/csrc_musa/quantization/awq/dequantize.muh @@ -0,0 +1,87 @@ +/* +Adapted from https://github.com/mit-han-lab/llm-awq +Modified from NVIDIA FasterTransformer: https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h +@article{lin2023awq, + title={AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration}, + author={Lin, Ji and Tang, Jiaming and Tang, Haotian and Yang, Shang and Dang, Xingyu and Han, Song}, + journal={arXiv}, + year={2023} +} +*/ + +#pragma once + +namespace vllm { +namespace awq { + +__device__ uint4 dequantize_s4_to_fp16x2(uint32_t const& source) +{ +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 750 + assert(false); +#else + uint4 result; + + uint32_t* h = reinterpret_cast(&result); + uint32_t const i4s = reinterpret_cast(source); + + // First, we extract the i4s and construct an intermediate fp16 number. 
+ static constexpr uint32_t immLut = (0xf0 & 0xcc) | 0xaa; + static constexpr uint32_t BOTTOM_MASK = 0x000f000f; + static constexpr uint32_t TOP_MASK = 0x00f000f0; + static constexpr uint32_t I4s_TO_F16s_MAGIC_NUM = 0x64006400; + + // Note that the entire sequence only requires 1 shift instruction. This is thanks to the register packing + // format and the fact that we force our integers to be unsigned, and account for this in the fp16 subtractions. + // In addition, I exploit the fact that sub and fma have the same throughput in order to convert elt_23 and + // elt_67 to fp16 without having to shift them to the bottom bits before hand. + + // Shift right by 8 to now consider elt_45 and elt_67. Issue first to hide RAW dependency if we issue + // immediately before required. + const uint32_t top_i4s = i4s >> 8; + // Extract elt_01 - (i4s & 0x000f000f) | 0x64006400 + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(h[0]) + : "r"(i4s), "n"(BOTTOM_MASK), "n"(I4s_TO_F16s_MAGIC_NUM), "n"(immLut)); + // Extract elt_23 (i4s & 0x00f000f0) | 0x64006400 + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(h[1]) + : "r"(i4s), "n"(TOP_MASK), "n"(I4s_TO_F16s_MAGIC_NUM), "n"(immLut)); + // Extract elt_45 (top_i4s & 0x000f000f) | 0x64006400 + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(h[2]) + : "r"(top_i4s), "n"(BOTTOM_MASK), "n"(I4s_TO_F16s_MAGIC_NUM), "n"(immLut)); + // Extract elt_67 (top_i4s & 0x00f000f0) | 0x64006400 + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(h[3]) + : "r"(top_i4s), "n"(TOP_MASK), "n"(I4s_TO_F16s_MAGIC_NUM), "n"(immLut)); + + // I use inline PTX below because I am not sure if the compiler will emit float2half instructions if I use the + // half2 ctor. In this case, I chose performance reliability over code readability. + + // This is the half2 {1032, 1032} represented as an integer. 
+ // static constexpr uint32_t FP16_TOP_MAGIC_NUM = 0x64086408; + // Haotian: subtract {1024, 1024} instead, we do not need to map to [-8, 7] + static constexpr uint32_t FP16_TOP_MAGIC_NUM = 0x64006400; + // This is the half2 {1 / 16, 1 / 16} represented as an integer. + static constexpr uint32_t ONE_SIXTEENTH = 0x2c002c00; + // This is the half2 {-72, -72} represented as an integer. + // static constexpr uint32_t NEG_72 = 0xd480d480; + // Haotian: Let's use {-64, -64}. + static constexpr uint32_t NEG_64 = 0xd400d400; + + // Finally, we construct the output numbers. + // Convert elt_01 + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[0]) : "r"(h[0]), "r"(FP16_TOP_MAGIC_NUM)); + // Convert elt_23 + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[1]) : "r"(h[1]), "r"(ONE_SIXTEENTH), "r"(NEG_64)); + // Convert elt_45 + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[2]) : "r"(h[2]), "r"(FP16_TOP_MAGIC_NUM)); + // Convert elt_67 + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[3]) : "r"(h[3]), "r"(ONE_SIXTEENTH), "r"(NEG_64)); + + return result; +#endif +} + +} // namespace awq +} // namespace vllm diff --git a/csrc_musa/quantization/awq/gemm_kernels.mu b/csrc_musa/quantization/awq/gemm_kernels.mu new file mode 100644 index 0000000..cbb717e --- /dev/null +++ b/csrc_musa/quantization/awq/gemm_kernels.mu @@ -0,0 +1,446 @@ +/* +Adapted from https://github.com/mit-han-lab/llm-awq +@article{lin2023awq, + title={AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration}, + author={Lin, Ji and Tang, Jiaming and Tang, Haotian and Yang, Shang and Dang, Xingyu and Han, Song}, + journal={arXiv}, + year={2023} +} + */ + + +#include +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include "dequantize.cuh" + +#include + +namespace vllm { +namespace awq { + +// Pack two half values. 
+static inline __device__ __host__ unsigned +__pack_half2(const half x, const half y) { + unsigned v0 = *((unsigned short *)&x); + unsigned v1 = *((unsigned short *)&y); + return (v1 << 16) | v0; +} + +template +__global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16nXk32( + int G, + int split_k_iters, + half* __restrict__ A, + int* __restrict__ B, + half* __restrict__ scaling_factors, + int* __restrict__ zeros, + int M, + int IC, + int OC, + half* __restrict__ C) +{ + // Only support matrix n = 64 or 128 + assert(N == 64 || N == 128); +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 750 + assert(false); +#else + static constexpr uint32_t ZERO = 0x0; + float C_warp[32]; + __shared__ half A_shared[16 * (32 + 8)]; + __shared__ half B_shared[32 * (N + 8)]; + + __shared__ half scaling_factors_shared[N]; + __shared__ half zeros_shared[N]; + + int j_factors1 = ((OC + N - 1) / N); + int blockIdx_x = 0; + int blockIdx_y = blockIdx.x % ((M + 16 - 1) / 16 * j_factors1); + int blockIdx_z = blockIdx.x / ((M + 16 - 1) / 16 * j_factors1); + + half A_shared_warp[8]; + half B_shared_warp[N / 4]; + for (int j_0_4_init = 0; j_0_4_init < N / 32; ++j_0_4_init) { + for (int i = 0; i < 8; ++i) { + C_warp[(j_0_4_init * 8) + i] = 0.0; + } + } + + static constexpr int row_stride_warp = 32 * 8 / 32; + static constexpr int row_stride = 2 * 32 * 8 / N; + bool ld_zero_flag = (threadIdx.y * 32 + threadIdx.x) * 8 < N; + // TODO: Haotian: blockIdx_y / j_factors1 in A loading to support bsz > 16 + bool ld_A_flag = (blockIdx_y / j_factors1 * 16 + threadIdx.y * row_stride_warp + threadIdx.x * 8 / 32) < M; // threadIdx.y is warp_id + // bool wb_C_flag = (threadIdx.x / 4) < M; + + half* A_ptr = A + + (((int)blockIdx_y) / j_factors1 * 16 + (((int)threadIdx.y) * row_stride_warp) + ((int)threadIdx.x) / (32 / 8)) * IC + + (((int)threadIdx.x) % (32 / 8)) * 8; + + int* B_ptr = B + + ((int)threadIdx.y) * (OC / 8) * (256 / N) + + (((int)threadIdx.x) / (N / 8)) * (OC / 8) + + (((int)blockIdx_y) % 
j_factors1) * (N / 8) + + (((int)threadIdx.x) % (N / 8)) * 1; +// Why * 1 in the above line? + + half* A_shared_ptr = A_shared + + ((int)threadIdx.y) * row_stride_warp * (32 + 8) + + (((int)threadIdx.x) / (32 / 8)) * (32 + 8) + + (((int)threadIdx.x) % (32 / 8) ) * 8; + + half* B_shared_ptr = B_shared + + ((int)threadIdx.y) * (row_stride / 2) * (N + 8) + + (((int)threadIdx.x) / (N / 8)) * (N + 8) + + (((int)threadIdx.x) % (N / 8)) * 8; + + int* zeros_ptr = zeros + + (((int)blockIdx_y) % j_factors1) * (N / 8) + + ((int)threadIdx.x) % (N / 8); + + half* scaling_factors_ptr = scaling_factors + + (((int)blockIdx_y) % j_factors1) * N + + (((int)threadIdx.x) % (N / 8)) * 8; + + half* C_ptr = C + + static_cast(blockIdx_z) * M * OC // blockIdz.x -> split_k dim + + (((int)blockIdx_y) % j_factors1) * N + + ((int)threadIdx.y) * (N / 2) + + (((int)threadIdx.x) % 4) * 2; + + // preload s.f. and zeros + int k_bound = (IC / 32 + split_k_iters - 1) / split_k_iters; + if ((k_bound - 1) * split_k_iters * 32 + blockIdx_z * 32 >= IC) k_bound -= 1; + for (int _k_0_0 = 0; _k_0_0 < k_bound; ++_k_0_0) { + int k_0_0 = _k_0_0 * split_k_iters + blockIdx_z; + __syncthreads(); + // TODO: Haotian: blockIdx_y / j_factors1 in A loading to support bsz > 16 + if (ld_A_flag) + { + *(uint4*)(A_shared_ptr) = *(uint4*)(A_ptr + (k_0_0 * 32)); + } + else + { + *(uint4*)(A_shared_ptr) = make_uint4(0, 0, 0, 0); + } + + // for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 2; ++ax0_ax1_fused_0) { + uint32_t zeros_loaded = *(uint32_t*)(zeros_ptr + k_0_0 * 32 / G * (OC / 8)); + uint4 B_loaded_zero = dequantize_s4_to_fp16x2(zeros_loaded); + uint4 B_loaded_scale = *(uint4*)(scaling_factors_ptr + k_0_0 * 32 / G * (OC)); + /* + if (blockIdx_z == 0 && blockIdx_y == 0 && k_0_0 == 0 && threadIdx.x == 0 && threadIdx.y == 0){ + printf("%x %x %x %x %x %x %x %x\n", B_loaded_scale.x, B_loaded_scale.y, B_loaded_scale.z, B_loaded_scale.w, B_loaded_zero.x, B_loaded_zero.y, B_loaded_zero.z, B_loaded_zero.w); + } + */ + // uint4 
B_loaded_scale = make_uint4(0, 0, 0, 0); + int* B_ptr_local = B_ptr + k_0_0 * 32 * (OC / 8); + + for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < N / 16; ++ax0_ax1_fused_0) { + + // B: 32 x 136 (128+8) float16 + // each warp: 32 x 4 + // each thr: read 32 bit -> convert to 8xFP16 (a UINT4) -> scale and minus zero -> WB UINT4 + // *(uint4*)(B_shared + ((((ax0_ax1_fused_0 * 544) + (((int)threadIdx.y) * 272)) + ((((int)threadIdx.x) >> 4) * 136)) + ((((int)threadIdx.x) & 15) * 8))) = *(uint4*)(B + ((((((k_0_0 * 163840) + (ax0_ax1_fused_0 * 20480)) + (((int)threadIdx.y) * 10240)) + ((((int)threadIdx.x) >> 4) * 5120)) + (((int)blockIdx_y) * 128)) + ((((int)threadIdx.x) & 15) * 8))); + // row stride in shared memory: (NWARPS * 32 * 8 / cta_N) + uint32_t B_loaded = *(uint32_t*)(B_ptr_local + ax0_ax1_fused_0 * row_stride * (OC / 8)); + uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); + //uint4 B_loaded_zero = *(uint4*)(zeros_shared + (threadIdx.x % (cta_N / 8)) * 8); + + // uint4 B_loaded_scale = *(uint4*)(scaling_factors_shared + (threadIdx.x % (cta_N / 8)) * 8); + // - zero and * scale + // TODO (Haotian): can save 4 assembly instructions if sormulate as deq = q * scale - zero * scale. 
+ asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_zero.x)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_scale.x), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_zero.y)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_scale.y), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_zero.z)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_scale.z), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_zero.w)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_scale.w), "r"(ZERO)); + /* + if (ax0_ax1_fused_0 == 0 && blockIdx_z == 0 && blockIdx_y == 0 && k_0_0 == 0 && threadIdx.x == 17 && threadIdx.y == 0){ + printf("[x] %X %X %X %X\n", B_loaded_fp16.x, B_loaded_fp16.y, B_loaded_fp16.z, B_loaded_fp16.w); + } + */ + + // write back + *(uint4*)(B_shared_ptr + ax0_ax1_fused_0 * row_stride * (N + 8)) = B_loaded_fp16; + } + __syncthreads(); + + for (int k_0_1 = 0; k_0_1 < 2; ++k_0_1) { + { + unsigned int addr; + __asm__ __volatile__( + "{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n" + : "=r"(addr) + : "l"((void *)((&(A_shared[(k_0_1 * 16)])) + (((((int)threadIdx.x) & 15) * 40) + ((((int)threadIdx.x) >> 4) * 8)))) + ); + + + __asm__ __volatile__( + "ldmatrix.sync.aligned.m8n8.x4.shared.b16" + "{%0, %1, %2, %3}, [%4];\n" + : "=r"(((unsigned *)(A_shared_warp + 0))[0]), "=r"(((unsigned *)(A_shared_warp + 0))[1]), "=r"(((unsigned *)(A_shared_warp + 0))[2]), "=r"(((unsigned *)(A_shared_warp + 0))[3]) + : "r"(addr) + ); + } + + for (int ax1_0 = 0; ax1_0 < N / 32; ++ax1_0) { + 
{ + unsigned int addr; + __asm__ __volatile__( + "{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n" + : "=r"(addr) + : "l"((void *)((&(B_shared[(((k_0_1 * (N * 16 + 128)) + (((int)threadIdx.y) * (N / 2))) + (ax1_0 * 16))])) + (((((int)threadIdx.x) & 15) * (N + 8)) + ((((int)threadIdx.x) >> 4) * 8)))) + ); + __asm__ __volatile__( + "ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16" + "{%0, %1, %2, %3}, [%4];\n" + : "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[0]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[1]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[2]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[3]) + : "r"(addr) + ); + } + } + for (int j_0_4 = 0; j_0_4 < N / 32; ++j_0_4) { +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ == 750 + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" + : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); + } + + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" + : "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 
8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); + } + + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" + : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); + } + + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" + : "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); + } +#else + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n" + : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned 
*)(B_shared_warp + (j_0_4 * 8)))[0]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); + } + + { + __asm__ __volatile__( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" + "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n" + : "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) + : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[0]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); + } + +#endif + } + } + } + +// TODO: Shang: Hoist loop invariance. 
+ for (int ax1_0_1 = 0; ax1_0_1 < 4; ++ax1_0_1) { + for (int local_id = 0; local_id < 8; ++local_id) { + int row_offset = (((int)blockIdx_y) / j_factors1) * 16 + ((int)threadIdx.x) / 4 + (local_id % 4) / 2 * 8; + if (row_offset < M) + { + *(C_ptr + ax1_0_1 * 16 + row_offset * OC + (local_id / 4) * 8 + local_id % 2) = __float2half(C_warp[(ax1_0_1 * 8) + local_id]); + } + } + } +#endif +} + +__global__ void __launch_bounds__(64) dequantize_weights( + int* __restrict__ B, + half* __restrict__ scaling_factors, + int* __restrict__ zeros, + half* __restrict__ C, + int G +) +{ + int j_factors1 = 4; + int row_stride2 = 4; + int split_k_iters = 1; + static constexpr uint32_t ZERO = 0x0; + half B_shared[32 * (128 + 8)]; + + half* B_shared_ptr2 = B_shared; + + half B_shared_warp[32]; + int OC = 512; + + int N = blockDim.x * gridDim.x; // 2 + int col = (blockIdx.x * blockDim.x + threadIdx.x); + int row = blockIdx.y * blockDim.y + threadIdx.y; + int index1 = 8 * col + 8 * row * N; + half* C_ptr2 = C + index1; + + int index2 = col + row * N; + int* B_ptr2 = B + index2; + + int index3 = col + (int)(row / G) * N; + int* zeros_ptr2 = zeros + index3; + int index4 = 8 * col + (int)(row / G) * N * 8; + half* scaling_factors_ptr2 = scaling_factors + index4; + + uint32_t zeros_loaded = *(uint32_t*)(zeros_ptr2); + uint4 B_loaded_zero = dequantize_s4_to_fp16x2(zeros_loaded); + uint4 B_loaded_scale = *(uint4*)(scaling_factors_ptr2); + + uint32_t B_loaded = *(uint32_t*)B_ptr2; + uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_zero.x)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_scale.x), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_zero.y)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), 
"r"(B_loaded_scale.y), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_zero.z)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_scale.z), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_zero.w)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_scale.w), "r"(ZERO)); + + *(uint4*)B_shared_ptr2 = B_loaded_fp16; + + for (int i = 0; i < 8; ++i) { + *(C_ptr2 + i) = B_shared[i]; + } +} + +} // namespace awq +} // namespace vllm + +torch::Tensor awq_dequantize( + torch::Tensor _kernel, + torch::Tensor _scaling_factors, + torch::Tensor _zeros, + int split_k_iters, + int thx, + int thy) +{ + int in_c = _kernel.size(0); + int qout_c = _kernel.size(1); + int out_c = qout_c * 8; + int G = in_c / _scaling_factors.size(0); + + int x_thread = thx; + int y_thread = thy; + + int x_blocks = 1; + int y_blocks = 1; + if (thx==0) { + x_thread = qout_c; + } + if (thy==0) { + y_thread = in_c; + } + if (thx==0 && thy==0) { + x_thread = 8; + y_thread = 8; + x_blocks = (int)(qout_c / 8); + y_blocks = (int)(in_c / 8); + } + + const at::musa::OptionalMUSAGuard device_guard(device_of(_scaling_factors)); + + auto options = torch::TensorOptions().dtype(_scaling_factors.dtype()).device(_scaling_factors.device()); + at::Tensor _de_kernel = torch::empty({in_c, out_c}, options); + + auto kernel = reinterpret_cast(_kernel.data_ptr()); + auto de_kernel = reinterpret_cast(_de_kernel.data_ptr()); + auto scaling_factors = reinterpret_cast(_scaling_factors.data_ptr()); + auto zeros = reinterpret_cast(_zeros.data_ptr()); + + dim3 num_blocks(x_blocks, y_blocks); + dim3 threads_per_block(x_thread, y_thread); + + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + vllm::awq::dequantize_weights<<>>( + kernel, scaling_factors, zeros, de_kernel, G); + 
+ return _de_kernel; +} + +// in_feats: M, IC [float16] +// kernel: IC, OC // 8 [int32] -> cast to IC, OC [uint4b] +// scaling_factors: IC // G, OC [float16] +// zeros: IC // G, OC // 8 [int32] -> cast to IC // G, OC [uint4b] +// assume that batch_size < 16 for now + +torch::Tensor awq_gemm( + torch::Tensor _in_feats, + torch::Tensor _kernel, + torch::Tensor _scaling_factors, + torch::Tensor _zeros, + int split_k_iters) +{ + int num_in_feats = _in_feats.size(0); + int num_in_channels = _in_feats.size(1); + const at::musa::OptionalMUSAGuard device_guard(device_of(_in_feats)); + + auto options = torch::TensorOptions().dtype(_in_feats.dtype()).device(_in_feats.device()); + at::Tensor _out_feats = torch::empty({split_k_iters, num_in_feats, _kernel.size(1) * 8}, options); + int num_out_feats = _out_feats.size(-2); + int num_out_channels = _out_feats.size(-1); + + auto in_feats = reinterpret_cast(_in_feats.data_ptr()); + auto kernel = reinterpret_cast(_kernel.data_ptr()); + auto out_feats = reinterpret_cast(_out_feats.data_ptr()); + auto scaling_factors = reinterpret_cast(_scaling_factors.data_ptr()); + auto zeros = reinterpret_cast(_zeros.data_ptr()); + int group_size = num_in_channels / _scaling_factors.size(0); + + if (num_out_channels % 64 != 0) + throw std::invalid_argument("OC is not multiple of cta_N = 64"); + if (num_out_channels % 8 != 0) + throw std::invalid_argument("OC is not multiple of pack_num = 8"); + if (group_size % 32 != 0) + throw std::invalid_argument("Group size should be a multiple of 32"); + if (num_out_channels % group_size != 0) + throw std::invalid_argument("OC is not multiple of Group size"); + + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + if (num_out_channels % 128 == 0) + { + int j_factors1 = num_out_channels / 128 / 1; + dim3 num_blocks((num_out_feats + 16 - 1) / 16 * j_factors1 * split_k_iters); + // threadIdx.x: 32 + // threadIdx.y: i_factors[2] * j_factors[2] + dim3 threads_per_block(32, 2); + 
vllm::awq::gemm_forward_4bit_cuda_m16nXk32<128><<>>( + group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, + num_out_channels, out_feats); + } + else if (num_out_channels % 64 == 0) + { + int j_factors1 = num_out_channels / 64 / 1; + dim3 num_blocks(1 * (num_out_feats + 16 - 1) / 16 * j_factors1 * split_k_iters); + + // threadIdx.x: 32 + // threadIdx.y: i_factors[2] * j_factors[2] + dim3 threads_per_block(32, 2); + vllm::awq::gemm_forward_4bit_cuda_m16nXk32<64><<>>( + group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, + num_out_channels, out_feats); + } + return _out_feats.sum(0); +} diff --git a/csrc_musa/quantization/fp8/amd_detail/hip_float8.h b/csrc_musa/quantization/fp8/amd_detail/hip_float8.h new file mode 100644 index 0000000..87c7c9c --- /dev/null +++ b/csrc_musa/quantization/fp8/amd_detail/hip_float8.h @@ -0,0 +1,167 @@ +#pragma once + +#ifdef __HIPCC__ +#include +#else +#include +#include +#include +#include +#endif + +#include "hip_float8_impl.h" + +struct alignas(1) hip_fp8 +{ + struct from_bits_t + { + }; + HIP_FP8_HOST_DEVICE static constexpr from_bits_t from_bits() { return from_bits_t(); } + uint8_t data; + + hip_fp8() = default; + HIP_FP8_HOST_DEVICE constexpr hip_fp8(const hip_fp8&) = default; + HIP_FP8_HOST_DEVICE constexpr hip_fp8(uint8_t v) = delete; + explicit HIP_FP8_HOST_DEVICE constexpr hip_fp8(uint8_t v, from_bits_t) + : data(v) + { + } + +#ifdef __HIP__MI300__ + // NOTE: ON-DEVICE... 
always optimal bias + explicit HIP_FP8_DEVICE hip_fp8(float v) + : data(hip_fp8_impl::to_fp8_from_fp32(v)) + { + } + + explicit HIP_FP8_DEVICE hip_fp8(_Float16 v) + : hip_fp8(static_cast(v)) + { + } + + // Host only implementation using s/w simulation + explicit HIP_FP8_HOST +#else // __HIP__MI300__ + // both Host and DEVICE for non-MI300 using s/w simulation + explicit HIP_FP8_HOST_DEVICE +#endif // __HIP__MI300__ + hip_fp8(float v) + { + data = hip_fp8_impl::to_float8<4, 3, float, true /*negative_zero_nan*/, true /*clip*/>(v); + } + + explicit HIP_FP8_HOST_DEVICE hip_fp8(double v) + : hip_fp8(static_cast(v)) + { + } + +#ifdef __HIP__MI300__ + // upcast using device specific intrinsic + explicit inline HIP_FP8_DEVICE operator float() const + { + float fval; + uint32_t i32val = static_cast(data); + + // upcast + asm volatile("v_cvt_f32_fp8 %0, %1 src0_sel:BYTE_0" : "=v"(fval) : "v"(i32val)); + + return fval; + } + + explicit inline HIP_FP8_HOST operator float() const +#else // __HIP__MI300__ + explicit inline HIP_FP8_HOST_DEVICE operator float() const +#endif // __HIP__MI300__ + { + return hip_fp8_impl::from_float8<4, 3, float, true /*negative_zero_nan*/>(data); + } +}; + +namespace std +{ +inline hip_fp8 sin(hip_fp8 a) +{ + return hip_fp8(sinf(float(a))); +} +inline hip_fp8 cos(hip_fp8 a) +{ + return hip_fp8(cosf(float(a))); +} +HIP_FP8_HOST_DEVICE constexpr hip_fp8 real(const hip_fp8& a) +{ + return a; +} +} // namespace std + +// Special operator overloading +inline std::ostream& operator<<(std::ostream& os, const hip_fp8& f8) +{ + return os << float(f8); +} + +// all + operator overloading with mixed types +// mixed types, always converts to f32, does computation in f32, and returns float +inline HIP_FP8_HOST_DEVICE float operator+(const float fa, hip_fp8 b) +{ + return (fa + float(b)); +} + +inline HIP_FP8_HOST_DEVICE float operator+(hip_fp8 a, const float fb) +{ + return (float(a) + fb); +} + +inline HIP_FP8_HOST_DEVICE hip_fp8 operator+(hip_fp8 a, hip_fp8 b) 
+{ + return hip_fp8(float(a) + float(b)); +} + +inline HIP_FP8_HOST_DEVICE hip_fp8& operator+=(hip_fp8& a, hip_fp8 b) +{ + return a = hip_fp8(float(a) + float(b)); +} + +// overloading multiplication, always returns float, +inline HIP_FP8_HOST_DEVICE float operator*(hip_fp8 a, hip_fp8 b) +{ + return float(a) * float(b); +} + +inline HIP_FP8_HOST_DEVICE float operator*(float a, hip_fp8 b) +{ + return (a * float(b)); +} + +inline HIP_FP8_HOST_DEVICE float operator*(hip_fp8 a, float b) +{ + return (float(a) * b); +} + +inline HIP_FP8_HOST_DEVICE float operator*(int32_t a, hip_fp8 b) +{ + return ((float)a * float(b)); +} + +inline HIP_FP8_HOST_DEVICE float operator*(double a, hip_fp8 b) +{ + return ((float)a * float(b)); +} + +// overloading for compare +inline HIP_FP8_HOST_DEVICE bool operator==(hip_fp8 a, hip_fp8 b) +{ + return (a.data == b.data); +} +inline HIP_FP8_HOST_DEVICE bool operator!=(hip_fp8 a, hip_fp8 b) +{ + return (a.data != b.data); +} + +inline HIP_FP8_HOST_DEVICE bool operator>=(hip_fp8 a, hip_fp8 b) +{ + return static_cast(a) >= static_cast(b); +} +inline HIP_FP8_HOST_DEVICE bool operator>(hip_fp8 a, hip_fp8 b) +{ + return static_cast(a) > static_cast(b); +} diff --git a/csrc_musa/quantization/fp8/amd_detail/hip_float8_impl.h b/csrc_musa/quantization/fp8/amd_detail/hip_float8_impl.h new file mode 100644 index 0000000..0e8c924 --- /dev/null +++ b/csrc_musa/quantization/fp8/amd_detail/hip_float8_impl.h @@ -0,0 +1,316 @@ +#pragma once + +#if defined(__HIPCC__) && (defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)) +#define __HIP__MI300__ +#endif + +#ifdef __HIPCC__ +#define HIP_FP8_HOST_DEVICE __host__ __device__ +#define HIP_FP8_HOST __host__ +#define HIP_FP8_DEVICE __device__ +#else +#define HIP_FP8_HOST_DEVICE +#define HIP_FP8_HOST +#define HIP_FP8_DEVICE +#endif + +namespace hip_fp8_impl +{ + +#ifdef __HIP__MI300__ +HIP_FP8_DEVICE uint8_t to_fp8_from_fp32(float v) +{ + uint8_t i8data; + union { + float fval; + uint32_t i32val; + 
uint8_t i8val[4]; // NOTE: not endian independent + } val; + + uint32_t ival = 0; + val.fval = v; + + if ((val.i32val & 0x7F800000) != 0x7F800000) { /// propagate NAN/INF, no clipping + val.fval = __builtin_amdgcn_fmed3f(val.fval, 240.0, -240.0); + } + + ival = __builtin_amdgcn_cvt_pk_fp8_f32(val.fval, val.fval, ival, + false); // false -> WORD0 + val.i32val = ival; + i8data = val.i8val[0]; + + return i8data; +} +#endif // __HIP__MI300__ + +HIP_FP8_HOST inline int clz(uint32_t x) +{ + return __builtin_clz(x); +} +#if defined(__HIPCC__) || defined(__MUSA_ARCH__) +HIP_FP8_DEVICE inline int clz(uint32_t x) +{ + return __clz(x); +} +#endif + +template +HIP_FP8_HOST_DEVICE uint8_t to_float8(T _x, bool stoch = false, uint32_t rng = 0) +{ +#ifdef __HIPCC__ + constexpr bool is_half = std::is_same::value; +#else + constexpr bool is_half = false; +#endif + constexpr bool is_float = std::is_same::value; + static_assert(wm + we == 7, "wm+we==7"); + static_assert(is_half || is_float, "Only half and float can be cast to f8"); + + const int mfmt = (sizeof(T) == 4) ? 23 : 10; + uint32_t x; + if (sizeof(T) == 4) { + x = reinterpret_cast(_x); + } else { + x = reinterpret_cast(_x); + } + + uint32_t head, mantissa; + int exponent, bias; + uint32_t sign; + + if (sizeof(T) == 4) { + head = x & 0xFF800000; + mantissa = x & 0x7FFFFF; + exponent = (head >> 23) & 0xFF; + sign = head >> 31; + bias = 127; + } else { + head = x & 0xFC00; + mantissa = x & 0x3FF; + exponent = (head >> 10) & 0x1F; + sign = head >> 15; + bias = 15; + } + + uint32_t signed_inf = (sign << 7) + (((1 << we) - 1) << wm); + + // Deal with inf and NaNs + if (negative_zero_nan) { + if (sizeof(T) == 4) { + if ((x & 0x7F800000) == 0x7F800000) { + return 0x80; + } + } else { + // if(__hisinf(x) || __hisnan(x)) + if ((x & 0x7C00) == 0x7C00) { + return 0x80; + } + } + } else { + if (sizeof(T) == 4) { + if ((x & 0x7F800000) == 0x7F800000) { + return signed_inf + (mantissa != 0 ? 
1 : 0); + } + } else { + if ((x & 0x7C00) == 0x7C00) { + return signed_inf + (mantissa != 0 ? 1 : 0); + } + } + } + if (x == 0) { + return 0; + } + + // First need to check if it is normal or denorm as there is a difference of + // implicit 1 Then need to adjust the exponent to align with the F8 exponent, + // in the meanwhile, shift The mantissa. Then for stochastic rounding, add rng + // to mantissa and truncate. And for RNE, no need to add rng. Then probably + // need to check whether there is carry and adjust exponent and mantissa again + + // For IEEE bias mode, the bias is 2^(k-1) -1 where k is the width of exponent + // bits + const int f8_bias = (1 << (we - 1)) - 1 + (negative_zero_nan ? 1 : 0); + const int f8_denormal_act_exponent = 1 - f8_bias; // actual exponent of f8 denormal + // act_exponent is the actual exponent of fp32/fp16 (after subtracting bias) + // f8_exponent is the converted f8 exponent with bias encoding + // exponent_diff is the diff between fp32/fp16 exponent and f8 exponent, + // the difference needs to be adjusted and mantissa shifted + int act_exponent, f8_exponent, exponent_diff; + + if (exponent == 0) { // fp32/fp16 is in denormal. + /* fp32 denormal is below 2^-127 so it is usually not a concern here, we +mostly concern fp16 here. In this case, f8 is usually in denormal. But there +could be exceptions. fp16 denormal has exponent bias 15 while bf8 with NANOO has +exponent bias 16. It means that there are some numbers in fp16 denormal but they +are bf8 (NANOO) normals - smallest bf8 (NANOO) normal is 2^-15. fp16 numbers +where exponent==0 (actual exponent -14) and highest bit of mantissa is 1 are bf8 +(NANOO) normal. 
In this case, the fp16 mantissa should be shifted left by 1 */ + act_exponent = exponent - bias + 1; + exponent_diff = f8_denormal_act_exponent - act_exponent; // actual exponent is exponent-bias+1 as it is denormal + } else { // fp32/fp16 is normal with implicit 1 + act_exponent = exponent - bias; + if (act_exponent <= f8_denormal_act_exponent) { + /* This is the case where fp32/fp16 is normal but it is in f8 denormal + range. For example fp8 nanoo mode, denormal exponent is -7, but if the + fp32/fp16 actual exponent is -7, it is actually larger due to the implicit 1, + Therefore it needs to be adjusted to -6 and the mantissa shifted right by 1. + So for fp32/fp16, exponent -8 is the cut point to convert to fp8 nanoo */ + exponent_diff = f8_denormal_act_exponent - act_exponent; + } else { // both fp32/fp16 and f8 are in normal range + exponent_diff = 0; // exponent_diff=0 does not mean there is no difference + // for this case, + // act_exponent could be larger. Just that it does not need to shift the mantissa + } + mantissa += (1 << mfmt); // Add the implicit 1 into mantissa + } + + bool midpoint = (mantissa & ((1 << (mfmt - wm + exponent_diff)) - 1)) == + static_cast(1 << (mfmt - wm + exponent_diff - 1)); + /* This part is a bit tricky. The judgment of whether it is a tie needs to be + done before we shift right as shift right could rip off some residual part + and make something not midpoint look like midpoint. For example, the fp16 + number 0x1002 (0 00100 0000000010), it is larger than midpoint, but after + shift right by 4 bits, it would look like midpoint. +*/ + + if (exponent_diff > 0) { + mantissa >>= exponent_diff; + } else if (exponent_diff == -1) { + mantissa <<= -exponent_diff; + } + bool implicit_one = mantissa & (1 << mfmt); + // if there is no implicit 1, it means the f8 is denormal and need to adjust + // to denorm exponent + f8_exponent = (act_exponent + exponent_diff) /*actual f8 exponent*/ + f8_bias - (implicit_one ? 
0 : 1); + + // Now we have the exponent and mantissa adjusted + uint32_t drop_mask = (1 << (mfmt - wm)) - 1; + bool odd = mantissa & (1 << (mfmt - wm)); // if the least significant bit that + // is not truncated is 1 + mantissa += (stoch ? rng : (midpoint ? (odd ? mantissa : mantissa - 1) : mantissa)) & drop_mask; + + // Now we deal with overflow + if (f8_exponent == 0) { + if ((1 << mfmt) & mantissa) { + f8_exponent = 1; // denormal overflow to become normal, promote exponent + } + } else { + if ((1 << (mfmt + 1)) & mantissa) { + mantissa >>= 1; + f8_exponent++; + } + } + + mantissa >>= (mfmt - wm); + + // above range: quantize to maximum possible float of the same sign + const int max_exp = (1 << we) - (negative_zero_nan ? 1 : 2); + if (f8_exponent > max_exp) { + if (clip) { + mantissa = (1 << wm) - 1; + f8_exponent = max_exp; + } else { + return signed_inf; + } + } + + if (f8_exponent == 0 && mantissa == 0) { + return negative_zero_nan ? 0 : (sign << 7); + } + mantissa &= (1 << wm) - 1; + return (sign << 7) | (f8_exponent << wm) | mantissa; +} + +template +inline HIP_FP8_HOST_DEVICE T from_float8(uint8_t x) +{ +#ifdef __HIPCC__ + constexpr bool is_half = std::is_same::value; +#else + constexpr bool is_half = false; +#endif + constexpr bool is_float = std::is_same::value; + static_assert(is_half || is_float, "only half and float are supported"); + + constexpr int weo = is_half ? 5 : 8; + constexpr int wmo = is_half ? 10 : (is_float ? 
23 : 7); + + T fInf, fNegInf, fNaN, fNeg0; + +#ifdef __HIPCC__ + if (is_half) { + const uint16_t ihInf = 0x7C00; + const uint16_t ihNegInf = 0xFC00; + const uint16_t ihNaN = 0x7C01; + const uint16_t ihNeg0 = 0x8000; + fInf = reinterpret_cast(ihInf); + fNegInf = reinterpret_cast(ihNegInf); + fNaN = reinterpret_cast(ihNaN); + fNeg0 = reinterpret_cast(ihNeg0); + } else +#endif + if (is_float) { + const uint32_t ifInf = 0x7F800000; + const uint32_t ifNegInf = 0xFF800000; + const uint32_t ifNaN = 0x7F800001; + const uint32_t ifNeg0 = 0x80000000; + fInf = reinterpret_cast(ifInf); + fNegInf = reinterpret_cast(ifNegInf); + fNaN = reinterpret_cast(ifNaN); + fNeg0 = reinterpret_cast(ifNeg0); + } + + if (x == 0) { + return 0; + } + + uint32_t sign = x >> 7; + uint32_t mantissa = x & ((1 << wm) - 1); + int exponent = (x & 0x7F) >> wm; + if (negative_zero_nan) { + if (x == 0x80) { + return fNaN; + } + } else { + if (x == 0x80) { + return fNeg0; + } + if (exponent == ((1 << we) - 1)) { + return (mantissa == 0) ? (sign ? fNegInf : fInf) : fNaN; + } + } + typename std::conditional::type retval; + if (we == 5 && is_half && !negative_zero_nan) { + retval = x << 8; + return reinterpret_cast(retval); + } + + const int exp_low_cutoff = (1 << (weo - 1)) - (1 << (we - 1)) + 1 - (negative_zero_nan ? 
1 : 0); + + // subnormal input + if (exponent == 0) { + // guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above + int sh = 1 + clz(mantissa) - (32 - wm); + mantissa <<= sh; + exponent += 1 - sh; + mantissa &= ((1 << wm) - 1); + } + exponent += exp_low_cutoff - 1; + mantissa <<= wmo - wm; + + // subnormal output (occurs when T=half, we=5, negative_zero_nan=true) + if (exponent <= 0) { + mantissa |= 1 << wmo; + mantissa >>= 1 - exponent; + exponent = 0; + } + + if (sizeof(T) == 2) { + retval = (sign << 15) | (exponent << 10) | mantissa; + } else { + retval = (sign << 31) | (exponent << 23) | mantissa; + } + return reinterpret_cast(retval); +} + +} // namespace hip_fp8_impl diff --git a/csrc_musa/quantization/fp8/amd_detail/quant_utils.muh b/csrc_musa/quantization/fp8/amd_detail/quant_utils.muh new file mode 100644 index 0000000..1701e89 --- /dev/null +++ b/csrc_musa/quantization/fp8/amd_detail/quant_utils.muh @@ -0,0 +1,517 @@ +#pragma once +#include "hip_float8.h" + +#include +#include +#include + +#include "../../../attention/dtype_float32.cuh" +#include "../../../attention/dtype_bfloat16.cuh" + +namespace vllm +{ +namespace fp8_e4m3 { +template +__inline__ __device__ Tout vec_conversion(const Tin& x) +{ + return x; +} + +template +__inline__ __device__ Tout scaled_vec_conversion(const Tin& x, const float scale) +{ + return x; +} + +// fp8 -> half +template <> +__inline__ __device__ uint16_t vec_conversion(const uint8_t& a) +{ + hip_fp8 f8{a, hip_fp8::from_bits()}; + __half_raw res; + res.data = static_cast(f8); + return res.x; +} + +// fp8x2 -> half2 +template <> +__inline__ __device__ uint32_t vec_conversion(const uint16_t& a) +{ +#if defined(__HIP__MI300__) && defined(__HIP_FP8_EXPERIMENTAL_BULK_CONVERT__) + const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0); + union { + __half2_raw h2r; + uint32_t ui32; + } tmp; + tmp.h2r.x.data = f2[0]; + tmp.h2r.y.data = f2[1]; + return tmp.ui32; +#else + union { + uint16_t u16[2]; + uint32_t u32; + } tmp; + 
+ tmp.u16[0] = vec_conversion(static_cast(a)); + tmp.u16[1] = vec_conversion(static_cast(a >> 8U)); + return tmp.u32; +#endif +} + +// fp8x4 -> half2x2 +template <> +__inline__ __device__ uint2 vec_conversion(const uint32_t& a) +{ + union { + uint2 u32x2; + uint32_t u32[2]; + } tmp; + tmp.u32[0] = vec_conversion((uint16_t)a); + tmp.u32[1] = vec_conversion((uint16_t)(a >> 16U)); + return tmp.u32x2; +} + +// fp8x8 -> half2x4 +template <> +__inline__ __device__ uint4 vec_conversion(const uint2& a) +{ + union { + uint4 u64x2; + uint2 u64[2]; + } tmp; + tmp.u64[0] = vec_conversion(a.x); + tmp.u64[1] = vec_conversion(a.y); + return tmp.u64x2; +} + +using __mt_bfloat16 = __hip_bfloat16; + +// fp8 -> __nv_bfloat16 +template <> +__inline__ __device__ __mt_bfloat16 vec_conversion<__mt_bfloat16, uint8_t>(const uint8_t& a) +{ + hip_fp8 f8{a, hip_fp8::from_bits()}; + float f{f8}; + return __float2bfloat16(f); +} + +using __mt_bfloat162 = __hip_bfloat162; + +// fp8x2 -> __nv_bfloat162 +template <> +__inline__ __device__ __mt_bfloat162 vec_conversion<__mt_bfloat162, uint16_t>(const uint16_t& a) +{ + __mt_bfloat162 res; + res.x = vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)a); + res.y = vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)(a >> 8U)); + return res; +} + +// fp8x4 -> bf16_4_t +template <> +__inline__ __device__ bf16_4_t vec_conversion(const uint32_t& a) +{ + bf16_4_t res; + res.x = vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)a); + res.y = vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)(a >> 16U)); + return res; +} + +// fp8x8 -> bf16_8_t +template <> +__inline__ __device__ bf16_8_t vec_conversion(const uint2& a) +{ + bf16_4_t tmp1, tmp2; + tmp1 = vec_conversion(a.x); + tmp2 = vec_conversion(a.y); + bf16_8_t res; + res.x = tmp1.x; + res.y = tmp1.y; + res.z = tmp2.x; + res.w = tmp2.y; + return res; +} + +// fp8 -> float +template <> +__inline__ __device__ float vec_conversion(const uint8_t& a) +{ + hip_fp8 fp8{a, hip_fp8::from_bits()}; + return 
static_cast(fp8); +} + +// fp8x2 -> float2 +template <> +__inline__ __device__ float2 vec_conversion(const uint16_t& a) +{ +#if defined(__HIP__MI300__) && defined(__HIP_FP8_EXPERIMENTAL_BULK_CONVERT__) + float2 res; + const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0); + res.x = f2[0]; + res.y = f2[1]; + return res; +#else + float2 res; + res.x = vec_conversion(static_cast(a)); + res.y = vec_conversion(static_cast(a >> 8U)); + return res; +#endif +} + +// fp8x4 -> float4 +template <> +__inline__ __device__ Float4_ vec_conversion(const uint32_t& a) +{ + Float4_ res; + res.x = vec_conversion((uint16_t)a); + res.y = vec_conversion((uint16_t)(a >> 16U)); + return res; +} + +// fp8x8 -> float8 +template <> +__inline__ __device__ Float8_ vec_conversion(const uint2& a) +{ + Float4_ tmp1, tmp2; + tmp1 = vec_conversion(a.x); + tmp2 = vec_conversion(a.y); + Float8_ res; + res.x = tmp1.x; + res.y = tmp1.y; + res.z = tmp2.x; + res.w = tmp2.y; + return res; +} + +// half -> fp8 +template <> +__inline__ __device__ uint8_t vec_conversion(const uint16_t& a) +{ + __half_raw tmp; + tmp.x = a; + + hip_fp8 f8{static_cast(tmp.data)}; + return f8.data; +} + +// bf16 -> fp8 +template <> +__inline__ __device__ uint8_t vec_conversion(const __mt_bfloat16& a) +{ + hip_fp8 res{__bfloat162float(a)}; + return res.data; +} + +// float -> fp8 +template <> +__inline__ __device__ uint8_t vec_conversion(const float& a) +{ + hip_fp8 f8(a); + return f8.data; +} + +// fp8x4 -> float4 +template <> +__inline__ __device__ float4 vec_conversion(const uint32_t& a) +{ + Float4_ tmp = vec_conversion(a); + float4 res = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y); + return res; +} + +// float2 -> half2 +template <> +__inline__ __device__ uint32_t vec_conversion(const float2& a) +{ + union { + half2 float16; + uint32_t uint32; + }; + + float16 = __float22half2_rn(a); + return uint32; +} + +// Float4 -> half2x2 +template <> +__inline__ __device__ uint2 vec_conversion(const Float4_& a) +{ + uint2 b; + 
float2 val; + val.x = a.x.x; + val.y = a.x.y; + b.x = vec_conversion(val); + + val.x = a.y.x; + val.y = a.y.y; + b.y = vec_conversion(val); + return b; +} + +// Float4 -> float4 +template <> +__inline__ __device__ float4 vec_conversion(const Float4_& a) +{ + float4 b; + b.x = a.x.x; + b.y = a.x.y; + b.z = a.y.x; + b.w = a.y.y; + return b; +} + +// Float8 -> half2x4 +template <> +__inline__ __device__ uint4 vec_conversion(const Float8_& a) +{ + uint4 b; + b.x = vec_conversion(a.x); + b.y = vec_conversion(a.y); + b.z = vec_conversion(a.z); + b.w = vec_conversion(a.w); + return b; +} + +// float2 -> bfloat162 +template <> +__inline__ __device__ __mt_bfloat162 vec_conversion<__mt_bfloat162, float2>(const float2& a) +{ + __mt_bfloat162 b = __float22bfloat162_rn(a); + return b; +} + +// Float4 -> bfloat162x2 +template <> +__inline__ __device__ bf16_4_t vec_conversion(const Float4_& a) +{ + bf16_4_t b; + b.x = __float22bfloat162_rn(a.x); + b.y = __float22bfloat162_rn(a.y); + return b; +} + +// Float8 -> bfloat162x4 +template <> +__inline__ __device__ bf16_8_t vec_conversion(const Float8_& a) +{ + bf16_8_t b; + b.x = __float22bfloat162_rn(a.x); + b.y = __float22bfloat162_rn(a.y); + b.z = __float22bfloat162_rn(a.z); + b.w = __float22bfloat162_rn(a.w); + return b; +} + + +/* Scaled and vectorized conversions, for data exchange between high and low precision domains + + Convention of the scale in API, e.g: FP8_data = Quantization( High_Precision_data / scale ) + s.t. 
+ Quantize(HP / scale) => FP8 + Dequant(FP8) * scale => HP + + */ + +// fp8 -> half +template <> +__inline__ __device__ uint16_t scaled_vec_conversion(const uint8_t& a, const float scale) +{ + hip_fp8 f8{a, hip_fp8::from_bits()}; + __half_raw res; + res.data = static_cast(f8) * scale; + return res.x; +} + +// fp8x2 -> half2 +template <> +__inline__ __device__ uint32_t scaled_vec_conversion(const uint16_t& a, const float scale) +{ +#if defined(__HIP__MI300__) && defined(__HIP_FP8_EXPERIMENTAL_BULK_CONVERT__) + const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0); + union { + __half2_raw h2r; + uint32_t ui32; + } tmp; + tmp.h2r.x.data = f2[0] * scale; + tmp.h2r.y.data = f2[1] * scale; + return tmp.ui32; +#else + union { + uint16_t u16[2]; + uint32_t u32; + } tmp; + + tmp.u16[0] = scaled_vec_conversion(static_cast(a), scale); + tmp.u16[1] = scaled_vec_conversion(static_cast(a >> 8U), scale); + return tmp.u32; +#endif +} + +// fp8x4 -> half2x2 +template <> +__inline__ __device__ uint2 scaled_vec_conversion(const uint32_t& a, const float scale) +{ + union { + uint2 u32x2; + uint32_t u32[2]; + } tmp; + tmp.u32[0] = scaled_vec_conversion((uint16_t)a, scale); + tmp.u32[1] = scaled_vec_conversion((uint16_t)(a >> 16U), scale); + return tmp.u32x2; +} + +// fp8x8 -> half2x4 +template <> +__inline__ __device__ uint4 scaled_vec_conversion(const uint2& a, const float scale) +{ + union { + uint4 u64x2; + uint2 u64[2]; + } tmp; + tmp.u64[0] = scaled_vec_conversion(a.x, scale); + tmp.u64[1] = scaled_vec_conversion(a.y, scale); + return tmp.u64x2; +} + +using __mt_bfloat16 = __hip_bfloat16; + +// fp8 -> __nv_bfloat16 +template <> +__inline__ __device__ __mt_bfloat16 scaled_vec_conversion<__mt_bfloat16, uint8_t>(const uint8_t& a, const float scale) +{ + hip_fp8 f8{a, hip_fp8::from_bits()}; + float f{f8}; + return __float2bfloat16(f * scale); +} + +using __mt_bfloat162 = __hip_bfloat162; + +// fp8x2 -> __nv_bfloat162 +template <> +__inline__ __device__ __mt_bfloat162 
scaled_vec_conversion<__mt_bfloat162, uint16_t>(const uint16_t& a, const float scale) +{ + __mt_bfloat162 res; + res.x = scaled_vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)a, scale); + res.y = scaled_vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)(a >> 8U), scale); + return res; +} + +// fp8x4 -> bf16_4_t +template <> +__inline__ __device__ bf16_4_t scaled_vec_conversion(const uint32_t& a, const float scale) +{ + bf16_4_t res; + res.x = scaled_vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)a, scale); + res.y = scaled_vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)(a >> 16U), scale); + return res; +} + +// fp8x8 -> bf16_8_t +template <> +__inline__ __device__ bf16_8_t scaled_vec_conversion(const uint2& a, const float scale) +{ + bf16_4_t tmp1, tmp2; + tmp1 = scaled_vec_conversion(a.x, scale); + tmp2 = scaled_vec_conversion(a.y, scale); + bf16_8_t res; + res.x = tmp1.x; + res.y = tmp1.y; + res.z = tmp2.x; + res.w = tmp2.y; + return res; +} + +// fp8 -> float +template <> +__inline__ __device__ float scaled_vec_conversion(const uint8_t& a, const float scale) +{ + hip_fp8 fp8{a, hip_fp8::from_bits()}; + return static_cast(fp8) * scale; +} + +// fp8x2 -> float2 +template <> +__inline__ __device__ float2 scaled_vec_conversion(const uint16_t& a, const float scale) +{ +#if defined(__HIP__MI300__) && defined(__HIP_FP8_EXPERIMENTAL_BULK_CONVERT__) + float2 res; + const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0); + res.x = f2[0] * scale; + res.y = f2[1] * scale; + return res; +#else + float2 res; + res.x = scaled_vec_conversion(static_cast(a), scale); + res.y = scaled_vec_conversion(static_cast(a >> 8U), scale); + return res; +#endif +} + +// fp8x4 -> float4 +template <> +__inline__ __device__ Float4_ scaled_vec_conversion(const uint32_t& a, const float scale) +{ + Float4_ res; + res.x = scaled_vec_conversion((uint16_t)a, scale); + res.y = scaled_vec_conversion((uint16_t)(a >> 16U), scale); + return res; +} + +// fp8x8 -> float8 +template <> +__inline__ 
__device__ Float8_ scaled_vec_conversion(const uint2& a, const float scale) +{ + Float4_ tmp1, tmp2; + tmp1 = scaled_vec_conversion(a.x, scale); + tmp2 = scaled_vec_conversion(a.y, scale); + Float8_ res; + res.x = tmp1.x; + res.y = tmp1.y; + res.z = tmp2.x; + res.w = tmp2.y; + return res; +} + + +/* Quantize(HP / scale) => FP8 */ + +// TODO(Hai): vectorized to add + +// half -> fp8 +template <> +__inline__ __device__ uint8_t scaled_vec_conversion(const uint16_t& a, const float scale) +{ + __half_raw tmp; + tmp.x = a; + + hip_fp8 f8{static_cast(tmp.data)/scale}; + return f8.data; +} + +// bf16 -> fp8 +template <> +__inline__ __device__ uint8_t scaled_vec_conversion(const __mt_bfloat16& a, const float scale) +{ + hip_fp8 res{__bfloat162float(a)/scale}; + return res.data; +} + +// float -> fp8 +template <> +__inline__ __device__ uint8_t scaled_vec_conversion(const float& a, const float scale) +{ + hip_fp8 f8(a/scale); + return f8.data; +} + +// fp8x4 -> float4 +template <> +__inline__ __device__ float4 scaled_vec_conversion(const uint32_t& a, const float scale) +{ + Float4_ tmp = scaled_vec_conversion(a, scale); + float4 res = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y); + return res; +} + +} +} // namespace vllm diff --git a/csrc_musa/quantization/fp8/fp8_cuda_kernels.mu b/csrc_musa/quantization/fp8/fp8_cuda_kernels.mu new file mode 100644 index 0000000..c036642 --- /dev/null +++ b/csrc_musa/quantization/fp8/fp8_cuda_kernels.mu @@ -0,0 +1,126 @@ +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include +#include "torch_musa/csrc/core/MUSAGuard.h" + +#include + +#include "musa_compat.h" +#include "dispatch_utils.h" + +namespace vllm { + +__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) { + float old; + old = (value >= 0) ? 
__int_as_float(atomicMax((int*)addr, __float_as_int(value))) : + __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value))); + + return old; +} + +// Compute the absolute maximum m of the input tensor and store +// m / float8_e4m3::max() in *scale. Each thread block performs a +// reduction tree and the memory in scale is atomically updated. +// So to get the right answer, *scale needs to be initialized to +// a value <= 0.0 and we need to wait for all thread blocks to +// finish before consuming *scale. +template +__global__ void segmented_max_reduction( + float* __restrict__ scale, + const scalar_t* __restrict__ input, + int64_t num_elems) { + __shared__ float cache[1024]; + int i = blockDim.x * blockIdx.x + threadIdx.x; + + // First store maximum for all values processed by + // the current thread in cache[threadIdx.x] + scalar_t tmp = 0.0; + while (i < num_elems) { + float x = static_cast(input[i]); + tmp = max(tmp, fabs(x)); + i += blockDim.x * gridDim.x; + } + cache[threadIdx.x] = tmp; + + __syncthreads(); + + // Now perform parallel reduction within the thread block + int ib = blockDim.x / 2; + while (ib != 0) { + if (threadIdx.x < ib && cache[threadIdx.x + ib] > cache[threadIdx.x]) { + cache[threadIdx.x] = cache[threadIdx.x + ib]; + } + __syncthreads(); + ib /= 2; + } + // Finally, since cache[0] contains the maximum for this thread block, + // atomically write the max to the target location + if (threadIdx.x == 0) { + atomicMaxFloat(scale, cache[0] / std::numeric_limits::max()); + } +} + +template +__global__ void scaled_fp8_quant_kernel( + c10::Float8_e4m3fn* __restrict__ out, + const scalar_t* __restrict__ input, + const float* __restrict__ scale, + int64_t num_elems) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + while (i < num_elems) { + out[i] = static_cast(input[i] / *scale); + i += blockDim.x * gridDim.x; + } +} + +} // namespace vllm + +void static_scaled_fp8_quant( + torch::Tensor& out, // [..., d] + torch::Tensor& input, // 
[..., d] + torch::Tensor& scale) // [1] +{ + int64_t num_tokens = input.numel() / input.size(-1); + int64_t num_elems = input.numel(); + dim3 grid(num_tokens); + dim3 block(1024); + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), + "scaled_fp8_quant_kernel", + [&] { + vllm::scaled_fp8_quant_kernel<<>>( + out.data_ptr(), + input.data_ptr(), + scale.data_ptr(), + num_elems); + }); +} + +void dynamic_scaled_fp8_quant( + torch::Tensor& out, // [..., d] + torch::Tensor& input, // [..., d] + torch::Tensor& scale) // [1] +{ + int64_t num_tokens = input.numel() / input.size(-1); + int64_t num_elems = input.numel(); + dim3 grid(num_tokens); + dim3 block(1024); + const at::musa::OptionalMUSAGuard device_guard(device_of(input)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + VLLM_DISPATCH_FLOATING_TYPES( + input.scalar_type(), + "scaled_fp8_quant_kernel", + [&] { + vllm::segmented_max_reduction<<>>( + scale.data_ptr(), + input.data_ptr(), + num_elems); + vllm::scaled_fp8_quant_kernel<<>>( + out.data_ptr(), + input.data_ptr(), + scale.data_ptr(), + num_elems); + }); +} + diff --git a/csrc_musa/quantization/fp8_e5m2_kvcache/quant_utils.muh b/csrc_musa/quantization/fp8_e5m2_kvcache/quant_utils.muh new file mode 100644 index 0000000..1a90faf --- /dev/null +++ b/csrc_musa/quantization/fp8_e5m2_kvcache/quant_utils.muh @@ -0,0 +1,277 @@ +#pragma once + +#include +#include +#include +#include +#include "../../attention/attention_dtypes.h" +#include "../../attention/dtype_float32.cuh" +#include "../../attention/dtype_float16.cuh" +#include "../../attention/dtype_bfloat16.cuh" + + +namespace vllm { +#ifdef ENABLE_FP8_E5M2 +namespace fp8_e5m2_unscaled { + +template +__inline__ __device__ Tout vec_conversion(const Tin& x) +{ + return x; +} + +// fp8 -> half +template<> +__inline__ __device__ uint16_t vec_conversion(const uint8_t& a) 
{
    __half_raw res = __nv_cvt_fp8_to_halfraw(a, __NV_E5M2);
    return res.x;
}

// fp8x2 -> half2
template<>
__inline__ __device__ uint32_t vec_conversion<uint32_t, uint16_t>(const uint16_t& a)
{
    union {
        uint16_t u16[2];
        uint32_t u32;
    } tmp;
    __half2_raw res = __nv_cvt_fp8x2_to_halfraw2(a, __NV_E5M2);
    tmp.u16[0] = res.x;
    tmp.u16[1] = res.y;
    return tmp.u32;
}

// fp8x4 -> half2x2
template<>
__inline__ __device__ uint2 vec_conversion<uint2, uint32_t>(const uint32_t& a)
{
    union {
        uint2 u32x2;
        uint32_t u32[2];
    } tmp;
    tmp.u32[0] = vec_conversion<uint32_t, uint16_t>((uint16_t)a);
    tmp.u32[1] = vec_conversion<uint32_t, uint16_t>((uint16_t)(a >> 16U));
    return tmp.u32x2;
}

// fp8x8 -> half2x4
template<>
__inline__ __device__ uint4 vec_conversion<uint4, uint2>(const uint2& a)
{
    union {
        uint4 u64x2;
        uint2 u64[2];
    } tmp;
    tmp.u64[0] = vec_conversion<uint2, uint32_t>(a.x);
    tmp.u64[1] = vec_conversion<uint2, uint32_t>(a.y);
    return tmp.u64x2;
}

// fp8 -> __mt_bfloat16
template<>
__inline__ __device__ __mt_bfloat16 vec_conversion<__mt_bfloat16, uint8_t>(const uint8_t& a)
{
    // Note there is no direct convert function from fp8 to bf16.
    // fp8 -> half
    __half_raw res = __nv_cvt_fp8_to_halfraw(a, __NV_E5M2);
    // half -> float -> bf16
    float tmp = half_to_float(res.x);
    return __float2bfloat16(tmp);
}

// fp8x2 -> __mt_bfloat162
template<>
__inline__ __device__ __mt_bfloat162 vec_conversion<__mt_bfloat162, uint16_t>(const uint16_t& a)
{
    __mt_bfloat162 res;
    res.x = vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)a);
    res.y = vec_conversion<__mt_bfloat16, uint8_t>((uint8_t)(a >> 8U));
    return res;
}

// fp8x4 -> bf16_4_t
template<>
__inline__ __device__ bf16_4_t vec_conversion<bf16_4_t, uint32_t>(const uint32_t& a)
{
    bf16_4_t res;
    res.x = vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)a);
    res.y = vec_conversion<__mt_bfloat162, uint16_t>((uint16_t)(a >> 16U));
    return res;
}

// fp8x8 -> bf16_8_t
template<>
__inline__ __device__ bf16_8_t vec_conversion<bf16_8_t, uint2>(const uint2& a)
{
    bf16_4_t tmp1, tmp2;
    tmp1 = vec_conversion<bf16_4_t, uint32_t>(a.x);
    tmp2 = vec_conversion<bf16_4_t, uint32_t>(a.y);
    bf16_8_t res;
    res.x = tmp1.x;
    res.y = tmp1.y;
    res.z = tmp2.x;
    res.w = tmp2.y;
    return res;
}

// fp8 -> float
template<>
__inline__ __device__ float vec_conversion<float, uint8_t>(const uint8_t& a)
{
    // fp8 -> half
    uint16_t tmp = vec_conversion<uint16_t, uint8_t>(a);
    // half -> float
    return half_to_float(tmp);
}

// fp8x2 -> float2
template<>
__inline__ __device__ float2 vec_conversion<float2, uint16_t>(const uint16_t& a)
{
    // fp8x2 -> half2
    uint32_t tmp = vec_conversion<uint32_t, uint16_t>(a);
    // half2 -> float2
    return half2_to_float2(tmp);
}

// fp8x4 -> float4
template<>
__inline__ __device__ Float4_ vec_conversion<Float4_, uint32_t>(const uint32_t& a)
{
    Float4_ res;
    res.x = vec_conversion<float2, uint16_t>((uint16_t)a);
    res.y = vec_conversion<float2, uint16_t>((uint16_t)(a >> 16U));
    return res;
}

// fp8x8 -> float8
template<>
__inline__ __device__ Float8_ vec_conversion<Float8_, uint2>(const uint2& a)
{
    Float4_ tmp1, tmp2;
    tmp1 = vec_conversion<Float4_, uint32_t>(a.x);
    tmp2 = vec_conversion<Float4_, uint32_t>(a.y);
    Float8_ res;
    res.x = tmp1.x;
    res.y = tmp1.y;
    res.z = tmp2.x;
    res.w = tmp2.y;
    return res;
}


// half -> fp8
+template<> +__inline__ __device__ uint8_t vec_conversion(const uint16_t& a) +{ + __half_raw tmp; + tmp.x = a; + __nv_fp8_storage_t res = __nv_cvt_halfraw_to_fp8(tmp, __NV_SATFINITE, __NV_E5M2); + return (uint8_t)res; +} + +// bf16 -> fp8 +template<> +__inline__ __device__ uint8_t vec_conversion(const __mt_bfloat16& a) +{ +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800 + assert(false); +#else + __nv_fp8_storage_t res = __nv_cvt_bfloat16raw_to_fp8(__mt_bfloat16_raw(a), __NV_SATFINITE, __NV_E5M2); + return (uint8_t)res; +#endif +} + +// float -> fp8 +template<> +__inline__ __device__ uint8_t vec_conversion(const float& a) +{ + __nv_fp8_storage_t res = __nv_cvt_float_to_fp8(a, __NV_SATFINITE, __NV_E5M2); + return (uint8_t)res; +} + +// fp8x4 -> float4 +template<> +__inline__ __device__ float4 vec_conversion(const uint32_t& a) +{ + Float4_ tmp = vec_conversion(a); + float4 res = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y); + return res; +} + + +template<> +__inline__ __device__ uint32_t vec_conversion(const float2& a) +{ + union { + half2 float16; + uint32_t uint32; + }; + + float16 = __float22half2_rn(a); + return uint32; +} + +template<> +__inline__ __device__ uint2 vec_conversion(const Float4_& a) +{ + uint2 b; + float2 val; + val.x = a.x.x; + val.y = a.x.y; + b.x = vec_conversion(val); + + val.x = a.y.x; + val.y = a.y.y; + b.y = vec_conversion(val); + + return b; +} + +template<> +__inline__ __device__ float4 vec_conversion(const Float4_& a) +{ + float4 b; + b.x = a.x.x; + b.y = a.x.y; + b.z = a.y.x; + b.w = a.y.y; + return b; +} + +template<> +__inline__ __device__ uint4 vec_conversion(const Float8_& a) +{ + uint4 b; + b.x = vec_conversion(a.x); + b.y = vec_conversion(a.y); + b.z = vec_conversion(a.z); + b.w = vec_conversion(a.w); + return b; +} + +template<> +__inline__ __device__ __mt_bfloat162 vec_conversion<__mt_bfloat162, float2>(const float2 &a) { + __mt_bfloat162 b; + from_float(b, a); + return b; +} + +template<> +__inline__ __device__ bf16_4_t 
vec_conversion<bf16_4_t, Float4_>(const Float4_ &a) {
    bf16_4_t b;
    from_float(b, a);
    return b;
}

// Float8_ -> bf16_8_t
template<>
__inline__ __device__ bf16_8_t vec_conversion<bf16_8_t, Float8_>(const Float8_ &a) {
    bf16_8_t b;
    from_float(b, a);
    return b;
}

} // namespace fp8_e5m2_unscaled
#endif // ENABLE_FP8_E5M2
} // namespace vllm
diff --git a/csrc_musa/quantization/gptq/compat.muh b/csrc_musa/quantization/gptq/compat.muh
new file mode 100644
index 0000000..99800ee
--- /dev/null
+++ b/csrc_musa/quantization/gptq/compat.muh
@@ -0,0 +1,64 @@
/*
Copied from https://github.com/turboderp/exllamav2
*/

#ifndef _compat_cuh
#define _compat_cuh

namespace vllm {
namespace gptq {
// atomicAdd for half types, to support CC < 7.x

// Emulates atomicAdd(half*) with a CAS loop on the containing 32-bit word;
// the `(size_t)address & 2` test selects the hi/lo half of that word.
__device__ __forceinline__ void atomicAdd_half(half* address, half val)
{
    unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
    unsigned int old = *address_as_ui;
    unsigned int assumed;

    do
    {
        assumed = old;
        __half_raw hsum;
        hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
        half tmpres = __hadd(hsum, val);
        hsum = __half_raw(tmpres);
        old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
        old = atomicCAS(address_as_ui, assumed, old);
    }
    while (assumed != old);
}

// atomicAdd for half2 types

__device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val)
{
    unsigned int* address_as_ui = (unsigned int*)address;
    unsigned int old = *address_as_ui;
    unsigned int assumed;
    do
    {
        assumed = old;
        half2 old_val = *((half2*)&old);
        half2 new_val = __hadd2(old_val, val);
        old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val));
    }
    while (assumed != old);
}

//

#if defined(__MUSA_ARCH__) || defined(USE_ROCM)
#if __MUSA_ARCH__ < 700 || defined(USE_ROCM)

__device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); }

#if __MUSA_ARCH__ < 600 || defined(USE_ROCM)
__device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); }
#endif

#endif
#endif

} // namespace gptq
} // namespace vllm
#endif
diff --git a/csrc_musa/quantization/gptq/matrix_view.muh b/csrc_musa/quantization/gptq/matrix_view.muh
new file mode 100644
index 0000000..e92e9b6
--- /dev/null
+++ b/csrc_musa/quantization/gptq/matrix_view.muh
@@ -0,0 +1,274 @@
/*
Adapted from https://github.com/turboderp/exllamav2 and https://github.com/turboderp/exllama
*/

#ifndef _matrix_view_cuh
#define _matrix_view_cuh

// NOTE(review): the two '#include <...>' targets here were lost in transit;
// upstream uses the runtime and fp16 headers — confirm the MUSA names.
#include <musa_runtime.h>
#include <musa_fp16.h>

#include "qdq_util.cuh"

namespace vllm {
namespace gptq {

// Read-only row-major view over a half matrix.
class MatrixView_half
{
public:
    const half* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
    __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
    __device__
__forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); }
    __device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; }

    __device__ __forceinline__ void item4(half (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*) item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __low2half(i01);
        items[1] = __high2half(i01);
        items[2] = __low2half(i23);
        items[3] = __high2half(i23);
    }
    __device__ __forceinline__ void item4_f(float (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*)item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __half2float(__low2half(i01));
        items[1] = __half2float(__high2half(i01));
        items[2] = __half2float(__low2half(i23));
        items[3] = __half2float(__high2half(i23));
    }

    __device__ __forceinline__ void item4_h2(half2 (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*)item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __half2half2(__low2half(i01));
        items[1] = __half2half2(__high2half(i01));
        items[2] = __half2half2(__low2half(i23));
        items[3] = __half2half2(__high2half(i23));
    }
};

// Read-write row-major view over a half matrix.
class MatrixView_half_rw
{
public:
    half* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
    __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
    __device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; }
    __device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; }
    __device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; }

    __device__ __forceinline__ void set4(int row, int column, half v0, half v1, half v2, half v3)
    {
        half2 v01 = __halves2half2(v0, v1);
        half2 v23 = __halves2half2(v2, v3);
        half2* ptr = (half2*) item_ptr(row, column);
        ptr[0] = v01;
        ptr[1] = v23;
    }
};

// 4-bit packed view, 8 values per uint32, packed along rows.
class MatrixView_q4_row
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        return (data[row * width / 8 + column / 8] >> shift) & 0x0f;
    }

    __device__ __forceinline__ void item2(int (&items)[2], int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        uint32_t d = data[row * width / 8 + column / 8] >> shift;
        items[0] = d & 0x0f;
        items[1] = (d >> 4) & 0x0f;
    }

    __device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        uint32_t d = data[row * width / 8 + column / 8] >> shift;
        items[0] = d & 0x0f;
        items[1] = (d >> 4) & 0x0f;
        items[2] = (d >> 8) & 0x0f;
        items[3] = (d >> 12) & 0x0f;
    }
};

// 4-bit packed view, 8 values per uint32, packed along columns.
class MatrixView_q4_column
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q4_column(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int shift = (row & 0x07) * 4;
        return (data[row / 8 * width + column] >> shift) & 0x0f;
    }

    __device__ __forceinline__ uint32_t item_uint32_t(int row, int column) { return data[row / 8 * width + column]; }
    __device__ __forceinline__ const uint32_t* item_uint32_ptr(int row, int column) { return &data[row / 8 * width + column]; }
};

// 2-bit packed view, 16 values per uint32, packed along rows.
class MatrixView_q2_row
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q2_row(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int shift = (column & 0x0f) * 2;
        return (data[row * width / 16 + column / 16] >> shift) & 0x03;
    }

    __device__ __forceinline__ void item2(int (&items)[2], int row, int column) const
    {
        int shift = (column & 0x0f) * 2;
        uint32_t d = data[row * width / 16 + column / 16] >> shift;
        items[0] = d & 0x03;
        items[1] = (d >> 2) & 0x03;
    }

    __device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
    {
        int shift = (column & 0x0f) * 2;
        uint32_t d = data[row * width / 16 + column / 16] >> shift;
        items[0] = d & 0x03;
        items[1] = (d >> 2) & 0x03;
        items[2] = (d >> 4) & 0x03;
        items[3] = (d >> 6) & 0x03;
    }
};

// 3-bit packed view: 32 values per 3 uint32 words, packed along rows.
// The z_mod == 10 / 21 cases handle values that straddle a word boundary.
class MatrixView_q3_row
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q3_row(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int z_w = column * 3 / 32;
        int z_mod = column & 0x1f;

        if (z_mod == 10) {
            return (data[row * width * 3 / 32 + z_w] >> 30) | ((data[row * width * 3 / 32 + (z_w + 1)] << 2) & 0x4);
        } else if (z_mod == 21) {
            return (data[row * width * 3 / 32 + z_w] >> 31) | ((data[row * width * 3 / 32 + (z_w + 1)] << 1) & 0x6);
        } else if (z_mod < 10) {
            return (data[row * width * 3 / 32 + z_w] >> (z_mod * 3)) & 0x07;
        } else if (z_mod < 21) {
            return (data[row * width * 3 / 32 + z_w] >> (z_mod * 3 - 32)) & 0x07;
        } else {
            return (data[row * width * 3 / 32 + z_w] >> (z_mod * 3 - 64)) & 0x07;
        }
    }

    __device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
    {
        int shift = (column & 0x1f);
        uint32_t d;
        if (shift <= 4) {
            d = data[row * width / 32 * 3 + column * 3 / 32] >> (shift * 3);
        } else if (shift == 8) {
            d = (data[row * width / 32 * 3 + column * 3 / 32] >> 24) | ((data[row * width / 32 * 3 + column * 3 / 32 + 1] & 0x0f) << 8);
        } else if (shift <= 16) {
            d = data[row * width / 32 * 3 + column * 3 / 32] >> (shift * 3 - 32);
        } else if (shift == 20) {
            d = (data[row * width / 32 * 3 + column * 3 / 32] >> 28) | ((data[row * width / 32 * 3 + column * 3 / 32 + 1] & 0xff) << 4);
        } else {
            d = data[row * width / 32 * 3 + column * 3 / 32] >> (shift * 3 - 64);
        }
        items[0] = d & 0x07;
        items[1] = (d >> 3) & 0x07;
        items[2] = (d >> 6) & 0x07;
        items[3] = (d >> 9) & 0x07;
    }
};

// 8-bit packed view, 4 values per uint32, packed along rows.
class MatrixView_q8_row
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q8_row(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int shift = (column & 0x03) * 8;
        return (data[row * width / 4 + column / 4] >> shift) & 0xff;
    }

    __device__ __forceinline__ void item2(int (&items)[2], int row, int column) const
    {
        int shift = (column & 0x03) * 8;
        uint32_t d = data[row * width / 4 + column / 4] >> shift;
        items[0] = d & 0xff;
        items[1] = (d >> 8) & 0xff;
    }

    __device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
    {
        // FIX: was '* 2' — a copy/paste slip from the 2-bit view. 8-bit lanes
        // need '* 8' as in item()/item2(); callers pass column % 4 == 0, for
        // which both forms give shift 0, so aligned behavior is unchanged.
        int shift = (column & 0x03) * 8;
        uint32_t d = data[row * width / 4 + column / 4] >> shift;
        items[0] = d & 0xff;
        items[1] = (d >> 8) & 0xff;
        items[2] = (d >> 16) & 0xff;
        items[3] = (d >> 24) & 0xff;
    }
};

} // namespace gptq
} // namespace vllm
#endif
diff --git a/csrc_musa/quantization/gptq/q_gemm.mu b/csrc_musa/quantization/gptq/q_gemm.mu
new file mode 100644
index 0000000..e6eec6c
--- /dev/null
+++ 
b/csrc_musa/quantization/gptq/q_gemm.mu
@@ -0,0 +1,2075 @@
/*
Adapted from https://github.com/turboderp/exllamav2 and https://github.com/qwopqwop200/GPTQ-for-LLaMa
*/

// NOTE(review): the '#include <...>' targets below were lost in transit;
// restored from the upstream vLLM q_gemm.cu with MUSA substitutions — confirm.
#include <cstdint>
#include <cstdio>

#include <torch/extension.h>
#include "torch_musa/csrc/core/MUSAGuard.h"
#include "torch_musa/csrc/aten/musa/MUSAContext.h"
#include <musa_runtime.h>
#include <musa_fp16.h>

// NOTE(review): these are .cuh names while this patch adds .muh files — verify
// the include paths resolve in the build.
#include "compat.cuh"
#include "matrix_view.cuh"
#include "qdq_2.cuh"
#include "qdq_3.cuh"
#include "qdq_4.cuh"
#include "qdq_8.cuh"

namespace vllm {
namespace gptq {

#define BLOCK_KN_SIZE 128
#define BLOCK_M_SIZE_MAX 8
#define MAX_GROUPS_IN_BLOCK (BLOCK_KN_SIZE / 32)
#define MAX_Q_GEMM_ROWS 50
#define MAX_Q_GEMM_ROWS_8BIT 24
#define MAX_ALT_GEMM_ROWS 8
#define THREADS_X 32
#define THREADS_Y 32
#define DIVIDE(x, size) (((x) + (size) - 1) / (size))

#if defined(USE_ROCM)
#include <hipblas/hipblas.h>
__host__ __forceinline__ hipblasStatus_t __compat_hipblasHgemm(hipblasHandle_t handle,
                                                               hipblasOperation_t transA,
                                                               hipblasOperation_t transB,
                                                               int m,
                                                               int n,
                                                               int k,
                                                               const half* alpha,
                                                               const half* AP,
                                                               int lda,
                                                               const half* BP,
                                                               int ldb,
                                                               const half* beta,
                                                               half* CP,
                                                               int ldc) {
    return hipblasHgemm(handle, transA, transB, m, n, k,
                        reinterpret_cast<const hipblasHalf*>(alpha),
                        reinterpret_cast<const hipblasHalf*>(AP), lda,
                        reinterpret_cast<const hipblasHalf*>(BP), ldb,
                        reinterpret_cast<const hipblasHalf*>(beta),
                        reinterpret_cast<hipblasHalf*>(CP), ldc);
}
#define hipblasHgemm __compat_hipblasHgemm

// Previous version of PyTorch were converting to rocBLAS instead of hipBLAS.
#define rocblas_operation_none HIPBLAS_OP_N
#define rocblas_hgemm __compat_hipblasHgemm
#endif

// dot22_*: dot products of 8/16/32 dequantized half values against a row of
// activations, accumulating into g_result, with optional per-group scale qs.
__forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hadd2(result, g_result);
}

__forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __half2float(__low2half(result)) + __half2float(__high2half(result));
}

__forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

__forceinline__ __device__ half2 dot22_16(half2(&dq)[8], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

__forceinline__ __device__ half2 dot22_32(half2(&dq)[16], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

__forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

__forceinline__ __device__ float dot22_16_f(half2(&dq)[8], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

__forceinline__ __device__ float dot22_32_f(half2(&dq)[16], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

__forceinline__ __device__ half dot22_8_h(half2(&dq)[4], const half* a_ptr, const half g_result, const half qs_h)
{
    // Use FP32 accumulator to avoid potential overflow since unscaled weights are in the range -128..127

    float result = {};
    #pragma unroll
    for (int i = 0; i < 4; i++)
    {
        half2 w01 = dq[i];
        float w0 = __low2float(w01);
        float w1 = __high2float(w01);
        float x0 = __half2float(*a_ptr++);
        float x1 = __half2float(*a_ptr++);
        result = fma(w0, x0, result);
        result = fma(w1, x1, result);
    }
    float qs = __half2float(qs_h);
    result *= qs;
    half result_h = __float2half_rn(result);
    return __hadd(result_h, g_result);
}

__forceinline__ __device__ half dot22_16_h(half2(&dq)[8], const half* a_ptr, const half g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    half result_h = __hadd(__low2half(result),
__high2half(result)); + return __hfma(result_h, qs_h, g_result); +} + +__forceinline__ __device__ half dot22_32_h(half2(&dq)[16], const half* a_ptr, const half g_result, const half qs_h) +{ + half2 result = {}; + const half2* a2_ptr = (const half2*)a_ptr; + #pragma unroll + for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result); + half result_h = __hadd(__low2half(result), __high2half(result)); + return __hfma(result_h, qs_h, g_result); +} + + +typedef void (*fp_gemm_half_q_half_gptq_kernel) +( + const half*, + const uint32_t*, + const uint32_t*, + const half*, + half*, + const int, + const int, + const int, + const int, + const int* +); + + +template +__global__ void gemm_half_q_half_gptq_4bit_kernel +( + const half* __restrict__ a, + const uint32_t* __restrict__ b_q_weight, + const uint32_t* __restrict__ b_gptq_qzeros, + const half* __restrict__ b_gptq_scales, + half* __restrict__ c, + const int size_m, + const int size_n, + const int size_k, + const int groups, + const int* __restrict__ b_q_perm +) +{ + MatrixView_half a_(a, size_m, size_k); + MatrixView_half_rw c_(c, size_m, size_n); + MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); + MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); + + int t = threadIdx.x; + + // Block + int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4; + int offset_m = blockIdx.y * m_count; + int offset_k = blockIdx.z * BLOCK_KN_SIZE; + + int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n); + int end_m = min(offset_m + m_count, size_m); + int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + + int n = offset_n + t * 4; + + // Preload block_a + __shared__ half block_a[m_count][BLOCK_KN_SIZE]; + + if (offset_k + t < end_k) + { + for (int m = 0; m < m_count; ++m) + { + const half* a_ptr = a_.item_ptr(offset_m + m, 0); + half* block_a_ptr = block_a[m]; + + half a0; + if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; + else a0 = a_ptr[offset_k + t]; + block_a_ptr[t] = a0; + } + } + + // Zero output 
+ if (n >= size_n) return; + + if (blockIdx.z == 0) + { + for (int m = 0; m < m_count; m++) + *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; + } + + __syncthreads(); + + // Find initial group + int groupsize = size_k / groups; + int group = offset_k / groupsize; + int nextgroup = offset_k + groupsize; + + // a, b offset + int qk = offset_k / (32 / 4); + + const uint32_t* b_ptr = b_q_weight + qk * size_n + n; + const half* a_ptr = &block_a[0][0]; + int a_stride = BLOCK_KN_SIZE; + + // Initial group + int zeros[4]; + float scales[4]; + half2 z1z16[4][2]; + half2 y1y16[4][2]; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_f(scales, group, n); + dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); + dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + + // Column result + float block_c[m_count][4] = {}; + + // Dequantize and multiply + int k = offset_k; + while (k < end_k) + { + if (k == nextgroup) + { + group++; + nextgroup += groupsize; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_f(scales, group, n); + dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); + dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + } + + #pragma unroll + for (int j = 0; j < 4; j++) + { + const int4* b_ptr4 = (int4*) b_ptr; + int4 load_int4 = *b_ptr4; + + half2 dq[4][4]; + dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false); + dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false); + dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false); + dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false); + + #pragma unroll + for (int m = 0; m < m_count; m++) + { + block_c[m][0] = fma(dot22_8_f(dq[0], a_ptr 
+ m * a_stride), scales[0], block_c[m][0]); + block_c[m][1] = fma(dot22_8_f(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); + block_c[m][2] = fma(dot22_8_f(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); + block_c[m][3] = fma(dot22_8_f(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); + } + + b_ptr += size_n; + a_ptr += 8; + } + + k += 32; + } + + for (int m = 0; m < m_count; m++) + { + half2 *out = (half2*) c_.item_ptr(offset_m + m, n); + half2 result01 = __halves2half2(__float2half_rn(block_c[m][0]), __float2half_rn(block_c[m][1])); + half2 result23 = __halves2half2(__float2half_rn(block_c[m][2]), __float2half_rn(block_c[m][3])); + atomicAdd(out , result01); + atomicAdd(out + 1, result23); + } +} + +template +__global__ void gemm_half_q_half_gptq_2bit_kernel +( + const half* __restrict__ a, + const uint32_t* __restrict__ b_q_weight, + const uint32_t* __restrict__ b_gptq_qzeros, + const half* __restrict__ b_gptq_scales, + half* __restrict__ c, + const int size_m, + const int size_n, + const int size_k, + const int groups, + const int* __restrict__ b_q_perm +) +{ + MatrixView_half a_(a, size_m, size_k); + MatrixView_half_rw c_(c, size_m, size_n); + MatrixView_q2_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); + MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); + + int t = threadIdx.x; + + // Block + int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4; + int offset_m = blockIdx.y * m_count; + int offset_k = blockIdx.z * BLOCK_KN_SIZE; + + int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n); + int end_m = min(offset_m + m_count, size_m); + int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + + int n = offset_n + t * 4; + + // Preload block_a + __shared__ half block_a[m_count][BLOCK_KN_SIZE]; + + if (offset_k + t < end_k) + { + for (int m = 0; m < m_count; ++m) + { + const half* a_ptr = a_.item_ptr(offset_m + m, 0); + half* block_a_ptr = block_a[m]; + + half a0; + if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; + else a0 = 
a_ptr[offset_k + t]; + block_a_ptr[t] = a0; + } + } + + // Zero output + if (n >= size_n) return; + + if (blockIdx.z == 0) + { + for (int m = 0; m < m_count; m++) + *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; + } + + __syncthreads(); + + // Find initial group + int groupsize = size_k / groups; + int group = offset_k / groupsize; + int nextgroup = offset_k + groupsize; + + // a, b offset + int qk = offset_k / (32 / 2); + + const uint32_t* b_ptr = b_q_weight + qk * size_n + n; + const half* a_ptr = &block_a[0][0]; + int a_stride = BLOCK_KN_SIZE; + + // Initial group + int zeros[4]; + half scales[4]; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4(scales, group, n); + // Column result + half block_c[m_count][4] = {}; + + // Dequantize and multiply + int k = offset_k; + while (k < end_k) + { + if (k == nextgroup) + { + group++; + nextgroup += groupsize; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4(scales, group, n); + } + + #pragma unroll + for (int j = 0; j < 1; j++) + { + const int4* b_ptr4 = (int4*) b_ptr; + int4 load_int4 = *b_ptr4; + + half2 dq[4][8]; + dequant_2bit_16(load_int4.x, dq[0], size_n, zeros[0] + 1); + dequant_2bit_16(load_int4.y, dq[1], size_n, zeros[1] + 1); + dequant_2bit_16(load_int4.z, dq[2], size_n, zeros[2] + 1); + dequant_2bit_16(load_int4.w, dq[3], size_n, zeros[3] + 1); + + #pragma unroll + for (int m = 0; m < m_count; m++) + { + block_c[m][0] = dot22_16_h(dq[0], a_ptr + m * a_stride, block_c[m][0], scales[0]); + block_c[m][1] = dot22_16_h(dq[1], a_ptr + m * a_stride, block_c[m][1], scales[1]); + block_c[m][2] = dot22_16_h(dq[2], a_ptr + m * a_stride, block_c[m][2], scales[2]); + block_c[m][3] = dot22_16_h(dq[3], a_ptr + m * a_stride, block_c[m][3], scales[3]); + } + + b_ptr += size_n; + a_ptr += 16; + } + + k += 16; + } + + for (int m = 0; m < m_count; m++) + { + half2 *out = (half2*) c_.item_ptr(offset_m + m, n); + half2 result01 = __halves2half2(block_c[m][0], block_c[m][1]); + half2 result23 = 
__halves2half2(block_c[m][2], block_c[m][3]); + atomicAdd(out , result01); + atomicAdd(out + 1, result23); + } +} + +template +__global__ void gemm_half_q_half_gptq_3bit_kernel +( + const half* __restrict__ a, + const uint32_t* __restrict__ b_q_weight, + const uint32_t* __restrict__ b_gptq_qzeros, + const half* __restrict__ b_gptq_scales, + half* __restrict__ c, + const int size_m, + const int size_n, + const int size_k, + const int groups, + const int* __restrict__ b_q_perm +) +{ + MatrixView_half a_(a, size_m, size_k); + MatrixView_half_rw c_(c, size_m, size_n); + MatrixView_q3_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); + MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); + + int t = threadIdx.x; + + // Block + int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4; + int offset_m = blockIdx.y * m_count; + int offset_k = blockIdx.z * BLOCK_KN_SIZE; + + int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n); + int end_m = min(offset_m + m_count, size_m); + int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + + int n = offset_n + t * 4; + + // Preload block_a + __shared__ half block_a[m_count][BLOCK_KN_SIZE]; + + if (offset_k + t < end_k) + { + for (int m = 0; m < m_count; ++m) + { + const half* a_ptr = a_.item_ptr(offset_m + m, 0); + half* block_a_ptr = block_a[m]; + + half a0; + if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; + else a0 = a_ptr[offset_k + t]; + block_a_ptr[t] = a0; + } + } + + // Zero output + if (n >= size_n) return; + + if (blockIdx.z == 0) + { + for (int m = 0; m < m_count; m++) + *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; + } + + __syncthreads(); + + // Find initial group + int groupsize = size_k / groups; + int group = offset_k / groupsize; + int nextgroup = offset_k + groupsize; + + // a, b offset + int qk = offset_k / 32 * 3; + + const uint32_t* b_ptr = b_q_weight + qk * size_n + n; + const half* a_ptr = &block_a[0][0]; + int a_stride = BLOCK_KN_SIZE; + + // Initial group + int zeros[4]; + half scales[4]; + 
// --- tail of gemm_half_q_half_gptq_3bit_kernel (header is above this chunk) ---
    b_gptq_qzeros_.item4(zeros, group, n);
    b_gptq_scales_.item4(scales, group, n);
    // Column result
    half block_c[m_count][4] = {};

    // Dequantize and multiply
    int k = offset_k;
    while (k < end_k)
    {
        if (k == nextgroup)
        {
            group++;
            nextgroup += groupsize;
            b_gptq_qzeros_.item4(zeros, group, n);
            b_gptq_scales_.item4(scales, group, n);
        }

        #pragma unroll
        for (int j = 0; j < 1; j++)
        {
            int4 load_int4[3];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[2] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][16];
            dequant_3bit_32(load_int4[0].x, load_int4[1].x, load_int4[2].x, dq[0], size_n, zeros[0] + 1);
            dequant_3bit_32(load_int4[0].y, load_int4[1].y, load_int4[2].y, dq[1], size_n, zeros[1] + 1);
            dequant_3bit_32(load_int4[0].z, load_int4[1].z, load_int4[2].z, dq[2], size_n, zeros[2] + 1);
            dequant_3bit_32(load_int4[0].w, load_int4[1].w, load_int4[2].w, dq[3], size_n, zeros[3] + 1);

            #pragma unroll
            for (int m = 0; m < m_count; m++)
            {
                block_c[m][0] = dot22_32_h(dq[0], a_ptr + m * a_stride, block_c[m][0], scales[0]);
                block_c[m][1] = dot22_32_h(dq[1], a_ptr + m * a_stride, block_c[m][1], scales[1]);
                block_c[m][2] = dot22_32_h(dq[2], a_ptr + m * a_stride, block_c[m][2], scales[2]);
                block_c[m][3] = dot22_32_h(dq[3], a_ptr + m * a_stride, block_c[m][3], scales[3]);
            }
            a_ptr += 32;
        }

        k += 32;
    }

    for (int m = 0; m < m_count; m++)
    {
        half2 *out = (half2*) c_.item_ptr(offset_m + m, n);
        half2 result01 = __halves2half2(block_c[m][0], block_c[m][1]);
        half2 result23 = __halves2half2(block_c[m][2], block_c[m][3]);
        atomicAdd(out    , result01);
        atomicAdd(out + 1, result23);
    }
}

// GEMM of an fp16 activation tile against an 8-bit GPTQ-quantized weight
// matrix.  Each block handles m_count rows of `a` and BLOCK_KN_SIZE*4 columns
// of `c`; each blockIdx.z slice covers BLOCK_KN_SIZE of k and accumulates its
// partial products into `c` with atomicAdd, after the z == 0 slice zeroes `c`.
//
// NOTE(review): the template parameter list was lost in extraction; restored
// as <bool first_block, int m_count> to match the <true, M_COUNT>
// instantiations in SELECT_KERNEL below (m_count must be a compile-time
// constant: it sizes the shared block_a array and block_c).
template <bool first_block, int m_count>
__global__ void gemm_half_q_half_gptq_8bit_kernel
(
    const half* __restrict__ a,
    const uint32_t* __restrict__ b_q_weight,
    const uint32_t* __restrict__ b_gptq_qzeros,
    const half* __restrict__ b_gptq_scales,
    half* __restrict__ c,
    const int size_m,
    const int size_n,
    const int size_k,
    const int groups,
    const int* __restrict__ b_q_perm
)
{
    MatrixView_half a_(a, size_m, size_k);
    MatrixView_half_rw c_(c, size_m, size_n);
    MatrixView_q8_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
    MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);

    int t = threadIdx.x;

    // Block
    int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4;
    int offset_m = blockIdx.y * m_count;
    int offset_k = blockIdx.z * BLOCK_KN_SIZE;

    int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n);
    int end_m = min(offset_m + m_count, size_m);
    int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);

    int n = offset_n + t * 4;

    // Preload block_a: the activation slice for this k-range, applying the
    // activation-order permutation b_q_perm when present.
    __shared__ half block_a[m_count][BLOCK_KN_SIZE];

    if (offset_k + t < end_k)
    {
        for (int m = 0; m < m_count; ++m)
        {
            const half* a_ptr = a_.item_ptr(offset_m + m, 0);
            half* block_a_ptr = block_a[m];

            half a0;
            if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]];
            else a0 = a_ptr[offset_k + t];
            block_a_ptr[t] = a0;
        }
    }

    // Zero output
    if (n >= size_n) return;

    if (blockIdx.z == 0)
    {
        for (int m = 0; m < m_count; m++)
            *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0;
    }

    __syncthreads();

    // Find initial group
    int groupsize = size_k / groups;
    int group = offset_k / groupsize;
    int nextgroup = offset_k + groupsize;

    // a, b offset (8-bit: four weights per 32-bit word)
    int qk = offset_k / (32 / 8);

    const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
    const half* a_ptr = &block_a[0][0];
    int a_stride = BLOCK_KN_SIZE;

    // Initial group
    int zeros[4];
    half scales[4];
    b_gptq_qzeros_.item4(zeros, group, n);
    b_gptq_scales_.item4(scales, group, n);
    // Column result
    half block_c[m_count][4] = {};

    // Dequantize and multiply
    int k = offset_k;
    while (k < end_k)
    {
        if (k == nextgroup)
        {
            group++;
            nextgroup += groupsize;
            b_gptq_qzeros_.item4(zeros, group, n);
            b_gptq_scales_.item4(scales, group, n);
        }

        #pragma unroll
        for (int j = 0; j < 4; j++)
        {
            int4 load_int4[2];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][4];
            dequant_8bit_8(load_int4[0].x, load_int4[1].x, dq[0], size_n, zeros[0] + 1);
            dequant_8bit_8(load_int4[0].y, load_int4[1].y, dq[1], size_n, zeros[1] + 1);
            dequant_8bit_8(load_int4[0].z, load_int4[1].z, dq[2], size_n, zeros[2] + 1);
            dequant_8bit_8(load_int4[0].w, load_int4[1].w, dq[3], size_n, zeros[3] + 1);

            for (int m = 0; m < m_count; m++)
            {
                block_c[m][0] = dot22_8_h(dq[0], a_ptr + m * a_stride, block_c[m][0], scales[0]);
                block_c[m][1] = dot22_8_h(dq[1], a_ptr + m * a_stride, block_c[m][1], scales[1]);
                block_c[m][2] = dot22_8_h(dq[2], a_ptr + m * a_stride, block_c[m][2], scales[2]);
                block_c[m][3] = dot22_8_h(dq[3], a_ptr + m * a_stride, block_c[m][3], scales[3]);
            }
            a_ptr += 8;
        }
        k += 32;
    }

    for (int m = 0; m < m_count; m++)
    {
        half2 *out = (half2*) c_.item_ptr(offset_m + m, n);
        half2 result01 = __halves2half2(block_c[m][0], block_c[m][1]);
        half2 result23 = __halves2half2(block_c[m][2], block_c[m][3]);
        atomicAdd(out    , result01);
        atomicAdd(out + 1, result23);
    }
}

// Select the kernel instantiation matching the requested batch-rows-per-block
// (m_count) and quantization bit width.  Returns NULL for unsupported combos.
//
// NOTE(review): the explicit template arguments were lost in extraction;
// restored as <true, M_COUNT> (this is only ever called with
// first_block == true from gemm_half_q_half_cuda_part).
fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(
    bool first_block, const int m_count, const int bit)
{
    #define SELECT_KERNEL(M_COUNT) \
    if (m_count == M_COUNT) { \
        if (bit == 2) return gemm_half_q_half_gptq_2bit_kernel<true, M_COUNT>; \
        if (bit == 3) return gemm_half_q_half_gptq_3bit_kernel<true, M_COUNT>; \
        if (bit == 4) return gemm_half_q_half_gptq_4bit_kernel<true, M_COUNT>; \
        if (bit == 8) return gemm_half_q_half_gptq_8bit_kernel<true, M_COUNT>; \
    }
    #if BLOCK_M_SIZE_MAX >= 1
    SELECT_KERNEL(1);
    #endif
    #if BLOCK_M_SIZE_MAX >= 2
    SELECT_KERNEL(2);
    #endif
    #if BLOCK_M_SIZE_MAX >= 3
    SELECT_KERNEL(3);
    #endif
    #if BLOCK_M_SIZE_MAX >= 4
    SELECT_KERNEL(4);
    #endif
    #if BLOCK_M_SIZE_MAX >= 5
    SELECT_KERNEL(5);
    #endif
    #if BLOCK_M_SIZE_MAX >= 6
    SELECT_KERNEL(6);
    #endif
    #if BLOCK_M_SIZE_MAX >= 7
SELECT_KERNEL(7); + #endif + #if BLOCK_M_SIZE_MAX >= 8 + SELECT_KERNEL(8); + #endif + return NULL; +} + + +void gemm_half_q_half_cuda_part +( + const half* a, + const uint32_t* b_q_weight, + const uint32_t* b_gptq_qzeros, + const half* b_gptq_scales, + const int* b_q_perm, + half* c, + int size_m, + int size_n, + int size_k, + int m_count, + int groups, + int bit +) +{ + dim3 blockDim, gridDim; + blockDim.x = BLOCK_KN_SIZE; + blockDim.y = 1; + blockDim.z = 1; + gridDim.x = DIVIDE(size_n, BLOCK_KN_SIZE * 4); + gridDim.y = DIVIDE(size_m, m_count); + gridDim.z = DIVIDE(size_k, BLOCK_KN_SIZE); + + fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(true, m_count, bit); + + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + kernel<<>> + ( + a, + b_q_weight, + b_gptq_qzeros, + b_gptq_scales, + c, + size_m, + size_n, + size_k, + groups, + b_q_perm + ); +} + + +__global__ void reconstruct_exllama_8bit_kernel +( + const uint32_t* __restrict__ b_q_weight, + const int* __restrict__ b_q_perm, + const uint32_t* __restrict__ b_gptq_qzeros, + const half* __restrict__ b_gptq_scales, + const int size_k, + const int size_n, + const int groups, + half* __restrict__ b +) +{ + MatrixView_half_rw b_(b, size_k, size_n); + MatrixView_q8_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); + MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); + + int offset_k = BLOCK_KN_SIZE * blockIdx.y; + int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4; + + int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + + // Preload remapping table + __shared__ int perm[BLOCK_KN_SIZE]; + int t = threadIdx.x; + + if (b_q_perm) + { + if (offset_k + t < size_k) + perm[t] = b_q_perm[offset_k + t]; + } + + // Column + int n = offset_n + t * 4; + if (n >= size_n) return; + + // Find initial group + int groupsize = size_k / groups; + int group = offset_k / groupsize; + int nextgroup = offset_k + groupsize; + + // b offset + int qk = offset_k / (32 / 8); + + const uint32_t* b_ptr 
= b_q_weight + qk * size_n + n; + + // Initial zeros/scale + int zeros[4]; + half2 scales[4]; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_h2(scales, group, n); + + __syncthreads(); + + int k = offset_k; + int lk = 0; + + while (k < end_k) + { + if (k == nextgroup) + { + group++; + nextgroup += groupsize; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_h2(scales, group, n); + } + + for (int p = 0; p < 4; p++) + { + int4 load_int4[2]; + load_int4[0] = *((int4*) b_ptr); b_ptr += size_n; + load_int4[1] = *((int4*) b_ptr); b_ptr += size_n; + + half2 dq[4][4]; + dequant_8bit_8(load_int4[0].x, load_int4[1].x, dq[0], size_n, zeros[0] + 1); + dequant_8bit_8(load_int4[0].y, load_int4[1].y, dq[1], size_n, zeros[1] + 1); + dequant_8bit_8(load_int4[0].z, load_int4[1].z, dq[2], size_n, zeros[2] + 1); + dequant_8bit_8(load_int4[0].w, load_int4[1].w, dq[3], size_n, zeros[3] + 1); + + //half* dqh = (half*)dq; + if (b_q_perm) + { + for (int j = 0; j < 4; j++) + { + for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]); + b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j])); + b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j])); + } + } + else + { + for (int j = 0; j < 4; j++) + { + for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]); + b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j])); + b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j])); + } + } + } + k += 32; + } +} + +__global__ void reconstruct_exllama_4bit_kernel +( + const uint32_t* __restrict__ b_q_weight, + const int* __restrict__ b_q_perm, + const uint32_t* __restrict__ b_gptq_qzeros, + const half* __restrict__ b_gptq_scales, + const int size_k, + const int size_n, + const int groups, + half* __restrict__ b 
// Body of reconstruct_exllama_4bit_kernel (4-bit path: eight weights per
// 32-bit word; per-group zero tables are precomputed via
// dequant_4bit_8_prep_zero).
)
{
    MatrixView_half_rw b_(b, size_k, size_n);
    MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
    MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);

    int offset_k = BLOCK_KN_SIZE * blockIdx.y;
    int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4;

    int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);

    // Preload remapping table
    __shared__ int perm[BLOCK_KN_SIZE];
    int t = threadIdx.x;

    if (b_q_perm)
    {
        if (offset_k + t < size_k)
            perm[t] = b_q_perm[offset_k + t];
    }

    // Column
    int n = offset_n + t * 4;
    if (n >= size_n) return;

    // Find initial group
    int groupsize = size_k / groups;
    int group = offset_k / groupsize;
    int nextgroup = offset_k + groupsize;

    // b offset
    int qk = offset_k / (32 / 4);

    const uint32_t* b_ptr = b_q_weight + qk * size_n + n;

    // Initial zeros/scale
    int zeros[4];
    half2 scales[4];
    half2 z1z16[4][2];
    half2 y1y16[4][2];
    b_gptq_qzeros_.item4(zeros, group, n);
    b_gptq_scales_.item4_h2(scales, group, n);
    dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]);
    dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]);
    dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]);
    dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]);

    __syncthreads();

    int k = offset_k;
    int lk = 0;

    while (k < end_k)
    {
        if (k == nextgroup)
        {
            group++;
            nextgroup += groupsize;
            b_gptq_qzeros_.item4(zeros, group, n);
            b_gptq_scales_.item4_h2(scales, group, n);
            dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]);
            dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]);
            dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]);
            dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]);
        }

        for (int p = 0; p < 4; p++)
        {
            half2 dq[4][4];
            const int4* b_ptr4 = (int4*) b_ptr;
            int4 load_int4 = *b_ptr4;

            dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false);
            dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false);
            dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false);
            dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false);

            b_ptr += size_n;
            //half* dqh = (half*)dq;
            if (b_q_perm)
            {
                for (int j = 0; j < 4; j++)
                {
                    for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
                    b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
                    b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
                }
            }
            else
            {
                for (int j = 0; j < 4; j++)
                {
                    for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
                    b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
                    b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
                }
            }
        }
        k += 32;
    }
}

// Dequantize a 3-bit exllama-format weight matrix back to fp16.
__global__ void reconstruct_exllama_3bit_kernel
(
    const uint32_t* __restrict__ b_q_weight,
    const int* __restrict__ b_q_perm,
    const uint32_t* __restrict__ b_gptq_qzeros,
    const half* __restrict__ b_gptq_scales,
    const int size_k,
    const int size_n,
    const int groups,
    half* __restrict__ b
)
{
    MatrixView_half_rw b_(b, size_k, size_n);
    MatrixView_q3_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
    MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);

    int offset_k = BLOCK_KN_SIZE * blockIdx.y;
    int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4;

    int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);

    // Preload remapping table
    __shared__ int perm[BLOCK_KN_SIZE];
    int t = threadIdx.x;

    if (b_q_perm)
    {
        if (offset_k + t < size_k)
            perm[t] = b_q_perm[offset_k + t];
    }

    // Column
    int n = offset_n + t * 4;
    if (n >= size_n) return;

    // Find initial group
    int groupsize = size_k / groups;
    int group = offset_k / groupsize;
    int nextgroup =
offset_k + groupsize; + + // b offset + int qk = offset_k / 32* 3; + + const uint32_t* b_ptr = b_q_weight + qk * size_n + n; + + // Initial zeros/scale + int zeros[4]; + half2 scales[4]; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_h2(scales, group, n); + + __syncthreads(); + + int k = offset_k; + int lk = 0; + + while (k < end_k) + { + if (k == nextgroup) + { + group++; + nextgroup += groupsize; + b_gptq_qzeros_.item4(zeros, group, n); + b_gptq_scales_.item4_h2(scales, group, n); + } + + for (int p = 0; p < 1; p++) + { + int4 load_int4[3]; + load_int4[0] = *((int4*) b_ptr); b_ptr += size_n; + load_int4[1] = *((int4*) b_ptr); b_ptr += size_n; + load_int4[2] = *((int4*) b_ptr); b_ptr += size_n; + + half2 dq[4][16]; + dequant_3bit_32(load_int4[0].x, load_int4[1].x, load_int4[2].x, dq[0], size_n, zeros[0] + 1); + dequant_3bit_32(load_int4[0].y, load_int4[1].y, load_int4[2].y, dq[1], size_n, zeros[1] + 1); + dequant_3bit_32(load_int4[0].z, load_int4[1].z, load_int4[2].z, dq[2], size_n, zeros[2] + 1); + dequant_3bit_32(load_int4[0].w, load_int4[1].w, load_int4[2].w, dq[3], size_n, zeros[3] + 1); + + if (b_q_perm) + { + for (int j = 0; j < 16; j++) + { + for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]); + b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j])); + b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j])); + } + } + else + { + for (int j = 0; j < 16; j++) + { + for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]); + b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j])); + b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j])); + } + } + } + k += 32; + } +} + +__global__ void reconstruct_exllama_2bit_kernel +( + const uint32_t* __restrict__ b_q_weight, + const int* 
// --- remainder of reconstruct_exllama_2bit_kernel's parameter list/body ---
__restrict__ b_q_perm,
    const uint32_t* __restrict__ b_gptq_qzeros,
    const half* __restrict__ b_gptq_scales,
    const int size_k,
    const int size_n,
    const int groups,
    half* __restrict__ b
)
{
    MatrixView_half_rw b_(b, size_k, size_n);
    MatrixView_q2_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
    MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);

    int offset_k = BLOCK_KN_SIZE * blockIdx.y;
    int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4;

    int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);

    // Preload remapping table
    __shared__ int perm[BLOCK_KN_SIZE];
    int t = threadIdx.x;

    if (b_q_perm)
    {
        if (offset_k + t < size_k)
            perm[t] = b_q_perm[offset_k + t];
    }

    // Column
    int n = offset_n + t * 4;
    if (n >= size_n) return;

    // Find initial group
    int groupsize = size_k / groups;
    int group = offset_k / groupsize;
    int nextgroup = offset_k + groupsize;

    // b offset
    int qk = offset_k / (32 / 2);

    const uint32_t* b_ptr = b_q_weight + qk * size_n + n;

    // Initial zeros/scale
    int zeros[4];
    half2 scales[4];
    b_gptq_qzeros_.item4(zeros, group, n);
    b_gptq_scales_.item4_h2(scales, group, n);

    __syncthreads();

    int k = offset_k;
    int lk = 0;

    while (k < end_k)
    {
        if (k == nextgroup)
        {
            group++;
            nextgroup += groupsize;
            b_gptq_qzeros_.item4(zeros, group, n);
            b_gptq_scales_.item4_h2(scales, group, n);
        }

        for (int p = 0; p < 2; p++)
        {
            const int4* b_ptr4 = (int4*) b_ptr;
            int4 load_int4 = *b_ptr4;

            half2 dq[4][8];
            dequant_2bit_16(load_int4.x, dq[0], size_n, zeros[0] + 1);
            dequant_2bit_16(load_int4.y, dq[1], size_n, zeros[1] + 1);
            dequant_2bit_16(load_int4.z, dq[2], size_n, zeros[2] + 1);
            dequant_2bit_16(load_int4.w, dq[3], size_n, zeros[3] + 1);

            b_ptr += size_n;
            //half* dqh = (half*)dq;
            if (b_q_perm)
            {
                for (int j = 0; j < 8; j++)
                {
                    for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
                    b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
                    b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
                }
            }
            else
            {
                for (int j = 0; j < 8; j++)
                {
                    for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
                    b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
                    b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
                }
            }
        }
        k += 32;
    }
}

// Host entry: dequantize an exllama-format quantized matrix into `out`
// (height x width fp16), dispatching on bit width.
void reconstruct_exllama
(
    const uint32_t* b_q_weight,
    const uint32_t* b_gptq_qzeros,
    const half* b_gptq_scales,
    const int* b_q_perm,
    half* out,
    int height,
    int width,
    int groups,
    int bit
)
{
    dim3 blockDim, gridDim;
    blockDim.x = BLOCK_KN_SIZE;
    blockDim.y = 1;
    gridDim.y = DIVIDE(height, BLOCK_KN_SIZE);
    gridDim.x = DIVIDE(width, BLOCK_KN_SIZE);

    auto reconstruct_exllama_kernel = reconstruct_exllama_4bit_kernel;
    if (bit == 2) {
        reconstruct_exllama_kernel = reconstruct_exllama_2bit_kernel;
    } else if (bit == 3) {
        reconstruct_exllama_kernel = reconstruct_exllama_3bit_kernel;
    } else if (bit == 8) {
        reconstruct_exllama_kernel = reconstruct_exllama_8bit_kernel;
    }

    const musaStream_t stream = at::musa::getCurrentMUSAStream();
    // NOTE(review): the launch configuration was lost in extraction; restored
    // to the standard <<<grid, block, sharedMem, stream>>> form.
    reconstruct_exllama_kernel<<<gridDim, blockDim, 0, stream>>>
    (
        b_q_weight,
        b_q_perm,
        b_gptq_qzeros,
        b_gptq_scales,
        height,
        width,
        groups,
        out
    );
}


// Fallback ("alt") 4-bit GEMM kernel for the non-exllama GPTQ layout,
// using per-row group lookup via g_idx and a shared 256-entry dequant LUT.
__global__ void gemm_half_q_half_alt_4bit_kernel(
    const half2* __restrict__ vec,
    const uint32_t* __restrict__ mat,
    half* __restrict__ mul,
    const half* __restrict__ scales,
    const uint32_t* __restrict__ zeros,
    const int* __restrict__ g_idx,
    int batch,
    int height,
    int width
)
{
    int zero_width = width / 8;
    int vec_height = height * 4;
    const int blockwidth2 = BLOCK_KN_SIZE / 2;
    int b = blockIdx.y * BLOCK_M_SIZE_MAX;
    int b_end = min(BLOCK_M_SIZE_MAX, batch - b);
// Continuation of gemm_half_q_half_alt_4bit_kernel.
int h = BLOCK_KN_SIZE * blockIdx.z / 8;
    int h_end = min(BLOCK_KN_SIZE / 8, height - h) * 4;
    int w = BLOCK_KN_SIZE * blockIdx.x + threadIdx.x;

    __shared__ half2 blockvec[BLOCK_M_SIZE_MAX][blockwidth2];
    if (threadIdx.x < h_end) {
        for (int m = 0; m < b_end; ++m) {
            blockvec[m][threadIdx.x] =
                vec[(m + b) * vec_height + blockIdx.z * BLOCK_KN_SIZE / 2 +
                    threadIdx.x];
        }
    }

    // LUT mapping a byte to its two 4-bit values as a half2.
    __shared__ half2 deq2[256][8];
    int val = threadIdx.x / 8;
    int off = threadIdx.x % 8;
    for (; val < 256; val += BLOCK_KN_SIZE / 8) {
        deq2[val][off] = __halves2half2(
            __int2half_rn(val & 0xF), __int2half_rn(val >> 4)
        );
    }

    if (blockIdx.z == 0)
    {
        for (int m = 0; m < b_end; m++)
            mul[(b + m) * width + w] = __int2half_rn(0);
    }
    __syncthreads();

    int i = width * h + w;
    int g_h = h * 8;
    int k = 0;
    int z_w = w / 8;
    int z_mod = (w % 8) * 4;
    half2 res2;
    half res[BLOCK_M_SIZE_MAX] = {};

    unsigned int tmp;
    while (k < h_end) {
        tmp = mat[i];
        half2 scales_tmp[4];
        half2 zeros_tmp[4];
        for (int tmp_k = 0; tmp_k < 4; tmp_k++) {
            int g = g_idx[g_h + (k + tmp_k) * 2];
            int g2 = g_idx[g_h + (k + tmp_k) * 2 + 1];
            half scale_f = scales[g * width + w];
            half scale_f2 = scales[g2 * width + w];
            half2 scale = __halves2half2(scale_f, scale_f2);
            half2 zero = __halves2half2(
                __hmul(scale_f, __int2half_rn(-((zeros[g * zero_width + z_w] >> z_mod) & 0xF) - 1)),
                __hmul(scale_f2, __int2half_rn(-((zeros[g2 * zero_width + z_w] >> z_mod) & 0xF) - 1))
            );
            scales_tmp[tmp_k] = scale;
            zeros_tmp[tmp_k] = zero;
        }
        for (int m = 0; m < b_end; m++) {
#ifndef USE_ROCM
            res2 = {};
#else
            res2.x = __half_as_ushort(__float2half(0));
            res2.y = __half_as_ushort(__float2half(0));
#endif
            res2 = __hfma2(__hfma2(deq2[(tmp >> 0) & 0xff][off], scales_tmp[0], zeros_tmp[0]), blockvec[m][k + 0], res2);
            res2 = __hfma2(__hfma2(deq2[(tmp >> 8) & 0xff][off], scales_tmp[1], zeros_tmp[1]), blockvec[m][k + 1], res2);
            res2 = __hfma2(__hfma2(deq2[(tmp >> 16) & 0xff][off], scales_tmp[2], zeros_tmp[2]), blockvec[m][k + 2], res2);
            res2 = __hfma2(__hfma2(deq2[(tmp >> 24) & 0xff][off], scales_tmp[3], zeros_tmp[3]), blockvec[m][k + 3], res2);
#ifndef USE_ROCM
            res[m] = __hadd(res[m], __hadd(res2.x, res2.y));
#else
            res[m] = __hadd(res[m], __hadd(__ushort_as_half(res2.x), __ushort_as_half(res2.y)));
#endif
        }
        i += width;
        k += 4;
    }
    for (int m = 0; m < b_end; m++) {
        atomicAdd(&mul[(b + m) * width + w], res[m]);
    }
}


// Fallback ("alt") 8-bit GEMM kernel for the non-exllama GPTQ layout.
__global__ void gemm_half_q_half_alt_8bit_kernel(
    const half2* __restrict__ vec,
    const uint32_t* __restrict__ mat,
    half* __restrict__ mul,
    const half* __restrict__ scales,
    const uint32_t* __restrict__ zeros,
    const int* __restrict__ g_idx,
    int batch,
    int height,
    int width
)
{
    int zero_width = width / 4;
    int vec_height = height * 2;
    const int blockwidth2 = BLOCK_KN_SIZE / 2;
    int b = blockIdx.y * BLOCK_M_SIZE_MAX;
    int b_end = min(BLOCK_M_SIZE_MAX, batch - b);
    int h = BLOCK_KN_SIZE * blockIdx.z / 4;
    int h_end = min(BLOCK_KN_SIZE / 4, height - h) * 2;
    int w = BLOCK_KN_SIZE * blockIdx.x + threadIdx.x;

    __shared__ half2 blockvec[BLOCK_M_SIZE_MAX][blockwidth2];
    if (threadIdx.x < h_end) {
        for (int m = 0; m < b_end; ++m) {
            blockvec[m][threadIdx.x] =
                vec[(m + b) * vec_height + blockIdx.z * BLOCK_KN_SIZE / 2 +
                    threadIdx.x];
        }
    }


    if (blockIdx.z == 0)
    {
        for (int m = 0; m < b_end; m++)
            mul[(b + m) * width + w] = __int2half_rn(0);
    }
    __syncthreads();

    int i = width * h + w;
    int g_h = h * 4;
    int k = 0;
    int z_w = w / 4;
    int z_mod = (w % 4) * 8;
    half2 res2;
    half res[BLOCK_M_SIZE_MAX] = {};

    unsigned int tmp;
    while (k < h_end) {
        tmp = mat[i];
        half2 scales_tmp[2];
        half2 zeros_tmp[2];
        for (int tmp_k = 0; tmp_k < 2; tmp_k++) {
            int g = g_idx[g_h + (k + tmp_k) * 2];
            int g2 = g_idx[g_h + (k + tmp_k) * 2 + 1];
            half scale_f = scales[g * width + w];
            half scale_f2 = scales[g2 * width + w];
            half2 scale =
__halves2half2(scale_f, scale_f2); + half2 zero = __halves2half2( + __hmul(scale_f, __int2half_rn(-((zeros[g * zero_width + z_w] >> z_mod) & 0xff) - 1)), + __hmul(scale_f2, __int2half_rn(-((zeros[g2 * zero_width + z_w] >> z_mod) & 0xff) - 1)) + ); + scales_tmp[tmp_k] = scale; + zeros_tmp[tmp_k] = zero; + } + for (int m = 0; m < b_end; m++) { +#ifndef USE_ROCM + res2 = {}; +#else + res2.x = __half_as_ushort(__float2half(0)); + res2.y = __half_as_ushort(__float2half(0)); +#endif + half2 v12 = __halves2half2(__int2half_rn(tmp & 0xFF), __int2half_rn((tmp >> 8) & 0xFF)); + res2 = __hfma2(__hfma2(v12, scales_tmp[0], zeros_tmp[0]), blockvec[m][k + 0], res2); + half2 v34 = __halves2half2(__int2half_rn((tmp >> 16) & 0xFF), __int2half_rn((tmp >> 24) & 0xFF)); + res2 = __hfma2(__hfma2(v34, scales_tmp[1], zeros_tmp[1]), blockvec[m][k + 1], res2); +#ifndef USE_ROCM + res[m] = __hadd(res[m], __hadd(res2.x, res2.y)); +#else + res[m] = __hadd(res[m], __hadd(__ushort_as_half(res2.x), __ushort_as_half(res2.y))); +#endif + } + i += width; + k += 2; + } + for (int m = 0; m < b_end; m++) { + atomicAdd(&mul[(b + m) * width + w], res[m]); + } +} + +void gemm_half_q_half_alt +( + const half* a, + const uint32_t* b_q_weight, + const uint32_t* b_gptq_qzeros, + const half* b_gptq_scales, + const int* b_g_idx, + half* c, + int size_m, + int size_n, + int size_k, + int bit +) +{ + dim3 blockDim, gridDim; + blockDim.x = BLOCK_KN_SIZE; + blockDim.y = 1; + blockDim.z = 1; + gridDim.x = DIVIDE(size_n, BLOCK_KN_SIZE); + gridDim.y = DIVIDE(size_m, BLOCK_M_SIZE_MAX); + gridDim.z = DIVIDE(size_k, BLOCK_KN_SIZE); + + auto kernel = gemm_half_q_half_alt_4bit_kernel; + if (bit == 8) { + kernel = gemm_half_q_half_alt_8bit_kernel; + } + + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + kernel<<>> + ( + (const half2*) a, + b_q_weight, + c, + b_gptq_scales, + b_gptq_qzeros, + b_g_idx, + size_m, + size_k / 32 * bit, + size_n + ); +} + +template +__global__ void reconstruct_gptq_kernel +( + 
const uint32_t* __restrict__ w, + const half* __restrict__ w_scales, + const uint32_t* __restrict__ w_zeros, + const int* __restrict__ g_idx, + const int height, + const int width, + const int group, + half* __restrict__ out +) +{ + // Start of block + + int column = BLOCK_KN_SIZE * blockIdx.x + threadIdx.x; + int row = blockIdx.y * 32 / bit; + if (column >= width) return; + + // Views + + MatrixView_half_rw out_(out, height, width); + MatrixView_half w_scales_(w_scales, group, width); + T w_zeros_(w_zeros, group, width); + + uint32_t w_read = w[blockIdx.y * width + column]; + half* out_ptr = out_.item_ptr(row, column); + + #pragma unroll + for (int s = 0; s < 32; s += bit) + { + int group = g_idx[row + s / bit]; + half w_scale = w_scales_.item(group, column); + uint32_t w_zero = w_zeros_.item(group, column) + 1; + half w_item = __hmul(__int2half_rn((int)((w_read >> s) & ((1 << bit) - 1)) - w_zero), w_scale); + *out_ptr = w_item; out_ptr += out_.width; + } +} + +__global__ void reconstruct_gptq_3bit_kernel +( + const uint32_t* __restrict__ w, + const half* __restrict__ w_scales, + const uint32_t* __restrict__ w_zeros, + const int* __restrict__ g_idx, + const int height, + const int width, + const int group, + half* __restrict__ out +) +{ + // Start of block + int column = BLOCK_KN_SIZE * blockIdx.x + threadIdx.x; + int row = blockIdx.y * 32; + if (column >= width) return; + + // Views + + MatrixView_half_rw out_(out, height, width); + MatrixView_half w_scales_(w_scales, group, width); + MatrixView_q3_row w_zeros_(w_zeros, group, width); + + uint32_t w1 = w[(blockIdx.y * 3) * width + column]; + uint32_t w2 = w[(blockIdx.y * 3 + 1) * width + column]; + uint32_t w3 = w[(blockIdx.y * 3 + 2) * width + column]; + half* out_ptr = out_.item_ptr(row, column); + + #pragma unroll + for (int i = 0; i < 32; i += 1) + { + int group = g_idx[row + i]; + half w_scale = w_scales_.item(group, column); + uint32_t w_zero = w_zeros_.item(group, column) + 1; + int w_item; + if (i == 10) 
{ + w_item = (w1 >> 30) | ((w2 << 2) & 0x4); + } else if (i == 21) { + w_item = (w2 >> 31) | ((w3 << 1) & 0x6); + } else if (i < 10) { + w_item = ((w1 >> (i * 3)) & 0x7); + } else if (i < 21) { + w_item = ((w2 >> (i * 3 - 32)) & 0x7); + } else { + w_item = ((w3 >> (i * 3 - 64)) & 0x7); + } + *out_ptr = __hmul(__int2half_rn(w_item - w_zero), w_scale); + out_ptr += out_.width; + } +} + +void reconstruct_gptq +( + const uint32_t* b_q_weight, + const uint32_t* b_gptq_qzeros, + const half* b_gptq_scales, + const int* b_g_idx, + half* out, + int height, + int width, + int groups, + int bit +) +{ + dim3 blockDim, gridDim; + blockDim.x = BLOCK_KN_SIZE; + blockDim.y = 1; + gridDim.y = DIVIDE(height, 32 / bit); + gridDim.x = DIVIDE(width, BLOCK_KN_SIZE); + + auto kernel = reconstruct_gptq_kernel; + if (bit == 2) { + kernel = reconstruct_gptq_kernel; + } else if (bit == 8) { + kernel = reconstruct_gptq_kernel; + } else if (bit == 3) { + kernel = reconstruct_gptq_3bit_kernel; + gridDim.y = DIVIDE(height, 32); + } + + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + kernel<<>> + ( + b_q_weight, + b_gptq_scales, + b_gptq_qzeros, + b_g_idx, + height, + width, + groups, + out + ); +} + + +void gemm_half_q_half_cuda +( + cublasHandle_t cublas_handle, + const half* a, + const uint32_t* b_q_weight, + const uint32_t* b_gptq_qzeros, + const half* b_gptq_scales, + const int* b_g_idx, + half* c, + half* temp_dq, + int size_m, + int size_n, + int size_k, + int groups, + bool use_exllama, + int bit +) +{ + bool use_reconstruct; + if (use_exllama) { + use_reconstruct = ((bit == 8 && size_m > MAX_Q_GEMM_ROWS_8BIT) || (bit != 8 && size_m > MAX_Q_GEMM_ROWS)); + } else { + // The 2/3-bit kernels are somehow slower than dequant + gemm baseline, so we disabled them for now. 
+ use_reconstruct = (bit < 4 || size_m > MAX_ALT_GEMM_ROWS); + } + if (use_reconstruct) { + // Reconstruct FP16 matrix, then cuBLAS + if (use_exllama) { + reconstruct_exllama(b_q_weight, b_gptq_qzeros, b_gptq_scales, b_g_idx, temp_dq, + size_k, size_n, groups, bit); + } + else + { + reconstruct_gptq(b_q_weight, b_gptq_qzeros, b_gptq_scales, b_g_idx, + temp_dq, size_k, size_n, groups, bit); + } + + const half alpha = __float2half(1.0f); + const half beta = __float2half(0.0f); + cublasHgemm(cublas_handle, + CUBLAS_OP_N, + CUBLAS_OP_N, + size_n, size_m, size_k, + &alpha, temp_dq, size_n, + a, size_k, + &beta, c, size_n); + } + else if (use_exllama) + { + // Quantized matmul + int max_chunks = size_m / BLOCK_M_SIZE_MAX; + int last_chunk = max_chunks * BLOCK_M_SIZE_MAX; + int last_chunk_size = size_m - last_chunk; + + if (max_chunks) + { + gemm_half_q_half_cuda_part(a, b_q_weight, b_gptq_qzeros, b_gptq_scales, b_g_idx, + c, last_chunk, size_n, size_k, BLOCK_M_SIZE_MAX, + groups, bit); + } + + if (last_chunk_size) + { + gemm_half_q_half_cuda_part(a + last_chunk * size_k, b_q_weight, b_gptq_qzeros, + b_gptq_scales, b_g_idx, c + last_chunk * size_n, + last_chunk_size, size_n, size_k, last_chunk_size, + groups, bit); + } + } + else + { + gemm_half_q_half_alt(a, b_q_weight, b_gptq_qzeros, b_gptq_scales, b_g_idx, + c, size_m, size_n, size_k, bit); + } +} + +__global__ void shuffle_4bit_kernel +( + uint32_t* __restrict__ b_q_weight, + const int size_k, + const int size_n +) +{ + int n = blockIdx.x * THREADS_X + threadIdx.x; + if (n >= size_n) return; + int k = 0; + uint32_t* b_ptr = b_q_weight + n; + while (k < size_k) { shuffle_4bit_8 (b_ptr, size_n); b_ptr += 1 * size_n; k += 8; } +} + +__global__ void shuffle_8bit_kernel +( + uint32_t* __restrict__ b_q_weight, + const int size_k, + const int size_n +) +{ + int n = blockIdx.x * THREADS_X + threadIdx.x; + if (n >= size_n) return; + int k = 0; + uint32_t* b_ptr = b_q_weight + n; + while (k < size_k) { shuffle_8bit_4 (b_ptr, 
size_n); b_ptr += 1 * size_n; k += 4; } +} + +__global__ void shuffle_2bit_kernel +( + uint32_t* __restrict__ b_q_weight, + const int size_k, + const int size_n +) +{ + int n = blockIdx.x * THREADS_X + threadIdx.x; + if (n >= size_n) return; + int k = 0; + uint32_t* b_ptr = b_q_weight + n; + while (k < size_k) { shuffle_2bit_16(b_ptr, size_n); b_ptr += 1 * size_n; k += 16; } +} + +__global__ void shuffle_3bit_kernel +( + uint32_t* __restrict__ b_q_weight, + const int size_k, + const int size_n +) +{ + int n = blockIdx.x * THREADS_X + threadIdx.x; + if (n >= size_n) return; + int k = 0; + uint32_t* b_ptr = b_q_weight + n; + while (k < size_k) { shuffle_3bit_32(b_ptr, size_n); b_ptr += 3 * size_n; k += 32; } +} + +__global__ void make_sequential_4bit_kernel +( + const uint32_t* __restrict__ w, + uint32_t* __restrict__ w_new, + const int* __restrict__ q_perm, + const int w_width +) +{ + const uint64_t* w2 = (uint64_t*) w; + uint64_t* w_new2 = (uint64_t*) w_new; + int w2_stride = w_width >> 1; + int w2_column = THREADS_X * blockIdx.x + threadIdx.x; + if (w2_column >= w2_stride) return; + int w_new2_row = blockIdx.y; + int q_perm_idx = w_new2_row << 3; + uint64_t dst = 0; + + #pragma unroll + for (int i = 0; i < 8; i++) + { + int source_row = q_perm[q_perm_idx++]; + + int w2_row = source_row >> 3; + int w2_subrow = source_row & 0x07; + int w2_row_shift = w2_subrow << 2; + int wnew2_row_shift = i << 2; + + uint64_t src = w2[w2_row * w2_stride + w2_column]; + src >>= w2_row_shift; + src &= 0x0000000f0000000f; + src <<= wnew2_row_shift; + dst |= src; + } + w_new2[w_new2_row * w2_stride + w2_column] = dst; +} + +__global__ void make_sequential_2bit_kernel +( + const uint32_t* __restrict__ w, + uint32_t* __restrict__ w_new, + const int* __restrict__ q_perm, + const int w_width +) +{ + const uint64_t* w2 = (uint64_t*) w; + uint64_t* w_new2 = (uint64_t*) w_new; + int w2_stride = w_width >> 1; + int w2_column = THREADS_X * blockIdx.x + threadIdx.x; + if (w2_column >= w2_stride) 
return; + int w_new2_row = blockIdx.y; + int q_perm_idx = w_new2_row << 4; + uint64_t dst = 0; + + #pragma unroll + for (int i = 0; i < 16; i++) + { + int source_row = q_perm[q_perm_idx++]; + + int w2_row = source_row >> 4; + int w2_subrow = source_row & 0x0f; + int w2_row_shift = w2_subrow << 1; + int wnew2_row_shift = i << 1; + + uint64_t src = w2[w2_row * w2_stride + w2_column]; + src >>= w2_row_shift; + src &= 0x0000000300000003; + src <<= wnew2_row_shift; + dst |= src; + } + w_new2[w_new2_row * w2_stride + w2_column] = dst; +} + +__global__ void make_sequential_3bit_kernel +( + const uint32_t* __restrict__ w, + uint32_t* __restrict__ w_new, + const int* __restrict__ q_perm, + const int w_width +) +{ + int w_column = THREADS_X * blockIdx.x + threadIdx.x; + if (w_column >= w_width) return; + int w_new_row = blockIdx.y * 3; + int q_perm_idx = blockIdx.y << 5; + uint32_t dst[3] = {0, 0, 0}; + + #pragma unroll + for (int i = 0; i < 32; i++) + { + int source_row = q_perm[q_perm_idx++]; + int z_w = (source_row / 32) * 3; + int z_mod = source_row % 32; + int z_bit; + + if (z_mod != 10){ + if (z_mod != 21){ + z_bit = z_mod; + if (z_bit > 21){ + z_bit *= 3; + z_bit -= 64; + z_w += 2; + } else if (z_bit > 10){ + z_bit *= 3; + z_bit -= 32; + z_w += 1; + } else { + z_bit *= 3; + } + } else { + z_w += 1; + } + } + + uint64_t src; + if (z_mod == 10) { + src = (w[z_w * w_width + w_column] >> 30) | ((w[(z_w + 1) * w_width + w_column] << 2) & 0x4); + } else if (z_mod == 21){ + src = (w[z_w * w_width + w_column] >> 31) | ((w[(z_w + 1) * w_width + w_column] << 1) & 0x6); + } else { + src = w[z_w * w_width + w_column]; + src >>= z_bit; + src &= 0x07; + } + + z_w = 0; + if (i != 10){ + if (i != 21){ + z_bit = i; + if (z_bit > 21){ + z_bit *= 3; + z_bit -= 64; + z_w += 2; + } else if (z_bit > 10){ + z_bit *= 3; + z_bit -= 32; + z_w += 1; + } else { + z_bit *= 3; + } + } else { + z_w += 1; + } + } + if (i == 10) { + dst[z_w] |= (src & 0x03) << 30; + dst[z_w + 1] |= ((src & 0x4) >> 
2); + } else if (i == 21) { + dst[z_w] |= (src & 0x01) << 31; + dst[z_w + 1] |= ((src & 0x6) >> 1); + } else { + dst[z_w] |= (src << z_bit); + } + } + w_new[w_new_row * w_width + w_column] = dst[0]; + w_new[(w_new_row + 1) * w_width + w_column] = dst[1]; + w_new[(w_new_row + 2) * w_width + w_column] = dst[2]; +} + +__global__ void make_sequential_8bit_kernel +( + const uint32_t* __restrict__ w, + uint32_t* __restrict__ w_new, + const int* __restrict__ q_perm, + const int w_width +) +{ + const uint64_t* w2 = (uint64_t*) w; + uint64_t* w_new2 = (uint64_t*) w_new; + int w2_stride = w_width >> 1; + int w2_column = THREADS_X * blockIdx.x + threadIdx.x; + if (w2_column >= w2_stride) return; + int w_new2_row = blockIdx.y; + int q_perm_idx = w_new2_row << 2; + uint64_t dst = 0; + + #pragma unroll + for (int i = 0; i < 4; i++) + { + int source_row = q_perm[q_perm_idx++]; + + int w2_row = source_row >> 2; + int w2_subrow = source_row & 0x03; + int w2_row_shift = w2_subrow << 3; + int wnew2_row_shift = i << 3; + + uint64_t src = w2[w2_row * w2_stride + w2_column]; + src >>= w2_row_shift; + src &= 0x000000ff000000ff; + src <<= wnew2_row_shift; + dst |= src; + } + w_new2[w_new2_row * w2_stride + w2_column] = dst; +} + + +void shuffle_exllama_weight +( + uint32_t* q_weight, + int* q_perm, + int height, + int width, + int bit +) +{ + if (q_perm) + { + uint32_t* new_qweight = NULL; + musaMalloc(&new_qweight, height / 32 * bit * width * sizeof(uint32_t)); + + dim3 blockDim, gridDim; + blockDim.x = THREADS_X; + blockDim.y = 1; + gridDim.x = DIVIDE(width, THREADS_X); + gridDim.y = height / 32 * bit; + + auto kernel = make_sequential_4bit_kernel; + if (bit == 2) { + kernel = make_sequential_2bit_kernel; + } else if (bit == 3) { + kernel = make_sequential_3bit_kernel; + gridDim.y = height / 32; + } else if (bit == 8) { + kernel = make_sequential_8bit_kernel; + } + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + kernel<<>> + ( + q_weight, + new_qweight, + q_perm, + width 
+ ); + // Replace qweights + musaMemcpyAsync(q_weight, new_qweight, height / 32 * bit * width * sizeof(uint32_t), musaMemcpyDeviceToDevice); + // Cleanup + musaDeviceSynchronize(); + musaFree(new_qweight); + } + dim3 blockDim, gridDim; + blockDim.x = THREADS_X; + blockDim.y = 1; + gridDim.x = DIVIDE(width, THREADS_X); + gridDim.y = 1; + auto shuffle_kernel = shuffle_4bit_kernel; + if (bit == 2) { + shuffle_kernel = shuffle_2bit_kernel; + } else if (bit == 3) { + shuffle_kernel = shuffle_3bit_kernel; + } else if (bit == 8) { + shuffle_kernel = shuffle_8bit_kernel; + } + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + shuffle_kernel<<>>(q_weight, height, width); +} + +} // namespace gptq +} // namespace vllm + +torch::Tensor gptq_gemm +( + torch::Tensor a, + torch::Tensor b_q_weight, + torch::Tensor b_gptq_qzeros, + torch::Tensor b_gptq_scales, + torch::Tensor b_g_idx, + bool use_exllama, + int bit +) +{ + const at::musa::OptionalMUSAGuard device_guard(device_of(a)); + auto options = torch::TensorOptions().dtype(a.dtype()).device(a.device()); + at::Tensor c = torch::empty({a.size(0), b_q_weight.size(1)}, options); + at::Tensor temp_dq = torch::empty({b_q_weight.size(0) * 32 / bit, b_q_weight.size(1)}, options); + + vllm::gptq::gemm_half_q_half_cuda + ( + at::musa::getCurrentMUSABlasHandle(), + (const half*) a.data_ptr(), + (const uint32_t*) b_q_weight.data_ptr(), + (const uint32_t*)b_gptq_qzeros.data_ptr(), + (const half*) b_gptq_scales.data_ptr(), + b_g_idx.device().is_meta() ? 
NULL : (const int*) b_g_idx.data_ptr(), + (half*) c.data_ptr(), + (half*) temp_dq.data_ptr(), + c.size(0), // m + c.size(1), // n + a.size(1), // k + b_gptq_qzeros.size(0), // group number + use_exllama, + bit + ); + return c; +} + +void gptq_shuffle +( + torch::Tensor q_weight, + torch::Tensor q_perm, + int bit +) +{ + const at::musa::OptionalMUSAGuard device_guard(device_of(q_weight)); + vllm::gptq::shuffle_exllama_weight( + (uint32_t*) q_weight.data_ptr(), + q_perm.device().is_meta() || q_perm.numel() == 0 ? NULL : (int*) q_perm.data_ptr(), + q_weight.size(0) * 32 / bit, + q_weight.size(1), + bit + ); +} diff --git a/csrc_musa/quantization/gptq/qdq_2.muh b/csrc_musa/quantization/gptq/qdq_2.muh new file mode 100644 index 0000000..295872a --- /dev/null +++ b/csrc_musa/quantization/gptq/qdq_2.muh @@ -0,0 +1,87 @@ +/* +Copied from https://github.com/turboderp/exllamav2 +*/ + +#ifndef _qdq_2_cuh +#define _qdq_2_cuh + +#include "qdq_util.cuh" + +namespace vllm { +namespace gptq { + +// Permutation: +// +// ffddbb99 77553311 eeccaa88 66442200 + +__forceinline__ __device__ void shuffle_2bit_16 +( + uint32_t* q, + int stride +) +{ + uint32_t qa = q[0]; + uint32_t qb = 0; + + #pragma unroll + for (int i = 0; i < 8; i++) + { + uint32_t qa0 = qa & 0x03; + uint32_t qa1 = (qa & 0x0c) >> 2; + qa >>= 4; + qb |= (qa1 << (i * 2 + 16)); + qb |= (qa0 << (i * 2)); + } + q[0] = qb; +} + +__forceinline__ __device__ void dequant_2bit_16 +( + const uint32_t q_0, + half2 (&dq)[8], + int stride, + const uint32_t zero +) +{ + const uint32_t c0 = 0x64006400; + const half y4_ = __float2half_rn(1.0f / 4.0f); + const half y16_ = __float2half_rn(1.0f / 16.0f); + const half y64_ = __float2half_rn(1.0f / 64.0f); + const half2 y4 = __halves2half2(y4_, y4_); + const half2 y16 = __halves2half2(y16_, y16_); + const half2 y64 = __halves2half2(y64_, y64_); + + const half_uint16 z1_(0xe400 | zero); // half(-1024.0f - zero); + const half z4_ = __hsub(__int2half_rn(-256), __int2half_rn(zero)); + const 
half z16_ = __hsub(__int2half_rn(-64), __int2half_rn(zero)); + const half z64_ = __hsub(__int2half_rn(-16), __int2half_rn(zero)); + const half2 z1 = __half2half2(z1_.as_half); + const half2 z4 = __half2half2(z4_); + const half2 z16 = __half2half2(z16_); + const half2 z64 = __half2half2(z64_); + + uint32_t qa = q_0; + half2_uint32 q0((qa & 0x00030003) | c0); // half2(q[ 0], q[ 1]) + 1024 + half2_uint32 q1((qa & 0x000c000c) | c0); // half2(q[ 2], q[ 3]) * 4 + 1024 + half2_uint32 q2((qa & 0x00300030) | c0); // half2(q[ 4], q[ 5]) * 16 + 1024 + half2_uint32 q3((qa & 0x00c000c0) | c0); // half2(q[ 6], q[ 7]) * 64 + 1024 + qa >>= 8; + half2_uint32 q4((qa & 0x00030003) | c0); // half2(q[ 8], q[ 8]) + 1024 + half2_uint32 q5((qa & 0x000c000c) | c0); // half2(q[10], q[11]) * 4 + 1024 + half2_uint32 q6((qa & 0x00300030) | c0); // half2(q[12], q[13]) * 16 + 1024 + half2_uint32 q7((qa & 0x00c000c0) | c0); // half2(q[14], q[15]) * 64 + 1024 + + dq[0] = __hadd2(q0.as_half2, z1); + dq[1] = __hfma2(q1.as_half2, y4, z4); + dq[2] = __hfma2(q2.as_half2, y16, z16); + dq[3] = __hfma2(q3.as_half2, y64, z64); + dq[4] = __hadd2(q4.as_half2, z1); + dq[5] = __hfma2(q5.as_half2, y4, z4); + dq[6] = __hfma2(q6.as_half2, y16, z16); + dq[7] = __hfma2(q7.as_half2, y64, z64); +} + +} // namespace gptq +} // namespace vllm + +#endif diff --git a/csrc_musa/quantization/gptq/qdq_3.muh b/csrc_musa/quantization/gptq/qdq_3.muh new file mode 100644 index 0000000..3e7ecde --- /dev/null +++ b/csrc_musa/quantization/gptq/qdq_3.muh @@ -0,0 +1,141 @@ +#ifndef _qdq_3_cuh +#define _qdq_3_cuh + +#include "qdq_util.cuh" + +namespace vllm { +namespace gptq { +// Permutation: +// +// v9997775 55333111 u8886664 44222000 (u, v lsb) +// vjjjhhhf ffdddbbb uiiiggge eecccaaa +// vtttrrrp ppnnnlll usssqqqo oommmkkk + +__forceinline__ __device__ void shuffle_3bit_32 +( + uint32_t* q, + int stride +) +{ + uint32_t qa = q[0 * stride]; + uint32_t qb = q[1 * stride]; + uint32_t qc = q[2 * stride]; + + // qa: aa999888 77766655 
54443332 22111000 + // qb: lkkkjjji iihhhggg fffeeedd dcccbbba + // qc: vvvuuutt tsssrrrq qqpppooo nnnmmmll + + uint32_t qd = qc >> 26; + qc <<= 4; + qc |= qb >> 28; + qb <<= 2; + qb |= qa >> 30; + + // qa: ..999888 77766655 54443332 22111000 + // qb: ..jjjiii hhhgggff feeedddc ccbbbaaa + // qc: ..tttsss rrrqqqpp pooonnnm mmlllkkk + // qd: vvvuuu + + uint32_t za = 0; + uint32_t zb = 0; + uint32_t zc = 0; + + for (int i = 0; i < 5; i++) { uint32_t t0 = qa & 0x07; uint32_t t1 = (qa & 0x38) >> 3; qa >>= 6; za |= (t0 << (i * 3)); za |= (t1 << (i * 3 + 16)); } + for (int i = 0; i < 5; i++) { uint32_t t0 = qb & 0x07; uint32_t t1 = (qb & 0x38) >> 3; qb >>= 6; zb |= (t0 << (i * 3)); zb |= (t1 << (i * 3 + 16)); } + for (int i = 0; i < 5; i++) { uint32_t t0 = qc & 0x07; uint32_t t1 = (qc & 0x38) >> 3; qc >>= 6; zc |= (t0 << (i * 3)); zc |= (t1 << (i * 3 + 16)); } + + // za: 9997775 55333111 8886664 44222000 + // zb: jjjhhhf ffdddbbb iiiggge eecccaaa + // zc: tttrrrp ppnnnlll sssqqqo oommmkkk + // qd: vvvuuu + + za |= ((qd & 0x01) >> 0) << 15; + zb |= ((qd & 0x02) >> 1) << 15; + zc |= ((qd & 0x04) >> 2) << 15; + za |= ((qd & 0x08) >> 3) << 31; + zb |= ((qd & 0x10) >> 4) << 31; + zc |= ((qd & 0x20) >> 5) << 31; + + // za: v9997775 55333111 u8886664 44222000 (u, v lsb) + // zb: vjjjhhhf ffdddbbb uiiiggge eecccaaa + // zc: vtttrrrp ppnnnlll usssqqqo oommmkkk + + q[0 * stride] = za; + q[1 * stride] = zb; + q[2 * stride] = zc; +} + +__forceinline__ __device__ void dequant_3bit_32 +( + const uint32_t q_0, + const uint32_t q_1, + const uint32_t q_2, + half2 (&dq)[16], + int stride, + const uint32_t zero +) +{ + const uint32_t c0 = 0x64006400; + const half y8_ = __float2half_rn(1.0f / 8.0f); + const half y64_ = __float2half_rn(1.0f / 64.0f); + const half2 y8 = __halves2half2(y8_, y8_); + const half2 y64 = __halves2half2(y64_, y64_); + const half_uint16 z1_(0xe400 | zero); // half(-1024.0f - zero); + const half z8_ = __hsub(__int2half_rn(-128), __int2half_rn(zero)); + const half z64_ 
= __hsub(__int2half_rn(-16), __int2half_rn(zero)); + const half2 z1 = __halves2half2(z1_.as_half, z1_.as_half); + const half2 z8 = __halves2half2(z8_, z8_); + const half2 z64 = __halves2half2(z64_, z64_); + + uint32_t qa = q_0; + uint32_t qb = q_1; + uint32_t qc = q_2; + + half2_uint32 q0((qa & 0x00070007) | c0); // half2(q[ 0], q[ 1]) + 1024 + half2_uint32 q1((qa & 0x00380038) | c0); // half2(q[ 2], q[ 3]) * 8 + 1024 + qa >>= 6; + half2_uint32 q2((qa & 0x00070007) | c0); // half2(q[ 4], q[ 5]) + 1024 + half2_uint32 q3((qa & 0x00380038) | c0); // half2(q[ 6], q[ 7]) * 8 + 1024 + half2_uint32 q4((qa & 0x01c001c0) | c0); // half2(q[ 8], q[ 9]) * 64 + 1024 + qa >>= 9; + qa &= 0x00010001; + half2_uint32 q5((qb & 0x00070007) | c0); // half2(q[10], q[11]) + 1024 + half2_uint32 q6((qb & 0x00380038) | c0); // half2(q[12], q[13]) * 8 + 1024 + qb >>= 6; + half2_uint32 q7((qb & 0x00070007) | c0); // half2(q[14], q[15]) + 1024 + half2_uint32 q8((qb & 0x00380038) | c0); // half2(q[16], q[17]) * 8 + 1024 + half2_uint32 q9((qb & 0x01c001c0) | c0); // half2(q[18], q[19]) * 64 + 1024 + qb >>= 8; + qb &= 0x00020002; + half2_uint32 q10((qc & 0x00070007) | c0); // half2(q[20], q[21]) + 1024 + half2_uint32 q11((qc & 0x00380038) | c0); // half2(q[22], q[23]) * 8 + 1024 + qc >>= 6; + half2_uint32 q12((qc & 0x00070007) | c0); // half2(q[24], q[25]) + 1024 + half2_uint32 q13((qc & 0x00380038) | c0); // half2(q[26], q[27]) * 8 + 1024 + half2_uint32 q14((qc & 0x01c001c0) | c0); // half2(q[28], q[29]) * 64 + 1024 + qc >>= 7; + qc &= 0x00040004; + half2_uint32 q15((qa | qb | qc) | c0); + + dq[ 0] = __hadd2( q0.as_half2, z1); + dq[ 1] = __hfma2( q1.as_half2, y8, z8); + dq[ 2] = __hadd2( q2.as_half2, z1); + dq[ 3] = __hfma2( q3.as_half2, y8, z8); + dq[ 4] = __hfma2( q4.as_half2, y64, z64); + dq[ 5] = __hadd2( q5.as_half2, z1); + dq[ 6] = __hfma2( q6.as_half2, y8, z8); + dq[ 7] = __hadd2( q7.as_half2, z1); + dq[ 8] = __hfma2( q8.as_half2, y8, z8); + dq[ 9] = __hfma2( q9.as_half2, y64, z64); + 
dq[10] = __hadd2(q10.as_half2, z1); + dq[11] = __hfma2(q11.as_half2, y8, z8); + dq[12] = __hadd2(q12.as_half2, z1); + dq[13] = __hfma2(q13.as_half2, y8, z8); + dq[14] = __hfma2(q14.as_half2, y64, z64); + dq[15] = __hadd2(q15.as_half2, z1); +} + +} // namespace gptq +} // namespace vllm + +#endif diff --git a/csrc_musa/quantization/gptq/qdq_4.muh b/csrc_musa/quantization/gptq/qdq_4.muh new file mode 100644 index 0000000..881f353 --- /dev/null +++ b/csrc_musa/quantization/gptq/qdq_4.muh @@ -0,0 +1,147 @@ +/* +Copied from https://github.com/turboderp/exllamav2 +*/ + +#ifndef _qdq_4_cuh +#define _qdq_4_cuh + +#include "qdq_util.cuh" + +namespace vllm { +namespace gptq { +// Permutation: +// +// 77775555 33331111 66664444 22220000 + +__forceinline__ __device__ void shuffle_4bit_8 +( + uint32_t* q, + int stride +) +{ + uint32_t qa = q[0]; + uint32_t qb = 0; + + #pragma unroll + for (int i = 0; i < 4; i++) + { + uint32_t qa0 = qa & 0x0f; + uint32_t qa1 = (qa & 0xf0) >> 4; + qa >>= 8; + qb |= (qa1 << (i * 4 + 16)); + qb |= (qa0 << (i * 4)); + } + q[0] = qb; +} + +__forceinline__ __device__ void dequant_4bit_8 +( + const uint32_t q_0, + half2 (&dq)[4], + int stride, + const uint32_t zero +) +{ + const uint32_t c0 = 0x64006400; + const half y16_ = __float2half_rn(1.0f / 16.0f); + const half2 y16 = __halves2half2(y16_, y16_); + const half_uint16 z1_(0xe400 | zero); // half(-1024.0f - zero); + const half z16_ = __hsub(__int2half_rn(-64), __int2half_rn(zero)); + const half2 z1 = __half2half2(z1_.as_half); + const half2 z16 = __half2half2(z16_); + + uint32_t qa = q_0; + half2_uint32 q0((qa & 0x000f000f) | c0); // half2(q[ 0], q[ 1]) + 1024 + half2_uint32 q1((qa & 0x00f000f0) | c0); // half2(q[ 2], q[ 3]) * 16 + 1024 + qa >>= 8; + half2_uint32 q2((qa & 0x000f000f) | c0); // half2(q[ 4], q[ 5]) + 1024 + half2_uint32 q3((qa & 0x00f000f0) | c0); // half2(q[ 6], q[ 7]) * 16 + 1024 + + dq[0] = __hadd2(q0.as_half2, z1); + dq[1] = __hfma2(q1.as_half2, y16, z16); + dq[2] = 
__hadd2(q2.as_half2, z1); + dq[3] = __hfma2(q3.as_half2, y16, z16); +} + +__forceinline__ __device__ void dequant_4bit_8_prep_zero_scale +( + const uint32_t zero, + const half scale, + half2 (&z1z16)[2], + half2 (&y1y16)[2] +) +{ + half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); + half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); + + half2 scale2 = __half2half2(scale); + + z1z16[0] = __hmul2(scale2, __half2half2(z1.as_half)); + z1z16[1] = __hmul2(scale2, __half2half2(z16)); + + const half y1 = __float2half_rn(1.0f); + const half y16 = __float2half_rn(1.0f / 16.0f); + + y1y16[0] = __hmul2(scale2, __half2half2(y1)); + y1y16[1] = __hmul2(scale2, __half2half2(y16)); +} + +__forceinline__ __device__ void dequant_4bit_8_prep_zero +( + const uint32_t zero, + half2(&z1z16)[2], + half2(&y1y16)[2] +) +{ + half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); + half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); + + z1z16[0] = __half2half2(z1.as_half); + z1z16[1] = __half2half2(z16); + + const half y1 = __float2half_rn(1.0f); + const half y16 = __float2half_rn(1.0f / 16.0f); + + y1y16[0] = __half2half2(y1); + y1y16[1] = __half2half2(y16); +} + + +__forceinline__ __device__ void dequant_4bit_8_gptq +( + const uint32_t q_0, + half2 (&dq)[4], + half2 (&z1z16)[2], + half2 (&y1y16)[2], + int stride, + bool scaled +) +{ + const uint32_t c0 = 0x64006400; + + uint32_t qa = q_0; + half2_uint32 q0((qa & 0x000f000f) | c0); // half2( q[0] + 1024, q[1] + 1024 ) + half2_uint32 q1((qa & 0x00f000f0) | c0); // half2( q[2] * 16 + 1024, q[3] * 16 + 1024 ) + qa >>= 8; + half2_uint32 q2((qa & 0x000f000f) | c0); // half2( q[4] + 1024, q[5] + 1024 ) + half2_uint32 q3((qa & 0x00f000f0) | c0); // half2( q[6] * 16 + 1024, q[7] * 16 + 1024 ) + + if (scaled) + { + dq[0] = __hfma2(q0.as_half2, y1y16[0], z1z16[0]); // half2( q[0] * s - z * s, q[1] * s - z * s) + dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] * s - z * s, q[3] * s - z * s) + dq[2] = 
__hfma2(q2.as_half2, y1y16[0], z1z16[0]); + dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); + } + else + { + dq[0] = __hadd2(q0.as_half2, z1z16[0]); // half2( q[0] - z, q[1] - z ) + dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] - z, q[3] - z ) + dq[2] = __hadd2(q2.as_half2, z1z16[0]); // half2( q[4] - z, q[5] - z ) + dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); // half2( q[6] - z, q[7] - z ) + } +} +} // namespace gptq +} // namespace vllm + +#endif diff --git a/csrc_musa/quantization/gptq/qdq_8.muh b/csrc_musa/quantization/gptq/qdq_8.muh new file mode 100644 index 0000000..0c7ad78 --- /dev/null +++ b/csrc_musa/quantization/gptq/qdq_8.muh @@ -0,0 +1,40 @@ +/* +Copied from https://github.com/turboderp/exllamav2 +*/ + +#ifndef _qdq_8_cuh +#define _qdq_8_cuh + +#include "qdq_util.cuh" + +namespace vllm { +namespace gptq { + +__forceinline__ __device__ void shuffle_8bit_4 +( + uint32_t* q, + int stride +) +{ +} + +__forceinline__ __device__ void dequant_8bit_8 +( + const uint32_t q_0, + const uint32_t q_1, + half2 (&dq)[4], + int stride, + const uint32_t zero +) +{ + half dqh[8]; + for (int i = 0; i < 4; i++) dqh[i ] = dq_ns(exb(q_0, i * 8, 0xff), zero); + for (int i = 0; i < 4; i++) dqh[i + 4] = dq_ns(exb(q_1, i * 8, 0xff), zero); + + for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); +} + +} // namespace gptq +} // namespace vllm + +#endif diff --git a/csrc_musa/quantization/gptq/qdq_util.muh b/csrc_musa/quantization/gptq/qdq_util.muh new file mode 100644 index 0000000..1722a9a --- /dev/null +++ b/csrc_musa/quantization/gptq/qdq_util.muh @@ -0,0 +1,60 @@ +/* +Copied from https://github.com/turboderp/exllamav2 +*/ + +#ifndef _qdq_util_cuh +#define _qdq_util_cuh + +namespace vllm { +namespace gptq { + +union half2_uint32 +{ + uint32_t as_uint32; + half2 as_half2; + __device__ half2_uint32(uint32_t val) : as_uint32(val) {} + __device__ half2_uint32(half2 val) : as_half2(val) {} +}; + +union half_uint16 +{ + uint16_t 
as_uint16; + half as_half; + __device__ half_uint16(uint16_t val) : as_uint16(val) {} + __device__ half_uint16(half val) : as_half(val) {} +}; + +// Max_scale premultiplied by 1/256 + +__forceinline__ __device__ half dq_scale(const int qs, const half max_scale) +{ + int qs_i = qs + 1; + half qs_h = __int2half_rn(qs_i * qs_i); + qs_h = __hmul(qs_h, max_scale); + return qs_h; +} + +__forceinline__ __device__ half dq(const int q, const int qzero, const half scale) +{ + return __hmul(__int2half_rn(q - qzero), scale); +} + +__forceinline__ __device__ half dq_ns(const int q, const int qzero) +{ + //return __hsub(__int2half_rn(q), __int2half_rn(qzero)); + return __int2half_rn(q - qzero); +} + +__forceinline__ __device__ int exb(const uint32_t q, const int shift, const int mask) +{ + return (int)((q >> shift) & mask); +} + +__forceinline__ __device__ int exb(const uint32_t q1, const uint32_t q0, const int shift, const int mask) +{ + return (int)(__funnelshift_rc(q0, q1, shift) & mask); +} + +} // namespace gptq +} // namespace vllm +#endif diff --git a/csrc_musa/quantization/gptq_marlin/gptq_marlin.mu b/csrc_musa/quantization/gptq_marlin/gptq_marlin.mu new file mode 100644 index 0000000..57e7491 --- /dev/null +++ b/csrc_musa/quantization/gptq_marlin/gptq_marlin.mu @@ -0,0 +1,1722 @@ +/* + * Modified by Neural Magic + * Copyright (C) Marlin.2024 Elias Frantar + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Adapted from https://github.com/IST-DASLab/marlin + */ + +#include "gptq_marlin.cuh" + +template inline std::string str(T x) { return std::to_string(x); } + +namespace gptq_marlin { + +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800 + +__global__ void permute_cols_kernel(int4 const *__restrict__ a_int4_ptr, + int const *__restrict__ perm_int_ptr, + int4 *__restrict__ out_int4_ptr, int size_m, + int size_k, int block_rows) {} + +template shared + // fetch pipeline + const bool has_act_order, // whether act_order is enabled + const int group_blocks = -1 // number of consecutive 16x16 blocks with + // a separate quantization scale + > +__global__ void +Marlin(const int4 *__restrict__ A, // fp16 input matrix of shape mxk + const int4 *__restrict__ B, // 4bit quantized weight matrix of shape kxn + int4 *__restrict__ C, // fp16 output buffer of shape mxn + const int4 *__restrict__ scales_ptr, // fp16 quantization scales of shape + // (k/groupsize)xn + const int *__restrict__ g_idx, // int32 group indices of shape k + int num_groups, // number of scale groups per output channel + int prob_m, // batch dimension m + int prob_n, // output dimension n + int prob_k, // reduction dimension k + int *locks // extra global storage for barrier synchronization +) {} + +} // namespace gptq_marlin + +torch::Tensor gptq_marlin_gemm(torch::Tensor &a, torch::Tensor &b_q_weight, + torch::Tensor &b_scales, torch::Tensor &g_idx, + torch::Tensor &perm, torch::Tensor &workspace, + int64_t num_bits, int64_t size_m, int64_t size_n, + int64_t size_k, bool is_k_full) { + TORCH_CHECK_NOT_IMPLEMENTED(false, + "marlin_gemm(..) 
requires CUDA_ARCH >= 8.0"); + return torch::empty({1, 1}); +} + +#else + +// Matrix fragments for tensor core instructions; their precise layout is +// documented here: +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type +using FragA = Vec; +using FragB = Vec; +using FragC = Vec; +using FragS = Vec; // quantization scales + +// m16n8k16 tensor core mma instruction with fp16 inputs and fp32 +// output/accumulation. +__device__ inline void mma(const FragA &a_frag, const FragB &frag_b, + FragC &frag_c) { + const uint32_t *a = reinterpret_cast(&a_frag); + const uint32_t *b = reinterpret_cast(&frag_b); + float *c = reinterpret_cast(&frag_c); + asm volatile("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), + "r"(b[1]), "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); +} + +// Instruction for loading a full 16x16 matrix fragment of operand A from shared +// memory, directly in tensor core layout. +__device__ inline void ldsm4(FragA &frag_a, const void *smem_ptr) { + uint32_t *a = reinterpret_cast(&frag_a); + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0,%1,%2,%3}, [%4];\n" + : "=r"(a[0]), "=r"(a[1]), "=r"(a[2]), "=r"(a[3]) + : "r"(smem)); +} + +// Lookup-table based 3-input logical operation; explicitly used for +// dequantization as the compiler does not seem to automatically recognize it in +// all cases. 
+template __device__ inline int lop3(int a, int b, int c) { + int res; + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(res) + : "r"(a), "r"(b), "r"(c), "n"(lut)); + return res; +} + +// Constructs destination register by taking bytes from 2 sources (based on mask) +template +__device__ inline uint32_t prmt(uint32_t a) { + uint32_t res; + asm volatile("prmt.b32 %0, %1, %2, %3;\n" + : "=r"(res) + : "r"(a), "n"(start_byte), "n"(mask)); + return res; +} + +// Efficiently dequantize an int32 value into a full B-fragment of 4 fp16 +// values. We mostly follow the strategy in the link below, with some small +// changes: +// https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h +__device__ inline FragB dequant_4bit(int q) { + const int LO = 0x000f000f; + const int HI = 0x00f000f0; + const int EX = 0x64006400; + // Guarantee that the `(a & b) | c` operations are LOP3s. + int lo = lop3<(0xf0 & 0xcc) | 0xaa>(q, LO, EX); + int hi = lop3<(0xf0 & 0xcc) | 0xaa>(q, HI, EX); + // We want signed int4 outputs, hence we fuse the `-8` symmetric zero point + // directly into `SUB` and `ADD`. 
+ const int SUB = 0x64086408; + const int MUL = 0x2c002c00; + const int ADD = 0xd480d480; + FragB frag_b; + frag_b[0] = __hsub2(*reinterpret_cast(&lo), + *reinterpret_cast(&SUB)); + frag_b[1] = __hfma2(*reinterpret_cast(&hi), + *reinterpret_cast(&MUL), + *reinterpret_cast(&ADD)); + return frag_b; +} + +__device__ inline FragB dequant_8bit(int q) { + static constexpr uint32_t mask_for_elt_01 = 0x5250; + static constexpr uint32_t mask_for_elt_23 = 0x5351; + static constexpr uint32_t start_byte_for_fp16 = 0x64646464; + + uint32_t lo = prmt(q); + uint32_t hi = prmt(q); + + static constexpr uint32_t I8s_TO_F16s_MAGIC_NUM = 0x64806480; + + FragB frag_b; + frag_b[0] = __hsub2(*reinterpret_cast(&lo), + *reinterpret_cast(&I8s_TO_F16s_MAGIC_NUM)); + frag_b[1] = __hsub2(*reinterpret_cast(&hi), + *reinterpret_cast(&I8s_TO_F16s_MAGIC_NUM)); + return frag_b; +} + +// Multiply dequantized values by the corresponding quantization scale; used +// only for grouped quantization. +__device__ inline void scale(FragB &frag_b, FragS &frag_s, int i) { + half2 s = __half2half2(reinterpret_cast<__half *>(&frag_s)[i]); + frag_b[0] = __hmul2(frag_b[0], s); + frag_b[1] = __hmul2(frag_b[1], s); +} + +// Same as above, but for act_order (each K is multiplied individually) +__device__ inline void scale4(FragB &frag_b, FragS &frag_s_1, FragS &frag_s_2, + FragS &frag_s_3, FragS &frag_s_4, int i) { + __half2 s_val_1_2; + s_val_1_2.x = reinterpret_cast<__half *>(&frag_s_1)[i]; + s_val_1_2.y = reinterpret_cast<__half *>(&frag_s_2)[i]; + + __half2 s_val_3_4; + s_val_3_4.x = reinterpret_cast<__half *>(&frag_s_3)[i]; + s_val_3_4.y = reinterpret_cast<__half *>(&frag_s_4)[i]; + + frag_b[0] = __hmul2(frag_b[0], s_val_1_2); + frag_b[1] = __hmul2(frag_b[1], s_val_3_4); +} + +// Given 2 floats multiply by 2 scales (halves) +__device__ inline void scale_float(float *c, FragS &s) { + __half *s_ptr = reinterpret_cast<__half *>(&s); + c[0] = __fmul_rn(c[0], __half2float(s_ptr[0])); + c[1] = __fmul_rn(c[1], 
__half2float(s_ptr[1])); +} + +// Wait until barrier reaches `count`, then lock for current threadblock. +__device__ inline void barrier_acquire(int *lock, int count) { + if (threadIdx.x == 0) { + int state = -1; + do + // Guarantee that subsequent writes by this threadblock will be visible + // globally. + asm volatile("ld.global.acquire.gpu.b32 %0, [%1];\n" + : "=r"(state) + : "l"(lock)); + while (state != count); + } + __syncthreads(); +} + +// Release barrier and increment visitation count. +__device__ inline void barrier_release(int *lock, bool reset = false) { + __syncthreads(); + if (threadIdx.x == 0) { + if (reset) { + lock[0] = 0; + return; + } + int val = 1; + // Make sure that all writes since acquiring this barrier are visible + // globally, while releasing the barrier. + asm volatile("fence.acq_rel.gpu;\n"); + asm volatile("red.relaxed.gpu.global.add.s32 [%0], %1;\n" + : + : "l"(lock), "r"(val)); + } +} + +// For a given "a" of size [M,K] performs a permutation of the K columns based +// on the given "perm" indices. 
+__global__ void permute_cols_kernel(int4 const *__restrict__ a_int4_ptr, + int const *__restrict__ perm_int_ptr, + int4 *__restrict__ out_int4_ptr, int size_m, + int size_k, int block_rows) { + + int start_row = block_rows * blockIdx.x; + int finish_row = start_row + block_rows; + if (finish_row > size_m) { + finish_row = size_m; + } + int cur_block_rows = finish_row - start_row; + + int row_stride = size_k * sizeof(half) / 16; + + auto permute_row = [&](int row) { + int iters = size_k / default_threads; + int rest = size_k % default_threads; + + int offset = row * row_stride; + + half const *a_row_half = + reinterpret_cast(a_int4_ptr + offset); + half *out_half = reinterpret_cast(out_int4_ptr + offset); + + int base_k = 0; + + for (int i = 0; i < iters; i++) { + int cur_k = base_k + threadIdx.x; + int src_pos = perm_int_ptr[cur_k]; + + out_half[cur_k] = a_row_half[src_pos]; + + base_k += default_threads; + } + + if (rest) { + if (threadIdx.x < rest) { + int cur_k = base_k + threadIdx.x; + int src_pos = perm_int_ptr[cur_k]; + + out_half[cur_k] = a_row_half[src_pos]; + } + } + }; + + for (int i = 0; i < cur_block_rows; i++) { + int cur_row = start_row + i; + if (cur_row < size_m) { + permute_row(cur_row); + } + } +} + +template shared + // fetch pipeline + const bool has_act_order, // whether act_order is enabled + const int group_blocks = -1 // number of consecutive 16x16 blocks with + // a separate quantization scale + > +__global__ void +Marlin(const int4 *__restrict__ A, // fp16 input matrix of shape mxk + const int4 *__restrict__ B, // 4bit quantized weight matrix of shape kxn + int4 *__restrict__ C, // fp16 output buffer of shape mxn + const int4 *__restrict__ scales_ptr, // fp16 quantization scales of shape + // (k/groupsize)xn + const int *__restrict__ g_idx, // int32 group indices of shape k + int num_groups, // number of scale groups per output channel + int prob_m, // batch dimension m + int prob_n, // output dimension n + int prob_k, // reduction 
dimension k + int *locks // extra global storage for barrier synchronization +) { + // Each threadblock processes one "stripe" of the B matrix with (roughly) the + // same size, which might involve multiple column "slices" (of width 16 * + // `thread_n_blocks`). Stripes are defined as shown in the 3x3 matrix 5 SM + // example: + // 0 1 3 + // 0 2 3 + // 1 2 4 + // While this kind of partitioning makes things somewhat more complicated, it + // ensures good utilization of all SMs for many kinds of shape and GPU + // configurations, while requiring as few slow global cross-threadblock + // reductions as possible. + + constexpr int pack_factor = 32 / num_bits; + + // For larger GEMMs we run multiple batchsize 64 versions in parallel for a + // better partitioning with less reductions + int parallel = 1; + if (prob_m > 16 * thread_m_blocks) { + parallel = prob_m / (16 * thread_m_blocks); + prob_m = 16 * thread_m_blocks; + } + + int k_tiles = prob_k / 16 / thread_k_blocks; + int n_tiles = prob_n / 16 / thread_n_blocks; + int iters = div_ceil(k_tiles * n_tiles * parallel, gridDim.x); + + if constexpr (!has_act_order && group_blocks != -1) { + if (group_blocks >= thread_k_blocks) { + // Ensure that the number of tiles in each stripe is a multiple of the + // groupsize; this avoids an annoying special case where a stripe starts + // in the middle of group. 
+ iters = (group_blocks / thread_k_blocks) * + div_ceil(iters, (group_blocks / thread_k_blocks)); + } + } + + int slice_row = (iters * blockIdx.x) % k_tiles; + int slice_col_par = (iters * blockIdx.x) / k_tiles; + int slice_col = slice_col_par; + int slice_iters; // number of threadblock tiles in the current slice + int slice_count = + 0; // total number of active threadblocks in the current slice + int slice_idx; // index of threadblock in current slice; numbered bottom to + // top + + // We can easily implement parallel problem execution by just remapping + // indices and advancing global pointers + if (slice_col_par >= n_tiles) { + A += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_k / 8; + C += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_n / 8; + locks += (slice_col_par / n_tiles) * n_tiles; + slice_col = slice_col_par % n_tiles; + } + + // Compute all information about the current slice which is required for + // synchronization. + auto init_slice = [&]() { + slice_iters = + iters * (blockIdx.x + 1) - (k_tiles * slice_col_par + slice_row); + if (slice_iters < 0 || slice_col_par >= n_tiles * parallel) + slice_iters = 0; + if (slice_iters == 0) + return; + if (slice_row + slice_iters > k_tiles) + slice_iters = k_tiles - slice_row; + slice_count = 1; + slice_idx = 0; + int col_first = iters * div_ceil(k_tiles * slice_col_par, iters); + if (col_first <= k_tiles * (slice_col_par + 1)) { + int col_off = col_first - k_tiles * slice_col_par; + slice_count = div_ceil(k_tiles - col_off, iters); + if (col_off > 0) + slice_count++; + int delta_first = iters * blockIdx.x - col_first; + if (delta_first < 0 || (col_off == 0 && delta_first == 0)) + slice_idx = slice_count - 1; + else { + slice_idx = slice_count - 1 - delta_first / iters; + if (col_off > 0) + slice_idx--; + } + } + if (slice_col == n_tiles) { + A += 16 * thread_m_blocks * prob_k / 8; + C += 16 * thread_m_blocks * prob_n / 8; + locks += n_tiles; + slice_col = 0; + } + }; + init_slice(); + 
+ // A sizes/strides + + // stride of the A matrix in global memory + int a_gl_stride = prob_k / 8; + // stride of an A matrix tile in shared memory + constexpr int a_sh_stride = 16 * thread_k_blocks / 8; + // delta between subsequent A tiles in global memory + constexpr int a_gl_rd_delta_o = 16 * thread_k_blocks / 8; + // between subsequent accesses within a tile + int a_gl_rd_delta_i = a_gl_stride * (threads / a_gl_rd_delta_o); + // between shared memory writes + constexpr int a_sh_wr_delta = a_sh_stride * (threads / a_gl_rd_delta_o); + // between shared memory tile reads + constexpr int a_sh_rd_delta_o = 2 * ((threads / 32) / (thread_n_blocks / 4)); + // within a shared memory tile + constexpr int a_sh_rd_delta_i = a_sh_stride * 16; + // overall size of a tile + constexpr int a_sh_stage = a_sh_stride * (16 * thread_m_blocks); + // number of shared write iterations for a tile + constexpr int a_sh_wr_iters = div_ceil(a_sh_stage, a_sh_wr_delta); + + // B sizes/strides + int b_gl_stride = 16 * prob_n / (pack_factor * 4); + constexpr int b_sh_stride = ((thread_n_blocks * 16) * 16 / pack_factor) / 4; + constexpr int b_thread_vecs = num_bits == 4 ? 1 : 2; + constexpr int b_sh_stride_threads = b_sh_stride / b_thread_vecs; + + int b_gl_rd_delta_o = b_gl_stride * thread_k_blocks; + int b_gl_rd_delta_i = b_gl_stride * (threads / b_sh_stride_threads); + constexpr int b_sh_wr_delta = threads * b_thread_vecs; + constexpr int b_sh_rd_delta = threads * b_thread_vecs; + constexpr int b_sh_stage = b_sh_stride * thread_k_blocks; + constexpr int b_sh_wr_iters = b_sh_stage / b_sh_wr_delta; + + // Scale sizes/strides without act_order + int s_gl_stride = prob_n / 8; + constexpr int s_sh_stride = 16 * thread_n_blocks / 8; + constexpr int s_tb_groups = + !has_act_order && group_blocks != -1 && group_blocks < thread_k_blocks + ? 
thread_k_blocks / group_blocks + : 1; + constexpr int s_sh_stage = s_tb_groups * s_sh_stride; + int s_gl_rd_delta = s_gl_stride; + + // Scale size/strides with act_order + constexpr int tb_k = 16 * thread_k_blocks; + constexpr int g_idx_stage = has_act_order ? (tb_k * sizeof(int)) / 16 : 0; + // constexpr int act_s_row_stride = 1; + // int act_s_col_stride = act_s_row_stride * num_groups; + int act_s_col_stride = 1; + int act_s_col_warp_stride = act_s_col_stride * 8; + int tb_n_warps = thread_n_blocks / 4; + int act_s_col_tb_stride = act_s_col_warp_stride * tb_n_warps; + + // Global A read index of current thread. + int a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); + a_gl_rd += a_gl_rd_delta_o * slice_row; + // Shared write index of current thread. + int a_sh_wr = a_sh_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); + // Shared read index. + int a_sh_rd = + a_sh_stride * ((threadIdx.x % 32) % 16) + (threadIdx.x % 32) / 16; + a_sh_rd += 2 * ((threadIdx.x / 32) / (thread_n_blocks / 4)); + + int b_gl_rd = b_gl_stride * (threadIdx.x / b_sh_stride_threads) + + (threadIdx.x % b_sh_stride_threads) * b_thread_vecs; + b_gl_rd += b_sh_stride * slice_col; + b_gl_rd += b_gl_rd_delta_o * slice_row; + int b_sh_wr = threadIdx.x * b_thread_vecs; + int b_sh_rd = threadIdx.x * b_thread_vecs; + + // For act_order + constexpr int k_iter_size = tb_k / b_sh_wr_iters; + int slice_k_start = tb_k * slice_row; + int slice_k_finish = slice_k_start + tb_k * slice_iters; + int slice_k_start_shared_fetch = slice_k_start; + int slice_n_offset = act_s_col_tb_stride * slice_col; + + // No act_order + int s_gl_rd; + if constexpr (!has_act_order) { + if constexpr (group_blocks == -1) { + s_gl_rd = s_sh_stride * slice_col + threadIdx.x; + } else { + s_gl_rd = s_gl_stride * ((thread_k_blocks * slice_row) / group_blocks) + + s_sh_stride * slice_col + threadIdx.x; + } + } + int s_sh_wr = threadIdx.x; + bool s_sh_wr_pred = 
threadIdx.x < s_sh_stride; + + // We use a different scale layout for grouped and column-wise quantization as + // we scale a `half2` tile in column-major layout in the former and in + // row-major in the latter case. + int s_sh_rd; + if constexpr (group_blocks != -1) + s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) / 4; + else + s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) % 4; + + // Precompute which thread should not read memory in which iterations; this is + // needed if there are more threads than required for a certain tilesize or + // when the batchsize is not a multiple of 16. + bool a_sh_wr_pred[a_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) + a_sh_wr_pred[i] = a_sh_wr_delta * i + a_sh_wr < a_sh_stride * prob_m; + + // To ensure that writing and reading A tiles to/from shared memory, the + // latter in fragment format, is fully bank conflict free, we need to use a + // rather fancy XOR-based layout. The key here is that neither reads nor + // writes of the 16-byte `int4` blocks of 8 consecutive threads involve the + // same shared memory banks. Further, it seems (based on NSight-Compute) that + // each warp must also write a consecutive memory segment? + auto transform_a = [&](int i) { + int row = i / a_gl_rd_delta_o; + return a_gl_rd_delta_o * row + (i % a_gl_rd_delta_o) ^ row; + }; + // Since the computation of this remapping is non-trivial and, due to our main + // loop unrolls, all shared memory accesses are static, we simply precompute + // both transformed reads and writes. 
+ int a_sh_wr_trans[a_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) + a_sh_wr_trans[i] = transform_a(a_sh_wr_delta * i + a_sh_wr); + int a_sh_rd_trans[b_sh_wr_iters][thread_m_blocks]; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) { +#pragma unroll + for (int j = 0; j < thread_m_blocks; j++) + a_sh_rd_trans[i][j] = + transform_a(a_sh_rd_delta_o * i + a_sh_rd_delta_i * j + a_sh_rd); + } + + // Since B-accesses have non-constant stride they have to be computed at + // runtime; we break dependencies between subsequent accesses with a tile by + // maintining multiple pointers (we have enough registers), a tiny + // optimization. + const int4 *B_ptr[b_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] = B + b_gl_rd_delta_i * i + b_gl_rd; + + extern __shared__ int4 sh[]; + // Shared memory storage for global fetch pipelines. + int4 *sh_a = sh; + int4 *sh_b = sh_a + (stages * a_sh_stage); + int4 *sh_g_idx = sh_b + (stages * b_sh_stage); + int4 *sh_s = sh_g_idx + (stages * g_idx_stage); + + // Register storage for double buffer of shared memory reads. + FragA frag_a[2][thread_m_blocks]; + I4 frag_b_quant[2][b_thread_vecs]; + FragC frag_c[thread_m_blocks][4][2]; + FragS frag_s[2][4]; // No act-order + FragS act_frag_s[2][4][4]; // For act-order + + // Zero accumulators. 
+ auto zero_accums = [&]() { +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4 * 2 * 4; i++) + reinterpret_cast(frag_c)[i] = 0; + }; + + int sh_first_group_id = -1; + int sh_num_groups = -1; + constexpr int sh_max_num_groups = 32; + + auto fetch_scales_to_shared = [&](bool is_async, int first_group_id, + int last_group_id) { + sh_first_group_id = first_group_id; + sh_num_groups = last_group_id - first_group_id + 1; + + if (sh_num_groups < sh_max_num_groups) { + sh_num_groups = sh_max_num_groups; + } + + if (sh_first_group_id + sh_num_groups > num_groups) { + sh_num_groups = num_groups - sh_first_group_id; + } + + int row_offset = first_group_id * s_gl_stride; + + if (is_async) { + for (int i = 0; i < sh_num_groups; i++) { + if (threadIdx.x < s_sh_stride) { + cp_async4_pred(&sh_s[(i * s_sh_stride) + threadIdx.x], + &scales_ptr[row_offset + (i * s_gl_stride) + + slice_n_offset + threadIdx.x]); + } + } + } else { + for (int i = 0; i < sh_num_groups; i++) { + if (threadIdx.x < s_sh_stride) { + sh_s[(i * s_sh_stride) + threadIdx.x] = + scales_ptr[row_offset + (i * s_gl_stride) + slice_n_offset + + threadIdx.x]; + } + } + } + }; + // Asynchronously fetch the next A, B and s tile from global to the next + // shared memory pipeline location. 
+ auto fetch_to_shared = [&](int pipe, int a_off, bool pred = true) { + if (pred) { + int4 *sh_a_stage = sh_a + a_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) { + cp_async4_pred( + &sh_a_stage[a_sh_wr_trans[i]], + &A[a_gl_rd_delta_i * i + a_gl_rd + a_gl_rd_delta_o * a_off], + a_sh_wr_pred[i]); + } + int4 *sh_b_stage = sh_b + b_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) { +#pragma unroll + for (int j = 0; j < b_thread_vecs; j++) { + cp_async4(&sh_b_stage[b_sh_wr_delta * i + b_sh_wr + j], B_ptr[i] + j); + } + + B_ptr[i] += b_gl_rd_delta_o; + } + + if constexpr (has_act_order) { + // Fetch g_idx thread-block portion + int full_pipe = a_off; + int cur_k = slice_k_start_shared_fetch + tb_k * full_pipe; + if (cur_k < prob_k && cur_k < slice_k_finish) { + int4 *sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe; + + int4 const *cur_g_idx_stage_ptr = + reinterpret_cast(&g_idx[cur_k]); + + if (threadIdx.x < g_idx_stage) { + cp_async4_pred(&sh_g_idx_stage[threadIdx.x], + &cur_g_idx_stage_ptr[threadIdx.x]); + } + } + } else { + if constexpr (group_blocks != -1) { + int4 *sh_s_stage = sh_s + s_sh_stage * pipe; + + if constexpr (group_blocks >= thread_k_blocks) { + // Only fetch scales if this tile starts a new group + if (pipe % (group_blocks / thread_k_blocks) == 0) { + if (s_sh_wr_pred) { + cp_async4(&sh_s_stage[s_sh_wr], &scales_ptr[s_gl_rd]); + } + s_gl_rd += s_gl_rd_delta; + } + } else { + for (int i = 0; i < s_tb_groups; i++) { + if (s_sh_wr_pred) { + cp_async4(&sh_s_stage[i * s_sh_stride + s_sh_wr], + &scales_ptr[s_gl_rd]); + } + s_gl_rd += s_gl_rd_delta; + } + } + } + } + } + // Insert a fence even when we are winding down the pipeline to ensure that + // waiting is also correct at this point. + cp_async_fence(); + }; + + // Wait until the next thread tile has been loaded to shared memory. 
+ auto wait_for_stage = [&]() { + // We only have `stages - 2` active fetches since we are double buffering + // and can only issue the next fetch when it is guaranteed that the previous + // shared memory load is fully complete (as it may otherwise be + // overwritten). + cp_async_wait(); + __syncthreads(); + }; + + // Load the next sub-tile from the current location in the shared memory pipe + // into the current register buffer. + auto fetch_to_registers = [&](int k, int pipe) { + int4 *sh_a_stage = sh_a + a_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) + ldsm4(frag_a[k % 2][i], &sh_a_stage[a_sh_rd_trans[k % b_sh_wr_iters][i]]); + int4 *sh_b_stage = sh_b + b_sh_stage * pipe; + +#pragma unroll + for (int i = 0; i < b_thread_vecs; i++) { + frag_b_quant[k % 2][i] = *reinterpret_cast( + &sh_b_stage[b_sh_rd_delta * (k % b_sh_wr_iters) + b_sh_rd + i]); + } + }; + + bool is_same_group[stages]; + int same_group_id[stages]; + + auto init_same_group = [&](int pipe) { + if constexpr (!has_act_order) { + is_same_group[pipe] = false; + same_group_id[pipe] = 0; + return; + } + + int4 *sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe; + int *sh_g_idx_int_ptr = reinterpret_cast(sh_g_idx_stage); + + int group_id_1 = sh_g_idx_int_ptr[0]; + int group_id_2 = sh_g_idx_int_ptr[tb_k - 1]; + + is_same_group[pipe] = group_id_1 == group_id_2; + same_group_id[pipe] = group_id_1; + }; + + auto fetch_scales_to_registers = [&](int k, int full_pipe) { + int pipe = full_pipe % stages; + + if constexpr (!has_act_order) { + // No act-order case + if constexpr (group_blocks != -1) { + if constexpr (group_blocks >= thread_k_blocks) { + int4 *sh_s_stage = + sh_s + s_sh_stage * ((group_blocks / thread_k_blocks) * + (pipe / (group_blocks / thread_k_blocks))); + reinterpret_cast(&frag_s[k % 2])[0] = sh_s_stage[s_sh_rd]; + } else { + int warp_id = threadIdx.x / 32; + int n_warps = thread_n_blocks / 4; + + int warp_row = warp_id / n_warps; + + int cur_k = warp_row * 16; + 
cur_k += k_iter_size * (k % b_sh_wr_iters); + + int k_blocks = cur_k / 16; + int cur_group_id = k_blocks / group_blocks; + + int4 *sh_s_stage = sh_s + s_sh_stage * pipe; + + reinterpret_cast(&frag_s[k % 2])[0] = + sh_s_stage[s_sh_rd + cur_group_id * s_sh_stride]; + } + } + + return; + } + + // Act-order case + + // Determine K of the "current" thread-block + int cur_k = slice_k_start + tb_k * full_pipe; + if (cur_k >= prob_k || cur_k >= slice_k_finish) { + return; + } + + // Reset (to current thread-block) since we read g_idx portion from the + // shared memory + cur_k = 0; + + // Progress to current iteration + cur_k += k_iter_size * (k % b_sh_wr_iters); + + // Determine "position" inside the thread-block (based on warp and + // thread-id) + int warp_id = threadIdx.x / 32; + int n_warps = + thread_n_blocks / 4; // Each warp processes 4 16-size tiles over N + + int warp_row = warp_id / n_warps; + int warp_col = warp_id % n_warps; + + cur_k += warp_row * 16; + + int th_id = threadIdx.x % 32; + cur_k += (th_id % 4) * 2; // Due to tensor-core layout for fp16 B matrix + + int s_col_shift = + /*slice_n_offset +*/ (act_s_col_warp_stride * warp_col) + + (th_id / 4) * act_s_col_stride; + + if (is_same_group[pipe]) { + if (k % 2 == 0) { + *(reinterpret_cast(&(act_frag_s[k % 2][0][0]))) = + sh_s[(same_group_id[pipe] - sh_first_group_id) * s_sh_stride + + s_col_shift]; + } else { + *(reinterpret_cast(&(act_frag_s[k % 2][0][0]))) = + *(reinterpret_cast(&(act_frag_s[(k - 1) % 2][0][0]))); + } + + for (int i = 1; i < 4; i++) { + *(reinterpret_cast(&(act_frag_s[k % 2][i][0]))) = + *(reinterpret_cast(&(act_frag_s[k % 2][0][0]))); + } + return; + } + + int4 *sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe; + int *sh_g_idx_int_ptr = reinterpret_cast(sh_g_idx_stage); + + constexpr int k_frag_offsets[4] = {0, 1, 8, + 9}; // Tensor core offsets per thread + +#pragma unroll + for (int i = 0; i < 4; i++) { + + int actual_k = cur_k + k_frag_offsets[i]; + + int group_id = 
sh_g_idx_int_ptr[actual_k]; + int rel_group_id = group_id - sh_first_group_id; + + *(reinterpret_cast(&(act_frag_s[k % 2][i][0]))) = + sh_s[rel_group_id * s_sh_stride + s_col_shift]; + } + }; + + // Execute the actual tensor core matmul of a sub-tile. + auto matmul = [&](int k) { +// We have the m dimension as the inner loop in order to encourage overlapping +// dequantization and matmul operations. +#pragma unroll + for (int j = 0; j < 4; j++) { + FragB frag_b0; + FragB frag_b1; + if constexpr (num_bits == 4) { + int b_quant = frag_b_quant[k % 2][0][j]; + int b_quant_shift = b_quant >> 8; + + frag_b0 = dequant_4bit(b_quant); + frag_b1 = dequant_4bit(b_quant_shift); + + } else { + int *frag_b_quant_ptr = reinterpret_cast(frag_b_quant[k % 2]); + int b_quant_0 = frag_b_quant_ptr[j * 2 + 0]; + int b_quant_1 = frag_b_quant_ptr[j * 2 + 1]; + + frag_b0 = dequant_8bit(b_quant_0); + frag_b1 = dequant_8bit(b_quant_1); + } + + // Apply scale to frag_b0 + if constexpr (has_act_order) { + scale4(frag_b0, act_frag_s[k % 2][0][j], act_frag_s[k % 2][1][j], + act_frag_s[k % 2][2][j], act_frag_s[k % 2][3][j], 0); + } else { + if constexpr (group_blocks != -1) { + scale(frag_b0, frag_s[k % 2][j], 0); + } + } + + // Apply scale to frag_b1 + if constexpr (has_act_order) { + scale4(frag_b1, act_frag_s[k % 2][0][j], act_frag_s[k % 2][1][j], + act_frag_s[k % 2][2][j], act_frag_s[k % 2][3][j], 1); + + } else { + if constexpr (group_blocks != -1) { + scale(frag_b1, frag_s[k % 2][j], 1); + } + } + +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) { + mma(frag_a[k % 2][i], frag_b0, frag_c[i][j][0]); + mma(frag_a[k % 2][i], frag_b1, frag_c[i][j][1]); + } + } + }; + + // Since we slice across the k dimension of a tile in order to increase the + // number of warps while keeping the n dimension of a tile reasonable, we have + // multiple warps that accumulate their partial sums of the same output + // location; which we have to reduce over in the end. We do in shared memory. 
+ auto thread_block_reduce = [&]() { + constexpr int red_off = threads / b_sh_stride_threads / 2; + if (red_off >= 1) { + int red_idx = threadIdx.x / b_sh_stride_threads; + constexpr int red_sh_stride = b_sh_stride_threads * 4 * 2; + constexpr int red_sh_delta = b_sh_stride_threads; + int red_sh_rd = red_sh_stride * (threadIdx.x / b_sh_stride_threads) + + (threadIdx.x % b_sh_stride_threads); + + // Parallel logarithmic shared memory reduction. We make sure to avoid any + // unnecessary read or write iterations, e.g., for two warps we write only + // once by warp 1 and read only once by warp 0. + +#pragma unroll + for (int m_block = 0; m_block < thread_m_blocks; m_block++) { +#pragma unroll + for (int i = red_off; i > 0; i /= 2) { + if (i <= red_idx && red_idx < 2 * i) { +#pragma unroll + for (int j = 0; j < 4 * 2; j++) { + int red_sh_wr = + red_sh_delta * j + (red_sh_rd - red_sh_stride * i); + if (i < red_off) { + float *c_rd = reinterpret_cast( + &sh[red_sh_delta * j + red_sh_rd]); + float *c_wr = reinterpret_cast(&sh[red_sh_wr]); +#pragma unroll + for (int k = 0; k < 4; k++) + reinterpret_cast(frag_c)[4 * 2 * m_block + j][k] += + c_rd[k] + c_wr[k]; + } + sh[red_sh_wr] = + reinterpret_cast(&frag_c)[4 * 2 * m_block + j]; + } + } + __syncthreads(); + } + if (red_idx == 0) { +#pragma unroll + for (int i = 0; i < 4 * 2; i++) { + float *c_rd = + reinterpret_cast(&sh[red_sh_delta * i + red_sh_rd]); +#pragma unroll + for (int j = 0; j < 4; j++) + reinterpret_cast(frag_c)[4 * 2 * m_block + i][j] += + c_rd[j]; + } + } + __syncthreads(); + } + } + }; + + // Since multiple threadblocks may process parts of the same column slice, we + // finally have to globally reduce over the results. As the striped partitioning + // minimizes the number of such reductions and our outputs are usually rather + // small, we perform this reduction serially in L2 cache. 
+ auto global_reduce = [&](bool first = false, bool last = false) { + // We are very careful here to reduce directly in the output buffer to + // maximize L2 cache utilization in this step. To do this, we write out + // results in FP16 (but still reduce with FP32 compute). + constexpr int active_threads = 32 * thread_n_blocks / 4; + if (threadIdx.x < active_threads) { + int c_gl_stride = prob_n / 8; + int c_gl_wr_delta_o = 8 * c_gl_stride; + int c_gl_wr_delta_i = 4 * (active_threads / 32); + int c_gl_wr = c_gl_stride * ((threadIdx.x % 32) / 4) + + 4 * (threadIdx.x / 32) + threadIdx.x % 4; + c_gl_wr += (2 * thread_n_blocks) * slice_col; + constexpr int c_sh_wr_delta = active_threads; + int c_sh_wr = threadIdx.x; + + int row = (threadIdx.x % 32) / 4; + + if (!first) { +// Interestingly, doing direct global accesses here really seems to mess up the +// compiler and lead to slowdowns, hence we also use async-copies even though +// these fetches are not actually asynchronous. +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4; i++) { + cp_async4_pred(&sh[c_sh_wr + c_sh_wr_delta * i], + &C[c_gl_wr + c_gl_wr_delta_o * (i / 2) + + c_gl_wr_delta_i * (i % 2)], + i < (thread_m_blocks - 1) * 4 || + 8 * (i / 2) + row < prob_m); + } + cp_async_fence(); + cp_async_wait<0>(); + } + +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4; i++) { + if (i < (thread_m_blocks - 1) * 4 || 8 * (i / 2) + row < prob_m) { + if (!first) { + int4 c_red = sh[c_sh_wr + i * c_sh_wr_delta]; +#pragma unroll + for (int j = 0; j < 2 * 4; j++) { + reinterpret_cast( + &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)] += + __half2float(reinterpret_cast<__half *>(&c_red)[j]); + } + } + if (!last) { + int4 c; +#pragma unroll + for (int j = 0; j < 2 * 4; j++) { + reinterpret_cast<__half *>(&c)[j] = + __float2half(reinterpret_cast( + &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)]); + } + C[c_gl_wr + c_gl_wr_delta_o * (i / 2) + c_gl_wr_delta_i * (i % 2)] = + c; + } + } + } + } + }; + + // Write out 
the reduce final result in the correct layout. We only actually + // reshuffle matrix fragments in this step, the reduction above is performed + // in fragment layout. + auto write_result = [&]() { + int c_gl_stride = prob_n / 8; + constexpr int c_sh_stride = 2 * thread_n_blocks + 1; + int c_gl_wr_delta = c_gl_stride * (threads / (2 * thread_n_blocks)); + constexpr int c_sh_rd_delta = + c_sh_stride * (threads / (2 * thread_n_blocks)); + + int c_gl_wr = c_gl_stride * (threadIdx.x / (2 * thread_n_blocks)) + + (threadIdx.x % (2 * thread_n_blocks)); + c_gl_wr += (2 * thread_n_blocks) * slice_col; + int c_sh_wr = + (4 * c_sh_stride) * ((threadIdx.x % 32) / 4) + (threadIdx.x % 32) % 4; + c_sh_wr += 32 * (threadIdx.x / 32); + int c_sh_rd = c_sh_stride * (threadIdx.x / (2 * thread_n_blocks)) + + (threadIdx.x % (2 * thread_n_blocks)); + + int c_gl_wr_end = c_gl_stride * prob_m; + + // We first reorder in shared memory to guarantee the most efficient final + // global write patterns + auto write = [&](int idx, float c0, float c1, FragS &s) { + half2 res = __halves2half2(__float2half(c0), __float2half(c1)); + + // For per-column quantization we finally apply the scale here (only for + // 4-bit) + if constexpr (!has_act_order && group_blocks == -1 && num_bits == 4) { + res = __hmul2(res, s[0]); + } + + ((half2 *)sh)[idx] = res; + }; + + if (threadIdx.x / 32 < thread_n_blocks / 4) { +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) { +#pragma unroll + for (int j = 0; j < 4; j++) { + int wr = c_sh_wr + 8 * j; + write(wr + (4 * c_sh_stride) * 0 + 0, frag_c[i][j][0][0], + frag_c[i][j][0][1], frag_s[j / 2][2 * (j % 2) + 0]); + write(wr + (4 * c_sh_stride) * 8 + 0, frag_c[i][j][0][2], + frag_c[i][j][0][3], frag_s[j / 2][2 * (j % 2) + 0]); + write(wr + (4 * c_sh_stride) * 0 + 4, frag_c[i][j][1][0], + frag_c[i][j][1][1], frag_s[j / 2][2 * (j % 2) + 1]); + write(wr + (4 * c_sh_stride) * 8 + 4, frag_c[i][j][1][2], + frag_c[i][j][1][3], frag_s[j / 2][2 * (j % 2) + 1]); + } + 
c_sh_wr += 16 * (4 * c_sh_stride); + } + } + __syncthreads(); + +#pragma unroll + for (int i = 0; + i < div_ceil(16 * thread_m_blocks, threads / (2 * thread_n_blocks)); + i++) { + if (c_gl_wr < c_gl_wr_end) { + C[c_gl_wr] = sh[c_sh_rd]; + c_gl_wr += c_gl_wr_delta; + c_sh_rd += c_sh_rd_delta; + } + } + }; + + // Start global fetch and register load pipelines. + auto start_pipes = [&]() { + +#pragma unroll + for (int i = 0; i < stages - 1; i++) { + if (has_act_order && i == 0) { + int last_g_idx = slice_k_start + stages * tb_k * 2; + if (last_g_idx >= prob_k) { + last_g_idx = prob_k - 1; + } + fetch_scales_to_shared(true, g_idx[slice_k_start], g_idx[last_g_idx]); + } + fetch_to_shared(i, i, i < slice_iters); + } + + zero_accums(); + wait_for_stage(); + init_same_group(0); + fetch_to_registers(0, 0); + fetch_scales_to_registers(0, 0); + a_gl_rd += a_gl_rd_delta_o * (stages - 1); + slice_k_start_shared_fetch += tb_k * (stages - 1); + }; + if (slice_iters) { + start_pipes(); + } + + // Main loop. + while (slice_iters) { + // We unroll over both the global fetch and the register load pipeline to + // ensure all shared memory accesses are static. Note that both pipelines + // have even length meaning that the next iteration will always start at + // index 0. 
+ +#pragma unroll + for (int pipe = 0; pipe < stages;) { +#pragma unroll + for (int k = 0; k < b_sh_wr_iters; k++) { + fetch_to_registers(k + 1, pipe % stages); + fetch_scales_to_registers(k + 1, pipe); + if (k == b_sh_wr_iters - 2) { + fetch_to_shared((pipe + stages - 1) % stages, pipe, + slice_iters >= stages); + pipe++; + wait_for_stage(); + init_same_group(pipe % stages); + } + matmul(k); + } + slice_iters--; + if (slice_iters == 0) { + break; + } + } + + a_gl_rd += a_gl_rd_delta_o * stages; + slice_k_start += tb_k * stages; + slice_k_start_shared_fetch += tb_k * stages; + + if constexpr (has_act_order) { + int first_group_id = g_idx[slice_k_start]; + int last_g_idx = slice_k_start + stages * tb_k * 2; + if (last_g_idx >= prob_k) { + last_g_idx = prob_k - 1; + } + int last_group_id = g_idx[last_g_idx]; + if (last_group_id >= sh_first_group_id + sh_num_groups) { + fetch_scales_to_shared(false, first_group_id, last_group_id); + __syncthreads(); + } + } + + // Process results and, if necessary, proceed to the next column slice. + // While this pattern may not be the most readable, other ways of writing + // the loop seemed to noticeably worse performance after compilation. 
+ if (slice_iters == 0) { + cp_async_wait<0>(); + bool last = slice_idx == slice_count - 1; + // For per-column scales, we only fetch them here in the final step before + // write-out + if constexpr (!has_act_order && group_blocks == -1) { + if constexpr (num_bits == 8) { + if (s_sh_wr_pred) { + cp_async4(&sh_s[s_sh_wr], &scales_ptr[s_gl_rd]); + } + cp_async_fence(); + } else { + if (last) { + if (s_sh_wr_pred) { + cp_async4(&sh_s[s_sh_wr], &scales_ptr[s_gl_rd]); + } + cp_async_fence(); + } + } + } + + thread_block_reduce(); + if constexpr (!has_act_order && group_blocks == -1) { + if constexpr (num_bits == 8) { + cp_async_wait<0>(); + __syncthreads(); + if (threadIdx.x / 32 < thread_n_blocks / 4) { + reinterpret_cast(&frag_s)[0] = sh_s[s_sh_rd + 0]; + reinterpret_cast(&frag_s)[1] = sh_s[s_sh_rd + 4]; + } + + } else { + if (last) { + cp_async_wait<0>(); + __syncthreads(); + if (threadIdx.x / 32 < thread_n_blocks / 4) { + reinterpret_cast(&frag_s)[0] = sh_s[s_sh_rd + 0]; + reinterpret_cast(&frag_s)[1] = sh_s[s_sh_rd + 4]; + } + } + } + } + + // For 8-bit channelwise, we apply the scale before the global reduction + // that converts the fp32 results to fp16 (so that we avoid possible + // overflow in fp16) + if constexpr (!has_act_order && group_blocks == -1 && num_bits == 8) { + if (threadIdx.x / 32 < thread_n_blocks / 4) { +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) { +#pragma unroll + for (int j = 0; j < 4; j++) { + scale_float(reinterpret_cast(&frag_c[i][j][0][0]), + frag_s[j / 2][2 * (j % 2) + 0]); + scale_float(reinterpret_cast(&frag_c[i][j][0][2]), + frag_s[j / 2][2 * (j % 2) + 0]); + + scale_float(reinterpret_cast(&frag_c[i][j][1][0]), + frag_s[j / 2][2 * (j % 2) + 1]); + scale_float(reinterpret_cast(&frag_c[i][j][1][2]), + frag_s[j / 2][2 * (j % 2) + 1]); + } + } + } + } + + if (slice_count > 1) { // only globally reduce if there is more than one + // block in a slice + barrier_acquire(&locks[slice_col], slice_idx); + global_reduce(slice_idx 
== 0, last); + barrier_release(&locks[slice_col], last); + } + if (last) // only the last block in a slice actually writes the result + write_result(); + slice_row = 0; + slice_col_par++; + slice_col++; + init_slice(); + if (slice_iters) { + a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] += b_sh_stride - b_gl_rd_delta_o * k_tiles; + if (slice_col == 0) { +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] -= b_gl_stride; + } + + // Update slice k/n for scales loading + if constexpr (has_act_order) { + slice_k_start = tb_k * slice_row; + slice_k_finish = slice_k_start + tb_k * slice_iters; + slice_k_start_shared_fetch = slice_k_start; + slice_n_offset = act_s_col_tb_stride * slice_col; + + } else { + s_gl_rd = s_sh_stride * slice_col + threadIdx.x; + } + + start_pipes(); + } + } + } +} + +#define __CALL_IF(NUM_BITS, THREAD_M_BLOCKS, THREAD_N_BLOCKS, THREAD_K_BLOCKS, \ + HAS_ACT_ORDER, GROUP_BLOCKS, NUM_THREADS) \ + else if (num_bits == NUM_BITS && thread_m_blocks == THREAD_M_BLOCKS && \ + thread_n_blocks == THREAD_N_BLOCKS && \ + thread_k_blocks == THREAD_K_BLOCKS && \ + has_act_order == HAS_ACT_ORDER && group_blocks == GROUP_BLOCKS && \ + num_threads == NUM_THREADS) { \ + musaFuncSetAttribute( \ + Marlin, \ + musaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ + Marlin \ + <<>>( \ + A_ptr, B_ptr, C_ptr, s_ptr, g_idx_ptr, num_groups, prob_m, prob_n, \ + prob_k, locks); \ + } + +typedef struct { + int thread_k; + int thread_n; + int num_threads; +} thread_config_t; + +typedef struct { + int max_m_blocks; + thread_config_t tb_cfg; +} exec_config_t; + +thread_config_t thread_configs[] = { + // Ordered by priority + + // thread_k, thread_n, num_threads + {64, 256, 256}, // Default (max cache usage) + {64, 128, 128}, // Reduce N, reduce warps + {128, 64, 128}, // Reduce N more, but increase K + +}; + +int 
get_scales_cache_size(thread_config_t const &th_config, int prob_m, + int prob_n, int prob_k, int num_bits, int group_size, + bool has_act_order, bool is_k_full) { + bool cache_scales_chunk = has_act_order && !is_k_full; + + int tb_n = th_config.thread_n; + int tb_k = th_config.thread_k; + + // Get max scale groups per thread-block + int tb_groups; + if (group_size == -1) { + tb_groups = 1; + } else if (group_size == 0) { + tb_groups = div_ceil(tb_k, 32); // Worst case is 32 group size + } else { + tb_groups = div_ceil(tb_k, group_size); + } + + if (cache_scales_chunk) { + int load_groups = + tb_groups * pipe_stages * 2; // Chunk size is 2x pipeline over dim K + load_groups = max(load_groups, 32); // We load at least 32 scale groups + return load_groups * tb_n * 2; + + } else { + int tb_scales = tb_groups * tb_n * 2; + + return tb_scales * pipe_stages; + } +} + +bool is_valid_cache_size(thread_config_t const &th_config, int max_m_blocks, + int prob_m, int prob_n, int prob_k, int num_bits, + int scales_cache_size, int max_shared_mem) { + int pack_factor = 32 / num_bits; + + // Get B size + int tb_k = th_config.thread_k; + int tb_n = th_config.thread_n; + + int b_size = (tb_k * tb_n / pack_factor) * 4; + + // Get A size + int m_blocks = div_ceil(prob_m, 16); + int tb_max_m = 16; + + while (true) { + if (m_blocks >= max_m_blocks) { + tb_max_m *= max_m_blocks; + break; + } + + max_m_blocks--; + if (max_m_blocks == 0) { + TORCH_CHECK(false, "Unexpected m_blocks = ", m_blocks); + } + } + + int a_size = (tb_max_m * tb_k) * 2; + + float pipe_size = (a_size + b_size) * pipe_stages; + + TORCH_CHECK(max_shared_mem / 2 > scales_cache_size); // Sanity + + return pipe_size < 0.95f * (max_shared_mem - scales_cache_size); +} + +bool is_valid_config(thread_config_t const &th_config, int max_m_blocks, + int prob_m, int prob_n, int prob_k, int num_bits, + int group_size, bool has_act_order, bool is_k_full, + int max_shared_mem) { + // Sanity + if (th_config.thread_k == -1 || 
th_config.thread_n == -1 || + th_config.num_threads == -1) { + return false; + } + + // Verify K/N are divisible by thread K/N + if (prob_k % th_config.thread_k != 0 || prob_n % th_config.thread_n != 0) { + return false; + } + + // Verify min for thread K/N + if (th_config.thread_n < min_thread_n || th_config.thread_k < min_thread_k) { + return false; + } + + // num_threads must be at least 128 (= 4 warps) + if (th_config.num_threads < 128) { + return false; + } + + // Determine cache for scales + int scales_cache_size = + get_scales_cache_size(th_config, prob_m, prob_n, prob_k, num_bits, + group_size, has_act_order, is_k_full); + + // Check that pipeline fits into cache + if (!is_valid_cache_size(th_config, max_m_blocks, prob_m, prob_n, prob_k, + num_bits, scales_cache_size, max_shared_mem)) { + return false; + } + + return true; +} + +exec_config_t determine_thread_config(int prob_m, int prob_n, int prob_k, + int num_bits, int group_size, + bool has_act_order, bool is_k_full, + int max_shared_mem) { + int max_m_blocks = 4; + while (max_m_blocks > 0) { + for (auto th_config : thread_configs) { + if (is_valid_config(th_config, max_m_blocks, prob_m, prob_n, prob_k, + num_bits, group_size, has_act_order, is_k_full, + max_shared_mem)) { + return exec_config_t{max_m_blocks, th_config}; + } + } + + printf("WARNING: Marlin kernel is reducing max_m_blocks due to small SM " + "GPU cache. This may " + "hurt performance. 
Consider upgrading your GPU.\n"); + + max_m_blocks--; // Process less M blocks per invocation to reduce cache + // usage + } + + return exec_config_t{0, {-1, -1, -1}}; +} + +#define CALL_IF(NUM_BITS, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) + +void marlin_mm_f16i4(const void *A, const void *B, void *C, void *s, + void *g_idx, void *perm, void *a_tmp, int prob_m, + int prob_n, int prob_k, void *workspace, int num_bits, + bool has_act_order, bool is_k_full, int num_groups, + int group_size, int dev, musaStream_t stream, int thread_k, + int thread_n, int sms, int max_par) { + 
TORCH_CHECK(num_bits == 4 || num_bits == 8, + "num_bits must be 4 or 8. Got = ", num_bits); + TORCH_CHECK(prob_m > 0 && prob_n > 0 && prob_k > 0, "Invalid MNK = [", prob_m, + ", ", prob_n, ", ", prob_k, "]"); + + int tot_m = prob_m; + int tot_m_blocks = div_ceil(tot_m, 16); + int pad = 16 * tot_m_blocks - tot_m; + + if (sms == -1) { + musaDeviceGetAttribute(&sms, musaDevAttrMultiProcessorCount, dev); + } + + int max_shared_mem = 0; + musaDeviceGetAttribute(&max_shared_mem, + musaDevAttrMaxSharedMemoryPerBlockOptin, dev); + TORCH_CHECK(max_shared_mem > 0); + + // Set thread config + exec_config_t exec_cfg; + if (thread_k != -1 && thread_n != -1) { + // User-defined config + exec_cfg = + exec_config_t{4, thread_config_t{thread_k, thread_n, default_threads}}; + } else { + // Auto config + exec_cfg = + determine_thread_config(prob_m, prob_n, prob_k, num_bits, group_size, + has_act_order, is_k_full, max_shared_mem); + } + + TORCH_CHECK(exec_cfg.max_m_blocks > 0 && + is_valid_config(exec_cfg.tb_cfg, exec_cfg.max_m_blocks, + prob_m, prob_n, prob_k, num_bits, group_size, + has_act_order, is_k_full, max_shared_mem), + "Invalid thread config: max_m_blocks = ", exec_cfg.max_m_blocks, + ", thread_k = ", exec_cfg.tb_cfg.thread_k, + ", thread_n = ", exec_cfg.tb_cfg.thread_n, + ", num_threads = ", exec_cfg.tb_cfg.num_threads, " for MKN = [", + prob_m, ", ", prob_k, ", ", prob_n, "] and num_bits = ", num_bits, + ", group_size = ", group_size, + ", has_act_order = ", has_act_order, ", is_k_full = ", is_k_full, + ", max_shared_mem = ", max_shared_mem); + + int num_threads = exec_cfg.tb_cfg.num_threads; + thread_k = exec_cfg.tb_cfg.thread_k; + thread_n = exec_cfg.tb_cfg.thread_n; + + int thread_k_blocks = thread_k / 16; + int thread_n_blocks = thread_n / 16; + + int blocks = sms; + + TORCH_CHECK(prob_n % thread_n == 0, "prob_n = ", prob_n, + " is not divisible by thread_n = ", thread_n); + TORCH_CHECK(prob_k % thread_k == 0, "prob_k = ", prob_k, + " is not divisible by thread_k = ", 
thread_k); + + int group_blocks = 0; + if (has_act_order) { + if (is_k_full) { + TORCH_CHECK(group_size != -1); + group_blocks = group_size / 16; + TORCH_CHECK(prob_k % group_blocks == 0, "prob_k = ", prob_k, + " is not divisible by group_blocks = ", group_blocks); + } else { + TORCH_CHECK(group_size == 0); + group_blocks = 0; + } + + } else { + if (group_size == -1) { + group_blocks = -1; + } else { + group_blocks = group_size / 16; + TORCH_CHECK(prob_k % group_blocks == 0, "prob_k = ", prob_k, + " is not divisible by group_blocks = ", group_blocks); + } + } + + const int4 *A_ptr = (const int4 *)A; + const int4 *B_ptr = (const int4 *)B; + int4 *C_ptr = (int4 *)C; + const int4 *s_ptr = (const int4 *)s; + const int *g_idx_ptr = (const int *)g_idx; + const int *perm_ptr = (const int *)perm; + int4 *a_tmp_ptr = (int4 *)a_tmp; + + int *locks = (int *)workspace; + + if (has_act_order) { + // Permute A columns + int block_rows = div_ceil(prob_m, blocks); + permute_cols_kernel<<>>( + A_ptr, perm_ptr, a_tmp_ptr, prob_m, prob_k, block_rows); + A_ptr = a_tmp_ptr; + } + + // If we have a full K, then we can run the non-act-order version of Marlin + // (since the weight rows are reordered by increasing group ids, and by having + // a full K, we have full original groups) + if (is_k_full) { + has_act_order = false; + } + + // Main loop + for (int i = 0; i < tot_m_blocks; i += exec_cfg.max_m_blocks) { + int thread_m_blocks = tot_m_blocks - i; + prob_m = tot_m - 16 * i; + int par = 1; + if (thread_m_blocks > exec_cfg.max_m_blocks) { + // Note that parallel > 1 currently only works for inputs without any + // padding + par = (16 * thread_m_blocks - pad) / (16 * exec_cfg.max_m_blocks); + if (par > max_par) + par = max_par; + prob_m = (16 * exec_cfg.max_m_blocks) * par; + i += exec_cfg.max_m_blocks * (par - 1); + thread_m_blocks = exec_cfg.max_m_blocks; + } + + // Define kernel configurations + if (false) { + } + CALL_IF(4, 32, 2, 256) + CALL_IF(4, 16, 4, 256) + CALL_IF(4, 8, 4, 
128) + CALL_IF(4, 4, 8, 128) + CALL_IF(8, 32, 2, 256) + CALL_IF(8, 16, 4, 256) + CALL_IF(8, 8, 4, 128) + CALL_IF(8, 4, 8, 128) + else { + TORCH_CHECK(false, "Unsupported shapes: MNK = [" + str(prob_m) + ", " + + str(prob_n) + ", " + str(prob_k) + "]" + + ", has_act_order = " + str(has_act_order) + + ", num_groups = " + str(num_groups) + + ", group_size = " + str(group_size) + + ", thread_m_blocks = " + str(thread_m_blocks) + + ", thread_n_blocks = " + str(thread_n_blocks) + + ", thread_k_blocks = " + str(thread_k_blocks)); + } + + A_ptr += 16 * thread_m_blocks * (prob_k / 8) * par; + C_ptr += 16 * thread_m_blocks * (prob_n / 8) * par; + } +} + +} // namespace gptq_marlin + +torch::Tensor gptq_marlin_gemm(torch::Tensor &a, torch::Tensor &b_q_weight, + torch::Tensor &b_scales, torch::Tensor &g_idx, + torch::Tensor &perm, torch::Tensor &workspace, + int64_t num_bits, int64_t size_m, int64_t size_n, + int64_t size_k, bool is_k_full) { + // Verify num_bits + TORCH_CHECK(num_bits == 4 || num_bits == 8, + "num_bits must be 4 or 8. 
Got = ", num_bits); + int pack_factor = 32 / num_bits; + + // Verify A + TORCH_CHECK(a.size(0) == size_m, "Shape mismatch: a.size(0) = ", a.size(0), + ", size_m = ", size_m); + TORCH_CHECK(a.size(1) == size_k, "Shape mismatch: a.size(1) = ", a.size(1), + ", size_k = ", size_k); + + // Verify B + TORCH_CHECK(size_k % gptq_marlin::tile_size == 0, "size_k = ", size_k, + " is not divisible by tile_size = ", gptq_marlin::tile_size); + TORCH_CHECK((size_k / gptq_marlin::tile_size) == b_q_weight.size(0), + "Shape mismatch: b_q_weight.size(0) = ", b_q_weight.size(0), + ", size_k = ", size_k, ", tile_size = ", gptq_marlin::tile_size); + TORCH_CHECK(b_q_weight.size(1) % gptq_marlin::tile_size == 0, + "b_q_weight.size(1) = ", b_q_weight.size(1), + " is not divisible by tile_size = ", gptq_marlin::tile_size); + int actual_size_n = + (b_q_weight.size(1) / gptq_marlin::tile_size) * pack_factor; + TORCH_CHECK(size_n == actual_size_n, "size_n = ", size_n, + ", actual_size_n = ", actual_size_n); + + // Verify device and strides + TORCH_CHECK(a.device().is_cuda(), "A is not on GPU"); + TORCH_CHECK(a.is_contiguous(), "A is not contiguous"); + + TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); + TORCH_CHECK(b_q_weight.is_contiguous(), "b_q_weight is not contiguous"); + + TORCH_CHECK(b_scales.device().is_cuda(), "b_scales is not on GPU"); + TORCH_CHECK(b_scales.is_contiguous(), "b_scales is not contiguous"); + + TORCH_CHECK(g_idtrue, "g_idx is not on GPU"); + TORCH_CHECK(g_idx.is_contiguous(), "g_idx is not contiguous"); + + TORCH_CHECK(perm.device().is_cuda(), "perm is not on GPU"); + TORCH_CHECK(perm.is_contiguous(), "perm is not contiguous"); + + // Alloc buffers + const at::musa::OptionalMUSAGuard device_guard(device_of(a)); + auto options = torch::TensorOptions().dtype(a.dtype()).device(a.device()); + torch::Tensor c = torch::empty({size_m, size_n}, options); + torch::Tensor a_tmp = torch::empty({size_m, size_k}, options); + + // thread_k: `k` size of a 
thread_tile in `weights` (can usually be left as + // auto -1) + int thread_k = -1; + // thread_n: `n` size of a thread_tile in `weights` (can usually be left as + // auto -1) + int thread_n = -1; + // sms: number of SMs to use for the kernel (can usually be left as auto -1) + int sms = -1; + + // Verify g_idx and perm + TORCH_CHECK((g_idx.size(0) == 0 && perm.size(0) == 0) || + (g_idx.size(0) == size_k && perm.size(0) == size_k), + "Unexpected g_idx.size(0) = ", g_idx.size(0), + " and perm.size(0) = ", perm.size(0), + ", where size_k = ", size_k); + + // Detect groupsize and act_order + int num_groups = -1; + int group_size = -1; + bool has_act_order = g_idx.size(0) != 0; + + int b_rank = b_scales.sizes().size(); + TORCH_CHECK(b_rank == 2, "b_scales rank = ", b_rank, " is not 2"); + TORCH_CHECK(b_scales.size(1) == size_n, "b_scales dim 1 = ", b_scales.size(1), + " is not size_n = ", size_n); + num_groups = b_scales.size(0); + + if (has_act_order) { + if (is_k_full) { + TORCH_CHECK(num_groups > 1, "For act_order, num_groups must be > 1"); + TORCH_CHECK(size_k % num_groups == 0, "size_k = ", size_k, + ", is not divisible by num_groups = ", num_groups); + group_size = size_k / num_groups; + } else { + group_size = 0; + } + + } else { + if (num_groups > 1) { + TORCH_CHECK( + size_k % num_groups == 0, "size_k = ", size_k, + ", is not divisible by b_scales.size(0) = ", b_scales.size(0)); + group_size = size_k / num_groups; + } else { + group_size = -1; + } + } + + // Verify workspace size + TORCH_CHECK( + size_n % gptq_marlin::min_thread_n == 0, "size_n = ", size_n, + ", is not divisible by min_thread_n = ", gptq_marlin::min_thread_n); + int min_workspace_size = + (size_n / gptq_marlin::min_thread_n) * gptq_marlin::max_par; + TORCH_CHECK(workspace.numel() >= min_workspace_size, + "workspace.numel = ", workspace.numel(), + " is below min_workspace_size = ", min_workspace_size); + + int dev = a.get_device(); + gptq_marlin::marlin_mm_f16i4( + a.data_ptr(), 
b_q_weight.data_ptr(), c.data_ptr(), b_scales.data_ptr(), + g_idx.data_ptr(), perm.data_ptr(), a_tmp.data_ptr(), size_m, size_n, + size_k, workspace.data_ptr(), num_bits, has_act_order, is_k_full, + num_groups, group_size, dev, at::cuda::getCurrentMUSAStream(dev), + thread_k, thread_n, sms, gptq_marlin::max_par); + + return c; +} + +#endif diff --git a/csrc_musa/quantization/gptq_marlin/gptq_marlin.muh b/csrc_musa/quantization/gptq_marlin/gptq_marlin.muh new file mode 100644 index 0000000..05d5070 --- /dev/null +++ b/csrc_musa/quantization/gptq_marlin/gptq_marlin.muh @@ -0,0 +1,70 @@ +#pragma once + +#include + +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" +#include +#include +#include +#include + +namespace gptq_marlin { + +// 8 warps are a good choice since every SM has 4 schedulers and having more than 1 warp per +// schedule allows some more latency hiding. At the same time, we want relatively few warps to have +// many registers per warp and small tiles. 
+static constexpr int default_threads = 256; + +static constexpr int pipe_stages = 4; // 4 pipeline stages fit into shared memory + +static constexpr int min_thread_n = 64; +static constexpr int min_thread_k = 64; + +static constexpr int tile_size = 16; +static constexpr int max_par = 16; + +template +struct Vec { + T elems[n]; + __device__ T& operator[](int i) { return elems[i]; } +}; + +using I4 = Vec; + +constexpr int div_ceil(int a, int b) { return (a + b - 1) / b; } + +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800 + // No support for async +#else + +__device__ inline void cp_async4_pred(void* smem_ptr, const void* glob_ptr, bool pred = true) { + const int BYTES = 16; + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], %3;\n" + "}\n" ::"r"((int)pred), + "r"(smem), "l"(glob_ptr), "n"(BYTES)); +} + +__device__ inline void cp_async4(void* smem_ptr, const void* glob_ptr) { + const int BYTES = 16; + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("{\n" + " cp.async.cg.shared.global [%0], [%1], %2;\n" + "}\n" ::"r"(smem), + "l"(glob_ptr), "n"(BYTES)); +} + +__device__ inline void cp_async_fence() { asm volatile("cp.async.commit_group;\n" ::); } + +template +__device__ inline void cp_async_wait() { + asm volatile("cp.async.wait_group %0;\n" ::"n"(n)); +} + +#endif + +} // namespace gptq_marlin diff --git a/csrc_musa/quantization/gptq_marlin/gptq_marlin_repack.mu b/csrc_musa/quantization/gptq_marlin/gptq_marlin_repack.mu new file mode 100644 index 0000000..135d2a8 --- /dev/null +++ b/csrc_musa/quantization/gptq_marlin/gptq_marlin_repack.mu @@ -0,0 +1,352 @@ +#include "gptq_marlin.cuh" + +namespace gptq_marlin { + +static constexpr int repack_stages = 8; + +static constexpr int repack_threads = 256; + +static constexpr int tile_k_size = tile_size; +static constexpr int tile_n_size = tile_k_size * 4; + +#if 
defined(__MUSA_ARCH__) && __MUSA_ARCH__ < 800 + +template +__global__ void +marlin_repack_kernel(uint32_t const *__restrict__ b_q_weight_ptr, + uint32_t const *__restrict__ perm_ptr, + uint32_t *__restrict__ out_ptr, int size_k, int size_n) {} + +} // namespace gptq_marlin + +torch::Tensor gptq_marlin_repack(torch::Tensor &b_q_weight, torch::Tensor &perm, + int64_t size_k, int64_t size_n, + int64_t num_bits) { + TORCH_CHECK_NOT_IMPLEMENTED( + false, "marlin_repack_from_gptq(..) requires CUDA_ARCH >= 8.0"); + return torch::empty({1, 1}); +} + +#else + +template +__global__ void +marlin_repack_kernel(uint32_t const *__restrict__ b_q_weight_ptr, + uint32_t const *__restrict__ perm_ptr, + uint32_t *__restrict__ out_ptr, int size_k, int size_n) { + constexpr int pack_factor = 32 / num_bits; + + int k_tiles = size_k / tile_k_size; + int n_tiles = size_n / tile_n_size; + int block_k_tiles = div_ceil(k_tiles, gridDim.x); + + int start_k_tile = blockIdx.x * block_k_tiles; + if (start_k_tile >= k_tiles) { + return; + } + + int finish_k_tile = min(start_k_tile + block_k_tiles, k_tiles); + + // Wait until the next thread tile has been loaded to shared memory. + auto wait_for_stage = [&]() { + // We only have `stages - 2` active fetches since we are double buffering + // and can only issue the next fetch when it is guaranteed that the previous + // shared memory load is fully complete (as it may otherwise be + // overwritten). + cp_async_wait(); + __syncthreads(); + }; + + extern __shared__ int4 sh[]; + + constexpr int perm_size = tile_k_size / 4; + + int4 *sh_perm_ptr = sh; + int4 *sh_pipe_ptr = sh_perm_ptr; + if constexpr (has_perm) { + sh_pipe_ptr += perm_size; + } + + constexpr int tile_ints = tile_k_size / pack_factor; + + constexpr int stage_n_threads = tile_n_size / 4; + constexpr int stage_k_threads = has_perm ? 
tile_k_size : tile_ints; + constexpr int stage_size = stage_k_threads * stage_n_threads; + + auto load_perm_to_shared = [&](int k_tile_id) { + int first_k_int4 = (k_tile_id * tile_k_size) / 4; + + int4 const *perm_int4_ptr = reinterpret_cast(perm_ptr); + + if (threadIdx.x < perm_size) { + sh_perm_ptr[threadIdx.x] = perm_int4_ptr[first_k_int4 + threadIdx.x]; + } + __syncthreads(); + }; + + auto fetch_to_shared = [&](int pipe, int k_tile_id, int n_tile_id) { + if (n_tile_id >= n_tiles) { + cp_async_fence(); + return; + } + + int first_n = n_tile_id * tile_n_size; + + int4 *sh_ptr = sh_pipe_ptr + stage_size * pipe; + + if constexpr (has_perm) { + if (threadIdx.x < stage_size) { + int k_id = threadIdx.x / stage_n_threads; + int n_id = threadIdx.x % stage_n_threads; + + uint32_t const *sh_perm_int_ptr = + reinterpret_cast(sh_perm_ptr); + + int src_k = sh_perm_int_ptr[k_id]; + int src_k_packed = src_k / pack_factor; + + cp_async4( + &sh_ptr[k_id * stage_n_threads + n_id], + reinterpret_cast(&( + b_q_weight_ptr[src_k_packed * size_n + first_n + (n_id * 4)]))); + } + + } else { + if (threadIdx.x < stage_size) { + int k_id = threadIdx.x / stage_n_threads; + int n_id = threadIdx.x % stage_n_threads; + + int first_k = k_tile_id * tile_k_size; + int first_k_packed = first_k / pack_factor; + + cp_async4(&sh_ptr[k_id * stage_n_threads + n_id], + reinterpret_cast( + &(b_q_weight_ptr[(first_k_packed + k_id) * size_n + + first_n + (n_id * 4)]))); + } + } + + cp_async_fence(); + }; + + auto repack_tile = [&](int pipe, int k_tile_id, int n_tile_id) { + if (n_tile_id >= n_tiles) { + return; + } + + int warp_id = threadIdx.x / 32; + int th_id = threadIdx.x % 32; + + if (warp_id >= 4) { + return; + } + + int tc_col = th_id / 4; + int tc_row = (th_id % 4) * 2; + + constexpr int tc_offsets[4] = {0, 1, 8, 9}; + + int cur_n = warp_id * 16 + tc_col; + + constexpr int sh_stride = 64; + constexpr uint32_t mask = (1 << num_bits) - 1; + + int4 *sh_stage_ptr = sh_pipe_ptr + stage_size * pipe; + 
uint32_t *sh_stage_int_ptr = reinterpret_cast(sh_stage_ptr); + + uint32_t *sh_perm_int_ptr = reinterpret_cast(sh_perm_ptr); + + uint32_t vals[8]; + + if constexpr (has_perm) { + for (int i = 0; i < 4; i++) { + int k_idx = tc_row + tc_offsets[i]; + + uint32_t src_k = sh_perm_int_ptr[k_idx]; + uint32_t src_k_pos = src_k % pack_factor; + + uint32_t b1_val = sh_stage_int_ptr[k_idx * sh_stride + cur_n]; + uint32_t b1_cur_val = (b1_val >> (src_k_pos * num_bits)) & mask; + + uint32_t b2_val = sh_stage_int_ptr[k_idx * sh_stride + cur_n + 8]; + uint32_t b2_cur_val = (b2_val >> (src_k_pos * num_bits)) & mask; + + vals[i] = b1_cur_val; + vals[4 + i] = b2_cur_val; + } + + } else { + + uint32_t b1_vals[tile_ints]; + uint32_t b2_vals[tile_ints]; + +#pragma unroll + for (int i = 0; i < tile_ints; i++) { + b1_vals[i] = sh_stage_int_ptr[cur_n + sh_stride * i]; + b2_vals[i] = sh_stage_int_ptr[cur_n + 8 + sh_stride * i]; + } + +#pragma unroll + for (int i = 0; i < 4; i++) { + int cur_elem = tc_row + tc_offsets[i]; + int cur_int = cur_elem / pack_factor; + int cur_pos = cur_elem % pack_factor; + + vals[i] = (b1_vals[cur_int] >> (cur_pos * num_bits)) & mask; + vals[4 + i] = (b2_vals[cur_int] >> (cur_pos * num_bits)) & mask; + } + } + + constexpr int tile_size = tile_k_size * tile_n_size / pack_factor; + int out_offset = (k_tile_id * n_tiles + n_tile_id) * tile_size; + + // Result of: + // https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h + if constexpr (num_bits == 4) { + constexpr int pack_idx[8] = {0, 2, 4, 6, 1, 3, 5, 7}; + + uint32_t res = 0; +#pragma unroll + for (int i = 0; i < 8; i++) { + res |= vals[pack_idx[i]] << (i * 4); + } + + out_ptr[out_offset + th_id * 4 + warp_id] = res; + + } else { + constexpr int pack_idx[4] = {0, 2, 1, 3}; + + uint32_t res1 = 0; + uint32_t res2 = 0; +#pragma unroll + for (int i = 0; i < 4; i++) { + res1 |= vals[pack_idx[i]] << (i * 8); + res2 |= 
vals[4 + pack_idx[i]] << (i * 8); + } + + out_ptr[out_offset + th_id * 8 + (warp_id * 2) + 0] = res1; + out_ptr[out_offset + th_id * 8 + (warp_id * 2) + 1] = res2; + } + }; + + auto start_pipes = [&](int k_tile_id, int n_tile_id) { +#pragma unroll + for (int pipe = 0; pipe < repack_stages - 1; pipe++) { + fetch_to_shared(pipe, k_tile_id, n_tile_id + pipe); + } + + wait_for_stage(); + }; +#pragma unroll + for (int k_tile_id = start_k_tile; k_tile_id < finish_k_tile; k_tile_id++) { + int n_tile_id = 0; + + if constexpr (has_perm) { + load_perm_to_shared(k_tile_id); + } + + start_pipes(k_tile_id, n_tile_id); + + while (n_tile_id < n_tiles) { +#pragma unroll + for (int pipe = 0; pipe < repack_stages; pipe++) { + fetch_to_shared((pipe + repack_stages - 1) % repack_stages, k_tile_id, + n_tile_id + pipe + repack_stages - 1); + repack_tile(pipe, k_tile_id, n_tile_id + pipe); + wait_for_stage(); + } + n_tile_id += repack_stages; + } + } +} + +} // namespace gptq_marlin + +#define CALL_IF(NUM_BITS, HAS_PERM) \ + else if (num_bits == NUM_BITS && has_perm == HAS_PERM) { \ + musaFuncSetAttribute( \ + gptq_marlin::marlin_repack_kernel, \ + musaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ + gptq_marlin::marlin_repack_kernel \ + <<>>( \ + b_q_weight_ptr, perm_ptr, out_ptr, size_k, size_n); \ + } + +torch::Tensor gptq_marlin_repack(torch::Tensor &b_q_weight, torch::Tensor &perm, + int64_t size_k, int64_t size_n, + int64_t num_bits) { + // Verify compatibility with marlin tile of 16x64 + TORCH_CHECK(size_k % gptq_marlin::tile_k_size == 0, "size_k = ", size_k, + " is not divisible by tile_k_size = ", gptq_marlin::tile_k_size); + TORCH_CHECK(size_n % gptq_marlin::tile_n_size == 0, "size_n = ", size_n, + " is not divisible by tile_n_size = ", gptq_marlin::tile_n_size); + + TORCH_CHECK(num_bits == 4 || num_bits == 8, + "num_bits must be 4 or 8. 
Got = ", num_bits); + int const pack_factor = 32 / num_bits; + + // Verify B + TORCH_CHECK((size_k / pack_factor) == b_q_weight.size(0), + "Shape mismatch: b_q_weight.size(0) = ", b_q_weight.size(0), + ", size_k = ", size_k, ", pack_factor = ", pack_factor); + TORCH_CHECK(b_q_weight.size(1) == size_n, + "b_q_weight.size(1) = ", b_q_weight.size(1), + " is not size_n = ", size_n); + + // Verify device and strides + TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); + TORCH_CHECK(b_q_weight.is_contiguous(), "b_q_weight is not contiguous"); + TORCH_CHECK(b_q_weight.dtype() == at::kInt, "b_q_weight type is not kInt"); + + TORCH_CHECK(perm.device().is_cuda(), "perm is not on GPU"); + TORCH_CHECK(perm.is_contiguous(), "perm is not contiguous"); + TORCH_CHECK(perm.dtype() == at::kInt, "perm type is not at::kInt"); + + // Alloc buffers + const at::musa::OptionalMUSAGuard device_guard(device_of(b_q_weight)); + auto options = torch::TensorOptions() + .dtype(b_q_weight.dtype()) + .device(b_q_weight.device()); + torch::Tensor out = + torch::empty({size_k / gptq_marlin::tile_size, + size_n * gptq_marlin::tile_size / pack_factor}, + options); + + // Detect if there is act_order + bool has_perm = perm.size(0) != 0; + + // Get ptrs + uint32_t const *b_q_weight_ptr = + reinterpret_cast(b_q_weight.data_ptr()); + uint32_t const *perm_ptr = + reinterpret_cast(perm.data_ptr()); + uint32_t *out_ptr = reinterpret_cast(out.data_ptr()); + + // Get dev info + int dev = b_q_weight.get_device(); + musaStream_t stream = at::cuda::getCurrentMUSAStream(dev); + int blocks; + musaDeviceGetAttribute(&blocks, musaDevAttrMultiProcessorCount, dev); + + int max_shared_mem = 0; + musaDeviceGetAttribute(&max_shared_mem, + musaDevAttrMaxSharedMemoryPerBlockOptin, dev); + TORCH_CHECK(max_shared_mem > 0); + + if (false) { + } + CALL_IF(4, false) + CALL_IF(4, true) + CALL_IF(8, false) + CALL_IF(8, true) + else { + TORCH_CHECK(false, "Unsupported repack config: num_bits = ", num_bits, + ", 
has_perm = ", has_perm); + } + + return out; +} + +#endif diff --git a/csrc_musa/quantization/marlin/.LICENSE b/csrc_musa/quantization/marlin/.LICENSE new file mode 100644 index 0000000..1d1e4cf --- /dev/null +++ b/csrc_musa/quantization/marlin/.LICENSE @@ -0,0 +1,209 @@ +Contains code from https://github.com/IST-DASLab/marlin + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------------ + +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. diff --git a/csrc_musa/quantization/marlin/marlin_cuda_kernel.mu b/csrc_musa/quantization/marlin/marlin_cuda_kernel.mu new file mode 100644 index 0000000..f96ddf9 --- /dev/null +++ b/csrc_musa/quantization/marlin/marlin_cuda_kernel.mu @@ -0,0 +1,1138 @@ +/* + * Modified by Neural Magic + * Copyright (C) Marlin.2024 Elias Frantar + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "torch_musa/csrc/aten/musa/MUSAContext.h" +#include "torch_musa/csrc/core/MUSAGuard.h" +#include +#include +#include + +#include + +template inline std::string str(T x) { return std::to_string(x); } + +namespace marlin { + +constexpr int ceildiv(int a, int b) { return (a + b - 1) / b; } + +#if defined(__MUSA_ARCH__) && __MUSA_ARCH__ >= 800 + +// Instances of `Vec` are used to organize groups of >>registers<<, as needed +// for instance as inputs to tensor core operations. Consequently, all +// corresponding index accesses must be compile-time constants, which is why we +// extensively use `#pragma unroll` throughout the kernel code to guarantee +// this. +template struct Vec { + T elems[n]; + __device__ T &operator[](int i) { return elems[i]; } +}; + +using I4 = Vec; + +// Matrix fragments for tensor core instructions; their precise layout is +// documented here: +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type +using FragA = Vec; +using FragB = Vec; +using FragC = Vec; +using FragS = Vec; // quantization scales + +// Predicated asynchronous global->shared copy; used for inputs A where we apply +// predication to handle batchsizes that are not multiples of 16. 
+__device__ inline void cp_async4_pred(void *smem_ptr, const void *glob_ptr, + bool pred = true) { + const int BYTES = 16; + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], %3;\n" + "}\n" ::"r"((int)pred), + "r"(smem), "l"(glob_ptr), "n"(BYTES)); +} + +// Asynchronous global->shared copy +__device__ inline void cp_async4(void *smem_ptr, const void *glob_ptr) { + const int BYTES = 16; + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("{\n" + " cp.async.cg.shared.global [%0], [%1], %2;\n" + "}\n" :: "r"(smem), "l"(glob_ptr), "n"(BYTES)); +} + +// Async copy fence. +__device__ inline void cp_async_fence() { + asm volatile("cp.async.commit_group;\n" ::); +} + +// Wait until at most `n` async copy stages are still pending. +template __device__ inline void cp_async_wait() { + asm volatile("cp.async.wait_group %0;\n" ::"n"(n)); +} + +// m16n8k16 tensor core mma instruction with fp16 inputs and fp32 +// output/accumulation. +__device__ inline void mma(const FragA &a_frag, const FragB &frag_b, + FragC &frag_c) { + const uint32_t *a = reinterpret_cast(&a_frag); + const uint32_t *b = reinterpret_cast(&frag_b); + float *c = reinterpret_cast(&frag_c); + asm volatile("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), + "r"(b[1]), "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); +} + +// Instruction for loading a full 16x16 matrix fragment of operand A from shared +// memory, directly in tensor core layout. 
+__device__ inline void ldsm4(FragA &frag_a, const void *smem_ptr) { + uint32_t *a = reinterpret_cast(&frag_a); + uint32_t smem = static_cast(__cvta_generic_to_shared(smem_ptr)); + asm volatile("ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0,%1,%2,%3}, [%4];\n" + : "=r"(a[0]), "=r"(a[1]), "=r"(a[2]), "=r"(a[3]) + : "r"(smem)); +} + +// Lookup-table based 3-input logical operation; explicitly used for +// dequantization as the compiler does not seem to automatically recognize it in +// all cases. +template __device__ inline int lop3(int a, int b, int c) { + int res; + asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" + : "=r"(res) + : "r"(a), "r"(b), "r"(c), "n"(lut)); + return res; +} + +// Efficiently dequantize an int32 value into a full B-fragment of 4 fp16 +// values. We mostly follow the strategy in the link below, with some small +// changes: +// https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h +__device__ inline FragB dequant(int q) { + const int LO = 0x000f000f; + const int HI = 0x00f000f0; + const int EX = 0x64006400; + // Guarantee that the `(a & b) | c` operations are LOP3s. + int lo = lop3<(0xf0 & 0xcc) | 0xaa>(q, LO, EX); + int hi = lop3<(0xf0 & 0xcc) | 0xaa>(q, HI, EX); + // We want signed int4 outputs, hence we fuse the `-8` symmetric zero point + // directly into `SUB` and `ADD`. + const int SUB = 0x64086408; + const int MUL = 0x2c002c00; + const int ADD = 0xd480d480; + FragB frag_b; + frag_b[0] = __hsub2(*reinterpret_cast(&lo), + *reinterpret_cast(&SUB)); + frag_b[1] = __hfma2(*reinterpret_cast(&hi), + *reinterpret_cast(&MUL), + *reinterpret_cast(&ADD)); + return frag_b; +} + +// Multiply dequantized values by the corresponding quantization scale; used +// only for grouped quantization. 
+__device__ inline void scale(FragB &frag_b, FragS &frag_s, int i) { + half2 s = __half2half2(reinterpret_cast<__half *>(&frag_s)[i]); + frag_b[0] = __hmul2(frag_b[0], s); + frag_b[1] = __hmul2(frag_b[1], s); +} + +// Wait until barrier reaches `count`, then lock for current threadblock. +__device__ inline void barrier_acquire(int *lock, int count) { + if (threadIdx.x == 0) { + int state = -1; + do + // Guarantee that subsequent writes by this threadblock will be visible + // globally. + asm volatile("ld.global.acquire.gpu.b32 %0, [%1];\n" + : "=r"(state) + : "l"(lock)); + while (state != count); + } + __syncthreads(); +} + +// Release barrier and increment visitation count. +__device__ inline void barrier_release(int *lock, bool reset = false) { + __syncthreads(); + if (threadIdx.x == 0) { + if (reset) { + lock[0] = 0; + return; + } + int val = 1; + // Make sure that all writes since acquiring this barrier are visible + // globally, while releasing the barrier. + asm volatile("fence.acq_rel.gpu;\n"); + asm volatile("red.relaxed.gpu.global.add.s32 [%0], %1;\n" + : + : "l"(lock), "r"(val)); + } +} + +template shared + // fetch pipeline + const int group_blocks = -1 // number of consecutive 16x16 blocks with + // a separate quantization scale + > +__global__ void +Marlin(const int4 *__restrict__ A, // fp16 input matrix of shape mxk + const int4 *__restrict__ B, // 4bit quantized weight matrix of shape kxn + int4 *__restrict__ C, // fp16 output buffer of shape mxn + const int4 + *__restrict__ s, // fp16 quantization scales of shape (k/groupsize)xn + int prob_m, // batch dimension m + int prob_n, // output dimension n + int prob_k, // reduction dimension k + int *locks // extra global storage for barrier synchronization +) { + // Each threadblock processes one "stripe" of the B matrix with (roughly) the + // same size, which might involve multiple column "slices" (of width 16 * + // `thread_n_blocks`). 
Stripes are defined as shown in the 3x3 matrix 5 SM + // example: + // 0 1 3 + // 0 2 3 + // 1 2 4 + // While this kind of partitioning makes things somewhat more complicated, it + // ensures good utilization of all SMs for many kinds of shape and GPU + // configurations, while requiring as few slow global cross-threadblock + // reductions as possible. + + // For larger GEMMs we run multiple batchsize 64 versions in parallel for a + // better partitioning with less reductions + int parallel = 1; + if (prob_m > 16 * thread_m_blocks) { + parallel = prob_m / (16 * thread_m_blocks); + prob_m = 16 * thread_m_blocks; + } + + int k_tiles = prob_k / 16 / thread_k_blocks; + int n_tiles = prob_n / 16 / thread_n_blocks; + int iters = ceildiv(k_tiles * n_tiles * parallel, gridDim.x); + // Ensure that the number of tiles in each stripe is a multiple of the + // groupsize; this avoids an annoying special case where a stripe starts in + // the middle of group. + if (group_blocks != -1) + iters = (group_blocks / thread_k_blocks) * + ceildiv(iters, (group_blocks / thread_k_blocks)); + + int slice_row = (iters * blockIdx.x) % k_tiles; + int slice_col_par = (iters * blockIdx.x) / k_tiles; + int slice_col = slice_col_par; + int slice_iters; // number of threadblock tiles in the current slice + int slice_count = + 0; // total number of active threadblocks in the current slice + int slice_idx; // index of threadblock in current slice; numbered bottom to + // top + + // We can easily implement parallel problem execution by just remapping + // indices and advancing global pointers + if (slice_col_par >= n_tiles) { + A += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_k / 8; + C += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_n / 8; + locks += (slice_col_par / n_tiles) * n_tiles; + slice_col = slice_col_par % n_tiles; + } + + // Compute all information about the current slice which is required for + // synchronization. 
+ auto init_slice = [&]() { + slice_iters = + iters * (blockIdx.x + 1) - (k_tiles * slice_col_par + slice_row); + if (slice_iters < 0 || slice_col_par >= n_tiles * parallel) + slice_iters = 0; + if (slice_iters == 0) + return; + if (slice_row + slice_iters > k_tiles) + slice_iters = k_tiles - slice_row; + slice_count = 1; + slice_idx = 0; + int col_first = iters * ceildiv(k_tiles * slice_col_par, iters); + if (col_first <= k_tiles * (slice_col_par + 1)) { + int col_off = col_first - k_tiles * slice_col_par; + slice_count = ceildiv(k_tiles - col_off, iters); + if (col_off > 0) + slice_count++; + int delta_first = iters * blockIdx.x - col_first; + if (delta_first < 0 || (col_off == 0 && delta_first == 0)) + slice_idx = slice_count - 1; + else { + slice_idx = slice_count - 1 - delta_first / iters; + if (col_off > 0) + slice_idx--; + } + } + if (slice_col == n_tiles) { + A += 16 * thread_m_blocks * prob_k / 8; + C += 16 * thread_m_blocks * prob_n / 8; + locks += n_tiles; + slice_col = 0; + } + }; + init_slice(); + + int a_gl_stride = prob_k / 8; // stride of the A matrix in global memory + // We typically use `constexpr` to indicate that this value is a compile-time + // constant + constexpr int a_sh_stride = + 16 * thread_k_blocks / 8; // stride of an A matrix tile in shared memory + constexpr int a_gl_rd_delta_o = + 16 * thread_k_blocks / + 8; // delta between subsequent A tiles in global memory + int a_gl_rd_delta_i = + a_gl_stride * + (threads / a_gl_rd_delta_o); // between subsequent accesses within a tile + constexpr int a_sh_wr_delta = + a_sh_stride * (threads / a_gl_rd_delta_o); // between shared memory writes + constexpr int a_sh_rd_delta_o = + 2 * ((threads / 32) / + (thread_n_blocks / 4)); // between shared memory tile reads + constexpr int a_sh_rd_delta_i = + a_sh_stride * 16; // within a shared memory tile + constexpr int a_sh_stage = + a_sh_stride * (16 * thread_m_blocks); // overall size of a tile + constexpr int a_sh_wr_iters = + ceildiv(a_sh_stage, + 
a_sh_wr_delta); // number of shared write iterations for a tile + + int b_gl_stride = 16 * prob_n / 32; + constexpr int b_sh_stride = 32 * thread_n_blocks / 4; + int b_gl_rd_delta_o = b_gl_stride * thread_k_blocks; + int b_gl_rd_delta_i = b_gl_stride * (threads / b_sh_stride); + constexpr int b_sh_wr_delta = threads; + constexpr int b_sh_rd_delta = threads; + constexpr int b_sh_stage = b_sh_stride * thread_k_blocks; + constexpr int b_sh_wr_iters = b_sh_stage / b_sh_wr_delta; + + int s_gl_stride = prob_n / 8; + constexpr int s_sh_stride = 16 * thread_n_blocks / 8; + constexpr int s_sh_stage = s_sh_stride; + int s_gl_rd_delta = s_gl_stride; + + // Global A read index of current thread. + int a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); + a_gl_rd += a_gl_rd_delta_o * slice_row; + // Shared write index of current thread. + int a_sh_wr = a_sh_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); + // Shared read index. + int a_sh_rd = + a_sh_stride * ((threadIdx.x % 32) % 16) + (threadIdx.x % 32) / 16; + a_sh_rd += 2 * ((threadIdx.x / 32) / (thread_n_blocks / 4)); + + int b_gl_rd = + b_gl_stride * (threadIdx.x / b_sh_stride) + (threadIdx.x % b_sh_stride); + b_gl_rd += b_sh_stride * slice_col; + b_gl_rd += b_gl_rd_delta_o * slice_row; + int b_sh_wr = threadIdx.x; + int b_sh_rd = threadIdx.x; + + int s_gl_rd = s_gl_stride * ((thread_k_blocks * slice_row) / group_blocks) + + s_sh_stride * slice_col + threadIdx.x; + int s_sh_wr = threadIdx.x; + int s_sh_rd; + // We use a different scale layout for grouped and column-wise quantization as + // we scale a `half2` tile in column-major layout in the former and in + // row-major in the latter case. 
+ if (group_blocks != -1) + s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) / 4; + else + s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) % 4; + + // Precompute which thread should not read memory in which iterations; this is + // needed if there are more threads than required for a certain tilesize or + // when the batchsize is not a multiple of 16. + bool a_sh_wr_pred[a_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) + a_sh_wr_pred[i] = a_sh_wr_delta * i + a_sh_wr < a_sh_stride * prob_m; + bool s_sh_wr_pred = threadIdx.x < s_sh_stride; + + // To ensure that writing and reading A tiles to/from shared memory, the + // latter in fragment format, is fully bank conflict free, we need to use a + // rather fancy XOR-based layout. The key here is that neither reads nor + // writes of the 16-byte `int4` blocks of 8 consecutive threads involve the + // same shared memory banks. Further, it seems (based on NSight-Compute) that + // each warp must also write a consecutive memory segment? + auto transform_a = [&](int i) { + int row = i / a_gl_rd_delta_o; + return a_gl_rd_delta_o * row + (i % a_gl_rd_delta_o) ^ row; + }; + // Since the computation of this remapping is non-trivial and, due to our main + // loop unrolls, all shared memory accesses are static, we simply precompute + // both transformed reads and writes. 
+ int a_sh_wr_trans[a_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) + a_sh_wr_trans[i] = transform_a(a_sh_wr_delta * i + a_sh_wr); + int a_sh_rd_trans[b_sh_wr_iters][thread_m_blocks]; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) { +#pragma unroll + for (int j = 0; j < thread_m_blocks; j++) + a_sh_rd_trans[i][j] = + transform_a(a_sh_rd_delta_o * i + a_sh_rd_delta_i * j + a_sh_rd); + } + + // Since B-accesses have non-constant stride they have to be computed at + // runtime; we break dependencies between subsequent accesses with a tile by + // maintining multiple pointers (we have enough registers), a tiny + // optimization. + const int4 *B_ptr[b_sh_wr_iters]; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] = B + b_gl_rd_delta_i * i + b_gl_rd; + + extern __shared__ int4 sh[]; + // Shared memory storage for global fetch pipelines. + int4 *sh_a = sh; + int4 *sh_b = sh_a + (stages * a_sh_stage); + int4 *sh_s = sh_b + (stages * b_sh_stage); + // Register storage for double buffer of shared memory reads. + FragA frag_a[2][thread_m_blocks]; + I4 frag_b_quant[2]; + FragC frag_c[thread_m_blocks][4][2]; + FragS frag_s[2][4]; + + // Zero accumulators. + auto zero_accums = [&]() { +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4 * 2 * 4; i++) + reinterpret_cast(frag_c)[i] = 0; + }; + + // Asynchronously fetch the next A, B and s tile from global to the next + // shared memory pipeline location. 
+ auto fetch_to_shared = [&](int pipe, int a_off, bool pred = true) { + if (pred) { + int4 *sh_a_stage = sh_a + a_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < a_sh_wr_iters; i++) { + cp_async4_pred( + &sh_a_stage[a_sh_wr_trans[i]], + &A[a_gl_rd_delta_i * i + a_gl_rd + a_gl_rd_delta_o * a_off], + a_sh_wr_pred[i]); + } + int4 *sh_b_stage = sh_b + b_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) { + cp_async4(&sh_b_stage[b_sh_wr_delta * i + b_sh_wr], B_ptr[i]); + B_ptr[i] += b_gl_rd_delta_o; + } + // Only fetch scales if this tile starts a new group + if (group_blocks != -1 && pipe % (group_blocks / thread_k_blocks) == 0) { + int4 *sh_s_stage = sh_s + s_sh_stage * pipe; + if (s_sh_wr_pred) + cp_async4(&sh_s_stage[s_sh_wr], &s[s_gl_rd]); + s_gl_rd += s_gl_rd_delta; + } + } + // Insert a fence even when we are winding down the pipeline to ensure that + // waiting is also correct at this point. + cp_async_fence(); + }; + + // Wait until the next thread tile has been loaded to shared memory. + auto wait_for_stage = [&]() { + // We only have `stages - 2` active fetches since we are double buffering + // and can only issue the next fetch when it is guaranteed that the previous + // shared memory load is fully complete (as it may otherwise be + // overwritten). + cp_async_wait(); + __syncthreads(); + }; + + // Load the next sub-tile from the current location in the shared memory pipe + // into the current register buffer. + auto fetch_to_registers = [&](int k, int pipe) { + // It may seem inefficient that we reload the groups for every sub-tile; + // however, this does not seem to be a significant bottleneck, while some + // theoretically better attempts have lead to bad instruction ordering by + // the compiler and correspondingly a noticeable drop in performance. 
+ if (group_blocks != -1) { + int4 *sh_s_stage = + sh_s + s_sh_stage * ((group_blocks / thread_k_blocks) * + (pipe / (group_blocks / thread_k_blocks))); + reinterpret_cast(&frag_s[k % 2])[0] = sh_s_stage[s_sh_rd]; + } + int4 *sh_a_stage = sh_a + a_sh_stage * pipe; +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) + ldsm4(frag_a[k % 2][i], &sh_a_stage[a_sh_rd_trans[k % b_sh_wr_iters][i]]); + int4 *sh_b_stage = sh_b + b_sh_stage * pipe; + frag_b_quant[k % 2] = *reinterpret_cast( + &sh_b_stage[b_sh_rd_delta * (k % b_sh_wr_iters) + b_sh_rd]); + }; + + // Execute the actual tensor core matmul of a sub-tile. + auto matmul = [&](int k) { +// We have the m dimension as the inner loop in order to encourage overlapping +// dequantization and matmul operations. +#pragma unroll + for (int j = 0; j < 4; j++) { + int b_quant = frag_b_quant[k % 2][j]; + int b_quant_shift = b_quant >> 8; + FragB frag_b0 = dequant(b_quant); + // If there are no groups, we can just scale the final output once and can + // avoid doing so for each weight. + if (group_blocks != -1) + scale(frag_b0, frag_s[k % 2][j], 0); + FragB frag_b1 = dequant(b_quant_shift); + if (group_blocks != -1) + scale(frag_b1, frag_s[k % 2][j], 1); +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) { + mma(frag_a[k % 2][i], frag_b0, frag_c[i][j][0]); + mma(frag_a[k % 2][i], frag_b1, frag_c[i][j][1]); + } + } + }; + + // Since we slice across the k dimension of a tile in order to increase the + // number of warps while keeping the n dimension of a tile reasonable, we have + // multiple warps that accumulate their partial sums of the same output + // location; which we have to reduce over in the end. We do in shared memory. 
+ auto thread_block_reduce = [&]() { + constexpr int red_off = threads / b_sh_stride / 2; + if (red_off >= 1) { + int red_idx = threadIdx.x / b_sh_stride; + constexpr int red_sh_stride = b_sh_stride * 4 * 2; + constexpr int red_sh_delta = b_sh_stride; + int red_sh_rd = red_sh_stride * (threadIdx.x / b_sh_stride) + + (threadIdx.x % b_sh_stride); + + // Parallel logarithmic shared memory reduction. We make sure to avoid any + // unnecessary read or write iterations, e.g., for two warps we write only + // once by warp 1 and read only once by warp 0. + +#pragma unroll + for (int m_block = 0; m_block < thread_m_blocks; m_block++) { +#pragma unroll + for (int i = red_off; i > 0; i /= 2) { + if (i <= red_idx && red_idx < 2 * i) { +#pragma unroll + for (int j = 0; j < 4 * 2; j++) { + int red_sh_wr = + red_sh_delta * j + (red_sh_rd - red_sh_stride * i); + if (i < red_off) { + float *c_rd = reinterpret_cast( + &sh[red_sh_delta * j + red_sh_rd]); + float *c_wr = reinterpret_cast(&sh[red_sh_wr]); +#pragma unroll + for (int k = 0; k < 4; k++) + reinterpret_cast(frag_c)[4 * 2 * m_block + j][k] += + c_rd[k] + c_wr[k]; + } + sh[red_sh_wr] = + reinterpret_cast(&frag_c)[4 * 2 * m_block + j]; + } + } + __syncthreads(); + } + if (red_idx == 0) { +#pragma unroll + for (int i = 0; i < 4 * 2; i++) { + float *c_rd = + reinterpret_cast(&sh[red_sh_delta * i + red_sh_rd]); +#pragma unroll + for (int j = 0; j < 4; j++) + reinterpret_cast(frag_c)[4 * 2 * m_block + i][j] += + c_rd[j]; + } + } + __syncthreads(); + } + } + }; + + // Since multiple threadblocks may process parts of the same column slice, we + // finally have to globally reduce over the results. As the striped partitioning + // minimizes the number of such reductions and our outputs are usually rather + // small, we perform this reduction serially in L2 cache. 
+ auto global_reduce = [&](bool first = false, bool last = false) { + // We are very careful here to reduce directly in the output buffer to + // maximize L2 cache utilization in this step. To do this, we write out + // results in FP16 (but still reduce with FP32 compute). + constexpr int active_threads = 32 * thread_n_blocks / 4; + if (threadIdx.x < active_threads) { + int c_gl_stride = prob_n / 8; + int c_gl_wr_delta_o = 8 * c_gl_stride; + int c_gl_wr_delta_i = 4 * (active_threads / 32); + int c_gl_wr = c_gl_stride * ((threadIdx.x % 32) / 4) + + 4 * (threadIdx.x / 32) + threadIdx.x % 4; + c_gl_wr += (2 * thread_n_blocks) * slice_col; + constexpr int c_sh_wr_delta = active_threads; + int c_sh_wr = threadIdx.x; + + int row = (threadIdx.x % 32) / 4; + + if (!first) { +// Interestingly, doing direct global accesses here really seems to mess up the +// compiler and lead to slowdowns, hence we also use async-copies even though +// these fetches are not actually asynchronous. +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4; i++) { + cp_async4_pred(&sh[c_sh_wr + c_sh_wr_delta * i], + &C[c_gl_wr + c_gl_wr_delta_o * (i / 2) + + c_gl_wr_delta_i * (i % 2)], + i < (thread_m_blocks - 1) * 4 || + 8 * (i / 2) + row < prob_m); + } + cp_async_fence(); + cp_async_wait<0>(); + } + +#pragma unroll + for (int i = 0; i < thread_m_blocks * 4; i++) { + if (i < (thread_m_blocks - 1) * 4 || 8 * (i / 2) + row < prob_m) { + if (!first) { + int4 c_red = sh[c_sh_wr + i * c_sh_wr_delta]; +#pragma unroll + for (int j = 0; j < 2 * 4; j++) { + reinterpret_cast( + &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)] += + __half2float(reinterpret_cast<__half *>(&c_red)[j]); + } + } + if (!last) { + int4 c; +#pragma unroll + for (int j = 0; j < 2 * 4; j++) { + reinterpret_cast<__half *>(&c)[j] = + __float2half(reinterpret_cast( + &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)]); + } + C[c_gl_wr + c_gl_wr_delta_o * (i / 2) + c_gl_wr_delta_i * (i % 2)] = + c; + } + } + } + } + }; + + // Write out 
the reduce final result in the correct layout. We only actually + // reshuffle matrix fragments in this step, the reduction above is performed + // in fragment layout. + auto write_result = [&]() { + int c_gl_stride = prob_n / 8; + constexpr int c_sh_stride = 2 * thread_n_blocks + 1; + int c_gl_wr_delta = c_gl_stride * (threads / (2 * thread_n_blocks)); + constexpr int c_sh_rd_delta = + c_sh_stride * (threads / (2 * thread_n_blocks)); + + int c_gl_wr = c_gl_stride * (threadIdx.x / (2 * thread_n_blocks)) + + (threadIdx.x % (2 * thread_n_blocks)); + c_gl_wr += (2 * thread_n_blocks) * slice_col; + int c_sh_wr = + (4 * c_sh_stride) * ((threadIdx.x % 32) / 4) + (threadIdx.x % 32) % 4; + c_sh_wr += 32 * (threadIdx.x / 32); + int c_sh_rd = c_sh_stride * (threadIdx.x / (2 * thread_n_blocks)) + + (threadIdx.x % (2 * thread_n_blocks)); + + int c_gl_wr_end = c_gl_stride * prob_m; + + // We first reorder in shared memory to guarantee the most efficient final + // global write patterns + auto write = [&](int idx, float c0, float c1, FragS &s) { + half2 res = __halves2half2(__float2half(c0), __float2half(c1)); + if (group_blocks == + -1) // for per-column quantization we finally apply the scale here + res = __hmul2(res, s[0]); + ((half2 *)sh)[idx] = res; + }; + if (threadIdx.x / 32 < thread_n_blocks / 4) { +#pragma unroll + for (int i = 0; i < thread_m_blocks; i++) { +#pragma unroll + for (int j = 0; j < 4; j++) { + int wr = c_sh_wr + 8 * j; + write(wr + (4 * c_sh_stride) * 0 + 0, frag_c[i][j][0][0], + frag_c[i][j][0][1], frag_s[j / 2][2 * (j % 2) + 0]); + write(wr + (4 * c_sh_stride) * 8 + 0, frag_c[i][j][0][2], + frag_c[i][j][0][3], frag_s[j / 2][2 * (j % 2) + 0]); + write(wr + (4 * c_sh_stride) * 0 + 4, frag_c[i][j][1][0], + frag_c[i][j][1][1], frag_s[j / 2][2 * (j % 2) + 1]); + write(wr + (4 * c_sh_stride) * 8 + 4, frag_c[i][j][1][2], + frag_c[i][j][1][3], frag_s[j / 2][2 * (j % 2) + 1]); + } + c_sh_wr += 16 * (4 * c_sh_stride); + } + } + __syncthreads(); + +#pragma unroll + 
for (int i = 0; + i < ceildiv(16 * thread_m_blocks, threads / (2 * thread_n_blocks)); + i++) { + if (c_gl_wr < c_gl_wr_end) { + C[c_gl_wr] = sh[c_sh_rd]; + c_gl_wr += c_gl_wr_delta; + c_sh_rd += c_sh_rd_delta; + } + } + }; + + // Start global fetch and register load pipelines. + auto start_pipes = [&]() { +#pragma unroll + for (int i = 0; i < stages - 1; i++) + fetch_to_shared(i, i, i < slice_iters); + zero_accums(); + wait_for_stage(); + fetch_to_registers(0, 0); + a_gl_rd += a_gl_rd_delta_o * (stages - 1); + }; + start_pipes(); + + // Main loop. + while (slice_iters) { +// We unroll over both the global fetch and the register load pipeline to ensure +// all shared memory accesses are static. Note that both pipelines have even +// length meaning that the next iteration will always start at index 0. +#pragma unroll + for (int pipe = 0; pipe < stages;) { +#pragma unroll + for (int k = 0; k < b_sh_wr_iters; k++) { + fetch_to_registers(k + 1, pipe % stages); + if (k == b_sh_wr_iters - 2) { + fetch_to_shared((pipe + stages - 1) % stages, pipe, + slice_iters >= stages); + pipe++; + wait_for_stage(); + } + matmul(k); + } + slice_iters--; + if (slice_iters == 0) + break; + } + a_gl_rd += a_gl_rd_delta_o * stages; + + // Process results and, if necessary, proceed to the next column slice. + // While this pattern may not be the most readable, other ways of writing + // the loop seemed to noticeably worse performance after compilation. 
+ if (slice_iters == 0) { + cp_async_wait<0>(); + bool last = slice_idx == slice_count - 1; + // For per-column scales, we only fetch them here in the final step before + // write-out + if (group_blocks == -1 && last) { + if (s_sh_wr_pred) + cp_async4(&sh_s[s_sh_wr], &s[s_gl_rd]); + cp_async_fence(); + } + thread_block_reduce(); + if (group_blocks == -1 && last) { + cp_async_wait<0>(); + __syncthreads(); + if (threadIdx.x / 32 < thread_n_blocks / 4) { + reinterpret_cast(&frag_s)[0] = sh_s[s_sh_rd + 0]; + reinterpret_cast(&frag_s)[1] = sh_s[s_sh_rd + 4]; + } + } + if (slice_count > 1) { // only globally reduce if there is more than one + // block in a slice + barrier_acquire(&locks[slice_col], slice_idx); + global_reduce(slice_idx == 0, last); + barrier_release(&locks[slice_col], last); + } + if (last) // only the last block in a slice actually writes the result + write_result(); + slice_row = 0; + slice_col_par++; + slice_col++; + init_slice(); + if (slice_iters) { + a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) + + (threadIdx.x % a_gl_rd_delta_o); +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] += b_sh_stride - b_gl_rd_delta_o * k_tiles; + if (slice_col == 0) { +#pragma unroll + for (int i = 0; i < b_sh_wr_iters; i++) + B_ptr[i] -= b_gl_stride; + } + s_gl_rd = s_sh_stride * slice_col + threadIdx.x; + start_pipes(); + } + } + } +} + +#else + +template shared + // fetch pipeline + const int group_blocks = -1 // number of consecutive 16x16 blocks with + // a separate quantization scale + > +__global__ void +Marlin(const int4 *__restrict__ A, // fp16 input matrix of shape mxk + const int4 *__restrict__ B, // 4bit quantized weight matrix of shape kxn + int4 *__restrict__ C, // fp16 output buffer of shape mxn + const int4 + *__restrict__ s, // fp16 quantization scales of shape (k/groupsize)xn + int prob_m, // batch dimension m + int prob_n, // output dimension n + int prob_k, // reduction dimension k + int *locks // extra global storage 
for barrier synchronization +) { + // Marlin is not implemented yet for SM < 8.0 + assert(false); + return; +} + +#endif + +// 8 warps are a good choice since every SM has 4 schedulers and having more +// than 1 warp per schedule allows some more latency hiding. At the same time, +// we want relatively few warps to have many registers per warp and small tiles. +const int USER_THREADS = + 256; // Note: This is only used with user-provided thread_k/n +const int STAGES = 4; // 4 pipeline stages fit into shared memory +const int SHARED_MEM = + 96 * 1024; // max shared memory on compute capability 8.6 (< 8.0) + +static constexpr int min_thread_n = 64; +static constexpr int min_thread_k = 64; + +static constexpr int tile_size = 16; +static constexpr int max_par = 16; + +static constexpr int pack_factor_4bit = + 8; // We have 8 4-bit vals inside a 32 bit + +#define __CALL_IF(THREAD_M_BLOCKS, THREAD_N_BLOCKS, THREAD_K_BLOCKS, \ + GROUP_BLOCKS, NUM_THREADS) \ + else if (thread_m_blocks == THREAD_M_BLOCKS && \ + thread_n_blocks == THREAD_N_BLOCKS && \ + thread_k_blocks == THREAD_K_BLOCKS && \ + group_blocks == GROUP_BLOCKS && num_threads == NUM_THREADS) { \ + musaFuncSetAttribute(Marlin, \ + musaFuncAttributeMaxDynamicSharedMemorySize, \ + SHARED_MEM); \ + Marlin<<>>( \ + A_ptr, B_ptr, C_ptr, s_ptr, prob_m, prob_n, prob_k, locks); \ + } + +typedef struct { + int thread_k; + int thread_n; + int num_threads; +} thread_config_t; + +thread_config_t small_batch_thread_configs[] = { + // Ordered by priority + + // thread_k, thread_n, num_threads + {128, 128, 256}, // Default + {128, 64, 128}, // Reduce N 2X, same K + {64, 256, 256}, // Reduce K 2X, increase N 2X + {64, 128, 128}, // Reduce K 2X, same N +}; + +thread_config_t large_batch_thread_configs[] = { + // Ordered by priority + + // thread_k, thread_n, num_threads + {64, 256, 256}, // Default + {128, 128, 256}, // Reduce N 2X, increase K 2X + {64, 128, 128}, // Reduce N 2X, same K + {128, 64, 128}, // Reduce N 4X, increase K 
2X +}; + +bool is_valid_config(thread_config_t const &th_config, int prob_m, int prob_n, + int prob_k) { + // Sanity + if (th_config.thread_k == -1 || th_config.thread_n == -1 || + th_config.num_threads == -1) { + return false; + } + + // Verify K/N are divisible by thread K/N + if (prob_k % th_config.thread_k != 0 || prob_n % th_config.thread_n != 0) { + return false; + } + + // thread_k can be only 128 or 64 (because it must be less than groupsize + // which is 128) + if (th_config.thread_k != 128 && th_config.thread_k != 64) { + return false; + } + + // Verify min for thread K/N + if (th_config.thread_n < min_thread_n || th_config.thread_k < min_thread_k) { + return false; + } + + // num_threads must be at least 128 (= 4 warps) + if (th_config.num_threads < 128) { + return false; + } + + return true; +} + +thread_config_t determine_thread_config(int prob_m, int prob_n, int prob_k) { + + if (prob_m <= 16) { + for (auto th_config : small_batch_thread_configs) { + if (is_valid_config(th_config, prob_m, prob_n, prob_k)) { + return th_config; + } + } + + } else { + for (auto th_config : large_batch_thread_configs) { + if (is_valid_config(th_config, prob_m, prob_n, prob_k)) { + return th_config; + } + } + } + + return thread_config_t{-1, -1, -1}; +} + +#define CALL_IF(N_BLOCKS, K_BLOCKS, NUM_THREADS) \ + __CALL_IF(1, N_BLOCKS, K_BLOCKS, -1, NUM_THREADS) \ + __CALL_IF(1, N_BLOCKS, K_BLOCKS, 8, NUM_THREADS) \ + __CALL_IF(1, N_BLOCKS, K_BLOCKS, -1, NUM_THREADS) \ + __CALL_IF(1, N_BLOCKS, K_BLOCKS, 8, NUM_THREADS) \ + __CALL_IF(2, N_BLOCKS, K_BLOCKS, -1, NUM_THREADS) \ + __CALL_IF(2, N_BLOCKS, K_BLOCKS, 8, NUM_THREADS) \ + __CALL_IF(3, N_BLOCKS, K_BLOCKS, -1, NUM_THREADS) \ + __CALL_IF(3, N_BLOCKS, K_BLOCKS, 8, NUM_THREADS) \ + __CALL_IF(4, N_BLOCKS, K_BLOCKS, -1, NUM_THREADS) \ + __CALL_IF(4, N_BLOCKS, K_BLOCKS, 8, NUM_THREADS) + +void marlin_cuda(const void *A, const void *B, void *C, void *s, int prob_m, + int prob_n, int prob_k, void *workspace, int groupsize = -1, + 
int dev = 0, musaStream_t stream = 0, int thread_k = -1, + int thread_n = -1, int sms = -1, int max_par = 16) { + int tot_m = prob_m; + int tot_m_blocks = ceildiv(tot_m, 16); + int pad = 16 * tot_m_blocks - tot_m; + + if (sms == -1) + musaDeviceGetAttribute(&sms, musaDevAttrMultiProcessorCount, dev); + + // Set thread config + thread_config_t th_config; + if (thread_k != -1 && thread_n != -1) { + // User-defined config + th_config = thread_config_t{thread_k, thread_n, USER_THREADS}; + } else { + // Auto config + th_config = determine_thread_config(prob_m, prob_n, prob_k); + } + + if (!is_valid_config(th_config, prob_m, prob_n, prob_k)) { + throw std::runtime_error( + "Invalid thread config: thread_k = " + str(th_config.thread_k) + + ", thread_n = " + str(th_config.thread_n) + + ", num_threads = " + str(th_config.num_threads) + " for MKN = [" + + str(prob_m) + ", " + str(prob_k) + ", " + str(prob_n) + "]"); + } + + // Uncomment for debug + // std::cout << "Using thread_config: thread_k = " + str(th_config.thread_k) + + // ", thread_n = " + str(th_config.thread_n) + + // ", num_threads = " + str(th_config.num_threads) + " for + // MKN = [" + str(prob_m) + + // ", " + str(prob_k) + ", " + str(prob_n) + "]\n"; + + int num_threads = th_config.num_threads; + thread_k = th_config.thread_k; + thread_n = th_config.thread_n; + + int thread_k_blocks = thread_k / 16; + int thread_n_blocks = thread_n / 16; + int group_blocks = (groupsize == -1) ? 
-1 : groupsize / 16; + int blocks = sms; + + if (prob_m == 0 || prob_n == 0 || prob_k == 0) { + return; + } + + TORCH_CHECK(prob_n % thread_n == 0, "prob_n = ", prob_n, + " is not divisible by thread_n = ", thread_n); + TORCH_CHECK(prob_k % thread_k == 0, "prob_k = ", prob_k, + " is not divisible by thread_k = ", thread_k); + if (group_blocks != -1) { + TORCH_CHECK(prob_k % group_blocks == 0, "prob_k = ", prob_k, + " is not divisible by group_blocks = ", group_blocks); + } + + const int4 *A_ptr = (const int4 *)A; + const int4 *B_ptr = (const int4 *)B; + int4 *C_ptr = (int4 *)C; + const int4 *s_ptr = (const int4 *)s; + + int *locks = (int *)workspace; + + for (int i = 0; i < tot_m_blocks; i += 4) { + int thread_m_blocks = tot_m_blocks - i; + prob_m = tot_m - 16 * i; + int par = 1; + if (thread_m_blocks > 4) { + // Note that parallel > 1 currently only works for inputs without any + // padding + par = (16 * thread_m_blocks - pad) / 64; + if (par > max_par) + par = max_par; + prob_m = 64 * par; + i += 4 * (par - 1); + thread_m_blocks = 4; + } + + // For compilation speed, we only define the kernel configurations that have + // seemed useful (in terms of performance) in our testing, however many more + // are, in principle, possible. 
+ if (false) { + } + CALL_IF(8, 8, 256) + CALL_IF(16, 4, 256) + CALL_IF(8, 4, 128) + CALL_IF(4, 8, 128) + else { + throw std::runtime_error("Unsupported shapes: MKN = [" + str(prob_m) + + ", " + str(prob_k) + ", " + str(prob_n) + "]" + + ", groupsize = " + str(groupsize) + + ", thread_m_blocks = " + str(thread_m_blocks) + + ", thread_n_blocks = " + str(thread_n_blocks) + + ", thread_k_blocks = " + str(thread_k_blocks)); + } + + A_ptr += 16 * thread_m_blocks * (prob_k / 8) * par; + C_ptr += 16 * thread_m_blocks * (prob_n / 8) * par; + } +} + +} // namespace marlin + +torch::Tensor marlin_gemm(torch::Tensor &a, torch::Tensor &b_q_weight, + torch::Tensor &b_scales, torch::Tensor &workspace, + int64_t size_m, int64_t size_n, int64_t size_k) { + + // Verify M + TORCH_CHECK(size_m == a.size(0), + "Shape mismatch: a.size(0) = " + str(a.size(0)) + + ", size_m = " + str(size_m)); + + // Verify K + TORCH_CHECK(size_k == a.size(1), + "Shape mismatch: a.size(1) = " + str(a.size(1)) + + ", size_k = " + str(size_k)); + TORCH_CHECK(size_k % marlin::tile_size == 0, + "size_k = " + str(size_k) + + " is not divisible by tile_size = " + str(marlin::tile_size)); + TORCH_CHECK((size_k / marlin::tile_size) == b_q_weight.size(0), + "Shape mismatch: b_q_weight.size(0) = " + + str(b_q_weight.size(0)) + ", size_k = " + str(size_k) + + ", tile_size = " + str(marlin::tile_size)); + + // Verify N + TORCH_CHECK(b_scales.size(1) == size_n, + "b_scales.size(1) = " + str(b_scales.size(1)) + + ", size_n = " + str(size_n)); + TORCH_CHECK(b_q_weight.size(1) % marlin::tile_size == 0, + "b_q_weight.size(1) = " + str(b_q_weight.size(1)) + + " is not divisible by tile_size = " + str(marlin::tile_size)); + + int actual_size_n = + (b_q_weight.size(1) / marlin::tile_size) * marlin::pack_factor_4bit; + TORCH_CHECK(size_n == actual_size_n, + "size_n = " + str(size_n) + + ", actual_size_n = " + str(actual_size_n)); + + // Verify A device and strides + TORCH_CHECK(a.device().is_cuda(), "A is not on GPU"); + 
TORCH_CHECK(a.is_contiguous(), "A is not contiguous"); + + // Verify B device and strides + TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); + TORCH_CHECK(b_q_weight.is_contiguous(), "b_q_weight is not contiguous"); + + // Verify scales device and strides + TORCH_CHECK(b_scales.device().is_cuda(), "b_scales is not on GPU"); + TORCH_CHECK(b_scales.is_contiguous(), "b_scales is not contiguous"); + + // Alloc C matrix + const at::musa::OptionalMUSAGuard device_guard(device_of(a)); + auto options = torch::TensorOptions().dtype(a.dtype()).device(a.device()); + torch::Tensor c = torch::empty({size_m, size_n}, options); + + // thread_k: `k` size of a thread_tile in `weights` (can usually be left as + // auto -1) + int thread_k = -1; + // thread_n: `n` size of a thread_tile in `weights` (can usually be left as + // auto -1) + int thread_n = -1; + // sms: number of SMs to use for the kernel (can usually be left as auto -1) + int sms = -1; + + // Detect groupsize + if (b_scales.size(0) != 1) { + TORCH_CHECK(size_k % b_scales.size(0) == 0, + "size_k = " + str(size_k) + + ", is not divisible by b_scales.size(0) = " + + str(b_scales.size(0))); + } + int groupsize = b_scales.size(0) == 1 ? 
-1 : size_k / b_scales.size(0); + + // Verify groupsize + TORCH_CHECK(groupsize == -1 || groupsize == 128, + "Unexpected groupsize = " + str(groupsize)); + + // Verify workspace size + TORCH_CHECK( + size_n % marlin::min_thread_n == 0, + "size_n = " + str(size_n) + + ", is not divisible by min_thread_n = " + str(marlin::min_thread_n)); + int min_workspace_size = (size_n / marlin::min_thread_n) * marlin::max_par; + TORCH_CHECK(workspace.numel() >= min_workspace_size, + "workspace.numel = " + str(workspace.numel()) + + " is below min_workspace_size = " + str(min_workspace_size)); + + int dev = a.get_device(); + marlin::marlin_cuda(a.data_ptr(), b_q_weight.data_ptr(), c.data_ptr(), + b_scales.data_ptr(), size_m, size_n, size_k, + workspace.data_ptr(), groupsize, dev, + at::cuda::getCurrentMUSAStream(dev), thread_k, thread_n, + sms, marlin::max_par); + + return c; +} diff --git a/csrc_musa/quantization/squeezellm/quant_cuda_kernel.mu b/csrc_musa/quantization/squeezellm/quant_cuda_kernel.mu new file mode 100644 index 0000000..edf8edc --- /dev/null +++ b/csrc_musa/quantization/squeezellm/quant_cuda_kernel.mu @@ -0,0 +1,225 @@ +#include +#include +#include +#include +#include + +// half-tensor +#include "torch_musa/csrc/core/MUSAStream.h" +#include +#include "torch_musa/csrc/core/MUSAGuard.h" + +#define BLOCKWIDTH 128 +#define BLOCKHEIGHT4 16 + +namespace vllm { +namespace squeezellm { + +__device__ inline unsigned int as_unsigned(int i) { + return *reinterpret_cast(&i); +} + +// 4-bit matvec kernel (LUT-based) +__global__ void NUQ4MatMulKernel( +#ifndef USE_ROCM + const half2* __restrict__ vec, +#else + const __half2* __restrict__ vec, +#endif + const int* __restrict__ mat, +#ifndef USE_ROCM + half2* __restrict__ mul, +#else + float2* __restrict__ mul, +#endif + const __half* __restrict__ lookup_table, + int height, + int width, + int batch, + int vec_height +) { + + const int blockwidth2 = BLOCKWIDTH / 2; + + int row = BLOCKHEIGHT4 * blockIdx.x; + int col = BLOCKWIDTH * 
blockIdx.y + threadIdx.x; + +#ifndef USE_ROCM + __shared__ half2 blockvec[blockwidth2]; +#else + __shared__ __half2 blockvec[blockwidth2]; +#endif + + __shared__ __half deq2[16][BLOCKWIDTH]; + int off = threadIdx.x; + int column_offset = col * 16; + for (int val = 0; val < 16; val += 1) { + int lut_index = column_offset + val; + deq2[val][off] = lookup_table[lut_index]; + } + + __half res; +#ifndef USE_ROCM + half2 res2; + half2 tmp2; +#else + __half2 res2; + __half2 tmp2; +#endif + + int i; + int k; + + unsigned int tmp1; + unsigned int lut_index1, lut_index2; + + for (int b = 0; b < batch; ++b){ + i = width * row + col; + res = __int2half_rd(0); + k = 0; + + __syncthreads(); + if (threadIdx.x < blockwidth2) + blockvec[threadIdx.x] = vec[b * vec_height / 2 + (row / BLOCKHEIGHT4) * blockwidth2 + threadIdx.x]; + __syncthreads(); + + while (k < blockwidth2) { + tmp1 = as_unsigned(mat[i]); + +#ifndef USE_ROCM + res2 = {}; + tmp2 = {}; +#else + res2.x = __half_as_ushort(__float2half(0)); + res2.y = __half_as_ushort(__float2half(0)); + tmp2.x = __half_as_ushort(__float2half(0)); + tmp2.y = __half_as_ushort(__float2half(0)); +#endif + + lut_index1 = tmp1 & 0xF; + lut_index2 = (tmp1 >> 4) & 0xF; +#ifndef USE_ROCM + tmp2.x = deq2[lut_index1][off]; + tmp2.y = deq2[lut_index2][off]; +#else + tmp2.x = __half_as_ushort(deq2[lut_index1][off]); + tmp2.y = __half_as_ushort(deq2[lut_index2][off]); +#endif + res2 = __hfma2(tmp2, blockvec[k + 0], res2); + + lut_index1 = (tmp1 >> 8) & 0xF; + lut_index2 = (tmp1 >> 12) & 0xF; +#ifndef USE_ROCM + tmp2.x = deq2[lut_index1][off]; + tmp2.y = deq2[lut_index2][off]; +#else + tmp2.x = __half_as_ushort(deq2[lut_index1][off]); + tmp2.y = __half_as_ushort(deq2[lut_index2][off]); +#endif + res2 = __hfma2(tmp2, blockvec[k + 1], res2); + + lut_index1 = (tmp1 >> 16) & 0xF; + lut_index2 = (tmp1 >> 20) & 0xF; +#ifndef USE_ROCM + tmp2.x = deq2[lut_index1][off]; + tmp2.y = deq2[lut_index2][off]; +#else + tmp2.x = __half_as_ushort(deq2[lut_index1][off]); 
+ tmp2.y = __half_as_ushort(deq2[lut_index2][off]); +#endif + res2 = __hfma2(tmp2, blockvec[k + 2], res2); + + lut_index1 = (tmp1 >> 24) & 0xF; + lut_index2 = (tmp1 >> 28) & 0xF; +#ifndef USE_ROCM + tmp2.x = deq2[lut_index1][off]; + tmp2.y = deq2[lut_index2][off]; +#else + tmp2.x = __half_as_ushort(deq2[lut_index1][off]); + tmp2.y = __half_as_ushort(deq2[lut_index2][off]); +#endif + res2 = __hfma2(tmp2, blockvec[k + 3], res2); + +#ifndef USE_ROCM + res = __hadd(__hadd(res2.x, res2.y), res); +#else + res = __hadd(__hadd(__ushort_as_half(res2.x), __ushort_as_half(res2.y)), res); +#endif + + i += width; + k += 4; + } + + // col%2 -> only set one of the two values +#ifndef USE_ROCM + half2 res3 = {}; + if (col % 2 == 0) { + res3.x = res; + } else { + res3.y = res; + } +#else + __half2 res3; + res3.x = __half_as_ushort(__float2half(0)); + res3.y = __half_as_ushort(__float2half(0)); + if (col % 2 == 0) { + res3.x = __half_as_ushort(res); + } else { + res3.y = __half_as_ushort(res); + } +#endif + +#ifndef USE_ROCM + atomicAdd(&mul[b * width / 2 + col / 2], res3); +#else + int tmp_addr = b * width / 2 + col / 2; + atomicAdd(&(mul[tmp_addr].x), __half2float(__ushort_as_half(res3.x))); + atomicAdd(&(mul[tmp_addr].y), __half2float(__ushort_as_half(res3.y))); +#endif + } +} + +} // namespace squeezellm +} // namespace vllm + +// 4-bit matvec kernel (LUT-based) +void squeezellm_gemm( + torch::Tensor vec, + torch::Tensor mat, + torch::Tensor mul, + torch::Tensor lookup_table +) { + int height = mat.size(0); + int width = mat.size(1); + + int batch = vec.size(0); + int vec_height = vec.size(1); + + dim3 blocks( + (height + BLOCKHEIGHT4 - 1) / BLOCKHEIGHT4, + (width + BLOCKWIDTH - 1) / BLOCKWIDTH + ); + dim3 threads(BLOCKWIDTH); + + const at::musa::OptionalMUSAGuard device_guard(device_of(vec)); + const musaStream_t stream = at::musa::getCurrentMUSAStream(); + vllm::squeezellm::NUQ4MatMulKernel<<>>( +#ifndef USE_ROCM + (half2*) vec.data(), +#else + (__half2*) vec.data_ptr(), 
+#endif + mat.data_ptr(), +#ifndef USE_ROCM + (half2*) mul.data(), + (__half*) lookup_table.data(), +#else + (float2*) mul.data_ptr(), + (__half*) lookup_table.data_ptr(), +#endif + height, width, batch, vec_height + ); +} + +#undef BLOCKWIDTH +#undef BLOCKHEIGHT4 diff --git a/csrc_musa/reduction_utils.muh b/csrc_musa/reduction_utils.muh new file mode 100644 index 0000000..05e06de --- /dev/null +++ b/csrc_musa/reduction_utils.muh @@ -0,0 +1,66 @@ +/* + * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/reduce_kernel_utils.cuh + * Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. + * Copyright (c) 2023, The vLLM team. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include "musa_compat.h" + +namespace vllm { +template +__inline__ __device__ T warpReduceSum(T val) { + static_assert(numLanes > 0 && (numLanes & (numLanes - 1)) == 0, + "numLanes is not a positive power of 2!"); + static_assert(numLanes <= WARP_SIZE); + #pragma unroll + for (int mask = numLanes >> 1; mask > 0; mask >>= 1) + val += VLLM_SHFL_XOR_SYNC(val, mask); + return val; +} + +// Helper function to return the next largest power of 2 +static constexpr int _nextPow2(unsigned int num) { + if (num <= 1) return num; + return 1 << (CHAR_BIT * sizeof(num) - __builtin_clz(num - 1)); +} + +/* Calculate the sum of all elements in a block */ +template +__inline__ __device__ T blockReduceSum(T val) { + static_assert(maxBlockSize <= 1024); + if constexpr (maxBlockSize > WARP_SIZE) { + val = warpReduceSum(val); + // Calculates max number of lanes that need to participate in the last warpReduce + constexpr int maxActiveLanes = (maxBlockSize + WARP_SIZE - 1) / WARP_SIZE; + static __shared__ T shared[maxActiveLanes]; + int lane = threadIdx.x % WARP_SIZE; + int wid = threadIdx.x / WARP_SIZE; + if (lane == 0) + shared[wid] = val; + + __syncthreads(); + + val = (threadIdx.x < blockDim.x / float(WARP_SIZE)) ? shared[lane] : (T)(0.0f); + val = warpReduceSum(val); + } else { + // A single warpReduce is equal to blockReduce + val = warpReduceSum(val); + } + return val; +} + +} // namespace vllm diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". 
+help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..46488c9 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,19 @@ +# vLLM documents + +## Build the docs + +```bash +# Install dependencies. +pip install -r requirements-docs.txt + +# Build the docs. +make clean +make html +``` + +## Open the docs with your browser + +```bash +python -m http.server -d build/html/ +``` +Launch your browser and open localhost:8000. diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..747ffb7 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt new file mode 100644 index 0000000..0e76763 --- /dev/null +++ b/docs/requirements-docs.txt @@ -0,0 +1,12 @@ +sphinx == 6.2.1 +sphinx-book-theme == 1.0.1 +sphinx-copybutton == 0.5.2 +myst-parser == 2.0.0 +sphinx-argparse + +# packages to install to build the documentation +pydantic +-f https://download.pytorch.org/whl/cpu +torch +py-cpuinfo +transformers diff --git a/docs/source/assets/dev/dockerfile-stages-dependency.png b/docs/source/assets/dev/dockerfile-stages-dependency.png new file mode 100644 index 0000000000000000000000000000000000000000..b016531f1e0a06bb38b01b1989df932f161e3aee GIT binary patch literal 118207 zcmbTe2RN2}{62h1C8|^?QXwg#VPrLwB%92JC`B0=S&4+K5-BScNf|A& zw`65+-t&9Y^ZefTcO38Wf8V{vgFT2Cn}{-bFh+EwdtH_RqXc-qcx*E0rR z1*scb-ImW|e79nKG=0?kh4TUrS}O+Sedv)8-0^79$yTXH1;v&x-mxlr(6O(x*|&Gz zysH<^6)mscx}5c}FO|AM||lCh~bKJ?{fr#c3|hGm`9(zD5ljp!Z7IutIbwko#1 z{fL%OU@;vA_4gGi7JOXKdfwmvl7FW|zA*fc|0`^X9{iu5C|fLmDgO6L+pWU=-~T8o z@$Ub}k9T`5oBezl+cy0i883F>l4qxjpILW&iE5_O1hllYI=&|yyFBzeAjf&)@qQn! 
zz5B^{-Hs}E(9=HHz?u#BY}e8+T=Kz_Ws~JQ zmi?z{URp@@ylnpRaF0inF`1M?WBkWf%gkTPXGf6u`SHOA{l)szqVkiC%mbbJGg4cQ zC2f^r&EtB68%vJ0`0X&vzkZurUaq_9#TF@6^0AeMQFAlS)7h$>wz+=%M@D~buZy_r z_}7T^%c{L~*;Xz0_CJ37m`~PmST*Bft+oWOLUqRZ@9|qPU-B+d{{`QPi0)c zDD$^g*H7EtSi+%UAVnK-4BxnlAo-3m@Uh1H+dpjbdAd>#Hx+kkf0sZRZ~?>t#DJCn#g$yHw8pWDKe{U>r=oaM%f4z)FT z371s8F!kfvb7M)-HDE5gz*A^}nl-NM&ST$T>XxlG8uI@6FpszKpla1XBb)6o| zoa`@tZus`b)fKdT8%PigEN+e3L+(LQY1iL-vA3svRVzMnNLI_ChkLq_+j45CBKAO_ z&;mYs1IMS@F9ahdTllRorDNgJFjbl{W?vo>+|EI+uruK%-`~BuEM%1=uP@+a z-BTl(QSaiIGxphB(1}VDseNTroI5j-+g>8w`*c!#@+N=yhssprfmr#exCU!-sWKvPAyCtHp-da#EsaSPMZ*N^ymNM4qST}ah; z-K9J_2iUXdrG~t#e#D9L)OO5yVrBL=I6!luPx4B+WfBc;GULAy zko*;S&6mfjz2q~-6LjpPLrlXdGJ=$WIs102{qZH{z>bSGFUcM%`?74>cj}|RfaIS% zdM34n3RY`GPbm@OS;8r`Q@OuTep)0p!sYVOB;6C2M9GCJCFg=s-o-eWDEDLo!@%Iz zL;T9iWd7vUxXw&#dabhds=){zud#dGFl0bFLyu zuP1kA`u1A!q+iErAI;_qnTx+&NAGVv;d?+hd#AZm@AswRHIaYlQam`sFY4xuz8B_G zUbA*BalJER{jCdPX(6&Z_TP_AcSKAJBxm)j__jQKUh8Kj?@lz{THkvhk-Kc1e5i;EMGSVip( zmUnfz!OR`5m2foX)hj7K{zE0Hg-E{T(-(OTS-4i}}p3xgSrAhQN_nwS!0qSmDju z#>U3p5>1|;Ld3QxTh=>|w0=8sXd6a^*M${+v+>d`-F%?!b7s%0PQlJ(r`9bnMjmB2 zb)f51g=^wd`H{@hRY4?IQ|Uf)5umJ1cC_JI@Q$-;zAf3k&gN=zV=Y*cvXD^c;h&Pf zpFHyA`TF=^@rpw->X$Z>Pgd@lGZlpgcE#H=G`aL`zH2Suql*+nIQkeXHx&1AoP@6{ z=g7MasZ~{m`z%F( z3{eY{I{oJAf{w4Thmh9riWK;FX{*v+_vT2$e3X5?gmaEJ(z_-8{uXycDC%PyrptR6=wi_d`=jVsEO z$(Fi-@DWX}(jbvXc|7Fme@gy_j9r}_u@Ol!a!sC_7OT!v(CPaz;+2u(L*Ard`WL&T zoj}w{l;M)HHlbba#?rM%9VYXcgjWX5C-1Ba{=5BWw&}gvsr&_4WAOL)god3|+5+eC z!8jS0W@p+vwYS7DlTT2QpS9C3obZhXFo=tf7nE2^nf+7f&G|Lcs-4ID@BL%u3_0xt za4$V~F@)CNr8GG6PT>5yL ziMpCpN3?ZsoosukmD}rLIM9-FleiTc}8UL+kxs=S$EnRJH z`>5gNGvmoKAs$?pH)9$fOI@c2oRCC?X8{m(@bH{fxydRT8#l9c9(=HCl|OB~Ze|bX z224463OhA2Nj6?zfe;@UYFl+~>|3mymNgH#85L5N^NaQkcCGkHTwKv69~lknCA1%< z-&6iSgqYHejUL=3*BMt>{(9)~_-tu-M_sn#jb)r6KF(dn!BRHo%lj6KIrO*aUhasI zY`MGVOQKE&wt36j8%vApXQt}g5xTZWagnWAYl(fP3c&)KN9`d0U%>}I!^)oJQ zpMw2}-QwpCJ(=y6@cWh^WACC>$8_((Yt4t&iPza1!%Nb~ci?u3J(8VIpQm4*{D*-v z?Rzu3knU)D#cFBW3(uzp0{ZvL)%|Mg+>w9|N{Lzwq;!%y6YK 
zxxHYa^F(!h3GsR0<%bR5wi$m85#x||adP={jaxGu@Mb{c*s(w|ulKiB1&f|m5y}~D za_V)gFJ8c|d)PpmEQ{NZgMY0e{j2L_p=-+(2Cff2u9M9*SOpHT^O~=0I`4XS7i4d}CEJ3WMqbx{=CrH1 zr$5=mK34AIl6GJRNnuo))?8$!Qo>HUjJ48seWb>!emIDv9f1IqfFY#OyCbI%>gf^(f>8apSr#8+JUFV_iuSIEmLE*t)AEv51%JwjS#sZFd z7lvK_9Y=!aX=nH&@U_LUeOo>e)9})tjC$5n{%C*QMBPYEb57%1kFy9CuV)dVLiOJe zgR6ON@~(W`e0lKKy=%*)o;*95X?YsK`$dA$s`E$)KVMoHpO$qR{j+Vc@Iej{qjyQS z7La8s`#0w1*~Z7k{cJO?*B)2$MGD+y7kac2(Y4Mfdz;_ky0To?nKo}4=}OW0*XO&x z!HLY|(h&!Bo(66`@ukK>+r8qXqnVctxJHIU@9?A}3GGyu@?REXAgy)Ge4_9|n6w=( zZQ1&<&HJk$!0&mlp_7i?FFrd~BUjW|kPlI|O#f~oQ=;B$TQ8fDo?0fAXQorFo!S`} z_5M+weH-_`Zws4#_MzM#&SRchcQ(Q~WLsguowX0Tb)^&Rdh0O6Z93_n9giiSto`42 z2qU*QlN%=X%+?9?d12@!+1ej#cU`bAw3<2nFj~oV?A1y8Mt8Hi*RnI=w0k?U&ZPvZ zQms^G;L|S745zXFZ7STdmj^!}Amw~Fer?_H?EPi9Rj>Ut;g;3@k9cpd5fcOldipfN zWxT^IT=7}r-|MaU?|MT+Lk>gVkJb0RbnML*f3V9e{;7QbJ)h`Tmpi%3hdREV^%|NS z8+afyLU4q6U6$Rl>fNubJAAE5Bx7j?zy+*nS2KdFJ(pRtmi~8jT<@cJPTw6qAA!K) z%*GpQ1v_oKt6H16XGW{JaTq z`?8%Fdx9e;Z7~wv|BFiGS~u2S{P31pelopK7i$|V=Y07zT*g((gVVOMnyVgEJk;`| zK)_I~U40n4zLP~lq1@27Bbj{#YX_3-vP~Jj^h)jLcD7E+>i_t09E({OHQFD@cQC}? 
z+-2Ya!OI8U(Ja!$>Obq!b7&19G+0e`=-cF1`KcoWeu$a=IMw)CmN*6PX#(p9z2&Z^PDqsL8Y%_@3+{#vg8P#HSi` zcZHgS;j!#@oVLw(#M_*E9rbB`&c}^N(cj{C=nU?2BKxw93)orviilCg5~+Bn@ zWVZMoSgcCRYGs%IBAvXWH5P@Oz!341Sz1sF1_&K>CW7`zIqy_FtGdnZ<;S-l_o$Go?qpUtlqCW7M-4Mdn_yZ z%vT_c8i)2ULdOTVXZpAu2Fv$TY%TpxzggW+Q1{Cqp40rNHJYcVo z6b-jfdENwsCQb6?zY|BdOipZq71nVlrp zq{Tl<;rYMt`zhcrpE6+YkxQ&pm2be7V|>c5?0Qe)e|VYvmEg+G3;~@eKSLV73Y>oQ zLd{E&gk#BS{cRI8c%F82F6t>f`2PHCDUZ%HoiMetHF577@w)xwN|7%#{v!UTQzP}c z{iC3{!rK`sv-djn!gb?vw2>v=`xpJ*SuGOPZx4J!7))Z2i1EJGKArm6Yl;1Lt&Hi^ z9-SFacB;;M_M>Ju|Gh+ZyKJxWYuj#Lp4{o7WF|Fw1)V?0bG*t@wp~wXXz)L5m3CsH z>-nL5K3riu4lqVh*Xc<4{AFXc0Kz$a&!VdS}g=ex^ga`a=fvLD~7 zp!se;dcqxh;$-By)4qZOkcn6C`^!DPmLMd5Z0>a#c}-(wTM+4{hO7N}5WvRtXF6S1 zh0@mYKML>|p~+AZu*r{S$C*zb{0_;T0M*leMDQ;ZByL|G{xE9|B1g?KK>M@*j>@hz z;Mh?v*XFhLo3Y4=-^FvsK7VX37JB`3mbRpH|K9{hX=6AK$4-;OY}>T`VVgv}eCxq1 zEX778zJo{<0Jo3T6*^0pp|ssaDeGT{XNk2v|9|W+3uIvnVb;Eet);rbDe zH+SN0YL}mJaHnyWswIDUacw?eT0dkDX`&KhCAcpCdYCmc)k|)@V4_vnHF!s<%noJR zTb`f!JiGIeiQDH^wNke0Ma9XnCidJQ9+EnU>;R|XVMl47HaQ!*ID@o3z*n=l!=&0(Ci!~?53&K zdp*du!Z(D-7SQBeXs{3@Nl>!<9T&^S$>hdnJ5p=c-u1i`;8G2_frvk6Kiu80w~!cx zTenDg^elNZ$e_(kfBZ|JFV6#U^AaH3yJG|G>}wahu?xJUCCo&m&Q@@1^|h4ey@d-=B{s1TR)|DUsykU!=4+b8uG3Xt6aErjsF#ThBLWS!&{{4*dD8e zg^<(;ORJK9jC3=a{Biv?2Ii8;f3AH2&P>Df-#_EB`KC_Gayv>SHjxFv zUiXW;4v3Et3k9Wk{oayFG0htPogg_i_;mmh5U()^$)vC&1ezX^{2`js2egx%3qV*%5Y?60 z$^AU|R7i@(g{ZP&2pO%v9{C7$jYHiiM%E1^w69ugRO#dl-zdP_PAI*nTQdoOMQ|%6 zKw;_HT|4b`sgO4aLzVzhXGZ@t>a3JYdi6VgvQ0c!eUz-BLB?PCrt2;OA-iSm^Af2o zrE(J&rd#!=dAqAK>K|GGp$gDSF3%&Twu*^~DfvULGBtr7-;i7Af${K>U*j#%Ykf}RoOobG<S!wo zdqB3pkMH0|h%3H#P+jSpAdW?%peT6ZRaSCG1+S3VEDgp+0>^1c5U;x?jSIa$Z_B9h z1(tUT)N2-&MSdL1of(;F1G=m{|L`<&6o;IX{l43)@6pjOj3(j>>UL6gJvBxgbXBjw z=+2;abpo0h+t%ZE!7k4LWt=lH8BH(--3$DxCcQ0Us_U#(S85^KT{f}~iyzHtM;Bff z6cl_4-oXzx1dv*M2&(W_o%E+alb=J4Zbt?wmY;5xfBfpztDmL0+uA-q2<&xgmmX_m zb`2uD{q7RV@!6Ye3?!%%2>uKqik*{z+~Zc{+&!|g-I>_WfbInN7LpR!Q>X%gddX1s z*mW=cbMaNjHsQABBxR!Bw&}qAtpX?!UW`2yAt#C0OUf4{I!bn> 
zyuIa3gOhvz0>6_PE>q)^(<8a01UEx!8xH+N{Ph<3dt=qYp5b?AGN<1#aEB7-m+X7D z;z6s^>o0`bnf`d1HC$y%mXDYNa_mY@rpTYQHWhjfW37RHJbJIJi?Cf}AG?SgY*PnC zHX1bdCpI0bal|@+8sfrMibOtgtNjbuy5iQXStGbYhVM{#==k{fQ=N>YB3U>R^Igy%V zbyM~SSLXbhL)1wb$PlUzc5NfOn4DbN>?0oz#+OFNO+hUcF2Gfj_=JQ9 zykDVezCKtOpb}GaxigVybkB9!$yF%>yT2l5Vr748@eZ5c-&Dh8GBx^-zp_5(=SaYU z`lEd@nu7S-3eEZeWMkVC1;8odq$--_Fpxezf!YDT{?zYdR(8txeBw@u67rtLD@L-PCPNHemX594dT@$aS0El-FLKCIHm z`925?PxW4>|Fvr!e;E>&cHgY8BkG3}YD`a`GRqE@5en@9!BfXTkkWpDL=p>QKp;5i z8x2KOvibH7O8|Tt2BK!d+9q|vQo-l0DYX=c0?43Rp|Pm!m}!LvD=TfL$2dTmSHg1U4G>s$@&o_ zRdI-(@|?ZV1@}2MJcViJ&YjUoNl6<0qv$sXgzzk$8Te(~xmR zR#g9`+uF|wyNHVokN1}eNBM}v#DhH@m<>T@-+ugf3|mc4-$hs@6E96ChmrrSLCoaN z=8;&@H^%)fMZvO;wjpamYGG9CP{qZ*y?F70W7mb_&{wp#OHy4wPeQgu*w6U2nhd0@ zYffUSG~TBBayu*{(O<>|qB@w5*ufrxA>%YLFyib$#c79)s0LxNUAxGbpjQWPH{2}; z-9*rhTr8#fiJbQ^X8Q0Qm6!gq zH)q1K>RhbW52Eb=Is_BsPb7_5bY>8HaIWy?R;z(FnA2Aa*Yf(M;uKCNMYjK*T*qAIdVjs zgh>yeQ({+UtWL&7Nz~v3d3_Mrm60pCjoYnA=fAACy;=@p0|VB2<~iu8TE6@SV21dzJ}Ek?t$1 zYwH|4)6RgHat2#~Jnk=Wpl7gv=4sVnIqw2rLWyX!;|iFffuL=-BI7Y%>qh3}o!E!-X_qQwSDd~R#| zD~)|lmq+cz{vbQU*8o9oBQiTr44YwZsL!UT@SQ*aL?%3Dy^^fl@Ya8$k=u{cJvEsK z8-Vdd*hT}!gBq(mItEQX``et-yEg6jR>iM~8&tj^WigQ6Rh%^9Ej!mGm&bJhMF9uM z7nmOUFhrBHhqe72FAsMGKk}A}*XVzenD|+4qU$SSE4e#mP&yaPQanshZh~{oK{tdz zC`Gy5;RwX-P>_>U|B%oAn@s7bsvMvsenOA6XJy8!xw?g0?KdKHBKZ?Q0mmjy96&uG zHAjvd@zaRY#G-KQ;KOE~Ba8Anx%%SkZNw0WtgRZfeM57>ow;T>GX<^8xa3PTkBB_@ ze;m{($NS(FGLVqspL(E~7@qauv3S&*?U9yPm*E5O zEj0<;z%|GQ$!sc1sFZf?*S2PWt^@tW!e_C<%D!k6l0v6i#%vNjC^~BpkpVi;=}9u0 zg;VW)h1{jU7L_eDg#rb&C;l=-cqghcF*CQs6DT?X!i3k@R+~AXMQY$0_vCt_{NJLv znL?+S`rm7V-BcP~Ekr0FJz;z!q<1PVG4WuWrV4P5NZ~p^&`pvisPqc#B>F%M{3^&U zoBXGhRQe_AW{N_G#6U=RMhh9TJc@0EY!PaX>_B!0+4v&aot+*~8rV3AwYcq4m>Y@v z|KFfS?VYRhRqiF)P~4H&Gf12Wt*GHYsQH?#pG&G(=*+=i1bJN`{O`n6O%CibtMVW2 zcJ3m2TZ;66t1Foo2tU@kzs9OGhK5^$kbyt8_E-0iy2UcN(FUYHL`W|Yb-O9jaZ!nALO!)o^YxWG_$e%zwx6INbhk-V+Owz4KlID% zz&lLJB0q&miz^c;r)9h|`H&BnY=35{bJA?%-A?woTspn`|0uAz$*M!l1Z2vAJljsX 
z4@mX{aT0-gB|J-bJ{!;Vpr-8C`TqjfsD|WhXlJyVSczo&N>1W#Zf$eCyXyor1D)9n zfySfQ)KqXahIN+|fGDsDgcfQZJC;H0m%I#^#ZsNiX$l5q8}R>z(^QHc{XZ}oW$!oZ zm?=8{JFLYS782b9yjT~5LH_<9_CdSzh1!dHkX+GFf z*oj}fxG_QZ`8251F7$}vOM{C7*wXo(#+*2mV+?j7Dpq|&0eqQN-H&gb06Ma&& zRd`V2Q<2GSZb@67k}3NmrZ2fuHW3F0{FxlD$Spwt618gk6ehHt?$6_pkPyOr_j5U% zqBWIXU!kT1b>o)zi?8G}=!8rHbU~USncvS5MUA_tw4-#Ft}!S9&4fTaDoqyEzY$CH zGx8~uY?wP$o%$D4e?yggXKgpjC0Bvhr2-&F~|@>#qMK!0_ke>hDtRH zHqFISwN%HCKRcyNYR9CKgYg!DH9rTM?y)9u((Q*KX%>VwG=RoGVe~hH+by4x|KnEdO3IfRzo(;1(N-N8}Gy5`2H@B&9K?gOpEM0p<=5KsnBlb~z z1wEB#JNnt&<%mNT+5<@X$V5rK9|A}FpSR4y1om{D*ma@uF3~7UNTW+VrbgH&!kw+o zKznH^O8wB1M_THqyX&7}`C8C8T0|lk8k5j{`RbRaK-!=PW;TiLm9wPukBXs%S3POV z1t>j)h77yWNpw3UI58D@S7E2^&btN*>DfxX1x&;DzM*JCk+unMTdmXlzg>Z51Si{F zvf?}^`hYPAu>p>jw$U42hp6~DF{3VoU;_-b{DOwkJhA(_ry|du6VHWj7KnzNDY$qe)!U|E^?`-PW?MxT z85c<1DB{Ww4DywIT@W6nsqL>4b1_%`vb9TCwXJzic5LgAlZ*^kyH!tnXz7IQ>vrAR z(uskbYvSg0o?Ozvmfl>lx3GK0(Cva`97Li2@*2J~QdU-eY17FzkDi`o_3B0P^77ZO zUtei|``$ekG=HwxQ1l3s+b$;dUE`f1dBuE1(S7^rk1H<#&%qwpeSLimOihcgGjAlX zZH!DzSmL#R(C)rbP*AW`4vK%uix+!=JrAQLXEg`MIZ2g`QNF?(HqiC;_1Ta0^Y+wb zdyS6TH>Orr9`CAs%?`Tg9uvchN|+C}_6>ge%-%+H$BrE@WPd?fN;&tHZ~pxGp4ZCD zuLK0FlCt@I9Ycyjek%LAh3&nPCwYC$3JO{+Auat#H`8(yI7q~@MM-&P$Da2(8XBxX zPabI5-5?~SHW+mCCSz!5XhT=m5(Wl_pa%~QtmI`;?-&{%GS$s-;^0>c%`5WbXJKL4 zaJlBgheZI~@1c$!Mxse9War`%if^!(8n=m#iCJZ@iitNiG*Hsg(v?4dJ~XRNFRQ4i zm|M5ghjeww);T& z>$H;~Vtk&(iiEBG%#Wzn_Li$9j^4cJSl{FB2Eucp7%N%|h`^YWzxMz;_V zXLpHVf>y!}RIr+!m&#oW4h~Mw-$2`XIYzNPnK+m??{U!MXRy<}nW1|uuWv^x9i*Ds~j*pi>V;o-~7&CS6^IgoVRQ6%6> zI1;-|Mn>ij>iVWTc)EF=LuDLTp%%YEr@Ol|h+P4qzZWEqD40RjqH8qMxV2eIY7NA~ ze5{S)7K`ets{GHNm$I?3vF_Xd{vQ*Q%@98+ZfJ3v6I12_W53o}k<2u)Wa&~6$Wu<}e1^{jY?s=& zah@Chp>UGoMmv*p@7}w&p^JN`$#TG;Cy7_tLk^8Asi@>blWOSf+|rc*v`pLv(eZW> z7j1cYRZpGbNx$%O9+vLm)d*ysoSA9ImoHx`t>aXFUtG+DwiA{_xdAL8A?qp6p80MU zg~b&kN~M~VZ=A$0@~3L@@+dAYt{|v5_ewh8E3U4t<=N|n0VQ^0Da&%49U&#t4G#}* z7Znx!67=8pD)N?t{cSh>vGt);hAQ9Nmln+pZEcSe8Q&=?MqiDLTn~iFfY#v^!&-^j 
ztEBCE6j6#4lg&EgCx=GL8{DpwEE_g#$d}u{fBz&h#gn6nA&+nH&c>RT4g-8R7I3w! z?64tRW@@`#kFLM4{>l6gN)Nt#{VL)*?ey~XYbL)NkX-IS0XKmD$hv>OqM|$FqD6T? zaeaRp_ZZd#F{PX?W(!y$LSl)V8&xhRD=T~a#0kb#GF@F=WlvA+xc`u!+){8)PkpWr zZYLornHt4SJUGDN=FOXEXWcs7TQA&YT9+l6ZdQE*JoLb74i*-Hx7F2aSMaMT9zVXC zQrN$r0UG%mG&Vkrjg5^n6nik(*xT!aa?r*dJ61rRJDy@dht+x<*GMxErDBaEM^X*n zE}TUBO1_+_sj0oaecrowikRS9R#w(O5H4>DN_(#snTJL#H$c?u_gGl&32JC)Y(HDF z4%G}br3Ta5S7K`6GI1Imquef|bR@Wjg@tV{dbHbpameB@+Kp7)P^vaUZ{hYGJKo~J z*!FW@ct|MZWFu3&_ru-Y{Y_C3<4A8k!(B1;(L!$5cM$p?CMOG9SYsQXBJAK?7}v>@ zC-ZaQZTZE;2U^{S{G)%ON&p_b0)pUtJ3G7kp|pJyj4271^+H73Bq_;39Zo(ei$r~I zhGFx2BV*&nuCCjtvEwk|k|imYttzOQ9BldkM|nch7XSnw)j^VNm}ADGnKO002M4XsY+$9` z$@0ycH$hT1f+urbmQqiU*JWg74Gj%9Se%N!*x1;J`!b*kHJ^H-{%B!Vc6ND^p7hO4 zn>I%I3?XJbguOzYTR`P~`m_X+(;meCn57s3UMf79y2~I>kqiq#VBh7-Qd9$`P#CY3 zz~k*r-l8N;4Dn?4vN5=xT%wQ_uM5!64S` z+O-S6QB?fhkF*HeZ$ur6a)Y<5U%#G$^d2aIkU{*TDF5wSl0^_9^9u_Z5EwN)Xt5yL zqBKH&Kl0M`_bbdUT(~qocrGY7Sa}_MB(4~(4-G9X*HN#dqb``4^`R6Wr4k_cd(Bm4 z?%7YD1XLiHg-gz95rr6)kJzx@uLzqacN7Wz@<`7e#4Gv_+E)rHD%iop{m6(kGJqW< zq@*@(-@cqOFf&^UbaT*uBOe)Rl;}R8zyWi2a(zuce2GMArx`^g&M5uq>-z_+hnE<| zw`~#U!pPMyTy3x<(D)YHj6(*7`3x-T0*G{CqobpdXw;zG0J)ikgoKn19r||_J4Q2hlQ9Z1!T&`r zH7MZo*hpXz1KQ_L!}_7)JwkG#+|a>j0GxYXT7HZ3ax$2E`?6f7*5Ev%JA|Qc5AUu) zbMa!ROY_Z&j&uWxaFhi{Rtx8vEDU%XSN&1d$@DszOcci;(h zrw-1~vG3dX>C-13?^~XpJG(&vh=BvvGSEDnPc3vbHCC@AdKLz56ucV|!42+m6Ats? 
zl&HO<<0DwShR0q-MQzLODlt&N1*jR60mB6bvcvzPmwC}_^JB`lZ?=1e})lZ%UsNp*U6VW_2K=VO|O zvZ&|y`uX*t1T^!ikuc_%jH*fV50r&1*K}q8w zLd2ip;e{2MJj7USfHi4tLsp%>r_q-3b7$bycnqW&O;{3+!&?_F zS?vZmWdNJP+wMv2B*w6jKr!s<0?G-;CQ)`uOQXYP^BjG8{0eX-9fe?X9c0`2+nBXz zX4it*Eo^PXPMOr_$cl=J8km@PBEgcN`8x%J*{H6rZi0jr_2>~D1(2Z#9g3H1FJ2!* za;L|cU^V%`C3`1)xaF6fJ$v?M6m2B)maUVZu=11w>Q^i)S6&y?idR~BEUfu-^}NCA(>_ix4huck;u}N z8{E4QRp7Gk-vcBBXNN=kdJb&kAu^%;(C<~&*4CF=KdwqMt3FEh^hpp-p2%;v?K(~= zKzX}_qp@0tQgt8yX2HQqXo;z^htWJF(Tqacs>yZLZ?$^j zKghz)?t#e==fkNV1%ym3uDdkr^1LN}=+GfU()V>&TV#Ff-(G+s&6C1zNPwAWC|Fdj zlYZgecGmwn@zzNf8w4uWeb|Th_pqW6g*T>o^-smC?64#FAT4btm4`06$mHbYez&=m zcFK01V4E1}WuOH>eV4HqG9Pt|mS4X-K~?8bY7ugG8X649E|QnvE2KqZq)nxR2Lq&5 z(tc!k_3G7z-d<+P8XU6rX61#kgEMcV)}y7EN*TypS!ABgjI zv#On7-Uyp4FavygF;7UgLafJ4*;38Z5MS{Yk?iw7*f*G-l3v4 zhf7qhhq`iud)%G<koL?%LZpgYCujAS6_{Pc@5H{$oIbZP5fk-P0@ zr=B@BHf9L6{Ojj)e!|RbB<43j4FpA9NDX%VT!COqb9%uZl=GP=^yJZ_muZ8=&e@Oe zH9mXR1E*)!0}Pb;6cxt($La zD05q8uEQG{C>&Kfw)vb2Z91Bo=+|INkC1WLXvpK=jWnKbTYP^T@9pdGzFVOqUZEo_ zvwP{Q3QzsLoZR`z{DzSPpawx4-Zs?fR^}EF5rJkQOXcO~?|xr4J2*>IoUcpaWMkvQ zg||uni`aT`HR!j(?7grKy0RbIrZ-Sd&X)M_5w+m5EKy9d_p0$P)!qy{$3yJaxQSn zt`8Y1MH|6Q;63P5rOU){;+H~!fWdn~L7OiedwN{m9l?{l`j5qF@KQ~RR=#S(u6(CO zIc9pn7gdT+^6U$cSDA$s0k#m+ufKjRZDeE=z093_Y$wk|Zxd?o_a$fpi9ebscp(8{ zNh8i3Q6WG5;lqcE4ev?8ewM&_KMtfq(|@$ri%r|x^V|FP?-u}vWe#_Cb~dB5#Hy&s zBby^ESBJW!#pGz840rXj(@Y?uY3tO>{qMK`qv%jPJ+oIs!}s02<$q=vXZG#ecelu2 zdz0If<4E2lCl5G1ML(W|&2Rtvn>Gs8KJ$|}kvR*mHhTe=K`y(2rUyfusb0O^fP(ye zlMq(UHDuM|)Q3kBbPxM@ditL>Frd%|fOHc#h`A6`(yjbaUal@HwUKuzhv@u6;W9{H zAP*)#pCPE;dnMyKy$(kg-UFS|QJ`^(5M8~TaJBb~c4=rq` zdEGjI>p<(>FLi~$LME>eMFlpC-2YB~W^&UB8IUOni>CSW=FNkhU%7sLeo#MZJcb{_5;}q!FWnq7)qxTq8IJYT4}7B69nT zj5ear;Tqb{n_xc(0{dcA0u0WdzlEY?-p`*LZEbDmx$PiDj1E=kyW1j{HuUr)4O=Ys z4MYECaNKtb0rQ{9^fUFaj`)VDP+|n0N?qt9yIXYXZgDG8hdY36R#sL+clVt`@>80> z|IprY@Y#$|$$}xgdDYJWOU*GCt+;4rjn61&r2-G3scaz<6fY#G$om5&Vbbha%>@)$ zJfKgl@#6@!AkrWYc{Mfbi0rmzr%B}fm*040WOS8Ap^j(=$Lm`DY52!*0@Bx8T!HuY 
zzA4}}H>hD7#l(glmG}h*-vp>@j&PlR0}_rj8h$nxYU#Uo?;5atl^N#TTeoc^;z?y? zrPH_3co>!2g9mF7G-JM73~UT61{_<4Q#lJbI5?sTL4WU{zXO13Eus3TnlVAExw&!5 zHD=2B;ZgvAcQ9l+3cA>8d6%p-x>f4u9JqMy-@ku@FnB0X%v>@bo*zP_2U=(0;JArH zK211CwI9K=51qUZfrH)~EwMqacnj6RDxkACCt|^Z1%?yD-K)jUuY};mEGs+Ybe)-* znVfd~ZYcyX_83bMCGU;yIxZl$fbFzshA)B#&`Yuu;FYlJ>U6U`s;W!Qe|vHTTv~b7 z0pJ4x3aaB}Zde?G$SF7Q1=N+7`5t>pq)!5VsZnrLScErDdn>Q;i-PFSR1MT*JZvhk z9Q>Jl6n3mtJcjGK0~y^MhE%o~Dk{;!@$KF4aBqyO5pa_%IN>RS9Y>r7K8r&Gy7J>_ zi6nRc&Qsh%9y@nB>*|MA%SjYV{tOK*xU0q33Mp_O+7S(mjIO4nY+om7IUkF$2S$zM z_X-r&H-g5`x?fE(<|hP<)Kf*Q4-+3fx<-UhurQM^(Tun6+_~rPf4$I$yBRtdVYFw? z@J&yR=-1>puZ2Yr9nk5v&kG#Of}p(5vlsOq4=*pK-#-$)p!Vp#yhKM9&&f$9-Mro# zDg}r?J&G{gvctZ-2kwT3PL{XN%FSfqqiWXdy5I?*ln15Nak%S>uF!tK!I(MDhn%xM z##auAfJZYZB|ZIzi9!tC!v12_vG0Lbjh4LMz_<67GPmY4J>-70@8^M6P!t~@AJ~z4 z=W2wq_h=w@7u`<*_c3qTvIW{5(W{8?hV5;!l*B_CZljm)O_^rX$_&LhmnyR0$gR5!*df>D-Zw_l2nOm4gcGVrGd7ijs-VT@4M!Cs{nJwEa>iP z?HdGdVcqYai!NY8w95bmE$*G7`Sk%>HVq=Z5T11;6F&8L^9IS_YuihyEH5h{N)y<82)bW-+SP+vLP%hFW ziKSe;-pOwZ#~yVg7ZEJWLr!*fhC_!+jl^yc=`-cRPtgr0DvOE)>d$LJ*2g)% z8)b6#f7V~D&(Uj1eH0aya}bU-$GGS6G$Jbw)FbI z;__sDc{HQR|N0W+RaHoOSv96m1o1>zW9$a)n7?2_oGcf0JmVtg?%lhuE22wdYyoVM zEC%86jmVjZV3oZ zz9XWE)aANCN=n-VpG(S{2BO~s*m`{!NzhAX7*GT7#}i&>P?dIWuN-q+J3)mgKd?h7L5LiTy<js#y3FL2hzB^89uY#1|FSYgGE%q%#+=^ayWo}+P`O(b2x|Wur zN&30VD3tCzu3fwKj-|YCySR8Ebd7Qpqh3V3MnDHUSqN_MP(xH1#=f^ZbW1|QS2&$m z$gik)1dN+iaONYwUxrRWq`%Z$u=GX2Ud8zo94a5?l7fcs0PiEMAxU>cZZ$zkwI67^ zQ+BaBJyD}jV0d6?Nb6h;bW2o<*H8_hsOS!ix0X?E21FYL)GCkshn}cs2-0gNw(g<9 zV)EsE1;9snyw(BpjH>77=U*E9rLLQ0PoN+`%wA}z<>*Ky4oWo8Ov~1^HD4jv5^13(+mRi} zR!K?8?aCE8!DC6c(clz?l!)S`P=Dz(vbEBI1N2ndxv%@;1|U+>LkGx1=W}1%=e1BF z@=#Dk(7YB9unO?3975SQNZd*y=kfuM0evktoI&mE+Y@zqh&qS@@_6RKlzKbK^LSvF zd`*_a3LsEY?Azd1WN_iaZJhUMz=_p2cvR2M=^b&X740k4Ux|`sRC;*$86js#)fFEc zygt^~>k}M%1p=u?;m^uS&nl>gD*U&fsC?~ZqA=#S@C1v#roA$k2N$8B03%hvKn*Wm z^u{i{MQEwuF^}83e}6OD+g@6Cu%erZW$juffMuew9zA;WS7quZVAE^ZlLMURQ||)p 
z5Uh!}6V)Gzn|h9u0~GLwFqhS+=fTBemK_m@nR;GQsFC&9w8`q;RcIDh3*~^PKOO=A`4P^B5T!;WUe(SH?KyX>J8b00w?rU(XHi-2-ikB$Op< zb}mBZ^_P;_sEjw*J2)7^dr0YQy2vg>IQ5fS`Sw#E?uxql&}4Kt)8{cz<@AfSw}3=Q73ku{i=*iL5+0%`r(Xzu za^c+W-SbGf1QJj)T8&Zq{$$lO1eNFy($zfUpj~EbAjxc~8o{$?@@i{k>y1&%^FeqP zSmkO6Y1JJT%fz_+C!AwB8{0yZK6je_SOO7X^PuieNY~3*S$*4$f#y&pzZmXQ)M^J$w~Aka^i5H1~BX`+I@UYXON?O@R&F8ifPmk zoA6$D8R-TPi{m*?lBfU%ccy|8&1YaBHAy^nLjch-c6Vv)FId;w{6|3*bNc+ ziP$cMfnKi*I0l7!f*5SfN)}Z_1ldI-@&LINAXAAzJ^gU1Y2)#b9M{dh#jWK2sc;3| zgq<-lF&jigScvS4F879xj++nzBSB{u6vQecw-o|gTXhE(oqCC0-ta;^Rca|Ju$jZm z>m-)L^aKn4^tT=$>IgJzw(dg*4nSak6pq;3;fzeFilOz-2qKvwUE*i z+barI-^xw6RZ%;Mxjl+)0y}+Z z3?MCAkv)!8FT~lhfi80+qx^XJxm)HOIXr2t#N5wDwQx^2Y9(aPr}+u%^} z(*96kCn!};EBFCxNfSo9V+m4&5!xBh*K<0lkc7oGYi?m<8_^F`03-{N+vL>FdsZ6s zCI`>Ulxc6-yLayfLBYjzbaaGkLoB<34L1OR$!yz$e)Kg60iSHV0d}1tda+2TvfY^1 zyh$)g<h>qMNKJZUjkM@gZKv4xL}kT|A4~S*oyv zg#eKB5V2Nsa5N$%kdh0I3teGc!PkJ(gI`T#aJV!d2M@2?+V(4=E#+{k(O#sh^_w>@ z0!V#{LKu)}s==FiSrC>%47DzY0<{v!Ujc;-6uVohN5Bh6wsvPs#~lh`KnCXKK4`hx z0ErcEeut74@oee@BMXs5fMWteOdefCphG+g*{mD`izkgs?u^k;?+WpIi>^u$9$wxz zH8r|1BH+3F7ajx$6YbcfB8gK~RrPL2$SnY-Mg(06pj`;BDX7Gv-Hx@VrzevyBk(A? 
zN+MFs3=G^Lawx67V~ddF#kx>4*xJmnXAXt1{N*Mf<8)b!-QAv|M5*<|Eb zIX(J@Nw?K0Q4WCAOUTVgBpbGETMG8VA1>#73n@lHUw@`K!h{9tuMx~*cT_MGdK^Sv zjLOlsgk#K@oBimY4b#&T=15bU(1nAR!#(!)5-H#-V>c2tFAug^6;L(imE32{yu}G$NB+va>5hMeo+& zOgO;5F`m32fu=jRr}2kK)IvBmJa_J9OUo$&TM)Y6LVWH+IUSGlTq%4qD-S*J-^QK( zqHV}JZqPwU<(j+~c@xgZfFtXBf6of7PyF!&J`RL#{xk+=uG{eTM=1V*>Afc{a#rnX z*f~b5I8q=7FeZbrurQ)ii-Q_m27PgxuyvxrOGt3Tg$>Qj?qCO-0J{)X*Px|}*d|g1 zIgo@JZTV={xu~Y3%1`qwt*x&=di00}SGPBgBD;`j!x{}uO}7ljVZYE+d=n_n2q#M@ zx4UYYF=RMMmP&iP$#;l z{{VzE;Gvg-Uyk!CU%0Rpz(qKXVc9xQtb)qwd$ya;yg~HW>oCUQ8g+CbLbanlglI+t z1wBcRc(ALE9k3_rI%%A|$r?>X;uMt$iY+)c z_!hwtd(X~=ens{qD}qrFH|Z#Rqlhkp#j2rSR|0jH+m=k6aIrjU550gpDk|!h%rxx2 z4~E%Q_2Pi$)Nc3%vaBL1ID{+{z)z8G&7EddD^U#nHa<``c$Ra0!k0oQf>yFVR|W64 zd_#9tM*7{{xsGTeDWEZey$brl77OF(*jTT4W{x#$;x!w0 z_gyA8-|Z(?Q(H?ajaVNS%^@Da>AD=%kxMUd3g$y0wfNL;jR+_B`=`4s^=HNI<|gN z{`ljSjcMA&+BM$Z-h@4%3f>5r8F=YdTW#AQ8rex`rcwYW(a(8c@FkjDQPm=ykVar7 zuyegDB0ZI<^b}g^8dYdh~LV$8TQo zdxz&7-9h6mJ=J2FX~KHIa8^I){IRW64g?LA`lP#m3{Rc9j)TYSFIi_G4e$m}hJMA$ z?2WM|4@odJa#SlRDM9sLu-{Y~{cY;`9vjhXi$gdnCTNV%i+jg|CPxIfM@WOC*vjjS z2YBD3%kz5s2~>4(D(z^KXWAy|U_l*Hin%gvXOJ!{@N+XNPm)V&|2#CoVR;mSgUe1nJH?0|oq$_$J8#~; z{SH#8A|tnCM`tJLrHrcYKuBKontAy5g12lvaSX0sr@3|cL*GEsO0N*?4+6IYx@jf!{tiL)46&$p{;2z`Jl{(;!6iN4`a zNA2c{-Wx@%tgK)xUS8R=T|`yxH*Va@skXj!DGx@6C!-^atA9)B$Py zrQ=9anUG1{`nxYaoKt64=NG)AqWES1|Hapt$JLyFfBa(~nHgL5C?rXi>{*iC&DNgm zOSY_K-2R97gwRjj(scT#}*P_J2@~M1UK-BTJt#gJL?A%;> z?fUg+c!~|zZPI;weFL~_RX*_?f1EgR;vRA!H?#{!2H0%w**D+I{_EGT{k`?bzt6

W`L(y* zQ~7wEr-&u2pO~->K4z=WT4d9ksl%01fBez!V7(U8I_(J>#ZvkmXiW{(26C9{1@xP2 zQ-lRD&A}ZdrDs+A2mjOjf6$yU$i-%^xh34z%P)Iy*IS-vAzCnVI6&TD)yvH6;^l>} zAm9++-rP9C_Eny$saq$BYY2@SJUj}HcI6Y$jbM=2?&oQ#@SuFL`u5+7KYXY&BGA_B z2kaLYvhc%!?c28pg%4k>wP*N7Zs*?_8Dr6kfv~eJriB|9D zP+5O%+g3$^3pmJcEdO-Nv7X|V)XJGJSOwjW%6{Qm$m{mw&gh-owsq@zXTvH{Cr_^5 zv!@YMhIg}J-l?khGrl0AfT8T2^|fEWela0`^j`MPyvy>lJ;t706inO%QAh-|yGM(H z_&tS+LL_OEw=DLn`-`?(bI*4W=>swQ!SQ5zhwBs(1tbPpwUj_od3Whsc}+IzmYvm5 zn_@vGwx}~ex58~*#{wGT;RmbM$pWwW07f*bX9^F=M_emmP$0zQY=||oq^gKL* zdxrX}`AN*(Z#8OcsnflxXW0tK^z=9vyNCqh-hKciH+Ofa*}Z#r@9R6R2=_|6lf@0y zTGgv3es0Kj!Z_Ea8&88CJ$g*yCXyU)Ae+7(XHYsC5ln;C2CpB4@3r2dEf?_Q@!|a< zCR}hQA^W8O=j9$UH@EeSKl^xk-`n9}=K;$lVX$)i4Kcv$zlo(k_IZmc(*?li>1d5_ z`}coFs78&7BgEKniaHZ4pUq1_AJ)-Q+cI85Ok(l|ZP{ydcT*X+@)%#R;)^vHP+eBa zp=eeuZ;Mtf@(z&+Zun%iZ#_(w*V}LYJ>!mQU_=$w-$ti9Gw%yO_x&KJD8Bv;zv)ElPOoP=^4t z5;s^;NH!@7ABFoF=F;jj`CrhY^HaVNPpn-uJbl^3FF-c|#@Pb(^%)HNvmtQ%>--bp z;o(8skG#=!&wWj-A8dOYQh=&}K?wTY!UDsnH+V^9_~KQw{9ms=fu9KYgtG)Icbs$6 zFSP8$$XQUePoFl47Q|Pj?6PKssM5y5RL|ubvTLSV0t49Bdn_ z)N0kHlH*}_Z!4s&lI!u_l@$Se^78Ux%(igEc7H}mSG#G`-}Lg+9W0aL(9luM@CQ|K z;--Q}|6^UeyuG|CLq-K()o@(_QzQohj;xyw9A&rS?>v4{OQlO{Lz#tkJ7?0Vg`-E0 z7F~{V3_~wj3tgLlpzW7GwGB8v*kG}86Akv@gR8Y$->Y}Z=mH%Dj+GO%LYwMmT_mGd z6c}sUh;cFZMtju*uRuk1a6Ad#xcIsyR?`n#EhaW!hp4N5`*D9k1P5>&MN$dDH|QHx zZf$#x+!Lahw!WXUyHM_z@8$6X&Ep~$yA*P=t$NL9<#omIW0~J>tldF)=YoQw<{)G}G4hn$dT}qrS=YmReY7`So2JQ5bdj zkK@Hx!5{vndSPa3eSLwFe6@y6^ZguVGX{Lo!Y7Bvn<$XdK#v^-QzH}HBUesHeL=!1 z=#shpvuuc|;3;0V5AYLDNGZ>(DL?!2TkCN%6^MIX;RzuYF-e%jwv zkSU2#Hcd#_S+#1_04+WG&cu7)Ru`ZRdw6PY6UDydA3n3_+}dwuJl+NzaW}fg@-7 zk8%WH8kxGW@vq8)m-m_rp44eikD3MmPJ~^`wYsZQ`nh;p+1cmcHFdGG`BGjk=mH-d zv}(@ezTdz&MIWLZx4)Y(|L`Y9!nmimnl@57B|5H&}9iZb-l8gD%$hr#CR%$_0 z*_J%Aeow6yBq5iPdbk>>;X}GQh6Y;hs~q+!X8{i4(r)tADNg4&~k!c?6m;91ry7uTP=b;r(UMZ+^w zXDIL9y)z#;urfM3ti`VUQrmGwNNWb_7dt1BkS;)(7QSr;L;+PrzXAjGg9rBy4Xr^; z9KLN-piykXjF44#9X_2nI>j0nGQfmJo$~Y;dxzW}icb%h76h*C^mFEuh$gtVATk?> 
z2?Hb82=f#&)I<|+k}u@Wz;F&7KKvTSKWc79H_Dc45_i=Ozcx&B7N%`^R(zPyIF`TP zLQ~}D%>mnUWrO>G!wY<^5M+%@MZrsXQ0;~c?x$){l!7ImVB-5h8hn`-ZU#%}ps-M(wKf-UM% z@uAyr#Ym#SNTgx0!Tg{!rg8s zwV*Da-%X}GCTc1<$ia<4(T9kYgojU_Jk%NM+IUoH$BrFE{`~aSD=ofIwC%?hNxR?| ztj2oxeUpb@O6f2o_}O;vtQiZq0ff8NfJziM?#Jb;S6e#dMuN9gPz3q=+I;5^+n;>* z+O^88$-pU7S|Hl+(CzC1Dbmu|IBa0YY=TTe>#AQ zF2dV(C)!2|*dc-oAO-}t8$}k7w!Ws)-}M0@O9gYEaba5#uUbvN^yAB?1{wB+GuI-(GfV8Non~qtyAuK?`686dISMIGYTFJ!4 zB^Yp+X@^%2_r#|<*Du?S^uyEm3qPZyN6r=MiinIV`f2;ew)aYi%*tf$jQE@0PrsdSdZyq(G zjo!wiM_WrI$@4q~mbwnox!w$!U&9^?^aP0q9Cz0LJwV+*IB+}b`;>>OsBn?B@T*BP zEz_;3BoF+>5L0wJvBI@j7HKv5G}eC@O@Y@SC&u_EBBxh`|>IX8jV( z_02g~q!k1GlYRx^zQR#uo%>_5xS`fPd-fEyG@ejVL>628X1{g4Uevn|Dmbq;!(Ie^ zs5}WrtsU9OHzddZP1&YX&+MvsF{4? zb%ANmo+0h3@BMKxhwcCq6;z!1<0)MF7^4i$_FJU|guflO5FDZ(b}*4DQ2ImYkicBU z`3V}P7y2Pi{2OTc?z|0`H+zk_+kNx-s2smpLz>gS?FBsq*-!9~)pT1u>mczNv+Pg-&;K6Mc*v;kV616_#A+0jr z`h$oC5!`L+zul@^-ar@Yk#}!jy#gQBHsvibOTekK6OXE&@ij?>@b2A$!*Q10J>Tl4ZymeS zb%Fy!W`d>+Ut`v7IA2=y?-J=Y&T?2(nzv5nslm%#Wkr!~-H$EM$*SPL5eLB#mb zPYILK8y@Ot8ku1J;KFd>g9Z*Byz$=M{L=SqA%r(S>RJZYNqSIxxFit{fzc5gp=#ah_gl+JK4#w8%JHym z(vVpX(A_6GjTQ9f$dML=+-T15wSU9zMeK;WMad6ZgzBR5f-h{dILv70Slh!rV#e(8 zzURHC*4_ee-z!UGhQ;f)JLB&mR!Qu^m$!A^M^ySSBLfn)nPkO9(1*E$=1#L;^V@H~ z**U-FO*h*=Uc)G&q|gIx%hC<>_5VtDpBDW=jTARIW|%vlHd!!qgab&{=o>ikiP0IJ z?bzwlL$F}c4G%A(7_E)-#0x9zg0o#eb=FAqE{u7Wc)jdOdU|7=j&?e@oONDl7d-IO zvsR<6-PvOIzEh{wUE~&)k7A=G@jlh5VTCTf{;w8y?bOLS&nfPUVcEHZpF6h;y6L5z zaHvNoa+OwVfY%1gnv8vEBG)~y>bQMxqBl9`s?vjN=}Umi?##K z;x9LR;$T+={L$R!4V0s!6k5Mz+_-%&OSJWg&om`qAVzKMOa_lH-Y(~^&QJ(l{SXT) zPdriew`+r2yS1I*P-E}7;McQlJYT1Vbu_K}`yY>rv-SlpYvyZd*{a>N?Fq{xt)4eq z#`?9`mP^?WSgh0i z___>sYbvVuI(7cUKIMkA7+{;uri>_TBp2yh+~W&z#pAwx)4lZeyy=0t8%KBD^ZZ^$ z@LbpF4{O~yt+S=Y>i!4%`;H1&WtsC)W%bfuZLQy}_SG5byE-nsVY+^f&()1l*()#g z{c`^J?Kw-9&0TnILi_P6E=*W?!7rgKWA@5Rr;E!Z_S-N!25@PvXxkXQK(@D7QLMD}1sD*Ut)$8228JbqZnpQ=MYH7LY zA*#ol5zElE4rKMmz0&w zXq5b!V$O!Q6x84KoU@=~Z2*I}mJ>H;xfo!X-}Sm{en7W-q?zlXOTEavZrNpE#ky79 
zUq&a?#KiX=sYuZbZcTd0eK*85#?3}AN`q@_l{Ig4yy-9F$!(0wztQB+^i&CDc`s%i z-ddqjRi{18%V~^uCcCS&BwDPyVflf1_3P9LpyViRoq3-4!xZ&CtZeicSx{ zw8Z-(@-d|j5plX}#}PNA!VSA#XfbHAg@uLnf(6H*7^O{*HEGJ(?5+ehoi5f)*k^hO zD&_eydi;Le_V#bBdPXl9L3DTNo4xfuC2Y`VLaODon_ecL8qh^LG+!FDfYRQX*|NLV zhoY&JEL~sX>gpQtb#9n_sEOgdyLU&9>QiLvS86*O4qoGjXF0QGWkqN&8DF#Atz6sG@c0L+TH$?GzvX6mlU>f26^XLkG@;D z?ziloUdf^>K;E|Qk3CbI?Z{KQ(tIHWd^4fICo>N5L^6@=VkWLRWYwi(yCpxdtsKhW z#j>w(Wj?uCOHq_9)ve#keuGxMF6h*lZ)DG9O`_1fgRbrYx_K?Oe*fA)7Y=80R0}DtKfbEK+hKxaMqA6#&A$jqpyylT zZ05w}dT018rSfpB!Mi(md7=}qyU$H8JoV!7mt~zhb@HO?Q*J&w!Ry(p0lMBk>~5)J zRd_J}L_)R7l^d2(@sq@^+H!HEarlPx(_6M|p=x3N>xl$$7%yuWiBm5Q^jvHqBR7g) zXPN@C-LNkDl;SW^n!}y$vzFzHnQ$=YdFxkuXM9Q63{XlqHvQLU7fs%kza4e6JL^(T zd$exTr>~`s^F+J$QKwI*$_#>583of9B3+8S2;SfOw_0;q_FayogqgnCdT&4l9Hn_t z(S_*I#;!}3cKH zo;=ZUIA)UVqP>yS-zlvx4EL_`CRx4Ig^WdY%D8?fD-Ou~(%kn{Wdumm;1~Vf2^L4} z>tSUdY6mWL4=*=Ae*eTliE+5|X!Jwl&o8qAOq54_JK8ws*|PwKF5OwShQ4S8b7utf zUGm!JD-&27mT_g4?Q(o+brwD*6lGB9n{#g$A<>|>)le)zyJOyG+|>q9{uQ^@zO=?u-;OTcGIunl(-+nOgG*T1k`MU(_Waed;9iLyW$pR&(DXm zs=w@y7MdGkKKbVD+g_LfG!HvNQ?6FvPqqE_w!YAZB(2@VOUu>c=g)s@JaOUDr3sx9 zKY(5q!707y@pT^Jk5c-Rw^e=k_|b+cs)e_Uv5haWYNyLrj2Yt#6*cAc$`vb~_h|O^ z?anV>FHF)u8dt+hTYL4FwXhPWZ+m6{z4dPwaYiC?Cg1)QrHd(@;kSG+X`;pO;kM4s zA)-%``v#}viH{B>2)%gkgOoXvGj;w1PkDqyU^Z25s$iC=p)mEG=~(6G2i*RFPCTgeD%+O-XVEryZH9AU6Os2tf(5ElvwG|UA~Ufm-~Y9V z$&`T+rKU&<+YK7T=eK_sx6`3}&3n57sW2XQC*&K$(i&|rArMNU5lkWMi&3|3_S^tj zL(;==Di*REoM2%?F(OsI>Div?+n_shL+t2i)_2>1x5PR#xk_U$t+@bsb(H=Oq-iRC zHU%0n1Yy?==jad!Ip1z`$N2e`)xdEhm$g1>uyijVHX9M_u;LAMXV)V?1YpK_YM#8b*bm3sYgR={s{)x`>5ZXw7rOl|&N^{eW z52Ag0N3HQ4IH1zwHZ>tH#xeG&VX5c5)obp;5~t)(ubqa*f(BskH3{T6rvQKeF8&bVRxN`JqVSFZD!VMX%Bb zX0`Q4D=~dYrQt80Jn=_{(5T8gR_4{is6))9T04t7X)$u7Jtm!GDU#eUAHBo_rhtB% zR#M)%JFe~!fRaj$vLuE2mEWNMXfbpT2QF8E3NU#z@Op1fpkZEVmOsOue420t&gVY7 z)7DL6J`}IYDDFx*LR~oTyv;;eXY?4iHO*ol3C_5!0C8M?*0-;x<*=|F?IxUm*s$)$ zmxosM^7Qye3$T59uY;M4ggOkCklL_P@quq&h5#pu*{;{1!BMW8SW@vET3SiWZfZ#g zLr+t48PV4?PMd0oi8N+NL}Isy(nwS*ZQHdQfpYQaMIxG#&!1(x)q#M!d0d%>Q 
zv#k9x%6p9+n_=v|ckNQ-QGB;v!=A0-u5NjgE zJOu0^w!yA1s=e!8vb_BBIUp-e>S!t=Hp3gLvtz(c#Sh8i%~>0~A5)F^`R&6OcOt_} zrbUnG3WPs)SNXX;WayxR^=j4f=hDqMXEB5Z&D*?3x~|Y+;~Xq3+-13_3V3vC6tFrQ zf3xBFS=X~WW-r~3@V!ZF)3m7Q=oEC&DS(3+ekX`*E)*sW8=Tm02`w*cGoNJuDbK~Z zk0H`#loYSc4?lg9gPn0|&sg+L$rKnLFCSx7*jy#oS%z!pJ`~88faFC;Imp^9PAmTP ze1R!FNgMZA;4kw&SB1ZOhhNC;(Q?fa<|ZF_aV7QqOHKtXH$rI8YSg>;`Fo{psx!zf z5W>H$q2W*}U5_};Ob*&1`fJjdoX>r#`xIb=4uUO5cYigY}Tl0>(6-@jvKY<^tfIb9QbEi%y|)6E=aAkPf`vJs8){bj;d-) zfCntSEsOJx9j}J@2_QVg>f7;C_Usp) z#||}X*O~XsrP&SX+Z*R;3bl~oqoev0YFj%w1+Mv4zEmOrfLU&kn!!QMqp1f2+alz; zVUzJ&<1Y6kV<5zpKpSK0;8AJauq_bYVmV3sJgA~e286{4U4A`!1#NM~9}XHk_~xBE zBcKqSBecu{^DiBfaRILq4{hS&)oau!7$-IU;wneG5L@|{vg}|8p)jNyz6{m+_TpJp z2+ZhtnPa#JmYAtEWd+^GpIOk4s`Ji%oiQ>2T+mR0tEHKv`dwUuoEN0Lp9moZ!Nu-3 z?X$vG@*mP)(8}DWPY;TILr?F?sZ08MV=<$-5E4Fp3W7Vkw~*3GSPRt=fF6~qah$Q?^% zBeq`I#)Sz%`9a^>qvUmmbTZ`1A&@wNg{J$e1xeFA7Ss-*uwW}%zy4}VHtHJv%3)SY z)YKt;Xa_B)3uLn^bFs%YST)odAU_33g}5Z#g#;8xr2H`<@hK^G{HeXUxf@mBWB=M- zF}B|X`&$*>EJnR@i9(-f3UAvv9v?{M(>+43tuOAzHCs8HuZ*Pn~)QWYSrJ zLsD*8MegD&>LzdAxM9hC%+1X`j4_yvaz4c7=-u%}HVk~C&BQgIhJV><(ZV@%<`jZ*%zk=hL+SIq^9!l&ZUZVK2hbtX zkKs3A=zcP>0bI5e&Q-gK7sug@ACL!x5P}=FX`_;Cjkf@Z(B;EkdTeOKZ#V9_C`j-G zj`~Yv$&E))0j}1k zw1+-4v#4${9cl5q_R`qthiiJxBX;Dd@5Ga|Kq&+g@+o5aeQ-1Lyek&Eu{AnJS5n?o zjnoX2blcm?z~B{Og;!{3+nt$oZnoa$&EhOw=WS*=M?MD%bcg=(l`C!DNxt^uW!}b? 
zB9;+89shp)eeQ=><5SbCUA{0GPUHF#0?^wvFqr@QWnO(psJ9ubsnx5RMGSF2trPcZ zMmWR{2U$afU-A2o-#WAVy_u=ve=OJea80zLQ_DzWHq@6bxqKm)pC1$*~tn%VR7ZUGCkKc+M}u(Fb}pfz3MFyTAAPA51l zv@3L3xURd|HBG1XFK1Vk9M`Y9xqbt%JGDMaP%Cec7hVS^IjpTv?WVDhmOu zi@HFaaNV7XV&T4bVuyKaGy@eBy}o{{KRGfK$l=4MPu)Cd8Z?$mVQ_?Vb0Y*8>pFVr zb0gSN!!~a|eM}iwPcuZxyx+Oyt*X15xmR=NlrA5XsQFC)j<7wUXbRFVtUOHyA{PyH z(?~`e2c;#l9=~>P5cHwMNYU#)uYH}sm*VElql#&JVWH+g=H%v1b8S4-8r0PG$nZug z>(QfqXtg+EWM#$hjl5l6X?{%X6cba;;AQ_Va=FAnVTn&1;;o#(Cu&SByIw};u}34J z-e{+5s0tY!6`*NzK5>}lDcxE$(AAAZu`pCk`-&hoYtO!4)Hyjhf#ix7YI4}vIh-~f z8u-kD^Hl3^iEV+>a%Wx5y|Xw$*yAHhr?-WozD9L*1`isP2LfWyx}u^zgWfG$aNDg_ zABu~2U%0S3BJ%L}Z@Ht4Lb`2s`m0v29x-y=ULe%WhZCZmy*H~kRO3Ot-miEJps@2k z-$V1VESz_~uOEpr3+O9*zK_!Ph8fGtS9u3$2EGouo0cX`|JC~Z#N9_7Z=k5NBv4WZ zO1^;9QS(F&63a$q(q>O~{`+f^$KEVmiMch~dKYa&p)`q8qh2FmIo*5ws2>ZJdST^K z32GRgd66)9(h{9V*D^E5PaVj(hqdXTmhqeU)`{qYB^_2JNua_$&2FEz#iv-RY?R=%F9Q2b$$g zVG!G-Iay!M?ikx`&zDvjQ*cJb`JnyBV(Q{mNx1lwBO{vZw5g!DZy}w11=nFuZw;(} z7)XuCA*XBR?J7@BNpuVeW!R_Bcy&%eLAW?SDxnZ9cQ$RRD292&HMR(sB}(BY32MQ! z%~)crn43uBn|R99)yWi?6=a@sK+U>*)|8%YzfUGR4taN<+c57nD9BzPHvJLpArl+w zcV6>lnbvn-89RXpje`qn3g(4yD-rTzxD1uC*g{J!@j>c9qyra9p6}J1fCkiII9|`B z$gEXl6Hw+20)%=nL43d8Tj{i z8*}YgRFu`ENs?{^Q1_Kj5_s?lpRDb}d1~A_W`>i~OwNSGuwk7|>nWFq3la1(?1h#cwZjp^Sh6)>vC9VfXFlB`Odo96V`$2U>| z$xlnSrr#;A!-L2W2!x%&KD@^&%!V_SmDi5u1_gtQ5jc60P=dxvlcYUOH3yd8bDJ`a z4|7fo*ua%_nm<1hIZ_C}BS%v^N+N9rDeTh^rZI+GGP-xToW3)ngQwPS04Efe%%q^! 
z=yGGt#c_+LCR0f9soyXKn=taTr?gPat;)#pi*k03GHTg!fVvA!%YPg@KK{mFLf=9< zT3QHxRK#H7JW78xPkxx-i5S)D`d2d14>s?btBgDYwxf=l2~1lG4s=u?2t8koWzeoG z|3b)Rq&n-G>kfK-h%dcopO#{&O<8C%*~$JsEA7?E*u(6rMzUh^S`Z3(6N-D)3@#Ln z3s6@#w%FfrpV{g$lyr9WD;~0~@nL^||5c~Q0*Js6d4(4g9g$FNH4IsmL}Pg)e;+AJ zGOoWxU#8K$dVJv=EqUg+8Y$*-XlyJkJ!yNuvoS@ZknL&mwvuMo7IfI+;qd^L=#C6E zbB6`(wVEOv2|o2_5rkZAQ@}YY8fddC7 zta=adN3g{yI(Y2Zu|H>OW_()yN|n=WvW^f2JL=Ek4mwV!`KDKUjQI+x`lAt>sH_R8 z>p@bIBlv!wlkE0vGM#Eyy!*SFV0v7ugx9pP^7r8!ghbQjr6tBCO7N{@i}jYVFa9oAg0eEKrQ87hQOl&FGFKn(|tCj+c)$Ji^(M`%81r zaUkT~di|~y&&)ZBk?n(NnywCn)rp>;H35j-grLh`PqbLLI)NiwsYZ?*DXbSQb*55Q zDlhl#Or*4x{%ts6{qedjPmm~;qvp|k&)5+}-zNc)k#-DK3#C=`I!*F?`CIMvlJo;; zS&>QW2a|=dy9bh~`l(nDXF$Qj3B4v@$jQu{)}3l#u9Eb3!pefKhsF?`lRhw|sP;D9 zz7r^A+^rvT_N+6p?DZxNhO5P7P<)j~_NCURm4F=35AWa0_jxi;xQJ1`->;VfIF%8z zh6Pf_N&kC{NOx4gRjGg?PP1o+0{h+o@TTdk;@>d+*yMHe;|`}E!B9=MFX+F{=gMefl7}(v{31ydSPsMx!lY|E19J)`h zA3EW}S)|AU)#RMrM%Fr!CIN&K-g58IdYIgk#6npx3LVh{z!#}ylz7o?iZN;*R5xdr zy${frGf>Lp>*S-ObmfYCkZx9lGeYWfty_(c^#05y*)ove%FE;L&@QwwHXaUybp**H zMAMQb4`5c3FRfjR+pA}PT0YYKGE_q$arsOrx$99x4Ksn@xw*SG$6IiKNJ>rF=<46O!L}sus*>{yLfa3^aUN*y=l`BzjPwYR2J0fbw)mAq3cnUx2(oO5Qc5s98X(X7mg4&q4%w2 z!6fJT@Mdjpzj%M8U8!dvYd$}%c4vPt2Vkde z9>pALm=JMSoAV}ZgccG4jC8w;{YqmHElxooCi51-bk2Ik?GhT-``E;=Cr>7k;x~-j z^jni7HO)SogN!VncY>n}GU4R$g0i?qZU5+9`ts26(Z(FrIes)Q`2O+_6sj376)C`} z@cTz7Qp;y+c2RS&IZsj2RQv!2DwIEOO9-Ks!%Q499`~h289I5%#Lt6t)sS0VIk%%A z%Dq#Zz4d7rG!vw3xt~cbKFQ4#v4xN*gg7D>NROHTl)_b%(HRnr@utJU&_#xA9pr( ziWP|J=zf}sua+TFmqnXdUn=|OFJU<6q(6Zq8J{*As%qh`Y_7?x^ypCeco8Y#my`2|9-4oj zct$t1-lnqmBn%-C+t)9GmlJy)hWBYu^2E|>oY2GQqqGTE3JVLzUVQWFl~59)+zJVC zxKn|oEWxBE27y)F)bQPo$PX=s4z)yY%Y=Y%D&J-}9?Y)_e1^u{k(|>3pmX@pm7%m= zqUSF&f5vc$gvX<-z%RQi!I2Hu+$m~w4yX-5MhFq~pzlL*-TtH>fiVpaFh=pIsSZ-6 zLJV|AXv^WpXghRgpQ&K8)=RFge0%P#S=V`wMiZNext1p-ySRyIUw=}u>;xm7@_AJe z#m$0r0L~vqHSpLqV%<88lSJ+@0+yk3^YB3RxWXu47ix8kC(b>OSmzb<>My3hr2Z27 z|F*Og3;7blD&Aoux;qJ(N}Q$FV#tIGrQ;_ilz*;KoZaEk_WU!tYoE$KxHQ%LJNppP 
zwtaC+I2rCG0Oj6hYrnXp(B*Gp!PCdEQf*e#z_*mcyodz%M7DCd&ru#z(h?cTTqmk@ z9!m^Ccp~f>$!OT%!movxGxm3yobA3pNL@L0k+vCM~ zeY60)empptEILG+yHl%Igy*whC`zdwfWmcnSzbXoj!#SWY(Ah1o$*(V`g5FeBxCP> zd293cMTtXngE;}m(V_R!vgkp?a^d~;CM&YHbw3FNOR2gMJ{A#liBVxi#VL{#>P+3H z=n($}wW|mMu2iOSpqp;}0x83PQ}i>+0T* z4AuP$Md`d2v_?V}CfN{;zY8A4*r>JZ)uU~UzC;0viXdXY->MI)saaqW2n=lz3kX;Q zzIiJ?UWIPY3aOj?33n?LjTMoAJ@HsB9Vx&|;VC3`lyHlZ-46Uo_Bc{z`QxObzpoSj zMi8P%{$$vti>`6 zdh1r(EYo?nSFJkDclfaXAcCxt2%UCNt`{Tz|Es&Zj8-Elt4>$=G9LL$3XlW|{ zr=nU6MeR#q>QO*SIvdOF0=kw*8|8I1wFfM^Z1A zji^3f1c=D#7(+N|Nf&-bK$G8p|D7~or#?B1I4cT*jlI6^+rOWc4v@=9tU7vfAUl95 zw8Kyn%$q^|{LMvWYc{B>C>DpB90<2(>G_D=r%#_IL!P`OOec-@^3#2r{u5z*piFfD zy;(ZCn^Wv22a(=FM+q)~i?&s)1ssUDtxmL&#$@{!MvVz4(;pY5yQngdcp#}PBHGU3 z-`Js-geki(UAi<24EM;Mk>5d5Vhd~HjA5dM;r+FnHVwzd^PuQfjqJmM2xA)#(j%%1 z0d=a)pylr`?IDB@iMYbydFMzvN|9vczIx?s@l8?gTNQFr<@MyT3)lV7FZu+wT$|%# z!cX_fx_fsdY=@C@N1puYQvzwAddyb&w~SYui?>TE)%g9B*yGItLIS;DVz5VxIf= zmsSQQmuUE8|F?L1vr;fj1b?~xY42#+!t$S0P)t>XnZVZDT6od}gu7)NH~Z$UcA?_Z zrbH24y^M-$W<#FnZqQNQ_=#V4WPc)MA&v;7C;M%WN{V@~j1TKW47%aLMVdHBu1=MZ z_848otKJ0a^Ke?cbfcaQ=KDK_21Jxyu<<6|ZIg z?uq#tn)K%5`1!;x_Lb8vt-a*BqAZ0yq;(ROoWKGc-MwR1`BeWd0#PAOJ&^lQa$aJz zc!b)4wxA2E#DRc>5xZ!e%!KRC6OtS&r*(2Xi_q_%Q>$oiq!5r;@{VCex}qdY!@l#^ zVGjwScaweafjCdh!{HyU!>}F4yO8X;EtXB4gJ20{H3a>Un9F3ea0Ut9ZTa+AKt5XFf=I1x|(ZowM zso`zVULHhh@sfbibb<|%6Ts6ZgY?iVg%I=nIQCux;k9U{)uk8LHr5({Bq*hT9DK2R z;8!R5L=uol3`f6le-<;-ZXy^kw!#ZH(5hOsO<%56RUY*+ER}8J<@St@2M<_ruqVP9 z(IjVg{lDSk=gX3^$ZK9UR0L9qLwrGDsHK)F<0FMi+SFdTxhdS#n?#0m+rCG(`r(n( z<`x0|2ayGpY^$l9IsNk%#X15Kk(h{u;)c$BeBKdkB>ufGZaA0_j?_$apv1IxoU|XC z&NIX&knssb#m;WnvqkFzj@+F)oWS=q)J2Gi-?bh1V~)JyKa<4dCH3r9xO}35 z!*ubOv2p@F4)thv;naW_LM#tnX__&Q$p$RvlDLlqKO({s&xbT-`5L00zy7`4h9#u_ zJZ$?ziaz%n68+Zu#$fXj8e+@_oBTye{;$b%`*XLKS0L|X#DNDWIbRSG&sJ&u@{1*| z4c@f%%9Ulki#|RWJAW2+5vG3G;p2 zoyj~>AC3Mk=Xt$;G$(bJfx_JDGfaoxt&Qvm+G-D@uHD8;L!-Xsg*)W{&=8x%=CP{> z#Tf#I(0hu09K&k%DYU#;%0Zst5vwB%0{nolXI|5&pD*-nkjY zrsP#iMG71^8oM>H zy{K^sEN2M9HLE_!H1VIrh3NAVsWq5q8b=VyX^|Y1)P< 
zp0ltZ5!|Y@*FyR@30Tz^{wEvjXI*^#p3Zk^$cMC)QertW8HS#K9S0V0#Se7GB zqziTbHEK%E2(k|riaRY5C>p)!M7k_o*xB72SRR};K5VHhl4V7q|68_PcL`Xh3O_$U z_*H`WXj-5(?YYP>adCv4%V7t5C~Zxp)xv&N1d z`DwdrM(N4fe{~o zrWU&2Va%BHb}xiQ2bKmEx2CZc0<5NW)qK$F|9sFY#3e#4eqTg78%OSY9B}IN>D`PV zwW>TNYP1N4{tfe<%ws#rmm6+qM!KBl=YVpE%7}h+SrFF`#AA6)RY5WQ^mB`RS|YB& zwzDUE#$q}-vPHC{=L>vauVe^Q(wN|Xqq8&nK|!3c0Rlykk&#E_%}Jn7qviM0nsRFH zSkQ277i+jm?H?o@yBs?JDpuSf7aCJzU@+J|wCGeS!FX1_R_pB;`e;yXx{aqkeu(?TO%+ zgdyj|5D+Yiw421|)_dRoi%R{t8G5Z*f%{BtugQ0L3Fw`SY7?25)y(qDhO1uVk7lkW z519V*n$S|wE3;!VGMOOKV-af+ZcEvaW>by+v#E}ZVjyN^9f%A@5K5R%$Yz!Vx-u*X z|G|@4kOlp~GZw5Pg%N$*)GQKFAxe=%wFC6(9qo-)ge0L28h>u*^>1izo#%X`ajl4* zz{^J8t64>P(};{o^omk*1OHPF*_GzUau&;>6$6N|H9SMCHt9dlmebd^Bx-EJz#TALQU*s*qir=s?lJiTWVoywXb{P*o@zzTjv*&kmo33) zHGDzPpGRDeIK~S%=3VfK2ykJ_QJfa-P=e89ow00ISMeImzTAi943dbEvNAUA?x)@~ zen5@c2JOT4-XDhUdf|%M(EGW+US|JK^x(5i6E|ta(-1|(?B&ImzNIke#0h0D?ZFQD zXAkaoBNIWR=-9}5&^J+GRft@n{=h|wVYEmAa30_kucRL|vBG>)T22ijG+_Defy_1s z(vWRlFzVpHDUW3-if@f6sxTBgfiv8l-IZ;1Mx%o~WZRTlHEVAF5l$6vgpdg(Jaq>L ztyHtOKdveBePa-Wm~-cDktUFKd4s+>r$&Tp#6>7{Ir)hLYW5L@#xS)&*eq(jPXj8t z)se*Z9}7xw2n4P7UkfS?^vUu&JPX8H-~YLWkM@$b%WjdRNvcW%@VMFY!m7G{bk{?4 z{o^=VoEE&Dx+--J<<#&EDYS{_#CCG;MK4cLhKxI1BBgK8dFnu-U5ReQA$om5&_uW0 zS&$A=0YzZ$n&IsYL?eL>Z-kWp-J|M?+wz}}WPaA{#8YYxwrVoG0xAJaFKnAPe|`s= zT^&-ck>oL~n813FkA}Fg*hIr4(X$d^ctU~CseK3Y~R~3 z#Zs-)Ysqk?bU1J|h&GNUCnt}+D~SfRzvAbhFvgK)8R&y#r1}55vu+tNbh?E-4#AR0 zPKZ_$E$NN-zTgufBwgfXTnu1ADm5ER^_&Ro za=IHS+G8m=Bde#5P6Av3@A^c7o=Now^~Ch&>R=Z(I!b(!+qnNc)Oi{f2jTcJa1_%x zhCj&goBxw%7^ElBXR&`Pxu}NQU_J1^vwpSw3egXn^=;MZ4C-PqszcR6(gAI@%bHu! 
z+(pK{vXE_vPM{4?P~7^|8*CMn%8Vu6`$v@pJSkyi4%&rszzEBpe(LO0T z{NTZj+x|I+j-&9P7~bd4r)&Q)8l%f(T68>vyn1|NaRjUb1<(+qu<`D}II z(#Ok(aE5O!-AilD^C;>&s8*2F6j4+0;@k+u(3xnstNll&G+QqjV1CQ1%^`MIR!f8m zv@a9iWM1K?p>VsxIISaK;N+RLziq!zw$&qK+&8nYdkZbq=AN~j%~#B*5=erbF=GGm+usqMOZ%$#sD#6n5y!IJ z1d#Up(Cy!=vjXPLpq))b2>^dXN|3~etaelo4!^UUCR27nmi;iAq;5OqEQ6#wSzS{+ z`+$B+VnyKQjCh?ItNzE!-RrjtflQddS9aj-&$J=0*#5ivyYPum>Ub3`=SgZKL^?%Z zvs5mw+?iJmagm(7(QW^P-5LUvZL00^iVcXQ%6oK1hf80MQ>fDhHDcXEg7SIpmXd2K za>Dgz6~0U?Wdv8}J}QK(x|%^Sl7nvQMhk@>FV}UH=rPfEqQIfiEp9-jdZqWxrf*s>TJ;C|o|250 zmA#mh{H-%mi`D#Sw>a{2sj_zl*v08|Y}H=gYN*4G zA2|DCS&mI zHZVWO`)E*??Yl(yLxQUgRA9gUau~>1;PoFS2jq5(2i5w`4=dY@oZG`@Dk z-Qro);_ySrV)l5{Q<7X?+-*Ad;jw=5%)rVt7WADXbHaiVAibjLEd`Kh;*RQgh>dBX z{||)9nZv-m0})ha4%g-ET8nU-C;^D09KknlbOk+;ljYIYCsSzhg#;b$dBIM}8&gJ#p#55@GfAC#m?tXD~cy%FUt}~Z#k6&Fyp8*@AiPa&YH?S>7bK2j(eW33pK!J6txN@x~SDbHUU z4trMR-7>-E#1d(12T~^c68U2Mr;or8m@qLBeZGD9C{F9CtZ}n}|5@WhM01Wi&EGgw zK}oJ!ycPCMl%3(f5N3`Cv%DD>H-u8=I}41ZasjuNLRr?Y2?F*%Nlb+imMT}Zh<9H` zeX$ng~9lm{M*%ulSH012J%$QisZAct?g+k&jXR;-7)-^Ae zgKP<0BFC89r$=60H=r^WNayFxGk4#-nT}9pg@iy%s2DGx0%@}6tA<3n7_`~6qj0P< zK(uer@JB{l>7R~S{_zOlvNn(n4Ih1){;uS-slR0q$`WxF5+oBhoFG1p8@z1U8TbVO zcL<54E2hj*5=ncZy|yBmbc>KT5oAt z?OhO}_+xLTa)&|y;)2ljbX!dnTJ&gPf<~dAb@76GgAVhoPqLD=`~BRKp^piGqh};E zn?1-=I610q#-UYq@9VH2Hy7lFgG;9!yO3W$x z69*(Fv!XB_%s3o?r50ErS<>;IJ$;+jQF0oQCCc5Dia1hjrS^Z`SdOnNtX!67BQ!J?~7HdR>{Jmxv_`baR4?s(Y3y^Oh-@&@|`g;u7^ddMlbEX zFX&iQL?zQJqJ-9IzbnlcLv;jWKZ`hw`*>=3#K6sUoEad;Sw9MJY0qYMZDXMJHBFowM8nj4kvTG;V*?TQ z@kJA^b}S?<%L7P>j=6Q7Z4`c;)zK(>2;z4|Fg(qXnsUJ!L@|Y3!<+tD6eIQMxCsZS zb}*sgTk9ydy*)ks;nRmhB-(Q5iq62|GPmjo?4-{*v`*fNe98vFcu`E9 zRu(U%zlTQJzaBGOv&DCzI<`)6x=oFJ0ONf+4@tefyTbvQqbq_gqM<#0Z-EZPXT!8i z>GK>j(0sr~SqLMmfPqI+`Q^Kuj3NoE5GI_ZwSCbt(fRO|gW-w|23J#_OCe6xUmN-p zcaGhT0T?CHE>X^CBF0L}&?KmvuCN3&=D1<78ByO|q*LAO23AnqE*x3T< za$9r`X{N<4<2~WC>8%>feK2CJIJQzBv^SD(a_i2WcH#ulF3)$afnq83KTIB z%&DAPkd}juk%N!vnBRxUwWTz-lt_X{yU`3Cfi1^SG;ylp_S$PGtrjInoyVNP7?|oP<&M;_cIvtbtk+_9`*O=po5=O~r*O`; 
zQ{MjebOZ~NfR?yD@z2Wp*p)Xu8e?rL@Nugj;1_g zPwCUcp%q<^O1){*CNYA<@q2WTiXQq(QO#@=;N;*t^`+ui?&ppcgNA#+W##n$j|nJj z(rZbW8O;oA%jgK++v~>rN#4ItoS3EKZZF`QZ}<5T9MoZIRy-I!CKOw3TECK_xAHal zme40efch%1u=}XE?9MKlwXi*&2| zyhon;&^%nH|32Jo{2n=`G`%2%3Pt_UZDR5E7gdRXgkW4m!+AES>I71I z5#;dMijy}|Q&Di2;-6b(o- zFu;X6kdrYJKOpCUwPss?MwUua2M|;swdtF793gfIg7xf)t>E28g;7Z>YU=nMNSx9! z8d6?M=Z0}=!RsYMpyHwn?>O|5L?AqeEU|tbf_BmA<>uwhaz!j3wuF8^gbKsCRXg=X z=!;Fh>DB$az|+ z(of9TF0CD0bO$QSg=&$gV1&#OmmpS!(dZo@dI-+gozl1UNF1b#(g+VS!W@JDY{`%$ zS~VQSj}23kO&*aW$;TKBsiNE%32~Q%W3mezT3cmJc0pH*0Xq*>bSrE`4UF{gQGGjC z>kQ-YzW+GIoJnv3VRo-Xu89Tp)u~4?(ayN)>8?M-D;NSF+yaMOwQ=$d)c>rwEzJJy z-)ktI0|bWSzip`j1&tYlW}GuOdC6$S^9bl|2nmup5FklP_F$2Z#9LKRTyKKqCR4-K zXk&9KbR}Se;p$AdrA&$lw*48#Z1+O@FXhL@Z&!+30IU4O<{H z4S|dHMfZUa&@AB#7=KZBtEGDqS*bj2Z(}pF$443`lFg_-D{gV)a8%-jhN<5?z7Pt8 z7MJrE^yr?TpT@OWbs@CB7tIjnA`ES}C3cGD*`!V!NKU?^KK|7LX^@Gjk@EQf;!X=t zLvMY7@1Qffno`{~k2!*exFJ%nsSd=Tg^C)5JynLAAO07*@hqkm+xg1+>JL7_Jq+cx zn5%OD7P$P586w!*oT#{klL1KQ6g9e&uJem}|%q{~l=)@y^K2WVVYX1568#nI& z`e_M?K(n``^y#`a$PD7AG~76O$v-+0LVFY(Ezow!3Ii8G@@z#ap}C8<7Yd|{i#YQ6#J!WVY))LgWiM?_+#^?G#vPAeCj+$^ zHx&$pe?AmJ&a4h3soSGd-7U^;-s89yFIJHUw7Gcn5*@+fJp+~HG{qV%5dIR|s{{pC3bFqJ-d|3lW9z~!8GZTvP1Gou;LGm|9@W)u<= zSwqWMyCaf_RF;G!Tcu5E%ws%=xXa##WJ`#$L|MjCQj|(1%!CSMX`@u{_uM>VdHcMd zdFL^?@Bi}qo!>dvb)D;s6E89e`*0It#RgR-D;t#GK;vSqgsWr2(ydtN$GrWd^EB_L z^V;ZE`{V{S3YL-srwh(H!8)pf8ThYtI~ljMDWjc(Bx7e=BFnwrB` z=RKn+i_hzmSH$6v(?DO*)kj~s1qFu6(RP3QukS;2Ko0}>S!uRYX(OqYY>O;q0Fi&n z3^1?l^ml4I!@#$t&Jd>>5|g*L_I%&&_oa|&t6mzzpvmJCQ2Ux)xZ)^<0(yg-!+;o3 zzUz6xx6>F|kud+inpzaL)3es&ovQo2B47$dQ4~4fjvv+X-21(UefX&Z-AgurHpi5K z!fkEyZ9q&lE{Ss~j=Eb$27vDDgdV!{bOVHeHtZR7%S0|Un?S^Xj$uR-bzoE|yvBTx ziEj83g~aqMOi1Jc^3LOYiVa%4NHg$018=&h{=iNYDD*5!tK#*e9MjG`jV>eA7wKj9 z(Z_+DEy((XC9fnHC^{Vj|Bitu-Ve^5o+Lj9SLLxZ$GFHx}=HtUrUXZ<48rFOCOqa*@N6=`z#~%=V<6Y(M0_|^8R?g z;RA+<%8VTI5Vn^4MbHgaMea>HkDqo`U-BhUD3pc|fK^4n$b7s)YfO~{-vJ1aUqgFR zCE_~HO#_Y2S%+nage(Ed(TPW<|0=)9JG}*7#X~zkCL>}q&~|r}Xuv;=IEtr1s3*$_ 
z%;_3bxU|oJw-RGFlpTE_bh-G6>m<}4r#sk7liA$$A=m#MTmU;~VH4mV> z!vfIftzu&jq_@h?$!oRz9c{ z!lXbU{65Txm_|w<2x;HNe-95c@i#84m|a|a82y0@h?;b{WBuNlUqx{x&6$sB)W+Da zVATR!)=q$3`r94NKdDzo*mhQ ze-O>=T>$8EzWVmt_$MB3_^%_W-pjV10P?s@^J^eV_b9_>ZW%^%e#}L_cd?g75iL5s z@yD1DdXL^b@zk(iyw?UHS#SCkcf(jMJ)6UwY0+S(T%1Hv8ug^kzfpvJIfhBRa`&K` znwk@e{`24q6}0p2ek%(}9Sc46K1r-UV4tbx7@BPn|tC_cfyRAdLY7PFQDH zPztQ$dfHG{CKx6$K{zpoIi>+GFgU$k)kpkW+fZOzEDh(u?;Tj)@T{KPGLUs2eflz} zSJ|4u2e;NYH0f0H8lpx6cd{MH7VPR1-uK2nX4Q@4{Bpev1mA{5rgS@7=X-i@xKDSz zX-(s9liy@5i-5uLk8=!e7-IBF+(l+x8FG6L$+h$40g$abdZ!#MKI_8^{{y6PjWEFT zj$L88@wSb?C7N}KG=gGx=zKXsOPMhdK=B27iRjw1q6(v@C~)i6t$mB{(f)OlsRK~^ zQLp>lN124o=<NTU?(q6aTErApbYRc%$QLT(`@mQrjYVOpqvr8x14&nl130$qz>nt+1)SvI3pGKojHbt>i2HMU}L zC-@iZ#69TCfY9mVJc2?jt3D4}!K~?_3zO>;8i3Cp^KDXqBia!Ubs)CUD&Zg?QcBs0 z!YT?B%_%49t2M|lc-+ftOS^8u%UUgJ+v4D(JIr@gAbEk2Po=1~#Tks{t_O+CumWi` zq}gYFS&hKQgi+^m-yL|$aXYEKG&c2|I*B)%3qmOVr5HBO;}!Xo#pg5=L~gthI5 z4V_m;)Onu6D`)Rfph(8zUSLp0FYxF<4vHytR@8x723CcgbxIG_o)62fkZD)DIc{>E zxvu_R=V|`4c7MzWHS*1|9pOlpw!hl%d2!c+-TkyTxd(+yh&s4LyHM{t{D$PF*v&iU zH&UTpL|U2e&k?!wQpmqsTt+9Wdy&@guQW8q9Bo9V>Dp&DW-LpuPeUnh-Pna$H8-P& z7Jv?iKR%OJ^;#&moFuZE%dG9qGw($e{|(<^;j!RBWc`KE&bMZQ>nTq+(N(rqUV z1()+3h@i~-P|d#w2r80P+E4_9ub!5=D@b;Kd<0DZ99smfnhgi3gsHl)Y z)2f#ZW;dkH_sdStu8mPgi=vye0Yu;wUFyJ>#URn%u^8Hg7jA?BzC`e)E4LC3e-cGq zeC;qRvC<@AJtY29vN!3eR3?aNzsU1kd`TLnv|IFzsKljhYGrE&v!fZ(OA!#DaPh_3 z8(3>RR`q6-?Zwn_3>6x(bCmj!;ko` zgQUImXy`?hSGVo1ZVN?fA@qArHHj54;M`mvC=VO*)PA^_%!*jZylvLYoLoD_hKZC8 zfJcYXuuKV(3VYQ0_D5ZUHf(m3{}*?N9WHzBn{U@t@FB5Sua#G))6Nr_vr73Sy$$m z*;U{-WTKgNb>|;c0oO~DLrV3v*o#@OL~KLy(B4F5&_`Qj_aCcb=#qHtwWqH@~$z`@h_wePmNaXZd zy3Pfm)!6VMx=K>{7=)|yJU_clAv0lFIt3-&7=seA;F1K4{07xoOx*x;c!Y6$qabloAmW9;kE zZMy#Q*pr?g{%%>@smQB#RD+w8nb$I$H{IGJGW1`x7}2_$-`)jl-MFv+Roa`CwCX?> z+_jA7nNYESfeZThTMxV9iFE=3!9f=*crsNxD>^A7Fo-*_dT@0~-m&6gp8aL+Nguya zFQ|;1iIx4Y2%!r9&bm8y?%b?rtvZ4}3&HAjmr)+GGG1^p%=i3k(h}}<>0Ok%bBV*U z0!OZI?_4opa%y67N9K9tqLy^8ul0EHg0jj5!NpSh_F>}%<{-`Nhf$WxrXcYbrLbLU 
zV==VcVCJ7+xn!<#Kb(I>4(XYV1a)&&ZOyZV$4;d6q)-i>J#=bjs-xh+@&3rp=I0w8 z-8=5IjdC?~EuH^|$?KG-TTc}|S>67SfrAHMLkv{w*`uq8zs^}hQ+TeYZEXMrAj(8D zzCl~S4pjdcnHd4BhpU6WoBfYCPobz2(-F~FP~Zv?DONs%QIH7MB(5+tEzBNN9`*H)Da!4ds=T!(6PEBiaF8Fxdz}ENhj8~N z;)b#NclR*leX9{b|El#YJi-mGZNMMTC)1-a+rG=-04^G+;N|gzWc_}tYvB6&0AC50(N8QoZ z`7+~wS^%FkI*frKHlH}*nQ-_Ss0&EqZR_tDQ#?r7(c2m{+amJ;>IbVkgXst}Lxw;z z@=fA_TlYWRkIo4SKmN@~TSrw=&79ouhF|keCEq~?L$a|4h7H~4OvdB*m$?0YY35Py zFvHic^_)jssI>O>SrpWo%7?9=ZzRTthHG!x@&D960YmN+VcJ4-w(2pt)5Tty{rbl@ zaOt8(mZh^J(fc0IwVWr|zkm&kQt}A}!$fp4ecZSFef+>OvzlL4#9d*gLB)-*E5Rf; z6N#GFdtdC;mq~#ZjDuIl-a}DE72Dye_U3hUUh?ab!+l7SR+D3)v5Lf(jX;^udup%kxQxSZQx$bgwvMTHZCL7zf+o&8kd*QQhJ(}$BroJ1f z8Ka6upM8FzxtNf=D@0}3knoP8XFx;i!5URp^rT^O1&2dJl=a zOPMU0uCk`OM4m5sSBJ*~=-g3Su@R1y&FOK>AntI+fi*hs1hJVrfocNqGp)q(}jRl7V!-1MDK$DNE%dWM*W%o$vcmBEx6e0IMuHB9ue&`fxK^V zX2*ChHWWx$tX1Rfk<~NbY9#A?5`|UA(+UqlO8;!*m%E}oXQzMr)S`T2hq0|F_R}6O zTC%d4-XgQo#I2ckpDmsgo`{qzu@;5loV@LMdteb{0?X!%0={GZ>%mhvP3NC#lZ8ph z)b!uA&sL-l&NEm@2sq|+7Y)Z8<%Qu^vKVNPI6675%y+=dKmR*_TW-v~AW)9Sc&`d- zAkGKU`cLW43&|400um_qv!nKO!G0ca=@x$!gSk#*zi8};-| z{I?ywf|}Sg{o3|%2j6W&Vq*ViXDZfPT(|y}pe^5ygMgOH>tj*Q}{lX)#H_H^5yrXz}^fY~e_ozU*l zzUe|+ffbXlCa7g3HRW$i(9*GG#{n>1%gj}{vHQ{sl6#^9(H-$uz>+ze^@bILR9UVUp;Bik>Z)<)Mq0QAp+uA96JmaF2bE1 zjwsBI>a{wC`Yg}RG-6Znze+t3CM6!6fb=Ujy8iykg)KNKPblA$fYiPoq#R1BvkT>>=kw*9pm z?L1nH8a3*G?~pEHwd>a&r!+tRGt;Velis(OIK#C1h^L8^Iy<6ZC0ejduSbs?Y3yH! 
z2kqvkuik9xtE*GmVAv1UG^mDHkid79bs$j+2>oaq@VI~4u@q9ozKEqdy{0WIg zFdp;sf$L8Ae%l8&CL?-KD$mjCY(Pa)W$3U`qpoON+MMn7K;-#|t6aC&i%gkrt;q#I zWC_YZuMOb6*PpF1@*H?K&S`Z1`A3mV87VlZ+S=a#Fx8Zy0otw z14o@W<@i!+%(7|J4o`}(o9vvO*mL3SBj+Fel5y%+4yTFuHKs&!5R%$=Gk;|7g*6M z%?S9K*e=XHbhU|@Jf1K(wr0?5Es`ROI=VO}pv;W(qy4BKB?q=kI`%Fhh$(9amVRlg zIu6sL6!$17_18Bls5Xu}cMU5Nn(3QX;-FzO|I}){I7Qe6d9(NS91d)x$_eoVC_hMn zZrR=)V~j7y)Eirn6AczGs;kFV)iqTFt*D!5B0a}sbIRIj*+Ei^+ZJA= z#!tI3V+G2VeCv?JY0ALv9Stdf#?z1@b^U~=Bqq;Fh#)!D9Myn8jh4x*KU@DA9cMB- z2>k_XDp34ba+WPW?U>GDmu;jp6)8SkL9u4>STnO5(gU~Jg;HxGJ%w3yh<_Cue*Z8M zKIB8ADHkUfh^Zrma}3Y0+mIo4!qm_x1IKz0{>q&8(&PIF%?RS7_H)$HfwWn$0W1_s zC>Z58g-_+mUC8T5XLj|Ne58|o>m57F$n&#>Idttv3o*eY2if{=Q20ygxwv-HTtvE2 zNmje3uKm)I8|Q1iD7I=5ZF$f@Ao&VJ^9hGp)fjwr9h@}x<}QpaV&jd#H{~Rfq7k4& zTxvlY24ow)TtPxB<7(9;0;jDD^on%#Q&4~a@1g|W$6csJRYFjR1^*#WyKzbur8U)= zOQL^cgSvkK$94fP$cD42TSS(|fN5u7>b%-PeRdI$)#ARhbU~7WtEbhFiQsK{kS{L< zd76YBoO{vbZ+EP1QYIV!okOsy-2Q3_s2*F_N)AY&zY?LOVCgEe76Fs4Ua_wJn&e)r(R5!~to5@teGJ24Iv zffL-gSUCtKCo=v1uG{1NK^rIB8Agu$1SOmbsY2ClHe(gCN*HPT({9DrdAxRRFZ#Is)74hX24w-M+>A&FJ_>A!}_t zX?)n$uv~hXA+XHcL@24zw(nIh;QI+r-CjOOS@kFx_?XMt^7szRvp*Yo)675;^L|49 zggFA)^mhaRnw@Ld9?iT*8pMNZ?pA`!oD#bTL$@xM3}L)x-r;~L;SaYz88GkG)q$Sn zp@mNx(EeYhpKIgeP!Rype?7sdne>@>q*peCGskj3;k}?J^yiKj3k!|1Ux>V&Sg}w9 zK$nc^xiDM2jwPW*tUjifLrFHLw#GsA*Ry8}YibMPwpKf7i$ZQ zHY)m&yN5?VZ7ms!+-k(v$6%ncAJs1q@+GLA{#ewP3zlUrMDJ2+E=j53|Z%AZu;M;dA|Y%@>dE*NZ?d zb5&`g(Z6M3sDH-DJ`u0Qw+1D*K;6ZbSZM|0A=90xB8SSDWkiEAzPE3&JGdJJb#}79 zzeEEubC4vNSeRxzF_Lbv?(|2Hk?kg55KBJq0~fCRE~k>{OJtNOQ0HELTP$rb#z6FL zMWhrPAQ_lsgzXIVWaxqewh% z;&vTF>NELS?1CV2lTywCF*=lvg0O*$hk_wZl6Ehm34DXNl$6H)t;)K)til3y(ZqYp zD%MGpCi`BO#8bd7Ku}S5o)w0QEa&lK&f>^ZixDH0<53J zK0zvIzH$w(Kie!IW>NBEsWN%QO+*y8KPjTF{4$}NUsV}&` zbkjk&h*ycyijTa7`2dgjUU}*ePK+$+C-QGtCX!{um7=quWA_01oNk>unet`{!e<#A zJTo^mul_ZO^IUj#oMV%vK@aGs8(nVv%0vXF*wjR^6T~^~alr}kgK)|{!{X`{ajBQa z#iyO#E;RmWaDlcvoF~Md1;Eo$^ngQF)U;5{c4H9A{a?Cz>}7F=z7mGd 
zz!0gj1LhREN@?0l*{JZcJBmYSlnrL`gk>H1ANLnmG~Q0EICpB{_1}$p}ADj-HX?QBi=&!9oGqTbm$9CE3SbJ+s>g}lL{tt)PZrpzNp!3f&X3k#V zyy8^A{UpWDCnGOBe7*b1V1t?4K3ny$V)@x+XVdG$?!69sR=>)3WrfkI_m|o21O$MN za>|q`O}jQxgtSI-mPydlsjqYULSf$S9umsroUjP3Wn5$AlAZ4*>#03qsFf@A?{n?N zcf60@Da-h`qMPdp{{}m{k}#}kx|h#u26d6(S^>fskh=22Z>240VSzEF)Ne|K;^C#R zh}NFV*JBBGy1t?q*hZ!y4E$CRIYb)QTz5P}z-j$U$Uim-rl&Z-n-^5G&opFklfJ-K z1xXch%@e@4T?^!T@3ctby;ypWul?r@KFkSgZFtA~1yD=Pg-c5msVnHmDCjS@SyPm* z>LmZN&#v2h$^Vjrl#y5!Y!F>Y-hGs~FFT=|5eJww{=P;9lSJ|pUUz7&c%}3D^=oy9 zlE}FLNSbzKQS4d$;a!VX8Vq%`N;VE7Y#cvtqDVas1cDo{nTnv(1vS|3KQ!4FgP@Ny zw3|`}!G$o>YZ2o9DG82OzQYr9w2HK>D^(@Gi3Qm3_d@0n7m~K)t_3K#wlYj8XWRAJ z1=RsfK9`5o*kBkE5g`**+RCt{%`en1W!(ptmrI^Ct3bxe%S&;Z^6Rg^L2`6aGWaJr zWtPFfe+Q(l=S>9EEvCs-5Dc|6RrdV-;W+gj|L{4BV}gt>e5?ECy5(9^cR{$w_(9dJ zM9+ovthWjM%G9Yf<7x@3qgA9Fo8V8?w<@i8BH~erbqp~m>;HenT`;-%1*D5jyRgzG zBA7hlx@zT0E$+ObEIneBG%~1fq@}#o0DHt6zuo-lpQX@Yn$w?hi14BgMn;;_48v*}RI7<;(Aog19=H4~n zJqJnFr;^DKfKXPo3d?BOr{GN1Olg5fk!n*UgWQYacfVJEt@5Gf&{5KEa+7*MmmOhg z_~LzOZP^=zwoGcw9OrovN5(T8$Fmo8R|ITiwe$m*FL*+{9{);V`N+%LJFomfIBT!9 zgO@m^7z|`;k(qQTldR73H3HrKhFEMp^Meh(vNv)88562<$9hHTSXls_r6EP`Ph0d1 zZH)0f&AosY?LyIiw@Hb`aOqsh`!M_ft#%cK5-1(lc_XGoC>FhK>>&V^Q%kdS>0XUn;sy zL>6$9tMaJp@Lw4!uDeXptpdff*taRI92~+$fQJsNolF$kE}jsOhj|W-MI9R>Wq9HT z5y5)k;WIDS(Bb*BGL?BNutuALgB^KMrp_nPOD(V1aj)Cp!BbUzmpm|oM7Vq3TCvC< zxvwl8WhPCg!uLFU^r)?TRa=U$yWs8)JLc zCmy72olG3+otKveL0^LeSp$?EdOn^$Mts&bv*VWR1=N%x@d0*pN`oEOQ8fl<&+9jkIXT*PXMHAi z2~L^xij4UNSv&7Tp=kGBQyFg)e zFtqf>{aK3V)5S%O6_gilRdc>sM^PMe2y8x|f@dHY$Y{xT#EnyFMII$C82cz@ns(*% za{BsxuYl!FBZNrutkiNx>A$Y42H4%AzTNMn{g>iSh8ZcXxH?oWLdWym&Rs0!fy!`t zI;E7Eh+UaxZHoI@HWwH0_acBY;Z%ijRCl)0Mkyv<-|`d{sck>J|0lq8GK)}h0_mI= zFIMCjTDPUE{8GtJ=ebI(zrEaqpEh8VXPo(7-Hp^WO36!dn zY*NCh_I`5oS}NLZzffoV{J~+fX(*hBwA_OdjaD)0LCkO^gsDH{zarzJ=n^v>Z82uR zSV!bfJmg3(0nelf6XlJz5}ad@-3M^lbg#UH zmLI$Y=i`_cQW#7E@YhjNc4rBV2`~aVoO$1!b8{nOqe$zWf!$8icdd<1YOL6cpbGkS zY%_(9SUXUJh-6$_$#Pc@ZpCo|=A=#NcZx+Ocua9|`t_yb{3Q%%?+3GxVG?k}=SwNY zr;L1m&{%ip2Y6jv 
zg;4@W#1)KMjzshMCy-;{M^VmVJ~4;ilfPF@8oS0+=-=!)j+ z5>KB#fCf{jYB7eO7S#&jhLa?vp&A1P68nDq;9Mg}CdBsyJ&sKKX&Oz<^HB5s!Vehi zT6lUQC8^Yz-`Gbj-hpHS`6;7enu{AZK7|Y!xwGd7#qyPcgX!swCW}Jk5F(bSv}Kf_ z@bSFv!H`!{Rk5nZxb=JhQ%Ha&1v_VRWlZ{gYRAxt14zf)qSu#6?_h&&Hm zFh9;|tt1^zPEJ}R9o^eY?+(=kV3t_vBDJ}>k!^oz==*JdKUbbC`s+CF=ohr`U}HO8IH!KB|JOI@jdd2!GE&nOgJIg$pVf(AZkG*5IQafNlFoq{tz3vUujb4-_DPBxcCGszm8SiUd6 zBotV_z+g@7hKgw;FkG}qP!fVVs+)H59{KqA%%g&n96)1?M&udsbP))j7^rzFnX;-a z%bL#@9|f$dQ@_S?>Pyr2_b*>z`dTWTsip z|5S8u%Zxsb%ar;3W!V$yN2DKgID1+;w0I}tqQ^4Z6ROFsobbV}l&=Y~r{Y@~etYqp zQ$;{0H4~qnY|RT2D-)0m|K44iT5iLg{4t4WNZ$M$JWHlL$moW^QZ(;ff|cOEljJxx z!<8WQ>-Wd0e6}LcBk}h5TR#l$kZ^mkKEfvfsfG?4w${&;KqvhNB%Fk0GL_WS)b2|h z0E*u^(&LaazPh^l?KRK%``lLtWQ^mzmYuLxnJYX?U<4z(#_ntiu-AaEgGvNSZ_bfG5yc);S z;zIdY&LbNKhr9M}()5foo0tYM%2dp9HRLg9d~krtQwTUp1sYA=u+utSyr$9UhoqiT z++WT#YoP&Tk6<@N&9OGLHMXUIQQO9JiARymt*M_tn}GK_Q9e=O$W1;0$IZoPck-^{ zZGD|pxF3zKY)_`b?0R}>IvvZ4VPia@XJkaW*qotil4on|S9|r+r9B~HKWg>CmXl8n zcpo6fwa)yY9*}D_%{wA>O@|>TC+9+bpWxcWaNon8qKV59tp`OATX`(KDSH1~ zT0aM;M3K}ogl50`39R~mpL&yeU*<7hikrkD5_hx%gttp^9TXuWFm@#^3eKCq8SNS% z@S0E?S|}x?pqCVLBYK_94S5b;icg+H$dA;(ldzoV;1h;Qvs4HUq;kEZJ_~r+Sx4v6 zBt;$v6?w5oZhJ5hBgVFhJvVv@$sp7ZpzhQ7kT*S^MxyNQB1uB5?SJ2c)AfKPo2Y9p zZT(J>8i8TIz^Aj1O@W=uf;v|(ux>mA!U6;3ucfEQs-J^zd%R=G#Cog9kMpsOsST;NRk?jf=%4tsfNKqe4P70e@ig=E{DyJ>+>E z_xTAN7-%ujh@0>H4&-Rm$6MI6lul8)$ZyhIcAVT`XdOYe>TPSCUHg%T&Vhfrrz zSQ;5v_Vh&R&6!gykI!_^Cy&oGW$n6k*~p^iNUO)n%ak>T$$LSz3O&ZKOQpaD5AuzD z^5{`CFxdgp!b{;=isxAwe~Ybfk5({xC$4twCBvbR0ohPP^2UIsXPa69#%O;M!Oh1~ z2w`C*_I*Q(FxI*O3m=X6CM7++Jp2j+uFM;uG}qiIMj1xFXa{!ZzL-|#JHu>kCEL>` z4sm8ZAw2W?@aSd-IT>v0JNHH>3fr`RC@_;YpX?URWB~3baIJvLb~_1k{xaa8R)4e6 zg3$~Ibqq3!CV6Pt){|_D{}{vNv~*dZI9D(Jj~I;o3125Ny}4uY#v$!(F447gSx?K;ljp)IRXv=>d*Ie#gdt#bl%h3 zM=z;(tc&O#uA&Rz+Wh`{UAl~<=Huu^fh`lV2K@l5a9`9g{TGFzYWQ^ic-d2Z?wG04N5S2&3ZN89=C8E>*B|B^ICfNG;k_4Bd3 z9^57I?QYfaE=rN60x8L`9H5QV+}vDwup!=`JlG*8kp}!K-Xg|zPx};l>#7)gMW@{X 
zQj!#=2=2`2L!<2V2$N}27oj}s=wrkn+pAZvS_1R4T-gzOQD3w+)Te`;eL>t4Qtl9R{`Pk~j1XpxSAWkHPI=QPy)hPnZ^_f|~kLM;MmTF(nxtV&t zca}nI_ndshlyB~lfZ5OUC=pnD0|ve2c}@3_lm8?W*QLjh7w1|;PMk|DK5(zT+|w?!x#FZg z8Y0eP4tfEtt#9cZf^8cQ^1%Ax|AcaqXgq!EfUd|s4#<-Z2h^h;TcPdjl`xjHM@zrfvYWT~`OxSL; zGt#QuyVogeOUuZ6ZypVhS=xP@DI)(aQg-Y%$?R8tP-uTm8j` zcgN)|gXIJq0mQ_{5ja9v|2<0=5Tag2@4or9U@N-IiA48u;Ol<X<=RF<2w?gA9iDKIaw5U> ziGdY$N)$~7(ip6xA_%nhIZIQCKv|6an7%r+#mCf*TumgU_B@2c;&aPIY}&CyPbtnc zw5bD)@U=_=9rru=S@%Bv{{AQnN$d5nz?UPOX(8Vj<#jeOnMUBxg#;GDl5SBmZc4Nx z+4KBLG^qY=;3n#y>&1bavKbzx4X*>*$PtzmSo;GTs1ZAqj+3UoG74 zQdkwb-D7F>qlsKFXiBpd$6>%ccYU01SPumj;BPh=0i+&~N`qrROMHqEUdsb{ zehrLYypW~;0Uf9n!ejQ+UYb&fUN`&e8yVRXopN~@4~vR+1DCtd%{qu<)h~i~)A)C& zoKP(POgQZhCG|{|Z1?eb9{GI=05gHFCBP1j8BTNNj8@Slxyxpn-$cZSxg04Ikr=A4 z=Du+1Sdxk2nj|v|vRra^Zy%rhM+`gfPG&$~nz(w2Wgjq+NC-faM#TEUr-L_CB^e-L zu#z5TdggfvYajYGQFQP4@zG?evmq?(5Q|J&{sa~xKhajwbt*mUJnky=3+Pa7n@R-s z5>H!-(Hz=!4uGz^uo=XV7rVZxa~+rzhVZ6RLJI^o3Z^ZTbR04B-9Rtb4n6Fz1d!u5 z=qWust*S2yhy}Ypc~R3z3~`UyZs4>Acf>;a+UcsT~f06I-J_K)| zs*i%PIOi|T(GEVPI#Qam(#2|s-hA+su$hBLLEN$1as3|kBRKx;oG%sDb{`+l0pHwD zj~m5API<8PiBA`ETB-dwXs$a?x!SExoI6O~kxZ1bMy2@z6WGb~pFBgQ#h%QjVRo%6 z)k!8SlfcZ~f#nB7>U*H{nJKiU%kZjUTYQa@&^QG1kelnt&DZ z2B`#|3TCTge#DxMB;bk6u-RDEMDg&`4b=XWP2R=4ylruWUY$SZ_uo=d%Y+<>4iGX< z@wTHY?Ld|;-7NXwjqIU0ki4xM_I4i|!@HB)Jk}6Z0 zZpt@oqHt>cNgU8HA2&{V1~mWmFUx&T|H7{p(*Y&%X!lca16|7f-?P6FSjJ`Lp3A%0qxHf=e*W`a*lNpXJLU9Wy|r4d6(_&>?*~*h*+*^L>=xVLju4t zG_F6F#vB^JYl}@9_d|WjNy6F$Z?T#*Y4grJMdX>I%%cJlMngf=6f2kAo$G)8I0gm$ zChgP7^<@j$bU6fcR~d3G>}o$cp#VtR0{I4{>T&u4`6my|0HI?use43*3_&6zhBMN> zB!7s%CCCYUTo;%YQn;(QyVE9&oPQ%76VN}1Y;5QDqz=Z_t{V`MB&-6n63@0=7~Cu% z^*qHAOt|2VC}4NfI&hV?{gTNmKY{5q7GVH?7B?CnJi3~1GNPP7`_)DmYc*Ok1~OH0 zEfkHZl!uVoH9h8Osx2W%G$c!8q|P@+OKSmg-ieihE%3pkx9(EA6SL`{# zT=#^3fG5k=uv~$qPAtd5T*Pf4?8nymdU=&dr*A|6=2Q zyU-43?QW0>RpM$+8Jd2M@$N4~>?+wj+F}NX?IrD(;2@JCz#AC(R3#h?WPN~w5R8SnH;8Gm+&x+IEbW9x?{`Zef!^zXcxs4&?b2$eS zE@1l6uP?lcsOgv|ypz+#R7aF(B5s)UiVQWE`NguXz0w=bJC 
zzHu|cLzpCbkLvf2;X_LeIgd#l4#EM7!vd*?j*@cxmJI%)Pk>wnaJ-!W#%=zBQT3s# zs2`Of_|$0CEnM>8MJI7#0$vPrsq#miGAi zlcR3c5j65r9Rn$Jv#v%Ny1w5;3CEZ+0+h8z;WkwLDpA5vGKVaBg$D=8?q_nEVv z%v*%Ug7v-qW?@<(V?^I$2~u-JP+ z;NPDS2+4>(^k~`Y^>ixyULeh2K;;;pSnUvtaZ&6`zFI^79Nrp5On8 zbVI@SRF$2$mtQRvi8Ow)#9DUfFg6dC`F1EmD6v$BbVQLuvOWC|IDLfr-TwggscrbEp@#y`H{LFNU>=D7`R1JPV9pNk!y^PL&(nQ&0*!}A$r98o{ z*`|lfgpU@9=^jsF@jGjjOt5!xvnoise}B3_*sQFH0Cj4PSa_B=pj;PTzDk>VSvms+ zfJcxN*%$q>l|Oax{Kh%U9&WOkuXAqIuO9=Xn{sFg>Psv^1T>en0b$x)rva);2*>>h zNdNN*0PZjE!)QwB;Ch&DPZ>)4IqEm_0_aUKw<_Jcqj_KlBU@fli})@dpE1^hNCL#& zQQ#dOiKHv)-3uZngM>u9fWMODw5Hy@DGlNGl&Wy=M_N4MJNJhx6rIPg)wuJ`+Twag zXXEmHRUhY7k(noHsUokEdBqq(b|bG*%>uNQ@3?z*DwsFHsn0+_+iv3o?ULuHdO%r7 za1+U*hD;$6vp#mTDvDF6geI^SXF7axD_(-0P#9I0Vscq zHcb%ttdBQ0H_xQ|UIcGaaR$7cC+s_FQf+bP6{WG@K2GQT8*BWZY#h~Lqc(WC9~^m$ zNC%Gp8#%L%3bYgyca`zOm$QNbz)p3FOa~xc8_nYUG^= z4qq0JwhjHt+E8Eod5{+7I}CsiC$X!`ehsMh@cCWeXJT(8S{8vGfgpBwep{7BuF!qU zH!z4|KUA$|elcr|(OM;&Y#bz?{goubbVL=Oke7fXj9Tiu7Cjg$$AR_Op>5XY2sC3b zg`otdHdGr6uXV>bG? 
zO$F%Wn9_C#Uq0Ua7@4iX>~UFoyLZ34)%M4157{O!KT9T7UORQFscC?W0U{XitWjqu z0@n1Pr~x|*uFSHS07A7;ErS&^Cu~-xa&_6{hy7>;rWAOX6ttJ~+4O7iyXPc*am;Ob zdFyMO&GD|&PAwTB_ztR0DRBr`95Dx3@*_I96Fj_Dbh9 zUu;8EFW>f4&z=?_$Yu78TJg4rWPY4~^;z!aO{i>MX=)?ik&){>Pgt}^zlG0#ixdZ> zmP*{G>aE(6{AL7#3EkuR7?AqEy4bO@;9kR|_3Q20a;TY$4XA5@xaz7!y4y(81=HYy zB@@QwE}Le#XLVk6-={SNDHTI1Fo?#d$)c>h>iA>(_TOADpo}L$ZP~J=jxsepJqOZ{ zMu$C#>#t{ImG z&fB?*X6EKrU@hnpj6xo#p7n(a73E0Z zl_+#A|CaWjf^x_`^OUsICPSIlbiJnwvM@-iKw=SxlfNaTH_wt1xyny z2g(`YYRNc(QYD8W$gaaOTYdtpmNvZceuprL=OIf!Dx#r7D}Zh!xj2#iig@$BWHoVl z)LZI|9e8?xG#JFVLI1E{w3U0uMNG1_&G+j#nnNaF$)99%34SQdhSo{{zut-=Z_7#D zCF^8t6}<8b z-$DAi5?Zt$$Ws?VWMuo4>(}F5nZejHA40WvLE+(!zE6+(#{PBxek3K5naaw_>YGE} zdvO@NhV2a4XPmvYcU?*+;G(qOlfUL^Hj`E%>1;%q#iNfPPKm#$J2NHZM{_r78JQ{R z^YrMI<9J&Z+)L(gBd>|(dYvgpg&|PqCr&lYFTI`cIqF7rlScWQGq3vXX>mpM)#nOR zr!CvIIr~1od=`E8cq-B4d4;U3yo9L<^QX+6Mtbdhe+#y}gfa z(>MUdYEMmCGNDkcjqxhng#fp02%G%=f)j_c7vl&D&v$eNlDDC#KV1uWF*SbeCDSoK z$6jFgMomPR8-3WEY{f~rXV_Y^Gs1gH(?k}G+T^+Zg0mJ}yjV0R7k>f$mnR#=RCOK% zKrOu^A>h-Yq3t2Eq<*RiYsX8BqLiCge&>{=fgG(2V3e7m;nQN@E|`$TNWkRpR*Dle zOKoK}>Hj!rjM~Cy%ex8#!hxc=QvlhjUGMHVanz_^u6(I@6UP_iB^-;3E3X@9>_l$Y zL>M7zjpa+z&xXHb?^>47+J5R3iuGH#-=QNC&9(TOfEE}{EIA2YR{wYY36$LV2{1`M z8QzeX+*B^%K-wsjnd0|z%w@Mb1HcPQK#1;~ke7Sb9ef23Nc_aSn}m}MEQ1q@MDp` zEQ@qJG|6S-xUUg9WDzpOn?(e+o@L1u>zgoeeNF~B#hikrOO~82K7;=(sg*O0a-iiD zHX=PNFLsUmPhry3FL*ZF5x>FPf?)|>igV~b1Q8=?Cz79iw>Wgf*in*B*q4>nqB!CO zyd$8eTMlI-)p?3No+z%r9lK&$5onJva~tcVC?{q-jeP;S4&f7h)lrgcir1Z!`+TCGM1@e9XTJ%mN4A-5J*A9{a`m@yZb_6TuM3RV;axUQzwkcNXv) zxn7lMw>U;UjaC}NI)La9berzsS$p+@gHdVICGmSOwbTy`pI0_Ev_I9+?ghD?RuNG~ z#14*9WK(o8{%JSj4i|2_@ycDlD>YsTjUD~xXstGTdfT_}n(uKFTC-iZ_t5=+9+;bb zs5~)g-#lm6dD`=|;|mj1wj=&~Gi2AVI+xeAUH7lA|2Cz{S~ z&Oky>O2McCn5jL~*>h^W~Xiac;!&gvwf0qGIJY)r498Ig{b^^l}WM6RuU~~O7Lj6hpBs{ z`{LE}znCBQAFV#Db_Mg_#N=n;J!7TX+KHvj9JE_$Q-0F*F=tqYN)k>+;@Z2Pl~j^p)*tKMvNHYOuK|M z?l5CE(2FG}1MU$E$bimybcx3qQKQUhPfKmXh^YEB=p=U_ZAUvax(`vO4D%I#ST*R% 
z{GWZmw_3J*?%{v{g`?emD;;Q9r@36k^Z9JZb?ZBT8w~IqBv-`)XL`vYf`TXEF46gOiii6#)NuZMkaWqK2aM}H&3oKg@+GX>%X z^5a;-wU|SVd_yXc10Gp*|NQ5oC9OqlFlJ3>vXK2iq@z_YA6?gDm$z={5I&Me3yn25 zFeAd`FmAt2bA!N$gq2rC;d$|xD9%k0z&QKc6v2I|$L!|I3$mllSomOBGI+Z_qW?hs z(>M(>U7BzqSjNJ0^=qhLA{nVWZ zCtxDkIX3-Y4p8qA9-4T8h;-mMJvfjeUho0APb0Wc8B(nC1HU1WAmQ9wfjL0Otv)#%1QA%l=%kMrXX!q(TO zo$Bmz)C}!eU(fHoJg`PF6^XPqy)OWs#s+R9)eqIJ4qRL|gOYc?2`uYpQW>ehF;X$BH3G77Q70p8f`bamVau(3hbD}rK?q+(pvdo|H01)7b0G!d5fdI+aaz-z+T zFf`ykpjH^lEG&0-&=T_#rpn&vntn7sKAT_=e6~R*uo4P&#hGm#RAunlySRQqo|&uo z^MmZhAusN{J+0REMq9K_mf3sB`TS40CdFw)dK`0zyky zDdU(4xK;GC=x5&%GkJUyjC(iEsUl~6d=Deu7rlK|Og$Tgd4a{65r4Dkt?*H?va2Z> zgyX{q88yKR+ws^?E`}zP=(?Skpd{s><-9!6w>W|1N}Laf)mwTj1?z?1GP>k}fH#vY zNL)?Wxl`2#z5^A390Q_QxNav@4IelUaNYb#jz66z!ZmSUs(6po`kyj`O$^47=Lqow z7_s+>+xW}@o4195knt0MzXcvf`-oD`RNL$Y{VyrmYo8O7Y zqv%EuIKGWW*st^fQWul{6uw26qh-+vG4T ziO8%)1i^{w^mH)e=1{@vWWD?js)|PCg__GkIB^7qVLM50LrDjc19| zMH2Ft8?kg8*@QLWR?uG7S$8J#K-JwM;hp8=q*EI4b0`8)z3WCbWNXmDZ3g6#)-h&! zl!&1~IO367QhZ;2HMklboXq>SPr?}ZwIEHRzkwO7qSH-}&+R%c{1#%Ub)RjbU>9{e zMzrJHlEt%!H9}osjZuWy{G;O-F3SqwOk%h+>%}D`DP36p2$E{gL?zLC# zWb!nM*2TkQ1Tc3RGOu{W74Btx7JbmV#n_=p!=xWZ zy(y2v;bQQ98NY1V8FhPa9E2iZf~d*!Zr|RTmh9h{B$_4OiJ3}1jeOF$cf{U*gGR6k zpP;N#MF|YR!b@eV#GRt#m#~6{7BTAEdK0(e&5s*`rmZ@0*Jm`SHbU>~Vr{7D|6Lwb zsxVJ@a+vwNS}bd%N6!i6tDehtlgWI`IdQ@~@n`;9vvIv20GI?Ex)J-l%!AiP1ZG=XK8*ee!r9=7V%%1pj(yqO=!|C zrKiOj5jwVO%U$o<5=uEz<>4=5rO&KZ^D8_Rl27@&;m@6EeUdKL-S;o1r;lZAjy`&Sn2Ok{!(^ADXMd6o<{*3yEw7NI zi~uS9kyiDFQ3OXh?|L^No@S(CoOMM<+7ez=jOEbj2Vra7xZWOvO_JK1+7YBudj>bM z?7EYDURu$j^sjz2{%-1hmm_WbgW%1KD0sBmzBRf-IbO7i_?~tvdwZQ4{}HYFe7hNz zUL^X`H%3perF1$#Xh*FYU?Ovf2^UNqxUL#-m9*l1)lEvk;0r6rJEKl@A#YH_YQZ!A z8xtjdFs&91u|i%Kd=$sR?%3Zyegi{OFl!lJrE^C2S}nT6fdg;1fsC{#*Y^5*J0gbd z?!yhCL~zfDD0lPm@`}pKcETR1%Lsy#v2Hq1WCwY%Q()npJ0T|i4PDC%>FL|_i+z5Z zMmR%MlV;-{pZ2RuK#O~^Yx;irMi;jy+q`k(M&Fuvw4j2ehvuT&Dtbk6no(cT-84#mJQ;M+cq(fp-z!{~&P-tXRbll}d- z54f_BI)d6t0;gyU@^p?Gosqt@DGkuS(B_PE&d 
zUMwyHc$ON^5y?pC1ZHQQWw6%ef%Ghp;rPaR6ZZRQop8?TVsD7d`qscCQiGHXOJ~_! z;cfiSL=IO;CHCiQ(Spg^>+zqB9hP?YZa<>q;ihZ;aVn8Jbr>%aN8xNGCzl!E++Uy! zauEDXT(E^4VTn3j9>a?cc%WMP#Oz?fD%6kK``LS`^vlfNQ#A&KQpKNMna{3eZC^-v zWsn57nWeLN1%9HzGn+MU-kipSe)cXM^{;M-!aCgjkej=^HR(lGzX8sk7Z|3NJ*zzh z4s}X9NAgGV`kwZ_#QGh9K@8G}^&l&8UOH6S78}qpxpzxvxVrY_$aGxBCMr%Ckk++q z$-DFK?dv;Z&BTpE+`0%QSqr@!P8>gOEp-^OIO%W?RHuy22O(l)=(g-P6+P13xp8y0 zEg^>kOXdx+heS5nI&9u)!+`^b^lo_dZC&QYpI3Cnz^S*EiP!V{rQeS}kBoE78nbU& z{}Mxn8|8v@)O!=Id4;FkMyUjTAD*$SjqCfxqvunBu6Ay5T*jT=x zvsPX^{brx6%9$K2x#+8;8pDS1lJ+Fb*}ahwQGZTKTJ}0kl1#<(%6$4{Ku)y=RMyqj zcG0W-FD|iwq}oP3zCT~_!fT6$vb6l=#{4%|nwwe$-yPdtw~UI@$zbgE#$#r-is0>0 zVYcBDA;}T0t;_Itad8pV7f$AuWc#qr`YhbwemJFh#r`;L`f|ZT-v6BW&-xBV2T0me zGMFdz?B0OYLg#>#(Ww`_Zr!?V22z@Z+k-S^6CI`fLM2r`FjWe^>su>GKy3(l6U9Rh zme_4iZK^U~*?G{QLBpN|r)SLFyCJYVqVK(^A{bQZYs%=iDoqX`T0z==*|AltR-&~f zeVh3x?fI@xqPI$-Hz`qH)^geHqZhSjJ2~y)j2HSDy;AtQi%aInmqB-=AF3!`{8mjr zrdgP)Bz+z|di1deTeF?Wj;72xcU-%yRhxhRd*jY3BH4kwSyrSb8GOkGme2W6Ne?Iv|=6<+PKa{2XNqRdY5d(ijzK`g@K3h$td3y$O*Mw z(@(RTr71eQv2KM(12S=zswlR&EK8 z+~J>}PHUx0Pl6&5k%*9-5SF|_wKlhNcl zN-8lBFsIYGM$#kPrRd0!BYku2m^Q|TZQJyH+JAPbakO_=AJC%{{RP-Uh(b>k`0xf# zpn}wx!9F-n7*y6`Q+S8`pvo3DdteG^7=>OdB%=C|=+}87)4l+X)2(Oe< ziq&85^d~5@IzM>k-@$ATjBP9_%Ly&E%C49(mP14f64_cLTO|sq*%$`pw2-9+Ehltix;RP#K27(o1>chJj-J%Ta=FG!N6wKAVSXwV?RjN}b@-a^$$ zIs$pZP}uYRm}OdlF_la%J?@SY4(jVh2kbnj6HYAh3#bLfe5P#Y8iMZQYkTaNJkOy; z$=TfuCW4ETo0@1@1VLD4#swapopu&X?I0pkkf) z@_=7CyYFBlE=@&*)tE&nKj%NS1;`f`T173nfiy^SMR$#>J@^>q`L|LKsSibanq&a zV>Y#wW%ZBC%NMo1JmN5%nY@#X^9w2x`ny83wB+DP`LiPag_l(Dd{4|$EjySK_V2*% z=j_I>Hqsx6I21eag{{RF6CJ1tTHbbSxPAvEZXkkXFnri z245CT8Rt^Gd*Y@`VVDLdPCUs$1Q^Y0N7i~nd$43{Jks82Txv_ua0w4$&M7{g1UbO* z1DwK)3m3Wlgl8how<6V7?T~R?j@OeSRe_~O`l|gN)}mNZSJ!N-tTr| z#M1^Zq>2|+vf!~?E&~Fwi1a1)bIY%!q@)y#P-4!l;yDJ+CEs= zJl>x%tGTTcsklighz%k|nDiv(nEE&G*N*;go8_?_kX0m-_k5erilayCj6K6oQpL$U za@m%_cl%qdZ+WHT|Lc{;pMCKK^Y$lc%vUrw`2^0O8I%56bR=-x#_HFl8ImqbCc_kM z`hR8lpRu76$E!)v#)#Ho%z3CeX>Te1 
zg=%bA5ift!&%C{smLZxGX7AZmnSG$D8E{4h%8Z@==_vg+pTccR;*E$BsR9OVWNP^e zLlQz@NH~rXg#LN=Avm^S*~1Q+0jn^r+x`QGz>N}XN0ld`c~tvv=wc~tL?SC@)FPnk zK4#MaU;(j?B5uCER13Tz=nJ8e&)uP2-&r%zgNT;Ia!H#mydF5I=bEAm7rqyW0sveV zH^%_r%6{ea-3IFQOyc_8x9>H@G^~`t(}Y<{IDUMf`gVD3 z1JYI~`Sw%i&+lGT`rG>yPeUAO`61ulwl_>}rLwhGhchwnltP9~AJh zA*V>@)>WpTKiP&p*M&vXJ5ATDOqjtnq~|$=JW26;pIN7-NO=j!juzmoLp4pRi10Qh zq3tiD2NqKeU={P??YRUl@eC6n6kJM7{|MxhJ;OzTq(;XfL(aATSO+3n?2537wAuUp z$6_4Cb^{f~i6RD=)ae?|N%t@R#*ht*fi~HdqqOAB^3JYVg}_1&T#bm61C^Wy z%m^-j<@W8Nc_X+V`(@Q-hjC-ZlnJ~mW2T_r5?W6qDWZ^f$P7i2C=1uAQWiq#LaEqo zTEpUH;YL;3C-(0g;x<2JfwvOgVWwGqm-G%U=VLn+QCf^Xdya12);YT{;h0Yum9U*=2v6D zl@je`m5JZCeLFAnR`8P>`$A}Gc zmW#I)aloGBE_4&bQSf3`r8-LrR;f1ulPSvHes;Ey=h#_mdW_}e&7pjEhc45mOK!?vX=4af>eEl;?m|&wWPo=e+(nH@JO;3P)bU2>Y%2QU}6AdG6Pvt;1lAI z?CS~R@23#rGNqc*@am4wHSRkSE@KX|I5*YqLD{b?=+S^T95t8Yd2AhlZT z4nMZ=K5&c_l!_RjV~}i>-G)`dvvH#XxEmOUT4l z*&Lo_D=ul|6>C~z8Ho@gjZ#aF4=uOP^S?e7qHe62IgxDOb}pz?HlRm2JP5)$i>DU4x^KggMcWc^ znmT5SwUSvsIxL>PD5aVUWwcgXi41kS+gciShiY<~Sf7h;8tM+Y3{pRnj3k=rDuqv= zm<}(0+wXVK^hMwxdMTE%CB8M6hwCz$LhMvQq6?-^@mRdz%_*X_fuUh$L~oJwaHGIy z+!EgK39}KR)Pki}KuwspABmS{Plp6c!nCJ>mUzV z`Sjyj#9*-^R0Q{y$(zHpPiY9GDO#ooB=38!pZLuZlx575)HDe^a?sz`+EMH$N&haK zMVaZV9{!mUW<%ooK%AQh|5d^3c68)j!u*NbGR&>1J>ojSL<0} zkWr~nfoXF=ML2nb1WuENLJHQlP9q*xRV{71R)&8B$3y@iLyFvM1Bx{orF?F%rD-YE zwG4$WLbo|Kdb9bE!LkaVF4FTa4(B=^ZQY~aI^aZ4tf znwm<-B)UGDRK51PE$xK@8^?h(oJw>Nq}Dx*BY(C3eh*rC(eDu}5A)FvP++MUsIIfY zEthHaV;zD7^n?To5E;r+L7WZ}dU$k2nPhKwbR~4mRFwP5wt>)ePK6yA?-|CaLvqc1 z?t|qg>z{0a?yOOkTb8fBog0~#T z4yn)1yu4;LyA-qog3U!#Gct40x`9L<=y~fo>QuFG8>UcpLTuTmqZDXEcDI1li)b2+ z<#Z~Pz&$5<%!HKG6%zxNJMKHm?EOm}rDk|$5o1=ehLXiR!bu7Z`_lObK=R|X;;rJj zhVfC{*9;U#2w`NA?lG1Q$OIHrtBOs`3&o6q&!}ECOA9H&7HTAwz^sWab(3v6J5Kg- z@a66jg^V`__nTu6Yb*X-#Phi=<+YJ;u#!0Fc19yCkYMT-4n*lMn=4LjILUK}M$LEq zO)I$tU$ZNFg>xDU^sFi=ydAYl)3z0F^w7DiGbd1+M>Gc=DAPwhL{lpgDyuysk&w$A zB?XKOujwiUdqjJ%bMMQCIyB&@9WScdtsSqKe94drk&uvPld734p;n|{xiSGnz(ntL zKm;?oYA+EeVc^O?Wb@CH%onG)6`fN#Vu2KvNQ7nIU#Iwy`pcE5VkuF?534s-I>mST 
z^9J*npK<2#t9jm_;&sCOOT>b>5pycyu~LO#;!$a7FN`{Qu?4qy8s#wYztQ2|m(&Hk zIDa(hG^F30!%~(Y80a|wRG7Vz);t@8n>Jlc`K{-fG|(6)>M99%N5x9c*qNZQh}z9T zi`icXFH@@Xxu4K9rBvsv@D0#Dv3m8R>RnA%FP>;Iq6hbGUR`pMzFMlL4)>B*S@^P` zmnXYd*}Mm78=K-h`5o00vg78#b6W>JMJlv1m+I>U076)Eg- zR`?v)x0JUL=-?J;5reJ}Yq%+_gAK2)FaCHUZr>_}m?+X#Pb%)B3rcvRRlIi*Wy8bJ zn=Dot&j`l4C{sa)5CH#oZ34jy$E^e1G+b zOSN8Rq-V)|&^(K{U?5S1JwG2OoZNfHCnBrC5W<%Y2s*{$s1FS-~l5ggl}@Mvu7e&Es!n3v2ep zPh%K#Qe0f4?g5^^zWTy{lfKavGWw8pXMR)YB0EpnnH#oJOItf^{)SHwlnJQSjC_$d zi8OiHX@u7ivGGPgC4OSa!6$lLeJMU$#W7h*`LDPqc|WY@5XDfx=%s*i*jm5v3TF7zB+MSw6J5g_3`rITqN9ks&{c!c$pDU_052ucvE!Zk_P!rBuE_GK`NcK;*pc44fa)sC z<{gU0sE0QO`n;{BxE5w(1W#_*25#Mwd*{@)IpAx7rf02G9Q%9`rpJWDx=3$ZqOawV<6_j0MQ!Rnp&q z*^J0DlkXqO=EO<#Nmf`DgV75w-|A35&bixLn#9Oq{knI*@T4ccF|9~KR%9mQpwZK$ zWZs{sv~=-&6QBU;V$5^+!FpJ<+h8)T#tE z<1>9J$LZTDJ+3~s`4+^`ydrR=hj%BLQl`-e_YQYv;<>@OX@AI*yhlzr0gf|uTDvi44x{kYczmYUzlpCuT+|r&>v9v^cC;yW_DdN;4sT$ ziAa+VvOP3GPD}-U72h>8dRcqhIlPQ&L&`Gof=8R0^RZvP4c7(O+j%pT&+FW}{?GG1 z(#RCrf_fla;R1+<+QxPLO^EN#sgw~DJnfe+U%tuJ-UgwJcT_D$#>?gRIV69_B})!b zHNcHNCL6W;LRJeWm3)mRZc%hlFFor6*FCNldj|Bf(DsP#%4IBd|L{d(8iZiTsSiRc zs=v-J)|ut+yIdZq5xXQ6^6Z4jZkj#Aw;NV5LMt?jBw4$YCEH8Ym*LEOK^Fl|7IGlc z*9{JkVuiRtDgcS>sOiUV`Q2utLKzyCT>7%MOlpBI3ArcF%-o)0V8kC(s7q^ZQP}E! 
zRX@>qyD_E#%H2bW26x{2-_2+=xb?$j9~w{1B+t9WVzp{EJHbnj2kZb>x|b_^eESsnezJjka6h+7JD7+Tt0mn4({fyy3rS&q{}nKmIkIK<;gRXT>?Rd@zD;jgkXLS8IS#;chY~=Dt1^xYFd22c+9FcZ>~+`6N#sQlz>8S= zOh}J$Ux~VxW0*&8CbD^u{J~)2g%0sEj((3ezGJ}x%5f?6)T3NnnzZZnxMBtV z6oLoFZeJ^Q4>=1{N;z;yXgKt+&do zL9_FpI^ovQ9z|w?Gdz~?AbYT6xPj9vDm-*NR%~p~+|xMYz9{!TvxD`c{cgd&I zo_GElMZO>;G&<3SosG_5*r34i-NL7Tm!T{76 zEjUs3EkX|BPX{KZqMn!c3EstUlCmNB|x=;M?2;DYi`^WSO4Xnc@3|8#dOTb?S(0IACSWba6D2(Ia1m z%B4D&B#r#PczXc_<-`-`Xw^glPlxG=Fb^5)Y|414Y5|7Oz#1q?eSuU3)!*OioGI1V zOweE9FsO>+(0b^z`;2){8IoX2gN@3XHc4Pi4xzC}3O&E43?=~eNzc`J(BdhJ7cWj2 z%6kxQgOiho5B5+;ndh4X*;-d$*6Zr-p;lB(<~M6|5T{GQiBZksB}=Xb(V=Ms3;+ZZ z$c)$%BO+7A{Lu>LLd89q_Nhkv5&Qsz;8pARX=KMCFJ0Gn&C9$GH8KzKU8TtowWmP7 z93hEUn@>TGbCshROo}>nj$svjyAua7$&IA^(+4X2O-3mw2&6vZjv6XG4Ycjo%MMAXye&W%~Sf`qly9ztn)%ETS`ZXz63@ zf8jH`fNA0nG(dYs?QcxDGTtDr7_pFpEr-Hu5D8SC`&^m(N5(@OQ?H<8Y|BIETa7b6 zQ9VcF81sbDE>#4hKC&t^tMacxHH9$O)iq8=s^_pC@UZ>aSWc=qEJC(T1=JRP=~68j z=LUa7y3E-GP-H*=IF{)u-SAU=>1)NMEMmjRg(DV?@xK4J7NDO5HX0M8GtGss)^-$4 zJuQDogh~87ORmavYC;~fm)50+8HtpkBI3Iq@ZGIAKwj8zr6;XoVq3)U5V||gi0~4C zduQR=P?AmY%V=)6MX>W~rcwVKf}Mhl5cq=_NoW0TxS+AP*XZ#h(?Ad>QQCoA-*Qkfx8NOI%6V@1xkSFYB9T=g`(hS}3S`Aw4ZrE*-?0mXw(%<2_eYGE6?|EhT5Iz({0yU{^==$l4|?Tr zVEmsHdc7xjg~77c6xOZn2uC`B+^{M2WJr4kwoZ-9;&K_S&=71QgQYW`AyjapEJOG zJvDEw)Ban3651DmjIfaek37n)(wi4rbm|VK{;O(dU@d@F*8HIyFB(ihnm-(hQb`DY z(jcX3S7F%)8e!R#Q9|kgb&OU$V)&4DErxTD)i{f@EI)qc zd5O*zJ}f*E(htr|e!8{+2ABv&BOoSN)TH@vSE3=NSCuquYv++HQP^IG6p%xhMzS+p zUIOAEwqgT`4YJ~+WU{74zz0x>;#DP@d-9gjlBbWUhIpPU=PN_s^l3?gY51K68_D>d z`u#=cMW=_9Q!s7iK@Ne;qoQqp-9@@y$RdA86QS5cEXvfF$rz@7G6; z9m@qn6PrErKCc7*zVqt0)hKIA9nH|Dj2s9)jo{=PXF6&8^RBzw(7<+(IS-=Dz+4t&hU5!yH&WyM-QGi$wyQd-Tj@U3}Ewz3&iW5hd z&^NjrZ-%b>q~#;1|L^l`X}l1Q?3JQ{e#oNzDmS+q*rVV|H+GxE*MP}n@Lbf|*bw&< zxt0pgS#hyr`S)1Se}Ag8wkyV8#?D`8#D_ClDVj&|U!$FycV_8=1^dBjtx=#)#KYk9Z7LjOzhPIs8*>eDYNsx14TzF^~t05@A?RFGAIQkfg8Lp*9Br6CEwo3QvTLyKLC<4RcrsGO&|3mXv?=ghQo@WAR_A!;VfJKh;M1w@$smuBG8iw4}ZP$ zvNL;0ox;9efue^;Tw1aUy2zh$xq@6LuAI=LBL5R6oR4>V7Tke6+1A@O?Y 
znD~+3^f_Dv@sX%S9YdX4GI_&)Bxd&G06t_TgY^bmKG{JIHj?nK0Dp ze_%T#!;|%t^v9rT#o9>5O?b4f?$Opb<1R}-iHK74uqf;hFPU2vWG-0CKREL?RNn12-FaFOJ&j)$)5zPV?q<`}W&!MKlWQql(}$r^uMWsJ6(~rPrc_k#pEj zlVenTUWUtPeWVph%30n0KZ;qe%TfdomC%^w(X#|z*72-G@{}yf?e*%(zld?B;U?X)f83eZy6u>l zeXkZj{4Q@%WS_{ft^*I5jcvoO!N`F#FZDb$&D81g;)B<(|MFX~XV}PDL$#D?W|tZ~ zXCJO8tb2CO_xv-Ts&5|EojtD+P&EGE($4z>f`fu0fG35U;p9(az{fTjd4&|KQrm+j zw%_z_T5{;G?>8axg|v-@NvE(UG}SP~u!UkGFdXvNT-k}7P#At=p*1}ZvJG7bXMQm!e9pQzx)mRgX&+4$CxzlIgO4$W zK5>Kk+l&g4vVkso3KgQLo#@+l>aO+j@>+=4x}K3Z92k1S7;XJUv*jy{?~arz0y$@* zvLM};!VC*?L-LV~3QEV5=ufp8`s&NiJ{#|m49&9ZL}#=mw!5OEqhk&cjEy>NiFq{u z`Zs4K_4C!j?>@ZU6*mzbpPFq7u|G*jxKgAul<|V?d?!YTjE;q)JB5SLSBgl544c?C zm3~P|7}DVr5JKDXFKN+S=9+X8;ftWYBO7Z1rSG6L)Cac9-m4C80rSw8l*0fq1{Q@X zhD92eYSx$m1;{9uFf?>H15uq&)n2*qO-pfd-)(gErvb{b`=^SmAJkmDqQuz~x^Md0 zwQINf`=E8&^gtRtd#DdZAPkZ zh{Z23Ok}JI63ldNrhL3a-_q6I-m(r)G)yvd4)btjZvPqm=eOL%5!X1wMt`!C!Nhhr z{#dvP+~m0s`HwQ2E>z0gSsNVzoRW;b3g}368|AQpYOmyVKYe-#_BwO{B&gndXExh% zfbFl7DRfTnAxJTWuKUhl$1>`ntU7bKC$eH%q$K-laK`(}<-VT&#DnpwR-cGR+CheQ zCY)|qWfr?m{o%Byv$2BV|EW}7HjEj`{4oeZ)JhICb(a8*GTS1`7BJ8EUrBF=_ZJST zzvjxybE${v87Io4p`ey6qo%rc)D3Sdg;IJ1&iB?WA3BTyG@~}^hJsjDFbp-ji;VA( zWai`f_BnRT&PIaOh8|B%O;tP6)1gz4<9&5`+^gHvy;k>#XVE#N-RB;eO)P#^Y*Du58(d45U!=_GOkqSM?V0d_E8xf&<3AvQ2V# z6suWPRTqn~Ma>iUa3>vO2?l;=t}HZ>q))1m+BYIOy_T!UE}H` z*+EH*))W6w=znvb?vuAL(&8s)k9yT5tLGsMQwK?+y$$Q^bHDsj4RbfN*@})~&UtI^u%E(c+Pcq%#%?oA9>pc1B`# z<_e~Z(r||~yUYm6aDmG!*`XlH5%Ep?qP?NugHlxsb<79PE_?=1Zq%U_|09e_cH!Y4 zHn!%!L`D43Al6Y)W+IC4ZF(~f4kAPVxza4rX2(!x30h-Wxdw@PROA0X`I+$)mql@a zShlMpPSIcwLcOb23@+<&QgC9v6Y#5A#!YXbREgs+_4Ic_ut2avyoFSm zfj=531+T^ZPCKcQ?kw}%OKSXF!*icz38k!ZnnMdhB<(+GCHQJoPm8dFjE2}#UbQg7y94=Q7GXnxna;E00KNj+9LL*A>cuV1vWS{{HQ!bg1$1CQnSFU=WZp^&K5Xu! 
z?}hyU6dKRCwNH82XqwmxRot4_JiJ6`Rh3Phlum(xR-1fyhN2`f*fhSzfrFM~Ql2`u zj5<67WSlt=YqT>0&J*Ztkcg1n* zfD%)=JvbRU-L#++5`MIi{jTC&CD1lOI%-%6GA_&)=FPlIV<7i{dMNRS2$sFboq+zb zUD>5CG3nywvHR($BdMH1o3xkq?@%K;K@Mb0&ozbplFG;NfrUAs`q>q`PKqYxqK#}m zRA+VS}gPSF- ziBttD6PP!xE){N`L=I7xis>Q=Lsv;fCWaO!GAPGvjlko&N|8&&y!avEQN%VvwTLBX z!^I$W1@d=-dPdN}uklC3!C@FPNl;k&wh0mNRIw!Ra%wQ~8#{RDkTfooQoS#mp@Ry% zvCyP36c8k)lJo1dglx`8eP(rWKm?u9M3oH5@vS#Qq!|P_43RFH zXc>^^mviIioNs$4g_@DBRMw&rsu;cKfx(Wnp+XLUleEmXmhjgcb;M^RQk zoP*1{)V*9SJw&I>U~OA5_&dp}Q;n>Odi1ewR6i1rj!8VB{bicZ3gGZ+YcnVZNs8hR zBsvZnJmovzpix`9m3^4@iBvZnDut4H4zaV|>VNy>vN_b-WvKo{1r%0XY`TO=IK$xRndyFGg$wgGVDv4rP0yDN?<_siTp|L01?Kr${&+gsk%2^`p zbT@3;Fci>-SP+5drf4e2+uH!Hh4PmvTo^;AVDt8UXC0t=8-IGu9(HQl%kO}=vv+zR z<5yM*DU|v=(TA;5#j8*tE9s3<4oB5;X00g97z2p~2cMvld{2!r@k&w!aNCF^9!^=h ziA5_qfQhH=iY`Jjh}@kkXq30iJ4$eSc@rzn zn^YoP@gUn|pf>qih%O|1c8ndRTC0V$+>9N=qK?c*W)g+Oof#$b4tn)8wKA7CBO;JBSWKW)H_V@3S&NR$`#ph5$ z{@>*$pj*_+>C>l+s|i(T4)RNxy=D~Q)=z)81ulWlZtT(+e}dM{4Ku_cii_;2T|-AkSNsNhye3Co&J?4EU_$D!KvHkpbtx)?!A6s9$YF4UbA^96)< znKQ3zP6a5m&hORh(0EG*sZVSjO1m^E)JWH^K}^^jO*-i+iLh60p3_MoJkr$o7hVHF z=TK!$xtE^0ZFi>X4q-(W_a0sns8ZEHYLAyWg#F+!5vOPKVr%Rm2|2s;6G!wuzD94@ zu!6-tFB)}=l2hFup8mcZuz1J(*(6<=W-_cz2LElV9Dxa;`RVwJz<&vsSNPn0)-=H= z^c#pptZN5cuBT4gX*$}toP1CJJa$tpch#CdVN?{az-{2-(^8cJ7PFkVmu7ih26ki= zQf9`*Ey}jMP67=bo8`f01f!6iuH(>k60Lw&>pNMm+k3^+xxu(!;59P_) zoeP5c&Yp?5NjT~|y?R9iAMt3ezI6HWV;+_V#FVJ4+os#HELDM8biVC$biyBY%5Z^C z1e2bWe}Znpcv9@zTgUel^3TFbj_(Ou^Ry|e?%sm5E)>#wH$iC?8G$RQCSt^=VDW4Y zXyS+%AL0=+VCF~7;zlNmV@vre{QnybCsmz=wTf+A2O(%1mWxSRc=V|{A#WcmE5Z0~ zQmAL%q?~v5wrt>=ST**Q4l~lZk#@tf@+7?hFT2+0_HA9NX`xQtqmo9qk$5x7_EK@& z$R6U4dH6(u#Yq+U>LQ?}ojpk<&ep$SkDsX1l3(f_c&KB17~-1vAVhs@t7|}_m(eOo zHtY!h8<5c`IcV5La=?RZZcW)l?-vi#i=1Y?HEVpsQ=e zxTOpFcQ9|0Cwf0!U0p}7-DkQV`ob{1s@b9eMtX-?w9V(;lCsotf0yn)lhZQ?z8JzX zm;pXpH*P#SSFx&>d#5ca*1ZNf{qVw#(aX}+7d9xE3^FXb6IEAm?!4*Q{FP&?q{B&n z`%DvV1T@F$u)-*uL^j58I}Be8;O0;>wK?Sd>IEj9z;wToW{zVk3ta({=GXf!p`KIk 
z21M?_)fP!u0DCvoXE^nXU^72+MvsD-_@cPeh47+JbfIS#rU(`7yc0{DdTxqd6|QZ% zTJ1gH<#h@yYx@H?qd53saZU)S|4dUeD`bXIc~k2`qy*+3!+ESk)~`aRV>}Skl=)Gw zGX5uXnFO+J4;nXp+wU-?Z)Cx=i1FWmF9B8H80a|gQtE~C%}oYM{h7xOj7edp!=mm) z+1V4%mSsIXz9(e&slx%Msfr?JOh++hST`JkiW=tX;@NP;iG}fKo`vdqyiQvboYv>R z?AI^eu+|O=#k%1Ty1tP5V43ibIIz{~j8`?UXE+foAoLK>P=@lG&zh(-oDJD2{b+oR|=ZPfp8VFB^jdhw!5LGkS|c8`@t&o z$Whrvz9)s06Ajdh$HnJI$u+<4(i(D?>TePJ34Op1n_kfs$}BYOzVmwfKPklI=g}ai z5gNDWQeU%JI9$B4g^ZKkdME%URn|=(`J2DLMPhivz16$!e*O#U7;8X{b|{W0HzZ?+ z4*4nici8o^m;8gA!YZoTsE92Mb=xun-BrDUOnVb#E?+u%2HD+AIWY7G1e3*O%M-@D zaFe`uVcUqAT-nD=f^eKi)D(nN&2G@NzvJcpDEH+#Q~^V?mr;NvIDY^7XYOcNpEehC zSY@((!M$qunB6@Zk;4oe*K;ZYgkg3)e;IjzdT;2fE*`#xm)~7ZA7AkoMuRMdxgs!> zijzuD=51sHQKIi)$IFyExv-m*_07hIUP37FK#3!_Lx#F_^GltB6|D4La!q-$-ntFq z7$ckur^5zv$MA3D6v;%$R~l(wax4Mbdn;GUT`gT_vynzI;5whjHTPnCr8e!Z?LAC10C6+>*6qWkx%5H zo^oEgE^Clk$gjWdihc&vX99R^f8Dh+z`CVaX3UN20~GYbg%*{CMp$o!kH+l?`X~{3 zC|r_C%0*|>TEvyQ5&rcw_SfjOopkou5Odhvvl6`fyv7N*VO zJE?ngl|1r!Afi@_J!rCI1~R+oBY$%7&g1B&gxydbL)vdOCZIubZ1QN|0}b~c`2eKM z>e8?-J-><%h;*%}cM9?EMZ@lmUoExxW4mIo%*1 zhTc7v{u1m6j5cU?n@@iJE9jW&8{4yok##I;f04stT*x=JFo05*>Ig&6{x)?(yq^Cd zx*0z|jl|duWu9zi$!SVYzZtVd4cMmb5>W^!xg;6dh#NF~S7T-V#Fc7Xx_8upy2!@i z>;3@`yLy0!CTyhgL?qO$Z0SJ%PkY&6-eaj+LC-4w;k2Up)c2yqk`=C`dlTKokcBcd zk#YTXdY+l|F?plC_1E&y>r)RaWRsh;>wq&WfLLS$2znCf0To4ix;|qlcAw0wj91qZ zp!empwAqrn7y27~&}@>>XH-w{z>_LDBr?7wvL-4<8Jo~&n*8G)@7U{vtx_e)2c`s3MSwb45;lyE zp9N+_LKurwY6YYJ@?T}08r}+FYpRCkbS`Ef;=gRQ~ z=(lfD!77vhRdRLEwwv30u8P09TOynQYN{H{)E`~hG1QSesfs{GaA3;b@7r(f=|jt0 z19ZoqX6MO-Y)VF(DD#fDZ{L>Z0bn?I#Yy*cT4qD7J`i!893#t933y_UJ+(-E6vK>! z7Z<>cFU7f+^*A!@8FLmqNMJ}6DOp{2OuDB=pUMM-pnem8&_Na9)}FK5q<2}R1Oq_C;>sp%A&q? 
zb;agZbYI_rALej?`uzM?KzWm-A+9d~zO2E6`o^8q5_Ck?aSN!MF-{??0%#Fs4g-DZ z{G)RgD(3vo!z%ojIX z(Fl@WEeJs;=3bJMj4&(3jp2AU}zz3sPt=BN9^xN&iq@yjX< z0+8r$CT<9tCqY{F{$dG(FT+oRf1=QO6o6uePhXDX?EsrMM~)u7@h-m!pb!7vUw6&; zRdH+R0Ky&oqs=+4lY%S0(lI+oD?4LIclj(syD`-w-xc_>U7I!wZXy9N6BJw&8^ry- zTY}934G@DQE7(OUQ=MAy6UnCD=3V}du zV9N_Q&Wi%%V|+uOe@>*tT$~EXz>_P6bTp!NV_T2RJ3)c*Pxvqvzmiy7`S$8;}!4UHqMgsmNV0Y#-4d@9N?!j(l1zxJ3APCgdkMJ0BFQy=sAb^VqnRWhSf@CcQ` zkH4&6zf=E5d3%kpSyZaY(xU_UaoMJcL`Ltyy}A7beabs|Z=Gxi6bpKqR|use%X8JQ z)9`!ze37}vsI3%b{*>7Z%K95!0`!6%3;Bw7ydP5nhC`cW1LYb?0sfsV90lQdfWj>T zAX+xhB82kYv{IhGszO`l*S3lRffvNy4s-_|3O|RItNr1Z7Zp?0%4d%1`TNc zH+-ux!uCJJw!=)3MMFjbCldzxjY*~=rF!3o;wKIwxBYLT@a4zLecQj~BWVGUm~0ZZ z!GB~X>y@aruNG9Y<(0;dNkbbzuI&W7U%R6zm7lQ16f^ICvL8zt6b=%@QiLH<>fT(u zHMxp12l&tXrrG`H^F{z-%oM3SuRTUh-nJ6BVzU_JNgv2G<@dtM==79lLdQis0I3IIK-ZWUrwvQ1*!(X?dj3ro!Xw6wVIwN%sH{%eNl zta3A}O3x@QhV*p$TnA0sqPu63B#r~8jaN0jeQ9AeoL{e!->q|~ZSm*!i*EDo=zwn+ zHr)SN6J1o8VeL^E2d2lJRyTw5$-sy0rg_vsB=p%K4R|_St7+mmSTQ~jGJnR2#i88r zZINazAL7B51+95Z@I$8 ze}oRBLd2*mLq6U8YAgODqqeiC^|JTAyXh9X??o84_|5bY$eaA{zI=I_wGL}cZ8z-& z#w+LCh15TJJj~-7~9U==aY>kl*u5oq;q%oSYe&DIlB%WT|}GVXT5u*uRRZLkOI(VqKpqiQ5kBrQ@>|gpMRd3KdlVuSZT!;-8*-- zr4N)}8S$ssvoHb$@NTFS*C}9483qRMupKf?{Z|5V{&|G_qI1a_CWuuh{L-}3J=cu!Ur$vgOQ~|Qc(o$x}xeFN=TGnCv?f&7P=W4{SvRN7ip^=GKl>#6` zQXVyG`>oWS8lx6r{{Qy|c7jr%Lds*>6e}K_U-zraCyjrpfxNEF!cSPp7m|XmlI)1z z^EB$0K3X#T?KiHB<^p?`ZUi)Cw&+s1l>igAL-2azC!o#0z9|0Uo5jCE#%@#-f_m?z zI6&=s=fXCLwd3PKI;Il5^qJ~c$WrUoB9q4;j^Tg%!ctWVO`so<66RK!u!YndD|I`P~+#^UB(x0!a2&A z=7R(3zaeuqA`g~UQ6=2Gu-EOkw&;ExzWwJ(?CQ_s9WL*tWc9bGRw-iVz565IqF^95 zPLRf1c9j9{&lsXFe_tb@%livG;wj}_C)3r(fG%~FlslFjmOknO?Dw<;?i0LEsS-p9 zPqDwByM)nZ6W_JDT%DoZBWo1hiWo{}Q~-&~e~@=^zd~@dVR%%09OzF%zG``i!-xO+ z0>T2x9lrkOcm#|S!5Wy7^d6C;)u~+livgbtobmtt z(R&{W-v7(z9nf68|F7@<)BOSOpSJh^^3Mgo{U_+O{2Ra2_n+zg?`rsW`{#1~>z`_m z{^v;k>z|HT|Idy6*FW`Y_5c5YuQ-?e>$f--99uminE;mvCO4lCa>ql)(@@W{aOg!@Dl_H?31taF_-TbB$)($@SD3JhjcUxW?uU;*o>1B4M$f_P91P4V{vvW75Z@o4H^+N(dA9bHzQ%J 
z#t$hME5}y;+uNZznu;YWn;=|H+_hBdh%lWAffZm3CMS<3B*-RX5g4w0<6hOc6jhUY z##P^E#37j4tJj7Hn}yACb8{2vja10Qzlb&Gr`vmvfph@@f3je}*p)|eJqDpLW{ubGjG`auh62!7(9HZtniH-?fq0rX>pfRZv+xA zNzxpFSZpyw(V~g~-iZU#lIG4(E6)_;fT&;mxz!_sKiBb|gn&k*A5gveEEBh@*Fr=| z$vG(lh)1U4WC)r*=n7CWJca1SYxv)C6hPoFH_-{mngW^3L;rKTb;iZrcou?X9xV$? zJO(7cqxCD8MyawT6f=S@ui|i;&~}62PNMOZf$DSh;ht)16sX<{}ND*94(Qpv9}2sqTNS^q0-Cg9snN=>!H6bcvD!{@tck1Ch8hHfs7OoBUDc;rRz-r`q; z=^3jpHv{46lIyu{V&@2+DNZmZ>?XFgE!s0dl->~3&3l6f7!DXa*7n8FWMY|=Teej# zscppB_aS?PC$Mt_`8ZA-98sT%7e!fsLMe;NLX8~7Gzy4DHd-EAG2W@lqTY*7(dXOG z5*G8o%Rymexi@!EIB~wr)tdv5$sM4}H(=JsWvOf~77-(VJ<5}tZfp~jzLb##v33#F z8SN{Ol?#A08pX72)!D^7wDrhG%$0M%No|0YEQ?MsA13vYi#OeobjpV>ecpB16Y);F zd8py~lC?fQdFU>4us1u#d3ZBp%eHSq)?ORS_yC_!6(Pv}>6SzBd_|9ca^~|~9_ei6 zN5$Djj9Fmn_>!s!k;2IY03SuBv?gv?P(qVD3@=J``Nag%iRC|}Z6dVj6CMRjCVmK` z!7XvF=s=^o`n3MSw))m-i(f1eDq~4O(4nJ>1{5-4Mu)eUIH<|khhTX<7DS3IxF_cO zISJw6_eP&&{y&K`?Kl%`G~UB-kcRi~|r|N@kP=4c5;vfqf+SdfIXg(eUS$$ylM$}-O>}g z1M>`^vG+j)2p8xSxTViu3r)&_cn0@D`Y5oAe_=dzaa0z_jP;BeGqzs(wCgfG^cp~@ z*~gbKOCv~Gjyn<`%DCDYo7J}hZ|KPkMj*QV=p$@J^)9|^?Z>|_Dg0L4V$`(dk>q$? 
z=90P_md&H~$E7)5c2HIMpb1%YVr_Hh+lf}pj0qBus7wi2!76&zirhZ-7K}C0_57zN z?|>LHmu#fg~dsv_-bi*0Ppo->GehT8wQk90q@z*pqw ziH@B+@6zfR4fhIFC}Tu9i-(@A6OoR%H-PXcv*|`f%(mFS-#~GGp#_&wEFhb5CeaD4 zM<|RAN);iSI$}QxvdZcQ^(6Qh6^gij)-b#Lgx{snpMr?T1`Ul2J0r$NTD4?-{Z=dr z2vhPX34{&KhDsucic9j=c$34E5-lcLF9z9Mh2fivR7^0*rYN$k3xiP1ZdjV%{;onfp=!gzWv~ zHID8U1H3I%gfhUevUrvTGou7@SWL+vs(2EW!U1h- zEb7OV6Ok0>+>_d?Z6%Z1!ema065|z~f@lbK=Y_j_-TrymN*o~e{n|Z!HbHQ(-Q;B- zx27%^jzVTd6haGZ^7qd=>d>*Q>e5rljDjhQ`1&yj_CjrNvU@RTL1jAj{X^E@wn9HHL$au!coVf^L+Ae|C;)8ClAxc&t z2^Drc7g%iZM6Dy2605&MhlMuHVN-Y#3bz{wR*xZFl=}wPIoUYfc<|O@*-@f7@@+u5 z87roy@Qp=ss`>R z2Ie+qX7f|ORcn2({)r>aEMC1T2cN^$(+B>lnL7hU+q zIg=wRYtkls0Y5SoU{MxApb>(UvJo|V+sgv9GNWnlcM$)j+3-5U+v5GhSyR2 z%BK+Vv+P$xnv&4yr8ACe=#-(EeC>q_uqd=obg#0mwrqN( zAc$=-qzUovKXc}cl#x)5q&ZLVDMOSm4I>no-jE@$_ioe~TKiViejNGzJZwtQA?7hC zj@<4FY>7yZr6pQfIa7Qqh4=y_Ux6E`)lYM0;x_0S5_FwKwjBGZw3@P1PkK<=#zi34 zautO;V9W{g(ry>?e$XB#rY87z0W3w4y<$<6aF^6)o)Sx@kvPOMQOmG$oG`+oN{GBT znUj$NHotUBm*LaE1A}7?Fu4&N`b_k=TtZ{8Kck^`gNCiO!$ zZ@ZIqo9bm}m@a2K-!=Yh$X~y;6hM~m0g`~Fb?3iX!^d|DyvGBo&y*_C$;*Zh9#Gom z(lF(r@yoe%4!2Q>R5BZ}Bp5-4vuQ)2jcX9pGTwppO@LPG%Jtw(x(N{0;^{3D8oXBH|PHoP6<&mrhx_lA9&a<-+J9^pQg*7abV<2|Rr+N+_LBjqOEQ@hCj3E{p?I ztRYtJY&@ukK&Z29lC2@x6JD=?#u);&jf|mOc@XXmlWOI zBs|oKcO%BV|!+FxZF@_{n7c4>jXG{2gr*<7ARKkAFpac|`=7{9dvqU)NZ!89+@KG&Iw;t0E z8IcB|x?&a@JZmapog@nqy}6BrjC!CrHFXcq6n}{_=lu$Z!#5i{Uu>OqTeNBfEGkC^ zbJAw~sgC6uE@^M_H@7?%kZWIn?c}qS*J{swR~SVi%bjCu$?Mb?ioA8S?$oxfGq(G7!bGWzX7vu48+Y(KWm;vyT-5rzig6-$2+C{CoR-D%{yGkR}&?H8SG_Xox6z@=zjgU~MMswWk>n6J~;Awz(^4Ifien&^J1%x0N^^C0EOc!BJ0zi7UbTz4S0{EUCGPT$u1?tiRfc}C$q zv2#DYm-qp+j+eS^=Vp847 z`>z*=+kVk|@W(+f-~Q#E@xisB4fls?Ioypu?sTYec|0hctx+32vN7GMnQdh=eZHk%U9scVa)Y$Y%v{v)OYX*j#LJ$F%`2x(3n7-< z***w8L8W_^qJG-vI=ZHC`zc2+&mYiwVsp;hiaE_jTZgkL9fjOI_XXCkhT)Pk4X9D6 zLZ^hE#KT;#mUDDM6aXzh%QpfI+%6oYrzG_rU4&KNb*Fpdw7oCIp8kuz=)v;0SOWSq z*5<1sP?{Ic^G)RLM{y?#7dE_kW4olJj|Crcb1%2)pBV2_Al+@JrM37NEc_a){Ju#) zp2pk#@hhc;h3vcEyb|%d_!)1=*QOUllx=?b!B;Y~Lcg(DE=T23v+WTCwp|4Zdu`*F 
z?`9$;MWk;AXm)}3!Y`+c?dPnYM5{&34H;>hB}o#viar>|^D zY&UyC-74qo4c2czS~t#SIFO}lu)|S2ayI09*4En?=^6CumHOcL)q^&&>A=SQ7A==n z&z^jpE3eNPw*Or=%x@^R_WbaZR!4Sa_7V^GX}}*_R+1a0dPl|{n4Nq=cKp0-q!EmJ zG;)n^y!CX^-EcP*?cxUlk@U4liQ7Ct-(%0!Yu9el_4QGnCi;nUT6BtfNL%B`hJP@9 zzllQ%hTNgRio3d&j_>I0!M%Hrmt0UF<|3qvO8b$xA$9E7QOp|YJ{B4+N~K&lIOrSd z6s9}>eBjMzTxr|tgdwFCEpPorou;|5zVz9fi(xv4KbyJG&yk zcEV6qO_py{&OD<`;2TKSGP(&_Xfgy7|QZUPrbczWLxlK`IeX zp(JB?y43!L%H8hxv(puQtxCyeX^URE*-Uw{F)uVU^s5HuXmY$JyfUpT+)pzVa;Kl3 zUV3b$dG}O6B4Y5PZN26{yK^!tcFCYB@>Nm~tJqUm9pbmz>+-nab28h6P9feaGt##R;Amko;`c!n={~cYb~u=C1#0{HJ!ZCO>Vz5 zcV(k~Xb_eZpUd=$v^p`dI**6K3)md@qj8armdD3|B^FJN4 z!t}Jt26Fzv0^RUI&*bdupSf|XBezn<%|xXrU;*Nv(`(-(6ghlgZ1VxLnTxgV#NC~` zx`zivme!}UKek@6w7KGZQ{4B((oG1K59%gEmX1zN`hy2gr+FTfl|)JX#g|_$>1p() zzP=pYp$*KjRK_Hi>3|OHHpLr>H8d4&f~Za@nMC6=xpA@0rw~gCi%LsNhkLFo3xANE z?-9uX-%(|k@cmELXUy&tm)FX-lca8GWyRDZU59iU;sn94Aifvq9g=C6mzKXQ%&ebg z@YhYJ7A5@kTsgtgN$9h($5bpoINI5UlwTA&;?IaeWI!TG~`~d-~lO~ z!AGx^Jbqku=;U3MLKJ^66)TM1nO~&-gAV&G+Q$h)Xe^t4#-$8?8RYG3uzW<*>tbP` zBOYGqis#*R__LW&_iFXKf|Z9;`aR9G8*aDd!u+G+*d%`6s1}~)COOg52p+rQ zjc3tglAkad0Fk;>98XZtTM^+#beR@uaMo-{^3gLZnzQo_diS0}BY&}?5RgL%o~N(e zocb7?)f;AD5LdXaYUX+H=RfG#<>ra9%T-}RFzZc=3rnC*`qzD-QRaD{;RTU!U^Aq(m;@6sJOP;tKNSibb34#w=PCD_&y9j+t;{ z7BXfax|8UOFMcDO_r{{q#l=On$v3JW6A7yqGyET}pd^e{8(Y4*rw)*m|7pIXx> z<+QLyrYzH_#|m6_Ljob$)edF3rT{a7)BD!vgwsWxav$kzZyy#j;A-zZU01~oXc&Gq z)AYWw>h+q((@x?*DS{PM&7U7?in(jw=FU+@DZQ^2RXhblIPP?{lTxI+3{sc;bf6{v zTzTIuPH~jmVq|0~*+h@SWP7^&=SNMo6{ksOE*J~3mS*4NaI-^vAW+Ghi=O`U)BlN*QO3y`B@A^9 z+UiRiNb&COeK2oh=o$^1R2rOPTt%g&zqYBkvY#H&alcW@k;{83o)V5XPw20+_W6&} z=SI(`@Cd};BI8UL`6<^^9m7Y}j{8UT^T&kFm@hzy&RZO7hrgvwc1t$&H+{9kZqU~^ zW2{nR_XGwms4vX6`F<9Cs(}b7ZX9N(Ul`Ns+qX_ND%(#rIH*Ca{y4=~($f0dXFw#p zJZlpoy?=QrW4^ep6SdR5`V1d#$1;nrvOJyanLqJ}d&Tt1TbK~xVlT2`9E0`=#p{|mdiW|%jSQCC&#FX2o_~fFzE3rP{O*y3@=Dn9k0yoao)kf8u z9@+UDptTjUiQ#hRNXDt=oot@Hs>m_V3~Pyu`PDC87;R-ySeI9;dg?{*^Qg!?(}WH< z#SL2&?#q(;!t#gIBI+|0HbRf|d$u~Ib2;kQ=!>zf#m6EKY-5n;-3J#p9Ez=|LG&Hz 
z7F?2P>RVsq61TD}+;~;|94FxW$ruadTUnSo)~*Je4m}r{f3m`Le{_<^l4F;j`rLn_ z-8;Qa_Tr*U(R03@S)62C}Joz1u%|@2#X!GzDLz8XN)c=0v+moQ4b#-cHXzS z>G7);0;l(TXQffWbxZ`XZ{p*R)&2IvtG}p_%=;NxELasx6EX8Rt0NuC&m25xh<(jp zUApNR7-YmQd8JpbJGfs#BCcu0rw~eh)v*2Wt+}s;t0L&l49LkC9?wmB*3i(6_p2lquF!FPmS{&n_%K!{o=z0b{w;H(AN&&RjYOtY(|~4 z^5kVSVhi)G@hE%V1$kZ?J10HDA9QNi)8^}=HEoyd?(2Qdl!5nb$Pu+pO#h?>Kp0W?nnQxS-9)KWAo|TkX1h^_rE9FSPAnXo*- zwE9vT?X9tkIN(D2ZeHo`o)9ysk|L(ROccZu)=ul5s~`#rBfOz8J3ITJ%8a8X3tDY! z-RkOf+oifXjXnc1ocAn5_4#M{#BbS-x6s+iX{Wt2k~mCvdMlC4ALmBTFCSrCm^7%s zy)khJI+^yxIOp4OYZGqgw+X-4<;>Y-Rn05wV3X@__qUqrgigL)CszofokKmMsOE9_ zl=3~!d-<UQtj*S+y&c=HN7a}vR^2?@6-Goc@!hIud+Yh@8C8?38M>cG(OS-JaQ zD0u40f+^lOlUQ}W1ZuiS>&}Eij2nX8kZB%*9JzJIo}LLQcbBbeiUi9bC3cW`lpzQE zZX&0+vS|2H#A^;0j58eACWP^%nWs4`ZU zggVa}mV%U^7L$Pkj3~+8`fZ~gxrk~VphPzPh`9f&hLN?oR>1Fr3M2#=!JRVP=D$3k9Me}JPR%>zE&G2%J~gGl<|kEKs*bA?kpn#0+{tL;Ln@1 zyGVrF@`awvqj_$%t(w(&i6nkmuX!>H^x#nIrLyiSc!=%JUjI*X=O0(|y~pu$@9dgw zGSVL9M>aa7n8{UsMD31LvXYLpagxX=$BeL)@?(AtJ+$IlB;0-|y$g`~7;o-mmw>&Eq4J=PqO0vU)&m zU>_sz0Hra`BR9sd44!)CWw8Ww)F>&a7d?x_dYc@Alf4Gie*co|WLEi-z!!J6)8{UG zxKG(XeP*f*kJ{Cx&p*B{eLK*?;!*cFt-8Tohp~4vGt#PugqHWh3LXD!^54SzqM&uR&P5l8iZ`RK!hpLS8bD}SM8sw_Iz+bP5OPe1bQ13c^`Uv0wVqCdwl)zv1DemHi@bu1xOKZENS7ci69zq@{FgIhTAjEF;Xcg?F z!+7u(S_2)ZQy0tJrUByOew1~ey7-(35lk*GyjG*!h>Hxplv(m=hjSGK9QYlrqV%!FC#31@l|twE)zLi}m*rWsHU z^(T-&-guE*Hv&8mgyo_aBJWEE@UXPA^A?*Zl*i%>*41>iy}QOEWW)-S5BFM1 z3DIM_wnn>}^3x*UNMvwWuoCm_8gRauk=^y*M}#(iRanbZ`B(}klf1=Zh(w6IYok!` zdVJgV^b7ek(73-g^u1o zzVvDnpTbAlOM*v6dSvW^F%>~)SM1{i4-YI1+-co1b5lg$bR{c{IZ2^U&ORYoAeR1W-G{j~ zCoeBAHza27UMx@ninkKYi;W9iU91qr27XQb*(pLyF$qOr$?jo^z{P!;RW|rp-6J#mRjE6E zIlDGoomydtX%QiITtY%EnMg5^7`;Q4PB~eI1k%Y#mL5?>cMSj$rKy(EXh=XXpCjo0 zvGX!q2-!?Z%t@ePFJHd=d1_A+6D`zM-;#wwtM8|v0RxjPM3hjBaYQ^dB$YDo9#cgw zGWqzQ%8&VOR);jHDi@kdShj7iB@3r})0~C+-0l9u<7I2Qv^&8m6v_;=z7Ukl%a$@M zxww%Yvvck~%T<5U+JM13Y^_0RI!x(~0d}`Og)#V1^Mm~&sq`aU=>H^|d0U^=)@qe{ zVC-@SpjZQ&JyY~zG7%&$(PWc6h^eBK&z7p!6#BTVy7fgu$&ANJ>J4TDp^%G;h?w`= 
z?9k9q8L^`I*%j>OjvbgdODvc_zY2?E4Bp%lCvfs%b zP7=Y=Y)+FlZvFU)=NZo_Nju9Pf$U1ZSzYtCLA?j3c_|ccUUZuDfycX7VnD{(8@)39 zj?H@ft6iHHx`dNosL7ZFw7}}|2fqWzkfkUv|Ki1OuP4ao!rDR%%J4|pUH)s7r}_#P z(&yWSbQ30lypa*_#S&J66x^+TP1MLuEBsSV7&V79ve#)5Y8#mv>Vj^MsUtl)jCCul zkP?fWvv2gqDcg;wYb$A*HHiguCJldq<^!p?X84JZItl7JfHc$YrnY7E4uPKHbuTW| zRJLMt@f8 z=3rv}La1a;IXyvtB4Z<{>fmOVrLRdwFTSH}VV2Ml!p7w1yCb=lXjM+ShH38M}9{P~=x(Yd^b&TzU?UX}G=K{P_pCKQU!vDkbTsS82 zvyl2oj4b#JTmKGxd3ta$I-=BBcR8V%G(k06t|)VZL>&zL9QqCk6;;C z%Oas>Q0)fNZ+&{+?wea%h8cO5qQ?_*o7k5IJd5O_k|ZofwIu>zG(+NczPvKQ+?(&> zKa1;;3o_=b>%m1=4^9 z&_iM+DzV|ol0jy*s$Qu|6~iRa zn6WybU)aesIfywL`^Pk~k7s?mSzCc-8o)cMwJ9}pD1fQ}@SHVkd&!~6Z%lY}VURQg zIM?Odn&fe*C-iyaDYp($uEH@wz54E}R75yi+;X#{q`gOb^bl z%n1mVb*@2Il=E2}sEmYCERQ0zOK30482nN}iWK7k{5eO6&YIDqe+SQJx7%90fq@e_ zg4F*0(MttRtXkLAv|&Yi@iAv*{(X7z3rbY?d9o#~P`SCi`@^_)pJYYi zV1s^Rq{fUQ+bJY_QgWN{p%8j*uhI&__&nqP$y_u2fxJ41CHv7xf?ijenK?#?nupE# z=DD8ou8FVHA!0`*8*>i_JuSHJm%2{f`_buEd6l4%rrW#l$M!1sM_E0H#;U$|)V)L4 ziI>rSlBC>+RKZLz7W3?|Br~NlXUu!exApsKY)z+jI%zB8&*Z$)CQ#1dY0*rpq5Im& zNKI!6qW$PA=+toPof^U9P=1y3)7>rdwyQs8?~`YoV0BK_pQ$u>ag+Cb&;c5H7>dvX z^uf}Jad}u@mnEHyNkk*eSO6|yz^lIo3dO=sR=UJdnm<&6m0sGgOWrNK`gh%>w$(|w94f7K2>P^S?vpQ|sp@@s^gLGxO z)SwL|1%Ng%)zyK9B?Qpc6N(^0SKtFdu76&k81q#JTbt;ICPvdQlwd-CU&4k+ubte7 zzTp$@wci>0$(X~J}I(4n+2leL|dtBNlkgx1Ay9v)DqOi?_^n<|Yr3*Pq zVrs+_SJdPdT@TA0yVH#|^$LaZ!&##xaAxs|F_62iP8WJ9Mcx-gUF5g z38C?MX^58~Cy}b`i2286iT%KfQ6VqNHu)~5qcrZ$L}F$YP>Rl**xdX=@m={F{nuyf z|MUD`JkR%8TOU4IArs-hzRWxCP3LdIEdH=&?tlJln8hVMk2T9D{J$G)x$X=YoGLu z6>Z7)Rs2<0jU^Ka1Xo(JN5#ZwGU`bjc$HYoq}81&w>5rOcd*3y9YaQuFNJYJLP({K zMRU;wNEVp}pwMECEmCmP?NU#8rb!rCwtMk2I>`(=^0U7X$M|}JW63^eYpm2AzTvHo zIj%UR95O_@rHElUTg}XSErN|9@MQMLcK7F3Ucoptky(E8s^OO9%a=C+Y~6OLlDba9 zkEJzzKyE$RpS~T3EPI&lqZ!bY78ubh9m%W^@D` zQV3@7Je3n?(>~g;TEJ8Y3ya|I%%)-H1W}c)^?$SfI1d7el!j)Kv=WL5mRf8g8}r8C z_^j6d{Q)_D5AUmc%)5BLyF8|hyjS^`dzG7__lHZWhKhDXV-RVU;Bin79_Yet3NM_+$^7+X#B#kZ~+b3#Fsj&U~#jIy+i z%0*;GNG9CBEb7s%+u3Skk+4S(zfl<6+jpX7t~|Dl9ANXlJla3KX5~aGu~NYd@%V3x 
zKdL{IB)bs>g{@QD9glRMxc#5m!*k1vd%paP{{OMVItN$hZ;Db4M#d%p literal 0 HcmV?d00001 diff --git a/docs/source/assets/kernel/k_vecs.png b/docs/source/assets/kernel/k_vecs.png new file mode 100644 index 0000000000000000000000000000000000000000..4b7be1385aa2e012b3733835394175af97f073fd GIT binary patch literal 27676 zcmeFYbySpX*9Qs+NGPC$(%q$`G}7H&BMlNmH$y3cBHi5`(hbtxC5<#gN$0@1QJ?qy zzVG?_tabi4v)0Vqb=BVc+VR`_4pvf-e2PYZ1_uZCR9Z?*1r83;1UNoNK?Z)qW0X_j z;Lyq}MMagQMMcS!9KdFl)~0Z9Qo%8CkJVJQv3(CcS(~xQ$j4EerNQvoA4U=Aq#hHJ zqhiUR_@x_Z;%R=%r10HolL3ep!qO3I2<)5#0_n64P@&4-m{`O>e z$P3KoYIY!Sk;;w<#}L38liSe`XCmcb@)KUL_(z=^9t7^YFACjvj2|aRvnmQ&S}4M% zD);(Ju8%C_ma7Q9oE_fZYbrncT!929Y)D6oR*8g9M=IIF=%*I@2(ALNnvi8MBJ@Z+ z8lSC7IGQxn{umMStNrR5<5z2f5kK{!r{KwyF1@tb;BY7>hx>7mfvE%6RKVFk(Frh zV*7;3OGjVv-A;{tOrS9-7QgtN^lI^_G#+{ntcOROAexv&c2`G3KewotVWTh2|qvUNwcw-M_yGFG#yvbeH0MyRP7v*4Gu|I*Rww@mL694 zf|7dtlVOj|y)F|HLG|67p{&(tAAfeaL~~Gea7a4%iCS@rkb0|Bd&1`g;-Y+cy5qB? z<6xCbSw}CN`s^J;G#_OT-}clL!i@N&0FvH z!q|iFea1|{m*ty8ug`={o45Y$%{DW3x!ZHUGpE?*l4eRcV^^0|sV39VW$_?)_x{+O zKoZl_1G4!%>)=Ol9E*>H1NcqZMMi{d;NiX!B06Xve6ZQ$g=ZpzCwK?bO&v}PS2PgiH{Rc+BFwzfn4F8T5Itv8aHf=k2 zJ@}b6n_qCJzK~xCPy8<4;rNsBi+$V6^2E<}fj_GvX8kq$pn*ujFn z3Yo$Ga|T=f^TI%gpMtP?hOE*HCrs!^V zb|lG|;19At87{FFP|`k@eXspVa*4zr0{0>0)idu-G($=~Dx6@+NG$vcB6=~IaE5FQ z%eTDYNm7`{&#!{o47Xq6b)&6?qIQZIBE~ZfIt+Tq;ct@VKAQ-w58ml|V%TC-pk-4` zJ^gf`quVH|9K9Ur$MZdi^`mqCbFLcF8#rL&|Fx*6dbPpM zyUviGq|C3j({n}i$jb|(1+Ep_2gL{3=h1T5y)YT}Datvbj|l%;5iNN-@<}WOY+J0< zXA;5vV#b+-ucPUxSZHi14k&A|%AVZ>XHnrs()R`6NXt-{P{c$+rM9ForNMnB74Wk$ z{7FnSWmIvIVd1Qio_(bBg*fPFacprEdDhuP=~XHR%1bH*YS@L`oT-VTi&7=Isab-# z5T#5;ZN{VM4>F9={V@bFwsg&z&n&w+Fl+>w!!$(rBpM|=3*^+els6epV&WL%7}Jzm zviOTdG|N>{)%%pR%T%%)q)ml!%id=~^BB~IG#86J6sp5QN1yKKHTbp!wuC`amqxz7 zmc!?lV8{xxk00xMsiLaxsP;ueH(R6BMW*Ca@GJRveluQ2>=U}mq$|tBVUA&ru`1^Y zXzpS>S7vF>K?#crt6G_6YssB-K+ey+h}?4VEa|lf8i@%J$C^!%A%Vm#PSE?;RSVUK zIv;f;s}HO?=J4iz&dF9?ya!E@Opi^Wjz&QDCVBEsRN@MDOOgs3U1Fbu#I&1=M0!1Y} 
zB-p>k5=;>+a?=o0epL-|ed+o#EIvN5GrpM9k`4crI`>AxX2Nu$AV*fBdxB|#dcq=G zPeSF;#Gq>usMeP`nMsAIpSe`KJ$5+OXkal`f1-RsKTkGSQ@+($ynIz_LuX3oOozQh zv{+M@Tf4R5bwzOPtkHzw$<%Zo*P=_>{v$s1MzY2!7iJgm9`hd3`NnxIQ9i~U1|v}- zu>qeB?`NWmH%rc$d_zt;yOW!zU9(-=(Ux>7rV zo$IloS>~So{CUuU#)0Jl=fNX(IrAX1y`-@%!K>Tx}XL@+IRGk!Jc;jr?srVd@yMv!9 zL(HYkI6wL;8Ej9FN*d-GY8$o=cMoAC=?mUi*6Cl4{|ZI%==2Xt47!LAj7Xv&6-yBB z%bLry9COSn%91qlGX8ArVPs8)_DmsgME*tnH2 ztjlpY?lp9^bE3$c&b-35ckSoVz zOV6=8v%fHPk!H?88c$`aE#B7_t#~(kXHDm6;Npn!$R@P3e`sB(*V9qJallcCDa63d zI-;MVFSF{(o?&RkUEO|7{B?0wkYVi6Is=9Y?YL6hNpPG{=h2Z z+b@R*8)fOStuz0#IPM=dbc?@ijK{l&<%g5bt-Ebvtq+z~t2f@2%h(&(FHa}v64sX2 z;M#iDdbgd+QioFG@=5Wn^LfaK$|SiG>@PH(IBLtx?mMI%U+d2`PVW=UGB0==hh3yL zprw4y{H!Px?9-r&uQaP=s-j-;qL$nBaKd}+h8JRXuh$H6^clNjeN6S(10#^sy3D1Z zv~;X=RtRwvXLe$c^($7?RwvS@f^#k*-o*!XM@RZk zPXpT}^6l-{lvHI@6&>{)XM)qQZI}<3pSJ4Ud#q&N^&P9e&*Ms=Nf{P`?aM7C{M4}A z9jNRxznR0U5_t2%^FZTf?|k^AZHv|5SF2rVz=Zu~W=~gC+yYBV-C)atQ`eQuntEQ* z(^ekX^ytpQtlQ9r&WZEPF0(#+gTnH!2K)2e!y)uqx7yN$ROiMbzQUV>lzLZ>1GGbo z1=-e%Ysdj?>@g#$5h&AZSaI|@e^lZQRnrgK#K=AGI7om##%Z0T2 z=l~V zgqG0Awtb@_;oKbI;NBC#o$kVgZ_1rct|7>KJr2RXROBCX4g10l=cx-Hq=zbuQs|mF zhe*r;hU=colmf%?y`k$ix#wFd3JYerE*bTBwojCPp(*b1VT*2lwdrbp$xLPnK|q zf1gnR{vST!!1tlepZ`aZfpCw3v**CqEgj*Xrx8ukAN_L-cMjZxd#frcEe-su8atSp z+B#Z*oz4f+{DBLoc2e2^1@NdJzVOm2RKI}!XDrpUoV4WSc#Xj}EQTgvBU2VP8@q>o z;P~BmfkPWpCqptf8*5ufUN-@X-%s!Y#}AiTDad|5;$$U2p(U?GCJJ^iCF5dw#qx?m z5RHtCjNie;j8{cW;%{@{On}0|$;pnFmDSbNmBp2v1?*tX`kIG_hxHX3D;pa#@C37? 
zyRDO<8?&t=<)2Re=|{}e(b&P#&dCyNOZL#Op%K{GNq~alVW9tf{>;QVE|JR-Wj`%-XYW=t6 z>(^`?|Fh};xb=UVsymuGh=Of^F`Wee%dfwU|L4uW4f$Cgmi|9T@rTa8uL6V?MB``u z57PwEXk)qQ06r2~iYclA|A3VJ{&)hv57!_5fiDAlx0hTr931_$AdU(vGoUIXNO2(1e9q+%e*SDsEXvmx5jP7S zo*bb~2@T~l8bUvL?Lv^R5#oBFw2$ZzZzW%4Ptzig%e^X6qvP4pF673nb!I}fZS;%L zTR3<`EaCtCgCduX0H5%^xDVyu_g*2P^o~CJzb%B}{Ct5Mh))z;P{@4Y5dQZMJpT3n zy#zdg^$iX_!MCjso{(p}dpM_T@3seh@ zx;~35VHf_HtFR>sRyh4vm)zLj9C(fOqrtgIr`e=e6wF$R5{E$5P`|~m)Tgv05Qib(PjJ}7^c*Y5Jjt~vDn~zf^&H_;!|qi z_7iCYwii_My)EFedX~i3$xvcGesl)EI`$F%3CGrILE!g}^3zBPg+V>zl~QqP}R{hQ>hbM+7dincZ@I z2gkA`)wQ(~Jnmr^6S-?OjsU@AY(0(t?rXnofH%Pkx=PO<#_@ZJ+KX5S5ZocU4w&;M z5qaoW?a~keP8n(89xKBSA#rm2HHY)Hic|T@ls1O!NG1|tBqaweHwRUgZu?Ep!lFrk zB=m~ZtZqi)8p^-ize7anT_1^z{!K3#DPbCm5{;6?tnL>euk!2lC`F=q2knRm%#*DV z&4E~E)hHTyhh!Py?zU$GxCw)+8$c zB8wioCD6Uf5e-I-;#ZG6Nxgo(?YC!x|F)ph^PA1s zEV{;oSbSqeG}$9K@;P+*qSN3z7)`IL25?-J;chbtT)11-x>ODu%@Exzk?(t(a2&GY zVDcU9@3veQ2Q-1fOKHT1Eu20HEHYZ>%T{emJ+cWPora;w6>+OI^R}w2t+y54s;kZX z&l_##Yl5A3zlNmO-yRQ?s2e={qr=F=00|;rjpF<*LHtO7*cX`Kh#XUX-iW1kKUQhT zaoFhJaY?!v{~FF?x8keQ;8YN)tGRxj{~4FDFjqb)5mZ6QVUb=Ty<7sPLfi35#YOu1 zc%;SadM8gt0>q?UA*NoS(lw|nak_r}fxb^Mm4FI+7;<~m^L}@_1X^4=~j^g=IOC0P#ptiLy5UD6nn=*_4Xo0g1` z^`Di)`K=(Gc449aC8-J4Xw)SE?B;^oY4mL--p2Jzxv``%?D{N~2TIGGlU)Ypetl|O z{gr+A4W(&XGrxX=(`%>nZg!g+4A>;eT502fOj0h1X~Y}T&B25~j61i@`%Y}dmy)4G z(nN=65|3j-K9P9P*|cb+oZ9eqdmc7##YFGA(08o&MSV>b@Fd`s+SE2+#B`7GXa-bM zAckChZh~bNhF5lV1b_BEGA4=jwr}`Yo&8$0!;WsOUZwpmCNZy*oxpU9J#?~w-o46d zW*CBU_+PeOS-t%4+Tz*G75d|JZ(dyME${ZJ3phI6Af#GdSDKHU%Pr3x0rWO6jv~1- zvYxLQ3THi@jsDrsSjN$EcWuRp-cy8FO{0*!&6qz`ph~+|IU?woy;V)|Hru)%>LRDD zt*!Bvnh}i3veTn|GdyB|T;@A?d9GD;~xx!=s3x#`KR$d1=LBw z`Tl%Tt%xZvR}#!1>RRx)ml6u7?I_A%^E7V_*l}E|leMgYebSaxXap@6wNQn?mEQDU zVy6EBEHxTf4&(PC#L37UbM@@IaNUc7TT&!;o-vf}Y%r2#Nyg;++}|vhy7zY9-(9qt z3AdfVrSUD$B<)Y+g+-EyxWDi&3LURiaHpYktm|!J-q1HWh~cuCbEBbb3yo#gEu1RS z^dyPe#VGTYD>G=G1XxYf*L>7X2n{5(wX=5AjaZoi3-eQU&&^$K`A7qDefDp7OQR(aNy;P7By>Ti@;QVTTiV~ha}YU1?O 
z3;%vB43a%8cw_A-vXGbR(Vr^6+AMd+7&YC%e2qtOb~>S$qzH{@kP;SdDK@Xv^VrEL z)UCCZVTueu!lsc^T5NPxq+&PfL~%ZBzMz?oTJEK6-k=xRN_GsrJ|19=Nd@)DO%yS) zHlIc|z|QBRqNIxlwv&5EJekT_z}Zbgw?8q}E6s+Yt0E?0UszjS1*ou_4cZYgGTLT& z?LF>Wuj}}PSqqkFnmOr+56g@V_RPT`ghUDp^*q%4E%&D&Bux=-%Kb#-qI1ripvzy z4#hLbuueeE7RPUUgzgN9gdqG8A?xH(t5D%j8Q@)q!M4mayY&P!Kq-CozB#8JFK>+q zc6Sc%JJU-b;-2tgyOx&huCknX0&}8XTWJe8&sJT!-(dBT?vvasKGJb|MYZ8JAl+XK zMwQ`Fe_Z;+l5h7-ttN4}G>;-zZ|6W?k1T$Lwi6k{h8}Tv-==|3@)ev}48NV+qqvkkDnU0O1tED%eg!u9s_}Ggztg_`- zJG#KdKu`!^^~Su^sdrjIr;5D2at)AOV-{{)bi|@P( zt4?&&Z*nUiSL~1}Zfut3t_Xa_W@}{q$2&}S1N?9t@VpB9Z%IW!LnIC#3+vaI$Wv6* z%@Vr58iCff!b;X&3S5gmefjEp`H&U0zOV!WSx4Yg)fh(2bKXVg#)au@(zhuUiZs}z zN*N;kF11hD?&Xd%)9$jD+_zHJheDkNt|fwl2YD)uy3l%Bn_+rR%l!PdUw zc!JVraK3V3GnJgonEvSVAG!G7UZOi53NIPuNpTZEI7_S74(n1mq-}d|Xh`7` zuvL{L^SiTc!G-RhhJdNPYsW;`R>dXIq{FEunwSj|+#WX85HOjoH}adO5i5!{Z4BT)oNSeaP2%rA=~P$G)z7T|LcwY?$w z|)QM#?BaUs2t6QKTs>AwdsYebH){eX|R? z*MXgt`8Zn-RIR!+#sc>?_amOEoQo@-7M-s5pUcx90$%ic!NmENtm0z1p(O5+mf(wj zY(gZk((si&yoZ&NEdjc$!b+&NhsE60Y%?qJ5gXfQF^MQQX12s! z6_6YU2)zWkv2ot7p5d+Nb^Z{-Z`%mCzM&i@U5sRu@X<9k^Sq&PJ#GX191nh_b!{2{ z$sjvav-LX0I|@2qTSmb7KH8Y@KmK|sFcG_jKHT3E+5A0`?`g0-b7UQ%gFPOrv;##FKbByJaImR02v-dwhDh3|iuirIwA2S0r?> zX6oq&yy8jc8U46Oe||%0iEnPqfrjy6-&)S5b%{R#6kI1mU6R7;Y3a>sUPMqlH(qb% z?<+Ly2&Bi}?-wld4cT){`k^UtTdd!x0eEua81_uh-6C`gB46O+uXFjxAKMrI<7~qr zT#I}_`XjH14mfW-PV+N-8%s=1SVZG}HSVOe6&7<`Eri9-~ zxZ6Ht{xhUPso(vYUe@+ggYA61qj~=oFWH)hbq%3rsa`^4t~xF8_gA5$LaDFmy}yKI z&`xlkszyD5;m{lRhLd-14kZz94aL9i;3O4j=(JC+rnrLk*b~YpaXAYp&UIPs%vR>@ z@&N7}`}x7*u5%*!J~SpRe%n`W&XJk>rU=6&=jCv!zWU3F60jW=^(?U<|f+N6!!v0i6=9^7d4d z)#f|4G7|+n_og$Rtz=`{89yXWyA|OxK%i*%z357GYc(8_Mr&KnN;=&G9%E{-`HM-& z_flj*7W!!^eIj2-OWsvmPDlvc{_5D=f8E!kwfmq7F$GS;tD$Z&Ri0;1()mXkN&SYU*k!g-T|EP=u`*k`WU58A#TW&(cdyOt zH^TrRit?LhsrieK2%`XEEh0=S``Z$%V%ar*?Nl7%b=k8(wl8hHyDnCUyON}lPc$L9 znN(FwILR_`T>bU4V+wY$3?{iAmt`#=5%4f^)af>6TP$@vUYBO?UBQ#Ii~tMtN9Y1maC|%D~DDKd`^d)q=|a9cF48;}+x48SG! 
zG{8E=>=w}0{nz}vUS%^Z`VCa$@)r`DQe%`($B|_-3vWD3qz%RuON%fpveo1tXFMUE zWIMHyaUXHq9?hV?>O<~3X?sjM$SXwL#D2O>GE?;<=r0)c6D{AalGgQ)Iay`>Wg`3-wtY_Grbt!CMR(jw#Q)>um?(^@`x`HIX& zNq9uw2vA3YZ6&F!-QlQ)1K4umjsyMuPXuUXGx=wm#vIpQdMsg&c<7wTLt+|suNci% zIVYB$Z)}n|4oW5Un$)nB+vdo`3zb=9O z)ee3n^KZzGnnqYSx04_lO7Va13IG3%{^CaeKOz1W7{K=%UVrS%bGqqeR|vGr zxo@Y1I9gv268Z4-R|j3$tJ_J&e&=J=v^P~z(XW4Xp;+Q`ScGRV>X!NdfbT&4DdafD z(Lq1k8Ul^85_D*~GaTY!6>>f{rZqjOyevPNAia&2A3idU(4!UFIEyMvj@lKu7l6&G z6~nk^?!F)WwkBhEzZ|{4I|q9vIp}*|6ZDQa7dBdKmYjkv4m%SzRKn8lk(Yf`b;)a) z8EQLDT&tE@z0N@-5J9@z+wUhI&z%}?${D9wt4=`>`|KNzDjx;Yo28L0?FP1Qm=Eq& zx)Wj@%g+WXeelS+PA5b!e1`fIbPZ0v0XI~bSF#?$CjjoC2~o*dgR@nZW|;wgSfvkL zUGv4s+n==g!or(Qn#aa3T!c3P1KSONYa#Tixi24FB>;|7KY&QyJKyJ!3uQs@D6aAZ z76nJp#(Wk9WoI`R^UB)-1%*k2JV;j>Ivc|r^c{4*E0SVB8$R60-VdER5`4T%DhSz_ zJv<~-OC*{=GU0VSux8S$du3mf#%gqqY=4t3MRjo0t*GSZJ7_5<3YX9lL7eq?=`Sp%~D z{D^a(<&1T!xxN1W^q1uM&hZiR&+IXK)7z~dcUot6Zul4uqpks?r&et}t8&4U<7C(E z1^8*1;~-!4uLPWf&bzGu6wkP5Sv-{tf~D@7Kc^6Skg+zF8r}_+G}SzX0>GJnz#0=^ zrLKodGI#aNryy?J3!C~XzZn0tG;AL3{HDx!MHbkM!{+ZWBK-}(n!Xm|fNNqFYBYv2 zy3MEoXH;0+T~hP+9ZHy&X79!pmHvJ8Dgp0gq*ZyPU`slDf(Rb}E`b9X0tv!%EFv}& zDWfuyw;3jc=m_>$>G$0kF?5uFK+7ER@6OVeRgUG;<0bbldp};k9?cH7e~Q4?HCL3c zsRJKeRbx3pv++_Odvpf$M&-HDBLpOz^m{z4xa@y$LR$DzS%=ltl^|5Nv*p*u-T8vv z*YP>jWZ2~rS#P??!--I1bozdAq4s|x!*Yhd22bh+9shCRlz0*J;-VFFUl_|TUW32j zF-ObwP{=YpV!Ni7dB69gyz7U-WI+(3Npy&q_&$0q%PglGsVUGywC<;hzdh(Y^q}GS zLf2;Tx=g)C3)zIrb|D`Kp}XWT728Bv4~~LFM!*KCYeh{(GUB#mkH>|j50JRSNPaDI686{ zYEgrmBZCXfrTl5{D{H}OzuD*V@U;omJFR2Zj509{;R`;fz1ali55ZPYlCXntATa;x zcRR7%R}ErR+pRhCq^lZ^wRePHd_(488XuY576@XGC3MrxM^fM4ULJ*tjQbE16^m(+ zZG(uO<)&KzT1r?M#ZF%zXGp6eW~Cw9B&mgtfyC*)B)!#%;3^!wVdaXB-IDAbYHyJL z)ybA_(WX@@goDVk!e5vi@$#1W1B2y*JMk0sS3OCp08bpq_f8igs)C|OwtXinOw&|D zvxS2CN`yw_>sw_9yztiSexVzRRynQTaKkD+*69m>HwI;BKAP4HH4uHMj0oNoYs=k@ z;!jOy>h&RL+zUW%>j-$AEZS~Z;S<*J-ZfJp)%~LHGs_$QiJup!j}0%;hvU7fu;#rJWY7L>T`M`XMX~h*JhBP5=M^ z7+G|Umpp<`7`O3!PvQ^vLXbQ?iu!MopbpJWf3?58J02BCTxka=>wSWQ$AA2o5d}xX 
z7d@^0I+FLFYyvB=GtV`M@bw>3fHs6OvDyH>0`ORv42+<6x&Afs!{3q9lOHw#cK8G( zIe@u59}*__LcI@ z`_%{6Y5T|f#zK(;Xz@M2FW>h8I|z~;M|u$i;Amp&35!5{qVBvq&FRH3Tm-gc zbZyt0RyxFDQho$B@v&rTO;n&qMg`&S(r2v~Hy4MRB|6nAyEA1MOt;XfBKN|KJd8i7 z-1g@y4OC3-$3vflM`3Zka)LTvIgD7}!B(Dy2Yai=njjV5S(Wj_<2Op&E?q#UWWN$} zv)HtmBFb;{$4roPDEwiaKOWG_Z^8K6i*8tz5#yg;Y_MaU+Dh)MB+1B(XM&b*fY_Q2 zAAYU6%4S|;D1p7lihKSKH&NsP_K$A&y}?9ydjSGMU<;1OpjO|r07+q*I~(oKoZqg? znh$*F(o`7;1c*{`c3``%kQ<`O>TH5(`vidUCQ_mS8Lcdq#yvJ5aZ}=^(Jfg3 zOk60&H$r@edT#>Eh==N zom#VTIa3P;tpWM)kF3Io4le;mQrnd{JXqM0PNW9G^L+1hgVWCHcCCic!C2O(j5LKP z8zpv~a!a!tgOoh52mInuqAwam9~*@LRxHmeci?c}^Ps-x z5=g5UQ$)1H>&0bXpjsUQIEL{<`mg#Ppv$BikcMHyvw4Wuw+5X1gPu{cr4q(H@;_*G zCnTpF*^Tu{^SN`UXlERwj5 zcUOggr(;&V-8yEw2*7Ctgy1ISzM>h5EEAJ;x$>}BRQ||Aq~Y@>^j?rMYtt_g$%gSX zpvC@=ZXjEY6X)x!Z&QXlL+$c)WjXF)F^-cPaiG%mDJ#o`y7Fs4fYbmmstF^w%cWE{ zb4D(qpw2Gi{bEBk0CG87Y2Lt(@SlV<+#EFSCmCHWm89~scQ5=ultTJko{dT-h2MR=Twa4gquAn3cJ^62GJV?2*8AK%@xFa{TiK6jxD_Nu z-q1<1e6}JW9*}phxlj#U+#Zf?K zKRRp|qdqoPSGgp|Wq%GjXH~{ordw=fKkEuqUljf9_@w)(>p13)u-Qx&qUEcnJ6WQu z*$mKh^YB?w=QwMv^=#yEqk3o~P)JeSp$5c0Q@EOX4?x3i9Z8%c_2L^=zY7oyJ}QoS zJ43*o-YJ;&7)aJIzh0MH=CjyaY~&_qd-vV9QxLyhV{hOAZ_Y2fgL}RF!ZT$}PY7Sd zvO211?voaj)wUZ*c@y7U?9ad0f|@b$rM2P*8+rQb&I?|xM+IL?f#OEMHzT6Qp5CCk z*Wjh5PM)_dvp`ZfLsF+<3D1f(Z-HyR)yGG2Y<;T%&<~coG?v>!G%tagljJqMo6i_( zFM)l|ekyL(U6oyYZ8Q$?P&mXS6||O1els@u_AbO-dUj*_ou%=W0ON+|-PKlQKe+2N zZucWh{&&-h2ixZcPYxS)St_4qmo?qr-Dq#Lz5R(+2iyCkn>jw*+%!Y%VeqQQydB8S zJ|U*!R6g?}>h``^jQhcwhGY){Hi=c6-=oKg-g05>Z0oK;-eXyP*f2#%*tXE{ylc`S zpEET2jf8Du6om21sSWILV5i{DfIaA8b0%4kZTo*(96cs4yq}A^H22P*8;BWtJ$)n@ zM#8C4qGP(@&H8CnTx6C!? 
zvYMoxLN(V-Q)wHnAFG)sidvdk^|_GdY2icVOe+tU0T@>s4>j|L&*RxJdr#K4_^91; zFHn)Czjy3k%FPEp!8f{t)aJ$QG<&gZsF;-43z#- zXl8$A=vdI5eiM+{&>9HxhGY9Rn-zYqd1^JtVD0V`)%9VY*!Q9L#y3`@Gea6n$B!SE zUA>*Ju}RtvIMyF;p}J5M!2jl5n{ikymCOj!g}&ILesDCqCBgM4!+h8&!*@RmzzMHd zSJwUemJ0TJwy&g#c>1%mj^h|_UQc^oeMxB1@3r?j?k_j)8En0E*GKH{Y7(0}!*cB@ zaA-5%pxo+bZCmexkn;g?)%v1%wSf%a8g929Yiif$1>|N_UY-CW0#Ux&R5$YpJK zmBP?NZwCi@)7MWOwS9n+nTg4RB>iK2lY8$3SrXfx2Jc*HTaQpd(YdkxWWAbd@bXaQuWYZ*JO7>+~Wi1jqX!+ zx!Sw;lk;uS8}9E&;o$D?r~6!-;ovyTA4);@Q&2DUJFa>pEoE(T#BHYL;sp7FG%Oh` zNYqrtb4YTge-z|QQv{WCsx)e0dn?_PqEM8BzmmC-*939KYWd%!9Ui4kTmWEf^M&h$ z6qQIHjPe7;B z6K-~33WfAL0eb*%Gud&t!Z7B}EXhXW!92>oc`$4r0S#7ebW66DEgiMW0o1U(?9sKU zo?5**UyoK*2=2&-G|rY(PwUugKPt0j0@qe8%Q!oJhkuMSwUw6!(V>?qg?VmVaL>f3 z1^iMJf;lPkO@k+Z-bwSS0L2-1`DwXq{Rl{Q1g}*?#YXfw59`Qwfm8f1F1sbQ8v0ya zLeTuI)jA;UZlAUqcsmga)Jp_S$nzy|+32J#by&Lp@B}Iz+;q6cn(tsY+}V1b`_-Ce zByLU^ezw>8AfVEMef|K*8>wCbW|9qd=9Ld(2C94xIl8M_zW*A`R}NE16Kqa5SKN2`!dw$HpE{SGI+IQK+DMFOGd%!9 z#zS(3(56b6$n11`tibTtp>k?}h7(P$2nig$P@SEnU{?`UV8yvTB6Q#M%D6kk-zNI! zV-2C2PoxmH=#8~7ArkJn6(OKn>jl@M(?mN%QL)OZD=CVWWAl<&A$j<;!$x(c3pC}VT9(y z8R&$MgAQpK(*)uor8m$>8L5g>H}qX*bJE{_d1^HhlRGZB8brAohIauBYUb9Z4{km~ z1Ii%u3@7uIKcchLrKc}L_^>K=cJ;2zdHUYC{aP8kon%#ld?I$yicS-B$UY;rPK8Ql z*C&BncsNNnUfZpFZ%_3+(0&7Cma`fJ*TY5o9aK|Rc+-Tr&txS+i4v{}$*9)=zv{WV z#PMRumdL*D$Pd~yFBJ>dR-U4+)12ks<&uIu=iI_uD&4MMCVo&xvzD1cyU(p~nKB^l zuS;N0Nd@^!dnOe4n5!AihA}Lq+~=Muv>hp(+D)UHHi4c#ag!{#AFh=VSq){W9 zNJ}|=$H1<%4OL-JwMh~EZvl~Xey@FI%DQ6!2(#2PT&*8#z-HAB_eq=p_W+SpN@Cdf zXj~@;fTay}r0&9QfLhWVaYnDxG0Bf5wM)F5ww%O^;bDUPTGSeBm3#+vlu=%Ij#^vN`vFo|R@Z;{#ry|<>%A{tE zea-zj%C?jUK+uObr0i{aBi+>j(k5jR6;;-F-6vE&I}_%#U! 
z@;d6$f&x$&&Q-q%Uco^lkyl^?nX!+fE>Jk{8C{-R`C+(SscUd- zg-hXtTm)H>zxL{;tVcNK=tEaD%Kxuj`xWZ8;KIVbW7XsH!-ZjaV_;>3KeHsVw} z-cKv!m@3G~dHYv2?a65*?fAk!l4)mTrH`yyC+GS0)P96_w>Z;Hhrn;(v7fD4?{#KHv-*@TRN(wa=GNzC8IVBFJ{5MsWX-orlHw2+Ny%>fQ`n_deU zH||E;y-PJ!kh@K5jWt0Pb!!tC$SQvf6T16>yhemjPm!*T-{&e^Z^f)Bt*YXrl5s{N z!`QLtx~xx;+izrk^wAAU*K)O)ME$c8he`*7T3nXXM@8d<*Pb%yd6a_Hw$00w-7j}Q zgcS88(cQ+!2N%SSTme<8S&zJ;E<59^o6^t~m#wgEg`N8&m&+*jU!b!#eS75$os`r5 zwat&PILBH}O1RGyZyd@)W4qQj%bqd{u3^rOg-EItWQR!sn$e2`U233wZxA0gY&!I< z(qa8)6F|$=#i#q+C5zhv89Bp0n=_Y5clVf7W>cgc&pWn({gi-5<$4iM9u>tP+d@@^ z$5kH)tKUjxWRM>h;<%HMk@-ZGB@%gZuWA%);XUsJZ#KCeH&J8tnreaV z@f<|7zUs+{Ai76jHXIBv_99rJSiRKnd|%2J8s7%|T8}GMVEzSEH{!XTTPK+CQS3yr z^NC#UmNv5Bb2Kc0TVW~_7)MYZ=s>zNG<}x!irj|&?e*%gz{P@=+0+_fTa)o&KnZI| zN8oKCk4(W?J36aL?ib0e=iAeeU>xl8tFh+;>w}Nzu}XG7KpeyKrXim=)mYYA`V#!0 zar1W}T&O$9Gd5@mO|{DoHvKHdk^ZB+Ry|Au+f~&)c-Eq`9LH`;3?B#O{MSHGXTH{h zi7Ra!@f{AjolFkCqt%BZos+~aDGuUMDe65BNSFYzf`hpA1Cx2M!#c%4Vjdb?us^x^EVfEgZf=SX^Kob9=iH zbyE2w8DwyHyZLOa8uWSVHd63nAMC@|Fx{%K!#y)`I0}wSeal09MwsSgRn}Z{LfW4> zzeM+bVM{S^VB#z*Ob~i~w)-M@fqAXHsb*Pbznk%JQAL&+7wM!QcT3*BQ~8#q#P;XM z7olaG)3828$aByQ{(kRp2%?k4c9wJG<_UZv#5`}a3z!Q7qBMKrAhZ>||O ztr*T&Wjk!*w4^9glf|)*ry?nBh-o?kr@y6hGUZ%q?+%-yXsO0igT}R1q-j%d2AE1V z=fSPSrbU*KE-oRllFp4;u+1V^Zb?V%P*(Ot9Fx^N{muv{B5#av?AT0|>sLtY@#(d`iKh0OWwa6tZfg90t z&X^>b7*ntqHf|kiNfYGVnmvnA-FWjJ-$qKZvT@Io&q{ZXpKEntC_F+!)2Ah(f%NXz zr#tSRuDQeQ%z^uzftWN~JvABcw>KB9_xet(dRkxY>+;GFTJO%v7BL~e9&PyqrGPXK zZxat!_A&T>32%W=0JgS5ZwCr`mlatZ8H>M(;4CvbWU@YaJ5eeHDJh+Z>siTep499O z|D?HBw|sYAJIomIxqmh0Ej9Zy5rRdBC;Kz8v|i2S7Y%3p;C*fz{VpUaB1{+vQ9!pCaduhFe;o5vf*6v=c|R)GUAOu&HGRzl6@a$)D! 
z)ab+Dw<#fb`c~6HOWs2c(3$OKqU7~c)y57}r>2cq-G%GjGLTc5_6rC@-3cE%Z|g_f z)0Szz`MafbGSHN9TtS7s4M)9*fj3e^;)n=c@MW!jFw(mKuZT_@&)%Xt^ZhMBe zXDdev%^q9EHec_#Y`$}te)oj@6YWy52!G}4{aS%f;-AHab#I8dtm97t&nRa(Z^RE( zzs|rU!}G(uI-YXlsDDheUV)q>E#oYDuJ}D(VD-5gxJ;cZ=+16?RmWglX zo-(}lT(@*6_sVoO<&qSBm~4~>aS6Z!CYK-FW5F|s#fgh%HioN~bm(6DW(1@n5|t9v6~GbJ-)Nu1?1jiYjb;8$h(tu4qPG0o;4k}J_e8JW4< zPc%G%!wI|z%r;)K?;uKW?snWq%l+|V&5PQrZN0E#&AFDo1Dy(;5T=r*_arcAf&OzdY4kXgPOcBSZ$)-fty@keiXsKUp^J^}SmtjV%{ zUw5qt%FST@_8lR@&>%q3QZqK|6c_dQ2Tz_5V}LV`yThC^r;UCI2y@_BmJDlffi(LZvx z9=3-`izFNyCC^5Gp{ehzzV-B~9`JGLyjFhPC z+i-ue#!ic^m-Jpu@W*ONtaHtMxi@jGw_W;x$6s&-l%IXKl*EgX=aak2kyGf$n~gfs zMeeEjX>BrF6fT5;faME^NCxNEkEqnEW+sO6upTU5;RDp(_@zVO&5w!=DS}wwy}O85 zDG&~!;2}{MUGK(|_6}P4OB~{o{ z06H*1L*I*sQ2h*Lf2Mnyg{jU9YqS(;u9k!JDgkFQA!nnefWA@>C|Y z;Uv;tQOGfeigZ6W+AO-ZY-7yqkggBMwTW-Bz(urAX|j7hO$$Z zq#8o@U3OAPijvfG$MSuC-{;le&-2go-@N#I?&scf&pr2?_j#WK2qNFCtYDw zu+UKu+%%f`up8P4$*1I~kH@;#Ci!{+G@%zYY}|ynCbC+0Ur+R7@Eh@3u|m5UZZdM3 zs@r6{Sd^jATMb6HfSs3x<)6ByxwDH9X?>DM?!y{lZYw{aiQMvV*^(3pX}?GroDdfG z-hD|`{9E5=5G==|81c{NqFC)m5JGC4)lxdI0-HBWy$3tpfS1yn0PS$zm~ZdxhN2bL z+${1iDJVr#dstbCow&Qt*r2LL%x=w)K~Qj%VO0qZRo^4Q_QS?8>Qn~Gs!auGnHwwE z%T?a1P4V14AGYqP<}+4nKgWE38p^7u*x2{4}{0 zZ-7YLB7kOv7A)T}8}md;E}C@az=yKUDBq`4#Y$~U-BvCc_kO_w1KV=<2Jrnwy`H>0 zeXpDP_!$PyZR&6Xq3OW$K75)9Vq;rrTjR^scS6IJOB^_O91`{pa1RH?LkFZufrAs` z`nz^8sK4(jw~YWrsaG?-39-t0U5EQ~w6X(DAUu&&Nmc{J zFdf*)InZ8dMCsNX&#NuJdLS*%YP|=Xnq*OWugz#FHut7_lOaBPZ?1+%$kD?dVH)@Z z9_5|Y`^Cx_2u)SPZVsmCAvUJ(CNM<)4w+lVcdd74-wV_PiGKvC*p+S&lFB-^Qho!u zOXc9Qaom3;0pIj1)R@$V7PDk8IIOXHeo)2BRF+B7}o*_UXv~1N5+l61N6NC zK%cb&-moZTSw^?buG_UBCW*#=-6D>j@IwQ;ZmeoPyGIf%K#W}e5~qzc z4bYeY<>@z)&HkYL8wJSoB|NB2BiGCT^KY4-LSKu6n6UTBkNc64WRx9@*$&1DC(uDW zVXA_X(oq=*u*G>sGhCEuSHcpm;##m``=^17;wI_4Z;3HI;Uq2ii@NV=EeW5XB< zv&cBJez0LF4xf$Z0neW2&?Yp3N2*dT*AJ8Q?dH!I`tVDPKzc&9ly?q~*V0*MQF~P?a=2wJ3oA!LKvQ=k|brL&K*dX zO6EG-cDHx!#7r9B;RN#bI!K>&q@h9@r&+lJT=M%T%&BGmf@#!8gpvz$2zX8a%?r>8?O<858vS 
zFPcs%0Q2>zMV|;58ziF+S7glIT`a1>oZ6pu&m_qR1@59kmOXbk0c8O2LmW)!Nzvo9 z!ph^@3g0IDo;G$1J9N|LAw`3R?E=p`3odPriO%+;LkRg$p?xW5f1&M>T>dGq+UiMx zSe1?JCa7w(%9^Z`Me|X*3t+Z-5X)E=m>iA2Jo%V+!7#%H$=s8iifC{)-W7fH5<^(; z*aZxlWt#scN}H7TRte~KomS+1ahYa7i+lBFX_x;n@d<=B9Cq5G+2M$R=!6=H0)>8i!^8Ae~m z)1ybRSu^F@p)s~^nS&`FUS>)tO$p-&NmXv##rAl;c#>jeyfwsJIDq!^R@2bJrSU6y zLXJ}xkUzGq=k)5>j`GL8u6mAw>1{NaZ3|DCFATPC$Eb;ZgjPL$q*!A1D$<|-@~Q<( zv7gn%o0s{!Xf@3h)u(S}E%kTAUqdeMt$=uz{Kw$bfR)fU){1xI3)VK`3r4}E9lG06 zIS#5jgx#^FZ02$Akw;p-f}0TYt%kQZXXyg~+eJ$Sc8f32r*8)Bknj>}8n|$a=)_js z_2#5ovF=2qafW;U$r*kdaf!B1Z+gd7m;KDE@kOUm*iuOsTE`t*?z22?UGnClx6FU~ ze)C97+1ly)M7{bgeaNKjW_$VAmW{3r|B1bw^_K|O`4lbn+geAhITC#rDT`R`cvn*6 z;526%;TxB66Hr9>ZK6;X0Mi7an>n3v-u*U1N@*F+{N zPiDsB6bp9I{rO)#2WKfhHE|dbL)98MD&5Du#RD`r_9XJ&7HsMJl#!uP=vn!a)s!AI zqB+Q|*(#V`G=#4^-F;yAB>(mQJky6~i93bz*(?F!n~Om}teFmN%D$fM-QwJDDSZnt zMMqI1a|&!0JGZ_Kx)98unfu#jB>q#rQ7`F3ll^Zo*V0+uZJQ(up(VZ(AiQW68(wzl zz^3o)e>Gf+!s=72dE+O3lah@O^Pz1#B@*)~3*c=lU)c}n)0O5vMi;IB=i5&wcC9;1 zC6qo6ud~5qeExA@4rjkVPaWE94AiBSb6>j;%;LckdQ3@{E;gm@k46Ma&H^Lo)gAel z8QMD|WRz(-z~Y*G#>)N4Bnk_JspjKp1NTJqAJQWIR5k(gL5SEh9Xdk)`ab~}bUbWk zUr`P|bvTNf2$+xlOWoN&?Nl}%n0I5riSU0faNniVC(WnmhBn8ZfjAuRKumZhWge4` zNQFO+S_I}JbxQK?p`&-;BJRB!ZVZH{lX{){PoqjDf%)Xm3i$jnrQtuz6$zZ2-4)g{pPC4A(n5GkL@e$hA2CTJmP@`S?5v=;#Na=vd2?}D7*R6LiG ze8SUi`Hyt~5|(MI5h5YOAs`QzYtwP7C5(IxbcG1?>pUM#Z@#$=Ey$P!kBWT*T(Q$H zt!{1(PJ&p3vp^yA`?~N1z-#STyg;Ps5AsG?PeEzvy$*90;RUCX(i-1u89o|!|CZyp z_4D%@NK;CF9tQ%L0B+QR*R#(WvYsR7YP)2{@sULfullcEUj6lBxH);iK}$Cy#o8TO z3xEjCpt2{gAkRgM`_)cdY*PYWz~@GHgI_ySzZ(RSEn8iD=n;kSVgTDOb7)3;0k1uf z6h^PR6F&p50R=Mo!CC!~^G=cDpa_-t{(l2Ng0@NUPj#d- zK$rqk?1K@euY)9pd;}$aK0ub~> zzBdS__|6muw3Uo4%~VRR1(D_M`y~6LO@i#Egqh7OefxGJ4C=%<*jz#X7yTI5OAd1;dKXAzbZ4#<0&Q0 zz5!6u{|g99mlMx5=A(mAdmPnsNw+7o_ih3vas1Zr-5q>+-vc=xCze{q-yh6~q*fmS3l@#rZ1#zLI;+C%YTudw zw?@@^0zNXo3H(Y9)~EJcI`6lavaa4lA)A1^b~U3wDKUb0r^H`Eo$F8oNWQ$kbv*Y; z((&fv&t`)_=7p%ueqhgLG{ugOvXFWdtj8wEc->cVJR;o0N#=mwUPufa(n|t4b?Jm( 
zAj6kT%>N0Q$vao!LZSzn;CA2V{v8Wn*NvXN{%%osMMxdrk@XYjD{`)aE|t~(22YdZ zE&BJ;hBaQTk+kYMr5Yk|)l00l*Hy%#T|ZK81FD$yx~?Z3yH}m#isZ${o9PVIkHIWn zp;PQoRvfZ<4|5&forU6n)S|QL~`wj;W z!Dpbit;Oz@>MGoZc4CZvIWfaiW+j%fNWSGyDZ4K}6Y^*~LDPwaR$$UBO+UG_Yg?S$ z!6bu})>o&CszLr$Na@Ivn$Q^45x~Sdmu&a8tb;eoX?^%f?}dyA4vqsmSUAd2W3jrJ;Q>N{=Q>acn_9i3J9kK6-8L=%~LZzS3a_P}%RN zJ)(Ne-|K*+__Ml(gQc)p9>dByJChTggo8;gnF z@UFsPdGfMsxpJSG+AH*H>-I*BgS6^w9VCWYNC02E&1`2KmC)gonR%4|s79Z5J2Tt{ z=nU?iw1Z7loNETLs9V*(Mgpni0)@9ef{7ZMkJC7UTI6!M%TRT!L2N4b-w6OGv^T;8 zk4Cq7?piLSZm{5_uAPDzTLQ2hnYMa*-eQs<9s(-o@Bn`Q#tpR>QDO?R*is{ji|;q@cF51jrBa(2kf&1X8X8Hk zL!siSb{$)SLHNr`NPpH-aWbm9qZoeQx?B_r$FG3~CeBu?p3zc-Fxg~|=p?KF_qR;UawM;k^o*@6BP|nj(Q0Ih#T0R?KLL}3{J_s*72-vK!ux{Tf zXVWv^&KC$klwnmheDahlHav!y4-W>E5;B~=f@k8P-gA!Tv0-LJeq_E|JH}R&1*U>{ zsFD$vVErQ3y26nb9UH-${)~5!D zr{B}!^W-I%-cFX1pFPSZy7XzKn8f2~_SqZPg`F#hrdNQCX5K-BjcHv&KGQm(z^sCj zYHgA0l0z8tFt^^xbsf&Ts1TyO;8{>f$438?aHaQAA^=*G&)6GCuvh4W8@h%Zue@5( z0o2LKdpt*96KO1C4_TICcL#8x57vr~?P!l@(!8%|3Fb*fex`*nu@D61Lj7j|4@B&m zTv1A+tJkRF?bdma&ypyP2DbJBob7Ch8F6GPUPENSNk#6!haAw#e$vhJ<2aq~oLu=T=r-qxr*E(u7zM!a znhjNLAQ8gmS#w2uudv9x;x$08Feq7~Hn(Mgvz= z+{&C50`Yrgqgm$yTy{xgaJoGuYt-S^=gECu+2~E}24tr)gMlMX$vbMMN=3y@8VS#{CC5g3aI&wi zF!2(~T_C87(Nx)pZV`BEwoONLi&r|b=57w+Voaae{l9Q8^0TCm@ru9Im;yQn&6SE7 z$~r#y0C4b*j`p_unnM{-R`P(6IID8waXj;JJ_p}+0d;Mws7^rqK9&WT5mLuAE1f)V zQWI>A=BXS8axq22h2G!jS`@67Q-5CDdI2S{N~=M(No#3 zczN2U&Bgj!W*CxtvcbCeY-xRY4SQTob0=!1SZt=;W&|g*3fiKxyyh@dZF@-F_E9o6CkBN zV~(%xdZHo~{qZW3+BiXhTn#FhRr9v^2Z)~IQE}ptp4doV_tdxmU?ie?$=-?V450E2 zGTn1!zdpkOcxU9-&Zo0}%*`aOT#~FVkMj}2ldF&~jevQJ#DBNdIlnW`fa)|36N`#V zo>c-J*VLcNf}%Kag?mv;l=~N%VK`VBUD*1b*BHLY)FW{LAZ@~suL!nyyom7~A|ie2 z-2D@;^Hdi>)rbV74w`{KX52PHp1BrXXRI+IxxQgy*_9Wx{+xO)VhVV*z1fPX3CTll zk4N3Eog#bqOk1s{!=w@|74lj_fzAc;EV-kwF6{yb9uhifrFIJ2QpB z9+4p*iVEn`QknbcUvocoH6RLmr1f{3nrq|Z*!z0Ue_uYs0vlr*gBT}gS-K-|W(;ma zaW0R(;x#O{k_2rW*0k#h*SMht^RzA)eGYK}Is=77!l;8$P@6yrGN$0Ci-nNbOogZE 
z@EA-<4k}UVV;42}g~z$Ix?;GKz1{K4sVO*H^%bbqTGIS4v_8@I8t&hrq4HVdIdGGaZ)E~x_l1AoN1}%S|+;w zQlc7Opf;lOc=H>Hr&k=mzLnUdU5<24n@o^%iWh3n^phEL%N@XDD{Uh5202PqHEsub zDAybRG%>g+VD`Y;Ys^K|YTG?KbFoc8)S!OH7_QZ8C=o}sJ2Z-HS9?}P@R2d7UK$$l z*HTzLvoa?<2}uY#I&LKKD(hKay_;$3lT@o|R;B#j3<#=yP8M7j^$b$1O zL9Q=(<}fTOU<7v}LOMc`WR8R5!aP&?o0C1x5hTZC-`#N5Q#&H1kS+jBSPSHaOIKab z2~icc+{eZ`BdI4H849c@K+%pFhA0sK@y_)$$92G!i293Oq2op|Cz)<9elbr=Ths$n zKG6$Y;IRsNTn0U39|pLIg}u5-QZIx{f2lPUpIwzoT7%iTS0qyLV$PJske-q`Mk?62?0eslO&2;vaPSS>>II4e>1jpvoqd&FSiAZyG6++ zIQ%+il0m?{lqA^Ik>LN|xWZMEuYnshc=*qs{u}dg69U?_ns@cxAJCky{NR5|R;w%r zKIgx%oPRqo^+MCNmy*MOrgfYL!TBCnF2635L3_b`+an;Tc6p9of7sf8g>6tTA!h$e z;cFnpkstB_i`@ftl#kJpy7P2NqFV}$awkCc)-G@no$mAv^l=@^G$>)V{44xu2Fkg3m;{D<0Wx8c zY-;kWDR5^@se9YN?Wt(&u& z_ifz!axGw~4rgdQMq={q+x9H;(~UmBSpu`v)8j;|*ZA z4ZB-fC-)<$JrwQWr}7EXDjW$su*0wX$Ke0}!T%TO|EALXrY2{}omi9b4g{U($+XoC Ku9T_%74cuDm;Rps literal 0 HcmV?d00001 diff --git a/docs/source/assets/kernel/key.png b/docs/source/assets/kernel/key.png new file mode 100644 index 0000000000000000000000000000000000000000..2059b608caeaa7991113bd0ca05654e1a53d979d GIT binary patch literal 111314 zcmeFZbyQT}+Xo5=QqlrSgCIx^N;lFWA)P}BNO!}~AT3BKrGO05J#>eX(hOY^LwCbH z_|4xN_wT#beP^w44(FU*&))lq&*#~Ot0>9fV3A=VAtB+&$x5msA))ReA>EtAxDVV> zPemU`Lc%Jsl#o!7laQcMak4kJv@t_Mk_}HxMpswUA@bk#1~uZ-&<5KGMWR)jMHko;Sd7I~!z?u(_|8P{A+{Uoo7XB6oq>7UH|P|B^|$A@ zw^xS){XX_D+|74{Pcu02keGu&iMj1PNN;4F-Yg=E6i(K7kU)@H{4tnXa3>GZ;Hpa6 z+8APHs<#Gn?)Tolny(~_Jl?&%)p|`3QI3WrX3WHhRe|=HiCU(cB|tst9#T186*+rf zT+F^y!s8c}VhPkSjt8iC(T+=erX0&6af|v1W5_frXFfVFkRCo69q1vZ&vs;LVw#o@ zHo@#!7u@Un{{3NA)GC^JUhKnPWoKI*yG!)9Z01>XPct(M7s9Keuv@j#*c8`5H=?j* zKLzPJpBG`60!+`!TO8CmhlJ}>lO7iu$t@NBlq13JzV7CiqI$&rBhbd~iXQvCsWqZ@ zqeA>CzueZKTE~w?DW!6zX^=_0*~i5QeYqFx7V+oh-x>}onL>o6I@CG_6~d!FuIM`+ z70L}LMPg(eEHZDs@T|###L>4{Fqbr&Y(Iw27ismW_4UhzKTt1B7u9H%ZTlGUk+L99 zk?G(#^&njJOu^abVQtQaF_zzJFaNfTbn-7LsX;&6x2_Uo5Ad?v(;e5t^ z5b(HEeADUvP7C>201bY`I0gFZJ!Dzr$71(M0>2s&Ga*SCaZcj)h@nkl;|8`bFug@# 
zY}Ii<)<>RbwcSHH@`vo9JP0^7dKgF}BpI^){(cVnV~M2q!X2SPw4zL?SYb<_QJEgc zi#ff0R(YQ}FyhOLJi`1?NPv=<#TNw?QWrc#$o((8KN%kdJma`G9-wGMo`U1NV8ln5 zfzH^j<3N>$XCJJv$b5!BgOM3h(gItgIztnRLJE%JAn@(LGJc{@|1ex89{+JU#WP9y z_sluCmM;a~r^@0T5T3tlHC}&2(uK7ggV`ZzjGDsQ=hWx*>hUT~F2P7lZTLp#1LG!> zZ`!tn4C6Sx?Oi4x%CO7OCJDD7Hup}1PI#)RFR8-XpF8IAk?Pa1<0ePw1?t64M;CNg zE!8>r)))&>l?1>#ycZ<)eSC16keZ47F#PWO-J6fS6{8_K!Z<bn(Iv(sWY^^IEul}2k9`k{_x?fsEdL=kR`QEvI;c$!#pg=Zo!4`!-_(indATz_NzBR? z@<7h&xxWiuVhDTH`jf zdR{qGg{m8(D^s;&-9AM!wK%0vd1_@iMm0V(hWRrNu{FvMJycEpwpo;#U$1ytx$~LI zU{FoD5T>{rHRqf}7TGRukyt*ZBXKcx5pdyjEq|?a{ra9-5L3|D{p0(w59C5^+UeVE zLv2GPq`jm$qm#(S$Yyz;l2t^jMY%t6e-xXN@~I=GklXUbV-5}8UmsULj(-y2%Kqf} z(d?tf$JrO%A1nGt`rK0uVg77stg5U%Y{fclNdrkHy|YOMBV{87P=#DA#b#5fvL)?b zx?{S>x|~H4g<5*NI?d(J%fn%ClM&;?vGE@~vu>H&_XIWSY3j$^*xc;5*tXD4ex1N5 z@^Ei(StveHf(3O2A}CJz=3KJ``(1Q5M^}$J;hj3G`fEb#CM)p^<-NafQ(JLOODqgs zI_oZKiXDVEE{6KyY~9;=(}p{mJC-}#JNGzWS-dmfN*!7=UT8hdJ6hj=cAf2LZudIMbU^MG#ZtpT*>T)C*IAUqY)x-XQPVqnfBP@?(9C?p1jCQ> zrhfNs-`winenEozGawfQOtvhvum!k#2nZpUeByn$LRiU~C!9h^L7-1^Mc+XuNizI` zDC04g^JZUIMU;h{Id@2)3V40|r;Kr~v5s-`KvzF*s)5L*WsSkv@LmjtS4ZHxPw!6S zMB-BEs3kv2{m7omvK(^GF36TK@iC1s^)j(FN$v!Pk3O*VMoa@&<4(M_14*T?26firb3) z1?G_Ipt3^COd_msAj_;oCVL_~_em&aDN!~4K(&uJIouF>KUXs+CMP79QuZ>5>H|3Q z<#*Zde6+6=_l=YFvKtr3(5+v2ztp&$fvrKZO=3uI_t>8x1BN%E)1sX$G6Gg zY%{*5v8NezSm_a25z3w?*S%NsMbQU6is(#PXvjS_k`u{W5BZ%w zrWi)Pd)5>xC6_%?*Ho6QJ(3_FQ`ehY!MTFiaQ2I$pI!vp6rV5M^fUwgzDrWy8qD5~ ziV+;!Ul?js-hcQuwW|;CU?a@5<;)cok(hyN3?x)!G9)zM3K{r{Ad~-nEse~Cbno|l z6eOfDOC;1k-%$d7?>_H=?_HTce(%MHBB29+5dvS2&nSPrjk@>w-e1@ECV^*2FV!UE z?)Mu4!1dj25FO3$S6r-x>9iG9Xe8{N z%xHMnIoLVqM6hURXoQ^JmrrGty5y&cV6y~ZZ?t}eoKbaxH?{rRJx zW*(ORv}EV}=dgeQg6^Jxp0jg+{;nG+Ds*>OK*iF-%tlAj(iV^z(1r*YQ^U6-G@pj@lWx~TH8ag@259(w$xT+g8jWcz{T=TDZ&1xh&`RSRzyh9|ssbNwb zh_fSPZ0B}O`0_?*^*lJL`{zx`ph(fIdx^!a1tepBZtxIXGO-JTa{Ekp7g_99iF)IF zAl#+EMZxz+LZv}M#t=jLkIOvz067egL{`$M!2j{NznDA{a%{wZFZjE<04yT0mI8-G 
zBeMUgZr~OCy}Wz>Tgx2Cs0~=8xVbO?vqyjT6FFAk|C$Bl`hPi}xl6DNT$MS?Q6EA9ab=Q9FO1NmU_N%ViT&ri;P=G*JtopC*T*VB2w zW^8n8Q%O7dDla!(?l`lQG&s#KGl*ljh#TEr zpSpKbc^7(L?RQrg!R8_L-oIE3KT`hDV$WTR`T~>p|7h`%Dau-!b4;p;Z(aTQYO3?Y zBwed#ry(GpR^JT;L#O%k0rwflZpt-!eALsriN=BJBdDlrK}nMrU#D9X=8VU3^vsj)-z5B4f7riyjH^(J* zi5i+8jUojUZUDK|2%fKen1x(!ui*-Al@6dRd*`9jJM01j+lg0T|1msvt!V~Z%kUn* zfE0k;30qCW{7H!xAEDsqy#N#?b24uApNg_8rDZkMeuuqgAy5?sn}pTj1EVr~17!5I zOs(+AWcD5zV|Q4Kjz#*f>S??BYR% z@3q|9Ip_rq8jQ5MP8h6>8~R;`9uj#j6&POU3jz<-h#*JV)U|^?tDHoZN-~zBr!&rW z;S5Q#?e7{xQI_)+7DCDG8~{NS)ot0j}j8$RQA-fO5TdOGKAzi}0s|0MsAtgHB< zy7fMJrEjy!%Hi6e@-FX1Q3R}`t!iAiz-(3#=kPYe3-N7K30~7hW}S2D4G-`q#V89w zLbZ}8_NEl_?L=ce*l7=_dR<`>7liLpdlQ?mRjUB~IokOL2_}NeQVqq3Ar=jRKJ{ z;Q6wIRuObYPkr$-jO;4RVgJNA^HN`eLSQijQR!oHE1>Cnc0TAghrd(SO{D;6#>mi0 zJpPsUqa~y+WQcgg&@SY(o|PR`$q4ywBS9hMmZxOk+|R8unZM;hz&GUEm1J6K=y?eU zA~i3qu;6l6+Ou}ExW3#;t&z>?1iaK)b{_<;QEb|ew&$7=?Nt7?9ai=ff)yOZj*FG#A_`#!!Xu%Oqks`*#F}c-@O38d)lQr1r;jhAZ?~ zy3G*TH*v70jKjuiC(Eg$_9k$CNz(;_)nMK_*DM8JQ?GHo+a#23Gt}2y5>Jq@GSIgv z*?c7B3Wem`d^FEgUaKMR)6yq>aMYbF;`#CI zH};i(Za(j~$7*L{uPyQU#S$_hdI=vicRce%$G%R0zcB5JVznW5Q%@E>|Djyn7bJqE zykJGy{3>3%#=*eq7v8aP*twuE2MV%`1NbD zPdIg1rUafa*CEj^h0k6q|1HFIU{b*_mO)^=zp{usH!Ocf?gafe*{V zv-xgnKcDtYGb1TJ-?LI--FuAJRFPh;`668dC3R`bEXD?nKJHVN(9k;XLE+Ptz)cAy zSxy^<@_gR1ZSF_KmXZF+65o*lL*ob-8e$MlU(lb1Mliq}B?ElJ!1s_Jr?VVnMfh-3 zqDw%O8m#noNFif8I>*?E7B~DMg#9_WP%U6^%Qp|Sxaf!OzgKZ-K}M%ngDNS8rC8>{ zKc|J7-=D4;Q?Oxmlb= zkzxDtNae0$gNrF;iH&9h5l!1ClTn@V*q{au(#BA`O_*u#31Bq(&WAX(y$vqO>hk6T zgi8ceMlV(4HcknzC7p;B8x?(_u?SveaEWxDF3tZy^UOIsK~6j+vV)^Ma+U#XBv%ueO`slUkm- zeo{-Hy>dv7=^re{@UN!4s2DkPeYiJwa{{x_SI6r~^9n)3CA282IEE@UlmGfciaZ!$ zU6&kOXAaBf3`ng$X(D)p0Dt{biNMUq106um+8`MAoz09H9(1I;GquhVF16cuerN4z zqNoiNRJcG?6>%53dLSE9{Smqv&0uz*Oam%@Thc_|BYZ@YPHHPgBtrHcRaB5j{)gC>fMfY(tHGauk*Ovp zZ_tcS-UJ_vOlEljWjNz{IXrG-*eF$~ru=Lgd!uGKMgcT!?1Jz#@h*n%5mhw<4zJmH za^6TE{0dGwWf$@eQf6=NTw(2oHoNP)>)qc#0^UTg4=3DXuiq*!oJk!!JkuTWc@s#4 z7x`eZ<2(C;3Tn9>3Yc{VwXBKB-mMj@pV-3SZk-1a@AZVbO;Z)WY>9-2cnuHml#b;8 
zI026`7ZUGPzE#*OIe(JM&muN;X;8tTf4BKn&`>_S32ufj{$p?$Fwf)`XHS9PVwC4L ze)UT7;A1-EtL$2s?$_=cvypTD*crM=)_AH+_c_mj)4txTi!sjG@?Q2-%H-{a)5cxb z8#HeB^GVZKSBTljQJPbDI~&A9cZFz- zawYibxI;wPV8ik(iCs%;T~3fW5m-B}V9nPg-Jav1+4Hr`-K#Wx%TIuLJ!~&=-tca1 zXDjO<{1`3Y{5`z6=cUs5fdp@4gW1}Zia1mV9V*ngp=gg4_Em4p;&S>x0+OCN&wja0 zjCRxrow>c)y{+S((63l8x8{uv31D1b(X}r6b!CEKiKv750FfK0vI9GHr`Pbt$xE>r zS#;27wFFpgW(zy@yKV(V)w-WZXVtD%Mp+Gw*Kq7Tf(|1UbHczazoW9c=KU^m!FeL~ z*;D7AUz@Y>b8RjG?`iTpmTTje1TKGv=RW6Q{sq+g>}jga($x{_0JhJD>oxdSga2AJ z5E!`l%{6!`S@w9oKuO|cI0`OboFv=v4I3B%z z4PmJGtX6Dpd592|$3j2{cJvP*EK4jP|D^n?pdgg5ats9-;b=kXHstB3S<*~LdfKCk z$Ff=ZE)wFtl>G(KpGNw5Kwv*_V9N-b&{qh@O+vA|cP0qTSNn<8VI2QT=PG zy6pgHsqXi#@yD|zk*jE9GgACaxbf@;;1~5@Gk*3Wek%exEjF_?E8mf9q^ptGpRR=WNM%l(sJ@^P4}^UmYO!2&Y^3kcp#$AK#bvy zR#s)z$Z4Je;LbF2;W<*&J%xJS+C17WD0DF>=;~?m$je$v_+UhAsOB+G#L-wB!nRc7 zT3E9jBJfb`9QTGH2jLE#hlXx-McLIXhH^z``8By4k;ERn$Rkc;u+mL6t4Bnm($4ZO zSqmi5Fle35NB1zW6H!t_&K50iD(UPSu*wkf+IbSx>Py~%ZJSM>y<;TJ#{yKSD>W!_ z2iUQ4tg)cqu;{6}-9q@(d0u{wfoiKae{{k5tH zTdDrSYgJL)s9s#Q5uXIT&!MtiOq(6Z6cyd(e`4I4VP9qM?K+wD-Uz=rO@0N@v0XW$ z#IIL~50?|#z94?m$Or4GD4zDobn1Eb)p)ex6+0vj&oVf}j!wF?!l+;t zAvHhXInGx9B0$0N{7d4ua3HMIqn)RThxlgk6ON4lqq+_ZS|lIZhuLk^Wwx`r`<`sUahCM1P@r>WxFc!Hvt+GLHH?xkOZMu~n5vXC%l zQdxJ7yf9YW$a@eOngDj6bq&{CiLx#0E>|#n>=k$JIZp<0C!C1kTY5K2vL6*<(+QbyC9F2EHHNqF#ml6RJI@hg#CSm^K-4Bt zX)97yQjUl6k&=w!DqL#4@v*iwhVNAw-5H+V7;VT%spjUWH`k61SzBH)$?-y@AFnP! 
zLtYGXM%H`~q(o|jMZ117><`n)YO=Vy!uQt#lg{+F_uyBoDMrKLay)nHWn0tKF-tre zY|g3cB+)BmK&^ipN&@8I==XAzg#kY5;7$Zfu`HwcMDTgLlWs!GDXueMQlW=*sd}zs z8tG574I$^h(Cx%UCr_<%NZeK61-PtDXGDkGV4gke=toH*-jpQhD=exl)j$joRNYf9OFZl2#0B#dx zGx^!$88COA7_DXx#&^@RtMW5ui z?dB#6o%8diBhn!5iy1^awcsDI2 z0f?^jTQgw#T5Rh!auu%~2vG3XGyFq+?Ew`x0D8A{v*vdLy?lp(`3F>*#}90`Poy-f zfA`D}g>VmibC4|9?>E+$4nQ(HqBqN;pF9@<&gbV7LIdXRxxb_#H{~55!c6S^7%9A(~Q#wXps~?8QlWvzu6-J8h`%@WY&he z&HpzS^v|WyB!KOPDyAL%?FRf21^%rbkbwc<8Z8q4Q{+D?YJckQ&ug=k^Di*^-z-xA zE(XnlD-w3e|I{{6(IXz9dSfC+;lF17zjVVz3>f-7Z4Usa`Y%s@cd(@tKm%}6`8NI+ z((Cgbh)UVg#_~UwMnVk*Bx^s}A&2+BjE)jttYs641WfzyG5rI(bJzgYw?9?#`d>!3 z?vI25MlapN`lrahD+(|MVkUAP$KgLC@4saJ*E?e9z}yJX4}XSS z|1mFjJ^H_?|G%mKZ;RshHv0ehu2-UI1r&y<#6pJmj@*@E?wWP_xev1A179_5u?6<-IQ=4`3qNCtHBISyuGF+-*AuyGOB=?tdZX zoAU0$XrypT1O+@MkLpdsL4(3O=aD__xt2aM*-I6APyW-}f9XanjGrPb{%y~G>nkRD zz~_D!@&W&mFbaSaQO`(|+a{Rzxua1vK8Szvw@OptM5!FK(lkoJ29%@`3ZTywX?)^C zTR1R(icvhZ9lirt_CP2Q0xDkT)xM*SsK=Z)t^t6(FxtZWDMEh94CE<^83kgDmidPI z^S(?Bc|!}RWiI@%KW!Q-eam(7CqShp5G>rNNQWNYX~z2spJPr9Q5#yvF-^k6%rHjx z?`u5vc!UCU;WIkQjGwo^j0#2+AMy^~6L8BLz&hly1_;dg5$rMRrE@$$xn@x5CRg{v z#3u*rN>v~DA;5u? 
zz+(SzHcnd1WcGPC#c~rs@Iu=R+!iwh(WyK>-{BqWNno#yGgYl_JfS`RX}4+H*iS`|L09euNfRT1i{@JzTyP-!;McANv6B zT0fg#P&^gq2@prl^H;09zcH;)hhEl-D8ZeY~&tM*KbL2LB&as2496 zLP)_5w`bj=!+BoU@g~jkXwye#()*flILN3Ga;~cHcQKNAhle0|W*pj&6jT!r=L&4A z`;LiK4WDdo9hvQe{LUl2Pv91_sdGoU0v2e~fA5x-?*twI4>$uj<{PVhA4j*9p?RNA9(K9bbp)<;mgAd{*gvX?B$e2I5@Z;EbAHiwsZmrDmsJTQ~4Q$avMuFeU434kM+UBpxAtzgi?aNGUJ395-xsuw|Ph7s- zsOr@Bx0?V-@~)hI$h=zl{9)Vr>ZaHBAYZF7pO%`r-VLpxhL$MVo3Cj-EAqlhFz&a* zDt3(NG9pp{DK5oFAyR0;o9+v)48jxP+s}UbBIrssr4om9$`DcbQ<-#XI-*$*8C2cv z7MNG=Pr6l2T_Q-lclCaG%lB7d#Q@gpTs=`%feH3ofQd_~7r{jb;un>k6#|ru77Z}j`6Ya_-*SQ@A%^6Y{wOCPYUKf)a@HlQ%#TvBC7OpGL+gWjAg(O!ht=g_*JAY^5qJG!1868{VA zKt%G}nYNPsJbj{rQxWk)f_?$oEZ1gO@g|7cwl@tGBb)QUz*PpE#>`kMND+K~s6 z8j4y4WQYjx1BoT^+V6)kXvmc|-X;V}yM1pSp754pIqe`)PJonZQhX7H@_33G#Q8HW z%AE^eZdHtI0}$lc>-a_t4d!TGTChEjiDu1D+@37JGkg$DC1GFELMx`i4#3{q#PM5~nuatI{5R_| zq6qIRGXjJ$^>9(DKXh@n)lF1cp6;^UkryR{jJ{_7$sPcz3)k+oVUINL_X_ieBB6dF zSg-OOXSFMirbfoJ))d0DB~iR*l8#ynaIj3)Bi}Pcn5HaEAizd@h^zO&vFUPlSE;yW z35(pe8q}Ki7Bz6Hk<8IVbcb1_>Km2s(a_Pdo6k0oDq$7m7xMb*LTv8ap))76-BLnC zV~rcFA||Bz~yqr6FyQYp0#c-h`K_dw5-hw65Fz&RL2tvf0qy9!QC!+;96wB zxc^!rQbLF^-9@$jCHD1LneTWe@4h%i8j#eIFL@{3n%7n=P>{uq2y;ru@*vbSrR7r6>xQN|$r}&f(InGY zC45|39$O~JgAn}KZKev^j&I;dgyovnFWX1NZa|(3s=a~jrpK*R%VvHaCThS1!TRxi zGyCfKi*Xp1J;IAgLEp05^~r+VJAOdzliAIcD;m~tIa=ZCM%cVxUr&w)RT? 
z1Tsq&Ll?^Yq3EY~aC~T2I8_p%XW_{EpcZAr>4>y+i=sT*H|^^$OLlIo#PXgG_l1WI zKiGgxC9%C2!oH+K-c2!$dB)nERpQZ;X;=l@>os8FHHbAB+OzOAi+=nrJ9xoOzXT=2 zOXNbMBL0qA2=|h-;%hG@A+}`nJA3@#PTE0b5!!&bnehQfEkyMZCHZLTvy3gnz%d{n zC=S_2^s# zJO!3Wc47PWN<@G~7&?0W*rlQ+X<4NA^L#bwK7~a$AhGTKw8T}}ewn{TSYs$h4{Yu= z05A4^OE%i0wpSKD9xXSr%`92ICPgmUUg~^;ictJKL9XsIV&irdZSRxKP^g3^g!$U- z%tYtHON{vV{^&$S%@o!KJ8fHri_u66h3a@QVYcIo!6C>Uza ze&aN0gO*z^Em;bEOFq~;!~zI@5G!ds6c9U^fbn(0wzWr)tFCQfpZ$>MF~a(n7x7-i z@R-AcYZ1q6h0hb0yX*0^7J$J$MMt#IU`(E;at}*7m%@48c}f0&S39z<$mZ1DTx?>m zrGEHHx)kYs+Ib$QLUa{cB=S8BF9osv)g4_h;RVV2qoI)qbHawgM_;+PqTWP9t> zI|Bf>0{vc<6WQ^TGC8^w&3Cq|OVuzNHIGgDa=4KEL)+CU;=NIGSk|{kof2l;c;Kz> zT(*}y2_I}?MV$&}MxU^s4e_{Xwp3gm6!1-NxaRROa80{xK}1Pz3zn~8ac-|NO#xS* z9^=R>sGPU26i~gZ&XLVW8(u5~`9#Rd67t8LY&iu*R6+vKTFN7u(g`lcAm8r=NUj4H zn2S{c3BuGTW0Pg|io8jl?H7pn2WdlSSIDqp%^S2!}-~5Kwg>wpQuT}IfiNd z((>=XXGwJh$em=O$G2-R_C8w-TMI2fV23yAYz9o(X!I+gHh75OVVYdDJxm)uzgiQ` z^u~vS4C-pp>#N3oB9ntJy=rttH8E5Lfax`4k$41G=b@Gs1WX|wEjvzu&FT@i3qY~S zclIi2e%LlAhy=q~sH>mTE)MWiCs<%cEwU8$!^+jY-UO>7Qq|S z&f~*H{iP9Hy`)iEK#n|kLNTUgHHJRM>+4$`t;?O;i%^lxv=zP!qqLQWwQ+ujAatKk zjNi`ZC4ZDma-gUttHnPtC6L~Z0|EJ~@Z*J&x6|SNT2GleznV|G^`nYTLtCO1=vgJ(r-R&YaYIs7)3yeh0*y5S)r0tZUe6rc#EqgbCR0l^)tH@h zjYqHu>z*am0{JGFKdpGyQYl|pu9Z!jB^#87y{^yz7yTr}mIp-1nlN8u?U`o5vao@z z8do8_r;F(!wK&Z`5$!+0;te-L6Y5pE{PdypRb_dk_xF+>r9+A?ziOXfgFOC){?Kw} zGO+F8+jf%Y+tMv+*(wqmqn{(wn>=)%BGTWH3!}K&(8PeBz5gAnFH3xetAna%2~MEO znHfROK%&&@6l1I5P&Kgggke*T+gKiMcFW~FZm+$pyhEwp^o-mE3v~7d=@lp$BCNd5 zZt98^RKe9nR-%&Kj(*$LhP^$C+c)L|O{c*j^rM3831uQ_?wdWRLRFp1ua(ks3GfPh zm%uO;1$krov?pbP=KVO$X z@xz)(68SOTd!3Q`Zm01#ljAF~*NazQV?2YD5IKU?@cb1HTs`n`%Ww&ROSlW%aot2Jy z$jWXT*ID1gE4I&661wq=4Uh+mPnhk@mNbQ~Jb*+OOVv+a-BWMY7#>u2=n3XO(+5Pj z&G5LVHzongSOBgY0GkD;OYKkGK^|J_@o7K|LxvGdUR*8Jm?DnELfCou4NY|FPA*@J zM(lW4wc1&N3~|OD49uzIClth~{ASlwxS{2t3KFEvgkEtIHA-asW)uNL0suiYQ7iLw z-x>yQgCP5nfG%IW%8nKJBn9OUZuhiZ8_di!7>OyK#v23#^Cn1=fz8EnT*u<;$b0R5 z#R-NNy3h;_LdxE$QN*@+NdAjBwvKPHQ(F3tQiRH7M8(s#tP7TNzqHdv>JzU03r_vb 
z0f{M_ne{3;hjgPPhc--|9c3K#aSp!uF@4^894ClNEo3!dkLkLU_nLDYn9#-xb|49Y z!XB=D#7;MmpX~*UuOSgs{E+Pi`EFWJM;b;QVv5@Enn~&aDS#FqgIC@*m95;!{-O>! zxz-I$CaZ5WPt;*F`YeWOfY zvV>&&tbT1ZSbq{M{%)D@KDeK6kak|zUy-_)SmeZ%H1ek`MKl#wnn^RQNczO(cfbwF zj3oV%;-I8Tfe&AtkAZVl)M(+ zrbkm$If&&cX?&HLiyr8GbtWR)PMVvGZ=vsxv&Uk#NtcY85rLXZV=7qOUN*BS zZ-IHz;eK`0IXOjaGm+EE6Z<@9A(ri*DGT;rm9Mak0_aCxoZ^#W;btI7n^%$?gllol zJ8f8M^zw!K&W!$5|JCNFDQH=Rc=T-Sa=cM4&u3SwOKxn_<@ZSA`C92B4bvP`S|XHEf?W{gm0i_235kz8neC`$f9;+{a)mq>)UhS|-;?bN@5FjU3L>H{cyB7yle^>DYQZwR!U8&Eof5Epom*OG=WUtqp3~G9dSN*-1mP>$ z7P;Ia>QjR4)7R&#StV?+`~uQ#C-;c;qE-IbUs0q1cGDKiQsFah3N6)s+Zbq<#|Fv@zTs%sT&8G;1|teH1IgTtNA?Iwy3*O4j9t&sKY9S5_@&eQcU@Pna9;GA)r)%Hh72?wB$jNYL^bo*1HjHa z8=&|sC#iu%q5=2EyG&Fe^FD|D6&zq2NAQBf&8DJT@!M`}4$G-KT7Bn$`u0p_EevC2 zfLm}6Te$|G-6 z)}A|Q!h1x6b=$D{xeVqRL))scG*X5N>n6sGyAwX;%jJ~w-u?h}Iyi{u6agVu2w-nWOXD(6*E-qPnyo?=*kDbx z2q4Q%kJoZIN4z;d6T*(W=H1Lck8iWLOpY@E;(4EOD*ojVC;Yys8DqyC5Tpp|J#lYX zrW_a~R$CPo+$HxIIJI)UJI3P{L-`w$e&mZXTRAA5iN#-tg*@N>!!w}kymnFe~vdi z;FqAx&-{KV<%L;f=w zG>zeT3VH;xW=VYYcy{&P0A+Pu2?Ij&GF`cpSs zTq#c9d@%GyQ{Sy=sd-y4=YbGr9B`_Jn(USEDG6-k9M!FQgg3v+->MAMa<~w9oUHY( z*Uk*)RnVmrnVXkvnx;&yc^F=Kh{hL_3Iu)thmj|Gw`Y$B`T70D_>reiRLLI#?iee* z7@p8ANk~u&AO89&DcF~IpD0G)c-R{_jP?m4rwbh1Q1H$<{!m7GnwD8Yw=GuPv=Ft&^|PRgN#+=7F~YDItulAL z{7j9C_zghG{2)R{wGz84N@mn4c~_JT)8W=O90L#kksc7~c2%$c@IXdp69Dp>)@s)> z{Y2a~uphmB-lWJo%jKSN593Ju4^j>+pH_;WM}X`qlPTzKXZ75$*h<|Y1<0Wo%@Cs` ze^(Wzbe~3r=#Dj1RY?KzY@#Rveg{#~#boiczt_nfNhbL@2B(90zn$c-J2XG-fruuP zcHy|51fTkEgUx|jnLpR}k#mHT)4Y#EYw};2CMg(y6i}2@q*eU8n!l*IKbW^lIUqqr zQN`nbN#SUb1TsicF%UU_<^%n9mjA_q1@HibXB8%v{l8d{e_sM!!0;e?na)KEoK8j5 z^bDXHk!{kGvT85bHS9~n=Dap*fpjvPLY?=Xe{`^g1K?oE@0$(ZwZuV(N(fjx4ieO2 z;4ECneD!d1*>{eQTkl#3WGLcTEBanoUjOEG)Rg$YX770bAoLx3s2=j052^ALpJucH z!acCnYQi%$tad=1YkoD(RZP`^M)z1BLg;OCq%YU#aokX#Y(%zApf>R|g2A zKtT>?egGhq@)o~}(akvz3iJAy$`YVFkp-A%3=(FYd@f}0Pf`**iUlLTyis&SyW|*p zs$JsA=A?)zM$zro?|p9cu8S|h$=$Qzj`R9tT@1%R`#8aBrSRAP1z?fk2;TG23Od4-pcyx96ofMaoT4;z`O#`jOTyREdLx#H*ItB| 
z(8E7NDE8Oq*d;;A0?=>3@7N(a0;Fmkz*;ty7A0Guc793cAG^}vP ziSF>sx~QEd!5TUhKXF;oSQ0!#?TQAH5gb_@4+P^-&l(l!eTVNRSA`nI)dx(oFb0Jf zKSTARTVRxO{$AJGsie|hRA$Qm!`64lQ~Cb?b2@Z{9E2i!zU|81krI&>*<|l6dt^p5kxeCJ zXYXB*%o?cC?SuXA75^?E(mg@YgkDmZK}0%Y_sD;^7k zSD6rAy7Pix`IygwN$x2$Ztjf5b>Qj$<)J&JaKFu3BvD8)c5?ZNRX^K|HLXXqZt1Aw zPYg=I2+Asd#{bU;kU5uZigNbwTNG`$a{OORFmk7>8L%t(9k0*)hx0~WU})hc@fkR)!?b_dY;0~?T~r={|#<!_ATbG(j}v^f9c9k_ z8lS6n`*K~70&4tDQbeN9p<6V|b?Ol?QA~rnvMa|hsw!X0bG2#aPNbOHc$17x_q*$o zMI{si2+RDfJz*hA$3^2exP$t$!NRAaZNCZnKJ}*#^@^B{8?Y3a23*0Cih$*E41^M+ zGf$;|sX-f^HuDsGz0dhh|NYJ2*{=E>HY3F9Z4N<{74y-bw7wNS31VVi8_9JqL9;! ze>~hz$o&wp=r6?H(8Em+gkFYfZ||KrDukg&>QSkAkFPu>PXAvry- z`XXZ#exQ@D%FAB|AfgmBI1ee<{TTQ^z-t;|(ei>1d6#D{Pd>TSZb!eHyLOr5S?NAITq3 ziB{rUgN8kc;fN2|>2D z-I~ZS+#|xZ?X_gx@voI80NIQD)V;V`p%JSYkToTo%Sa)hmO{FUHMs3>ZgyzD%Ved{ zz>XmsB#S`X379|M6Kav#=QmjR#ljV2#{n7n9;NNjvat;)!Ob+)=L{?*9a4Lxw!l<^ zMdwfl9NR-H?KXIogApK05}}FJG6qEaAm?G#R~`2ssXq+Z0aBV!j=9K6BjoX~?VkC{ zLZeFNed7UkqHW}y}Cj6Ev_)4#z~C3Hr~cVc8a>VU!V@Tt!}b6B<6eZ$hh2RBOcB8DJcIRlI0cUy{0Amkn(z$P{kn8Yip=* zK88}jwM9vHS{G6dz`(1;>uNq|(fmFj$2dxZt;j1iQtivm4&(wz|89R+eMu6ig}V>o zPQT^+c2H%Rlw55j&!CO1Q5h{f+i+Wnr_bdgP}s3?H<@DJza%}$GrjsTx+-g_@ZRBc zj);JTynXYbNf-jk$s1b|cX_&{=w^8GmVbYf)sFU=*#0DpXuYE8=)l^lvi>VPEOTzX zo=OaUud^pmh<$`yI`{IfF#Zs07_!83%GKufrU|EQptr5{06 zT&I{Tf`dJF2-xGF6*QCB-`MYLjF;wfVMYAB7kv83p?w*V-gJOeYXO>-BzxSzXyp)v(+owYK=l>-)CJI4{(i5_m3$bTKaJ_9$T$4~E*ayu}dt*3*gBe$T zyVzsiE%(7_eQf6@Y2 z97ZON4u4w*s(X?N8B!6tBBrH?l%lt-K_V)&NsjHJWv;y78DO%tPRblPD6!f4z;IS} z!BYHxHbtUP8J=J>qEq;*vih}9HxQp)c`cti^&)Azkv)D{GI~y1?k*7Q3?ut)zgF6$wcYhFbp|~cN-8wwFMP&rJD;Jf;D)Fp0oU%qg)*5?{JQ4PDD(G)s2KAG4!b^Q_tE)!Ldu@9@!Qv zn5pq>aTxJJn>A+!?Yehyi0c)S|E_&$eabc=X@n>G8nHRhv`c>T^OQF{8u45noFR$q zTY0bFm3gDqjc~;;f4` zI9u^ZuW8kw%udj>>Af#ysYl;Z*5bli@RpmXJW^J;X27aj12se9C`-Oz789C`HMGFFVH5?g&R)#tCs@V-H zZV7CDJ}CJov0D2|?cXBze|LkR`J4>qHhB|Fuc=w;>q}{ZsaN2xR}k6hj;?;CLvk2e zoPcpPeso(KU#-@XZQDHGU;pLWo!LUOt1INi_D@US*o`Fu)6@9gL}l@jvSw{=YZ2F^ 
z*WSoP`}7F(NnhV6bu2?Q5;JkBU4&WkH=>0TANaBb9Sg7MbO`e)k9yEDAO%nCuo0gW z)FH%SW(op}S8xhfDVCTgDQ^8(7DZPcuCWOHS78l>_gO|16m#lqjcC90Ar}xsmyOc9 ze^0RvUcOR$rz6`%y8@M%B+OT^{{8LKcKPh7cVzii)vSs|)Tp?ycVQ>ECe1?}{!r)D zCXyH!>OzKV&ACCe!1@Ia_bK8(%X}@$w_4Wn^lL-~Xtb{I>F--~mIq}J zVu&@0wO*1ija6#j?US&e4$3U+WvSi4W9WA4dX#jT(3R|kfVPlJYz^;Va&|&P2Vdhe zE7+{d7D7Ms`I27i`EAHG*}_|sr(i%m&3&0`HihFUqdq*Y&eFXd9?ObP4NvD=V_veQ zy5VOlFJVl`6b()BBu11j&K7BqDD)MS{<1i%`&ij%{?H-oV(GD*OQn9Q#0!eTrGk2P zw1u)SA5#%;_45mdkc-oyyiqZr>k)oqyczSiO?ay`S&(#K#daERYL=>ITi#~8)b0=% z_%@mJU`>%+>&lS893&g1b)QJH_9Zb{WMO9$;(Iky5gqZk0puAz`*W4)$Toj>M|0P7 zXHX#B=Nv8U;Zmh{%q^V`ZOrwye9JNV6t5B_32!d@zT%-CK5z9(zYlr2GO}djhgIT) zvJL1Y+I&oB?$}-O&eoxZc9y^8P1{y87&}#IPdK2xM{e zP~2aNx9)#BeY%5av-w3Wn*j%a%R?I@_TER^YVL8L`N1#2N3)S zvKGme7t<7NBR#us`Ky%acc&alYW@3Z67L4J`NHl+C`Vp!crJeCrA%q!5zrPGJhyi* zekxY&<(SXWf#WyN-xU+q3VIJxg2*9ork{8qLi%u9+qD9FHBWEK1x&zBn;JuWd-a0W z&dMr|(@jFQgkJZ$&BnIq)mM-GT0h>I3chAh?(iHeNZ!v z`Obpm#IyaT*@>cI-urnBF@TU=GeDwif;O^!O@Gg9^pW$^QEGwW&7i?P)9vTCwzz?3 z^mCV7pqH`muX%R0z?vf(n-oxOau}8)31u3iYk7Fn$Qyn!2>sfds}J-C>70v^zk5SQ z^n+Vsz(Zi?-zr@1;#s*H_qgOv2tL>@0?^!YOPXhqxPrb@??&~70+wAkQ zPeCwQy|L%fS7QKzxW%iprY4ALt-D)9c8~(~f$Nn{7fZZuuLU*u;3KURrQZo`1IW;LfrXaT?EF@`s(> zFWp>X6NLDbS$y_k^Hsq}nF zM!InRe-uZCFM4b(1|_;MdJcMjyOh)tnbbHqxWzDO{@SLinX=C>Pb~XeIiY}}@yEP2kf?bsaJtj)Gi zl$>2>=%j(}qiU5|04>H?yPQp~xlJpCE6v^O=GV3(`uUh$05@%B+2(-W1W(mQ_X$B` zXc`=VW9V6_`ih^Zo7vio&li$vUE3x+F^W(**^g#(JpN@A_P?K=m&T+almg4VE%qc* zRmK)4(RVyv5L?KasdOE5xs4>btyf`t#FAw~4Zpnl2We~-7p{zuPUQoffaP16@RM z|3)d^U*_}QYhclp5t*#Ge|Lvt_{NWuEPaMhgF^JdodbeG=1CoB6%izFvx8eS9Sy&Z zKOg?Tpw8d=4B653F=$u6^C+Vi>BN50KzjYETyJ-TfG*GWZ%tHs5;~(cJ<7su3G+Cp zDiAA<5LRu~GfftKCeZskBF7h#X(BKyorK!!4%ZKjyQ}r}5^&{rV2C8H79st=xYfU~ zaBVXgxB@-9m?nu6FZ?5uOolEz)d&6e9W9gA*S84b_z_qO!MXkeBmcbV`@5|!n(<%! 
zit{pTIguRrPyYL_kPkQ^5Rst~+E+#raOb{)(L8E|RRJ{agU`7aC;pZNSHu1|N8a8$ z*+Ps~gg=dZGlc+Sety6th^tm!IsKn67h_XBR_AjTafx?ddc9qNNgMdOVA|L7+uaYrJ8g%N#(Eru`*?_b zJIq@d>a{)XCp|JwU?;8Bj0G!J4clKUR`kIyg263~-}yws?`Uis-XTsLeJ6s>XIl4* z?R@bAE#y}@XQogtUTUGsnWB3kQ#K#EYuUe zoCU2{7UWz=dmTHNH^TnF(whGZmZs#s-)UzaNTFu8kXz#oAav=k@`cg;opqWV&n$n#PiP6kj-%OD3n(vL9FPzQ*2N-rvr^uo}U)Lc454tBHWEAFr(39HVsNQ>pq&+ zJVfc4r#QRPk5-p!{^bG zVNlHf#|ix@9;c(^LM{%H=I1LDhyKYHB6+wyWAkgY%>36Eu}g(9n<;K`wKUWgWIz~Q zjgc9nyZ=_3614Ql`>=b>cV6r0J{dXkd&73O{$M3F%vad;(>r)IpWx!s-8WQiHUdX= zIty+M<|!b2CKe+AV{)1sCWKG#sLgjbxBw2^j{dpjIpcO=dl(E~|KAV8wC17QJ{`rE zkD$sN%&H}OwquJtGrDu@klMpbYvnIPPta@s#D~5>dL`XepZNR*0_1h$0IygHR%t?| zzQmT}F$Q_YYsl(i+};}zTEf+Van&(k#-e{-OSU2LEhi1K9aALoK)hav11yh~V4`k~ z748rRWeo6H)bn!_@O3j)9t=td%vX>sZ%qeXi;$pV6nYLK_x`*d~oQ`ZzE5i+sn^2`onM| zV8ZQ3-8C1QZL$koA+(eo!9ohwZkJg@k)|{E12UJC5z`GPm3sO~q@_`}Kh4~!UcDA+ z6k{CKG6b@mhYuq+Af@8P07QzGl{V@F%BWBtUT{b5I8;X1Mx3}m@(6H-ORFe#-Wmk) zDi5_}I;(Bt}Rv%l~e(3oqTIA)!X7}$jJWGgz-)iDTo6rb1yS@|GBtkBjFJIa; z3G*ntcdzyiqYu>F$`D~&rTsRK22^{CRt>j_KRorEWiE3B1;qSG&D>s(i-Y`6=zM@a z_TJi*dZ#6f%a@bMNIwj_Wnnp)=<$n2^t=}K%L1WCUj61iswiFQkr>4BG?MjE(Az~_ zqjmvq7yuLwo(g-46!?I=U{1!!Z%RzrRPUI~&*wVA?ilL<%G(79?9C`{8<-}pR@ePG zLFj7JCOBXvo<&s_Pt`TrKq9@UrfJTKdn;H{f1Yi5*&|})Q-DV~ zzM1o|3~`Go%bDjw#^{e~TUTR3&+h@0J0S^4*FNnedj9wU8BPtf&b_xp`GDsrtC)O5 z*zM@u{Ql*Ux488AK{wnE9bmf5dt(Rhk7jXw9Mc^*Lb?{q)|iQ|wf1U=xddrnLi9 ztMeFVX4VUxf**kg*XZ0>l&@oT@VS;-XnRlZX-FgnsJ4aX-y=(S6{$Sa%aR+)dsuNig;6L5tgb2BEP_Z zYB=)kVNZYl^M^g{i$&S9X&ep~H0E`urQZo@IW-cx{sMUHZxKi3Q^+WvfMcLSmmkp!i~8q`!^(M;1

$$o_U(0>J&3c!ZRVb=?kkbNwcd5Mb8AQslJO&@x_C(26|!X~elDRhJCz0czxi$VSWC__+um=MP>p+H0@_hqn2`6wfNqQYIw0)&K=yF#JWv7Q8pqz zfYF~}JJ_2zs?J^sz4k^G@pXmTFfQeVTj#lC;+_%Dwxh1+OfXT=S@hSl?&G)V#}1tL z?)S+&c(WmxRbsX|D#F_~rEa7&%F*|s!G6$g9}K!e(O$&ryjoQv0bOeK=G=6d=d+X4 z_Lj1Oo`8{@?lcq1zx3$p^(UK*o}bINPe!?U)&AolHE3F#?UV15t;S15Xr5nV$34a4 zqPZieT{2=~Z*hzBu~EN|_#WB0BuJ+j3>dY~Md?cT*cD`m<9D?$-mP6I%I>igFNiL_ zoin*#$AFg5I-1$|;HoX`k=dLVsf=;W#@HVhIW%be)ml!=`x#y7d>(2~?ljzb)CU{z zK(8jv`@Zw8RFEblH*;4nKhot^!#%ys>>jGVQrs*%ZxMO_rIabl21{wmmHoUY_;Ign zH)VUN?k@mXMJdX|LAu|ZW1_;_Tc@^=^;0YC8*A9>vyb$q<@D{{# zygn)M)8%jJ2(!r7p~kJi0wDc zhiq4Be~M&&za=+Glkrj`FI%!mVf}ZY*8B6!BEPAJCxCFefLb~?TW7driF5~iVt14p zzPqV!wf35Yn&hbC^8Hf&9()va{MNCfbaSl}ybCW?SaVTamakPJOQez)d|NXgI$^OJ zho29d6a_;qv2$J^Kc?EgL6NxLwfWtZb6hXX6)Z0efEy)uxcCBB7b_hnR~P%1Vb5to z()1~=5y8f$axgPr+yQU>Ys*(73&SPECKdG4R{aZ|Le(>_&Q>0M*Yhmq8g*q;&^NMw zjpP)B|1i<3xAs(I9^`XDGT*D>hizseC+^o4hIhY7^gK2idiR9!Akj_>5T zlxRX}(;MwiDe>}E=ExQY)cEu`mnYjrW}Ym%bFJPrzyk7LERV*wKjKj|)$XW!sIBBJ zvQuo0r+kmml#2J1oQ0r~kc2uC>mF~vo9qGs7wnp(f-c~VZ?Dgo*S?hY)QB-WuaAFX z#r|%Op(ku;O1)BqyopR#rc@X}0^+{=Qu3&W*d;SJg1oP6y6e!TO|DOiik!^;hP zVEF~VlJ1CD@h_*5_Qm4tUYwpJnOd$l^<~}=$g?1PYuaB%o}d$l-l~r1k?m-n7(nTS zm!_y5Gz$_5=H7_0RIe>SJ)xE-s|by!E-1eBVQ1}8fgC@O-ivl(C7Uk=P2~jp$hlqe z&8WIxOujcrY|~!Jv_|YXpqmSwHk*&97>0lCH;#zD&tEy$-CHSB96b)S6%}u))(A%t zcPMq4Q1TKZdkyvJ^GCFftrtXTHoFZIvZbjno*B4a)rpRKb(Jd^5c5t~J^IFjhn0`N zIg(6BkVNb%$vA!qn)j1>@neWCqM@H}GBWPAEIprLnjM)_uMLJ?sHmu;MXh7^er;G% zrhTkfF?e$67KB$RIj1t6}0l8SfOUJa(Dnk106am~H`t9+VTXy#>0L_r)VA zTt78f(+r3iJ-J450RWl}VUAE>!T*$Vqn1na71QZVCx2oKj!*6Fe(EfApAIV|e?gY3 zOOgA~YiI(au2q%ZSMTSzHxwnhH;o-ED(EA8;jC7>&h=>+_NssptZn9Q3J%7V7$a(c zS@FEYKNu*}h8cWn?oM|`vZ9wk&c zyB1R*n@Tyy;kYHrjqC6X*`ndW;T<`!Fs1Ah?u#+8PW^cCaxEA0ow0RiD{07}E|D@g zhu&301dr9IbOWPegH*<6ZSfS31yXF0SEL8~yz_g}dh3}cOPX8*MuI&&Ny>+z(;VXJ zY?B$!m!m)M5k@7JNVbIT4}U5QiX-|m^wFFxCuoBYDB34g^+Jva-SJIWH{?y$tq%^8 z&?20Adymgg^;d$8bpXaS3V+WZT4?LefXDPGe^z~u+ePkM={ov45=(LgD?l_sT1tM) 
zt6w8Rs+Vw=5qd2#m)b<@8=GS7g1UHt4sPy1u}aNw%vfRMmoKMrbD!LB?ik2F0O)>f zH^3PC9fVoq$-AeLgPwzdhWK`N@4Z<8+=3Tk-iFO@s7M zD{^1{_Zp!8GEC_cQWQLykdUit@ZgB?s5s(#sv@CL;V!bdQ$0%4f~?`wr}1yd z{FbQ_`GGjGdY{TvzLs~*p|EDf;-yVgZ@wjY=$>fVuDiLRwk)MAM~ZMJcf{l!XBDkJ zTEB5UUGt3InjH}1r2{il@JDS9_y2&w7%6_iQ;?Tys+w*YJCREtF|Q@f5%u}j=ilH8 zT3eevg}xe!;9AW0aCD5=C|3$mX58)O7(X>!Vq$y(Uii&TW3y#5A1BMj4_o|-a;1|X zb}L%F-rQ4ZRcDS@j19h$CE~e~*2K@!n_-{H_Ytuq5l<@dmazyma_O+z4F_I6#MSn+@ERU?zQBjRGA5sFZ8|I6Dq2G0sGuG_Fxs@G zaek45{7YjxJt}e7bT^q=dSd3{*(=DabhCH^2J)JCPt;9z>(tk&5hocZDN-BC4oUNt zPaT9g=Urg@(*`DAL~pF+L^5v%XA0pyzHVVRoLiLAmFB2vuBMkM_*?s5I;Lo{bxcEyifDBW>b$Q`K(we70tT$d8V`n=|E~wwxZTZ;C;phditB{Ns$L1`Luh~ z24)4=btT?iMI~yFPTm!nlatkuxY8%|YxZVlOH*-X%2-5?gj=23(0ERm`t@USY2Pv@ zLF6eI;7NMSiez;ZxU5(wzx2OxDA9B88)5kN;c7-aT6VtW-u>T2DRMD<;)~c}gvmJ{JUqXvK%HorPrFq)jO%h(Q(I@V6rb;2JVKILrw6n9 z=Ty2&RMCZYk5V=*0SlR>fJvmBjnEyAsVa_qRsA#VO7pYNA1fDjVefj&z`Xo(tNO98 z@Y=l>L`YuXSqZ<=hxbPrIK=x0EEZTcB6oPi>4k}Ig1o1AdJwT6bq`hR7^QG4d+1ip z_Hwl)67InoX|aMQZBeS37L5{&uitdd42ni)s^xs2lhtb(u`jmArqR=nqzM1~q0RyZ zL>BE~hcGU^UkK|`n8od8OY`051rQ|{Ji^xM_u5IruX6p&xCihjSVhe zP=7LGRFzR=uJ!A(4`SknPi)DlcS%^gls~~%CuAp1883x@TyY=&gct>jGlSI@1wxa# ztKO^|bM*wf@>~lfxf2(#FuawSuye5NQ^P!B+yPNXw#wJ92|D%k6Fhh7X}>$@T^PCe zhF#v6B8f{=m1oxQZgFv&Re5t`Xb#MUW3UaP0&)a1&IwFK9e20Xp+mKz^m);Mu*p4EaPyWV3V zyX4!qGD5dLwK?y_{Lk+lr!*i#{*nn5T4JZiPKV}Zh!U~;!bYHOm`9`cQoimPmT?PS znle}2>PdAzLrr@Xm4;+PN>1^}J9bDSPTYH%>3h4zgbo3vh_SPZ^!xmJwy+XL@(Vf( z``%Q`&IM019490p=<%?@v#A|yaqJ19Ma#_jx3u=1+*!nFrZ!z)3j1w}f(7ORDN#Lc zUJ41ID!yphpA}^4+#KxAJ+u|5d}&;rL`Ye$t2HCd8(9e*N&*Td&buotJJhM z`?)^W#1%&vcPfh)&|9-cJ{(Q_=RvZ=vrdWAIW~`8U-kI|F6n$ReL=KILHx&#D~Vkp z7Mhn25eT|Gu2vN?w=q9|-ch&G&bw5hT*v6Syn9}CRkHVmKmlv%&rh_>QzJ(ahRoBy zxTxzMn+XoolT(MomSIvzE|>vGxMEK!o|ftN<-1mZkMh1rt@x3Yf=V&x-m}ro$5y6W!~HSeT})2TzNTBLJnLD<{D*IX zTs#@yll^DUT+~2>OZ-6qCWjlOyqqg0xT{=iIbtUvtB-?r)g{8QIKzt4MF+?sfcF@7 z(yxzLupU9tzb63iJAO_m47a-NyPdT*!FEdWFNh0v@c#0&@L`R}_Ecs&NwD&@e<~s4 zGjsiah<VtB4+Xzon7|JR!XsG0c|D0@_+tk- 
zi@{B_JyECl4j}><`0L=3#Kky2o2hVZ^PC?IGOC_Kh=6pX`~G9Oo{*3Gan-()@DI)#X)49eFKbkKp8N2{I>oCAyZsDGuv+-! z+>jHu+Zk?ss)smC7??%-Sx~QGqgfX6a1%;xmA7UFFyI?)>(*mnvzQsLJ&KmCb2ynh2DC)Mkv!p>{-FuU||LG&xcXK?ogLjS>$Ss>UH zDg&#^rS`GP(j%X(c07fMpXC$k5?5|Kstb{2Fltmn5irxB(YTNBf~P462p^#ao2N-B zIb`utF)XjbjSeNI_3dzI1WF$?vSC7=Vf5*QD6*fq#*hUBv#62j<5q+OyYXNGWY(8^ zz9ek4ZnU;1#3Yub?Jay5`|)|nqvN1GX2iqWqe7f>mT}VhiU!GN3j(XU%dA{|WXtJ# zZx)s)R}{;!HshfD-vRw|*#igqBI$CKeKpEo+-V}?CzhDKxd>hD7x6kM%L$Xf&KAGV9mpWS(^6*3{hxw@zAsc)G+@4+-*{DFslB!jZ%C7bfaqgDDN zgH=i|#mbS_d}3HD3DEeMjFmg5RsiNTixt{bDTj6xbb^OvoYCC2N)2InYTwy!4N z-aqV-K6qIM=g`SI?wky@)onOF1K<$b8Kt)^$6FP}Fqt|~vO$EBN0;K9nKIkGx}s|P z^6Ol^yI|{S*;-3?jH-bwTG5pM`lU5J9n#hN43px1rQ5WU?u8oFo^EDhsKjNuMk$O8 zgEG4Cpi1R1%4)+r>|~0cZ?x47RdIABV=Z1Sjk>Er2*t{ylGa#YxB3VX4M!xVnY*%E zi?}SZn2cv@?jpc9?*OS|B|=thJu2zAk7UtvE9f8|G_zeFgCyGPuD|GA0@% zTHP@FuUkvD_g%x+{X+CS<_4Eq7RL5+Jmk!NaliJkTQPaV?Exf_g)(~g{9M}D%oOmp zCLvwnCXhmAHp@+4PY$h9KfgRaI5haZirV)VM7#^H%?D3-0i?MS=PQT-=*I z!YD1|<^=p?GJM%>*I&&)&X=iuh_bicVDa|)O}(Xo>e`0(_wM%b>m`Pw#*6FkGOuK{ z_ADCWqJ!UBLL~!{Df@Og;D&gdCNHrgt<}Wbh!bjSh4rsld%6U3l7>EX>Cg4}NK`H~o<;Bi$NuG*V`@Rv|s#btM@l%2; zpY=>8-4;1+99u^H$}wgw!9Rb-v$Hc!x=mwRVGx|iJ00pm*`7s-ph8&d(w5P${pKrz z^Kz+iV{mz-7Y1^LK><(GQJaY$W_Y|E>ASnT-!L^bRglf0ujbQSW1J9GDs8lM2~Om(X9w{e~ymj)rj+tedn9Qr;*EiyegyQZE*0WP1myI z{Y|ul@_4-(MnEPxH@9Qfp)3#o`Xh~4C7G{ygt+g@ywi&N$r!w~zQ*;xU|fMs<23^8 zLxSd*ouo?8g(9STR5*dlvlD2nTy1`x<@aOX=<`|@5LbgP)WNBIr0#z~bDPPxck)K4 ztCH{bX20OsZ!-F;(@iGz0R&U$BE;YEJPi%H`G+o8RM+c?&*H*YKl3g2REVn@ zmQ@7hgoO32pek(oOW#kpAKG&qG$~LRhI^BC%^s-6+~2(QwbY>N>ce@=gp4E^!=KrU z;#DeC4EH2mAtNT>4EB{q3!GLx>{NQniedP%YE0=NyKmC9@MGZ;N_W>&@hMa+wCLtw zr|pb;Y8EglD%IpqF3BXfA)m6o8a4c_qN1YhXJ2Mp2q|;Dl!Yt4fcuHdi4Sy8Q*7<*@)?*mqmom4Bk?IOLBO#)e15tt{Uq=g^ z5>o!@uJobnlX_g$^c8evRKUe>s0hvfvnAL>jZe=TPLQs6Zk!T=*HQzFi_v&|J=ZWY zwb%asS}EodkKXRkb+d&a%R=S1&pd+gDxG6VUV0Sh8(qhc`W5CU6+CC9-HEGzYSxzo zW?zG2M-QUz0wPYlw57ktb`S!oan@dRTs*E;<{8~VaSx^ zm*w>JJ&mGXULCmX@-`CWyRdY=A=Ki*-!3TOe?9DX 
z9yxM%3-FM9g_7izn{?Kd^61J>=Wd@s-^F&i;Hu&ZK6ocojd3NN&;JnQOCf_@=`AIa zV}>hTb&2tmq8~Ft$T+wmRd_3FIL@{!K7jEflT&KKu7u%G`5GOhz#gjmaaCri1)GP$ zpmZ;KW(aDaguLHoGfc{x=l#+8-9onG^rb9zIUHQRb2+`IaF;Q5xGU|f)`A$^-0W@h zN)?nE&gI%na)z{`NEqXEQjF^@>MTTor4aRJCbhWd?i5*qZfRg&B>5`F^}5Uq&Kzs= ztF!Ril3g>&qCqb3_4Ul1ZXKGMu3!K8b_NY(1GC;@Td4XrknJ}5-Y zW^G`dpL%ZG68m6Ac=!}b_Z9vEGEy&CWu#tlLwboNvNipmc|SHbcGVli*pdZs)K(xX zYlhKkQZ49^wAR&qHr20=bI|IvOa9hgN5TQgWt-vr+SbLPf;PyXF@y3g9(!VyCLCXS zU?4|_=9CK|6|cZkiM0?n2>OwggB%g#0`RErnmeJ?;Q4SOIgwc^f@Pq0@Qw6)Lp_6w z%Xh->Iu#=Wn&pSyN2BYQw{PDv@6W1lVV=qS+}MEwE{))eN<%;0jJen}3fg&!r)hJ( zPjr}2@>C}L^5)w`hv2Y^0S8Hgle2RZb(Ar#ZSylm6HxF4E!bO`{dwJ(koo-BS0TMS zF_ErkXy{x&FFC%ap1`LQQ109cJl=p~;n0=b$NXA%BSV(OmkRF~d>aEbqAi@6yL_&U z*uCqGJRTP!_i}`0n6kL16oQGqW|F;y$(3Q|6opr5&=3?eK0Y35B?3j`Bt8} zRN$;d@9`P}R+ZW5=va_w5RragSh!TC&AXW2QL@1ZS$AJRc-Ey2THchYw^ze2BJlRq z*8Gp2v^YRSq$Gq~!dB4}f3pA<@2`W%ZDAqVzX{`dI!}UQ7cBGCZn{%&sH8rN3v#g8 zC7(mjbvX7hsGo5Xwqfl1*T~ir;u}>nP*i>;j^X_UI_yfJw|E|iy;eez4Za^OQC4-) zJ!~%dvj3C)E76)?2G{gFR|7T598%pj`_#i*lg~koI0P-l(iE%S8lX)cFcq)wN6yELP7lOFC zQG;2jPl)|p8goal?m;ftYp{fi3Cw^ah^zIye63t4-Q_0=sVfGPdSp)C=-818FoI|%MM=#!|yk6&+v&OV0|5Z)P zXQ^o5lry(hahBaUm2(OkWwbb$50%$t_igss{i3$R!+?R-xe9xPcoVu^?005f1I0&N1yOeLq*;TkXO-+Ys zAxBGo&w8(J>13c7;yRR#nd~2`Km7g$dQ=ULb&{hMk}kOnEp~6`CR=SBrb$HIYX7_r znh{uYIarffYw}zy3kHU>e?Ug#%aS|_6fss`#u`VLIL%@zjup4DoQLyOi*;V=SH|G; zx3RT|4fjpMHCpGpQ?D^r4m5I3&qwU$w$!<*TjpV2>2f$gyFo?!a>#+{{IGoge0ze{ zT{hQ<1j+bZ`P#5zdpIC~qFs6B)M_S|_sjz@vFfnt|$MQs7I zLKem_^1x4q60@S(WmJ_FXxp#*fKk^6)`}Yp-4v)v@Dg39 zr{Wj24gPfe5RBqd$_jC8omF{#vCa;d(po_)W+L1M7#%Cj@SYmTaR)}@cu?R@yPIuV$*tEYYw*y*qFaJbu) zJHuoQ0^nuc&~(Apd@>82C@uZEro_$tv?+`#PNtbi*k5{Q$>z8tf)STT-VFJx1<$$t z`pp9i(YTnxmpDgt=ImZR!|l^VY$^0!8iAD?b-Z2YTv7|x%PT4f3ydCd*|#lF>AI$5 zOYwDN7t90q8y0ww$_3nt%OU8?nx(<#FCPr&Af(pADPlIGWM244TFp%8I}`d<%lhc3 z`@y?(HT~`1*)OfU_M-|v*^g`(g)jbWxY--VmxjBWqtCgv{CwMc8rK%HLVw;?yGu>( zqlo`?-{|&2tt$5-0$nGAPfzLfRz8PPaJ4o+XC@qhAmh7TtMz`kYLDh;VkV0R^Ljf| zu^pj^N7pEI8R@j1R^Pfc`-4_OsLef)ToP{nqhy}f$=qPd$`)feA 
zi@v+Y3WZFBkN1+**yn#Hk}!ljr8>8WG(gWJaIJTq33`D~W z*ib^VHLvBDb*QEe~BIJ;v4isp9TQzav&?uTq_X4_2+%3x=Ow(b5RpXmf*=a%$%)cTRo=D8rf&wf4};%nGC zP`y>O(pfa1Yl#@S?R46GR_acM_iB`b9e`%aaihGvycMuvvn%?AU%^ylJG-eh2Lnls>#j6GQPZcQ`1EXy-rw(!f{2q&bVmip&n0juDv-!Gf(DD0T&dWR%Euu?&ej8-XAAH$MS`X*+4vn81CP7lmC6~p)H(p)DF;i?E zF`3T)c#$uHRGuo-wJ4F#HhXDtk(<*SIUcm?K+RzPa|lxJLfZ|Qb>J3rF^*)|TZyQ{}ZXJjF77W13z zZ`p^_GV1AxxubZr-q*FhA2~bf5=+upULU99Z`$408&r8&Xm_i15~Z2mkn>gR0g^}N z$={Hk)h@%*ngr%bxNOh5msoY=tx&t(|#Qy4+yjN%6E@kIX z&-k5lt<#G*wlYw&CND{gXe9Uzq$f@I9C;Nu%qbRF^l@HtUC}Q9Ggu4*sW8;2Z-*X) z@!)42La{FBuou7YdAKt_Rj@d#T(rh?D&uXGyvt+mXjkWxp6Ask&eCr9n&>kDC!gGBI?UIT295VxZOTh9Rhn;Dv&i_>Q1$3U3j z@!yUC*ZP0{HklgJeBua@8nan>dhiN;n1V2bT6$=MYQV{0N6Fh!yZ%}DK5|45w5xxB z-tX&vR95w_7(MWfAkMVDJ2>qF)q~!a8f=gEKDk{W>KE8JA(ce4(mC}D4mWI>{LFja$3VPj+^x4)V>Aj(NNbdh zlftg@`?Nw=#D~XS2#p?#w3%BZpO;llJx$VdjfbOaF6BW+A}B zdiUG^B9QMTwgOUjV3x&pygEiLtf_GL7}YK)66&F!ajMKd`wgJ%vQB`XAohNxYkmyB zf$aSkTD6uqS4oj|;v7=RH>Z0HcUz+?i}llPVG0wGi5m0!4`3z4#N?k#hziL~tjp1a z=PXiv^JE#m&GR>Q3vf1b*5tWm=dVfoZ7&qQ-O-Z3;Grvx9{6c8PSV1B94~vvgU{$& zh$kK{^cZJ#gyq@Y9yNGR5KI)tCI|u73WqXz3I*2WYkMe-2MPVekje9Nku&Yy|Kshe z!>aDKZUIrcTSBB$l$4YgDM19KK~lO^q$QP*RslglKu{^^P(VTir5i*fB@~cS1PSk4 z8+^|>-#PcY-+i9@-2dLkbJ+X$TYIg!<{V?pF^>mgT{=t}R>Lj7UuN3dQO@u~znx7^lfqgy)h2ZC-{eM5ou27*Czt_Q0y8&x_LCPOxE6zy*9`|wYy&g%OY$P)@}uiaUO%ThZB);*8ZS6avT^r zo$&#(4-HpWA$iQLl71T<3c;p6H*E|G%5*Dm$nC);Y;*zM#D(Z+38{@sC@FJpmzMzI zztbPor-(VWS6!VZU!mS6?}G~=6@5gCsw3RzrV)oB?vLXmV6X;mM;un1F&Hjr2~De0 zg4)EGL|7SzU;l+DGcz-!kM1XKqE5d!3$%#QFBIzg=Ea=gYiig52<5~XLm430K2I~N z69>yP1%3PWP3wEObh8}(CSpp2_b#M}5@2_EKC{@byG~PdCz46t=)Bj6@0E5MM+9Et z6X5Vg96_UCcF>Wlo7WD8F)tY925p1u0nljlHwA=1c}%$tA)A@2(pLe|mC@+jzqg#( z{;+(5GspgVEd-A^?%n($ERarR9~ojIu&XvV#9jv1G;t{X^RBq^k_HV@k~g@(Gb=@d zZ$Th~x02%UN`n059a2W|CZI`+wkjZY!N zNF$UES>g=c=ZEiG0Ownavd6|TMd;}YPD%&`x-BJ7CuKUblXLrrpBh#ED4x_^bSOw| zRoZ*a3b3J_BCfrlDXjaMDA))q!z0F@`dOA;4acP6!V_{kSvW}Vg@-nx?gZh=Nb@)O zbE(IU9M?K8pSI3c+1@O>oK3sh*<7$I@FGB2V 
zM0N%oYGW(ix@q=q{O*sq3NP}b_+Iauam%5n7Fds(tGDSwb*NSpzlY1Yp2wv!1PRQ; zI~4&iDkA+r^IRKHpPb8;^ttEsdm7?It3N==1pz+AyTQwZhQYy!D z-zemd&`R+|u)++n54uxYzYeqcJ4TjYa`C9X#57o4cXvExiF^ET%_UNNFY&2;GSCR; z^Vx!#C0|Dbuy*}?-YADJ8iAOeb0*JH=y4KoWq2L#Euj9h02t;+XLG0jVUs@Pie)?B zVu^TFnPCbC9baAvTgA&Vi#ENSlBofN3XFkKal=6HYvjJoVEGHf(OihAdti3B$ zS|R{a-8p2q(Jd7H{qbgjz%0qPMm*F=K&Ma?M|8h1XS~d+nZxp1 z?QoOp%Q$_kg`!JjEk1r+bBZmoMuX*V3!@OHdOcn9A_g$BI|8hahRCl^xl(1T12%=5 z@ks`_0SqN5-FbI3$U&%>$NIQXIX>m*-cMq#L%uHAcn$iFRcRG|E-JrLD&z+G9{&|` z>%sE1$l^hu;A+O66DxZm~7+C81^M2@8 zid9OnxYpxZw;`TfcyyWqyO2f-BytaIib8=m`+kO*jpC0a}&!ExGO zK=PF(>{CU?Z*Dg`aOo%0w{z)JaxcUAUc;@zm*HLl1wP8(Kw=*+U(P~ zM(Xl5lL=!fodsnDKd=i1G(k2Hk_4iJf!O;Zs0gtN4#dbZj)vf4Yd#q&0)ZEKw^wWc z^Czc|yudl>)f<;PB%nU%kIm7c$F=yex7o0a(+rXD;3)q6=H~}^^tk5c|2Ww)s8eo&Wbv)&%OIARda=uA8$VJ}2QluK6U%EUR z`oQV3mhW|f`<<3-{tw%dM(%QI9j)s<3M^H74q~$cZE#Z7h&mpLeTdo5_JWQ;;bVto z+Im4ygN@>^OS2Qyp4@zLA6+QxFxhi>Brv$+wV}XV$fqFS2IVw*{~@O#kx$pDzgOwq zy56)e^^owPtb6TCvMm6r4@hR+F#+TMh+0L0et;m$GR&s^`#~*1_akLf5VRRBkeSJN zkUoX&nam126M@9g=sA?;=$HEP;={cRY0j&NZF^aQtjGQhxg+@R&iC9VqNAlzu1C_2 z7F_xO+5hg(0l4b8%cWkFG*k?xU;F}wq$9MYeJUsKcmn6g4nrGEX?S}_3* zxt7X-q@de{g2B&(T;A^ovB!Wy;AX*XN^3vvz(6E+fD@w1k1ZHb=r~iTL5``ijOv&X zJ?*80(I7PC6*bVS0Qt^-@{TW zmSK zw%(8+qlK9`j8qY~v;T|AA`rVA+e_?G?rk9)Rg%uE@Jc8NTunkONO5U}Or5ySKOj$w z@0w%o%;Lekqg4cwMX1Z!dzyhPVhMXBOg<8o{Qt|<}4%J*NSZSYs0;3T+jlP$o1)*@VH1O(3vsMwcO zWWY%~Byh(^1@XhoP1Fpx0+ZNmduc$LKm#?X?zct*s4xILyqU1D z3QmfSPcYpPm%WGIz9~X!ZV;onLiio?Q-9fFL1Ca zq-{DMG2HQ7Q=ucm6{RRzClD4j8gTH#ez4S&Swr|cQqVFv@H)|P!vVCM}tCr zn?c~ytZF0`P>(n(GDyN$hYI5;$;aPN2zwr~e*xa8%368`FIxqn0h?B-mK_0W>IcBu z-P~Q6fi8+fkcLoAegL|t1MGzCD%*pVz_J;uV9K z!~GASXKjD_(8Z{4YlP^-X7mR8<1Q5mLnH`?b3u)6OTy67yY#RX@yald{uNc)aJvzi zc~2+q)IiU=4R~%7WvE_yKP3?amMRi{jw9gh(?%@2`@OHDZl|s80SHYKnbkkW{DGpt zGrkA8pjrG3H~U??1Lh+SkqwIsSbbYJ$1qC|wx@`-F}ZjDW!=(S0V*eFmTXL$%w2>Y zs3zi+t3VJ^vL`Nz36#KMCH2(^B$6-r_GNilLRk47Abk}6zcwRrdsPZ`D!+h2Kn!TW z`|(|BpeVeT>}~%zntQweT|*Y;N3aqomZ|E{!`v9ztQmzW<`_BNy&jzc&B<~aNStPyMZaxZO&D(lDK+HcBA 
zkDybgwjCHK!W&G`+=UpXlQ7V?4t^U?!fn8%?U1QyqsLLe6Tf@k0D}lp*R6^~^+z%E zry9Y+glZO0{_^S$V4^#Yfsj>a{6gENv6}=x+KVGlSE6F3;&geCvKT-a*WFJ4YmA{+ z#K~!k0N(39LUS7Cfy^#(%bV0UyJ`J7aRkVvmu@?t-<3dC%Y+~8jlmgjt_M*WJHQ~8 zB0STNACAHo5nmO9odl_eB z>Mx(zz&Y|mfA$FoT0F_M&9w$8Rg)WNYu{e$PX9~UKT}=M+Gz{6Yh(W9m%=rvx?^mT zKHu$1p`Byo%^`m^<29^+2rO??Et`3WQ*W$Od9{Q7LvdfFD zWfaGG;#HE*hJ0{Wdgc#h z*%lxw!T~V;`STe#MVVJhRzV~|J;2aD%+G)CDYJ0shpO_&_Fv7)gzUq|?EF zR{4qc`n&|{8TkL25m;Ftc?a5FYpc-9+16irvuA;RlYlAdPT6c+o9s=v!4JVS+yBZ1 z;8od$_;M=V734#MF0XHB(&!%RQVke~4qOmof~KLNcIXniH2k8%(p9s}8mFOa=l$cI zh)?G~aj#bQWE)NEPnG87@jq(#)y(Mn0I?|KPe1}gX#14bBg@MgSS4kfv5k$5r&USr z2we^gz}4xjS|hun6p5-jSA5eD;jfX=?e*(#sfMBc=~ri!I>b?hf9v<{{K^yKUC5;6 zj?J@k0oF%G!F4{7%*Cvu(5y}_ppk%^dXLJ{dl8H&x8!*UtM#SMs_Z1U(svN#J33?wPDL?R%r-_Mp$69$!4bqO%<_dyftj>bcJ85=HBK2 zb6rDJb!Jtxzbgna?BB+Ju>s*XqmUX>w;@%Z(b{ol(dmNENcG=hMCX!GfHp>)ERNAr zbK4o(COcd?h)`VQUkIy%=bVzw6HTgW-6j>-g_Mihpu^VzMk)-siIC5=!|FPcCCfh- z`5$N-!1wOUtnHa?!;H)BGcZ`-B*=n|tN2yF@xP{L_nJ-JY9RAB-$Oj4+pF(O8DT7h zu0I7A&wn9Yb&Q(bB-x`D1JO^FAwbRqHkF~S+2t90m3eppn-Af@^?g%Y;;!s|I#|9h zk9>+SejRCE)1fP_OxQLPZ!aQjkrlUWr-e6Ci170qj<1l<^3rcP$HPFH^k_&edxQq~7sL zdbCqRw@QTy>crj_mP`I(2=|^8{IaplvDeYqNL(Y}l`MzudAmoD0XJ^wJggmb(Rkcm ze?87DIcN?mG802v{Jz=>k_&>jy~!SV`i0_a*CJM@TjPyrC^p%wyOhEa*b4>>kn*f?xSltt9HZhW--cex)B^eA<<+bJG{5JfG;7 zzjZA)xKCGy+m%fm`OfK5a%$4XZ3ZvEkZKvhq300k3tI+p;lP~a38V7Y3{e+Ld7TMU|z?^DF z34Z?=7TFP8k?F>1@CwLGJ`>-XNL!x!V0UWy&-C!R2xX-M>yJ8GoFioh%c&w!H%Jo_;SRaN+7m zt;TPZ+4O57eYo#1(1VI1@tnWHkNQ69YKF5Yc~bFD!d;E!h3AG9Qnr#{b@3k>$y&;b z97JuPymSaQCcRSq%KghF7`F7FS#F@ zz|?tv8z!C5CccC&+72+@ynOn11Ku&wOSWIuAo|_v>RI^uRaj!o>k7|FwPZ9Qfck6Z z%yP~8h4rC;-Ju>6Wt(~A3wPUgn}+#(PiXWwBr9fD{G!b$8aPd122aZh!`a#@T=spKe;+kUbwC2fOiPw#Yzr7@U8b?wjCcd6N2F!Wcx|M{ z)7gffv`zwQk|FsNzxpU~^?bfOFZ=JMvdA(Cr{R;;?`>Q0?IOofc8KN>kk+!6E+B8E zm1B5Fp%`yaiyRtczQf(#x%1X+N2%wvs0*~g^yGbq>bx6R?D2)quJ4r-D!lOR?R?D5 zs+~z*%ij)*ulz_m5*Emh&BFIpO#tQ&2^uGUBh{P#1OgB(`j(JL^D36nPQ8rp&7bOa zW`x>0i|3ke9w`mK^OeYEMKbkRO?icz|K;6*jl-_b1Ru4j{X$QPZj9OCq-6~^BJW?1 
zX#1WhMbgq#H8DXGGum?;iu-xnt7wQH{I=s#BDowKf6PI`Po_sJV*N5#T< zkEoQ|zXcHIV$?hw2WiQscwI6`gfoni<_mPNtBo|DFyYp))>AtPjZQ>U7}nmdxQJH+ zt8O#XnxM>5bLKz@al10VRpd4JDn?@G1%S5hP1yIgkYAv}TVL@|@YY^JLjSp2BnH@r zS-Y`0Q;XizwB382G^{1$$jz9CtfHgfLFxsrxaEvw@Fg;(y;ARB{DmN%o zd13&hVkn+hqW4St{X!CI^Z&k(Jc5uu(O0-oLpuK7)R6D6kSI+#3D(Vv1QW84$DP8# zDE1kMx}W~ z?t3%{CHHB7HtI8Z5QBsiDt{_?Tx=mu(hdU&ryaFQ3Q#lny#jbbKAm)5WHLB;x8iYd z;M@exLER*8X7>2~%(V=YyNZaH#?;f-cQu_0j+-9_3*^C-rwOx^04S8ZK!>;uqzItv zzRsY{2Fn4hFy8eR-*44bK*RnTwg`}RPd{zD3OL?>~PszW-v~vqC1nR%!L-5`a1+0O~y0|KD^g2BVz!|3K~y56K%o;$pE+z|yQ2|A^#z=f(^Igv z;v9a8eeyz3?i~4%|5DG96Ic;GdN6r{b;5dhi+5gX+LiE;Djh_)0qFSAsV@A|jOjH9 zJ9)6bL1}`+5oh}9@Dm3P^Kk`6!A^C7Oi_cM$$>S8FZ&M`nz@3tm^k$8ER?<+Mk)-% zj|1EMC>^EAoV2jpUPJ!{f=vK1J%C4bqH)8=kN$PB-4Vxe=!F>Ewu<%ZBkJ_Pk7mWA zlXmYI=D;@L;G_ZZ@%P3*(Dp2rw->G4_zP=wKx9~#2z#oMR_*@kfwBWNKo;Nv_8k5B zd&#>il1OlKAe(2hr2YGoFd!@gQTzQHNt+tUKoc?pa3huGf@uCM-4q*G*Tq@ z+JJRPIBtQ5xnY{fkyfLx{6IT0HzD}6zGYCcTyEeKCqeITY#df&a@hD+JU3UYt<4J z8%6GNj|*1??jYVOP<{|8rAua$JT~aCyMp8nunL6Poakxx{7_;&NgQXDgJF)b@$Xaq zt$_H1@0IO7yx)-znkRpYrCAY4&~uPF{W?}=hm)5YkbYq#j5a>LzOv=?rD&zH?qSX^_dlKAMy{G` zgs^W;-%(&a^as`U&9(3#pEf)aEX-*VMRi88iV7Q8{nu3Ck>gjVo0CDU0VRs(9IEZ& zq;N*|OowK%oP!@=o^?|%Zbjah*sl*77a9v z2Hi&(ajtk;#5~2#u?P5EB|5%i2IW^0E;Dm?>hL-KtjuM$FVuD38jW$Y%YN)Ms_Rgy zx7s<#zCl%|kWrCM`P0Fcs*dpszBHp}!s(ICgjGo=+s*)8EXFa*3f*z?1ZOjgSmlA6ff?4bo^u_mdVjN&?5L+JQ9qysDSN? 
zG8yRD+i4u>u)PNNE7P!^niq3!IuGuMGKxU+<*NnaX#?!R{b%X0$*_fvN?cvl;`38% z(>LPw*I>bRz2aVWss4Hd2pQDIO*b$YiTr)|KsE>Ih4JdnRh(mV70b8-EEw+sROte5#p>*xMXTjhP zpBFW=_z{+H>vO=QWGb!J{B+{6Rj;gx(;U3KMTCuacEl9EtYxj!;^mGcHgs z4|_digg3+5eF7)Q{0b?+5=C3)c1}qQWeWsBJ}sqcWVU3K`g3WzxsK^~j4<+a<8h}Hj7NS@A}Z9%iO*9pDnK|txBtMPXoU{m=F+IcJc!$>IS#p6H2+ClO!5j8 zsj*yt(~HV{KDf2}IcokvI$6p9>C_7;KFF}}y~e6=Fi!hk8vA0k!c?<{)3`N+=2IT` zR+|3+VFldD2@ZwS%t;1a2Ki0uG|s;;o;=7t~&ZIzYWp~_&M!?tW2_5 z?v;R-w*4c*5iPE+{%EUx_9MvCZp81)(@0PKg;`vbL2C=|H%=1- z15O-6?eqcE{Ki=*IlnD~V>+|RFRjpM9D0ix9`zDVl&uamhsh}&BC{_BOU=82(( zf!v%XB;LPFxhUbFh!rmg<`F_>C-zTY0vq^0eTgaBfp(=sj&u4a!8jD>LDeUYnyWfU zW(l$=I9?)Cv?zu3g?LmRCS8Eg+HNG81YM`|D(ePR!Cuj~hW-@@g?6~wQqAhWpvaE0 z9bqTkhUY9m{)o1SG7<6L@p|*aAdjvX6Ij>K-G`X}Ys3fHnOG96!XeE++u={P&1lwK zip|t&f!2nAV*ViYbL^U1Cn#4a#~h7xh^;#c(yk;MkVkq&96VS3VOGPW19_IXF!U^2 zF*+(NMcHZus?wl8`--M6K!+ z1-m~Ne0H{$`O`cQ4g=EuXl4J+=tZ&#>+k)!FMjynp9`5cG9M=TFs>F|$GKF!n9VOj zIe!$7XPNcsJJ#k>YRP*q;LUU4hse1`*9<=dFB?dX*lfi!_%TYyhE+_I1Acm+ceH}D zd*{(Plzi*L#jo>{=nCXi)D&PQ5ijzvn+8+dPI&ESaY)cm?==g822zmFoWf=Z;MPd7 zf}~V?RQ(mIN$bym7=rOPk6wi;qnYytp*L<#>#DxTty`7E1k#u-PTzH3inJ7d>L2iJ z{nc@#@U&_Ks2w6>5H*2Q62WaVj=%qXn5+@&WSX^r{Ff7OtVn=hcfAbLzTx*^?N)@! zJ%h`m)*9MUB-6PjZ~Q}VFm2e3b03KV#^8=!Z>FI~0(C4L4Gy?x9V~~8^=M%4IQ-L> zz@_q^zNCih4{py0Rvfhzx>0?JWZnR>$>Y}0^CWo0ujsM8@rM}y@o?m7)PsM!)5;L~ zVh0tFOn5E4mnl2uA(#IcPmG9NX(nD?>~o0&OlLZIBOLQt{6S&I{D;p&H~+q%dGM>! 
zA}a4N612lxHfyIKv95n`ux~+HKEYg%P23ANi4#`XH8jv&UI7>N1@`A?zZrp>M4>=S z5MB59c9{O7PUDbW^e}7(4}(FmN761fcqKZ?>?@Pi;K$*Mw>X0?St|lm6>TvZB!aSN zrEks*kYXQ|KNt<`-yec8h6rEH2V)56LwR=+Cae=nKYjtZpfmIxA!{S!RAZHCOi6h< z<Uuw56wjV&;IE1+4yP>KP)T?*K>-0 z>F{2&p?Z~xIs%xOG!L#VWT}LAO6ka{w7;__ENsn$>XQ(~R4K66GN>g>2~2u0nNJM& z{>Fk#1EMx6;9*Uusp3{WVNUzg%1Vj1wONj6L+-GHl5RED(i%g%7RIU6=P?CXtEhUl z#T6Zb1Whe)^?c6sNeE|Jn(%G4L|@=HO}~%9J01QB@7!Nez&quk6Y|UABC#(3i%U0` z8MV{7+Df4g4edY2yDs$MygF%VR?9K$@?>Xxsi^ zvb@0ZQH9?--}F;Q*e2e}=afa>s-$E<`9CJAH^Hy?U-I1O{elWq5Q`wmSULK4s%qYZ(&8T<^6_xJ zbl@TSEXayJV3E$8MBjev@1&}HZhh;PQHBxb+U&l7uBX(>Bim3-)+qj+&mh!t6#_T3 z;$bElaKinZ@UCdz1uzrcLKi<8YN8wXkp98lvoCAW3!(4AxD|nW1+;Fp6|H}ff*_^0 zzlSRZUfvKzJ|<})PtN_1qBiEp6jo^UK!2z|T0A4m^np|6BVEZ zklcIJ^!fr+coPEMNK0BGDtqKvCDD!YS(QUy1b$xQf4?4H7~wFAJDq&!R1#sdeX8aW zB!ZGu8>l-ps!q;_(^%ZzH}oYqNL>+y8|fh9Ip7hZY#E5OUgd?>gO3Tt>rsSqDYG2e z?UY`A0K?~7nP7!i$QwzO}(>`1>TXeAE2QU2Wit<;0(_MH(a1+c7t_4aMo`=G#TF> zyN)SMZvG5-1pL5N9lk~O$pWmMV(?Wz+rg&B zz+H|pnkp`g=h)RI3LD_&#q>A8Gq?8<^lXbIJxf3H=i)jWvbI%oCe-4b3CN!p8fQDs zMb)7y%o2(1EML{X;DSST9c#ZV{X;kGA6z5D?cQwlj!G~-OnK-Bvm`oTMp%c!*6*Lc z_a1M-AX^>K+FS#vNHS_O9=PMy9v?VbGSJgwVS4k2?lX~@iV<+BS=lgpM#Veo0q5V@ z^RGT)^Bw1-tra>=n;&TxMZbd3(SNdA?9Lzp+Oe4qqF5k6z9;PS{))17emmaZ+!D=b zn+Bt%QxSlq7gdtO&J{%(A+}tU6ldng z*npks#@8^PbTr?jx}r#sAM7{0<9WC%9q$b5ze2?GEE>i1IEU4)S9G_v*-aTU};| zPQ`QTwU+|ESS}>ka-NgT(<>kh_^iYV6I()%67mDsBE0$K{GnHBs$#kBw?C4UXP#ZV zMCeUa&~I!n3HWda*sp23dx1-owJYrjzWPv4To(UL3lZ?&76Dz8mfz1#dE#qq^_#{! 
z=XILRo~>lKMWVvMDC&8}Y}w31zUkweAqm;aI6)yZh1)C7nzeZ_C5Jjn%U)%>Ji%Fg z;dZlRCUot>EOP+{HPFTa)i%G@7#T|q<(Ua&{kL5kz^wxl;n{xWgfk`FKs=ull=Tf93Hzcg@aFe84PF6Txl!m zGs6+ygYL(6vqQ!05m?hg(}mjgT2%$m6ctSK;)$~uM|L!(`X^xAiWLF0S=GxCSLqbk z63UF#raHQ}3~s2Y&z=F%gXbw4EpCP9XzvJBLl^|*O09Fsd(^5)jUGai@redCXl7=f(C+nP{;H8 zXwV>X``-Z>ib=U~gH!Xchcz*Sog}y$wRYZwIS5CjqNmdUYFckUPLlFwvu*REIsR22 zB7%Q-MA%!DCflq1Ap>7u_*Yy_T|bZogrC-pG3_z;IICnu1M0Ey?lV_f-VZk{!L96S zPV@#vQ{!}xnXpJln%T0gTcm4Q>+lb%3#T(CJ*F1#89H9wepoEz72+gUH6vberbOhz zeQJvri8FC$NqI}Rvy)77XLMS<6ydnB+ln?&H;+eqat}!P3-5^^pMMTaCxSn`?-b`6 zLe;)XeT-vLaJGRKmvtoowO`S9?R9zD8{9Lr&VH}GQd3dn2x@K555;yL-~b8=NVtyG zlw3nHPl2>5W9KI~21{6E&YY04Ucg&WA3@X_zvoQsf)cVV6}6DYK1<)Br~#vyjuwYXOZ8O9T9>( zC$LRX@Dg5e9NWruN zv!ZxPH7_Ji2mE>#V5Fnze%6%fIP8X3?l}ZDgnCOV-(?JcKdjqg6tQf=r;;HmnwKdS zsj_A(Q#bv7Txc{-;w5PrZmClvoy9wK{$Rt1S@5S8Z*Xp<(ImRnvob^5gCi{N1R^bK zfvnfZ&xR^SCKeD8ty;Sym3?E9qvFrvZ)X5nnBnB2^cDC;aB?qkJ8rcSbZ|yJo|%YuBY=P#+xhoZOPO_%n6JfrUfYh7BUXhM*Ld4VBu2@7 z1&mIRFY%4ZZ9~$fBl-nG=oM3&rNcPplW)lr2t7SLTSksoR0|xcZCChp15CZBWy3C{ zos;lh`|P%nQd2e!Vsk-pyOAwNL@_9If)SqvJ>hiOifQQZ&BoCGOq~1NZ+Sn?uy#R4|o!9CV5~@EA zfZu*?&CR^{3aF{Ae?&T;hzySyoUPLI?lj9et*^d&E$`~(n!UjDjDc9w%gWfXtDb@K zlP7V%a**#bKI!B;EocpQF%#2&&cuA(q)q=KF2cE6qxu&vN43e)ItVN~T56S6Vd`hW z2#L33Ga80a2NJk-;mCX=#;Yw{;73!w*Bi1{F;L}WHBO^{`C^~3v--mQrK)7-OLMpB zOzHJyJRYAEC>Okd6}^7W&G)KPYPR&Wsrf3Jv{y#|kr2=?UbjopT*7<8dxP|R)FJXF zsB*h;_LNT)xk z*~h0*#I{z6J9$Lc5#!-JM^5zzyTiCR6aaovP7|Qb_*mB0BfmnAP1A713(a*3G8&jL82mdrE9Mmg=OE`Bg(JT9X{-fZsm@ z%=DB-Kk|jpMcq%}{}kUg)A|+3>eVBQ3vNNZOQ|wo&Nb~y5|>MDu~UE(b~a+4-j|?w z#%YkF{QCDXEUe$ZiKQ#W{BArRY%-E+4^RjYZBa>ipwrUT33@TZYHV zeAR98$;VwCAu|6<>51tT*$P&Al1xR>=f!S3aush+e;vXgCL z$j6B(mnzM>gsyJ!nUSI$=>@VAM zo6NF1Z7cou*LNrVcQzY;S9q&%GS9UqQ=tKs$y1Q@51g8O4FW&&y4{Vo`*Jxu^R?jj z*3zn4y$YIsJpO{ROZkh?!XqSj?&-FA(uwF90!gsKd&pZGA?h|!Zy*P47pdNtPb)J_ z{3`8|L6&sBfu>*AUPwvTbiR8~&yGV$a$jZh8aLsRp}&sL&T0$Y zBkXgeY@;7h(nTm_RQBBIa#IUkv)yU1j#Gb%v=AX+R4NEe=OLWaW#Zr_-@nP|Qe`2! 
zeRl|UdicytipnKyyYr%O=;}b4GZh9a_A3?w2>D|CXzZjIeL51OiTr z^lVwjoeLzDN%m2{o3bGhp;qBrR`=obDZLv6MQ3zO(t5Ao8)liWzixM95f}(don?H~ z;m`n`4V3;hR53UMsaQ*fpW=t|>jj=np`xg0C*b$?_$gbh{+wGqradb#WjX69bT>=~ zL(`G+J?a;QFK1H>Mke2c(WTi+B8`YZEXYnzs`uW#`T`PkgVz(-wpNKfH<23H(#QSl zy+#OHERkdt_7-3Qjm?yBkvW=4Qdp2+nTXYGd#Mr+{PYFJTX5WUFDQEyi;in#fr9Ome+b5YBJP!Hgr8R)A0C8_2_<~|`=cE|(Jnp1oPaX@_9b=By_-g5)H zK*#omE7E?XlAq%;cP1W{JXYx{gsIP&n^p9pc3J`*aKSLuY|TC_uG)wmG7FGBwFJqu zy$ZjtfA2Z&x2>9YOP!SC@y)3U>Rx^uuM7C9GXm3%<%S`Fr0TPDzub$jOkE2pMpG{n zBuW%HuWN5T>3er22D)I3CWLo4KaP6M5@8RIzrMNsi;Z+<9vgdPymhi?Pp>nEm!F%p z$3olNdExP;qh|JN3Qy2WIR5)fWIxSq{1Rec>cKNK8Fk^y&eyy`%L`dqeoMkdV`HF< zr|LVt1d+={MaVa;?8qo6xsAk4UgFOT|1}7U&rc6_Dw_?WQ;CU*{XWO{&;XH0jfbI3 zFc${|x46hr(ozy?dRvD*Kh_W|W-sCI_mx_&iQn3gNVSyoD69LjxS4eghmu)5VW?t{ zH+waKOGtEtD4;CJ>YcoN;n^p@ek{;6b5s!5)8EET2@m`h$17{eonMT*E$^H7xOpq& z*pB=SPfu>W`Oo(>^+20t+EWVtWS76>s4M+!zs;iitZcpGa%c;LI@N-jwN|B{ZpAQ$ z`Z>zcouSj|tMvF*4Co!T)@M7keV!1Df#rvR-*zaA(o-~$C;Qps9I!1bSW$J=%V>^f zdJ%RL$Svhv11J0H%An!rZW(zZXXWY z5I9yq&0hXJDYl91>9yBT{1xAU-o}>^tMOgXkHr!h%Xn^~C_7=nXWaCRru6b9>+2m7 zQa4T;oyzZ-O0hNaJFo6P5rmhi)byfwTHtB@(;4lra_(`~CSavgGs%oSY<3~z#|6Po zoSr?dI$!e^yMWY(R8(>DC-7Al$j;sBNjiQZuYH9eOU>Qf4r7W6?aS#SS?aE$w6T`x1T`Uc&;II;kfI1e{H3Guyq72gCkh>(|j!9T|7WV_9THg(NLBjA&P`GA?Y7{vV_&hBNRn?ju7(Q{6V`h$8%*f)L-*kFrOYj8&@pL%VKRrD%qec#n4P^KD<)ab#ry*M zhX&ur&T%;vm8wyj<1d6y`d=aF4jMAp8{kXbkoXj&vTa!ckAjxp}fb6icGGdpxD|$cbNR3DnbDs`AoKiq0hy z>~Ag!pPN?q)l+-ur*{WS+^}1^AQ#{)$x$~8*zVen1?7poF_kNuzxT#}YcJ%NUdRzR zx_3VIkv5pEy2LXLlTX7FoOq?b5DE8g9@N}YJ2j9IepNa1>*6~Xh~4^N;_6%wX}d?l z7T9qil!TEs@kQO3geoB|e>~Qb>yI+!ZDZNQ_8bq~tRF%SD#4PBIPMb+lngP8D>i{m zJY$p-MClc|J?x%3#>U15??6@HU~E8h?U`&Ij-t)+#CkZ@IsR}R(Y>z&xa2uP5awBg zRMTm3=j-zh>(27t##$<*)|k=0U_Uk70&2mXno% z(~~AfeTK;OWDvv;jW5iuky+i^;RDBhZ?Ue2q`S%YJG{R>Ff&MrK7tT_=FfbxGFaYRx&i7v)N?#CcQVt(a^J)4p zK|eYj<>{?5+2rZF37I*5t-J+i`g?jZjEs%XT9;K;&S~?KgTXDP`ilr^~C3wGgF|BFcNYnrwl^f>2+ytgP(0+|6Il$7+4^pgF~a*{vSi zeO~GK>MK69D1^#3Y`0)h$d$w*9xRfHMP!QTXAN&p`CQ}) 
zyBS2iP<>aknZ)p~b1nY?apZxnFw_QlEb z!PVFhf)_?Q)vxL&k0Z${zy0tK=WIS9+%|W0SfdL?y?baORoRR+-w#??1YR0}8|{ui zw5s?h>+gT~y&3!4t@A2?+j=l0udb1|yfY#>5LXTTbhu1%tu%1oUd|DrONgH@ipMoY z_iUzMchKC*vHi`!8N|iTx^!6HchHKY?0ufJ-=>=w$bH+X#Yf%VxZR5cQy8J^N0m8( z@?j<0)b6aV$p>yokQrGqtFodnpW#%##!t)aMf&UD0sll*$C4whUvr@;O&c}z5EA7+ zyKP$QlLN_xVF8?TChw#p5kGy;2a?vXuv;Bz#P8lC)TV#S8AM0`fm+v~Bb4jOp)Tw* z(x7vqPryxy3M`|eT{65UrlpsJs|rFV_X+POgoFw>9P*%WnTOl4pr9ZlYH4Z7w2g2w z>ql4(UJ5HKvloF@uS@ZphsvQp&jsRU)fU-x4cowfsivQT_i-flOjJXk(2IhH>OFlU z-@qE!d3Jv`MG$yS4Ga#_wu;%b(=UI%cOniwMe=uAl`ZsflvR*v4?qSo0A{e%<1LrU zRbOI0d;^{B;qm;N57Q5I1)ajY#DFwTuPL-L2(ZVAzAm%1rfzM4Z|2Zq#{o-i9cV!% zx(+^?SEBFOR-k{E$5B8Jh2OwubRM=;f6G74Znn^vId1xLaKk4 z%k^x6@n4gaTfISZ>30A5FdoOIBv*o5`{LKkB6wy=Hr9eO92%zf8cJ+t@YqHSf8HJH z0#EYm9OO%eG56o+!b69%$>XK*+_-1}-^WB@9>Am0ny;YCv44QN4&F9U@eo*SFcB#F zCbC(I;;OQ$;0_;ajTg?-qKQ}vB9*~=Tlxl+e;45=sNs_Loh|p_ zVS)vTA*e^a(*`#@JZ7sB^N*qx+k}|)ruodBFO=t|`87=Ao*fdZ-F=%@^oH?lBO_XS zg6=c|U8c8O@Oql?daHa^hgAb{Z&5=29e>N{KOUp*{Xq7{sIs!s*UWcBWLZ}c8=mEw z8f@}Me2D{W5D(JiJv}RfN%nu79c@{hUM5S*kB8s@EhYSGQjq`tC{G^rS%8v6YNh7l z;lr@htk{&#+0d!u)2nqc>pK74eog zz0d3LwLgcjZ)@Cpj`?6c@fS){p7=;FU1xZAV$iuVKFHeq}g*CJ%i(`46b+ODdEM5?AN$X{oG}N^*U;aiHLCfOfdy4vnG+2{6bNN9|NkV>iU z&IkpZ^V@++@Hvc3_~!?74Cel~CmbOHvfmo_ZgbK`ev|^^ju*)Pzk;4skKSa0QqSz> zjUQ;(Spk6h4sC*!?kUO<@yAfUF*^lL)iC+dY?tr)PK^@O{XzpUoh_4Az#z`3Ty|sl ztj0re!5`k5EapZXLIGRqC{4HNoCUB)2bvtLW;de@82^qhB0^%V<>J{QL)Rc%Z=-P7 zSy%ozE{c2)UP?hV-p7&$jGn znk}2>>^pz%8|phA0i#CWyGjpewYk9t!tw_iE~0HpCbJTF^t7A#)2i??Xd@|y$)Q#; zk1NBMUvIxKaxNcG(<6E^4FbCUyI1+V@oo8)T@tRbaLq(g1fd?a?;}}rHTuUODh-5P z=gjWj(^Dwevp;Gd(B~ix<^()?DGjuyd^_ksc7E{=tFb+H=vAtsNH@9;7?9))GD26QtVjrSTv(R`A;E3Wb42agVO}(K<{1fa< z3Qh*cW?V(rAk+3G&c40G%<1t`<>7G-LW4-^jmrL5NmVp3HWy$GZtDi$8yuL>J|>l0 z!lxARIj=5V#EK)GCTeN3QL~vNN*VNP`zRKFcWBzvixZ*019p`Pf-mo=EBMYnz7VD8 z6SR;iZaXF|EBOr?L%DqCo{T+b}yiSUH{Ec(KD+}FRp^8Y{$+I=4r4Q&|8VV>^ozE9o$ z#ay((;*X-KFY0`ldvrZUYC1|pA-VWEy$5;Q*>u@;{m4zR-@sRBqeyu4l%-;Py1|=8 
z7@RBgDvW3;?xJh$k3^6ig!O|x0hMnIBWh;;_06;H;bwlnM%3t=7s(!heqaoodNVJl z%N5$=#N6k**^UgbWItzi&o!&_Cw#)OR0Op{4j^&dE(XEzLBqowM~ghhjxair*LEyO zQB2_1NCmw}rym(7Ocy(SPIl4Nh^Eci$l_TDNz2b`u+PXPr*ezmT{C&rv;imf>^&=p zkcn6Np=(^#z5VPtPg;9Jw*K%4W{ti(ut<+*LraNYpSR0j~G8rFAI=;de18!#Ux z4-R#o%n9Gi!0gjhycEZdth|!33X>ojM){;H3o;<1$em|7QAFDVb=y&o2WMaw%BNv+v0E_$7QENp$}*N?&g=CK!{-!ixK*N^sfck2;%ft)7~ z8qTXzltcxD;8axNXYq>Gj~k1O>h>~?)H(TULKxwIJ`5z z0yQe{c?)D?LcP`!OEyeHN804zoVW^3$$2n+fj?jWmVMTyK;`JQT-Wi}lfRLRNxr#| z$(so7wC5)PB+Rrb%JcI4FbVTU^fndW04>c79jS%$OPP-6tyyHe+-Hr4F6+uctL^_=)f}oO$2nZr@pC7EX_TJ}x&pl_{d+vXCjJ?L#YYmvpIe*`H zo=-h)OKZG2Hih>2;D=;0q+#o@k8uAl1MAFagoN1BFXA8?B~P$Jq0DLZ8*fG ztPz}}G;|2waK$hB-r#~ub6;(ruZnd~U|scSi2b$?9>?%=4t7~3IvF(2wz);_(^`KL z9z_(4j!1i}`Y@a``Z^!Xa~Mfl_BiXK%Ka4L3baQn89NpHPYLxEs=B{?bKj%+amiqE z>8CK~3gMkb2q2N-x$19ssjPlX)t9D9i|Ts*K}Lfh1Bs^yf7s63JKB)w)I5BsB}ET2 zA=o>$L-xz3h~A`3#uxj)jp@r*-9+ni$xg`y;h;Gg5p6=fkjJwdHL6c=gKiZ>_m2+t zdu6&(TRs7h3#Ul+AxS(;#>DvX8r=d`Kj4-I&VCBUxK{YytU{uXgfi4V>x zzmV+JM5_p)Q5|GzxtZkpqIS&SzArPzepOD7FGrNJkw)mwubJvtlIDi~7VLlpQN3MSR?!NXfiu zbeLQUxXU6-jZ3{Fw7 zYjL=HC$HrD>-kpV(O);e^InZ>=sgmPW3c*Qdr?bzzg(5Y3IeCE2m7YX(%&(5t0Hz?lr+0hCBl06*aTd8s{N^G2=IZezhuBv@I$l}`k zi@r-iD29MJkIj*>PtlUg9Os7C^>{zL3p82y16;Fv7^cen9Fb*(-ZBx4C5%s3`bfm?{Hk4prrbf#*m_qBkHwEOAwaS ztF29E<6Ibj0M&>HmSu^D>z#!`Df>H_BZn$k#^_QiPft%g^^NEg5W+Hym{SWk z>}ypUDbOIK6}0uwT#qNc?WsxBjLq-r(5=dJi*h>WDWq|fI2w=KGUTUEmfWp<@!PH7 z+ph65D{-LAh<-H2(xMnn1;tee1aC~b-=^FQ^PttvpsXx-UL3;RM&rv>8W&yvvvx7U z7dTt$CNY8V+rxqKS!bcLBa@S!TJ?DM$|OW5*hH|Uh4tkT;g;u}(KTwE@Howx8ou%96Ei*HM*d#G7yPG!?pe^>4MF@yu>GTy?|h`HA(Zto#E|fEOD7RbZK4k zMGMatJ2HZ^KaDFn5W~``eM~E`j{kHVi#(q7!n8AI#?F)75B6*KB}B6hnr7bS9sT*5 z-iAlBWqU?oWyVWLb=u|>d3g$k3VJAAkmZk2rmD=C@hDSeSx6v93^aX#iO^R_7ErN^;v zqPDH(IJ)sWZ+1?EIn!&KT^z&-reu2YGUa#Iqs0#Ii5$)+bgBYES6jVamKWUY+>VH# zX}EI#R(xWX7tYMDw-RVa5v+A{5spbdQt{Z>!F3k;)*U>Swj_FCzf6s(S#`G}4k8k# zsnFJ>>@XD_l>48dZiQO|B2)L=@l$VxbUGE|wkc>{kTRs+j2J-T!LVZ=%K}h|wckre zF(fMs`5dmVa3P=0?6GvLfmLh3jdvsW%f%MMbbjEF3L%E>vBopwF 
zu~C&ieixDQk7(hY(&qtwkCD?2;u``blZWI+*Yvks&X&|{U#n3Lw7*OrP;fVTkmrpb zK@ExYpX;GY+flIjAeLj=TXtN6`+6X_VH)DQK zxJpax_AP@Y`=Zx1BbMP@EvLoT<^HV5Qnmtz_T`J0kvL! zv5^_YYaM6n*JcTmFOHyr?Ah%5)S+}?Mlhx6Cp9OQ+sB+WXi@;DxVuG>&EdyxG#&|9yE64D|-AN6@hdIhw-}**bhy%=Z-;gGcP^ z`tP03tlsP;sH<_4uV>(PcaK4zTcCF2XXz!On>m`-)N<$dSe~5_zzFQ_n~Ks}-MUGE z5ca~ccQ?8Z!*DuzG;905mNY%cY+t{8x=U7&&7k1m)Mil=!wD}I($X7`e7hP>d8iZSvlLf zO6EjZWPE;L0ZbEIpwiRO4Iku`oN9ht`gloD=F#1~@^Z?HsB6d3|h9_22!5U^vDKMl)fNAGYh9vJUDQxIe4Rbtd|Pgq23y zRw%}A;Sp*{r~K)oS3e?Xo|x+n)!3Qf<;pY>xl>GBut8(1B-&c=ig!gh-I?zQe5MzL zO?uS|5)AVS8<-;r(%Jn%jvS$*?>$8CiI-dTO^b0td@xV0UF{I2UKz8@Xa#mCIM+J5 z_}o8=c^n_Fg?n(76@7`uVmr=#wFh+b;=L5x_VI`ZJQ`XvVUL90bbrH7^PbDlDa|m6CSk#9zR}xH z$rOC?6^hfkw3|XkMM~G4J|LlQuq-KJE}4?>D~n{JStGw|G_7Rbh;M@R#PiWK!c_4M zP!u#2{(i@DnfdOGQ#M@Jg{MiRO+P6ATJIwzTV%;O>#wk^KnjDNI=`%i)@Q_wH zDD5de(hFujD)@n%QLQAStoa9V$*v~*`_P&7c$C+d)TsU$y0ohxpV_UF*?QYW;yERP z13$Xbj(Jj+SPhw&&c`!Ttbh$k$`N0({?GMS+S9NdB zIZj-Wc-J6%2PE-CLW+|x?#Njiy$~>*HJ|LK zaj9h^BWuck&X{Z3Aee6}{V_}*nf3-j z_an>GEAMHu6!I(hyb$1W>kP-OXOJMH;qM0Q;ZnA2rm*iDt<Y32U;Z|;4OZR^^gIHH;}&d^-Iw zn!noh=7Q|X!^E5EHO_c5^-e1;n}Vv|Ose~)YFY=?m;HP8-1(5hJrNeM3;*^$s~J^68qH}iuJMU{P}F>I>PFk zmnFF-{NB&3{{cYc4G*DTzRrrCs_LGdL&lX;gSL3?XOmPoyHwC}r!G5&^VKgr(^PAq zD4`FlV^H^-n21`vz_QRn){$A~LIe%5pXTdn5jOdK%h)Fw>j39oUia6n0eorF?rEzV zErJi+@2t$X*;@T^ty@(w;Y7=1j+6{HAGkeaj(I^}-$N!^^k7zO%5`QY>uX88tKona zoj4xZu)ff*prDt+7Hi`c0|_W=O-_lC8xKzTuxgal=XG{EM6%-LGROs8qYv;>yPW6P zR@014x*D0$Fwf(4BW;?;YH&z=AyzS?)WiFwOBKwPV&0ZW|0O(}yPaa=Iokcq5gjJ+ z4ZB~0C!sov}d^&#!;z*S0n@lI5N5 z7fIjcpm@F|w5x!z^90bE+xSYHgn(-kafCZPX`kLDkqcc=&ER3OcVB0DVOdlg_RCP_ zh3V}oncUP1i0r6R_JvGxk7;J>uo$4l)wxK~K%$=qp}Jnh_YOw8CFqrDZ$-}Us}~Un zD3ZErN+=cvqNivRlTa8E)=!sOGeXG%)Vr6+NV8m7WiEYYCXvjiB{RH+uH+orehR4V zMn5jSE{jSm{+$W2&oipAuQe@$S0Fq`XWauGC{GI0M!3jFCc^L}w9AeBCw%6V{h9fi z;`0-wmX}&oaOuy$bUT~4oV!1iNpIS6)ebOy-E9wmfXt-JUM;G@U+z_1OXPn#7oI*i zf0~O*z+es-55hWY$59vF=>i`2(hMO)RFi&ssl$jo2`!EmT(irhAkq;Y$GxdKBUe*Y z^qraJ@F?k1*NuqfVsos?&6h`ya)!Vhl9!2k&!ODPwAQ-jJw1y0Ojn2t4zK#Cy~{bt 
zb!Ta1u066JtS6wnD+l++PgR;`R#dyf(^b*++7Da}ITsJoZ-eB|S@W%#FIy;6pFbiT zF`Np|Di&}h5sX?dpg|`chW93fg?_^+zkTpKLNJOKTR_DXA9LNy2#Gr%9ebE+ZDzI{ zZ?_95sIJoDkG5eXt#JiJ%!IGTb8a=cj9h4Xf3sgArdrBDLn^h~hkOltJ)<$&JepYuK0ao5uggM=+di@Mq%zT1QW0NMz5nJ%5>CYn^g6^`{??)PFB zT?17d&2GG+Lg@V>h`Kc`QPr#hDM1zwp=z6X!l+J3m+#i)hdO$4`|pOv3ArDinN-r{ zHwLO%L+WXfez*JjxJgh-tOekx_CsPYRC9Re730!5v|t$&O-xNabnYDE3y9ozl!U*e zY>(?eNzaV^P28zz-E{%d>+Y%*qjHiQGSL!?J3v-lE{gQUput&xUiEhc1)g|?S ztx$}$@8Cd6&2?K06CU8jTIXNC&bgnp^YI2~?C*VFhg?MC`C$LieGJz`vDv#2Q0io1 z(yEK~?3ltOm!%Il{SM#jrG}_ZtTI*-zRk-l(b@T4tvgkhZ@adOkK`H`eqJfd5OFst zILPob8(>ihQ|{(0%Qn~+g1q%k7r}Xv((@&8#opByCsCI)&#_W{k)#vX+?%%Q!-*~B zNM)DQAY&mAG-x%V+c!s}188L`a^f?7*8hB|CKuezW=O+$P7`D2k)g}ED(n;;7#rfx zG~-m9BUW{w?N<ozug)EDN1 z$t4+dXNA3SrH7WP=pg@Ewj~!|uJTlgW@fY%7wMW=F7+12n}*M-KYMQCnrUTr%0b_; zS3rUO=t0s7htJ5QB&4QZ4E8uWLpcyL#i|#7qOR)y@#}{KLx8MXgis;|!FVc0Hk}Od zsI{5T+_uregnAxoUVfZxA&9tl3<@O*ngJEd+Q|pvEHshQfbSOoK{VV%5hka6GgiIhWCZ5&USHfQXdVz+T0ac8|?zh`-l_+_`=xjBwX4)fLrR$Vv@(tBmDLAA z^#Q?|EbAZ{fTYT#U2UmfR8wXsYwi$HLl4>0nf;8l% z%qQw>ySqHL{P%L5dbd~;GN1r;G6Jc%HLUjb4J?bn2%sLn5;|@FVXFf(T-i5^&lk2q znN|bF2)PvPE*g3B%QDfSIr8(Y@_z|I3b2Qq*Y!j}ZXpc?8~+g6@dGFVLfHS(Ehv#C z`{n)8_S^aBRz>EzjwuZ5#52q08*73_eswi3eOQ^lt5%%hN+*!eOp?(Fx_Ppl-RV30N^c@FocaN|!+}@i0X}A6`8(8kT;7aA1iDdsN7BNAlzw4t@e7H4uzd z)>Ueyg#k8tmRXPxkz|aAB0y>A8y-YK!;B5j?L3c2cqbh`mGF2MlzJvv7f;v*t!Q2l8NNsLY}U$ zNmIXcmw~#2RgW8?m7{rnf<%isY!qZ9TmxnAazRH;CAEQMS0dz`UT@IUrHH-m)2S2) z06h*?l+5%_a0V*jSzkCi>vNv@Mtr;G0qs;Ayzy(Tv{{J2qEdMdnh-rurq`n3piR9e zl3rRzG>EYdAJ{@JJaSOG-(7AQuuvzP|+dhxlqf(_7_T zzB)X|(46z8IdqQ{QpeTUymi=b?ph@b%oQ?Etf}kHIix2<*&mKO$d!^ga7wg<*AVv zW_(In{0I9Wa4Uxlb$#1k1HO~ys-6vf?DGdMl2%y_LE9@-x%_5z@@=qI33a-`^Ciux zzQ$Nuc46SttC;RhP@5@5Fi30S8uES0_Xs&N8}d_o8>AT(P}f+%7IT|#b2~y@!0B?E z2$wVd^u~e^)eq2|RDjh@o7V!RRt>85708>HJj zfci||jOGSxMPs?WKHYIJ>tLU6q;?iUdN0&&;(XbuNngH>94(2$ISq@gV+%=uaec!sSbJfv(PGU?||H{9K@# zgNq(9c~F((Xe<5Pqix4JUu=j&!}%(FG?`9tyIa5R)(IYz_=x6w>*3U`~19I*^@FfiH!8TW1k}ii0%7Y3kBG>*DzDZR 
zE~a**2*Kt}kf+9~YX^Pe0D0;SWZl9494cDxD_jTWR{R$iAV^a0_g#nDhp=*>h#@-e~$|Dh8un%PK(HV!rR5k|`Z{GvKB0 zDC7uANk2T?1Ch))Q2AA8G&-8mwKrq5=v|lVBlzLK`LmOER-y6?H~kwMUw z*}O#Qiwo6^^CjTHZwEC<5LbvFxFafl4#~+$_ihb>MMTy1krf4qXh;#UYs6|lBFJ__ z;z1Kfi?+tE!>sqhRrO?c&X<~Ky-(2Q7?w|ghrLS}am|V6JV{WX(zy+ID%YB{Wi)TJ zf(#P7tr#^;Z1(V};W>{wh4e$@Vw9eN?tbEB6a~=~Wbv1`9R6MCq$(@pJE{;JERWF-6xU<rN`}jdN&cD>Q>S2{!p@xcSZO8%j~xV8CgxYR`)bHP=J}zhRP5m#v1V)RsEuu5 zpT`@s0R#16;Vyd5Fd$yv+OIATv0sPzl~Xa!?B&9ii`Yk-o4qFe?5b)ue^%$%xCHHT zggA7$d_mXv*2EK-#vHdx8NS@e&oNUln(gXl%+8O$PgB}limRpD^IDR^v^JH7=SmBT zRwoH+1DUu8xy9b(v!%dTH4ddkeD7H({lEUTCM5;Tj{Oc`ADKY`y45XHIdV0kac~HhGBcD zSJC3}K<^wLrYk>&T)E>TMLLjMHms%z9_vptaW)o8_ee|!LNaJh?uHkLm~=7bMP5_I z>o9+CI#9g`NcmY~x0t<`ROrviTbS=J%PZZDFDA8gMVn|SUg8VwzJ5SW*#LEJmcAco zUOeV=IUC>qc)$|>;B$3ZDfOlVD3Fw)AagtV^m~~+hiPgD-x|XMMdkHph&o~yh|{F3h~;jaY#H+;X;-&odCnZ8!7-FA*K{^=cyFRV2S zdue=A^kc|RMQOt$7YD5j;XqppZY;khkzX2d8;xR^orA6TK{C%3Efy0|&v{k06sAgJ zLG>T$x>xoCXBW$*l_VqyYMnfkf;p_1%qXY>28ul=L221>5o`3f(&*y0`jojt;_D|D z@N#bqcSx9UbznNuOQ;(f3m!~L=RtC=z6lJ+u$xCR2~4+6lW-@KJnzZen6`gN_04|8 zJ?U=W%LseUfgd7vFCQq%dRa_da&U~<{&D(l`A>>XJK@4MDc(|g7HQydBYe`Y(L3JT z{@xRN*6;zgYJs^F>7W+ug>-Z*f5Gsj)+saYq1@cJ%Z=x6UO!Y5)|>6a>- zm>eu)_)rl==}|-=$G~~yysxpfo>AebmI+mPcg00YQZmKwEk|3J(o>}hYbEUuF!u_5 zr>N_L4H&wwMt=RcH|U>|TT=mZrx~qR`7dfb+~ImTF(_NYV2@~hNPa$>zkhBVeu?Ir zRW#1@73${`ZoQc;z+O`MDT+u9GX~6sY3OPa`I{nlz=Y$YD<;$HMp6Y`yVC^QFy2ZZ z=0(_<$sXEFp%KaoQA!ftv{yB%W-Vd3WjmW-dcEsRPo9W-TGzKm^x2X_5V}$ORguKmtYp}uLR(u#Td^6SnP{u=J2H@S_+HdJ?~xUIMjQw^==HU=0WOMHp~bU zY8-P!d*<&pEu7hU(2%Zln&iC9O}-EkygZo+^5sYp20EMHU%-4v&rKR%1XTgIR!l(i z7{S=(?@-cgWb9!}=;eZdr5B~A^%3Tl7gz;rOGwq+if<^?;Wcz(Q}xKV{hH?&-=dW+ zaNYz*w4`pTvTkIbWLI#}1vY9ZJh&YR4R4OfW@h>${;7CTg&Aw)9X77uniU zdyb)_y>^eNm)XG%Isfxu;~uU)Acy*AF&SeG(8OCm%_Sy}?*VC~XkQQog+L0wGMx*rC%#&?-!(_X+AxF`f2eP&~!QT%y%xzeLa^b z1TJeX83wVGn`o3Pp*x8X)JY+lU^xS#?7DC7rQ=)vB2@Y->RFBeNg!QV3^Z(dZBLCF z=_NNR)h|T`+ETJmTjqkN)z+KHl~6+^d;&mSfvAYzdZ~(NIHzyZ za`JlIePnnWZ~LFkk(pLLB@DSz+u}b+65+Pjv0nYxmX6dqWzBYxShfMuFT7do>fJ_I 
z**3LC`Ydo1X%_{3MYT`Z_ho-V93RDf=PrIbVvyYUGo|0n_GfEMm+H25LpMN%9an#O zGGhU|-&qO0bczqe^JB55u@!=ygdv~-ZOT}DrNKdcNh=6ATEnWl#2KP99*yH)L-e#T zzJEpThPZQLHMa2nr2wG7=FX*> zZnuvG*tO`(8B7EL8u08p3}Y+ozygP7UCGrJ5b8=!Dzl}h?a-DvoH;kRSOP0H&5yHu4t}a9VgQsqedQaWiHiqQLGTmag&A7$#PgI&Fcka&zQii z_GNKnUWp4$2YP6VrZx_Q$n01~lwGsN_N-(7gER%g##vD zsJ^VJRkaR=^s=e6_`b3xR(LWni%a@C`eW{;x85YE_J*dJ4wm=Fzuq*F zfZ91T3YfAWXqS(|u!wp8IB0dbu+9-U(?oe%ap4I#moS@|V zuA{2J??$TuljhE;TrEyRI+Dx9Nxq2W#LdJnS7>t83>P$7-s)*3=r^D!&AUi{Vf7}2 zicxJ{Y`~JB0Vn!b!fy)OE_Z@4hLVd<%7e=T8LtvvKVsR~dh-5p7>h|mlASaY!3GUI zQ`4w;hRYCL?+dHG=};Tc_)Zs6_FCbfJ7HE|2ekQFlm zENP!j+%bKD{>$DM@jtcq{WQ(^_1^m-axq$7!h~_8o=s@CstKMWp0nKwQSl870=xu+ zZ1;Vz5UVl_9Q5VEq7OpU)X_Ce0nm`KUGu7k@mftF`0d%8kNb@r7P8sgoDn-u$lV4K z@9&5)1v%Z}U|5WXq_PwT`qSH2a%|d3&Ns-gB`&p8K%N_25v?F0u``(D)zS7=^qlnvudKpkZ z z0t>i7AcFF*Ei88E4qtGA5?#D9DciXILEpJE1|%v_TynF`=+5ewe&K-?a!U_o6`#n_=4dsSb6fA=a7aqS~`pm};; ze8BtV{GLW(w+i`n*N~FFbIlQNx&H{-5S?3O#y!6;#KXMbmXMjX@Z<^?cS zGUp#?d(eTbONb`QpY56jb5x`7i_t}8OT8qz%SDckkxxeGZ1z~^_%X$0P7KtwDIYad zf_Hd{@&>nDD$^Tr2YI9*>W-#f+-nV)ch_Cg>8o8^SdG$i*nVD{3zvr=O6m5(k!|c16#$!&t#QCbep>U7&%0mVzQF&)s z-ap_FujTky+H?l;)5->Z#*i1TX6Wz|kzQ;4(PI|q_12L9 zIc#~>A;cu>UozX7Zjjl!-bv0pvwoeN3d#!yqz~(B+SumqiHY`akLu6HFl%7# z1fBcAIv;bd4d4{}8!EB1`jmL_E#*I5K$6T8ZO@lfidGqOdUju_`n``{?0C$VCh#(x z)5oI0W0agx#~jlrax@C67K>Hmx*)z**Ij9H%0cON36>8{bvMd&OoNReZ@FoYoS~HI z)|UP|RIHv90zSuk>fS6p82Hvv1Rj9(z9YIZr;DNQ$jpLpmxA?571`=zipi6TjZ=Ny_|J;%$H&j;nzEjj9`HIT0S|fg<^BXa zU@YfBNV;0ZxCd@V`G5JMypicfip;M3Jm^O9Hy#@-u?F302@vASt`Sb=K0=Z<%PUI# zL_|(hMeD7>(aNnJY$(ta+yEgLsG?T1!HOFi%MHmd7!wu@Sujw?pILr46qA;W? 
zU0^9*IlM#rjj&ge>&=Vu+hdVyBxOaNSI#^3<$N`IM# zNm~@449N;9-&h+CCe^q{cSmz4%FmCB&wya7>MF#uDVHn}Q;o|V?F|(Mf969Osu43t z3RACxubzK8YP$P1@%oY>kR{g_YYc)G^DKadxBmrhqDxI(gxoK`m3?>$4R>lT-(%$Z zqb~8fdOg1YA>O*0Bu0v;kf;JxZ}ZLaoc!Zz|J6tlEyZeu>Q)8t4`m!L{FW-><4Ky3 zp8dlU{w-nqZJ^<2UzBW(JNUxnbc8@h25Gkd{`S@}p!M(j*@u7m(^t8pXF-6x#V|fw zddhL0zfwhBH_#MuY5!dK)mLd-9jJn8?|T48&9mV{cJKKsVsOyVTx+sli^lH=>rIHm zP<`yM@Y3uy`5eFa?UfRJ0|;A)4jFpW2C)J}EdE=Xa?)}jzseDMmFZ^YJy0yB`k44D zxd7WlannOrcWm%>+DH9ZM~U}3(Uz#xvGt`3kDkxGUNS{<#Yf*DaelCEM7k03w&C9B z6}PeQwCF%Bx=5I<)>?IN^a1DMuIl!zIKi$2WMzC+|q?vB`59uTn+!9P*_{$P+V z?puw?ZG3Y+k-u!%X`&|nZ-e>2EZ6LY8A0P#Z#k14#SES|k3^v7$2*;fYR0^h{aScM zB1|5Z$Moec+DI-YD0$qtl%sT5+*)~qbTF+rVetWgt^G-b)E{O52e)q+%(zs>5q~R; z^VvvHarIZ{GIw|Pn38Vv{nK&aEwx_o)r-P`uX7w0ofWe1LY}c4+sLnOWgC+q#9@Es z6^4iZW|}-nYS1bbN}japZ&AsvxG|dPmg?cLF|0)TivF_e^X5X2$1~KvmrJfZgekNS zEO#DgvyX08^xPW1#K|i0Y0GD#xI7}*ELcFD-9tQyI_Z=1S!L(%iXNNk(0*75wq8|5 z4rww!s+huqxbETum;}oJOf#SCd&fJk@pyMx?w)w;_D`;qf6u^3F4GcUiDl`bA1Ux5 zM%

dC5AGeIi*HET3cPzlZ~rHy)BCIv}u|6h9p$rm2ReyujJ6@tsYZK*#&#Eg<39 zl#z+y_HPApo*AhnJ%5ic-PYsym_9b%s>R2(qHy=I4#EXeEOAl8L3%oQM{F(ZPVY`&{97b9KSohd6b0(r5uiO-;>!#k?w`Nw7V^Y7*Czb|df4x}Z zTfgxxr)gp+cM!TpjJqo@I+C@h3n7_G(aItVk)?UC8LMzEJaXzD(+jp z?e671!1l9k6*66#lT=J-AbL3-PH~|HFiiSx*slUFqRxJa0g!oAE{`YpdG$&BM z;}@~X+?D>1HiRo$Vm$ z6`fX+bGyP-fI*UuiA5w?g}l_M3r~a%V}AyuZJBAEZ>uyRbtB{A`gK7X^hY~JS!;Fr z8=qNvg)@qgG^(fGgq{|wnooyFu47To0+owTU=u_dx2#0+sDcgUd)Evn!c@GJ-y-Yei;(A3i2E5E(T739(q$e`UW) z-@o3HDJt^|+8&ELnC2b zcu*40%weqNpVzjyeX#$Fbc=FEUiUlEoS75|WbVR>l@pQOdpS6wxaYcv zd+8B(2KW?5hMIGV4&uBE4tRfkeED@iO&*HA!g$Df8KyhV%=jntBqNV_D~A7t+WWS_ zZ)&MhQ>#6x)%4k>pEvt{OZV*wd!kVdd#YtVxj`!xj$^bLHDGWqltieujIm7V272xD zyAjPPV`GEdEu61F7m=pk45@XQpQ7WvUOVy5q+x&cvbOU%4?PtT`P!aW zG#2%UdwAWWSOYq<-$>$5<#RwYjExvYO<)d>2CwsQz5?6NSCsQEp@u^>nibw&8idYy zxBR|HYkVf%6WpG-2X(aLAkFI6j19nE95nDEFP4gy;Y!ayzbN!MB4{1h`%d5nGSV2n ziy?7A#3u3@KTz$T84TV-Xu7zc) z8j@5svooChzFr}`-$i)gK^NK=!ZB|0(Ve4buI{_Ij-LVj3Yjzvg>vlgP@|5OSLF5LhIcHY8AJ;#toFJ5F#V;OhM3eM8A zGs4(9;BcsWO*+`c!}1v-L{{fO>!S-M>`;lxm-ZW3p)wZZ*VVIhL38Evl@bvY{xrYW zGi7FRRQTiNCcgr|k^Y@!``7Yw0D2f5{N5(AC$~Ih6Z?q@bG=T#C3$eilBN3iBNg3e z7N3FcPF-fB16$*}Xw}vdZxgK8DkL7rv##@$#>03+sdZGcg>3}Y%W*FdhilZvm01UV zPTklAWHNGlL`M;E?pHvr;Qd~aNi(JDmX*p%(4$;fDV-uQ(v>^#*$o@Ky;a17)Pky> z(D1s!8Azoc=|p32BCk?B{YUoQTJ?O^@3F(t!%ev`oFs%KWt|B7RjCQI9^SFN6^V!3 zz@5v{pyPt<8l@kNUP#x=4pj>Ucb+j4oWp^zWv-i|M$f1y`4ah|&y05ZJDTJ_gRuZ_ z*N0t3QNl-v4A5mc;RsVFjaWBJu!jm#>;HQ8%p=Im7gfmT`mC1m7}Qar9I)D>3}_+% zV}A|D;6VXQtcdu&hR@5(JD+%`QdQm}Sbks`&W@A7B*+^2f0nl+Z)*C;&E1{oGaUq_ z`)}&!|0d6usp7w$rh*8V?sov>RCS+(p%Q|)OLk>NjRJr!WDY}9AhDD(sVk|>)$N?e zsMxUeN&W`>mNf;$Cs@c(#sj0<`!C&+x0R*|#D<5$FdBqmVS42gb$pq<9t(RkduXRS zJs)>|o)}*Jh=tiRju3rG^a{dPxSU2*O5Hgn$6^&#)krr_Phthg4x>;@B~bHzpnjZ% zDQxkdr&1%kd;ViS_#(xSQ%;47^4_tsB|jRrUu31(c8jAj9A$3*pZ!wnNvCUFo0@ll zo*{tKCh1yZch=>IU-0?mk23&c*CEje0(z8yo9Xd#u0Xgz=C~8#Xh{>^YY znI+3tsQxE{1r&PlqX{ZDNQfh_iSrB0_@jkCWKRf~WP}EAx#>Gm@*BKyHh(Y7Q2tPK z37=tgui-WMI@;kv>%qsh{}kGz^ZmMAPTB1^yAsrzttky!mJOeMdWYx&zZlRC2F 
zwKwur4&^NC9@zlw(P9StXy7E@Dy)^cOJUG?O`y?q(bKcD-x|vcJl5JLOq55_*e-yX z4zvILN&Y>Z#Tm4USlUP=7`usV6}+ioy^|f{S$>n7!|FItigMt8GZ_k=E6kNbGW5#B z1nqkk)hKQoDT8uA1iEiD$^Xk#s+STl0qAX_Jjz+ZfV~Pr$e5!$AlsqG7v1oi7oA7jXk6H%4c!t`SJJ3bM8;z#j%{Qik3ueGk|hh?k3^R?!5J zY;nLJJ*QrN99D|Pf=75nh8RTbjL2Pt;2y+4IcrbKnk6O}yh9f#b<3B{1 zRR3dA8;tj(`c3=yVfvqEzgjS60A5|JJOXW`>T@t1XRO8HYh{b;6Q2|1c`Jh-PM6x{ zfxJ0LghO^)!k}gNAH;|n^>@Si0>iB;>8~g`Y9VMU(9|Xo?_Gx~F%r#>X;NHxa#a{~ z$;bafg|D{%V(`$CD#iK&5eRXOfxf$^So9DMMDqD2R^ZRnMorcoA%986KJ9q3sv}#s zuqznZx_ZGUd#1YWRF!4;aoi9J^Y~o*FF}g`U3S|`k+<$MyTwg-!;oB8p#66>^1r%~ zAhw|REtiopQnU{#{5V-P5XlKLlmB6#z{4yYS4MWm&C3wdL(kbrXqK9Q*-lup=Jn?RFHdm{mx)N@oj626|YUI**3A@4F?sMY`YzJbXT@bi zy_(X(!2_LbJbs;A3X7vj0oO?uV6^iIQSq;|PjPq_^b{wV%aFnT_6D!H2=a|>JPf@Y z3FBMG*pd)48#8p6LHjW>wvl4@?}@=*xd-(dMWuMOHe40Cu=hS0zb1rJ2A>UTE*65O z6Uqwq?7ZB`7m8#O|GfsAu3f+W>%|VrdA8~S79v4OfftZ4=BZMK3tk2pT#tM1qD+ygZ?7(cJ&pzy9~3oveDw$U$ZNIOszIl6E5Uzw=9w^trNP0Kq8*v7lT3VPS%Q zv083XW*7%^xkEEF$uEXsR#*r;OsvODNslO0*&w?=4~xqFX(P~8uyIsQt{o^u7LnE9 z{?BU2Uii-Nk{nAt*mfj0Rgawsxqan{3Et+)SQ4KVx~!cc6yYu~k#MOI*%z40eC9Ha}@5{t7>0) z6>dXOf_nKM37<$tXp<-D+Z^Ox8HGFg(Mg6W{F?`SaWu_q(zQr3B>ht)%Opm>jirMo z)St_>9EuX+S3_O!hcl7BQ~Pm<3A{#r0aED}_s!^iHqm=}yrel!vLpZ3YZFP%hHO;B z#Mf};Mw;~eZ&qOcZ8j$`9{-Ex=6`aPAfcux+j|h=zVTXa0N}94fxbhTh*cr&@2tl0 z6i&lk*=vuv%p~E_LAp0&bru)?U5CDhI<#%+WPc4rADRtiM~`jp)?QQ>rE|; z9siBF&dSEdOTA4F6Y!tZU`i}R@gC}HJyJtLKf|RN0+!eLckVIF(vr`@dfJu4x^m?T zBI;>HTBcuC5WN47m|Ak_l$}ehzNrf-u3rD6S$gy?tNNS5ih6Vg%ABP~vEE*ny7fFui1w(G5{jq^Fff6*0$l^o57r z3adOe9mY~L z-c%OTFV$E(?pcwD8+FxM`-X?e5U0M57P|G-K)o%b#*l40G5_N8gV8YOu{rRAAp7F4 zGOYB*uiv*j5;8)sC4`wc?e=9qAVbz&cn5{9^!Yck4o%L8(0japT5f6qf!7 zh;tA1sM%KdqY>_xDgMYp zJbBv|A>UUkul2z917AB*#QvrkT}++M=30$Vd+bW(Fo>S7R@8qRj#H@{nc4Nz#tq*3cKP&YgW?@BJgPucBE+q+w12tDta9?Fvr@yZa=jg!qKorL^}BJ6u42?j@u z`n`H-?vfixGnA@xx5h@f&bD!Q;y_es$z#GiY=E=|&rW?=p(+@>+T_YFhgT;=6<=B}M{{YPytx!Xzb3gr3Ptgyc?Jn2B3#1fv$_Tl8_ok9f-1KT zHXpUe8eh?^_)PPTmTGn*PTpc4qDBoTz&WINLjr3^tXiKD-m)|WL=O+4eQUFh37xyS 
zf491S-!xV=hQqB@r*T&VffYcqs(mkkp84av{3e+9Ae%XpMb=Buspqx z0R{5%qeMhk&5)UlUWA``pS&N%LEBROl*d?~v?jd{8h?F{_P}8e@A32K+shs9ByEjp z$N@C0g~JNh%}NBk&Hym`NGdaOVg=d08+#^BT;E4mxv`?6Dp-F&XzG$2b^Qsr7c#Z> z5Qqj&bWBY3gYy+DNGstDMbuwiYm&8@-$in_LW<%(x~jPq^q{jLar0HBaMh0l)9Y|E zeQZ+RLyqkHAMm&N3DAW`CfO?=quik94{%epv8Vb6^Dk$`o5sc&cX z`6Nf)c^l+SoTg%gF9==+BEeU#=97#rYA<~Ic?-Fp5wQC>J)hXk)A&Nfbsq+Hz4^@C zCeL$k+k$XF`Ch}Wm9Cd^e&#wEiFJThhu_>;!FRWb3I2M)LY(3){=%C-Tl}Ct^|k># z=kF|j$n&Oru_iy+%pmX!P2Qn40(l_RREl_N$FCE*GiCo-PZ(JP8wsH)hY^uWH@V57 z1g?bo1wK`f>M@v8{QS9Prwa3~G(#%GYjiXA7i&v|`?Y-WwyhaQ0E;dU2H{o!-6Y5p zOR|I(ay~|lZ2nG8pXZ&n|3@H+K?mO%iU2;djUY6QyI31KY{X;_NLugPI2RaS=-9Dj zOL$vafCFq&(W%q4XWqbdrexfCTyj88`-$K4B0}4GyEvXazrS0-FnOQQMfoUgE8OOf z>?XJ4D|PU!|H%V~Qhj`4q6A!7iaom|FZHe<8~y)c?#<(|?7p|*%jJ@(GE*{V$ShID zkd!$YGb>X_M49PANr}v3hC)K-A@f)nqs*C;A+y_XA=A4~b${>s_dMU{eLv5C?>~J$ zD(AWPKKI^htz#YQShm+dOyv}8g~O@-?lf>5^BSL>Q2EXTPyXKdf>mej2ViN_RCON% zKtPR$dvZB5d%EFwQydY}zo)_8@z>_@TmRdyZUCnw3wq3{F9M_gd%T-?7Ci#!Ah0F) z4}a0$zAVOV=Xa{$5V(1c6J~&9H0mi*+X@=vgn@{*za>%rUNqi^CN?63#$}rtd|nfZ zS=c7iLy@BpN{4t5;R7m&(0DNc3}^o|Yyd=N8(sp`s}CwA8gHLt27J!>-4e-z_8c?+ z5iUor56vh4cGBTI{b?N13M}qWPPIXgX~V5sD6C#aS{XT|`)X z@PF)nTL%X{QaS-`=onz~HXYn4&TiUl-Z<1?9^-(IJ-t*62t!H{8@zswj9nI;i=1Pl z`Dqm|7`TYs;imoFGvM(Cs3&KGhUdGM;tz9&XzUN9w}=~~1{&F{p?`|VDFuSl%C zzN~Bn_d5DGbC@iEk`TY*=e3;T%LZrtix3(}LPw=Z4?XMahn{r{Z`Rq}bZRpUw44Qsjf_fwM4ySM%>L#5k~FH25}AYs6=HRv zF*(UaDvZEA%?MB|u=dbHL$H$~Dn{}U11aO@+yH&t66I2zgJH-WuNGPDm|##5y`{G|hzatc1t`{{AcBki;bfme1A-r1zCS-T>G#aaG;RR6+OT&v%O0b(!}8WTmqDwdqFy-*eAQ9j72)pvZ-jp9_Tnom)#CxQZ)C6Gp;2M zoU^&0@dDYIx4M0~)I1}hQwL&?8SuFGUO8>7Y=ij4@j;Lw_}dn*HWW&RUQsZ*JQ8}l z{T?LwY#}vC(o~GS4nmSKBJ|5p$8jA%pQwhj{C8@$r{_S2S%ddo{v`uyicLcUgC6MR z6=Y$#!{2ayGbpLll*p@BM=vuqM1n^LXY4wnqn3Qp7+e;o%zNxo`HP7EBXa2fD5ex( zgtE~xD{SM0Ze#wD`=QGWDlJzDO}Hh(tbufiT?vO_%gVNrwk^rhf1gF2WuJS9@lU#5q3Dy5LOO^3u}zuRX+E zFH=6fU^Xp_cDqEFWNMIoWycsHWrL&>3C$2t1sZXl&^U;$WbA9|Ro(%?mip4#`{PP| z?3Rb;`4@R;SFMmpT8rIe^!+sOi46a?sb==?N=r*M1ZERimd9;<)2i|pj;DQBBurwx 
ze((d@pk=uLG4fJ9(&>BeqpD>2u@kb82sp zhV6OAoBK9ez&Hy^g!n}74wtQbQ|8S{TVZNEmkBd^OZKLE&6`grA@Wct!SYtek&y4cKY|kIx_-bb8=PR3?qAD8ez&1A?+l@Eo%)qme?x>?6#1Q-pFQ0BdsaZ1H`-An1|Mp%-ru%ZT#5Y0{s!x&7FM zWhBWM*pR*^R06umFe$Mwr$1lP^)Swnl`Oq=Z#~|*9)#76#I?l~- z6}ZxFK1en3l8RJI#9mZ$vM}O))P-}X_(7wBN6f?`0}r!vqLG9`-CGPS095o6J^Nb4 z4ro=3ZDys0`<(0`f=sb3D6eu>V2k?0+o0Y7Z3Oc`gsW@Sw zg-d|?MajFFYi}cRv`=HrJ3B~^*WSkZfBo>a+rc0DXdhXd=_DIx>6(8^lQbKq@-=ND z7Y4meLGi0YWoHCbwn^=^9jvbRaBc*CW#g(Ibuar_-V+3DRk}I8lHCpszG;OCiynG;XXLw$kc zhaqT9VcvN*%OS5CN3!12Gf)H-gHnAY<0sC!|G0f+@LLG+j`%3YlwXqW$;+0^;!YN~ z3Xy{FD2Eu!WEmhVJ)PwkvDndhRV03GvEM~=83Z1DO?J-S3EyHqanZ}|`p~BX7bZyV zxqo4s2kXER_Q&}yA^tx&-|kM>6Auveo5$)Gue{s?HthKq4XGxsSt|^wxP|NYW0mbB z%Y||pjwAIKVtc(Isji>-F2{jc8SOJ>fTBXE@#>1(?ci$ruQN(v%oilR9G$PJ3Ep+H^YB%s_c}4CpT(2U|u7 z?Ks)5+m%G<|Mm}%YMMhYMWaKOjw zgh2)Qr0lHxI3 zem9OSN*)2htjE@|N!EjykQ*7tPcOV?es|rv2?}TDN!$lQ&R3^eeD_)Zz@!v89Vj2a zw6tV~)B*62b%^(z<$np4g$jS!>n}Pds2G^kj9Kq|Hr-K3kc0~Cb0s&QhkTX?X`RFN zSfEcqd8VH^{03LDW%QSa=fxV&ABKe{6t_#N4I1JZBkrm(o~?rhT2bPz{yJXoL!j|ky3U9bu|K{S6~oj zT{&Z7DJfTR^|vb`_j-tlXOc%1E-+XI&er3arNo3kuYb6GQ8PDUA(Hx=O3Sx*i~U0N zZwExK7FSzQrKTC*uFn@S*muAi|0a_4kZB4W;*FD=m!c~g8$x~SpbR_n zpH`O&)V_%F%J&Fr0|SHbyS&x-Z@ScjK3F5>+Q2eOhcelwNofKMim565ALyb^QF4E_ zYakUNogj@Hkw$scy^MQA&-3GL=!-caesxv=rZB@i62 zAPi%?+9%pY5>%jdQI0uKhY#HdRdKFwoH(aDs?QTgDQGLAg^ZQ$6 zvY=xjaN^|yjq) z|KZX6s_UT`x6Ko)ZRiaedHi;OyBBx@I>1?Ozq#C3JQmR)p(4Knr1ST|(b2T&o*CTI z#40O$RUqr1O)5|(i?mc!eC6+D`yr6xYj@R7mDOkZ-WHfIUsl=nFu_*LK(x1o0 zl@Dg@G#-iU?#&w*U-tHP^Enrw)LA)Q#%qUmqv^rmRgvT?K9l`~nXGpP3W;211Ugb6 zG))x6R!SKgx!S@e$3xtQ6xG&SOBXzfhw>8>A=S-0S`~T*jz;?@g)IUH;ks_@R5~?M z>8e*@ZREHG5;hQNsgaZ-S4p~O9Y-SVLcesd_E4$)T353>6f1TNWq4W;_y(P~ADmo# z;q#?vkw^OZg#ms$;w^<3d@?DGxbvCT!^hT`)Mgs32bT{Box8vIsFa9H*FUZR8KFjt zUwaX@uv%m?I~8)M&i?q*W6&_W61#gfPpZ3ZLZ$8#*V=7VXGWE0gdU^26}J|awb=4f z*iql3B$q3nS1Pkah%^g*DC&^lz0u}k5Ixlv^Q2LKL-2dmBJf2%&~`Hw^2l#kbtW+; zs0Y+t1n96FQM}Axu4xp(Vd$M|c9Q}PEhd7X$dI+U8!Yq*&Zno&yTM)YsOfWns@GWv 
zN7v38wfR)O6ZYyeS!d34Z;IhJv|`5Hg*eIP{d8O9C3Xw3pv8WT=F5CIVd&~=_U3k_ zecKLaccIXu^ZcSp;^eX@Jj@+D)1)U0Qahz;WK|@Wa@K(<5R^t_p<4I;;9fjP9)u@Z z*ce^`GPFf(V)f6uc!M&>SA(}xgBM2XOZS{d7$QeKgzsMqz|Lc1;?LU~h+%ZPn#nIZ zlbu9o_+#ChHausvJCmgS*B*=u{@k|qsB764PSf$Y(@NRj*s(-&#-=^r>|6BCtgCg4 zRsK)c!OUflrE<4P+x(9BJ{+4q(DX)ClSBT_hUZ@mop;K(LQ8PGqA6UDuY;3dE0m^W zQq3}U3aQvMnL0`@qBuM5F1OrUb(zUKg)r%Al!7jWEXtRYxw zCUd+!nRxVB=%}`6@7=NTnf2{g^&AO=xnR={OslAEI5gy7x1rW2^J#^5(RIB(}jQ z6M`G;3M}R@lm=xx)V27LmG6y>SnN?d{+aKlR-ub5&kiM zwuwP&eb*PCaGD8(_NttJENR^_H9}nz*a8=B3b^{F@=Lt(l={2m%?3M2&qQhOahv0!Dxu@0Sd;1Q@VA;nlV;jJI``-p`e>B>w+Ru0JL zhRQ|48R^m(y`YG->Ui^OFq^G`?9@gwVvw>%3w28@{SFyt#YkX3wXtxe+f|GKv?_bicEN(a0v`1Qmz<;JuLgk_P!r8FcZZu}) zWiFF&3=4~(W3-Zj1zRYzP2y*zsibh!sw*BtI$a?;V{)9czB5+FVZ0b}#SE+i-r2F$ zgbv*wx|u}dIvUKF6`ndaZZwe?zbvYAc8=AV7)^;fDQX)zH&XdNT*g+Pd_@U>70FJ_ z%AI4S$R7PgvPDb2#$UUUBb_S%W8k?>_+K8viMoUm#0wlzCym9~S%;&Na_Y~+2h%Xc zL*Lrin3~(5W5MeAp8b8}qR{H34`y}>wkthHT!FnsGC zJuJ$^+q>Ljj{pX4rux4xgBh9yvqFg-SS$trB)kQRRI|Yo&cee#B9>?^`~;qnri47h zt|&cS0U<4wAxhlyo<+}15|Nd?_W$`v^+btG5WbO2H2wWJwZq3zkNx`9+DrV;phD03 za;QYVh3gZO9{oh5pAPMo+fTk7L?whrj&{2zvEXbBOWMMPC)IBkw9AT>r6=&(@f>+C z0ZVc~fBK(w0ZZhy{n)aiNYJQE}ZJdi2Uigz8CmoP^2;0R`AxsqGU1qQIw-x}oY#oI!^myWZD%U*G_^@neRccak zLpSOe*v)*3|6Ii}rZ;f-X_;(jmMeZG^RPLL@>o~(RO|M)RZw}c)J=yM`Q^Hp8B#67 znyLQDp$t*j0k%Q^oPynkLS_~<8j8Ael<$mTkIZ-+3#+|_#&w#a-9Dy?1t6!M1zjU$ z8auekU&}dmF$Wf2!|=}zJV~`OMHjt?vdIcWC54$SR-lkI4&0@o8lQ1E%NW9nTx6FC zyxeO7rxQ9isE|F#{~xRGwN225Z}ExVs`Z%)O3;!fzF=Zy?SeUtIvh!quP({fs@K=n zTTUJX$gZzr`@N*HbxL?lEQ#-Eg>~Lw;~w{^tCMLO`rF44SkO9ddmw^|NULUxDBQeL zaM%Sng9(4mU@30mmOF9=dH-{KxZw=8*s^L_l6>)Ld&CykjfUp-8b|@hsv?}uEH-B% zmQZ25G4!pPsSB2$KekMe*JY;|z(cSKniz6NO)M%fqW999R_c_!XkrE73^*#{#?lsW z0c!G%8_M1RKtuT!d115N z#04v2dVDel`}I<};Ue0N zSZD#x%T${nADTr(H<{pe?Ztj3_Y3fmpJVJWuw{Kkh__(B7l|W#^~FCg-D`*!cYZSm zXw@g%rxS>g{7kn6*??ZqU2h;CoYo@{M;sUOx39oR$&2!NGzuud9b*e;#w?85_c`$v zj&vS8CJo3jx8C5tZUiq&*+ysU=4eWL;@GkMyICxRVC3McuaP$0iZ;#Qm0@*;JI-vv zg{m${>n1arC=lyVn?E`8&1J|wL*o8VS%O1bEuvxetmuJli 
zf9=ImV#L{{|7Y9pJm!$;obE`>4uNvZ=e^V)6=2lyyh1iz><>VxfgrAV%IWjxqpnwx z$vEmF2F#jOEAlKnMw|0rk0Icd>AdoSDF^a@p#|};%W6oP@7%*ba&$#>XHxP9+GU4P z#-~c+uMbrC6lk}T4kxIY;dy7+#)sAv!d^SRy@DJoEc@R#Roz@~cGT|k(2q}8{I}iD zTX!(@me}<_@1^~?aXkh4eae+0JvKRtz812KM_@;MrmO|^o4RIy=Y4KNUm^?-J) z-$e7ncV#tTT&e@;lVxDUu?djgczkd+2NjYAe)3B!Jp2An4N<7gn_0aYK%P&)Zg#=8 z=Zt4eJoMe8t6!~BU1N=Hfnby6>#yV{0JUr$^2+u6WTdU*ESRvFS zaAb$Io55_kPa8#rzQXvHjXzm;ib64wqH{?(4AJ_1lFacYi#_i!_j5 zzwmM}bdY(A-u7(b_rtt$7kZYd*!G1ImzjEL*YQB$sr^e7CoJ=gu!&X^TouUoelBW?jZ0O0i&-tK`Y88FujjT%obO z@zp%7ju{wX`BeV0sajn4TzAdgVV434MTuzb*(;tlAd_oD_K)Qb@rVpNL;TooDeyF8Y_DQiZ~))7z+7tl;pQ>fx|vF*1*OHCUtcSfgBv#8A+^>%jDX6;mAA)JUovyN*`u|K zh+PNt+%u&d+z?z*Ki$qn68b-cQL%7gNn+r;A>C9*gqjwhm~ z8i32r{6GMabEUc_2~rccI%feOnGpZDkNa}&$ET!CfKO*exXx2Z8uEwk>KSDVCf1O` zvZ|Tda8sa(wAn)6piGK~bB<}VORrp6Hh(C9dm`V}_@Jw?(0k;kO9|Q5*ZIh`a{4CQ zR_$P8O;nn8dHCGxfqkbTe;=6}Zl2;9nxsmm!R{i}u9Nf;3fc*W+8=j$F%`s^O-7Tf|awx&Bm90R;jUneH_pV{V(zQiqCaT_g zbs)^^%8l#Vbjp*HICRy0ShPTZ_83XV!yqx!W@VqGmNeR5uZrh{bv*BO1&s$;rp0@f zdogp{4Yq2lxr4MBm3HNEzqiWP(IrC;shpHy?dRuA1jep1gwItDY$mo@D)YRE6t#I; zYRXxl7=B9ziJ!3iFa|d$aqSr_v0G%X$3YO5<1*&G>9<$-1LTRdd3E2N5$nq}t~5Kp z?uYNusWC-1I)L6pLb1xIJo2R`4 zm$5Rz>sm8oL923^P9s6WZIxQ=u7GHfr?O2p@kF}mQ_2b+@_rxBjk)v?=tnhPpLv6& zyH_|&@X+#pBHF@veLWF%XY4U5N#|hoY9!__i>IElXEy5W4#H?%YaZ~Pr;uf_gc#J1 ze`Af$vRD3|6ixm^!6Ah&@{}|OMRq~?i?3xhcNoQ;(h%h#KfawW=0ggGtB>FlSjd4b z3*zfhDr!?i32Nd~)SI8ReERI%J0vMqi`T+6yS+?O2gY$sf1ixIQq-GFW z^_qM*JNr)PM%_SGIK$H%LRJ+HAqmJ>VaUg?(7 z{9_y+;yNBPv>Qaa*NrP(9*D$Pkoq3b2~pw2O0PG7hwV)s2q)hf?RLda6p79rBEBUQ z(;)>^c+aJk^Y=A}>viDvKPwv1Ln_E=Q!jvaBVb>E8>|U4X*~?x!TnhSL2Jd4&((s^ z*Uh(V>~j^V;OMjJCE^byrY#(JQk!$n7{JVV5O|S}r@!YsqhA8HiE=V)_VLvf0lbJ2 zu)&eoy|v(ELv@{Faonuriyw=jbSMiN(Z+}cH4>GvlH1pY^P=7E_8cn*V{?ayysqtv zbVhk3JjV9I>_cU$aKcJ=a)CFEXJPRaSbmYWbPDL%26#nM=KFVY@-OxyaZcUc+C+&5 znut3o-8M-Ex0_-Vm6?rx@`nWRr1Webdja?mjUgJh%q;EJ*G0Q2%$0iZmDI06GLgv6wZNk8|Cslc)a5c8T&{R9-vjiJddZb6^GQB9X)Nc^ ze?GgM`;;pYDLkX~HvUJC?-W(M_@V4X7P8>^AK{5(lQ_Ks#jx4X;~2Beik(p{2@Mk_ 
z=zFK13Xi;k%2O2#M`A2}nw6<-w(O1W=T200yz<7cM<_o884`_u%*-eAT>x*Iw2!I5 z()KAT5Q4NI2+~Y#pLu;EZcmidTLkG#TgbR8`*0Q=&XR?k3j$dMqtS1A%&=6J6`wX5 zPP^%GGPRI5V?2p+#i3%9@oFl@CC_V@fLSDDXn=iYvX#Eiw75OfCV z(nfUiB7qO5u92V=JZl1hLE&44kbpya`}xT;Y?!D0LurScU_pv_^}yM5|X zGyn-0yO;x*wbP;qmjifMj_*BTfwF&llFwlp)>(efi!Z@a@vexK%)%nZn-EyQXBOu9 zeN3&2^+k&Z;5Wxh;Wr^{KJ-EVuYPkf+Qf~J!4Vt&#Ecd((HCG%YI;v>2SYGRm(g(1 z6!zq{rq(1CTw9#7vV8WU@ztvkCgyjf(06ux8~7{)Q9)%(0~u2BQ8of8|9_1frr|^-kpykxkdbIq zk~z&K7x)g>;MG2Tr^<0Evioj~qPE&5Z=B1GMy)ThjAy~Ym3%OF8P(bQjLj6Wx~GwV zPOKq%?hasG4>mN#Fyyx*A(AsYd1>fvbiKDNw;7>q`vc(_r~C|sen7plNXG|G@cAPG#uxA;QMc}1LM(-gF%~xu zK#B~qVfewP6CCbRvA#<*o?c|aO@k&90Z`I3A40`p*)n4_3=y{HQCN_O5*9ba?shi_ z7SD|bUiLVn-NZ5hm893JV1>B-gK!gP2cOond56La{p8{Neh^>_yZM%0WP>QfeIK|T zJ*GVGza9mjGkf^?Ib=gD10>;Y`Z$gcen(Z;)0RYSM@>yflN^vy?cYJwMzOwIls($b zM29X82}EsA%cAnmhm_Q`LVwPl-R-rgchDNmxui{?@-5_I^brwc%Mn`u3+6z?01;8q zsrKB|g`aunrA#R)Q1>d|7!3KS%|fj34)Pc|$E8+ei+E}51|4{^!Y;wrXhLH?Naacu zGQosWS&>WUBxS6`gRJm8NQPtsaxQVETuQr8L~k6rKk7ieekP)%FLwpRn9`9h86|+~ zQ!^B~8bMvYEr@I31WJGp_!j9mp9od=?mOOlvZxQAOLUuPU z`%Qnci;hKgI@8a9PRfvhT%J(Sgw;E(D9|keTS_f;<`c5_eh|1}?^L;GN?wHcc@E@# z_@h*nLM^~k3s$4k%X4V}6&Z!Zd!J6?1O(szXl>yu{+!banKJ~4d?o*cr~Jx&B~Mh| zpM7LohsFE)dhH})fKt@qo|?n7S)SsAg}&J{Oay;>y9U0`bibSkAs`Xj=d3%9x&qxt z5im%Fz2$@bJP+u>)vyO-zC=WVwBFG`37QJ*Je~y>w_pj#=wxRk`$(J!xP832+{rkZ zZZOZYq(2@f2|@M)(7*gh%d?1A1!yYwNuKuK7?BvvHMnS6eFH8YJ!ZXJ*v%)YLX+5` zzKK|wU+#z=9hzmSw>^;JbPuFX(>>OIz5=h6sicX?^HH1^usByQwpEfM`YtPFcNp9! 
z!{(p^9SIJM+&x=pqL5`jRBHPyki7j+nyt?)~lv6Fx-uzsaC+_Sn*8vuL=g^ zg+em=d|opj#jxVdo; zZbZrFFam`yzl8pE)1TmQ6GWjAG)4WO?s$!XduBzF4^X_GY_GG{eiXV3l(UyU&4OoM zB$bP9LUT02j3np}C(c;Lqv#B3;!_uaEC*geG=E#7Q!6*Y><7hbY+;pR< zbZ2DQht5;5{1bFWdD$&*<_ruw)#2HvI|!MoN^16-0tTfsQB$VM$R{7Oc!Kq*Yj{%q1X+KhdK&Imt zneTm|xEYUh!Z;u7Ao<`$f4*wX)W~EEczXK}6LF{{?pdIQC7s4vi`+O2UfIF-@d}H4 zi0e0jp)IPFvq2Q1Q5R#H??OgSWwBaP@hSKe-Ma0$DIq&)3z3W`X-7U*faXr4>lWk$Gov7nN?_#C^|b2#n~aeE^suV$H4 zu!>wQiI)q@)fo8s0TZ;&@Uz=0JADjK>PONP&hBWt1TS6{G{So_7V&1`-S5pCh@kNl zpD!fOL-3jBQ)&cvnJ~M?9NitG@pg>P`ejUkCP->JZiQNR#czOEKBZ^K(HRk&*LE%P zMi*}hPJUH)mkHcUssz3ZZ+kUne8C*T7@`+BM&-ht%A;Cb(hILmp@_uYq(TN7Dt#AK z2Y%p6Y|@nIF!PAmY+6La{jN1Zo3B((lat>~IzPm9pKA@6Q8ZQ-ABAh9N^21{dhEh* zsB61dzUMe(GkBKX6%G5OmdK(6c^a(Y+C}+#ko;`WsEhd3Y`YU&s(^O8PI2*Y>-K1}{9<0htk`C@mC&<( zxpMb4v+fP#R>aJm$v3MU>VPAh`8*?ATjS^CWa`9@;?k>wm4!QcWh~4E04TdVEsF`-{Z0T^0ukDmN<6L>>3K z8j&_p>W`elBu^!StD!o3GxNJOaX1nweV<7S22hz-+KWxh z3$Xf3&|@MP@^H)QsvZefC&kTgE@zh757DYoNsA!}dXC|Hv-e91ZvpK;#rd^Sz>g*U z+biob9m=<&K}&yS05uvro*JoxPMpuHtr1^Xl(5H>$_(Ty9lMRsj-{jdeh%@TW%&tS z<5d}$B-pWjA;$ww$>BZR9EerzVrdlWl$d{K#>f~xd#-VHs;%*R+Aq4>qowUj^zR4G zeQsSSDzfejQHhRV&A%qSp}piw$kd#pU&N_ueodBTrtM2dnF42x81z)}lloTlagNEq z9L~eEh0DOt$x}{@?xv%HKq=1WLt4kzPc7;@z5xBW6$!Z1TffPBA+13DoV`a_mxIDe zRd)6c;=;Y{Yh6xR5as+%KVxNc2wuzIw)6JReIi>;@YA(HYg4b46c=OOSAw#1FX%B--%(kvKsqJ0 z?&1E-!~Ls!>lrel%>EDVpK;7HsrHka#ikk*-@YEJo7bE6U-mz0%Z4V;U&YxAPHdYZdm|e`~sv-6~=4p6B7>R}4*k#G2Uho9H z`$XC!d8v8h@CqHKiiPZzXIz$C$o)G0g`77J=Lv@XrjT%pHN8CbJ_P6I3636*gehl| zP|zg*64Sk%$r=x$Tyar{y*y|OrI2UdS(D((#{7~J*cwqM6MqbPbk(#(a7pTz_F7%> z=$kY1D9LbUI@~o;XyEKnhIGHAFrzgUGQ@`@$k!YL64sFADX=u>H2sk7j zaK@ul8494K%Mq%}&H3FSjFJIG=CqYfpw&&SD$FWBdD!Vm>|MJy@43lV(P#Wlrg4uh z$qe(x-H{IAt{4U;f?9`D0k(1;MB45;#ukSNJ_^qoeiYZD zvB1E8yZsXW+b4bJj6q@}rxuR-G&UT~MtdvnYAFSz^@>fUC#8R8s*p)pXFq$6L?RGc zrva+LmHk*1`Edg-EIm`xToYzIV(fEzQJ&=q;XAPec+8ju9IyNuF$DN_hXFne&=p== zu0=O__*w)(2)OQ?N>4I?NA9eTz>UBTC^kh>^MBq`3h#M8|6~~X@c|M8(CG>oSVL0! 
z!IA0J2Q(>reNiz%hjxf1)B`4-nRkl>LD8v60ACQf`CX2N}jUES(gvl zX7byiFxew=&IuKOd}N>Er$b=cVJe|U0$Ks1w_xrlEgT{GTC$8}U?qppmi@?M<58k7 zKGm5#!VvUuBSxX?*=LOcfEM2ta`V7M>hRg^>#Xqx^EykejxORVVZ&vCYCrc{GSAOL zKexveoXNf5a(Qovu6z4qvXO;SCmtXRZz>Bxp8Cb?mJmc;#JJ7}D-y?wwML*$*_{Un zDsvXEa*wi(m-8b4w1;~)@DEmnoTzj(Fh;o9@T5-H4?ci7&Wm2&e}+`DqD;XYM?=i>ofSZ}Q zJIsK5VgNj<5WwRXf`Gi>)w6$I2lg@-2lBDK(1b4Ks*U+ltMQ+6TIUMuR0FvHBuUAG zppU?~NQBQkbEh}7Ko|o$NUwfvE;Fa45!#$IV?VT|FjiSCis=|0ysa78P)BuKo&_l* zpk#vYYl^xySq`nsNIKK3C7k2Cb_W;}$^XZzqs_FJcU1>dwX<{S(HCGs0@D zTp$0#m{>hKKZyVsD+Xr0SvpzPok_N!x^5EV-;V758aRDxIiH#xwBaGzk2;Zo9>G`; zP5dEBg!*=A!V1dPG5T7zMQ1{vYpOcu4CQ8Gh{rC&uUz4g`1p|>8#B?GejEASa4cr! zZ;C`3``-!YIb=)q$dgB*Z0+K8D+gA^pz=5rpSz;QQXflOpyQSUwejeaiQ7{|mPZA9v-w`na@W ztB0ver#J0-s*P%1h=~*?saC2~SeMsJIy1J&Eb39W8io%CC93W0sb5`ye{vF8bOpEx z(mR0JF2J4rMRvmdFXI&&{)%s=EfNT= zV4m)np)oPPzmtV33pbXFEH0ht@{bU`#;d4DsKAlC6N6FTtO;cnoL>*YBL}JRel8mX z@Z$Zvy~BZ}+v}J@;(jA8C`*ePPXW`rxMp^r9P>&X}#Q;vN(Lf?C2 z!bgdk^s=CSA^TF~FrtpTl`IHVd5R0-ke5@J;-bG_ESL{P=%12Yp@d+lWwn$HFs}h; z373V~n2vpHqbt4Gc-`h>evL|;hMNwKWgK8ism=L7288t_3UtA%*w@$bNS5RcQ;$)X z`U1i@HOYII)od-1V>x;i&R*+p)8<<)5sS9yJ444UpSm=)>ivN=Q(*E&*Mvu0_^R=p zhemPG4YC{m!D(Ig8 zdcnUZ_~#wt@i3Ks+ef?qWetc}%wQFYga$7EmsiTxiNaKJdx^IHSmFP<5On~|Ti?uo z`d?m&#oEDCjIt?K4`HY5CbG12)^drU`e)Zf39&+(Z{`i zzO*u86YxIVL{`Linsj}>|627PZivQu60z?HRzxrDQT(fsc|Yaiub-(a^gxo-JlHTj zNbdu_J+O&XMIkq^G5~G8-$7g9vA0lpqqn#D@Zf#L&(}dc0G&`Vy}tncgGqa`sgA!M|k!Wl*CboUGN9$T9be-j#{*I)0JQoU{C^6+iV#y9V zplfV=pt|-J;BmHLhx6>qkV4Tx+EQh_bb2=T<(0p|d9LInN>X9fH@pqZ_J2Pm?bwB{^S|>%$az^YU}-wc*I=vwe9yi>CVhq#9vpL$_|Ok-o@YmHV2L#CUA%^qYjctw_(*r%gXv1vYZ=TW&Jh zmb~)6X*pvKc@+2e{O>!`q@YU5bhLa`u9aG^ugqJ5h5n^!_Hu$>dp`c(2Xp0ytouvX zLIgBn38nDu!>l~I4K-y1a+r}dzKkc7)9huZ@ltQgeg~Q_*?pu>c_IF_X?keq0S{97MEq?BSlJ zeI~iAa>ciIC`?DR_|CuO^1V{@)8r|^cC^#clfvwqK*5^4zg#u&1Nv*~sc*KD3OIL& z&wPj$oV$5ZWi%SS#Ysu>ycoW)cOYpDhUgbj^P}lQ8cE<=5Z0Dc1uH*%$MmaW3@tNPc&)J0sJSNV!mi zf1NTVd|uJ<8ktJRVgC&x3#EPA5-(^7;*Q5he0TO$vH01uLF>Oj={$S2)iR`|LKNpx 
z9fZ`L#kwv-H6}mTh9w_ItR0~|WZvdhV%IGq>WuG)VH3nn{mj2AwAvCDUW7u!{Nwvj z=spb^-ci=~p@YL^!Rs5=lH{DylW8+4`e}@cFRn+p>R-io3)fwB7|g_!s(ILKTGf_Y z-M5c194u3+g?XPV3-M(nMVUuOS675@O}P*xK-qlG_n3YgxvFQu6Gs8?lEm13d5MoG z3(2ETP4AYmguYDSVrnM!P?Y`j^0aS3+Q;MLQ21ER5{HSLOqw=;gr=aP)?$c?KskF= zE~fP&G^Q9(`BqO)95AR-f&*Au*Uf=@2(-)XOwSKTR}@0e0@UJW?SZ%3u|0vZ6$Vf9 zkI!gs#-~*jx-qsSuRjVlwEfGQ%GLNVTtBqQe@A18vmGjjSLvFt{xh6|8*5z~ zqv|mq_i^c|&I?r1(Kr!bO8uegGIp|izC_X|FFA%_&R8N1w*5LO*`FFNuqJeAwJ&XR zYYtZSKCyZwFS*}L439+S6KL#gz6L=Tjy=u~z3)FxIr=f`shVD&<( zi3640Et($`DCAnlm`YjLTt;16dH~(z9&KLBYPz+3O@dR-xYDSQf0?PL=&$KFs&Sg+ zx78~`zM0yV|AuQ29xy-{J=EKX( zN6i5v@+w^kH_HwSA+c5vxrd!|wuF3B1cHH>@D7sZj=#2C2u(@j&UVwO9KfqmURb+(wYfJn=u4YRpMleR~+ zs{%Dn(aQL4IMI(iHF@hqk>86y(pIW3IDdA@Tu!*vKm9k84k`%1WlXjWg5m+V4eG}Ar1vzbj!P&zwVJtMRes>L zQ_hTie|yu|WpF#a^5j-uz_2Z}Q=puRRXzZ7?qTaf^0Z1ob$rT|OeQx@KPu?X%jgZiM*QvkS&vhO}cYW`uwJ)CD z;q>7DooYNhS%9LfmE)Qvk&|^3vBUM?H{8-ZunE8hqziXyKidfTXTteGKEHFjQD|BwU7`%?m9;fJd1nk7n5u zYsQU#hKy6BEB~zinRCq@Nz#&A!m}3kP}@xGg36=xBv${{kJ#0fJ8an z5jJ*>vk&HastlfzUY_Lm!n`nQf^;8syfK<-?I%>IPCu~)F6C5uL|veMINFU@FSi|N zHT(`ow9A_iQ>JdeApyO(+iNPVKF9mb3xEQN;DL)D;5kZq?W0Urcr7EIo?cM!d*U*p zn4)IX&Q`B8bN~`P%`MMMtezfu3yn0dIG=GCd5%g_eRFGO59#a__w9k&w8h(E#;Qn^ zaRId0NLr6AYU8h_N^NY+CLUKImJ#fXK#I# zyfPq02ysaVE$kKAuiE7;iU=ENd8jG0@zm2+KBo)~Gf{I%`CxPaSi)yU2w9$PmM&YD z25t0^kQ&wp91SbxY7%m%a-Ln}XnJiKDN3lk~VRx13G*`#K@)x-$|~jX9rw z^;OFYeMFtb{eATJya~&}OeO2SHw>$L-4t zVckNmS7bjBrOATNNx~DxhWw;Yk?*Pejcos`{aP;Hy>hU|Ykn9P19!R@X|FFAIfwL2 z+%4}ocB*qVntf*TjlHeWpl;aWvOePR)tRI0Oi9N9=cApLAq%xn;^#9rs3fElysb9#1NN&GP!>#?AFZX7Zi0xc3AsA?w%r8 zk1CN~PD=zf&=e2?IoH>C#Kh;D9j|+ajjI`vogqH2xGd>TXlO(xK6{d5rd6_k!tX@k zHBQ1nf$&ib%_Ke2Qc)&hZ|WV#mW4^5Lq^OF0Hs!8C9x|;T$ekV%6J7OWi}z9_>;4();JmCN zSS@mPdD=_AZSxCgh*_$W_9>a*+;OFooqS!+y%%+t&O^$k8HytgJ>D+PFIPy?4$j>A z4{$=ymg0X?xw9<${)+Z&3r`QQ*HRF&2S8z_n>%gQpj#HnbTOxN*hwWF|Cuc$ueo<> zoN9a=lDdj8+047+goluRc1|hJ&aM?_q|EmELhT%!c&%R}&RM|n%(Cmx&4Rk4&+W9E ze-b<}m^1RQtxhWbokscFA^#09rRamTjOXa|{s)Hm7asF}^Q(?VvZHZttT8zfF~BU$ 
z`8E75IncY$5H)pJiH5vw6%rm(o-4$EMo?TQ+HaJ)el!i?7V3Dyk&p*jwvhxSrYwQm z!Ph?2d`dAwGP#EXRrH1ZM6J!H@MqY4XyJ4JPkYxM4rTiH@pB4cvn4d>AVN#olpM=i zhpeoWw2%>wf=Z6*YzCl=eg&;zxU_+`Fy?;5aD!B(wVR%n*>u4|GBWLQ2uF(eP8LvF+mUu;MJcuK5a4Z}p>kwU!_iX!mt7Jhb_Gb|kaT6{>i-Xv zyvQ2qr#-HSjwF7Lg>dXDVCh|=MEcsi{MW|Yp^5u5MM!j{M;;53&j2{8`s>t?zm1(2 zErt?hU2Orh%sNx;f6}$W1}I&Kl0EY0AD}(!bZweR^mWTKd2jwZ`I0vWBviQMVN{U78C1 zIlF04*E@0&gb-c8uj>LUuF7yG&bRu_uL|a*(I}hNS0L$0P>e-*Ib8c2re5`1_Uj)n zsMkHzly$xshg6WQ^;}3!$#k{==3D%b*T^bWS{^s|PuQrdHs!p5D8Cao=n9rNGVMCL zw;md;z6&`3E>0@vJ(DrvwrKg$1=w*`q>+g9MHl6nG243vGR;WjBPcB@7>p8XD9E!7 zi36{cF|O8`huY1d3YV9DW$+jFBPenThzD_3r@x&0aQvmF~@GFayNCS75tzX6sNSIJKJo= zK|OUoXh5eeS6;BoMsGBoI3@*nufbF7R~+o%&cG!yVP;?PZ`deUX?}-utG(#)VhX+k zN9YK{&q!r!F=2GnWenQ$ED!MD;i$r~toF546IpP>^M}E&fV*_=m$7uhdB&TN-d}W; zjg0~EA@Ul36@f=L5!`yTEXMysdfqmGUru;8ZRE6cb5*QF$^kmKgWiHpWvoKE(Yk}? zv-(D9^;=ZN(~37^2VSo(hZf@{G+Z8F9koB7zxPz}M7?Sz9=xyOIiVL0%>i;R(pO(Y zu7~w=Jg;Hznfq$p2a%7&NSe8D-`fiV?&Br>$YOKjd`r}*{CpMoRow=0!4(6O zEWv{YrFzBNUKomH%q(spyQN%xwAAoOy!@A0P}f*ci_(7rcF1rLrsPEZ(>(aHdGQ=X z(q(r8nN<8h^t%N%r{jicSN;mOm`%x=n|P_Zm2nAv@FQ}K0ABk;t2a)Xwt^C7nr+Y{ zJ-D*GR0f`odzQ$-)|2*0dnw3(;*yqq)p%1L(c^yA8XwkE=(5+}#$DSB{FwV<1Un3{ zj`c7F^oE<<=$DUgXeCneXQ0fEZ zCJ%fINs{k%-Yi%Nj?L||mY?17=Xo`nPgub|YNn*;XpzI2JF=KBB{*89PNXe{FU*+I z-pdu#e$aJuh{rBiEK7fOsi#pXEOvjt7A+OZPmfYLT*Af>1R-R-1_vSWRCYXlWxR44KdM$wOJ+iLq;I+n`AiP4_}w+%G|?g-|Hme5iqp)a)fEr`==T&(sFZAu zlFD^9eyj3WlVe*!Ae(8CNXq$oU!0pOuMs*oSq(9)+;C$hfc;PUEXdZ1kS}mky+!#~ z+Wu`0t!Ez*YA^|m()yp<`lIu{8oeeZ3wQ+MhX(XDZCQ+b*_*r(-&$)fgL%ur%M5NVD z6xg%$1tNTo5KJ0}{)8pc zFyUpWiRJm~u2V-2<05jXI^Qs5R9Pt$*?~UI$cUWjmyORm0etaQl6(N@R?GoNXSzG_ z?xLx2uZ36mc(!K(?_PHMtWB*Q9UOz5!hYVMI-){~7|M>CT`|OH()C^2rC@5LGGx32 zYfb|f_U#JqkrF{4$IHWAcOyc5^^{&P+>PhlWEQQAi?_EOdlmOUDUwK|g?XwZ# zrI$ORAn3NB*ik%Ep|6KP1xfHauG~GCK~~v}7bp7zu(UoRHVlhRdndJTu@~3hw&n6vlzFOtvc|v5 zw-06W8M3)2Uwz-)Q_*kr?4|<~ zjH9>S>UvCd7e2Xk3DaZ;j?hWHK$*;l=KVQ_-0>jj%wOo}YTA{2nZY8DXSVTR0b^tk_PFSjR 
zLM`NN+ek57Z_HYCR^JJgNSHO$C$6fFQhc+zXjvK2G49t+&1~1<<;aDHHQ8Ef9ot8A z^j?MBw$5~qs_OXm8mmX9+J_wQ7Lzv^kHbr2q8I6Iw~bd9+i$p(a<`cm^azBr^e4`l zr#75@CCW2bc7fGC<79ri@On47Xk)S}Qu7u$URN~E_ZH&%K(m(~J(%4Kp#m)wf6Bi4 z#n%dn`Nz(qO43LxXqja_(@2xlS2J7+sMQKIJ>DrTI53hWSmC|kt4Dufu5<^LG~2aM z1B`s3BaL=r6U-#5<4jw1N5`j2b5q6KhS#lD8gUP#j24uw%Y2(onk(Xpa)1H2b}^G1 zfwjZJT_81P-%YRUyz5?OUHWb!H2Cfu;hf!Nwbgdic|7?@yaHZRQ`C zvY&`BR2PV-=oDQvQBD*f&u!h+us)RW36RCc3#^%Ji{;`l?OeO0)2$sXMZ+cgLv;@8 zl;@TyrE`+JTBV&ZY>kd=u)%DT&^W;RGN3L=vU_ts0jO(N#+vJhzS*IsNzpL;edoN-_JI<0#5>a&QzD*plx|(Sf2n-riV*)G*hsWM0DXzBy9i5~ zRKfAM;??in|5_vX<;F@-(mgUzVJZ--rw}dGt@gfUO}U>gw)Rst^=yb>Rp|D3lkAFN zt~^~L5tFgK7-;quNARy(Iv7P}_|=voaq%shqDeu4_kP)O8<^mz2*sTe2Y1GtORp9! z!KZ|>V9~DkCao%yatS1LlQ*xsiX}0vqFS~6h%O3+(JH&upKip)q03+pF zDN9LavTEZy@3a=<5tn3R$a)`pc&ze!r(m)7N0tKp>UzEUO_TNW;0m=nm1$>v7O?$= zgXc>SDpYGa{oYZ6kN*=Su+CFMV2a~__$80cd>-^@R8sAbI9cdvH{yq)#|IWX8(8p& z#{IE1?^XS8As*yZsFSXhBXR4}M-cZ7obkI$H&TdyJY)U8SQ*%vTOz!F{qpNO)($qI z!qO;GcK&Y>(qFG-=sQTpsosvW{$CeG=SpjVhK`xo_}{L#{sf26)PBWZS>}^_KlQmj z-opklr3>Yo1wYXkK7oIly7r3y`KllK%l};mX2;B^m^GArW)Q~*f0kx8UsW7+kN+<( CS_j_RG zotbsNwZ3(K+@H7C(K=PTPVL&Y>)B784pCN=K}R7$fq{WRmwog49SjT{6u7QIdJY^L zhb$>!V9>-YB_)+*B_*kp9l_?7HfAs|Z$jcU5w%qN2-0< z+7PoCPOHDbfZ1WFH*ugSf(5k{DC1BPbP}Z{cT>5*<3V8Y>tHn5%0Yvn5fO0T(Q5qI z5AEPyPZ>DHTHaMXv=S(^eDgycbCJ(o_ zA>LocscV9GO}?024~A1ZSKj9(apv@`o1+_d>8(k<(9)tBURAsk8$WK9_f2;m4dyAC zu#t6dvKi(8*E%vt_jBN{7^p})qjly*X{4uxiA$9hZWam2!N>QQgu8B;T0oA`VoL_v|jiRDdsfx8;lGTQJKIVE>(#OZ}& z>nlUdoO;=px%ay^urh^_h4>Z>d7!rezboG)lA%|SbSgfpM~;$9iRzl!+Y%=Y;Wi*> zebIRgr=kUspk(Qf{nX8}FMopid^5~;n7}bB?h<6|YEnldriB{G5=lAytdRz*BKo-t z!n5q`=bJwX5rP>vzPg~kmPM9J!(93K&Am%2m1a0heRg%@V-p847^A{VT;-$V%XB>? 
z!W;?85j?_fdghhRFsC}~ict3>Gb)tt;v7aWOeMrdG`+AtYZk8!-1E1PpXpJ-YJ*_N z;s%$$;CVzM8Q_9AFL7yL8Hq>;=??MzX?p$0P4G2kAWVm>(UHB+l&FUhqo^q(k%30)gUv5@g z-0_;8|J(`XayV@F@UCSLeo>7}fo+E+h+NBnv?=Es>WH`X={mDt@BM8e>O6^+=dySm zrCM-*9h)5znS=;sN3#YD4Q+(@9{0SmE@WX%Eg7tr!ul1lhVxr(*S$aCJj^ zujcf(T6YiOdS#hQOus-SjrcyeO_EKDP4>_hiFUjhA(HP5q4{#3v}c0Q2ySVRh{&)< zEcrVoXC}#EW-g?TuRU=Y9_*+ZN|)+A{9u-+xj8uOPlX77A+-6j*WE>WZ`vdsjc1~? zUC{~`=DcbDQ;7=O@?zqTA{PvO8{F@oS9Neo%cyYTNMB$$sjzcC9&=pV)D6EddXm|q`nTaG5SEMF{xUTU+v9`qx zXfu@2a{77_1KeKz2lH6;mNY<`gN7f=8;R`a2MGe>SDBxb6@M%8zvYL&ct`k$JnGHF zi&?DZWrMdbHn8%s332(bE^w4^=0YMv?&xs({MSHG+Ac_4j>J1;OL9xT!Vu@U390LE zSw$vAD@8a((RnoQl8WDzRm!h_>Q|4mV9w|xdCyXbDe!raLFoni3otETGPSHBuQ=g!rau%`RpbYb#)%iGmQvU{GwqE{o)3tv!)%%$w)QUB9S-+a{0J- z3CeEDO7Fm`OQRpSt#znaBQ)OVN)+d%<%xXrQO+(FD0G;(w5pn5p4gcvD{59_DAp~k zn&>GeEKVqoK4oNW&tq<-_JQoz*67xBj&;L_?hhN*Y*X{O9^zHHVL4`{ z_*q@k$>l%PcdMsN1*L1hRc#Aj3t#hI6MH7`s?15JWT$28=QYbTDl{s|$oJ?oXj_CT z1`Ru;DeAwS)=9`t$!}D<@IO#kQ1a~dd?^yJNYkv-T>nPm#kYnGtrNg~LE|{FiwFil(k}s4m*}QtPV6tH8 zcPIa#mQB;}Wwk?jNSW+6*{^-fF}U<1dCsbSs+rr&sjDm0{g7ecSS^>!W3qWkrA1OxDQQgk$Ww^8m?!kj8tKJoTDMl42)~ z!&2K4)e=IjX-4`V}(!B70T)EPL!i z-!RfKB!bZ~_B)U}nuEhWYoZ_8=LlAkg^^j4Q3`GgN(l_P6uEf{{B(Rh`(>SWIO1|+ z?@#Jx)6i8}SiJnF0lft2gz2*LGT~{I>0LYSTIE`8J9NA9dGh&Z2c!#Nk5Lh6kC#Uu z&M$Yi*O`t>&i0NgPvtI(PIru~xJ;A6k~W)>7sVH=nj$^?PLEHSP6d`v63Vl|Wru7_ z2T{DV&uD8Sy!R{)dd}unrng6qlrEO`mWYiA+RzJ7-0?nO_hQadT!x$V^c(M+H={XX zL?e)6c#fPKj~Hd3b${&mC?7Z{Q7sWIF%>L~#Dh76Qi{BfdV-Wf;QA^Ri-NdCxGB(d z@Q*{0fDg6-HW!T!O<@#r)GjTh)QpU57Kuz|HjQ*CnG5?YvksmSG>U9!Pherp7QG8r+de-$7-aKazd^6%$S>8e>$BoR=yCJk+7JqTk?%~4 z@un|LpI``ywP6RLbmMd5S#?leX%v>85^i_UJYKs8mt4{5fE>r8NNs^Pc$4o+3Qx~D1 z%azN1IlTOi}$};99xp|%Hv+bHS+f857dyVD=d#{=Ea-r8kf^OZ1HihR!Ws7Fj)|Tixw58iK zBCZ#4BJExzR^-;Tdy@;gHJE!b2VN~=J`Y3iQHaTyuP~=5Dh1U%5`F8cF$z<3rZqG4 zmu4WdqrP-2Z=ABhtADZ;W!%WPgftxVHZR9;V{uQE(75sW&NA>a_C(@bb{tMVZ8YY~ zWV9^eOpm$JLkF*_GA)!cOw+6d58dv6Kg&RsVSB|sWe9Cjo8I~5dFd9pgjwrRGpT>x zFmK;=BWHTkdycCKUzXlL;Jv@+GJGb_mj$j~dfVVpY-%I3$$DRUFy~QgxWLqKa&@rh 
z^~o#ug6AMXjKfp;EO>#^OGrd0QVh-a_ci06Ukg1=J?!at!dGKF4@~=(S6t&3lM7`F zJ)RO?Wx|vvO}jytU7L&1VmuEe-j+AeQNsYgALkJVyPhvRc#b@eNdx(W78_b^4N(pv+(y*#j_ZuT)D4*_DbFD$mxdtzSjHmro;*y z5K)AhX~~)^D8MiS*GMo3up}_gfGb#F7lkGL=UN(;5eEJ#9VioQ34`!=9!22%_=5oZ zW0}9s@X?=P5P>@!V0Zrp_dmJepx@yC=NhI4cm^Y`Dk&=qoK;O7&CKkaK7gHzOD*<+ z1Z4X++D zF_pWmjh&OAy9muw4ng4hF`1o)>M4t}wFr%tf-;pP*wKuNmyLssgGLmEii%3u@x8g= zyVufx7YFV{Xg)YQ+Y7R@yScfsxpA?99WB^71q1}xIbN~9dc_LlV0H4ab2fHowR58V zOUVDod2Qxo>S$^2YzelbdX#Hy0(Nm0p`m%K=%3$T?`h_4`EN~jPJcHGXdwIJ6LwBE z4)%X!14V@&Qw5bR-OX&YUt8J&Is@tu<>Y@Q{FMLy^W@(e|5H-y-;#Vh-2W~4pC|u& zNp&YPM@g_PP^GizzuonB;r~ATyPz=pW6S@eiNDnRlnUrs6h)Z*pFR^sX}{3K1-g;U z^0ks0aDFt*$Db~+GyZ)BuJQchKwX%?Szh+FxSBic-h%cQGAW|AQh&FidhAu1}z(p_H=)V!5x5O`=pErdtW>7~W>tDfUx zehIb1aS^G^O9Mw+Sz9%D=8xUEF@)rkZ7A?cK?5hVf8wp{ZlUz71lSH0B`>^={W3v-VlolvPtys9f%493wH%0 z6v0gUKd*~K1)>_^{P)fu@xSm;Rq-VuAcY>8AhQ!34lOLt2C1<1PySyUma9 z6d>I5pF22M6s%sv-$|fuhFQA`9<(z0JTQC|J~T zcdqNwOC4M=t|X?v-}-R>qox!&NPT_oQ%&5vV2z_$xACv!LRn(>m)ZHQK3D4rzhoKG z=&-$S4`;2*_nBoVh)Y}}m*v#0{LO*hKgho<+v*5fqk1$Qn7eec#2 z3_o(k6_5Jx^pS!DH`?Ga!*w)m@@?iMF-0aO`w|R&uVS~OUA1a#v^5Pj#Br`km1p-l zG_M^#d|><)4-(ye^Pc>%)gTR6%GDJ8o*qg+VTY3Oiq0q*3QGoL^O@lCUHGW;YD45T zQhR=pqc-^Cl;!%(^pr~JC7%m@=YFRAYIwheOb@s#Owmg!Hce<$@hvJLTX;FyZd0wz z%qz3B-w@!`NH+ ztri%T*mUiVlejDtn{1C)xK2bXzm|_ztH!G9DhrktCcU7`q_u~2DjCG0Zm^xs2>Lf9`=zyi)?tN-^B~l6ogn)Q&Xwr@w)03?-vH{ z+qns|wa*wbd(&_Q)3qw!%Z{-{7)}!gH0j-?i~0G^6E~>pmCK5mE1;}VdaOmAmy)`Q zLeC%q&1X}(4m0l}b7T*!$FF)ZZ6uSuZ)PlUQ7GfYNji&H4Gruz^R6NE1i%%yZyZKJp$GVeGi zGxmPblh$zk6Zh5mk5Z&HTkQBJzW_zzHui2jXxLpbf{w|vN2GgfByB~@vxR&WI;cBu zK87lP6??elQaWkMxBOU+`sc^ALD+(1bE)*~6wowp))yDK49^y!mIJp8H}k$X`}%iQ zh)`&Vmf#fHvf2G_)5wT)Z)o4w(+y0uHD=#2=d(%89*^c*=zwe^w`&^Mkn}TXk-+%L zEl@>*aJy?yhK1O_V(ukMvdb$!oJ|{UT4qgiD;&AN_C|jBQNBx4!eSdUbs?I%N4}1FVU5^-d`85)+9%Y-Sb=4QcYp^9`4hhgf`vLH{2XFs`~ZWJcmM2w{!ZM zXv7xauS!~K7y_DDe?B>IkV?QSxFO-y6k)hIMYlh>nSI03e-Z5WM z>$xOSg1vPksv|(YV9Z81kE?#`qP|;rj50%!l#AYhNK?tQeqY-26>DAW-Z`3=PtF(* 
z|A?nWcC;)|&ZBufURN;L5irs2xpfa{rXnD(ti~*}R&1~1AG;tbaM<@JH?6GJHht;(aCgxoo-6$M zo!t!w!wL2@j{GOVE9j5rHDiVi#C&v8-rtP~Ki*Qc);11;3=SlPc1x-R1$E#~fk7iu z;WWgTL`JsceWK9gYdu{SaS~0rsHmvuYJ+K+8O~>-VK^83Pv3qk@Gn0e^1fZ0vB+4M zMDuu=g&{0?KgZPKZ97V6=*@YKb`=<9r@8bD>Nf8nGcOK(!{8MyR%t)2?7a(})*H@AIS1MwqfML%1& z&cf|YAsKwqXDs!ywRa47+a;W;9EuozO1GNUN$1(PtLxY77h~viHA@fuptN7_y#3D0 z$$r3xu(w?22lD~dcEO)Nf3~_w!QpG1k~m*>hsWpEcCJQ8m3urr#(U`^6{2r`4RXI? z*PPtBGJI;ba)#O5g->Iv(gM4KKDl5ovGcpBJ~ zD3?S_&S8Vg@4ETw-@PP!X6DV+(}>Tau|?P3e0@5p1r_!kcUtX^@KpU?4!lxiWTe}( z5%c_{6Q~RUh4)eWSH-w}K}dpu$Y!cjL(Q)6p8ql62;B}?EYp3?XN)lM^c^T$STxJt zx=h|>8m>nQ|K-J~AX05TmwCGkn(O;L2D_XJt^MP+5-mK_`Q@(ndsQPV?h?L{qnN5d zZ;UY+dLK)1TMVbxRh_1QTKn_o`Ifw^7W|-y1UX!1ydFI89#;sKel+pxq;_?g#SiN! zE#3;It;A*zz;xMS{h+KlvCpEp=H)Y7=6!U%U%Moco4febdWvT|6ib77{iUtY{VL+f zt*fpuLKz{q18z4=$Hn|||Gq%^wG=N0^!-Su20?ESZ#Bt;TMW1Lc%H(}$%vSykn5gu zFr{yu)w8`Km*TKwPE+aJ{kTdQ85#9m+#11^FSN7k7-(o}I_3*WH>ahIrLn<}b2Wz# zf_EfXx5h>#g%7Mzy0lXW%)Dyo7#Kx?n4)!H!M56R(7|GJBUI~q8OH7$sD%yGFa9ss zX$yu4Xu@>qfBkRLw?~{M4vPK{hFX^N2Q3B*n$i3Vq?7@`)A`59tC)}9pf*WnTXZ5Nq}N&iX010W_51S5Z# zeUtaq@KbSlp=Xw7_Ghx=@K*rbqHk!Njd7*1&z0qZ*p~H823y`O%VkGa@7az7fi381`!}Q4%Uow&mJ9nd%zMp1!I+sV??;)$-zp{7xE!n5MwS zwZWX}K|(&8?Eb5hx5)|I=K5Yq2@O~2O$bmA%mV{uvkYO?7)(4L5SxAjHwh8Zmf+~= zd5-Ial}5L(<=LB}l#&6jmvvBi-HiOm7p-nfK74`?wqRD|Sz)m}$hEq9eA&qC4{P<^2z=GIZ{HSFRB-Z$ic*xz7Y7f^oO|pP=1%}|#zNGv z9;BukL(WNkGR@c&J>4MXWY`wo-Ll*3r?SqR*+Z zUpv)XizXlcQsrXaDRQs|tomW($dDE8dbei^HgB(d(y9<9=&wLkIRIu=SD>I+qx2o2 zzBBf*HtcX;pRCJD2t&~`R01^$YY*hLF6kx8>FZ8S|Ii!{gieYJVaJ>&hy0=Pv1~IJ z5b}K$z20(sm6BKAbiGq-_2EOoolp<6DH}N8mU~|_IpQA|#et5nF_e<;dN4n8JQ`h@ zqCjO>!+7+dmGD5yOYq0}O_8b2s+`QyDJ!PjIDVg(F4azJ^1t}2 z8s#Y9tsS)Hz877FgIe4TJ%+^Z>*C2?dPXY^mC875OBt}>U^(4jri2J-vNFRl=stS% z038IPrGDcw7ExLVdwy%v`n&0XmC^uoHg_YVZ`rO0P;#=DPYpm|ZoAZITHOg9L0SvD zh?^$c5mavCt;`H^%Ci2@qs;mwOad+(9CSGwvinc2Q_8l{xaBDlR+YPApRD4Y``R5{ z3FNP4|L&w6<(IydW9Fg#iyh#z>HuABa^ zBWuUItE;PBTs4jb!jNetHDY37l`4x7JHn9|krD&O?;j``&8f=wUC!&QLmUkRe#Nc6 zVS5yy0oz?$Vls1X;XSYsS6xer 
z6!stcxz5sX186-0^vs2eq2aoO>5(|%c*g&bOtL-#h?@966jgF|*|(BJzovYpYQ1N; zUjwJ`!mmHQkPP-}z2$1x=Qz&q)p79&ucX;SUxlq=UUPlA$5WR(J_GFMtlG)cyGp8( zJ-^Fh#>pw43gR}0&>DoRf298g{;3mj05ii;*5Y9WK*3GjoWQR$hs`l=K!OnoDo2aL z>Tv!xIe-Mn$?kr%g_N~k$4ot0-5$1)_v#yv(V>_4K}tDJpx0XQgu+M)u6_fi?i0BA ziD%@ETD4T*as>w%z*04*fMb^RTTSi;nwXdvtM~6;+X7Bfv!5unH%E$dFS>JP|Ry%&ZV-K;CG2~{~(u@Vy# zmGjeFlV`sh2bx9>PvR6jk`-wz+M&-n9i~dOk1Nx)>+CaiDolb5*6?8wPypYDk3i>P z3%5H}s`_YaVk+N5^Lg(PVPGB}rrOdoVPL4L9%;q>G{ixCDZjzzZyM445kKOERcG0k?$Gu{7C7dQAHN`(z~0=SlnoE>VT#|G_7d{z|ruOl{>iXIrb^PQqk>Jz8%{Y1G)UhsIk09Y;PQ%)u z2?)jIC|7D~Rc{j4t-AzxkB*BzhHaZP)^qN_hFxGmw6f}0!|MJ+pb%Uju=V~`Q`p0^`X zenIE`O@-A1j{^vz3fQ1J0z%SISor5imc3z*f!&3~FUSZv)3a!h3sN|Rh)yaH4ppFd z1A%84;5fxJW?dB~@|7q#U3Mmv&j5nP?N^~{zOwfi(p=!r9NCyo4PDzfHN5c{7-R?x zaxBRIvo|uN@FT)(T-!=lSXXwWD07}t1{+`+ktENzD^=z+g@6D7x7qXTUGTgP(sJ?Y z$7iT;7S|Vt?2m*LNN+aKZlNKh-f8oBrQgkdt;vr`4b;wKe}p%{_Duk36axP0YQ*&B zn=R5yy*lNR>M^;1>$2}ZQQ(fr1)Y&PPreDm0TFM+)?%|4=uc5`88e{zMi3e1V|jms zFo9UjTli(HXmNgzQKk^_z#{L&a04oyGd_K$a?bIJ01M6bJ z&5-IVA{_`0*oGvH07I2fbYH&d?NAauR8Ec*79F@aSP(jd);0?h>0gd_JR{WmM(Nl} z1xIgj3kti8B2e?r|IJd~^e)w*n>gTv(7-*D|FG!<&OsYvnd^Q#KO?lf<+}f&V{QXq zQ%~h|>zCx|{@gSrQS90N1AFT&4PZ&EOSW48Er$h+0Rnr4?U}0l-|u4-I?YpTH#Gko zwmuNla6ZVh{!lAnz6U6osO@+?$G*3QuatRXv2WR%PsjY(swV*!74OS9X1taOazHEh zpA5fyY@!_0T9)}Q03){=rtLJuDYt$4bu?EJOgYu#?- z<$!w;({8i}Fi^k*pwrF|1L`n8>Q0yGuYL)Q+W_p20FdgTK-iN0#~OXs3P4qaj@?YZ zfZo)qO#FfX`lfyeHmk9-JSR3UH7;O-RXm)jtBE1})TS0#`s!{5LM;h*CLzMPZcOo~ zYDDxzI-KZ_tY#VnBW<%`w`{kM*iqQ>h(UwJki;D-o>PV)0GbfGT8p6_s5or7@f>4B zixVfYT;!dz!A;Vf+yvNS^z}$whaPe__vLSEISVGqa#uVrzRHOW0deUTI;H4*%x5G0 z1*mZ^si1Sgh$lewovf7zR-CP+9pz{ELeJ;GtquEMTY_DuJFLr_H!|7Ihywe75V_0( zeQyAfLP^x-N;PmQCaAI>aRzixhv?`X#(?n>>US@admebt-cA|)hd}V0Ld%XdzcXU zr%)XsWo|!_8QzaIZl-%psjB|=AnJC|D&TfsmIN)8To@HXjl!0Ky{#z=5U&x?%tZ9( zeM+7gd1;|Ba$p#>F)&pXzq}mcfcQuF&sgZJ-5f7VoahQ9ELOp4a(|Mete@bd{{5cOrtJ8D#&gZ*@PXl#Ihax3!{dIL3Fi zV)s*Rt>w=oi*#b$;L>^Xd40D9eC_Z$JBd8Kli$wTA!^C1vUN#x+wk_q@BRSUWIL}{ 
z!q_#mn}JzONW~x~!8hEO41W~!#9OPY1M#uPw|sDKxYzV^=8vD$w_vLr!UgoUTpMFO zq|_bg=|zA*(`T-HT+zshf;W{bBiQW|nDS{|@dRhuvN3bmWzFj?4?F#mBJ{u%KJs_F zJDc_b%+lw5(r6w>%gx#DCb?rT^&+q?VOe^kZIPya!t=QzQmjgBnMm~95^iw?U?mrQ z7xXHP>zEhHScxKux#%mAE?10Lgw`-AgntVicjEdGe_*J0Ph>z;)ESrjoGQ-nPBV%j zlhpTs&47_|_UsGo*P~^W?2lxLJLFD-oKa^V-}Zhc4R%U37rE5*qv`gFyrOq zst)B)w2P{ezjimsscJC|K7=~VzNA)Y|ewI!t<^o z5|L`*H_u6X@RE7#Fo0m*&O%H{*@cOuf&h2G-wFHDuH|Y&nO9&S+KGds#pq8H>vHj! z0^h>>NqBv>3>V?IDYiLZDZP)x^CLyBp~k`F=0hpxunQXO-9IQ`CtD|K@DIRdwb33* zkliZ0cb~@4u8voG2;T*`5?Z;&W=Tuc3R&phO+Hh!E%wa}jLO`}0>@nclK48*+o3Co zHdaj7@^J4}9QEne6Qb&+JrKrM)_QlIlFnUy=&0Wg51RF_ofhQV%MRCuzf5s-*mLBO zD;nY5MP3ATEO+h+vx($^uh9ACA!;th+sEd!-Ge-+(bjD&6d#U`ES?kX@?Jg0dPx(W z;lc2__ZLfkVzCrlbHB&3B6jLmK}0RgGf<$qIcoiIDp;wuJudau)Dz`G(ltXtk`HP5 zQH7fo4&1wNWtY((3-L32&b2YYi^j_>(0&$*JkCy|adQ-4Lah8DLa&%g#7w^vAl_h} zcD#HE_~g%HjcC?&`)#1eli*+qRm!iAh7S>~b4arMqxc!N-y!fS7aeo91~Ik{noOPM z3+@m-1YgyJ&TJO>`VAx~3R~v+Xjn=b9AbGa`mbc#?5ml}{JPqiuNa{;Y4ORNy!=VU zwTmD&M+e&-U<;x^YomiLLpIm8F9|1{+Y{t3&%+DP9@vmae+~>fy_EGJ;{=)S_3B11 zB@JLt&Qi=GozPY_*(72$IUVByg9UWS*Rph|MK%(i#f|XN!zWcp2gt=-OgVS`oF)=h zPV_wDh@_GZW9E4iK!_sYc22@v; zGMUtd1|7AtLBr?tcfeRyL~x$3bD-|nI_khwqJs}HbpI0sA@k9|tXj%uWox~K{`TZh zp0EMd4R70bk-G=k@|r44d!_~mcAJLmI|)OC;m!LhL^u*%9LWdP=TBZLvQ_E@%2|x0 z?+-ix;!EI*|G2J@Y=yNKus!$a%wF1s)l;aq)I^zDuld>17xdY?rBtqE#v}QHi3B>6 zRFi{Hl|2e!FHh3lmOxx`suJZ_yv{rOiOP=FN6cyRdPSO8(D}m~jBqo28&!3Bhp>uL zqoN1b*z`KxI+ny+8+gLie%0;oS!GOo`}3!A<`V?>OUp?2?)H`SE{W?ZJwvZdBZ`(L z3RNZdlr?y$Ce7(qy(eBOWOzeE)bmUrF2#C1YUDO|-akJf?>Os%6{fh?PWCa@QquA* zGfJz51!sbE=$t{RHs61KY|=szxUf@bY0^d!V9a$LOtV2)Dm|XK`wZy zd#2=CG%JALW9tbQ_#7SO{nQMiqS`VDLkCY6-0bo#$I(9(GLH6qWrsW!Sdo{w)-zkO%$w*-m>}f7Nuq)GQ}i1?q+bB)7xM+eH5^Ck3{di)j^MBXw#JuO7DDa-{nJU| zwTCVE;%>NW>sPkq@fppUuFfOQQ+{>Tt?9h?n-edCZUun01Y>bD5Gf2r{IPW!tb`31 z;dQbTiD5?i(S+Qva4qvBGl{Gm2d2jmAxEsK?MxcuH377pJ=Sh4M^<83mogOm{xtxu zgUGNm=r5dOH2+5=#DY+Y{eEP$6?-WOQh+bwyV1+^SGW5Obg$6ymH zV$I%uYM=EapKKY@M@AKGU!Z4h8`0|QfLlp*4^;?8>wUcd)Urd*ftB~mxf68M2 
zY1U!p-v$$@Q$I%b2}Rrdp__AF+Qr8k6IHKpNY#tV+wI>_7CPS zu_;O5lGWes1Fv~hDRQro&X7zjyn%BrjnWL%K)*fxUBsHwWN;}QIr0$0phPeRgMp4KM4(RMH?E7NCB0=6CumHSX1Cfq|h|Wl06yT46 z1-{9ML0IfiuA~hjae%2I9pbwkz<&p7@xdFT#Aavy1T4^uh+L#XP*=BP;cg1@Ph~JW z3*iO$-}OGr`NRzg(u>lNnyx8pNZyM#^z{f3+umhgOZla9UqD_b)V?o^Y5xa;i30aB zLDx=z8vR4SE8pWTLJ16BjQVJ*Lm;mCqz79~zcDI_i9eQDPQEK-@7HPJV)^~rw%+~N z#4z|2lMo8qKU?Eiq!4g2L@;Wg-{4%R`%-{IgUn?@#bultR?kWA4u9}7Iw`&9*^c_n zes4LY@7yxqKgyLELyn=SRtT`L$bc7#bP8X8ne^Mf1O0Akt)Hd*z~rbQ6!Szw(^l$qS%wa>$dEv1n_ZsoBlDM z?6)||o7q4yK98j986%txK`Wz_eJ_<8k?gm4ULz={F(F{NrL!A&)q5N-#tcq&y8fx5 z(e)rTo2UFX8xw4Qw(MD)D$NTug3U8W-zru)n{}Di$7TKcss1E;Emy>1m4WiLa0;oi z4mmSr3yPXZK|=xSAy^%(5--LI+c1^$LNma72QI&+X;l5&FBHa=7YTaNjelTQf8sA2 zKqg_j0ul2n!%-6dz#mW*`B}^~L3^SASCfciojCl@}ik)G=%GA-0D z`Ff=malcehdyo;UDqYkm|6b#Qh8M}LTe7%=Vjm|*J02%9G`l*c8RuT*3$Qb}>srH2 z$fJ;vln=Olt3hQUB-sH{4nz62kLoyku4h7FH#gzo;JgQr4C%?cjTF4wuF(Ue^3%`* zA~MqaLD?~*Wj61bRfLz}K><~=YrB@5b-&&tONvIab8ESQhF5U%NzfGnU+ug-z2ja5 zshQcdY&PuG-CWWbkwQvNjlzO+>!5nuM`l?`t*KjQ5mn%+n9ts1R&U3V;M|FNUb9P` zsgOV;K1@bCLnSnEw&*njiRISRqLa{JPZ^qEdu(_e&{c@!645*N!ZtY3h}>i z%QR$1W4;40E5gp)4{J@oZ>EtHBAt0Dc01Ems`-kA{g`${;UVV%HfZ-IPy8&0X6L~ z4qI)9$nf32_&DlxwsnA&rf1H^Sg)SHt~FT(e7Nopp52_O41aWuPC>aKQfQI(TD7xD zg1)Ot@O(7Dse!f;Y*00fpA!xK?06!e01nCn_ZxKvpGrS1EY1gcI4ymG;=^Rv@q6QI zvFA@LA1qF93P5u;g}kEx1n6QzRNsUGbH943)389Mz>jqp5G=e{Sh1vT`+GClCt3^# z8<2=ZhzqcI)wr-PKRFMu)?SJpXw|*J3?pEL-ZlLpJVbG{Fa7Pl8lj(0tn|o)0b3G4 z$Blf7Oag@H#KEAQ1;8U-v()Al(rWO=aedF}>lgkjAKfS(f*k4IEZz^>b_t`k`vR40 zD;^drHD?7_H!Fm?*Y4<2lCE}jf1Xq4E56N5k@jY`8GH>#H%gBHK;FgCr_i~oR$cN% z?4$~bP9MGcFsi;OsdSkq^BUPq2nsc!CXS;6`LANl^4C|1iH|(rYzWLDct*GJz>w9mn_?GT)BN%HLxoBWx;h$p%>qu)q+?Gjt+tdySP-T`tp%-$Y?GlVa^07hfz7+ zLPvuwk|hq{lg7qG9_x7j)+XY39V;e3>hy=ZL%$6sip|%cvPQ=9B|kq-yZO3Qx^6-) zw*x&t&XP-k#At*!RBV9Um1xKOCo<#EE>JF&sW@8=PP#8>EvW4Mvf8vW!grm+<@P|* zcWiG-Q8bx=4iC6RKDv)jvwRAs_iQJXkpY6?vqq(C+=;|=4tj@4oV3yH<#9`jyd?^c zl-S*AjQ<=SJYX32kJ=rcs2-Wpwjk&9xjxn~~4E=iu@=ly^u)tPj+fOBzyk6bh 
z3}O>y$+J;_v$x%uF3-4x4zSA?Q-xp1Pd1c1HOaO;#s+GzW4FL9aN|$d;LZO|?g#QM5MY`-5{rL+ z1_f!nPcn~HSZek)qg;uO-Y8m;knD2? z^gfBRA1OzSErM=)w5*c82)%q_@v(pvf#773j;>QaX+W#jQc{zWI;uxxrMe z*@#FPI`pa)^&}@h^3EWJw%u|?+6N!#(4)RXU#+21 z?pX=m$V;j=U9N;xMslo)8;yPwzc(^@A-_4$&KxSosT;$7&2z~GR03>x@@-A`a@n$p z@J3;*;@0wfSJ3Y9zhcY4u{%33a-u)h1_4bX9o=jE zATad^qfxG-j{JCntDrJO$V#XbQ1^Q~z);eeWS#+7PJqQJpaCQn{kD4PgpDA_xi-XL zq4w#y8TJrytYzcquSkyGFEW3*fJs`Q_s(X+U$A`!F@0_lSjJb+6xV(mnEcKbQ^nc# z71Qz4xP9}@3b4vzov?}~DJ;e=h)nxcAWPQO&OmkZk2WJzq7$^gJ&LkAU}^Z%jsQoH zv^7@?c0ieZ{mN#^wsxUrlsHa~Yi)>nOJjM<_TK1< zimU*isbSyUT#U$Qk;(&KH;3~d)UA5MNj1bInD=JW_kYs^K8)=WI&7j9blv;it)OQx zIVp+9=ID)UNa1Eh^P~dCc)-9Qyr79 z#=f)5WA>BCOaor_){2@H5ix~z(DbRZL<9IQ;Hu)ATAzi)c46rZjN!{*j@9jP@vIA8 z%Q2M=j)C)y#Wk?xn>>NfmYewe?!7Gp)C6-PZi0nn1G#^j3``G1sbP){JXb71U1t&6 zIty>i$V(lX@BYB@*US?zMT2#bo|nIR$NC#F`D!u3RUWr;)}q%iZvj#kZ`sQa{f0YH z%pug@ipg=tFMwqcI>m0}aG|>|5$u^?s7ik{`1WaPQDzLNyWkelPxcb8n&YtC$sx&` z+0E-udXK6r4?f|)Y4C{;vWRvpsz&EqQy15p;|67aYdnSVR%^-G9Ie-6c zK})F*_GW*ilghM#rs%XtM;yh)rvk@1DD`3-Mj>z10OEt zwBnM`*0S;v9}?(8l;WJMUWApaTC=#mht2U8+2!1a?1)rC6#beO$}{-!@$H+T{`T(f z@bTTuLm3ASvrd8Mw3_zoO5>EH2W9?FZ2D!wK;-6;bqz0|)?-xhgZ-wXRjoJEN1(1O zQ{}j3*Pwa#y&9#{wgiQ)eQu8kbrD&aH!t~@Fy^Xs2Ie&ReC=BP?Xd}T$1|*p7UDQX zc9t=wgNF3V3*cK(^`BqxC%y$fv3~|6wVC|*0jD)5I1O?*vl5yHo>cR>1G$JEYDq&) zq3~mg?Qe@S+}6WVWHvMV^9D3;RJQdR1}mldzHf`C8qgu_5Jf2Yj*v`D1g=^{1c=`2>Zp%#_YE!@HiR zUi8pHC=>DZ6#YQFj8gvYn3>e}GuD}zycoNVIsD~C4!I*AO9{TE2a3lOv32n{{ngA BWH$f+ literal 0 HcmV?d00001 diff --git a/docs/source/assets/kernel/q_vecs.png b/docs/source/assets/kernel/q_vecs.png new file mode 100644 index 0000000000000000000000000000000000000000..f55b3742f3c6a862883c6a966e0c2ba0dfad67f6 GIT binary patch literal 42065 zcmeEtbyQVd_bwo*Gzij2i-gjh($WIb(j^UtZUyO*mQG12K@bjzbayvMgVG%8uA}e! 
zeed|iH^%+r{&yYYxHmgi%r)1X&z#TPp(;u;Sa(V9A|N1O$;m!bLqI^~LO?(yL`MZz zB8V255fJWHSV>B%$Vp04t2o)4TfH_zK#&bhNJ4v|u1(;7;KkmAOZ{*Jqe;#lG4IU~ z5|b<%*+UFmdGvrRV=W@BPdPM!J%QzSnd+IPCIa6X>k{6B5Syy1OtuFWy}Usq4{W@> zxV^nP8tC`7=WsLM7e342#6e&VVoxY&>p?J;buwK>6e*pmcPH{iXz@p9YQdg5LYY=m zdifGv%uMZ8f5GjJ<@3cF((sdm+gmMFys#=11TiBf#=F%hBurE?osR=vB;G-&!l@-= z>x+&0CH0Y{~=>~6(5DWpPM^!BjFF1#V8`Ba=N?*yXln%)e-Rr#Wp+z}+4A+U|<(-bHh3 z*q5zp2?l<--9h#5-OEx+RZO$&#&KpT%VBzQ9BdYG7gZm>9@a3u6_)z0{(Vp(G$Lh9 z&+)iaZa^szhhd6&bZKF2qfuEm160&2WRGQC)))u-OqFBgjWqBLDp11kG1B`k%a zq)3tJ@FeXpRP9{B*_-f7-j>l_KUGivZyD)i*~w`^lwBXc7=;NxUq^eZ$bpWC=#N4p z&IKb25YfZM-bL)fVL^-p`G&qBahxb}%}la-*-9Y5g2C9$cji59`c4YDHp)hudJ z>Yay2PvTZErP=X5^=#qm&^riR^LNC8gv>Z421RWV5hBTuowWDg*zO7-vQQ(EzCyst zx`!D+@=0RH33b1P>^y)PH*A6&?Z+KNSws>sRHDHASNE9^q+W4OVfTokOx?o{Y+GWo zL}F~!c0klaoNTrIiE!-i`x6N>;OrG)Ahpo5x0^Ajd1xe(i7~?8gM}W7G9lj$S;<0X zB8d}ovV2s7${ZM$%~6D39PAsQBxaGVphE0|v;7t|n|FxuLBOLYcP0W9Uy&taIWN89 z!_PotY}0n2{D5QsMq!!x9Cr>q^KC^--7@7lickc?n}{cPKHu*e(dy9=hRVd@l2nmD zdL|#koQG{CE)bI@i*ty7@vhZq^8r!E-PI_J@6U{olUe$l`aGYL{Gcwt8;$xBy48+p z)NK6mrEMww1XgcbhjD!6y-JiR{9WJIcTR;)c|KEJQiilWbu8c`)}v;_P72cv)Qz5v zEa|LWX>jnVHxi<(2&nt+wIun=+Z($X0ZQP9?uY7kXEFL#jGEvW{S?_xLP%WVr6SYA zFCm2MCOKqJ-!sv%G1$@U(|*RSz`G31rMn;Zs5^*IPM*GuCLwNHc0&$F z&c55U3UN9?D2;`of-Wg8I)**YtDEXkG2y+tNgPQug|G9-vuf1#RTtDgz91;(<<3Zz zoR=*t$jB8b@Kwoqto``c`#18B-}fYtCfG4G<=|O$aADhuutsZ22ue3fdwqQVf=BfS z^HD<5KGc`D%urlxlVFsV)rYobG8eaU-WCumv}1GMn?@{ZRs`m zHwQOI`(`W*wmf}KA|%b6`_3_Wxch;cx`y+Ma82Dj&2or*SxD#;#bhCK0cV0Ers}i{ ztJDFm0j}X1*U{~Q`DC7)^8EcWHZ}Ga6bIKP38v zQ#ZH`t)JG+)gtS>)sd;)w`rRpnpvJvs5!GX9H*QZ9>*Ap-QFGJFFaC9`nXe;R@|s~ zRYrIrV#TDYq2|$rBCU^^_ke6R9Fx zxv3s0W+@se^BkQi)%~M=ZfS;f{;VHZ)L430%eB8H4kQ})&L`@RR*vcyDimlbLQSM9 zS6;5`jO(1}aF$7yYU%Q7L#v)vh1N|Qj~X3~PjvIlLo)a72x>G^H;zMCA@;khyC|pY zr*-5-*f-dZ$x|r|1a$<$$j|r|Tyq5bU37NFejK+?w`>2<+Ys6`UW;3*>Rrc9YsEIH zurPFKZ-CX8I|y&VhWn>kJNJra4fi$ot@gS1?{Gf1cxS$wHoRf9)OuEQy!p%2gTeh~ zZF)8Tp#7})&~wFhll6Gv;5x!&KyICUYgmeelkv&%^oXwLzSBG- 
z8sQ+wAmMV}=?kfju#hw7ejBJ_usJa#V^mu?FONv5d@vwovW+4t7n}!gTK06a;*6%BN;4prF+_vRj;ORUNdRcgj_sl8&M66JC92T9Vi^=Y*q~3Q52UwMI7uqknQ4ruE_LFs5pwjH zz1w#G;xSAMwN$7+tcKZk+`hzCi@3tKj=@|}p;_ss=c<_s$+w7Ag*oQ_oc2!h=M|a0 z=oKo#N&eE=FZ(vxpME;U+N#P8Z=3|4B=Jt!GR^Vlz z8YlKhr&;HGOrp;+8t$ft<%B7VhWa(=lBi6-G*i>4BCg|gI~esDz7+5^ztw9pboLv* zVMn7w^TZCOdR+neSYAF{KKm0dG?y%}}HYhFrY;Zg+IOxAu=U!Jnm*Lu2 zB3OL6pZ>+obN}uE_M8Ir4CcFkJ&g94%9x5sM9ufb;mb?M@q-`G$uqvQ58EeuO)$EZ z?jRBI#c`}(xlhOCaO^i6QYvg;Q@`DF!lSL}&2NL}gZYMi z&>C;78`w|Xjyij_<_OV+yVRTh@o@<5&IkzBPn8}f~M-b#=>M*?(TqubSWrLLsdE)JnXI(;cH3R*+ zo>ATE&)<%T;hosS3^i&{2`$q)`T&9rGkYm#uBeCrU@QVU0x}{g0tz@o1V0f(vVWaR zBQhb}`F$M;0U^W+0r{_Il)w@GF9!VJ@BBI5i3>(R1ApOzpL-V4-%q1*W!?Gv9FY** zLl9S&l#>HTbrUBuGdpKXdzZeahuz=;hJ&m&kN^=q{D&x~M)wo6KWX*irOQjj=K?17 zwrobG_Qqyx?zRr_b`XTz1;DATnTrv%yX|W`X90I%n%_?dfOGg|b{gv6kGR+f)4Wtv zp_a6FGNa~Ud&2gFM&vFvHMNkFskwmKGwHux2Y(6ESh~142(Yuexw)~qakAMvS+GCl z=jUgC!okkL!3v&Wb@s4xF>+_MbEf^%$lvWeGjld^vT|^-vbUp#w`*i<@9H8vlGth?!2QQD% z@8|#9oqzZEk2hcb`^~4^e0=|W^&hwX>#G{hW=@j!wxCZJk$cJ(so8yfOmY*d~vJ z&eEpt6=^8yLm;jev+QhVVa~5+=8xlar$CQ#=d$PmhUF)A&o?k@}BK ziDmsncCaLD4#&m)pL+Ut|Ky~b|FJIwq_>RzlDR+i-}wLM!7mMoq5bbugLlXzgh)>M z_J9WY&42DO1XbXF718g3i!Ea;4KaBWKcxN-!URw9aSi=%qW((&ca;%Bq?a4f5g-1? z9$T^lu>X4Ff0?3+JLv81q7*v%f9??*%jJJu`hT?^@Qt7)TlJhxLhOH0lE0pGu*|yi zpBIEM0|7hon;RaI#DDA&s1n8hSEc{2O8;q3{!5iU(>yWjd9NuKcqS7~NfBXBl#*Z0 zav~e-JFcdWG?|qh~jBJgJ{ywH9R)$i$nku&Z z#;*C|N0e?xzQOtym+WVjy`37${JIXiPFFK4bs|U{t3uZ|wr~;CSVfPjPqLpMg@4uF z*2qx00~6e6{8$W|{U{Pc`P&|bDN={~`0L~EtM6{}^QaIBazC$eWr30TGT14WKShDf z`d!=Zq8R@f1qD(;`m?+Mv0-#mAERwiZ?mU0)eri<;%4LuYfovMynO=e4brMUcya4h zJb%QpXwh%FMETn?Vd5a};_t?p&n%R)t`zPR>IdHBK+Pr=&8wVoeh*^=7&2Llwsb_? 
zWC2&}-f1KheeoUM9`(RaUT0j+`Go|Ny1MmAN>@7CE$-usE_tZVZ@cN z(G)v?MPQD-5&2rmPuE8&JPTT8x+d~_v@S~2vt)S#I}g#2XK04{vMc*}X2RX=KcP4_k%7$;Re&^_QllOVF zr=JaN8eTlzjKBL^Kmre23YI1Ws0l+_n$8w%CbTWmWvO@O-bFe0b0<}@$Molfv*|Ey zs>)E9l@4nVPj*X9eAVW8+N-7&yMwn;T>CNp)cbf?#{5TLiuqWPYHZJ5WhbSyU$*~E zaM7Dpfvr1Q5{ZMMho>dwG!vttHv$9Xzdb<~De@>tKZ(pxoa*1u7MW43F!4 zket=bJv2AhoJXGf&3j>829~Q=%HjMr`mE?nLzal|X0Pd&1i8+a57@n|cOv}BtyeoD z!#St%uB07-E(ajMbb}2fr2km=76;rGHj*gca_h&Eu*Pvs-S#`&9u=zN8HM_|{w>r% zJ08&aY}-)-4~&R`heDSXE0XrQN-oG0BJW;+2f;$YLkytA7xVK%Ps;h*g>AkAGM1^A z)q8?u1ru~!c~-rV;Uzt?GQ{+<@F6xbwF(AsRm{VY<5tMnk=w^+d=6 zktWLYBwlU1S8CXkI)52#g77UV`$3f zY`&j=t)F$UsfZ8+df3RmU2p5 z_u{=&u@N>BZtM3cPfesJ*VPb^VdB&up~VCt(xP4JXvox0UIXxksT)mP-_%XzXKaykVbnMoo){w>Dv;c;bHssIyTlM01@3TF5e5T2HEKW-=fD;7 z;}oO42Nw}0n7FCGbmA4p&$diab@^Q?#7Il%dZO+&^Y?7kkf3lRf~Cv4mh>P?xVmuQ zE_n(6X$q#BTrL)ai9EE3In;#&22XtlpiPpjSb9ICtDhT#@9{5X{7Fc!O%xR`2!>=0 zt}nzFyODn@ITIt|?zYn@ncCpPrQJB~YV#Dicm}ChOhO649*LH%12ih{R~^w*(Km_t zt1$!s$5Mcn^LMJO@Ad_0abLQ=)OmY@t)M)r^k>V6{la*aL2;Xv_(l1=W?{U@`W%tP}S1Q^+O3A*359IVV8 zYJYtWgv}Qzf}n>lwD~O}_+Y7Ire2^RqGQs7Ym(D@?KWIE2JJfpbk>;&uvwAEYJX$J zAzh>?`lfY-t?P%t_K1G?n9mDV<~aCqKKuy>aL$KMjwTP5HDIOikPYlceIz=;zq|f| z*TU9~MUR3N10X2kx5DjmU?(<7AELmwm8Cqu-0DJeyTpi;Utv(p;B6o{%^t?Z_Z!i{hiC}A<)4)V+Xd*nMe}KGg_Opab*;e*sTBAi zgHyq*v-fXSxG6)Vf5DJLbqM&ji*ozw9am{AI>tV#>snBwAm4okZyX=TKm=>xU+xU< zDtu!LG7Ny3@DfkpfZyPPt8&+yWJGX$Dt7)NJ;k3>k6Z9gh_rcxDLF1;A=LMTkIVZX zYsd~?LmEZlCTjni(?Di3xFjs;fNsZPJiP;sCjLJ^3UDG?)UOP6MZ`qhp<6nhBt{Gg zRt|Fhv#o)VmI8`pnD?=e7+4ttmHg-M8<9Y=TpSP0{lO6vS46{tVF^=w&(}dHhO!%FV&{p>PL!22Ae#KmL`0bOj=NW zE77Z;l1D?5I4M@61u14p`V*yJ4|~jiwG+G5J@#{f?eG%*Ljq#H$PKlE2HwACREh9h~IC49}j;X{+}8TCX1)YU5`^&9)w`fnCs}1hjbC>yz=} z>JXD*$jxaTlzS02QP-zy-=x8KW#F}Ero_Ka$hqi!G~TZD>l;ZqA6L(KW5w6QuxaN( z;eN0K$EnYi1oupp09vm8j<3KqSEBP~)DWuwPrYbh94ecCYBRy&+=7UftibjqPdxd+ z=fLYnvPsm_d+c8w+M24QfHkk~gyUe@PP_W$U{u7(mnK-8aA0)U@>xbX{9491p=EFX&DQirx+ifZI@JZ&_g<~nb}%UO{fw_2h$YGDni~( zvM~xuSDHat-#=2yMhmNuP 
z{H7m+t{uJ=na~lNC-05m`4Q9NG@5`XBW($$nLHCOfu`wOgg?H?3c_aAT;-Gvx-s{k zSY?^e)-3d^iry>Dn$fW-(;#MVstI~dmRA`S)!@=&VR6JewK^zt^jvbGN-7I#w)1rD zEl^_ltWWmyyG~TY(F?6;RC95A!!lI*XLlZ%V25;cPVF~e?xTuWVMh+(eDbO+%%$M} z<=D!f@pikv(miY&F#v;r;k##kQX7QSN3Zwh@_=eFzPfT7F+}={=Q$8##EJT%O#?(6 z$0*6r*JJ*A($H^yzI-kAU{N0OI5H5}++>8so%Zc3yS=EoMc>8&HvY3YXB($DrYY?c z?nX=vZ_z!HikHt)6@qSfHaOokio%AYX(-}NR>4?H30eotqfV3|wfYhb`^{{h7d&_P z`hlW9*eYAepM|6ppJ%vy*+sl&^}?YM1wjQ7(px26e6Q6x_lTc;6-q_0#&~jlk)6)Pc71sOwa;*0 z3AMcVke_5QQ`)dyOrglDr9U&JNQT*bw%}#nO`jCOV>v2qZzWE~XhZAp_Ho76k*Qb{ z#}VEIciI`f>8-SMsr$Bsl#E4F-J8R?{j7C!YLm=Q%d9PBcxUBS&yRKeYB5uNlEaFD zy=;qg4HHydS)FV?;kp*jYBMvo{!YmraB9u1Ifu+Pff zKdF1|e^t*Ak9aZGTDj1Ga@^#y1Dk|S_%Q5)u1(|Q^>Iee?DGYpI^g?K_MuAt!3pO8U#=0q>~vBp z#cMPRDdbZik%mQE3%{ABDr=acn~@5{xen54Q_(~;J7zr_LBgpQpR?*ZsZYF^q)(}H zSjZ;X%09HnKe!_`yr{W-*eQDRbV78ikRn1SsU15i;=pFBM&(n`%T=yy63T2LsmPMJ z8={1#PVo2yH$Z}opFVexB!{EtIF9#%c)K}IQc~T4n|>q;zesz~?>L9={b2NHxHgpS zR%)+QyV{!LK`pB3CVjGD6veuM*ZFdY&7_{QqV4wBN7m6x5Bjqjt5Iv%FRe7Yrjv;o z;GQ-Ztvu{xc*b_${gO2h3=>J6nC{E|_3a+}_rCUps1HRYtDk4>!VgN6$C`!OK3)B^ zF?>)so4raKEu>b>X_Z=_D#|QdF;1NAbj8O{D}lqaeYCc~>D#>a<@P7twaeM0zT12j z^9QG%M~Dp(5Z;1^a2($MruR<})zbzPU9&~zr zxv%QS{Yf!Xc&qHx(J_;o;>1HS7z9x5R@nS&F>8C9GgL1)N8i&|$-{JseslrQ zgUJ{m!OwMkxX9aeMb)|X#r%OKmp_$vNU-no1cnr4+Qr-BLeY!q6=z%YwC5Gq{JUx* zSZUTacT7n-x`+C^s2ab@DblXQbJW||elER!l8VCrR3aFEj5Mzoy71aPp<& zjd8PDOLlHuOrp2V>!(lB!%q^0?7mE@jgvX9(}SfIym5LpOJ8K%l(5e^%N|Me>o*2h zd4Z%+rNE$^LGE2J>Kv<&#JC-}W?$zEHPb!#)P_BrRV6;#Z{;}>I-lL@F~DFCVY%Fd zWF}+?>*-#fbEIpj`uU)}-Jx>B+**`lOeZ{BwJJ;}+HD3oFF_>&&rbZl7e8hF4$=a{ z!`$Kh$;)M_U}H5!xQYhvk zF77TynbXpzo7}Ku_OdmWvO&$&yJA-Y2P*1U!Wy_BRG4mpPNiR9qvWK%6f<;Rdw?@Y zx(Rks6*ypzXSkl|jnl0DC1LhEQ3AHr17H(p=ovHIjbA5O$3)xLh1F)1lv% zmr#6GqjI!--*AZwlDbTYaE-ES*ebAuh>wJ3;qi2fHkB$iPvP)$tHUD6gr3_0fL&45vynI3`&-pnBoDep z_wbgIZjUl=h1lBs9K8U({Qj#u{;5gFce8VL$TbP+$f(Ls3(KV;bUbD^y_6GPxdl6e z#V3#D*|Fvh?YfWk{t7?Co@a$YYJKCcEk1>_H8AZX?TKzIHlDA9uX;&+N;5IK@r`TW z=L7K6IBl-(Abvw6$+XF=u{=&PS046wBQ-HMQ3;M+T|GYzE}KLPYZhAHcm)G!F*ff^ 
zeB*^mP8xMr=67&nDGk>tMoY@Hz~1`H0k>v#-Udx}@AToMQ+!R1&Grg~jVPVEF;++V zw)3@k<#Ju8;@McyVl6K>%S9~Pt3*ubMZdjqN}LxFU8H-{(uB?#n?L-{&|%rm>x zqKEknGe6fDmB!FwjDb~5Japtuv)`2i@I0N6&!_X~C|fnv?X`rE`$)i{4TaYLGM9-X zphtkKhS*;k=UiN2zn-dR?VgXQ{zx{+F?)^iOJCIpD3hjWOZPQ$WO^TVr<%X&dg2-! z5Ovf1Ak}syWbu06&2?|&hkN$YAcEXyzad)2V%XCCZ8TA~iqYY`X2#}m zx-XRqbt$f`HkIn$%cRY_k;aidfsMlSwQpp}6cx*z#QZj18@wdCc5Ns!8@n0|6v1(# z?)j+nS5Dx4f|$l0zOeHJ=hK?6R!(b|7aN&|leR#A(#5p*u=lDrcfGNyMqhD@cCnr9 zrN~fve241K92N6(^^`;V3A+u0fW{K-4v5(}fA* ziqIBx3>&OG36#cyq7=6U589N{Ve8WDnM7|beboJb7)0zb)Ak~mCn3JT${$J}HgvcJZ5>;c|fZuqb>3=}$&Vg#k(9v%>M$ ztkXxEDP3&Q)*Vq4WcoW79c-i4H_KEv4;v<+IaB@fQxrGOsTq^IBi)XdbDlMAQKa0=&7U75Cc^`+;o1zx6LNS!(RtSE$>DYSn$u~mOZKXG(_Z@$=?h3thT(rQ zEPs2?gM~l;`bk`Crd;fY?4`mJ3rljJu9-E7&-K|Yt?#@c$3Sh-V4K#wg zSkI+@FR?rPUUH2g#^*V@2iJz=xC;LBw-XPiRb_>D4Pm1^cb^HeLN_CFg^ zTU3&f^kTF&4{oeT;L_ty)n8+O`x=diKAe4zo0Cg^))gJ(hu*(I;N;q6x8XSFK;gef2zcJF`Tatos=ECx`cRs z()KwS&%K(!!;f@urfquP&E{oJ>38L*rN`xIo}i%_+WRRAuNtVfbi;OCyMQ{W^taBp zFV?K2*{Qkz+PF8-F)z@PnX{rF?jo z@GbN7xY_8UNfbYW0ir}T`-Gw2^`^%>wnXG>)5b}Q^wjBAPxI=2o|5n*tuRpW{4qmn zuSM|a>S(ajs>IZpQ<8RHQwg3@Jq{MbqhLRrnw{wI>sDnrXuh=D1NpR%dmw({j>LBp z&mGcl_8k~fxt@=>sucZMITt7T!JR&pG=nW&Q2yCa{SpGPIiMV(q}1oOBkpk5-3zd+qXQ%{J* zh_~HhVJppM!j4C|W;}(ze?6diV?SoG>2~Y0e2z|2RhhVI>x+z8BKar%BNhSmo zff%(|R!mx;!oV8C6dJjMn4)OR%+!?M(+VoJrsNaDjz%i2rt`@-!MmxRW%;gi!W0^v zCzFO$^ONQ_sr$(+x?6-BH#K-^1z3zhqVyBv4+{f$R&H+2poXJkJ*L-?@7^z-J8gel z8;KLv*_N?&Sig|z&gJ=%71?`z*;B~J@53|iGTPw9mfkbuq`^j)G@z!t*HFV<+qOPD z9QDCL+es9=L{gG$D!gu$1x-_MIXi^1*tTX^+EFBFQM6~8LGs1u+jwy$f~|r~5s&6} zBHghXI&Gby;E0=z@%1F%68$&Z1Wai+0u;7q4?HVXt_||v{)HM=@Q13iFDWKD6AdUo z-wmotOaHJ(Il7CDWjq?(o9$#;G24Wp^Ki*m9R0nVUoA#cdOn`_f^$RB849W142xw< z+Wb!Sa{9XkIX&Nt_5P~FLrOqHK*%Hjo};Hx>7p;(#WAVDtK*TiyU*2h4uU=|JN^Pw z7=aSs!EHC!!3_^Y?sU}#H2>w zCA~BbwKTC$7*-mpGMjj4Mr7b_bo7?pJKIjO19BK|?v6ny?iZ3I!ul5{fJq!sHA_)7 zUgq2;bkxTw-3#5e&+zy`Qm#b4GJ&n;ckxwO4E0xc3sZzq_#TYXC-q0pu9vk?-yd4dKlH>ZG!-=`dxQ!do70 z#&h!A40*A`wE(hyu5x=3_l`twk3>5LG~Z3klLLlx-lq%^-f${hFi&(Yf5C6>OfZxw 
zT%Fw$3~Yg?Y_WDeH`H*jt&hp}tnBX;ozU#{VN@Rt2&SPBy`*k1D>Nr9Q~oF{DA5{~ z!wXrH@K$gt+JkWZ;#1mrnc=zp(R_ER((!{|)!ygX)ROWB<|s812&!~CZF^J=&x&ah zaoCx(3TnPEfOUO!%_n7ie(XwsmHCT{_zS81y`zV4;H->ziU3F^88N853<=&L?5F%R z8k9S{nt8RxNQ-j=DnK3xBIz~cP*g{K7?Sm<)k>SO%KfSA2OIt2oO!9YR%2BcnOd72 zdkQB4QomHrIrU0aQ_h*s?!D(MiY4IVn%SZl`Mbk^FhVNN;Jn+^<1S=G^cSIG%M_#< zfr`31fNmU4#d{Pcvs9ED#Q`Dc=vo$VT=~9&@$MO}CE!UbGQZo2Y#TcL&2<36L;?^d z?8zVVWB?N~BP_Y1A;|Ue{hyvL0CN+|t}MS32gnaL#5f;nWYc=^x; ztG?u?=7($D=J^Sl90ZI?5#Oc!!WV41Hqd`At;;&Vxl9!-q=E8N-5~vy(WS3lfF%r@ z6YQD@1Kd{_?irkpSc9{bBIJ+q6#z@`h7q%ShdwI_xo*x$AssWqp#oHT1YrEU)?FG9 z=Yj{RBIPmiQ41$2>1pr@K{g+XvQPdDUhx6uy0`=S%zpx?$Zswvfu?}gH20S# z`Xi*Jp)Q0-mJ5QV5K+H^_fw)sCLIw^;~AC9OJWpTcN`p4Um>X%Jbwy13TlqW`0e&o zlt8J+`Yvx?hGfe$eYJT%2LQZbG?gX8QB}AOuuI@sl7Fps<^q0>56;iY%Yb#;YA8)b zI$F}`_oYbG%!am&jMl(a7{2b_fNpI+prw1QxU}GPz`avit@Iu@MB2t6kBKM%AZFiEC6aSlBZm@xmUs@Is z{&Fguzb3@ty_5n)4F8qfh7Y|9m`=KT+=qmbKpk>U&<|ZCzzi5`WbeUQHfPY3fQ57v zCGJvd37X2IDtn*?&DjMsf2NIqtOKgHtUxJRKD=^_aQ;oqBQWng|A(Y-9aRG7YshiN zhYz3F&Tv%&ZfsrsLH_^X2oM59@ZB%>>iUuHvEYusEE$Kr%TVRPy4fj%syF)NSm(Vd-ihCZ?h0X1%!7|nU_b7ZfX6iP~_wh zoS|J)A(i^CuLE5|2)Mz6(-3`t=e)1+4s7YUj~HMKl<+YWRwxXs@pA$9`>47NTkySv z!5jDv`)>n@Y9UY-GSbws%HPBf3C{h72N#4dTo9dcaZc3KAsry?YRcApzCtdai;jN$ zr#u9ZL0*P<++ta1K)#^XME39Lali{)-b6k}dAPL0o;E|Ar;d9E)bg-^X(jBtPr8tVg4$7&TNuw9gGEf~ov3hc*~&y?If1D_KOXON9f zfm7;zsocHZfI|3Doq1iV4Z_x3(Ym=+87d#s<0MP~rIa~}?I-;%XBVH(pm%CIeN4}$ zzRvX8{~}pOy7Jv*rpD?AoISe9@Y?5xv*RiEwe+6C2s)*R^=lp)PP_wj8|kz}v9jy+ zI=kP~aQ5bUczmBj&jO+&A9xI3+Aec3R&crBI0(!;?Fa^Ibas|NqMnmn^^a5=X;?O2 ze!5Nh7Z-W>HXq^y+gw;uq1A_aY3D#w*VYVPpjkA*b(xPtqlyAwyKQ(H0F)I5w}2uK zo6}LvSx_Z-kCq@%j5^Za)%Y2VIo;KdchTEf$7$uE3KQ5w*$ufC-(Jdy?SbgF!sl1G zVFa{r?o8v557ZN+`UDuv+BNQ}jTV0ZtmhYtLl_Da;8pHMWHUHP{5(O%cIf*0barr1 zrx74Go1OATyHW2Rn{?z_@Mi^Eg`ZKn=x++k{OrtHJJHk!q-CylDbk_fb}hUBz}(`x zwp5|&000qgpC~}}N6I#XY6fT-jMDpO>&#T8N$zJxS85N|;Ksery5FbR(p4KlFEhnK z&)_>-fF8h;M^8tHL&aqM=IyOx*Pi`0${G(_FtIKlOEO_2@=8kLPvdAiB1__<)1h1P 
zbbun5ZR6VG_pX*>?T&~6G{7T;Pd&cqHATZn26Q|p>Dx?;Ez!&A!9KtOXIjLzR^41L z-p=cRqT5!lY}qocvJ`yw=JOnFvjh!zfy90J3EaXIoSxOe2^E9Ok;M-_r_)$lx>B2U z%Ycj6v6OBDOsRGA<@~{CR_9dAYKuW0IHnA$TXbK=a{i7dlRT;<$m$MF?<%WXn9&1) z{vI7Y7JM~7gVMH|$gnpkbhsmnaHzI9AB=RseRs<09RQ2(;a?n#xEa-w0;{8Qtvx>R zCPK4zJgV<$gN(n}pAyjYE7R)Slm7C?{ZzvM1G&}CXxyICHUosYQ9VBiG6BQ&BSiEno9ez$(w!Ah zPtv!>hjIV>bk;r%WH_PA6M!Hzzu8;7DYMy_Akuwp1iEbqJ`9EHZcGBO?wU?(kC6hn zuIE8!yf05Ko(9j+k}$h424o_NNWo^-*cpziD@>4VYhrb@+r zr}97b?bR((VjRvc-b}m(_3XCyT;YKU)$MtwDh%ZKyLM|vm25X`6NlLLOhs-l1`8P~ z6|7Std1ktp^eLA)GcO`Mwm%jIO4)w>)iHeYs5On^*-U=ZZ+cxEJB}#DzglLlva5>?!s;g{<-SvO)je$~%gvITkMr42P=e-_zgc=bwtaO2(lBT{dY~dD;sc53^_Ftv+KVCC zE?_(+=lpK3JN7Sd`z+4BzneLZwD+cUGPnHlD1ZJ{5LV#U{YQ-53~8^86Ae7KEG1oS zr+mPq3I^7OI_4ql(XTw>K*g`ybA2@h+i8b3tbXXt4ph3=vI*4k3^ObPkffxaC|@n_ z3k(m}nA`QqWEv<3sCsDND%$~yinw)hY!*B=xFI;W&LI0(-~?x4Xupu%rOk0c{#3Hz zqP@W5P2`nr_&HE#uA(7zJU--i(Z4f#-$kbR>bBiNW;f)dZrTgW5b~$QjhYtmQcTKU z%m}?+v9(Nqw=1t+=yrt1{xhn3`Nn@(6iU!AZCuh%eGS{zDO5<)APEvM^@#{|}bk(6`@OpyOdC6>XhsH5A z+i!0$K3*Kp=V5(DqnH4-ET|-P^N`sBV}0Ojusp^xLJ1go)~d1N#}uoeN9hM+E~13r z2iY9&m&&+K)8|V}TSKVA$Aq->9HkV~zJvs(SE|i=Y@`QyXqYoit=Ex`%*N(&+csnn z!Kt{^^k2gO2&fE^7#?yCUS|$y>N` z)C%s`(^}vDA;`xmcy2}$@;BeONDY7g(N?_ zl|dkqQBMEm%MRU{UBxJj^}v@I7NkXr)|qbd1%0ZCAdo$4kLhX#XuwF-Q%H@?b~svs z@a8MtYV016JwHH^@&dYLZS8ef z6x?!7rMN!=jk&2KOFEq$59i5uwQa2WoK|jVTO6i865cHBH(Na}DG+%xV3f2L17x=6 zmH+$~1Y%)o(mk-fW)D~kRZj51$oDSulcSmG&%;Z2`IVQh;|XxRomApme#2Ce?y95f zHt)K#f4Sb(e6g(s)wzV{>Ab*q6J(!eQlPa7nmUiTp@xZqTAret>t38BBd__JN++`j zYdaJ6sOTF}cYya0{n!Kw(|i+z`H88<89m}MZEHO$noh%DTYa2^5!(leO^fK$2iAo% znWbJkWp(9mKQM<-KwbtdFx>)&PIUh|ZSkt008vFmJza2KjYhDI`DJi#E+o7gqtFXc|<^kI(tjmXvC=1?$KJ?>ka>ble#bzTu zJfEwtIuru+obe21u8LXHI8aW5J$%gCdAG%;<+jW#n%A!B=Jh-_Z{o8xrf;tE97jvN z-IS{?%xb*$58a&l4zp776@k4LcCWQIZ2WrVVg<`m{X)uGGTgu4*_?Ko3%+)FUIycK zp}J0QtDc0B87u2mqJIF|+Pa9f_`sH-l9I)=1-2M2Fc~C{Co!`dA5$%jeNPyZaW?04 zdcA}+HU7t|p`c*$`+HYgW`0#7`nQKk(APzSc0~&@nm8&I%`j`pnVNTsyz@2XnEeIl 
z{MPUN035GSAd6GKetm`=O1X5LRi5&ISvj|37xkzXAR>N6!W9MimT9vo`YHOd7ktO* z!_4KA5DZvad0n}^&WX*Y2*lMF;#z%1`tZ~&>|WD5SI@~^%d?9xRMouvSk`>vLaVdU z-Nu^ZFnFz#Nlnipi|veX)199|2K9yc8do^JKktZoc-tj`-1Q*4I?oRH;DcbJ|8foF z!tc#oH)HPWMG?J?vE3Va6D8thc(XNHI5`18Q~}GFZ&Q$?IK-+5o4Bx5L~eF!epEFI z0c+@RRVTeCIdeguUVOx-9nGUEz}aTFIynrVaO4iCc~8)zB6hveJjSEJ(NcCl2uGy8m2J4oahlw7 zx{P@9WFxCru9sGl8*(9VpEa(A%8HkYc>rqAMw9ll z8+@kY*Ug$VQ@R^W(|S~C3&pyWH1y}Iv`DIg;$gJSsp1q)PF=rN0?m!-)+cJ`CE0X} zHGkl@4hUtd)DJY8MjumhBM21m>tCN_ImyeGc)^H3-dFet#G z#a^jST2+n3_jh=zNwZW$v_89ZS(Lts#{6#GJ+kxC#L~r zY~|_q8X3bXdx?|%ZZ`bvQs+d3Qh}cxoLbLMEl_B*lBp$W-1Q&s*m|>#52EnhS@02A z$&cS(NH+ZWh%C0{Lo&V2+xy={8_vkfPbg3tt1S-)Rnbq1IxMISPq|Ih#rQj2F|p(%4_dOJ;D z#WgfHm9&pnqOL4FK4n(2@-=Qyz++{{PRUe|t#P-iLqIRbc(>QrgaWan`30|1NZBgA zb-XK0yM~+yaf8vpS!0hn6hPx<$N8$Q@dHSQ)Qj{P+oX=L%WrI2=5SVfK0Zn=5s>ST z8z8r1TIHhSY{F#c;24;dbayjRx|NzVXS}-B{_VZ5b6w|mzMSiP_gs#*Nx=Mk&sU4=5%A7+$#T&wHCjYQJpZ`^sB$l3`!2taz%G{4NWs zYWc{L-EFANFREm}x1|1lwfs<&y)mo=w}fwHyLZ;@XdiQ2L+bCgFrb*gXSimcNA|V2 z$W3CzxNw>U0Z`^IK+A;A`;;+va-s#xaE$5BmF4mIt<2mS!4k`Z;p!2~uftRtHZ24z zos&Nf-%qaA81+}|k0>!xLFI(rISNZQ>(%vzRX(Sx&n8K#HFB4 zx%9Ct>-AM~M#YPGDo5Wr|1!W|i1AE4U&%mvoc;n2U~52*oyqJuFai7%7Z2nMlJiEc zF9ZK&eA8guF_wm^aF;7lmrDsWz>4Mojr6Z#K9_RHc@!T<>c8Zm7Bvd9M@6`X66;+S8mXaWk`PQAbh(R z$=yo8J~wO7^C)4aD7bNsC42eCFX&HFyrR&FAfQwl#2wLMHcWvv2Q$MgVu?tz9oOdL zeJ+zLcQEAka{w|MtG||FUIDpT-2s=gKgDM4{K#CO^JG}>7w!ElLblNE{GtHhgb(RnJ*De;6Zme=+GES*MNGD{uN#o>pm_p_KAPDH=g4 z`{nwE*O#1L0|M?FgyYcsbIU1QgZ4KNAymToxlgsYz8Yui`K3<+Jj?3>XHFgyd~W$k zM_r(@eD+B8H`_LpYB5WRojwh2&4D< zb7NX^)(^J*{Uk(bsXv=Sq87BIu+*4JB2&JAUb9>2onUsL`WzFNe7{li!7XdMG*XiO z+-=m&IPbl>=A`8978C`ftUPS!UKc=jnWgqVoV8-Gs?5<-SW2=Ynigji;8hUC<3gPx zXFUPP;a}B#AKt}KtcoV?_-4?b9iJ~ZFpl}iU_)MZAWdt1m#eilcS~_;dLt;wp~-2I z-m?dvk_JudXfZe=*V?0UFdJ(*p_m|*Vf@zVs(3POadbKTTS=wu(B7?nj>;N)O%=c) z+0qr_Ewm<74Vj>ufbpjz?C5lf=uFRQOYweh>Zi}9wC*7X<2fX2uT>I9m-_ydIsCnn- zZ0BO$Vmd-JH8QcOu@-MreNBCR=sA`N$LdUYiMztr7>Y$~)}M+hdk&~LB-0%pG?$pr 
zX&={gZ2cVRczydg<)y9^O2bt~>c?-siFwjvZl1Q1iS7P6iMQBekMS)=g9XRBf(9!c)BZlhI{(L3o(87el^qj2dPVe5Kpj{5-OB=K1g`{ni@(2N?7P)X!YJgRn z?V+Boe20s)7-q&b0UOB4jP>=jjY175+mt``K#L1g0ud|n-iPCwEq_9+fx43xS!1aw{U_GsIs8q} zm71a_C24bI8I00+@|NRg4%^Rg+}nyn_ObzwLFapHmiM$^dX7RN?Utfi-(;>jtx0UE zN$V3!{`pI{l;umUllF=?J&!#{%;`Yw{buKq`M?`l%)&3|dfoD$tHK9_OSbF&>x)dm z2kz9oCdxPI-jlOl_WE?R=zLLvpELmA))L(JqE|}8B_)%`F#y)Zm}MPeG3bueUU#dC z$iI&K)l0R)Vpo>UCZUIn#YSs>rX==KqK!nMp5)=`kAuki7EGX|Bv<)?;Oy43am!Ls ztNSOC5B5Ulj@+-+NG6}#rZ$^yxv*p;F8IA?Pd=$vK~y+!u;yISP21VF+W`rK3QIz{ zMQd*eNmf8Q-)&|}qUF5RuXZ?H9^Y#HNu5D{E`-3;iO*y9rCqjq(~qvGn@=U$q~CSk zo_beCEG#O(H!^yK%JtCN9EG(Fy}=f=^nJ&n-RHdFW}N5#7aM6EVl1t|h3UJs9>$(W zv-si9_N0C{%Oe6$G z>3T3WC7-x6PA+tHokoc=@e@RbUZIkJb1u5@s!;P;Zz)Nq;8Io@k%wMDOZO3~S@bxi zNGEDP307hM;!dpp+^$(_;q~(e!z?j}=ZIHRs#_jc?w{cq{nDH>J$mupUaBULe~2o1*X>Sz4G@sZEK8TpcW zZPUBD1LPyIU&2S5A93FudrV+l&~(4+esofib0TSbT-S+mTA`#n@!uAbn1gs@doPkh z17UMb1IGfp<%7;3a9w!Mk&OOhQ2?2-rRfc+g?l!r*G<6cEG;v#=P&p{5(a_za2V%m zoRx%~ri;xEI@eehPxxo;3HybE=pty>gGRe*2WUy_8++}j)(Ks+5$D-A>}~k4*V5u1 z#|pbRNIG?j?3=|OxO*FslBAe6Oz1j1Sn1`;bPwjPU!xJA|5i^R5KBu*$J^A5S>xFK zas+t4t@=`+j};1K>#VySsvuIn4OGuybBg*@4YxF@zfsaTKVKu|ot1&*@M#+pbz18C zyAp8zW2z7^)>4$1-)U{1YrKm*!8Pd(&`N^+prdqRVtneOi*kt3~u^W6~ zsqhR-*28-hV2(I4M}IlJs7 zP))fcr~V;LNTow%GVRUgV7n+8eg_b^$DkL=)-w~vYZ!9=TIu~E^p}>fN#Wi@!yQ@c za#ttXEwhRsV-g>2L?ozqJn>CcxOr_0Ocl{Ep zJ4YPALeV1p9cSiSn)h!@5NJF-MShVLxAIJAzAlG7S7MC)?K0MzxI!Pthq}+(pZ$pb z^ZOc)(_;c| zSuz-*a@#=4OGg)p5VoNAjYdjm?hlbV&dpCwzIsI0DU6NFH|3+L+%3;1Q?=HXWiJkU zq`jTg-X?l{5WQHiBpNGv@SUuohWzH|UO{D~sBH%x)-nQ$|9MLYrTnrt^6#duH!IVE zhJ`YXGx(D&aJF-8O^;~tzJ5aM?tGzqM|ZpTYz-Zig+U`|^R{N8`Of-gkajC(C?(*X z_A}NO7}B;|+bMbZh5kO+3&zhgVia6|oa=V$uWY%V$um@z5gj|0=X3uZ-J^sUiIVVT zo_)L#v%EUOj0M5+r;+ho4!&OMdR_#A%>!K72IFRh?QPIsr;)0HH(XQj2&6-;IYXLh z!TI~r7lm5u;jj6mN*oIHo^!|k1~A^x4H;xj)sF%9LxRJ~HrJD0Fm)ip|=7&vjT_{+E5`xuvex zAAGXTwIQ9;*&!#_H2r+@1FI&h?_54+z`;2_UU|5x`H$b=%J`|G3-$4hzb;b;oDP}% zGTHFvEpbPbPwl%YifwcP 
zg7Y%?1O$(8De)=EFI}FDKPt{ht+2#kS1V01H5ieT6_%X+{RyA3I+!CzmFU0D;lXCR z^TMj(;<>#@PqX&~`0HokuH%-lSF?{k<)v{Zip%hU6xwIpqd3HIUA?N-o8WYTyGOAl zre^2meOz4p&n$_UxiK}hfP{0D*Ywt8_qJDP`lc?2PFvw$XVF_)OBDBctn0A`;O``L zvWM+vxzomMG#H++8HhTrjbcSQ#g~MJ%i~~*f%_!=3q4gGb9ZGH=o)H8>B`lQ zbjI>?mW36Y4(`nucFuH=0m%KXCV_{?e5~vi_p@)WhpHTx2G}>g`!O`o=LlYYj&)rv z$o9)WNm;%QC{-)1$HA|oUEtb(v_U0H6C311D#*%hzt}faNJ7JRWBVeOx+p6l2?*<( zVNx8q^ZDC_>(A2=F=+0_i2jP}&d=|BZwlICuc@pVJS0+9(Q696WHjHElypatSCZCM zCJ$$ezwVXf0J&JAIPRL!;O6o$cTp#}CYXX}43$RXNqq8jI^0!RleDZRV~gKHD*1T9 zU7?@VH%rfHhFr_D7#A!AHt&%~sQR7hmNP%SPs{>ld{gIUbCxc8Vi~?ERW6p#2pCYi z?dzQ%I}QQch%#)Ah(=J+hSPab=B49&YoQA}uTE3Oz+{guLnb=wd4lvNyJkt-{*K9N zI@;8ql9Nw1l7kmq0?LQs18qQsnQIiAsI;5)_m!VQ^(kAY@E5xNdSE#`@HPCA$5K*Q zxfFiSH7g*?VnDvZZf89~qnYZsHj!lSDu&}Va%QsTlS9kfxLFFu#P_4V?9G($-vh48 zengClqj>l{CY1+|LtScKq@=PgtxKe%?bVmHc<^|y{%m2(jmp<=j8PET{Wc%KA_o;A zpXor>DQ*A3hr=)Aa@x;G(>{e<=iEpGOr1;(4wku@^NP(-84hkhL%`6La`^HPO}^>B zaPfOCiHTjIn|ZRKR!5l|!}+D624~Syn^KfANKB5XjLbJC_B|Z0 z>!+=BAJ5=TZZe;-I=1MZQNAy8|C2X-;q}+;8L)sH1VpGl!~CmjA9sI*S8-jE-aW18 zu~GU=0oE`OakMk8!kvwAT?diF;O1g~W^GAiRT*xoVAMnM0>$j;zIifm*B_oTO}8skX#>8(!Kz;UnQE~!t&y>C zFsG~T@@CNAwn~hKe80o3rC40I&jBE98)k(HqP}n#hrP;JM%56n>A*z-ULI@R_?FA^ zTlda%$=Lr=J@_FNM88tn)m80!sTU*+Q^)iwg6tYatW0kzgbDYjDTHwYrX%FEXU*!o z2~xEy9qgS}V0RKRdg9>qQ$+C?hX?>MoEZ{;c&vOS6Ro;FH~Z=L-@-3q8Tdq~um>S_ zgMlfpxiFXRCn^e~62J#|vS&-LVS&wL73DdAKp#o@s5;@dh>v@*{}C z9>NA`X1z~@i%_8!H!Toca|A;*Q0uIqu=9LRMdkoY6Q_YwaeXkgL%I@ z$lU;(HP{M)K-t-F4bm6=GhlKt#%APPX$ek~z`0_CFCj^Z`e+Jg`s0_ZWZC=^mW%9D zfpkIx-Ql(w#^iZhgVhv-7(3o4Qoo0bw!qwEOW}R9L5ubE zp1bgG7B%3JwJAN;IX-wwt+%|S<>k0(&_vE{H?Jrf1$Hi34P<&`_-C2)mU3g6YhIL7 z-?)Oz6ijytviHMy=C1kSY?ab}{)Eh;!D;fYCcdX~7m#CcCP9hvhMuKXzo3t&RNXt; zC(JN}@!oS?R{yudTeK8VzJCrTxd{nf=3ip26T{437D?lHQae$0wu?0?_(UcvG7bVf z>Kww>zh_(V8U*p{%f^#-{ z7YNJ_?F+@kzV_=;Au9@VaGkl}qbAHt$tly%tZ<~>?!;B- z;kryc5KGpRAkF&dnT@ufAVQV>znKg)ewt3Tb8)nQZR+lBr-ub%19JA$6Po+KM^^ku zr&|m9)cj_l#^;Yg8r_Yc=;sBmsL0`G{_GzsQ`ZZ#(W~`m{%7_Q?eJ*$EqGuPb8pOb 
z@&j3(HVhULZNL_VU{Aq;o@UqwY;v!W&Z-u7mI1mM*F{7Yglu#}md{@AciTXOkQ5wR zsK7hxO$NGF=WL}9%`ac9MAnx8rk<5N@N3Hzc=|gzGY{;?E9~8Iroixz2XX@*ZlkuJ z3R^&N?CW;4*iU>)F}2h>g<{G!pzi2wpc6dM+ z`{myzPO+-CC7ca$I@AYSxP$`^m5YC1YfK#!fx%l z3mZWWUmv?p1RW9m^9dGq4kN}_kC`f{^d6*oPW-hrURmli>2>lM_Y+Ip09YvS6KseY z$kvRlaogWx>B9_tejgQ%r2M<39{Z(I8_ArO2m+D3-@i`k`s?#d&y_XgP?UI9=Q@;H z>HaWLcHafugc~NgsQb>~C%;OQ^7om=4M-IL8lT7|tKMQ$HSVWhUdC85x4Yf%xSTWM z`)oB<^k_#PnoJmyc|M-Q4M-{zYf9ZeeU`Fa1~FQ!GiUOJJ!XYj4?HC9d=9X ziQOr(l*y{sSHID$=18qa^b7k`IOi#qT8ux8gz{1`wH>rU%-vt3C%bKyJp z?|3nRH$g&i4o`*hr$r67P729krgLhOn7kKUDh5>?P6zAj4Kz(iGceU!ou#6?j@XsC zeB-gn0IIWEmD!EPRUJIqy^!0kMn+RcwYJ3z@!J_sjJ{7=-^6>7v|_wpyM`$WXC3sx z!Q0}fUE4C~_e45Q?R1km9o8Fqo9+PIowTCs`s>v=ZQDsw+KbD{E|i?Fzgr?lgv*DJ zgt&y)a^lVq1+A9;=|0T#g(tVL?Rz@u&KYYmI%+x5Y&m6@({o`^PL!Gkf$8Xbcs94; zv4YcZ*m>4T5A*Z|2o_i5XxHX?=M&;%cukEqJLA`f8&{7(pcr${pJKOt2!uguy*b)K zh0xVB(jL%Z&q4BY8%Y}BO4+x3S8P5yz`!UlShwh~xRi?~-COL}e6~oba*my+O2{G2 z)s|{tanE(pNMwI#DHnQU%Oh{7{}|jV!(9#%Tx8{TS~n2&hr^K79UEpGzqo?6)M$Ob zBmtWzON`nh*8?o1w$~4KL3HBNHOGW;ghPL2HcUzW*MIs3Z)FU3b^pK>K{5GFLB$s0ptHm?5FKqIa_&{_AR-haG3Lo+80gU(>P!}0rCKO@t- zwgud*aB^u`GB(}hQkB^-*YRStHS@DI!|GdXTobMQUW&!5vDJR%(V#}=NG=^4%+aQq zZscGM+9q+(o&$@V@?oYnI32gQe~A71sSnd|{gKjI zY<1+Ecg2I#0S$J;fylKPEX!D}Fm9uLYw6sz;8dDHU(agsZf}~~`zhbz(I9=GvaBs4 z#O^|Ja-j6WGQh9qplqS6l~^iWz{L(ypCE6G~7TLRO+jIY_A%Zq#=}W)6_&)qh?lotWXnYnv5; zo2T2B8m=v^m91t&O=Ez*clYdCY`AqUP4+{cHA-!}M`p}+KJ>u!q+>phrgmql<#uhg zvXF$<`(NycF&Ky_*gQQs`#Gi%ShACfp@uq_D-j8t0U*%pW z>}f8+q z!T8T_`;F;@1$HpbvthP0WBSAPHMuP;l=#H`_SiYZXxW(aK@J&miWm*?jH_~Mps4Uh z_d7RBV?VEcigaJbSQal@X&p78UAyjRYQt)c7;HeuDp8S6;EV!0*s}3-2%D)$!e1D!PmTK4{+5Jtzh*a-z;G z&u;ZvR&k<^<|uVtyX_Wwn1{9@@r_P6+DtooZuyR+x4^JfFP5_GHQkQo)?)wcAo|EY z4+9sCc~$M3<8ocJ`z!>w%}CDj>9ZRk(&iiL)Sic{wj3J24^$>7vj|U8=gx`OWuP1S zY!0`MUdMp&Znz56j}0#Vy?5Yb*9CcD6>FZ_|IR)`2zp-V78tct7eWF2swlj1um8AlW@n7D26I&yA!=(m|^TdSgSH}c~bS+(HVyZYLqmc@q z+kB)*s}g_7IOzcQ$;PvZK5f-#oG=}A(J8&qfV7Wi z1(i(5jJOpx;<~ 
z{$7bI^6%|`=hku;WbDwa)Si3^_qD&@jUAm`^x#R@>?sK%jIgcr>z2!HkbNDinxhr% z^yAG{E=nyc@dnZ_X*AO|3dV;&`lB>AWh3_R+yUs6)K!K&^Wp=Rit$6P-mZd%!MxD@ zlsUlW!CyJ%Tvs~UQz~ZBvS_`seWCx5<{Yvoaqx;S2M_FxN2}-bbGc~5#jOnxsV$Uk$_I@{YBkq}ACr(nYrM6XKT7@AYX+t_SlIW^DeE}fQ zlvrBa`ue;7VMh0e*=|=y?CTB9k{eIv2!bHgQp`O; zqj!`F-BsQ17(^#@-NCdwfS<@|>{6ZJTEI=j0fI1Lielcz1ogUppP6k@|01!xw#ltazt4!`X*}baTzWyf^zJQV6HHC@FT!tpV0saKtd6nllUpM5 z*pWOgJd$m3D_kvA0QoYbuEx)aA%2jfzcL=kQb)NdiM}}D8~gpM-$_5Gso7{y3|!gK z$~zsUd#4btuKoKeKLjLm?nJr>Ys-ggMs1!EOI#&b4{%ks zK?6`mU^$UKY^C4(@t!}|qC}&cz8?j3Y=7S!==$@dpbD z(<*_=!NK8oGLP4z%ul3G2mI`m!wFq~rICf!5p6h%4M5nWfy9s3IY^oow#?}cv=&KZ zSpHOjCifWIvHb~d+v-2D!`06R3TisP$@SM*1}?F*(B!hQWttd&uNHe>Jdv{lSnTpg zQLK*lI(n*{)Pk}kDcHp$Gc za^aprzggvF-j1Z%SFns*YPXdu>S`M3(KA2{Nzs=^B&I+?bxTLFiQ(beqz@S%I0zYZ zaiyNMlz*W;c=2$^`n;<`zG31To6cbN zthDIfd+iHtnQj=`LAT^$Qh_2_X;s*A~ZsEa=3%k71M z>{t)Wi6@zhUE-&XChh%Ar2?pD=5ESi`-@dQ{HrZh70BK8w|OU`-ie#rE>-RqCc4-Z z8;yXKfsdQ1y%z;_v4+hw4KBWk6r-1T#JdL9pE!peCiY1bG~FK7y8}aH+qa9mx@re) z=Q`Tw*~ftGxwZjSM*|&&MjF5Fm~FY`7gQGFCu+d+Gkjvw9Muzh67f}?h5Kb2<8Eo@ z!OuS9a&)kJq&HTvl%qNJ0Jvm}n-8zlAlsvUJhKZ0ycnMEL}s|0w5R1t_k#n}YUM+lzI_oO74TCZACB zTvasPas+j+)1dC*SbF$Bag&Hivew1=DkMHfCHy=p8#Mx}6J<&E3c_2;11T@aoC|Y? z<-RU?mvpbCslO@scpdN0#BF<9lMHB=(wjQ{`1VHSTIAyd~;&4Ip9wd1r|2a`0 zf7YA+DAWJizCSvtoW4n#VW$F@ganoC_R-jGWrR1`;sqSR7K-s>aCB)-m(83{99c}2 zCj0d<)I#w`&!YuyQ!&b_G>(s_z@|B6k(LyYn~#=N~RI^49xzL>#+LoZnl zCr9G5nUM+1~DToid?Y|Lf6m}ntCMk&wIOe!KTI@YZaly0#l`i##EKIKl4xdu-WGZHb##ancCb3Is+^j7JFv*~=DyOx+_9bo`TqPtY}>bvFdM$=928)$ggcd`g_ zP0xV08J6c_aM$f~I*vkhBF;EjB16?V*=?Qf+l7kGf~DAjcgJ~cOA}X%+FkS~;y_7~ zWSdRUWm1oF!Vz=-g_DAdVjhjvZDDf@&Aw31?167_(N{1cav z2l(@UtZx7+54jmagjA_YjK#Pl_Dh>^okSV6#}H8Fazt!S>MBW+Z6@Oe03;RzCffBw zwKBnz7|QdLRwu}bS%)TlD%XH~Y0rgr4!v-Gv4~#{qeL*T>>_>Z(@1{-=2dV-;$zc* z-vk-W*eGUj$)~3ZQinQgut?xSpt582!5I=M3ymVw8MD&U7N>)Kb9DE6XTntU1I4EB z7LPh;&19WjupS(2o*x>{jiEx+W32JyB(1%B@fxaxlO?I04JVm*c=Cz)< zvsm>1zv12MJa=IB4nz#op5Q#RP=&@!1`Y z$csr5KO~yInnrun(+x%p#Q*Qx`sG=DZy? 
z-MG%LnCE0ZC3`jAF2w2JSK}d_W!ZPaWbx{a=#RJoki^Dd4rcj}k^l(cGI?~C-bynM`~GvlV6@}nlBoF@{=F~L!+=u?51G9)L!NjPAsNnt z1Q9c@yM#P^7Go%Pux=tO*Mxdab->QKqie(x!W%JXmOo{{u`Jwc`AU|Xb#3YBu0LYi{(OdeCrq8>fUC~EoG<0%fM9u4E zozKZw(ECX-_}nKu;?2vz`)ltjm(8>cy4Yu2e4NwxuhS{nIBy#A5VHv%zat0H(ye;% zF;cRHHfL)4jcIwLMIB6mY}9tg1?jB6ok0j}9^w{M76}2*@+Y42>54e|5HAZd4=s}V z1SQ!;!&VP=H+;+;#GNYOnmkzQ5hSD?@NTW?o>j)C5`aatQIlJl-kP|nzrhR`!9)_wtP#5;|go0k-2|4a~jIJHwTUdho& zCzON#kMxj?nV94WYwQ|ERH*SiFR|(!P?U)~K78M91fqZ=&*AnMUVn>qQOA{${sW*| z_>O-u9G@xjd_8`v;Bod{#KFExgW%zL#wN$dts#hY=+P?JMI@C2F1s$wGt@aNSr;Tp zCftXB%tH}tUp}j39^~IyF1xf#%Mb4=*l5)~;!4ntEh57WV6o1(mB3#PV8ab~d7fw( z6#~@IsyIH6773F3Be6B{G2U3D@`IR-FiFonO;VNLtXl!WS(>h{ujh+tn|KvVG}KUI zL?#4iZ;AAwhi77y<{eLH-4;U-r@qJ^`UJ8)$=F-peHb&BF=yHCE;HDaou8Z zqPh~X@A8&w@yj_x(yakE*eve;q0mbLMJ-31)t6IJ<2J+hje$#VCHs{A%D3XvTT_x} z*izeonWVZl^NE$cXR}Ia@>^uO1qN?`j154K0SV#J0q=p*788|*_Jv=XY60*@i%yO$ z{)H5&qb#v8j5Hp#sviP4DZ1co*Nn6aV3{EX-lv$XVQNm1oS;6Jc*Mu@%>kJ+9=tas zsG(M+CENgk{1?BTc~qSu;2J40*X;3I*w#vk^MG@01WeE@KuuekHp|eYW&~EPgKO$n{QX%r1(-U$T9ZDRj-L9TEvZ6WUtF*KHmT3 zhko1>mqv@{ER)DFGxp}>mp=o;T87B_RUzfH9OWd|xwrlHWdS@O2f_#hj4OckTri^b zLb8F&!8@K(mniFR6<@7oI2vK&wy9G0^lUIX9buy2i9r{u8BJe=y?~Mdu7zYFk9`5Y z?*8l%%5Q%EJI$$_*D^~4=>|ez6 zKPDxC+?S6x0fuB#C>jP13(iMSmYdUVb7c^7dT2ic(VzevFrwS+##@}Mif?u}p7+$J z96-Pfbn#=WX<&&>RFHlqJ0yQ2%(c5`#28m#k$QB#@Y=Wm3HaXoa%?^jFZO;|ZqSC`YoZ&=5TWgc&D~Rl4LUrj791tu2Uu+&05xffDa;{ zR@4DzyJbmsG5ic;;v=f(9gpdO#hkHbR)Ghsg%9VQZOgn~OPTw5-FE#l!M`{um-K9<50aV*dHTui?slm2Um8 z2XqWf*>{^1l;D4O?)keRcW)|i{HO{l=`e9mP8~xr=H|Dj6g}9a+TZ-0f+e1TVN>Hi zvz3GSN`Le0a;N}&dfM|TFY-*6;SrdHU;esd6VA6Dc)emb&1H#CZx5HG|dB=EiZ9ry&{=~D`s zQqB^wCR)QwA7Vjq6~V|GB9?f72l13wyTb0;@tfNx@C=b;2REGQpG$-h=9Ku^RN_mb zvaLw*m2;0-4Nky`2ffdg;(MC0lt#xt(O@)W1k%8qc7Mh}d*-ezb?#&5lR(V+{zdgz zZLp($paw$Lc55)(pP@3c!x8zE!nCv@IJP&Mt;ye;p>&nOn}T{vouSMGCMy@V?D}4a z^Em=Dv1Ek=ky)rAOn!7P2Z|XIoUgqoXos5r1m0&!^-?k~$?jiBJYp#}XrlZxky}?m zQgImW6q9p1=V)Z(<%8t7OW4l&KG64T#`yv1`}mnH5DsBL5+(71&= 
z{>Uv!^7OS(9sfnViK8A`vD?hcTf{zi%bG0HcExm1ZSjtvZc0b^^hQ%s$c({~c8={V zOv5m%wQ2qt8_Rjr;u==4pm2*LQeR}N^pa`GZ_u~z0}i$y6R94uMrm_6ZjoMc z!AAH&?&G;0rw7)$5{D^A6{08aXQ4@i2dy?bxPFh^3r}Z*p9TNym75^;d8ZbB=+XUf zcuOu?y0>Yr5POfYNwQ{cot|hm)DFc!7iziZAUcyB+)R%NE9a-U8>*nv#CzdcS{3h% z#&V5Z4dWV)i&>6S&2Fc?tK`z@W~h($F${f}HB$x2E!olaZ1k(Xdx7G}QIW|J)AwNb zQ9^0MI~ECoiGkQM=b4;VY|S@e7K|Z3!!C!CM+VwL+c+zye5*8Rj@lyTkM!s~OCs*u zC~}#1VmYD*6#}Sb!pjyemna(o*!XiSs^4U)ImEm4F4CXVPCathP1`I~X)>se<+_^q ztJ+S|XmM(9(+7c` zqdeRK<(IGicbI^+@5Qfbw3id|tp0sisX?mI&3Z;i`7tatbFskR2jbs87lv$ta)bMn zupb~zkP?n`$II2m4UkCHYJ^fTzqxtlL1zBT>-ITJ3DQ!pc9;)isM+?PmCkWxhWW4S zX4o?C-mz&?p%{rUeXs`<6aUhmDDnDS(^hHTV~@44vsuc_6@ z7K=oxr)b&Hg$t^MbYAh(GE6crPyM8;l;g@@R1<7!!Yce{3tf~H^U?UPVEiAyeS;`U zcP?M{`L{>1gfy9v#gA zy%h}@KqH7-xM$%-beegc$U6}@$|mu8;bOG~%f%QfZX^8BqOJ;iQxMNx>`jt9iO9SV zi*&!hL5FM);7QxTHqBeV(qY8}Uij5KSy*W`{a!i-EI#)$?T$FKE22Q+*JuVb_^O2y zC&qax8$m{Afkr(=XA8QnSa>7MWf?5!nYOW-iK$gc($N4HwsUXlkG4c zE#X}mEyZ>^HC^%>l5>wn-UKiU85-%pOyN(!F6)%AyMOLu}cIei?iPF`##AmlJ$)v|{P8 zq$`>CMIDvCto$5s;e=?&9QNcjj1W20Kbe}BAbHcxKU67%D#PbBqvOo^52~l4({Bqs*nm_ z0J-FWl5<@a3!P~IFy&FkpG^@}XUW62W53mZN;e8onSkoj zpB~nJT6Qj@{d+=W5^KR(pK4zbc@+ZIHq&oVNA`8x{tI?)E*P)NfP=6dR)gB0N|i3U zBVOpa`QVAgfcii75+SJA!zbypcs+XX#}iucJH>;qQwH6pY#SO|i^lQA;O z_HO5bdky$y9`pu*guT632CK_BTgu&_Igegp=r(i|QD%a0$aomG7nz~x8FojEQO2)TDVtuhhEzfxDfmH6e~ z#QC3#ORNGawNI6<<$`}E=gDn^!)S7L1qfLbDD>WphGh;|TxGIGtV9 zKF%(X3S0bhCfD-Kh6^Tt*85XFpOKqmtN-Nb@3Imkdi27q0BNe2sax8+Hq=S8ad{|J zEHFoOPoa*^befQ70{VRQs!Nu<%IJA2d&kOcCB>5-6Y?xO9qlxV67DLRo{2raz@`d% z$q+8U?X>c=cQ+fbd45KOe_bRuL#EkB-<&$eW!#ovf5_AKtm1fRbTRT)%!8?|u4Az2 zXgXMRM4u+m+S^*9j4K^_g>q9Csa|dLt|~KJC`rs`=dp7i9;#ydc_IFz{gsOGr4shr zI^v|J+hAI3@LPIOo@sG6?b))qhG5!Yy+$yZi*QV(egCTE9C6-|8=|Y6))uX-pdgrD zc4(I!|I#q)^IKx{&!%INOyt4zgj%ytUWkP175oXgpRO@?2OqUYmQ26a?J<-0A8`WW`(nOY^h#O@Iq6#%*u@k3 zw#5l>4>i(|eOV2$?#$K)(b<$9#6a!OADbK>pb`$K52a=QYNf&0l@Q5wm+7{B3pxw#_>Wj-5i9E=B*7a@Vmu>Efs#8T~f4ZtbHWnD+1o3x8sg zr-I$%ya(CYW=ARx*4j_*1x@|d#r`uheW_v#H7SF$TcM_ZpUCt?F<`>TE^ql(#b2lD 
zQgpBKYT+(#*j1c(zmw$fq0qfT*(_6Y}0j5((LR`E{E(=1tteVrY3!^!dc8q+sY@Ag$k zoDAzcmJ>C$QQ6vUTRHkx-NA>@nw{sPPeG-&cR5*qe|vCC8=dG6=+xRMOs!gqNmhdS z)b`5AWVgRHL;YJNLa+044#<3bV@J0oHWWVYB5~|X z#z0Rym-_tNYty8pft!nOAJ*NCrEE8(iGN$H<}B&^*QpMDP1bVukWE`kHH%$*p0FAr zoC+GEUcSHOC4a6RnII^tH^Z3KynnB|*bjm+PhSnGvbqz(=_+csW=K)2Wbl3>BKjPY zJQK&-#Yf3f1f!dtrD35$4X@U}*PP0=#$WcCDXR-e#wDrDjnA}~9-FSI&RNQuh48UC zsU=QStbw#C4%Du6TO^G6EpEq*mzy63FYUn(S{|>MPfZrgxW(~6AcMDKeWuli$(Dep z$VWr=Ze^>@32F;9r+2QrNu|Ge)Oe{SnfH|My}usnoi?&WgbIINC_?@!N6E`|KCF z>eFoV4x05?gC}R}Mf&;$g$;@mcsXZ2@pp|g23CtW2BYFPK*rW7XnqZkTF{aADu3d4 zYUxN7Qpmq_STpNIl!3GB-WcvQ5tuBoIAxgll7X5S^({U%441?uySrS2y&W19S2v@d ztt7x3CAvDp7E3E$^@tL1nd|h8Ke20pBgv6zCfL0?^3izxpH`6Ht7M{Fg8(N`4WupF363LpysLjRJx!^>{(E+q|*{4 zHzRrs@q=`zJI;YN9vxQ`lf07Q{YrK=44*T3{c|uYHV?Fgj30N+L$VV@|DpM6;&;+d zAHu~>shvO9)wRP&yjOlawmM2}n)Gx@$1cY?O;2Y`Ijeeytu`xb(FAJ;$-BG@o)_l*R_XGEHc~%mUe=j-2oB^Z!1TgK{zi)d^J%*a4 zx@4MQMOotgn?3Q;C#XRxjLJ(O3=Wik(Cd~b;Bn;Y5n1Nx%NGZdCvj&!#>6cP`pxpb zP#QbyPUZy2xavP}a3m(t@$&i5KhA}oQPGZS7Mdy$FiTs>L35jVUbrt|iz-_P;@`o? zP@iRYg4){0n?KB9OwxWO*ZMZnq<*YOa>G(Md7QEwHvs8txg9=M;kZUmn>K~K2T#vM zfZ{V%SDv_ti*~noU9wNVDf-zmugF{WbM~ew9H}3V`xd{AH=5rIQvglHO_!?K&tD(@ z&yoY?B@e*a!VR??;@{V?*InXiViap30OY5I!}@%hQDR@+lC`G9%e2oKPd*j?Rzk*S z&#|w^qXu7x6EKzJVWGy)N7CMUYk|ZDOY7j&l_)Nt5XqK!MtJg5s625`2|GQHk(aRF zEOjvG;xR$=i)1p{IeyDEDek@1w74YL%RDST?UfY*HfdV8BrC}$CoS?G+CQ&0xsriGyak-zWVnN1k4t_rpjIZ3(jftFJ2$YVr!= zP)k4rqId!VI<<%#o&g1G3FkCMuGSKhK%hjq0zpK=5ELngiimg=P!dOuX`oTVDI_pK zX)%Bu1my|_3T;G=Dp*9K4bkpL5Cd(=e{bf?yldaSx4XaJu1TlvKS0Lok0b5+Y-4n3z>Q? 
zDS|KB?-?ql*Zwil^+Sxd-bnCumESth4473`v$@0-5=j2Ok!%DJEADoG&ymc$__B$}b>{V1+-9!iwG~u0d zV1OG`@Yk3yctSw_c(wFr5liv8CJM3^*_ zZgFsDKA$;EIrD(n=iP%Fxl=DP{d_Na$R*R}*rNbowAl7ich3h^(Vx)L&R6AVa&a> z`Xfj8!{tPQK+r^B;AZ9OtM1I4jbF@x$5z;AG<}h8nh<9!<-qd~U_e#n_`!qO3Gp&- z_mdAZX0J<|jisUOX5S2#Qpw?EGgxVN-(X)e;7Go6Dk7P-Hy9Pywc&%m$VnR9vwT+5 zCyI^UOf{>IS}>OjYU;Y0 z$#ZV5d#m}tB5E-RX8+E!2#M z_;11%uuut;TQqgn1th$e_piOKw^a2Wg%z6bKz_RX4V;B(&A*TwztPVhy&0{FkmTaG z2|JLY+8N{Q(tS!!_#q)P#?ej!s2fBbpMM+*;eAI~f;<8x?mkj>A*iSPh(>Te0CS&# zi^~>pSLSxh)r4VX@Q-4g7+st2Qrsf!bPXy{NJ;K2XsuaA<7FrHcvsQiN*Smb8)l@1f2Ou5=dNu*l3e;_!NXl~?G(UN;?t^Xatgboe4mcrOTclx2Nrv6-i6Pdn`0!K<6x R8Gl|4emGYT7oJl<(m#&Dd}ROt literal 0 HcmV?d00001 diff --git a/docs/source/assets/kernel/query.png b/docs/source/assets/kernel/query.png new file mode 100644 index 0000000000000000000000000000000000000000..e2d15ebbfe26ec00d2d57581a8709f9f2ba69369 GIT binary patch literal 32710 zcmeFYcUV)~_AX3EKtKgVIsyWM6sgitq)UVl6;GOw(fI(_dDnRd!L)hM4gc410#fUL5xK1-c1=?002OGKDE|cwP%67PNKa*s6HiF5=3TO*X!~8I1EK z+w;au16&e0n{yh7d>IikpkHmXMB{MY)qV zR4)l1r-rhgfxq`d?7rehI-xrGk1Vks2LzNc9zP%02rkKfSTy@MhRdpb8fYwpL(M+g z-%Z1j>%ra1J)`!R0cNAJjR~tAzGNanvi-r`zB6esW;>>~q*SA&|0#i27)< zuZExaG}Z(#a?mBPx>9qXP&kPs_uZN=?s~#QGO+X{y0T}gKg$MHZ<2MQJ0%pEZ;5uj za(2DoAUkXQ8rif_`-oFQb$dYfd)K0(W)1g@fOWiG>SCmsst~_J{8>$L%RwFYTN%ag zy59%XBcfB6%{-3ERQolfu4Eo8@@xzFHRc3=;P~ReQwg`;p_@XK8}{n<_Nhi()ho-8 z(}$~kON~rrDlO9BK0u})MChEVdj(QAxXwo}l^c?N*Zdw8SK)HigLOWt>X$b{k={w4%RjT@ z*noV0s$P`wmgY;zc8GJvR;cEXCz+5$5p|llP9|QD*qV`H;&>n;Yo^a zdBRCDidSt5+>Ur$Uya>y&2T5ay8Oa93JLy&cQy3n8TBhx>Bn!^-xK8#(gTlodL$b3pD9gXuQTJU<%cTy{M zGY;wq<@jrKHH>#3tG(yRqi}jC`958R^5DkVyRTO3w{CWjF2$03e{4mNa>@k=Uk)jrOZnt=7dxE@gMd$$Q#5tP`rqstG4=YzI5zA4?yLHL#$V!`tqA6g;3c zW96qvjx>E`8aESD+FAdz**&PyN}9Pc6#6}2L18~IkfId_P6fUKCIaIl;;{0pR7Y2i z3BZq}A3ieD;J!V2O_R#?TIO}7i0;QWISltda&z!=y0Y!EH(aZ{j*7_TpozcR^_p5$ z?M^vcV*IAcnkuELTbFGO?o^_5`aRA{j^y~b_X6<&T`YG?sL4o^g_79{o%0y8>U4IW 
z&g&HGQI&{`W~M33sgxIF=E@cXYv=G9^X`9qqsIHOJCQ!omHTtfb*Byy3Kv;wO4yPgi*k6O(z9c_G@Zxzc5i46AiX^t-f;D|K=`Rqf#L;U$ye^2tkb$ zX?sa8szdJD^fRZlevy8Wp*o+D&4RfUv7Czh-Ew{%0li8?csWM(b^c=Ehk|OwT-Buy zoJu2)yc$*?^~t2IiCR9tUpHG%VDi>PxqkOW+w{%p#cB1rljoLW%;Q62B!eF|w?`!k z4|S4@x60E?U>YZNyIIWU1G-veP>sFld9Nh;s5Ui+#F}Yih4bn2&~s0;8d?+m6kqo> z_vFM>IdM+<+8Etv&@IrdxRjK%?OK1q+r+Yg0y7?-Mza{l2S@+B(nU7SD zm=~%S7;3<66svz4t(c6NAWekJ70L`v#f{-L_iG}cQ`RF^hhyViVspNb9egQ$7%Obd zm(SO2n{S)&c;y(%SVV!L;AKo>vXC;7jAT4{Fz=Hi)#q)pHM)A#KGkl#YPKf5ZoM48 zP}8$Qk^Ys!rqaREyS@3mvBF(u<9w)Zim!90XvT8aVAp9^bQfRviNibl?ew8FtA(#8 zMMvxVwtk#1G0Rg+`FrgrB?taLUDo-I=J(LiHvOtAj6Yirs*xYgL{6jK=Nfw&%Uj>M zg}7zA6+%iZKU$_DY{2+fTdWB-8>cYj4sLnqU2PL1ZL z!@pv$_B| zmU5R)Iw=SC(`$n?v1Wx$gkG?<)P7Tuz2U&VvyDS7zAU~4fd@$;8t+t#R8M;;R}A;V zsr({fH^K?bzEXT8%Z9qAyK67s=D8`OexSmmNw0FOGM^@$@-tB<{y?XfCON{gkf^{Q zFE;OO0h0+%FfK;f{Wd{`6z# z4woe7-T9yOE6=LcJS;pAsRHT-r{M8iH(n0h)vn_{S z#%fbLo{)oc^J&=l4*eA0Y>-XdNoF%?Mr2N;mRtn5*_2Lu%E(SfzlIhn4%!TY71uhIc}+xQQMvH#@{z+$u>K2q zn63le=Y?V!oEiOc=$$9?sfz|qTRpX14yfsybutfV19lBi+sFNfU)KaIe!<-7)?nmvvS_WH=6 zFQFB)nLe;msS?y~Mia=cN}8&T~ebqb;2Gt51$bm+&+KiKD4b zwWNnYaZ$oJ0j9X`%t+*~lz?)k37ACOa5|=QRNQc+9&mTqVx{Iw<0AOa%Lo0h?=WVe zd`%5rfqy(^!J5xuN9C_0-Oerb>xifw(>rjv?kdKB1a98BeB-L>aQp)LpK#JM z@;1_VB5C91!f$2kW^Kp+(#8F<9~|kIlE9&howpV1OBZKXFUglOY=2!L2^?SkEx^Y5 z*CpOBWY~-}v{@D0JndM;_yzd|*9xMIb9QY-}=IHJ1E-4@Y0)hBJ z!u)QY4g&WjBqRg`g#?6z_<$?;y!>3ftzPoEda?hjli&S3w)3*_baMB0a&u+9?AOZL z&Bt4YjqUPAe_sC@r`=1Zf9~Y!_4i`|4=8YXM&LfbpunHLfu_=z|4M2*y|i;Se(dA| z$PBoLtdNk1^k3KiZ)g6wZ z@dL5g9WQ3!8&CD|L%o-{+jAsGOiSp_eXb;MX@kO8aS{i|I8XV4Q7owo_vR#pa-tgF z!$<{2Q}u+&l01D=hKHZDQ97fZdc4)klkr8O^tY{}jI2o_CRee6oIxGJUJkoLC-Vrm z)%g>zegR?7xP!%j7nc`guU9w-Fa0VBF2OZ=k^osER<2`^A^5nR9Jfa~fG5=*KSFUMiQ~alK{?)lC z!Ih#>9BZ-vHe7l6x03&Dq*q3-v1aMuznl7R!v%!0`QJwR|Ag}YC*uFpHHGQ+uXb9? z6an~!kaUu4(0NLJl1T}BY}9M{EIQmEuA@13r;Yq%-`uO$|ExNCYSye_)>~137RTq? 
zf^PHDhrPlBDOeO7E6m{9m~HIZwBpasuuT>Ei1eOG-H+}135(T!slDg}XusrE3Knw$ zA97H-wyqSKpd~wH7P#MTkGVJ-T7+L5^b}aagD+41x#K1+puTV#b++F#Aj9w8!v{@S zij$jfIT}{44sxsM;#~bkCLm(!Qm^kU$#QL-FO&~DJ1mXbt|i~Y6c=U+p;0GpFJ-`Q zokj{$AHF6!OXw{rt$4oLpTUscK}-;jC(wGDBe7YO+p9n+L)~X$pK8~u%9UXoD|2k* zg*jVGhhMA+*!O>u7`oUC+6!1c->MW3LLJ8x(0Dx7%FmO@SCr3xm#>)l_o%b(;SuO{ z>*y>~{cWq7)fRx^iV-uv%xXGYO)q7J(qD8$)7c~$Sx1!g+@}ilB`f-H*0h%S;-&cW zL5mmV_4F14mJiM8(LZV7f+uqzh~HK@^qVn(Uf}JHT7rI=6Em-GBwTs@2A{hnp%(66 z39VQ;ecJIfMzNHVN++MS$FCGo7lr&F{o8PS)C5f0Mr8EWzo$)J{0d{>udntOr~B0k zOhUHrjH{l7rb_#HI{O>bA6uucDs++@*~_Xl^6c}CEzhyC)84@&0t89~QWL((y2t>>Gome})TW6+$B%}CUR)nT8wc$^&O zWr`vmk0SrwgRs9XjpF6Z9^GIA|81EDeeet|gX*WiSaj}^fxF_v1h>E?hQ&H4B6x<; z{R{4DX25PllHWlWSDzAt3w4mM!0Py$Wv20m*KVPk@{&PY0=ygj_M(#fW{rY@;3#^k zE!AM=x|W_!2T0J(R?5{F@p(Z8=c-U+uRc+m^uxoo@d&CY(*0Zh_+gWJk=D zOk0t>6}My~sypak==G@`Ov;|G=ejdg!ZH4*^REF#mMS6B7q@j-<%5~bXr%8E>wPpN z6mf;4kAKZ^nqYpg$BC?EfGip}#b%ssnEjSNxJz%5Bo4@7rOQB-pO4dwJ-hC-?Is>s ze0;uQ&X@HQHePbrozlH~wgwS*YdtIYaP}n>Ul*oDO8FCvJw<8DBF**Bz`wo{EBP6k zYi+&iLMUDDIGp5yL(ek@K1qxe1sYoT+0|msx6%_0Oo}VodTcBw!h~H@`H!+cj_Bc-u-qGBK*$;C{+J(DTLRng64^jcO4a_zQJ?j zWFo7Zz3 zI{?lt(3xLx)|&`KB4ql8l0RWIfM2Kg&TOvf3%GcM3C zM0nOCB9?39K@+t)%&0OKQ6co0p=HcLiI<{D4epjB9Ya}>(K28*Nie^{<@7!wyk~2w z=3EJ*Zt|5FX;H~u{A&B+i%9Wr9XprojA53M^a^sZ880)g#!_7zRGFu>celxA=jf~a zGjkFI-XOMrkX05do|YWid(^S8!;p}!CjK)@%ETbXA=st{k+Mo(Mic$35VmhC-!2<* z1F#JI#XZeFB6{o)`qG4r6q{I8^9i^2N6=gk6yEy6E^JTJT#I~+f2sncll&Cl)!=~P zyj}n``fxmlFSSBmx8BC5v*KAQeXe=)W=W||Tpu5VahgA3wq&SkswPV8qs@0p@6RXm z0gE?l@OWn2;6dJHq#R9o@?C@33b}q=Hog#=U)qB_sTX@8WJR`w`E}*CZ3i2L5DPDa zi54!O++FR31A~D<*`}pRi+B4i$Hz|5VX@}8QFS3k7AS)SQ3mayrU!hdpUTK2Y4Ztf zIG?0wCud(yD2`}T<#R{^Qr0E?iNYVisRG!m_}y=Mzd>l0Apw8Wa(t216VSa0wPu@n z(N7xbVtfzQK6&x%Zi8@XMCWHJzVpyy;&Bt}h+JfzR&_E5=*LDK~MHvg` zvUd+ko<-9e%sI^UxxvnguPi$h>=YWpL{5Ak_6D|%eC6ht{^KrbK9=xZy1 zCQv;b3YnzX+A$SXvShtZLXeXv`^Dki6f|4RSKkb8`&GcqG1N={zA&+ z3C}-ji7gwL3JgfYx7e9r2b)}?VVwBV$1)5Uw(2lH7X|aAraUPWy2BKVD(9Di4)}+Y 
zx=torDBG1H0VOU`)-BYG87tpk>g??H=}oy`%|%DeIHW66!R(ZBHSY|Urca9qG5V#9 zXx&yvdk|59ch~>mo0fl#>Q^b~r-l^}ENK?nu(;RLTF7op@UXD)yG+|?L z?@w$~r~ifWebI+ngM(x^U=(uN4~g_$!0q^f-lD5avF0F@6KG)yKTZ!FTiIi?bq6!~&b0*UiZ=y7&AmqahbH0f7HQ z8!t8@+;@`wN_#4-J0&=63H64>R#-swpt;kz7aJSIa(70!0`qv-8@bIZxE_uCo}^x01)AxW za^}p6m!{<$NIVZ*d`qj;$&ZgcHP58Er$1r2**ciPq8B&YSbO;TKB+HkgPunvHHrGu zNius@e8G2-Kc+zz2>*~0)zrTuw?THi;I^6lP8Lfq2nMEk(E);o)_vg8i`UXRjd36A zKyy90c!I)Bv^5mPcy@Wf(+h%5=ET!>#0PohVw{DiRnnD4Q#PWHs8#DlTxS~{r`Y49 zg&G{oYovDSMsj@6A!?;P{7J>1SS}6@EY%>hpE8X+eCatM)BDNb-KMo1yRpe_F1eyb zH!Y+fZ%PhQpmE`qV>-sazGF;3Q61Adhcca&7C-7K;>Jxhm;wgETiG(q_Ocw2@hF-1`K8J~k z)MXw$PAc+u)FU^3`xu55Z-l;Yoaiz%TB1@@U$q$}R z5AP8`qV|pvKYn#_L7wy#Ag-JNmc4*KL2QUplbdC_HB)mR1}ouO_}H-Ht8utjXV~Uf zzYKqKKBzz$!)H>u4Xn0uewa~#XK;~tR=o2s{Syo0E-*=RD|_(4Yx=HpUPGVKk4ZHu{CuS23picW>1{{%%bBu7I(k8@(){Dw=c+Bmoj-hwa6s>7+X=Xf-b^#Y z>H_A~PsZQKoXn0)cC@)S1l@*BxxvxjYm^@)?G+rER9h|$^1;NQWcc^$ed_!qs zq0QUYQhjHO4KuF7u)WrR#)hCr?URMqg`jzo&k+(g`k}a7hdq`UAs=Igt0Hjpj$50b zn7bXP=BHbH&}U!qY607!1aYbAp4^Si8L0V)0+4X76aCqm9M)+Y7Lemq_{lTU7%=U* z1IFx9+f}ct$#)i=!H9;?+1?B(sCE5L!%Y7qm*}^RV-lKL0OlFo8)2X_v8t3TZZ5Wk zXx~2$dQ`1Yy{9h3BX9@xCb9_t9p3=k8P4$hCkqz#0-vXY$eXvJVZVcc0d;Wd-b`MU zGG<`P$9Mv-EU{E(^myFf+(iP!o}a4cI^QPvv?Afy^UNZCSJ+O2(i$m)JIUG%DEm#% z)~EGW-CFy;$bL$(M#pU_quvQP2D!W&4gX|69TK@QQEA!k`Q%T4d#y+ztNN5o{g3us zsWX)qSZYZ27(#e-GSQxDf!58L{8`?>qnm^G6Pm#uhi;9FVU9`Xv)CmT?5zOf7VMiy z?3?Iu7SZQ=S*|Yes%OPN5E@9GwO_bseBMBwHusmabQxv`hS8p^ozOw4_&qt`C9_o#TLTU_*F z6Hk05%xR0Hc#`#eX)Oaioaus&b*FXN{3E|t%9SLDovv5j^u50-V8c*{pQtZ%SB@tE zKyEUPKzw0+ralAm43cSDKGxMrwlp;JP@JUs*!+4JaS5rHIZY3mXpS*r^cx52b1(@i z^215-sP`u%G&|yFyKFL|IfWbXs^Fl3m_hWxAMG;QvL?u_-XX4WAczEos3>j>ROW~gpTjVMf%jYuEsG+2u**t2-u-1-=deK$)W=S zi>#BNQ*w+e2GDtiib|!uY=!VJ^GS}w;(=YQ&Rl+P>h+?e(U_Q-aJk;i=DRMY{rC)~|vF(Xi0Q>Oz+CbKeUdFxId`BO#@-eXUjV$aW=M>q$qwdq2 zUw7ioE%3YrguG4na>%z1ykja*JWqp zgpgoOxZ=g57*Vk$L?b5R`;k`+lxN>g8#6uus!BA0w1AUbn zJXPX4#YNn91_WRh?bS{}OpyL$NtSbSn&(oa?u~gNO>mcT(9w``IF^`WE8SER=?Nr2 
z7CDcCyhcRz=Jg!v=jskalZJeKEOf5D|Hp0)rr_WvcC;AL|M3~uaYcvo3HdL=gkwrS z%!{g>(;ZqGd@|$Os(oP=0EFH=^fP7G)20aRorpUD8t)?LbA!UeZJYr&{xjC4x)*b7ppc3F`}Erl|PL z8|tP?T4lC(uo1N7C@37w_hKmN$-s-sze}3x1aR)Mfw(y#g-dJX$ zX7i>RXq_w07&6vaR#6gfS6J~XekFgP`SpCh?Q8fdB=Dfi)%QWKsc&_R$o|*XS`K7m z#@mBW$9jn~`SeXYs5?5)Y#*Pspqi^An;9(He#cV|yqQAU!J~6JHBlTg`teu4HMPL9 zyA8CG8!Iv{3bST~cBM*|bTTxE()1`t(Ygl1ClfB}hzZ-EE|bdC4Zm%NzBK*)qxT1V z>^{c%ACCzlGh2-t-L3Ay{I+cve(ZmhlY8Jc#zb573nt`ss85+NM^UmRB-6f@q{>jD z=_upVw!%e7kTB=jgxrxbw_bap0X(GVkk)aKVk&Zc4{zATa3x{9JOi%L@lQUKhw$ay z&!2NR{zNB=)K%o(t6Xwtsz+?L$;vY+_Reh?Q-OzlF+0cY7syJmXv4#)$qwVgxxL^C zzMB*->}Ayt$!ri_)&|CQiN6RtVp^`4Zfy~E_)k?^)RZTGdBK7>>j(9A^H!Q2zC~8| zE}v#C)sPP;3B(xG%$d^7$QUb_4o79M_^;o#VYuH1WN;W)VcFy&)m0W2d;#Rh_(KdD zmCkapKZ3jMt@;DJ}PPQjMbA#;WJwaiI z{;LD2V$Y~E1oT19tejp{$9I`*5QVnRg5#5{km|3r!q2|o)B*;~z9QjMwpT{K*E#s% z8sm=FPW^+}65iG58ziv$3_8Vl7tf835H zuFr`V8acs$!VuV4YP*Tex+~Q73iaHYL9|}xVjt*<>&OAFpzqyv_FiZ!vfS6caA&4r zk3f*Yt;J&u&F#M2{H&yzRror0b;zq+QpRb}YD#HkYx8&IN8|r!=(Y8Gz@0PnGa1Q$ z-&)%wlq>TkTcEO1IewMER|{F)UB7Xhbm3}l`0Mt#yUq?~EQIk_9<1m*6{lmF=qoB(;Q0b3Z`WMj2+5vLn)0({$ z|1OmOdCm4^-rVBVGRyBd`3EHZIhqF4B7f)m_Wx-6zx?dJyk`533g+L$@TY^{N&5dc zp#MuH^!bK%s)&;gKlJNsV(}J>%%GL2IH7S}6wg8M^83!p!dTU_7h)@2rN_qJ$PzkYp_zCI$F%{7aMgB zYve?JG#5sWm#1@8nm~Wpm0zq#RIst=W-i@TkhmM?qW+LF{{y=+(J^b$ax8 zTFdegN1gctM$25((800# zmlq71dR*I%fJm=>(sYq44OTjPUUBxk?z4>_gEKlZvk;d{D}RBkzL9^qdTUgG8ktO% z_S``QE3LoxGV{>5URp0*8J&+YC}ly88-trN%iv=N{nEcp1gl2KmSqKaWbQ;f$Ut zeJd9N0n;xtspK=&zS5SoSQfn{qa_k(`Sr+sL_?ggEu50lz}#o7V2cSUj+SE%QaMNUzNeej?SVPodv8hQx%pF7wn>aoplJZPh6yLXS;E12h!RUpP8Eafb-n>UJ zJb!{W7`~6{!%}!)G_aXJxV3W7&eaa318~1?wkFNi8^{J7zN{M5E{ymRq9^t1UcV5q zr+-$rAALPz7VR=B+z-AIb^maZW}hUn+NP5TCL_BHEKQ`QWerg#WaGbcD5P}jDRB}0DDI{_bR0JrK30Kpns`xP z;DLrs*3TaowfrSQxAMK5bjTW`7Fwgy(r-G=Hns4o*_o)&^2JQ%p&Ps1`+DaQfqaOh zPsx<`h=z-~PzRCWx5XRk_pF@p8_rh6;dH^i*l2pu?#b>2SuyEem}_S8*7B6$hZFW~ zO{oH<*tH%3xJYH|dC3L>4mR2gBWJKbN=_<*JMnCc(BH*}zLiE$mCpbx=x1;dyyDoF z@^G{?ljp^z8+r|~eZ!vOw_|zF|78#Aom^LJhUGVKI&mWg1U=SzTIZV_I%ArNcAU#R 
zub}pj8?`hHs0sJm)YnQBMT*1FU)u$o-*dp+w?2&whp6!i={d9-Ukwc-q#k6D@%OIF zjCstU!G2qt2zS$sPAx*(o&*rHZOWgBj&(2s&kXBpUF|0x(&SSGcvGP1c>0o$dQ zxi%bmN{E}NSth}#`!*z}6F?B=?cS&&iwY@99E|Xcu<=bXOP-68gWml`MIn z*PBIxKh)arMiF6MM%nUs#>kU+b^iO|Ouj}bx_CTvL8K+`uW>g6 z-K^hvj41&IS-K%e`NxxGjSw_qcIs~IR4ip+>wB~!JTFq)=M!U3SCCJVKL~wGUM9r_dW;jB0)hK>>G@n1o2l#Wmu5)$y{Ir{ge}jTAB!id*j$26@tbXXy zixG@=gUrR z;bmjgD&v*_`KQb1Hzp~Q7s~p!CtjjxOb|Z-?-oZw@8v7Y7Q;tFg~;;85EkS`c}Ruv z$g7D>?|%gN5rk1NJqK!0*Lnnz;vHYZ@yGxHn?0~CFOY}vJ3ik{A^eSC>hS91J~piX zXrx&lIBlHP^#guQ&@OA^aEbAZ@ zpwpS!_AEO$7S6NHYn*BSQq<6XpsQ@Jo(6{4=<^x!{Mn{X74*(*%}yBm3q$mstYPx{ zaGSDg3lvz=8=N0iLe5QdzOj|k;5E1DfrEz?DgM+Ef8201e62E$Nr{!`C7ea>eUk9% zZS+|Brs1XF!25Q}K#H&kzB6h!vvboOc~UzdE9{WrW6%gQ_yi^?s)H?02Hz%ZWEHTj zeqSsJTQSRKYXy#3OV1h5{&$fzF7zNFU?-ybx=F*FzXym`)oC_ zDtc!!XD1S$J!_bzz4u{*X$aYYFIg?x7g*^t#XSxGJa8oE7o^*e+MA2z>sb#a;?g)hNfYVn&6Jrr+L{pfPAC1+{lPK|K{spG`4-n^W#!kGL%=gC z3fw*j|L0lMa4W#3#a#{l3eNy8kW$RBTAkdH5=qs7JzyyM)~j9J$f$X;*rGQ^3e%Aj zmT;)~q}(5s_|whr8sQbzYb4jSBe?%9JMusAKd?B50TJiH1Zn=*=-&(W-(r+^fHWRc zNB_yG{4?wS9Blw$%dmM$q`U&&?%OcHm$uM}IBF|w)d=E!=95S7?A&;U?Yq)=zUB5h z+4G$KpE|*n|J{6B8-i*1Ol*VCM<8M0Hwhk&gim(5<`tL!(m5~a2_Zjv^`+=(Ztwa~ zZrig<@Z^7+c>6}^1)!1_zGYJ=yb8>@gwuh}cLep)KjXVXtp;fKBO2mVCv9MT0Q4u0;D2CNd-x)!mX1+xXXLxW;b8=DXsayk*PM5vL=&d1+s=zOnL>lL4}; zRFQ9=cm@<9{nZu@2js+)ErXlr#9cp~0c?m~pfu!qJnJ=(IW2?wD*f)m0us=T{-&rCFQOEvNftj`?Gsk#tJf2Wx&AM-n+BLksPV(`Jq=)OxNX+%*%MCd1Ex#FROpVm zWsIS)eXpRF^!^(;T?NCfx(~XA}OMl%KW` zc`7gDwy+1rwf1kZg_Tv`NbNIx#_t2Pv!qLU($@4-C^T;DQ4VJOhIQ}^8dbn#unyLJ0m{?b9mC}obctCnJ=3`S+p{gaNlF|Wc|f*V(3omfg$skOl`7?7(s0L1$o z$azu{Oqh9TxOXyv-mIB3aJW5C7ZapwfQq(Qe>HS8ME9ZJbmyriL}TXt)|hI$c}gb_ z6Yoc!zmMc|2bg*38K8Ly;RJwc7}o@lu!Pq@-O)e+AdQ5I^Rjro8)RWW>pcQo0;zw0WU(09j_#G{XqnPwYDh5FOJ z;VL)pjR00Vlsa^)0@i4SGuR^d#&7iWjM!dY9>0PCdp(h~awL!T_4Ly>pPOI><)JPww->`TiVJXvhoOlH=T zj;n8}brWy8k*eWx7Q1et!D8G-9~c<4MUu@tBdW#O412w(Ml-QM`Zq0tiYshJTR=(D zVIV_lf^YbGTf-QlB{a`h@#Vp4IPy7J;OSy`HJ9!-?VYNxZ=R*QG(;#K0pzyTX$Ic2 
z8x2uKllNz8A9uwS)z(&WE$S&5gXfOlkRqXGNHkzPuX<0^T7rE#=YdtixGue|g#ao} zJwEOdo;^qf3K4^cIH5^oi~-x#)rXb+?Z@E`Jtu8M*X6TB@yz7vI)+^G0U0#oPs2O( z1#TU8R8ERym;!nG7tomsCLOS;{^hO_u}F4jGOY*Ay{v7=!d43h)Qzl8yE2EB0KCn?xD{~QEqIcjDe>7os;4gGn(wY zrm@^rjA9ISy@`LIiA@mXlnI5Tb3*7QI-X+Pq^PDrOl=@mRu>0nJN0# zeYVp>2$pdY#{U`s_)6(dmr^lu8kng<`CL1He~whzAOl(R8c_e+IiE=`J^TzQP3isb z3jn9_O5uu(;pWSFNT1luJ>T6izv&gX6Syj+A+^RRiJD~!*;0)uqJm?eWWHr=** zG$#loDY>f-Ms>B?eV;+j;Yg(_f~Sgn7RyZy`3Yb_YXl`0YXHzB zXq|RT-@5`nC`VrI%dF)zWr=^1hhnYcP|iZCLng^3xAM~0g$6$sjjyW&C{A}SaLZ*6 z>c~;Ac1zNK?J+m4B$-O#W;jr5!82q%_izAPQBxiSYjLrVW#Ov&OIfPX>8 z?_`V>FDF3y;NF3Xj8AQ*-R%8iZ*=`!_hZ_7g_-^(Jxb2&C+HWjx_5nBb(C|y=s~BI z&V6d-1;86n9s}g5^^2d_TL+y!4psCummI`iyPYW0fUUp}$h^bc^33my)*PLo=?mHJG(DTUr zc9sq}V%7ESR)N6B;Ibw%TyPP~1JWw)TZ@n~wB2Za669#u%IEN>z)}a&R;7HmK^CY5 z$NIy>D;HUJDOM3%Art^1Z|WS@rh*jb??W1YL1+Id4x1Pv_5Ef)4$k zz8r5bnNKvm_+lR}GqjS0hi^Pl<#$-QQ*2dbGMpgg2Zc0b27kA|Bl9q!LDMrW2=s7F zJ`&zSA9TbJ@|;(lGT{z^p!cm2yHF-f%1_Napz{V7?t)YNFJ=# z4M(+J)Anet4{tJEl-wsv>_B!fxIG36i-O=&pdJ`z&T~2vzqFbDRQ2h;fAC|TTk+$a zxxsIE*5hY`$JZ{sE^15;BgH-1{?J6{9^{@jm-O>=`eh4^dh_P7-+HTE8|MX3jHgz1|-%#tp^s$!y%)$idiG2 zIyVXiTs48TVm_kcyfn<}GqDEJ7g%;tn;DxBS2Y#R>CSkzF*%S9T7RO%X=1p9Yj^+sPuOiy!CV`0$SBg^AEor1 zj^pxy2tFSza;NMfEvJIDeh0D%hR>nI=U-EwGSQ-ezAA?VR>Q2_NA-*+Oz;?4$ho7Asj}{}>|b zge{Q2d^=O6KbeQ!W`U?MN7isiCQG9>)VvE|51K7<=WK8(3e8EAb7_8BAE1DdAEE{) zg#48w)y07-k#mrY#!vX5&XV|?x2`iCR)!mS5x%&mfPMn<-0UDb5Sl>LHLJ+yA40VE zdp?hBR`yQ?i$_VWm1-9HkgQ5wV#m3=Sww+YNg;`ft=DzqpaGPqI-@9}v1d4>p*?n-m()}nle%|1_^uzCN) zv^>Xk_56X$OpFC&vw2Leuq0bYJdD$gz-O(+b*n(4uaSyAZzPs?pw3_%@uH%M^Ke?; z#j5Ku%19@#bs$3<%mt)Mq}-Yg6Dq>7S9l9gfx z2~oRjK%lqjMVhRJNkO8lM%CZU(LPl`Ms3IMj{aKP=uF`ki3e}mAS`zQYO`1RJJFUH zz~QE4GSW$UJJtcjt$r*e`_5aB=~(O=QryHm=QFwFCQQKwmG(wOh#J=z;_PtEP_GI;tXlYM^ zoE~O)o*~C;9WaX-%@wb1)$1HIo1sSIZ08c^Vwc6m&O6uQwp{0$yw;Rp6OEbNhOK%X z7!94FN71ZzA68xlKbN}zP}S97 zEDAvkl-{go@dWs9-@ejOXXhhs~Zn`D%@=zxp|-EwJW@ zo}8O=f-5VYx_Ivp3+zV4VnzDWKH}g=Qe5r^+?gZ0ps~Sx*)Qy=p*98$K^_-P*ndFP 
zSdqP$F>v`jU6s#vV~gL}@Nv`QyJ`k@hRaE&U3W-&O77jA>$w90=tS{{;8q*Rl*4N8 zC*ieE5^JFTPXjF$@P0E$>Ka-aK60@tHE>v@mpKxGi?5@0cR(vsRe#h9h%s8y*MO>Y z=jY?`a#Pq=z|s0*MRF{F8frX!ws2B5>E8cV=T_)ef4EOP*v3dNLmNZ zX>#D!fJ?D#Pubse{fsmKL4d4m>R7}@9;QU^&KM#~?Pc%%&PJrqP>$k57>6&g)IGQM zfN&xue(xCyhW)6ksTzdZfku>9+Fk_!ITb+66P8BMt)gszoiW9I>EaGgE&!?wnY6($ z5LYU7*hGDJ-#`K9fun}O^2LQ+6 z*gE|tX@`Esi980I+x| z((Qm1pRMsTaafNW=G0~rU~sFMgVD(1{tPLYQ;!4(%T#8v%WUJ2j8d>$yktbSM;v<` z58`nNGOUAz@e<KvsjSf{h;rai}=r4xj^qlU(aA`O|-rVuL!;1GfZI2IF7 zO9lZl5FyI)7#z}O%k7Cuhy(ly8IzRn)hFzx3%7OCJe(80M98@vC?kgR)!FX=c5bhb zNnoQ(;p3ALV8g*kEDYc+2rhJ;c2>4#^Rw)MJ8Rn z+XN&8>KqI+BSrXNx^35X7;?(9_ndmv`K;H#Cj1+jLh=)<>X&V)&PS?h9{ zA(56G)1G`&8nl7^BLG`6(Y1X+f`1@S+DY%*w@NVjMLutlS22+Kf68lEp{FH-{8(Ft zVAG@G(mb+NfcuplvAo-r3=+}cjoiSN#ICen)RGC$A^?D>;PN3z5`-~ZX3>635v9@P)5YuTNT zk~rDZbxsKlvd-=NuFu<28 zpowy#oJMBL7znX@@x}tN>BdUMW)Q8EqewsS`RP(zK&J655Py~cu4Q#pA0+|WH5;q3 zQ|H2t9bDWqwOTU;*XF~Vr-bSOf}Q**80G2qx7G$;C8=)*}u zfQH}ky#xyG5+L6tab$#kRXFY0!pF0TcPqV-dlLPWMRa=7`boLmJm1S)E@4aOPG0M&KwuxtMY9Yxx* zi_D1V8*f4@E$FOgRC?HBV>y84?NIAYYX#N(uuB{-`xMMu38XkU`1D%r)=yM7J?{i# z)kaLQSh@ou2smDx02$K?7VQCcP`Be9qjS%ghXf{PK8dIW?mvf=qzj9lhkG&av?8eGyoFJ}OC1W($cRXC!?X~hf7IDszq z7PZkjC+z!Jmm`AcI=E{tk|qw{2`crFb#goHd>VE(YFX?csgfMOyZXu`Ky5;{5E9&M zN+|#YXm#oNhL%;Kw7$VtAKzobPYXp3k+$|B_S;+<&a7`#PD6?-p2bp``yR! 
zeZRgx989fsuY28bUFUV4=c3GVU*Cu;tqq?k8}v%ck44tq#bqS#9lqoA*<6i@W)xp{ zBJFK?rHU&d&LZWJ0!P9ZYu9qMckS`0Z4j<9SORR+JC%m27E$Nx0n{bqPG{mmvrVQ- zLhoK0$?==!^z-uhH#5D(BC5^mUQ0ap%p7o9i!d31~0P5Hh=~>nmc}E025JHyc&Aoj!=FP@T0k^W5E6 zh7erq(^RRFZr$*F{pX=}4r#zo6v$WQc(kD5r6Y>1pKI2Kc&$CV4j|=puYiKCv&0p` zlv&^gE$8x1`ITayZ|S>t8-#Pa3ZI)@g{7v|b)w^gQLUa+(!e@TXSLVYCX46R1qa=% z`aUEsRgYAizuQ&s`TAX2>RxHzJcl-CI3w!~^w5&GKhP>K^v@iz8%b(e2^|A*aJJXZ zW{1NeSB_J*vcnr?nCY7`YOAa*jg29- z2`o{OSbknKUS7ABJAsdWe5U&n-CV&)RE+tM*TcfCDJZyJ zMixj5H9KhGKM3a~&!3;f=P~act`v=k6aX!8Lci z-`5o%1M;Rq-MNCa$80fa3~a8`Wq^ioOZ};+yL7pmE>)+%P$7hBV!mK+fWm&-1U%aN zZZcgO-ED&hx$Hxcko|KG1yc?yrUb_JudIN|(5!6=AbqK=t&k0Yt<=xU>&4zlO0Doc z^Px4#^#K#co~}Cd&HWdZ6;a2IajIFYiRRMa3pWd(2&x}*|K&S!o;<>zeLaP1%Tnz` z@~ph#6ng0?>`qr2h%9%~o-TpS%POhICH5WS67ba78grzUC@=jSS-!9>jXm+UP%eJ0 zn{>oGsC1scA%jn0+ctNkn%?{bZ%50Mq>%|=h8i3FBvi-C=v zhLBhCN!u6AnL9z2ec4(T!)EbG5amd)_jlL0Z)oOd=SeoUdKSm*qhX%633pc%D+qdb z+JAI^S#YHGt@SkjU~Z}EhdteT`>T3%J>r~}2eD3bsM2o=uQV_Ccb{mpJcMat@!-=f zz0X^;efzjs_q%4)sMhlxsyowD4RQqk&i_C7a|Co|Wsj0hDHfN5)qh)TS zHWxCGQm`o_2p3npZRK6aWH+8Tf0%ObUTk@l^ym47f@UNEv0@Z{mWD-Ia?T97q2DcK z-Su^|y*a3H|ISNE$b^F$C#$Y+geN#ZgAlM?R4kY%I21ZC>E?ZMl_}Q30Q#Ic_4KjT zU6kZ_ILXU&D?1Ld6pBe*?kRomT79i{g_USjvp6rSQJgkUdyt5ics z?LEETy^jao*bJ)W!})}dt7RNr`^MRU))gvz56fmehow;A^MhvvEH1}@Hu_BZUSmrr zV`vb??VE$0Ui4Gv)@2b9ay#kAX`6I>HscCTHlFf9WRa^#ESyhgQAkf%hiz2{%Wi*b zgLHH1MEY2;hCt1#`jB1m>EU{m@BX(H%dk;uJv$*vt$ZCT8_IuZd*k)yf_i4$QKq9` z9XUL7hj3nA`3K#d zvE7}dApqa(v9}UKrrtzUBnVVa(v|C-t?UmMXLsW6VH@-87V#XADnFTH^i2!<_G!03 z-^I~0LpWqqb-$r|o*T&47WxuhcV1OH7zrfZ3FF7E-Vjj0lJ-nE#PubrGO1~vxf(=q zvO{(jYvcM!l4kwn(Lz>(09@3=?tH0R{l!MdHqfg;iL7m^MEsyx?eVod+GSgpwbCIW zaDEQ^v@VQ*Yk%!UnNhvm!kfI+x(`=!lR%M^g=~QoBHW4) z)No{=^lil1P@$N6g{{ygK#5VNYN&>tU7`E|rIL=Jt{dd2@iJ^vAqI>L2tre&R|y2# z8}%k_3XGiZcK18?jz2^CtNv=xGCii5;ljiLuE_HHk~-HA?9A8Hf6>wr=v;EgD0*;L z2)5r}8#G~THtxc>r#2%49eLuBe&C{CKjT47mot%qjFiFAFA)n&PNEeG9~4k8FS9>L zt@6CTuQYxRekxqH2x9<@?G;nl>C~65aE$AOAdoDwS-$fTK>- 
zS{N!TS)jM5Vpc@#K*8_+dW20wKFFHo8_kBZu7xlwY*-~&i@x~I+|i8Qvr%_U`<8zG zH7MzSyp{ZlOlhZY!mTo|acar$5+`<>M>BMSR~_M%yzkH-Y1Cqqv?2hC2DI&EFsOmb z>MY+D+vG^Ek%WNVNY?;PFa*&}mX^ciI>2u-c1#G#8w|Nri^bFH^5S^rDvKTt^Z9y@ z5Ya|tUDrfohz@7`eZQN>a4R9++o@r<Dh?BVQ zC2!PIXsVeyt^QR*IC1sBMT`yHcfKC`jz?KA>GTH>30D8hbEDc(oee}rV{`e>EQMtreqlC&jY03EPN zz30Y-DJ^p(2}vU%eNOm#Wqr)sn^S&L>|)3x5v%tY!x}ekMpNlwHa@qsK6zpS1`Z@? zf1JM?>ii_l0U}L?qngj{!|n9lHWm{irFp+|6nKn$w#HfnUR!pMtd++q)-A6X<`NlA z%X9n0n&j%YFCA8E-WwuksSlO(i7WNwN55Yl*2N5k&q0)pD(DAwtcOaAbRrxL z8zd`XWs#bRzi5{tPQ)?h6pFq8ASGsiX&zr$1}~2>F2-|)UZy^qk$igMChczq({;F) z-->;2Pm3zFXPNg%K9L6}iaq8YnS>~s6(EUN=C#VlkdaOkYv7cHVpry2Gz}n;g%3$H z2t%1{Xk)8$AqYuQA+;(p%6S~riFKgFky?|TwgT{p>WTe#Uiy(|19Hw@L#^_ce+Yt` zykeIx{lZLup3mlqaxPqiI{F5Y)4;9x+#%wb51z;(xNzmwqe)i8PCH4UL90rAuhG^k z3e3PCZD-Ufd>kn`49o_@9eK+<@b>On$QkyEGqTLPYzEPNSsIOXv1WfQaCB$tcWjLV zig|>gG}vOSs+19^<-J0UX#;KMOHV*9;=P`kC2QqqcZd^Y(Iez=t22PTg^s;ooedJF z34Q*x@)5;nbHb#(MrU4{vmZD*_h$wjU&S4=0QN6n7lNT?5`PdDWmA^so);Ty=!Ro= z0uiX2${5zkY7K_|gZD+mteA{4dWL5G0>P_9BwE0nWYN2|(y`*jJ?O@Jm+lFe`x-+9 zb}!FcEZUx*ET2z|)P7|LpcyxzHz&H8sp(s8zyN4?^g7+_*f`-7?_mkn&~Ts9tBexP zxjmbvFdjRKs2KP)6(i3g6*C=XI%WY0xWDgu0hsX&tyk#C2S(dXG-Ms))y7;2q|b6) znNCkgfhpX9bz7U|#?j4#`;MlbZH?2AS1wGtS^9B8GPbp@+d*|FZCRN7yAml)_BVxc#IFO0Sr@XV{z0${Lci)C-9^rGHi80;qh{_wqc5* zc-K?{2l3@=<31qdA@fBh`80j@cPF2}huH(X4f3OOQO9}Ia!@W0-93 zU4qz5p;dprCAi~=u|&0!0JT?3?lE=y6s`Ffq*yuEQ?E{WoZJs4U&Z7V-tx|P4k?%3 zv9zfB#Vgl#l)t}xOC-gF89Fo>q4&_F?Y(*9<|(+$1KeEv!fNOnqUU`%P1s2(87MYA z1Hz_|;T+sJLQZrKpk~=NfTF~!3YxVXHN%p$;%`wyf4(_D<~jDx%)vNN!j2KHDPKN^ zN3M4)n`iNj*^BRE{!{h=#(>lHdvRO}^BVbZDtkW0eBJ;=k6dCzGwa8r+ID!x~rK8HZ3hO~Tk%82pDfFB=xEE+F)@+_(d*rh>Kj}=>a&N)mg zvCFKpiHa#vu!bhpL-g1SVH)Orfl-p6Z*w>&FYa`1Zx3J{>BQPgQ06E=8+`yVX1@SS zm!Pep!z5(cI_MiTm$P+H@lopaBfDPPUmGnU6Ya=yC8)K~jq5+60CwcNiGf)9b89Z3 z=OdzQ9aC?1b4LnUWYuwi2QJ=mM z*wJ)y$)T60DDTTbsifi9E)ev#WT>XUuY|=^xrQ~m3x%(#2VKs-Vg{sHohT+ftj>WX zqs4H*1kTAS4Uf}dmgg+LmvhRzcg&J&YI6FykAi(`WPF60&xvI^V~6YkV9FJRhp_?= 
zCiH;ZKTKqGn-_L3TDb(;`@7NJn-#O7wI1bR?9dC56tO+T&7JbdjWWxKu=hN1qPG0H9j1~fai>`f+$13m zg?A|Aao^b^uz7B_NIn&t_I0FFEOT21>@Klb#aM+B!&ao(tkFzfaY{f;j5FvuEWu|u zkUv#$6sQ_v++cKB{}veuv-~xLaa8d!IJ5P;`?n7h8FdI_2C`!4xJ2bA3sYx-jyu!+ zap-%;egE?bRuEB}>)kn&@az7E;xj>taE2&0T<=5BgT|`O#6LYU{->nf6GSq#0 z6t3@^Ow0MqZ!b^xq!rMe)T-u-74==&n0~bQpALj(^(6vD5X{=B{9$%P?Pje<JIJjF=(<7%GsR;^x#(2JyRVJy%q zMmj25?+I+6>P?3 zak!R8IMf{AHL|6yR>V(UQ|IcdoRqZdHL}gWdFjW%E~fgj{gE@(W0678wII5Nt;T&a zoCTSTat)XuL_zJQaz^7r`dKU6g|=Fhsux?I`t;MyIKyE4H$@&i%0>F`=BykoBQ$(? z9qEYIYB~>vEu2sr{%fYm`Y0|}eECI@%TiHZtc>gyoznJ7y{qkB_SSf%>2LY~ zd$5Wn@gzx3nQ-<;DjK%cikZu^+zLLl738*ayU|=XsdG);+k2CMCJ;2;h57^rukP$* z)tWklEqLq_G&6a=Ziyx>sR#d^(?Wdn#NPY!7$=J$!E0hkwCcmRIh1!y-PpIS_I!cs z)4X4oM7?W5ich|MRYx;c=dnBFbXrfUq|RITt-$>G)DFd*Fu#mKs<6P0$RhLtUjJa7 z+V-WvH4cnnjMKV6xMz@eB6Lc^C*M_7kyL$~yO_FnwhdD*iJN*cYyG>PZ9+fBe5kU| zUnf1Yy>rLv!Hla{!-{a#)o61w*(6EtalBlw_^tjd`yIleK%;^F#8!`76U=8L1joNE zAn05&$MLlBIiLrfJ4N|F*TnDR?8hI37!^tx!Op_Qc#z|+_++S@pSHO^L!@?t|7wGJ zLQZ_=nXCA35$=h_5uVAzuiF%+PWKySJ4CFwzB!e9)>LmmbJ6uc)ndi_$1xEtSC+5; z*GD$*f2v$W*lC?RwX(N(nq9Iwv^r)EKQ1f|wZ0AGhI<}6q04edn}?zK+W*x@!g-yX zs)W@8Hp!n@vTd|3AM*TLs%K(5`YI)euJ?>L5-&HU$ii0`o} zLz}x5P}~3X;ot8ehzen2PIlRUxpw(jXmeTw7ydU0>OZ^FcK{HfH!2E#|JahhP6oRG zBn`aRmw)P=|Dj<;R3I}*K#~0tR{U2l{67yM$Z$J=3LpZxfQZMp`)lCdm4(Wj*8jcD zYFD5`vN46tKh>(xmB=Fxren-|p=$rxbPjyBw87!kzPnidf7~Io5{yR_QuaRMujBQd zDzuxpT4q0`um*u{HbfsPP@^=o>^E@93vOdL6$Qnv_*gNAjBxupubtlUm;QnGBbViu zdsJndW;>uhuLael@3mmSE!4sShpSynAl!)1h3F+)uRMPUC|7;Jq0A#f|6wZ*biduk z7FoFcVkpx7pW_2nh$*=wNS)~)gu-rLhVqHN7uI_28uKg5v>uR6%mTSjNpTBg2HIL=J9ra{sA69L+=5eR5^6wf|3;Q z(k+Iuj<}Q;qt1FWZUvRof6Ra6n3UGSqDtw~Xu zIcM>i)vu~fDs}I%=bZvuv-_9VKNhun0|u5QwLWmByX+v7c&~#YQ*MA`d{Gz4;m)td z7B9@UBq66AHJRX&5G^|0?l?d2u3ZnX0TZ!S@JslZ!8n<|m%YQ)&~vRXXp5XF&N_h_ z^I#e6PnVUGkL6i*1-xcfthS3>Y!{3}|G;fyhebMZ)9rB#xQix2_s0>PUyBO?8M~$; zx4%Aqg2*((A9t|9uMTG#9Q+H6`)lTRkscubZCDFYR>QBjUDd2)JVx%sS*uTzU~IBJ z4RUNDzaYvFAo&ci=h=xWVFUG zEZLVg?^K4{hGpZH>UO2m8}Ct$EUosasxkpd(sx)%S2l_8coT%xs)};6e!ZMoIfV0R 
z`HflIq;4DdnJuo%abi9=9{)0G1s#y7tz<6}Q!; zvqKr^rU>RAl|BM0U)M?G`P?Mtc{p3G+L}& z25r0^ULVk$laM(1Hdl0OQ)a;@fK0rdfIT6piq`B{pm|7dyi`a1+vK4=2Wxu}8IiX1 z_m-JVO7KzW3SN$^bgh^LO|IS15o>_P>}ZS%Z^E-Y2#f%9h#YGUR}t0uzGfxUJX%BxG>yThZ6gwfByy{a+{K9m#w z;tFb$Hu`C3#PrZq4|PWL=)xEuPDo(b)Lj8BJZ!pO^TNG>@p*pP2<<%G$_w_^vM+lt z&6gN34F9&m=)ktI7AYn6GtW9rb@U8+8CLyDnky_|z}y>WTZ=o}s<(bSk7Qn(a`{3X znMs?RkywWf{d!%yKRR-?;n@idNaG#yWpz1B zW9<)N2KczZ=68}UZ-4wo^+y5zY2UKl5m}N5kCu)kn^UdMq7L2UT16aRh1L|A7h7L@ zxEJfmHhIx~Dak}X-s}#OpUs{Z$dgW+J4gAjshDy9)_bm1a)o{CBMPS959AaeoK z_-yX?EAk%MJt~40GHEy{%EY)T(SanTcMjZJ=S%izQrVl*%QOxK$G4vF{k`k>_ScrX zUUvqmF$0Y;crG*Z?tqCNYLBND0|SC~@rvD_$Li|l*!eix*koy|6tmG^&9`aRp2gr% zU!VAKNZe(c!DJyx6@ZuMxMG_>OhCOC1S>6Mnqr#>+L#jyp-QKd{9Fv0UU)j%W1;=) z?oP4MU}gPS^FlF)Q(*iLoX zM+eJY!g6(M#_S%&pg@nNJYovE@oT)1ZoX+;3pe_7Uer8m!$Zm=#Rgt4htu89SH;uw z>wiwOsFLT6L`Q1jrF+Zw@;JNbkehRakg?qrgdI~=k!e_NI$i0*gNbLkSu-cpo)_Jj z_eqOa#p<=5pR!8gR(|mdFl1Fzq*zrxcjUL+Aej`{AnH)svwt!7IA#|1QN>{$8lHkx ze^v7uo8%NQ%ocTF8RZM_HhFll)vb-HFIjHC2t18I#B2M3dBP(08t(B{NA)6B9z0T2 zI;!#4Xh!l@U*9N+{;J{9pAHWxpm$-}LFkLvj11g-KPtp6)$ypRJ(1`)nWX|A*ya(; zclB}O{xxDRTt^oo!?vr}hpalvB-eU15=(dbmj*Emz&OsF?Jro$eb1+dqISl+Q0fyp zy$Pv~ffO!rsK?AzSNrk}^k65tio_GK;8|cweN7a__ue#5m`Ytm3ORJvo=o2>E@^GaI(13u~xKayExvVOfiao7HF`dIlbU3HS5J9 z3B>7HNGraFsN$bz7sCr`;`Rvt8gEdhqSg3``v5Q<=QDbVJvZ)j6*)44zYaFVh_1_p3`B?rLsK6%Oxlx1NP3 zjy{SheRa;2$mdSjQNV|6Q{!0Ugb6BY{klSnA-xt3&z8CCtYR`ebTLg!DW1Vl464iw zwK}IFj`$3;^M6>8qm1bHmHg_Wm!xkgFikORnA0>l?WWAZEpk@EIMEF$8cj1Wvs9H7 zT(}l2NMpE=kDfeLaFgZy{fhW&b#_a*?z_`%G1IH$-Yt7JrniNtFL$xgG>HAZ2Hv!ez5{kS?1;($_M+{Pvb?IAuBTK;$OtV9sucPaome|Ic!@AC*{#t zhpnosyl*~8?_c@V3lswo+uS=e{bLa(AtNOZIHvYBsZH`^gsSi;7@sc6X;_dSxj+m= z7rmUv+0LR*%N~RWQ+;_WT@Y^qiRSIhX4e|YgQB?By=J3)VVs95gFzqCFnVph{pi^y zSgjv?xurTJ>z@gMwQHBH>TQx^?kq8)uBc99rEZ~?csZ9Ctso6kB@I&g>FxEIRa}o; z2$ef#Ne1EKJCl)DRbqcZF|<>sy*rv_;TKT!W^y!MM_Tx1)5}US&9sO(%0h)9tYQ(W*VDa)6K%{Qup+xo@ccbeuBSNnXF#ol z`LF3h!B7Fj(1;*|+YQ&AOF2=I5VKP!(9~r^2y+us-XX>#FVj+39*UdTZNoDLoE!As 
z?QM)x?#MjJa(-~8EAPB!XQI@o9Zwq}Y#q2i5kU^8dy36kQHuczxW|wbsnOsY`jz!ltjfiYWFoM?soIATh&)zP{Q#)9yXG$u6JKyGciM;& zR5$gY*m+S~)luslo&?A|2J$0brafPSc-3>X`8F=b2yPAYF049=s$_#=RY9h@vub{o z<%IJxd1qkK#aJ9{``TbPpL6qUb7qiTF(?Y$JJR*8a4~h=Z%8f@ZHnd;dLM{NmFkcW ztvPAFH9X<3?}eWtCg7#v+Cux@)ov~3Or8{?e!e}>14_a^SetAMTC5EBYxa&2)rvrmz5}#&)UuA1=*(PbzPjB_h>mFc6ByyaaIYJ zUoF|s*ShR(t?DHsA;nEKx)wV@_qfK>@VBu^d!Jm%^wBTI6A9TisZg+At=++$=lP*R z*8Md8!Zw|bxuc)n)Fm;7T*`MDD7>9KGXT!Om&)-ysPc~si;dg#cz&0~5wT3GP(ht} z1pLgxYX@=dU{RBrrsM2hshHM8swcOTfiSXI_N}r1=_z~@G9L6Ti%w78 z0xj*22f1}lNKMhf&Qi7K57XDw`6u>2GEg1Aa_6Slownnuh9y(7`<11*XA!C@jWoy4 z)jN1(jP0VW1P#32nk`ixr~j3j(S9pH*bPm*4*Utog&y~XO3S&0ml!gZv^buav@Q1C zy(&8#uGzL@wcNPirp4*TI-?9T+y{A5PJ-v;NcW-Wn8>p?=`PkjI=IO1k7+IY)^o14 zyC&-3L1~X~UwXk$&?YQsO#>?%W)+D>K+q;vaHV{oJOr5+7Dj+tV}tf@_TS0UD^GI@ zG_?&8vL)qt1W6gkfg6Irv=52V*HKm`J{Dz0H?1`bjU+wgtYdq9q+Qw>@ivXmB(^!6DmTO-tiMMfRpU6wESFHtct(9c0Yf7_vgv%l>Rvj50H} zQmx#lsT3+=f|!6qe*w*Z(O*%19>AS=j8+TET> z$36@Y4{=<`Xsoo$zbR9#LnHYGamBzpo`HAtao&xhOSo}(254Pv&svn@YpnnzHnZoY zBKz7pq+S}`ZzH$mqYR)f{v)@e&wz}% zsGwh;1J(ym_X>Nc&IYazKaa$wi3S#ff{Q>V2$WB!H##Bj@;B4}JShBJ(+|$2z{Q>q z74{zk!k34>Yr^j0XsVVV$<3eH6nyF{M0|qSzDM=!zfOV5tcSTD!R#zp5SRn;Og0c; z3>)ofNgq*c1HDthYdOZUKR<8s@lQt}Vs-Jm;YwWLzMm&Ww~#|>moDx+j{#5RvTAl` zpgfDfx(#}Ug6f`e1eyKe&~YQs%=Ci9Fsb#n#n$@gcXGfD<+qml9d*GF0p>o#y-mMN zVGyzPZ=N2}f-%?geg>NYWN4ydq>{OTKrzW)2pDG>fB-X2xx47^l~z#v5&F*ltT_Pt zp^pfyV!t&@{_HtKXg!F8lZVP}3qF9QE4uyp`vth0v+!Gu1PPbmQW=R;w2u+62=~EG zo=oI9=-7i}XgEN0;aEtFR>TwifAlwWf_yRD2DT`E*Iy4A`BjAd`#;|sfT+BAC4c(2 z8~p1&{Pka`DG{pXH~9HzrGNTv5j=11>7(bDe>?wwo{Y5_E;&peI|;(Y|MhFehoRO% zb>``RHsa^-1A;{u!0T+4j?E{2cHQq|_OI5x0RvWc`m1#R2Gg?vypFWTfwts-Z4T{| zD}Vr;4Px{A?^FD2B9b0v$z(UX?LT$Q-|x{9=iiOv$e)I=|Jge*fWyv0o14=WKm6~_ zL9o&jYaQppDE{Tz*=|Cc!xW>ce@lRV>%Z?;f*?Lc%=k9j-SEmLlr02{zWBLzUH9vbQepwR-DfA#_35gOgU_Mf70{_`>D01#;h zVEpqjCaBLp1a;*<&i?05P%hfvu9%DdFGpht<%0gDk52fMk(R{**sYL%pG)gD^0$z}Psrc&Gy!hyipo5C|Ougo*jb-=GDceg`l}FiGzT zDq@l8z69TQBNqxu%)@3;s_CY9Iu2(Qe&rsDgG)(8O+(AZ&cXSBOGH#mTtZSx`Jsxc 
zn!3g#y=VFchDOFFmR8oUZEWrAJ>GbFc|&}B!`_BRL`Fr&Bqe`HNlp9sDLucSu;_Df zNoiSaU427iQ*+CguRXne{R4wT-zFxfeooKK&cRmK);BhPZEf%DB92c^&(1H9zc2sL z3uVuLQVaF|Pn!KjFA|hq=olCv4DcU%p`m;Kp*RT!<~>0yQbk?xOETQ!@iEad@%<0W-6F~@AKt+L zd=MJSnLs3fJaG9rhBFua|F(a%0ny#q|Fy|q{DJ6h{C|T@j+r8uNDuvkyvybnU`1c% zq^KW}K33ffUd&me^K%>3;+`o1@;j6eYH@-55QlzZyDxof^Ye=fAl!%Vg+$mj+Zc)l z=>2;|1N;;)$#wsIqsXWTvbzJU7Vm&p`&ZsccYw`_+)+mT{%E6{g%E_Lf0=Z&56=P{ z(gDv&*Oj4T9-utpx*msK6^EVW5|!IRxsi1Uv}iIn;opwux=0K%z8XxSVCT6r< zHOHBd(Xwmr@zb}6%FQ2lfTdHdQ0Ip(7DhbfgcPj+G{z~k^Kgnj2&iA|RsudnyI{|f z#?BqpJ*Z(w7kthf!Psl)@bB;WKhooW!*@(~6aTdX-nO--7I|&C#=wW?0=5sDn^e1J zr~MH@Lq!iDLVLhX%I0w@KwyBsBz!dPTE#7ZDXf94cnDCIq$2HZ6Lf0pC)_9Yotfw) zY@=K^!YB&JH|S8C^o0Aq)Bw&tOYlpfo01FZgC;S%5dj~3K%kd#aN`9!ZOuRRxWWscTn$m^Ps-4Z;gYT-w(dw7_vEjAx{SV?2G{JDqn( zv#s$+IM#2gNT&@MgoyaAlGAtbOFNEYAEgMz*Mq~X0Rwd;a7wr?5BEs|=TN)egF$q1gw4QHr zHUx(n0kXDeonU{P66EODN{{mQGhQBa-tX~!g1Ks2K*3tBZae9Sqgb&{QW?&TH9A#%Thd+5{^!T zFfHLs+;YU^KnFR3GK?~;z#K25)%47VLicO9`lf163a;x9Fvb{|JGTcBv=&#d;SeN3 zNm_V^Lv;eIgDr!;=8sg7iVeYSz__?u~(Yi(`y2u(hM61=J zKES39mE7i*@9w0QW87gRpQtaNKacTqP=A&3ko0|38OXgvajO}sgg8&XSY9nMt9wTTXJfpk1v0JH)z3}JWzkzobzW9 ze7WmJ1}V2|SGi>!cJQQ?&B#hf*b4tZI%0%~W@RQ3;?Q#k;INOS=^_j|-P5XY9Q+{P zKFBZr;#dlTVc-Hsx1`9?-c&isJK!yo!Bm<-L#BGokJ_6&P(BSv?ZHMV5K5#xOYATa zD0r>j3fGV8_>^yX^$1B?W#}3s*uO%A8NKR`y|UHd|9vIE|+g$H4Ke!w@! zNA)Jp1gv2E3S7?EQ{)>tK*DNr2Sj!-&q86RC*;!9Op7(~%0lRVJ~0b!vzI!U>VeeP zww<-K8?YaVi?v5atZkoO(`-4b4Hll$1v4O?5A<-izrI)(jlI2R;|ISW8LrG1MU8kd5|r&8jVs+Z@lIQ~)7Ii`%w{2BVg>ZYs8+q5y9S+-SZEuD+>ZKU_NT2r zICDsu^^|Pt$LH46+_sCZi*=Nh~^|F*4WGUbpsw030p!BZ(>`V%;>+UfsyH}eZEMW#(g@(%T2a0NvX&Ui8qcFx#8hl0)mLNEb9~RFobh2{Pi$MWmMqMu3OG84mVm|yx(tjUcQe0cv=0FBB2FiS|zQUD3VgE zRxEo4zb!$^jyjNw`oXn|?@w~H4EDIf??qXpkJaO%6TV%^b#Lx7kn<~m?T=63EUQD! 
z!U0Jyif@ghH|DM{0vq<9>44KZyAvXk9&d@o=S4`6X;FoQ97h8Oqx2V5$7WROIge=R zrxRsk`fIRh6l3qlasroV14Z__3XIYmT{)3N^68E4`FFsmXtri&DevWU@-;Q`^)ZfC zd<*r8rUX4c(FcwA1;nWD87?PC%#7)A&ZFC&beMR}@mo2?=f$Z7Xl~m8^(j;|Y{k1L ziXU^f+8cNNt+q%UW8mA3gjeck(C7KVEU2N6ei^QGA1EsmAV=l6Z_o?#V*t+oL)zWc z|DfZCJ2U%aFR~h>3gG23*G6os?+V9r3!5MaBUfjiu0L;!8Ih#3r!l2Vcur&j%;o1b zHAn1XA6CxFJS}#NWx6SM(C=rcL|ALAdKy#u zGRUTMuCK)IxMKQ*Z+k;I<>fBdccJ_=cl44Wj$q(OfiS;?Kzv|{U|LkLz%ISAy>D)0 zcD$|?NN(uZdw~w&h}qi)@?$trEy6E~*HZZTU=1@fr8~J*CZTre8p@Vo1EL`Xkm8<> zdr7wS(%UYy%Zx9go17NkOENZ#pmnM|of4&EU?0&`d-~}`(_8Bek8m9=NqN2J3D-QR z6><5LuKo__*ao%cr^nZFzL7Mb0Z-9-0_3_X|LHC0?&m-5!ih~z(!S@TPbS6{m^f{Z z5PG+i=AGvhlZ53P^EV~ppUzz~$i5%{Ud*(VJW4;ch-ysF8vSZI*Jeggn4U=z$%~#E zFD4X>Z+-h>o)CxO1eODZ$PXCE3AZDHLJl3@+3O78Bh0aWKd~>P@e0c(XY33qgUy89 zKG@49Y92ddFx75uY5zdutvWtH&w|0~9G&EizfrudK))P%?XiADwxIYKDKtH{Bjre0 z{oTzgzQsBYBaCYy^Kd?VC)6!{+h5PdX^klM3tx13{;0KGu?D1C9mMizWba!x(V||0 z{qP+SZoE!JXn*e8*|XQmjU}e59DrZRk;1brFj~)oPJ|IKL8$Fv78H9;wI*wD@d#lU zak$br^JeykvlV#o6Se>M@hBSJU_Zv(l`$;v+x^NmSwynwSp8d5Ke{X_={j`$`-2tl zjzhYx_T7$l%=gS+u4rTJnP`=kAfbItf@b!YsZ%a2a?f-*9zGBD!$-`z`*2MDldD0q za) zf1_ z{C3(BE4=euyH}mJIMwn~L4zHo5JeL^p)$IFaG1FdyjZJdLdI#`Ok6Wc*$HFXf!-mL zv$xG7*(@-_5}~yGb%wI(0`>+r$i@2^_KP>n`A8VIux9Ih^RR0+6Wp+Gk<6lFdiqM8 zx9mH4TF##7eVLo?%_<4Yg#cU7HWz1pFz$bHlQW&NSTnrlqmk-P5YqOB~% z;=bVLNRb>IKo@Q%$(qxhJ)?mtA|gZ2pj1eMe%RVBwdZ#nmX`Q8dK;g{h*hy!AB-03 zNE|58yYvj(^qj*EiX&XE*|Qm^o?w|iVAW63pt4*1^oaY2i%?s#8_dAHO^YzY>kgEh z@_%W)w#;Y-BP;6PaU%FA@!FHi__%<b~&9a>6k>MF@ z$@VO_q(3M6;(L-Ee)+U^M$*L#?xn-amnk$>m>7{c+VF-<)yrE4PRE03{za=Qi%q3w z)+Xv*?df(8^~pft=k^8(RrV&Hl^+N{TQ$ZGo^WM9J2Dq16rv`X!rfp;O0N;PYX<2d zo-~r$n*>Mie{QmSrp#kZBdMUT-o@;Ewu=e)W+#4zg!P(JH&5KMD%zXx9|)QV2j9P^ zj_>ZeTK6sU4fFx>)2cjvKxwAq6ADD-E=?bs- zK)QZrYM^L)L&xz?%EgtXi5E8BAAgiKpLTc4eJ*&{J(J|Gp3y&ZqQ=(d<=|vPJc1nB z{BC90Y6;F7kQiWQ434VFjl-f)0DRH3{)^(QyXk*?(fJER`2SZ3ndoljAG1jdKfOKd zJbJafQmrS|!nrM$pyt?>$PgwO(78yTzGs_D^x+hhzMIP-Dl82U(@mmvPw;pMKA9;W zD{2lV5}iNuBj!4~7FJ2P!P>k7x;*cIwY{B7PEahI<1Ja&CHl7D^Nb>6b43>Ro|E-h 
zbl+!7;_x!aWKSI=&@{}kPDt=Q-?5m0^Ct$kJu@eAv=i`OuC?7x^u&xxrDi>S%Q50f`aen5q0=nVB{Do+!C{-PZUsl{V*Mi6KTkhfBa=4|ShU|EDWCv4j%S zWn+gEt^pnwE*@$8s0tjjuUbOFQzQo#>f;LZvQ;7LoT95i^3f*inxw)3(fheib;81h z*{4@lwp=8j)YESg-sO_{LK-mbt?%3 zieWIQvEo&Bfy!>zo(tp*Q1SY}B-CBQv)Qd6#;;uHL}Ft5ygKuwngq9=Ni4*Z_I z9rl`FIN6(MeJ?K9M4!{Ie7)BIC+hz0g6>dk8u`?(_9eu>F^ai|*yH`cXIXBTZ7zC; zL+4Fqg^NpZ+wEM6%x`ne4y8;N)^+KAxAe=WF~EsMBoiSs(g$;2V5wnio>q*l0H^5% zOC9}|=)6Pvt34Cm^VpGBRE-n!X&|cLuMKj{gdJK%dW>AYRluF-DJA@F`b3| zo~egee*F>|aeaS@@tvNJi`Qu5?@1rS_l*O`iys-nT)vrNB={ce;(el}vwQvAn&hL+ zXN6(FJ)3S^#^SS?hIo{}@=XmM`_#LAH&aK)u+}$L$^ELbyPsQE?*Mv>-KS=@*_^jO z8eZf%-FD+&sRLx~gB%<=_P^u^A?v1y{*00(hu88#I6sLWwmy5H*e@xoD9L&TwaUfU zCN|omjn8zd=T*Dn+o(-++bHTM_cV)F*qUijL@htPwAK}gkVu2u*-ts{kI&yU&NhRb zXNSOY zkEa*~_E9v>@D!pz6%K$TK69%<1-i{BHswj?kq61R;+riCaXF`U7U(Hm+F%TjHuO{h zEcY5s8uLE4S4Ecuy*3kk+Qj&1v>ucsCJAh~9q9Xue@2EqX-MKH-d&z26F$@k97&z0 z0-WY)m`{>(e78~Nq+|f~4?(k(T*!RVy|X9qn5w@DlCV|;x=MHGhP2X1RPvMAnX~;Q z#f1NAxQQQ66>6HZY&k*e$6{r1mfV>UokET3rDtlTQvq+vZQgn7N^GUiMum@MR^{dc znAnEfGDZ06Fj2ph^~vZO6G@Q6e7&^+Q;mvK!}kY79YPvi7?#D72WFMzXkYg(!Ep@? 
z4;Qx0*>Ar$a^;%dM9{?w?0UC=bF_YTR?O~E!5*8~UM8G-uFk!Kva$DLFJ3j>3w$}y z=-?a!{Av>|LcV((YJ06x5#*@8kMDABd(g-yNgey%-Djxc4g3y>r!u?pLl$`DoQHAD zc64Lk3^_)%EgY@}n$3>-9@*v+TJ*vUqdgyn_)As>S=L*}R-PLj`ggg7L}Sm#`+5%B z+D)4@G{!a>UPi)>LWLT(l4>^Ny_!B0+s-Er_#5<*qxC^Nlo9ct{7;aPy%wHH2jkGy7GM(~|nk5=DAFr68Xrft~-X^C5k7OgpP_kPJ& z=%AnOtbqyj8(i^t%e|23>V4}pAS?CjOAIH7>T%69uZT*`R0C7va=5FD%!hY8y&qg2 zXpPqs3og^hI)Hm%L~$bXCvH$=@|DT_OI`46UQ-XL!2DS9fQ;Xa8*1LTy*K7>S8DMt z4%C}7CpM4%QLc}m--s*~<-U-0SS7N?Uz6%B-$2AK4#OAj$gz@P8g)Dz6qD-TjxzoBHH zb@*IQ^1QPDOkpEM?a7P8o9iuDn`e*>WrLho#v09wkDv%*%Y-Ktxv_UZqJ=iki%eO^9n|>F%+-5I4&P-gzgR+h|>k&uNMIjn``0cr2ZFD*R|@tO>+ZsSymjy?QhIUkz@|5 z+9bxvP+sFh+VM|sUlZpwJuq(_k-9YO7+1jbzgi0lPVHcuITGFR^bUuz2~qc=g>E%^ z$IgtQV9jKINu<6L{QUUvnC_xYT(<&Z`xO({Bb%nqyi~^ApHw;1lOp-5n6Axey854jX)w-q`&(h0AnC_H zdCuM``dH1Ud9akOuD_A6^1P+g>!v`}w&2Bm-j@UKuJwwR2Jlt*@Mqr&Jl*zBb@`OQ zrYjsWL?e@aQTCq>U;aBLLD#g5-s&>ZcXHaF*J4T9x0P0o!XEKp^M=m9RemW#%1{W^ z7q$g4+Jtx^w<7wGKEQVT?`kOee)T?1uj8<1A0XgA+SaZ;*PX<>(pl_h zR=%@jy;mqE;Q2TrJWE*NN2C|5ApMAYy%ieEM7dsXLB=2^@uv7?Oy1L)^i?&IcAH>#Q2ho#sK#tK?zw5sj&zgjTtDFGkM^zpoL*fT+;7yG!ae9Z zA0gbzAgu!~`+GB?%1&s%?f}X+ypMa$!EF<#AA6JQR97Ek(pkFxaPK7y&4R~SSwu<+ zq)8qd#b&~-UUh!v9^-o@JU;|ToRFl+eWI?=#@TjSg%TQV^Kxd+^|AgtVlzF@ zatR~;lJOB?A)Qv_Ng>_?Qiq-8N{J>*pW2dG4PKwHFG)Og>)H}M5*_NPGS^u5gAxj8 z*QpfuyeS3;UZjkjWLn}%8ycfr-asp>YWuQbcriP}lc9H7$j1;nl3jd=&UBe8hzpp` z&&yuJGx{+j#G{UlnHf&Q0ZQJ5l$QMS&=!>Jngk}=u;-O&rA{O8+mZzIX;$0=w9$k4 zb)Vmctw9Nq*^;bd{P}GpmJsC=K3`|OP~ouHc!%77<@)^3B&~nt!VuviZycQcSzJnZ zNZO~wt1F&TV0Jn6mN@4Mf|wcIG_2Q&qG`cVZ~U0(=Vvp_+?8?h%$BT3>0g>A5wY4F z1+@>zg8Q_wA?5vElQ%#H#S~){i^YQxiT8fc)w!|dH@ya zOV%J6>FX^c7?YO(BBF<_YT`I|jHTFN&lc{23glcrtrFBZm;M4XZY`(_X2ayi1L5cV> zyxG>EFeg#!roLYQn(YX6-0T^K$IuAo5&pFVf`_0~tX3pW)}`WgeXN~VH)~uG;Bq2X z?zWShEu{k{sPKP|XiRaF87b@)fL8GC{FmkaN(ABN!7}BF64rm zrKakacEl2I8$3SQ#xuwPt7-MOvfW7+buWvG{l-@4?0UU8pKcruk2sr~9qaIj6pqtAFZgWO=3Cq@|7CUatGD?_gCmQagRh zq2m#eNa?Z6Lfi&APB+bb;tUOSE`bxRrhyaJw3##KYKQHIQF=AbTFYO!?vT-z%j|}e 
zi=HFA?La;_G|IC*5GI{5fAu|gQavx5xeRyT^4kGxw%~j<#VKLzmohtp(kdAje->S7 zC2VDON$&R&*O-u$tsW@T4wY16)=HM@1Ot;v;zlCJ0&7SSb@F$v;wE70vlK}>uosB8 zv|!;cF|oAyu3pLLSIEKt16Q<`%96U>3@%=V@mclpbQr)Z})f0^tU?;%bM ze!g)5!_4fcJmwz3-rAZksYb_qEMn78A{0U!Xz)#o;7Ho)F)`~MvG(Cv@$zDOM;WKV znq7D{>>5PtFLuiUm)Yb%<$bT~?7`fvkcbCzii6$KUrtV|X1Z&Bn-h=cb4WY#Mg%7E z0&hP2T``8j;9@F_}e$}J#*u6CHO~=D_YFK1|#87e+LZu zK2X|Zbaj-`PEMeX+D@O9p-!|sZ0O=?7(AD-XD#Pl2P;xuY22AP8_$O=ifwIvDjR zYeJ6tZYRkL1tGxL6P8y4#nG(DiHJU11LCO|55a|KmgdX)a z#exc}#UB`g-bTgIpJ<}wf|Dnc02v*yZmGXIa&U<+qleO)v#G-;nzwSTDN%7Rc)zlX z;juXV7PaWER8H?Q@wFU0uEJbc{qS1jjafm+BOnnJ?(@Wg-~9W<7poVE9NkR7;{v0m zcV~UCryAH6nLd42BRfgbmfqO)7!i<4Hs?TuK5*&aUVzyfqEAp>NvVm1#Hi84X&nl= zHP=WSr33qY0|!*lhn{|(sW3{q_2`Ghh;5Qvk2?Ups%e#|Pt&DWs=k8^@hYNS#Vg(^ zRGeM-q%cN69BmSKBl)Xs@s@sRz?p9Yv6Ov}<=*i8iyJxn?G921i*{sxfoUBAv#Oje zW^khLzUIcaii{=+w8$UsiZWy1&)2%ha;lJCUxPH##oZ>n7TwyXY#eBrgc_C?&q#K) zpZtD&2lS{}ba}eIj>cZ9;h$S~D$d02xL}MG4;0OCD!23MWa9TEqEfF>BluBH_AT(C zwOMr?W3W>{&I&z~?`8T{zd31L*nzlIdy-mgs7*pnE|_DR6ZEx%;QAq=>D{%aSXSi% z&v~_RUFTsUw(D;0ro!)dEj2AE1E$!STe3SKZaR=Mhu`vrf%x<5B3+rCj_{6Exv%ZT zt4qbGQPzsJ3Uf9K<9^&DscWv@VUkQiIWH;D9PBP zaqZHW6-bfdvh^hkzaI-l5IcT|y^_>R7S<15J<7&| zUm{Jv!U97DOkl*UEP06?cG%~9TAZNbzs8MaNz1d{QYNEr3zvzpY=%K-a|zO6-D&j`-hvk9M*#X2#>;8*j!^0pk~v zB$@^hxbS%rq1#cV)yeiwn1&v!_btY&553ah*n5ni>(O3Y#$K`=AL0G(nCxTLbMb_U zdT=jRpXqHPbqD?Spq2-Y%hk?YXC;8*()?S#E0efC+w_^H+8Rf6#Tob9p#kLwrDXNP zl6R8SI=$f_<`;$&q8qokNE?o~ZTjVq)W@r?qAcvQZ~eS**iIy+AMeTb}n?ayx~xAYYc+A0u;m(^Z?Si(yNqB1AH&Bp*(!()=jBSW{)T zI9fz^8Ceo~g?5CB!wZ{Yu2YU-+k#Q}*K;tO5h+)P-qTKuYpaQdVP%WP&hhY(63Z(^ zIXk^ge7>K@5fT@d~P;A1Y4?sz*b(?>M|*Va>4Q$Y@2nHLo+aly?&Qb9k(4 zV~^(c2`RhZnif<`i|2vCG|Y@@{+_>DvRE{c-`z@0%hl_5{Q8m6T`6NB#= zyzRAoU(+OH$u;-X)Dr(fCceYX#w#kp_^SQ-oy185fP)NxaaDtowvg&cW zxOz>su_Fg$tJQUnP%nYkeSk-uLk6y@bS9N8O`Fw(LQ(3f3ixB!j#Z?;sGr>zPpw*Q ztO8ieZY@kM(vSI3sac=)TJpL!qnYWbA34+OhKL!t=k|do^dTqoIp)RL!M+FoOMHo- zf0UfuY$t=6*BQ5t7#9w*y-$p_x*1e{+;J#;p>Z_bn!Y*TA4sQ7#vhixcgtQSjxEz7 zFJY-=P;YSINPioO&&_#MAPqPa60ec 
zAS(r%7;5Uqe?YI9HK(FOzTxT)Hi0k0q;DU-lfS}IL50Atp=R)~a-_D}8%Y97N5ykN zRK)V^?;;j*U4553V7-6+wnhFn)D~!LuWNrg?J*#s7i#&Ui^>0WrQxteoyB$Vnakzx zggZb1T4ek0GTkcf{#(#^o1Z0SMD@wT#^c{5aG~;z&_e5!enNN1E~Zz#4p?xNAA9>2 zZ-Pp7Jf*F=Z*hZph@}Z3iKZygMNbG+W!E)$PvuWkZH%uQyBN?Ae3ATGpQqKOrG~NI zd>QjQ`Y6cm^{ay(B#{>E7Vftb_X0SrXls#TW*RLzwWbeAd?1( zde?&GDxs|HNZyqL(8~&cpqkegU^v7PEBe6+%lcfKx*Fgx&Xg1^#{V~14H zyAKQ7+{}l{R|aVO4H^574Dmnl9n(Kbi|sH*EPP+(k2evIc2>r7sE-wiJgi!oK8lwQ zSAQibbA>n)wcJBR(^Uhc%LAWymw$xnuc=%0gZdX%BXy^&@KJO;azjC}g0gKXnKa)v zA$#{e_FgkN(uu*h!4;OQ!17WzObX83?JUQ9?UJMONo1Kws%t_%iM=k-NG-|GgK9q0 za|`+ixQvK_L-8ROYZNujjN1}p!nMy5j>N#fK2Ku4p6i0D!!Hu9 zby7)8k>x!m<@R(@i3hwNx6KtcJfw^>po3J3B}iKg3moN*PEt;Wlh)llC4XT=$EF0 zg&$@U>c6tEk}zX}=soobH5fx1Rj`T5RUrQ?FkeQc9JDVcKB4R*C_~7|Kc$*!f z;Mq*Y{#gLk{<*dgUNr5rdgDOO#|W=}H`MN*g6%5qXNV6l^UpBbF;BnhB86-i_7dd+ z+Mp$~jQ1clR01&?bIZ3QcdxYxKJuQge(<`JT^ICd_=_m-g#`zp`VK5u_BaN?A1TLy zEU0qOCVF?8XZ@nOg_N3+@&wH+*Tpe2)GvtZU|+_D_q;4!Z(FLpamz}a%rrN|Yb$Aw z#nr7FIL{@76{L!$bA;D|O7-HKStw5UO4k$OQw?i-u)=?RWp$(DEljdT>tZcP9K19v zK!S}Czu}vQMe7+j=C-35F7z6-qxS)2=6Bo?nC{{)8V@-^pW#6v_41$|50|{(`O?LP z)+rY&B?DmK-^CX{z`R&piH@jBlC1eUo?SMv(TQ@ zhyY~NMzT(3(v|dZj^?-N4{&@WvxfphJ4Ia8?oqzcO#_ef8Bykgm|;o{1MRGkp2p8Ddu80z~ z$x*qTHRUd}B-1!IR58u03e+rWJd{{*9%#3Q1ob_2m=Q84J_8qETWhNv-5hm}0_^a+e4_Q6q%K|(bpY^GEKXNNDX+o|$SkLlW# zw{Tz8sU+zT20B%Ie=og~Jo>~2pc63wN1jd3R2DWnNnL0Ob_}PDM`bmG-Ghe{=bVlYF~`TnA#8_x(1hix_w#;#9_Tn-{|B zK$P}WI9p;1UOo+LOOxV1AFOVTYims2Pr6?@`aLJj`Jgpkg03|NMN)2I0QDs~%p|;q ziIo;(#18#h>Z(z7Re)i8{f>9Xk^<>Cmp2TB1Ej8Q%dZW z%$5=Bq>t;Uue!6;M<9C&47?Nxc4b4}1#w13ix2l>*CzBIHviW6{U{Aay&l`-W-JAM z$2tRL9IkiL)54)i9&QhL6}u8rPkeAfzk&Y-D_ClmJ7&7gS}ZPbnnm#^G&KCagF2~t z28#?Cvm^sHbD)uEQ-uS6p}iIy8e^hSC*n!YjM%p$0<+6)>VaCnK}CQ!xuGbk zl0B5`!zNOx{FL^fvfDfwMY1TtL^In z8b@~_6~JNqd3?y)km%k(ofF-&$xv0R6PFi81rp3o7}EX&k{|}O63TZ zDcach^QVw+ucuYx-!Pa31#AWsb~DW(GF_Hfv z4aa8@i5~ba$6AlOz8sNCpnl|VfB%?2$#JEd`a?M07eRU#~Ro0zw78QwJ&2qAg zHn;YVEB^jkH-#fHTk;?s*vU(kT+>*6F?2t|fB~C@c`9WD;@8U$EgZSDzRlRApzer% 
z#h-cHYF3LSxns_<#zgOL)r6qfj1SL~Np%|;0u9+7v?z8-|7;}oqd(0hDzt@y;LGuW z9P?(6IpSobJ=q$6sPgdkX#66Td@*se079;{Cu{x8(HZZrJf|!7vi7>-*)DZECKXFr zct+W8_^Tg#=a1OnTQw3D>`xul$jFDqUZ15_~^2SG=o=&4YJtlE~|vEZKU!v zw_~(1B4RN9_XAAQCLafn1~TXUEc33nL5m&CJ7Q9!VQ1V*-gQzI3d(PnjICmbFc?dsR`;&hnw=U^qMpM#L8=7Zf=R8 zhRpA$-jqLn$h3=*2SkRYVUArdwViG?Do9Il z4_uqAbux)1yevhssAkHHaxIuH^;~V>#TDjv(zW#C3_|55Wqc6dj&JcC;;;wE2J_V% zBz3q*#TkCgQj?>d5l{>`bnvAqw^KUg+c&M`y76Y>@c1VQCf|SGLm$c=jM$0^d|>zG zgXYQ&%Yhh?6MbPD0qZ*!vKDni3}V5=cuvp)Clo*x?K=1z7+D({5PS*enxbu^at4i% zm^HBnp$Cgy3`8cUr83qczGLI@Vdcabicl$|18$PBMN+3y~b7-Ud|bl~JoPs9p`6>~rIvpn%{1A8>4GFZSsQ6D-VK5$hkU-tCvDV|;jC5W zr1RLc60}(R9m$c`j|q6mQNhI{{N*d{rOsoI*oOCNpHseW?Q#`;9-p-~9Cn(V;3=41!ZkplZW^1El{m`rvKmttlkp4p2(g zf=}{zQ#n^_Jdl2(Gw8+|+R? zdn*8JRVn5Y7Hii=YF3A7H}Q?n_t%srzH7l15EtA?Z~41YFsjD+|M}LsXH;Ti=BAJB z-1F8x(TeM#w*O#XE0%8O5#IE0Xi}C)w(DzA}2G52* z*dA?cwru-Y;S0z7>Ig{xcp{55xBsFx;09}b^qI-$Tr4KT}O5eh}5Sj#3x4z29X^KzqNDoUvjscgEJee>PMbKHM6qZBrH{J#id{=U+x%qQ!hUba%hPF7p@`W z(4OaMyMZ^!EIy!O?l}j*MT_6}f~tcFoM|KA4FuvULfoz z&*>S8UGx%7J-_V*(%jC?yfJe*U^F~`Sf{bMzxeBSX&qe?LG#|&uVAUNzo5?l*bWLP zFERxo#T4oAnzs9>ux;rX_&8C?Qqrk%ba;{+ZTFdB7=&!y4$%~~3P+Bc&kq|~3XT4H zi(ORP^Zg0B0?p`C?(`{nJ5DE;*NshM*$<=VKpGyKEikulKAQDcR%77FDZ_g!E)-3u z1(L91+Qsr$9cSFUmr|y)!PiagDooE))Pw9l54|0OZ^M2rC1XXM;Av!&9zXX{i4lD2 z<>L}EKtBeacxD)_f=Fjsp+fXtJ{%A6JbxA}r#~Xx`2!}9rRsIF8nbbl1LJMb)SB0{ zAnI^tGa5+q_$B_s?dxx4`6n^J zx-MmtjDaPd-*0vAe_0R^>v5(qeJx}J7fd3$@Gz%C=$Rtj*68ren7Yp0p6@1Ug^JfV z3BJ@)9ie!f%KFk(VQ;{d2--XLF>s8*zrFQh!)ZQtOI9%cY~8nW01E6Fs!nq>cfwpp z9cXJo`y)d+C4;A>XLkT5vRoHwQH+#-)DnZ&8v&y$U=c8gBO?s?Ab2%`qfeXZy?)$ zl;43Lb=HdG+2St~+kc2ny4k<{fu%-wv`TTUVULg(eT0d4bWPufV7RZ=R47TNIhnj$&$OW-U z|Lc~-R|*O|8=OND^f`Kcnilf6V`{yiuU)2iLHqfLo7~1D`5(*2PPYsl?Rzco)@$9{ zF>v4aqRQadB&XPH=cCpB^;q9u!Lp1ozukjkm+sH-9~QwkM-o5J&Py6a6x<^njb8zE zu`onPIm^rxAvp%VKoexxe0++OT?iX&pQuXW(Rr{7Ri>~t3XFOT?y8BP?NKP}{(Lih zpQypUNe8BK^E=D(`HfM`?_~wVRzDNPkoV^uN-;d%w@36B&I~WVxouV%ZAN2kWn&MT z7yoSF$1{esD%5^KEM?*CQoe8fpyre~_)UD>!JTjng_n1l0DGrdmQQ9J({v{|Of$TB 
z%I^nEJUfuMBr(z7kVn4*HZqKYW*#Tp(Ei+;`db<~hu@kR(Lm!2nW-HajH}P*JV<)b)gmGqF651lv-i^@=JNb~$qW4m#a#lxKWJsh zQPd*Zqb(q-AfJ|90YMAv0eqdRL0#PdazOr(YAEIVCxb;Z1;j#f5y5i&@Y7^u*h+uV zY8Hug_VKqJXPa>1Yd94cR}CW;wI4S=qEU|7Ulr*Ch2Y!L^1dsYz`l<9aSyYqpDz9| z$D8|jOY&Dodr2y_F6@~K;i3_MbD0jTUXww0OwxiK&m`W7HZ-IU@NG!`APHf{Rr?_! zE1cfC65!C|dmfs$MA1aqsHCPy9i;>Uo%ojzNRFtTRDJyzl~|%a#T6fnj|==RUXP5g z$EgH|v-l94W$lxG`z%Dx6{}LZ7H+Yfj zfV)#u1M$Y%1S=rkeUwy8@DDiV5^|~%FGEOTV+o2{B$)(ctX_3|hd(jNlp~qQoA$=* zZHRirbcBT~;fUG^z!0!kXq#_CTKm9ryB9wpuapz0!t0|lSup2<|7^^BeZ@Mt5KygJ zSgN1!pe>|p@le?8z$3jWn)gkd<)iW9xFt>F&3LirPI{y+(zS}+jGuA|SP)iskb*9`zDVy|E0IH<$!bz(w;oQzMVeSsl|0!eb zQChiJZ=(5g{>itBTi^aI53Z`=mLX>USx}DsrEmYY8O^##?V7k(p%_O0jl8#vinGhs zMGGy0LvRls++CAkAxLnC;O3^r1nd4L8zeG-|2)Qm*1oiQKG_>$ZOnl#4@`V2qgpj z^uB>A;v{$W4X2cl4=o=ygje9{-=w;!J|B!LrRFfB4Ui=>TBiU^9JZwt;bN1CHYviq zv9)I{RNErwxpStL71zW0pduhh2`DjnwTi!i4&10hr<9xP)qU0NOdB$l`TcHQEM^0> zB&@x`Q>{NhXnyF+5OMWHeHdBYHUInQx>7iE*=uy}gu_C20WwGK8}Hi3=(<7^u?(+F zQ$o%b74~<*G zdzlU7OcOV9Fb4z2HX0a;meQ)K6HmjPG<+ejcn-4uI1q$&vxk%kIV-Pd>YlRhK8AP` zj9-$*UEh7+pvlpcVKNar(G2Yk$!{>bokFmW7r7^5xRms;ndj)CZb^GV{Xv({@*0AF z4cSj|K5j4~&z%k!p^#nl`fPx^C?hHG8WY4(ts}REEjUKUPcGO=F&ZCqk0dCrVKkjO z{Lws?$F(*|RreyL)OvA^2HE9WF{Gm0)2K(T

8P6Jn*YK`jS zOCU51P#Hy?agTuXXD>vVd%SF$q-#M9wP~bVbSc8JAAZJ3qx`bjc8Ku9oN6OJ@{+Dy zZ``hLNSe1(a7%1r|h!SQ1|z$uFf-!zqfvq5-Fh!a8)2+x5q zPjUzp$+UmD6+OsrDac($?#o)vL~hGbNn^gL_y$K*>&RwOcr@L&dZ#4>7Z=t(s z1c|v4byqM5<8{4N1ld-~D; zQvKVhZnkL>0>I}D`R}34}EayHi~f?DF4}5BqFs3soU#%k`^1Y!k^1% zKlO|ms<#*DTiz27ajk(1+Z1?s&6dan(UFH#3>vbQ2NMO1jbvn}Zxtf9D9J;W`V^S; zrg`se9*Oi0L&a;J?UcO4rtn#6kDQAdC_{HSge_xdt%vLqu+Lw6qPWsKY%USVQ# zNr=SU9F4dLNvFY*w^V9;JyfR3Y>nKwW$cLK_oPIMS6l4KOgZ}XG%MdrP;w4IU3zpd zbDTwZk)w;X^kfZqr#IGE1fQ@w<*3k2cK6CB*4m+CA6u+8#xSipb;RP4%Ut)zJqe!l zn~+4p7$Zjd9N(r>zBgzF_T}3RLnI#qXiIf|*;VYefqv~uhXe1RMc-q4vq5)Y{AGm_ zETb^`lr<-;Glf8z+kueM7SlNo)Qnnc(&j8GSUwp6f;oh);@m{8Z`kD?dU)BUiila6 zh7DUqifMiLitW2K>g^P9mHKBWz&{n+|6)J*r%9w$rx&dJs?)Z-x;?C7siRlhoNzi) zi#>szTeEe{A7N~PMTc!n(A83F!htFJh6Lx4AjJ4%Jl&k<|oI59IvyWL_LJ>OA%*haC6na@+_ivXX5dZ%$ zLjI0j{iq!~&ia*>xN(lsj`EHI!b<#MlnA~szC&>ROcDXC0kGOc7&iG}Y{?GvHK~1@ z#2BqT&&wwhsYtuBHs56>HQyohy5cM^y<7T_M7AZMfw%=NK1!T;K-(aPzpy6ZOT#GZ zQ2F)-EMT8o;RQM{ZLABa8)16U=Qs@4zoo?0Q*}Fh_#AXXYXUCh9kua8hqi~ashsLf zZ8j(KoXzA7zHn9qi{;H`tVZS=9>~0Iu9stpVDXdN;G^e@5e80n?RZe7EP5#)w?)bR zop<;86<>Ee2it|+Pl(6fnaoyQu%q61cZ+#kVSLu`w_Mu2HHu#1t{1#m_7}<*4|t$- zDft6ruT&s$-Da6KN|k($7erdlg_4a~pluxD@$N7n-Ru*Bcr=r2LN??JSLR2u*wL!X zc1+7m+8|*&_Zha$@cM6q>BxQDoktN_G83+4@B>zNyiGbwp)53tU+oKF*=XS&Hq0L* zOwC0>MoK&KbR#vabyn{&xkHUG#YuKuW8rSZC0q4nWT&X=8YEr~1f0NyVsd#$r0GH} zLLTItQ@nPw@RzF!lgsGk&=MR4FX;n>xLM>W(?@eSFgK++yj5`YUm)1kv+b zbx}&G$r`j5h5NGyHugl7(L!cTS$$O(74SDQZ`w%;k_x&VTyp}FBv;NIJ!voT;3fxM z?9l8|)m}$!d_joq%QOo&IsZCM54SAahoxwfKKnqt;g74AzuE{;uUM>hO63w>ZKq@U z%9RPa>ND9Q zy6VWrEdRUq@}ssLYk!rdR+rp)hHXOzmr0fjAwWXJqZ{XDI#nz(-;&_9(nQ(h>@w#E z64xO&@o12qe6zM5ttt)YJ_5WylQx@Oj$QTJPfqFFAg7x--h2pQmk`~}3{M#!sdqj) zVhQGHTJRu4Q7OsZM-kplZIdkh$_UtoO3@_z+aPC&&dY4fuwjKv`wie0&mzig)c0kT z65{Qdsb;@^9+{uI)P*mX0>k~YbyedR*q$QCXP@n!U3B!mA8%pU7{lNx%bxEww?L)k zjL~9qO20E5N1oDKV#$<$8+e91*)1q4aHN#1m7_l;l+8eF3>w?gYyATh=tEOt9i}tQ zGjHB%F3rs$94-N$*&0BZWNqU|Ing3xT_?0tqTdxms}^Sp1_eM)Lup{y7_#{hW-Dl= 
z0ZvI&QBU|Je=-V2u_E7EgEcJ*V)1QcM;Z$=3mhXosyZE zf*S~tbQ8RTWVR&8-=y%3*Dd6~5f~*75O}aw;^gye4jr~%$>f|9yeRk9gNB8+$g{W{ z)!`V3FsYMB$SEtz5{cDjFS31Jl(~9VuT<&z3X94yQ;TS+-&PsvrZC9omWz3-d=gWf z$>y&nK}&}cnEZ`m)2vsO5hJ_G#6hG65V&w$X`)L_M%F25um0N_a6k<4ZlbYmP=SALaTMhhakJbCtTING?uj&YMLn>Y8}xW!Tu=PQ?c z=us5KO*UrmKQJ1}8vU0wvA>?%XBnM+q>$JUspPDWng9NX)kQ%9o&KSeqa|g2&ZueG zi4*{k!0*P9w^kN=cIdw1hbe0z+^QuE;W0N^s3C5#NDx2RBv1Q**B^CN>`YXqs4|a# zCMR{WyaknJI(2|F>T0tOD5z@)NLGI6OO(^1-1e=bfMbm<5fuM4S<-(wc=fDrmL&XP ztUCR#YvpChhoj=7;I~0aqR?yrq;0k?4n9~+J>nBG#W7&!4wn!P7I+cMP&^2rFtyi% zDO@AI9yA1Ijb`l9kMTP$L@!nM3GIm`jU8p;Lf67gBNV*)nEO8B5GOKN+BbBbMoj9{ zDCjfaW7v&r-O{fOB5a>U95Y|^mNspZT6< zj3juar&gCkH!=0VNE+9NL?%_2nW>`wGIK36etvk>A7V zdi6!-+g!;g&Du>KE?1h6Ui`ce2gWPKqRO_fuZIv-jA#KZVEyyxYcu}7!K7HM8=}lx z=jzo=eTcabmfHi#pZc8GCQfgUgOymIlCeTk?=F@l#`YBdJpQgZALfnI=bqwAv+oUj zvpkY^;oVRBewoZ!K9u|-5&LSF@Bg6_K0oF2hcoF{x3(zEM?n}My_nra^Ck46J>srP z2FehczjPW6g1~{YJzOtRr3p@Fmz=8wx#9k$pIJs|73hyQHGqj# zP{ua2@BY?ANqvKOzk#k)FIu|CFG|teMpZ?@&m%BE;RAS*cNg-`G}PD=_(F*Xj{0?6 zt2>cJ4)D3BoDjq_oa>(8p}qwLyrsTec&)g^Z#u~D2WjhErgtlj+VG**;VoImM~oF> zW6eL$;e z6=+rG4Ss^WDc>OMU2SAdHpf^>p_3l3Nkxo;l;dQ#F9yH>k16ov*w{H?IHJ3V1@56l zGoJd@S*1F;oB(Bp^I|`+HC3!;yw;xj(A2<$H?Y2kHtu$;E7TTSU^A;wXboi@X^zw3 zADvX&wx9?NVDJ5hW|Q{!W~-VS%Kq`g%xEh?8tNLcj=u;(`>?S*zrpY1N#rl*?frD^ z3zPh@;-ya3(}bu^W+MtYzCS>y=x4qqnT;F%ST{+=`d*{Lop2^Zi2^F~qa5Dr8p#<2 z^9Ka)trkt09qdrLq=Qkj(_uHMpbn@DB-596?WXd(hldJJscqn^r8iDLSOSx&oO=?S zofeyZ{+$%yFVe?2OW|F+Mt{#nw%e_`JLG^Z`yo3UX&W`G>W;#%ii z->B>kD_6MAbL#m1(e#JHbXRd@=BfQ`bVJNVYj+Zi=;T`Q#flGsxSoLw-hqRIo#HB7 zUa58rjhKOo@&_|nR9wCf26Ro@ z7^f5F!6z0g5)z#oeLLIZ6!&T?RK#dxBfIVFj{A@4N&9jYWG!toK4@F4{(Y%~iKw-;Nx?9UQHl|1d5M?%O5^}}pTy(zGA$`)@6 z*X_exzTKqs@TC@x`I7M|&{W>el;h;P(VkBStmc`wC1+#GD8o*Jc}fR)9%VFYgEncx zqVq^U-%-BSZ89&*KjQm#FV_@t0xx_1!tp46+LQw2q9RMgA9vwFvo^E88+!CZKA(Hg z`W4&NIhkj4BM~!M@* zq7MX~!oIoh^bpFr5+P|VT_KpK!Nt@q7+-TuzXJ1`?*Z#sJtJPlRx50I>cQm6N`=)3 zt93eb<`6v^X^~v7fa`>oyVuv%YZ+DYW9x?)a^^p4q#PzoIF8{SbuwVCI3jG4+Ku02 
zsMoF))$AUp<>IkEy{cwZ|6&im&eTHW*Mmt=Y8OU5yreIC{Fs_iZTJk`KqAiJ9IW1d z9Z1x#2UDvMqlQo_wfl0NYt=?u|vGaL)aTAWXxQ)~6pJ#O~ zZvW*Q4R6=zfT?jVQWCkOgR(Up^zk&T<)x>cTaNZk*sCw{{8zo{RgpP3w6~3p{m#x1 zA|k4E3o1%o-DOD+L=TP8AH7UrGO9^{1coHJBw0jixSMy70AZb5CmB?{k6(Vs(dn9>xx@+5@B#v=GAhuuZUuT) zol(MeUjPWT5K*5**7>P^)g{EZ@k0U^z)H`x74cnfRXJvHebia>+#c=GL`;WQ+J@uR z@{xza`3CD%k2*con4BLNsIMRM{oK;6pA|7Q!3-{1IIQQJB^ZBA_L3ukjzZg1a`)qQ z=^kQc%VvX3)GhG{kxRrBeCfiy)ici>et9w>@CW7Ra2)~FJLU-{R>fDlF zf=pdsBn9qEl3`oxoU;dCF@Jzwzl#yhYu3i!!62s7^iEK=0Pl!k`$zH~)WG%W`@DgQ zCJonVdi8!+l{hB*G!|4|h>iuo&_%?Ap>DmO?>-BaRqN_});7iD%Sn78DOL!^-xSC8 z0T-v>Usua)T}>Ar+L88ItlpMEj+&K z$go(T7f8nWVlwY$s2$BwkV2!&%Ql*A$VJ&%G)Q|B7_5RTsJ=b?q@xtJXxK|VYOy0Z z00cAMrTT}xn2X%slw(Ex7Ipg_J&#Ll7ld?>0K}!$tX=~WAQgukYOEQp)E~o3ah{Cw zoq&nQ>}w-|%9JqRpO47L;-~8scu=~GnA~+r;=@TP2Qm*E;b>mG&(#oP=XU1Uq;RSr zfm>+~X)y0b*-N(!HgfJ>FEtX4@Wtj`&~MA8Ik3)v$~R*-v0>(ku(mTwY}B4$~#h_n_4c=$r@<^mq9r&~lF zeZ+HkUb!~7Hl$kS;wsk3_hD~2;+n<>i5+uqgjSn*Y#8F)u+HQKb=uR_=zi?x{oWk0 zL)ui&uo43DmWqviz%O3T&xonAVwohPex^piPzfd$>uS2YO;muUgs`_EN2@%It_l}@ zxA>TY-qM)a&>zN7F_M!!Ovi zqLyq~DXU$A)8_!zsHa8R;D`I=XH3w4zbJGKR{g_Q1(YK;J^>h07_y+nXhj59YG5X$xKM=X<`=pShVR< zGx1UbnFWIQTY$wE;<>JL2dCw~F5>^70CJJpI1Lk#*+hBpHv7IdMDwG@&dxOy??PBFcT^9h`kc@ynE^<+2MJXAYD^PPGh|c{Ng&-XL>DIvVd5_0GN2U>7EpgoNb21 zb)KfKKJo>1_$%t|?G0(rAZ?acy#$3IVJAB}Z83UEYSf~wvuAOkoTEaK?H;L9x*u?N z+jQ*Fp+oqIH-lNX^_)O~yk3&bOoKa!_;s<0tV%8j=eGu3xAueRI17-K^l%WjJet!- zY@o3xBWyWI;Bl}>D{7tV>G+_HQwLZ<4;O#TlM&VsqEfr8F=1nR!!CNg?eO3RfN7Eg zv@NME*^B2Fe4`0oAdE&#loTzZu*UZZP36K)gXrRoWA&yMz65|8V`(#kPu1pD(V;Pg zuGfozzKN3)J=!2G%IG{1F|8?-sl^Wj6;WZS+IL6&DT%1t-$@B20d1H0VOd^z4INe5 zkaRR8sHZAjj*Jvh87^2p}~DS+)|CKW(PH!^k>{{UH+MI5s3vF6YGWO*Nz`623<%5+QQ z0zpLM^nnEns2YuXC;_0bFMoh;N~UIfI5$?D)v~mw0H$cPd+8W%Gt#Id8oX2HOcIcO zop)qiz*hp_h5+AA1$Dv1yIavV5y!2sNy=^l<4Ndh8kDNjO}|H=3Ji{|BG)+hQO;Xa z;3|PvtVtr5O6{$N*Ovtbh4p{N4UKZv6Cq4C+t}E63qy`sX=FcyzoNCwQCD*WAq0T4 zi}w)*PW><~J?;#$a>q)nSEW3yrKauCX%%-f2<>+jH{v)k2~+f_aB?&Uw(W)6!ZP(`yaCF 
zd%=l)a=ah?kS*!Q?gfr?))w54&-|t{4b~LeG&qkVu7KqjV)1)!nd<+^?m-?iw>T}* z=+mdEie!^05DgWTT^L#UFF!Ihq1evH=PI)&m$z;T8p3H4)a@yb*@%gP(JsVx!9Nlt zI*tvh0Tqvt!j3gmiFjRogF}zHcx;VX@Az#;m+SY>+jA<6BN*4CU;K}D4j?iRXdg-L ze`?zM|Fbyoe^0Rb?;zq*aE`jf_WRX^|(nQ)?3tKeoL1Xq-khUsY zoP7mrdE<}W4Nrva5(`LHZW8A-hOn`&n8rw<$9&}a5g!MfZDd}n%xZ+?F5dXd)J(8M z*6=y8$m;EUdOsZp8axG-yG-Zb4!r(%RA z0UV{5cbbB^H=Cj+?;{oUNwsRYP9863(t1&kRo1u@1JjMX5|G?deG{x^v9+Yn-qyjs zf7mfT<9HucO(7>bqW4=fRE$vxOAb{mt!VAGGse~~)v?*BJ4xhurl^equks!H)!c%; znkTY<^_NeL8y>c2cc-H8mt**)QXuD53YZEcRNGjeU!{_h(RNLj2X#T9EBR2yN$paJ zR3`thBA+j|bon!*$h{4JGC~VN-1>T8I^`>r^4SPb-<(_i`1g==9sSjy4UuMncW39B z?lIkDfVx;YTf6ynq?xBN?rX{v?`J`;12EUiW;{E@vhOr@@ZfspA*;fV>O48Q)=)xs zB_SI=S>)w2QwAb|_3P5r7qC|lFWL7OE7E^}M$uhNY^Oq&V#tcu(_u&Yfb`tw)FNNP zdz)YNOd`SJ3&U^ppv`rjz8;x@UV9Nk(CdiA9}pDu0Xd0Ocb zxI7D}s)l9TL{==cZY~&TeT?hhS?w~4TSsVMb~A9qDiTYIwc3(BxeUz3pw#&El|;No z-g404Y;X7cS>IF)mLGG?w*WzZ?3)}BQCh}3`&|D$WWlOBtFGgIOf{$?v4cSil0`z zSD3Fc+hRu%StORZC{_VcetB-kvT7N#kq!nU#yo{K13Ijfdb7x3#tx~IhZo3bI}1JL zDVg_R0rC;ajYnS}Njtiq^Q-fp*35_Y(J_}1k~FqsyzXWn0U6mv$fR(qk!82c%r|=J z*kjy9F5QR5+m1N-EPT*48{bG(5$^J%*^O|5W2x9(2F z`Fm}hqhJ?ZK(`j4!Cu0UYEqa@yp3sm)7Y~)gdH)}brS@Sj`{3k9K5&jc{3_l zTm8-7^l;$K2-c#(L~s31*_GXkd8ckyLHrzmD_R(qKv4&1j+a#f;uI`s$8u54@DK_NghXU>alNhicGob{`%jP=Pyh)?jScRM6tGD&| zZ)z`b44O-vDBMMPTLftHvYkZcn_+FRzpz4$lNxOyKkuO+(p;t`>>n7dO6| zrX~c36T5F*Bj;qkonybf4OnFV?k6fGY)O^_6JL`Z4*mLZ9`ps`pP`4MM=wR}NRJHX z+vo3^vP3$sg3ULVLYLZy4=OJ)rpV@Oq(ClhFJL4P&let*?d|80=vqrPJMLR4XyatC zBTd`d_r7^-<#qL($kP7QmJxawjkoVC;`+%jA2fgy_JAb?yMiC=*WB9~mbU=)mEBGU zWfX<Ro?=*Y8BM1yi^a% zY+RhNU!1Xi;~MpdYru(-BObf}r4|E8IyS>6s$|!ywyLu)Y)ktj$N;DJwHw9dIH0%- zG`;{-OYDHQp#G-T3+F9aHIMO6BXguJt7+nk2fxX$e`iek_;_%!g+~bQC;BSuQk3D# z*DNw=$omImJEP8%`_w!Ty=M7Bwumm5)+);rOBS$09)^abgV;Fxc10B-`162mx;JkT zT`;dz)+Ls#CImhV2}@_Px%BObN(6XToL~rZ%n5Lgn8A%HcjB2@q z?lgc9G)*NPWKzLUZ@d`uZP1Qj%=o=g$nJXRJ3*Onzie{81#n(u7+EhG zP0vmqPLr=SHh${m|LJs%C9)N((>wlPLDA^LqA54?JzQngY$vyyki0&V%NzB#vixg9 
zn3pACxcQ<+v5x^K!&fuwsXi{*MQud{rvy=y%d+Oj_qIHRGYYlC8#so8q15LY^XtGu zjnUNe7XNk%3c!Fu=v^z@E6?6W8S2&`)mwbVm_~Ow(Eb6L6sXPlMoy_=Y_8=uUKeH8 zW{ms?h=jnMLA(NCU@09SbGs}hp)XI}%}kAQ zpW_Q$Ncq_m)yv5AHa%zCS)k0T*xv#(*Niu0^pwffa?J$K?mLI$Pf74H38-GmSqy#B zZ3XRJqU>zB28^}9v^tIU=WGLoRtgoy(EzPefH+0d{|4~eKiNs6;nBaRdjE-PyhbZW zQ{TWU*8^xxPP^7|qV)C`^K+Fn$;kIpQ)<|xgXFy_Uvl!+>i5nD5sR0mqnu^~`2Jd{ z_`iM>Z<*@NB4{CAzfT5QPf%wJq4Cig4&pdP&r&mfEY2Y03k zogOtmI-PUK1CDBn%>>BeFr7v@Fx>CjstPRmaII5&yL5G>uJc?1$0WciN=AY_aQRXY z6Tk6HI6{95G%u4;kqy*ae?7uAK$d>)$3bdFX!Qro;{4%yf;qG{!RI-Y!`Kk2Gc0f_ zs8x!Ft0dVNgg~1Kf^V6Vxv5=W@itV#I8c@69WLMhDD6{sHuEKJ^^1yRHkSOie_9Xu zKXsE4qUz?@3t-24VP7g1jHWgkl))prfWBGcL4ZK{=non|B+RYR)zru#Cy?H$X8X-A ziQB4}Y*^~tza7Q-mrc3R4RQSKFbH;9grG8h&pCNJOt3qV>XG(N_9$*DxS`-)eM$}J z@%JaD8J4okmzfe1DOym3xJJZ>MxvwHRweEB>M5SmwBuLmK`6P#Cwr*e9fQ{l_-Epw zJY#VDovo@WZ80;o>e`$+?EaF{2SB%J=Umqz$BUrvBaD zSQB(Yqhx<3I9Otr#0N%UWXA|(V|l^+V=ji6Nt-diK#hKunvzLx05zzPrO;|)U;uP| ztV4<6BUuC~?9?DRDN$Zt_FTGRo4KWTB_g|eZzyK_qehMzmnk8TBUHEL}xOytgCwp_g+<% zq{o`C-_NDq-1lYTH^TNlmX}>J(ncyaG~KI~ni((CAUILvQT_}N&Fb^o&DmJPTs}-y zPdp^Fsx=PcH6Id2LJUFNa7m156*w}G=Z|P(gz&$gEN`*2WwUkD{a9yd*NNLP>E*~B zQM z3<2dC{yAi@<#4U>^zJ-9@ic3d&F|y|mH4_n=45jhyFk)`1pieB2V!%f z02M41Dh{t@<%fSW;*xzh{tB9x*pKk8q>H+}HB>_Pq;3X8u~15q#|*33vVGtegi@m`t|;zwYc*vbTDrRDyYvniR^yZ9Q{kN9U+) zxs2{(whw-R9&c@so;BSwWZBOwo@9=rlH$so`!<0$2coedJVTVo5bH%|H17_JCTzyn zj8P#`f(Wdmai+ceUZVn zP9%n%-uld&{10@=KY9t$o_mh#P{$3ucxN)sXsl&2ZxR8z@>LW*a9t{@86;q?50wgB zHq%haqv6;Kn5PF~;93`kc=y%hAq88+n(B-_`PWRzJsFZLg{@=;x-{ZnqDtS7gjN$b zj>QyIwHxJaAa~7~h2wBT&*wpM6{)rC1dypM@{3e(&^KRkDb>2ogC~`trzOm<+4HW3h)=rgw;=iqOe|&>PgYCq+C?uC zGv=-jP#W$Rfn^3Y#RiaM{#a#fcbi)E_LWU;Xx*ko4_qVeo7De;(Kz;coCG+M{KNat zzl|+JM`;na!wR>6aSiO+M5@)A3d|Gmv7l@T?xn7kv(2(PJUu^J$mjMqCq>bq(z+FM zbZ4)1iC%I_Kyv>K(&Wy8)JwzgpYjV1VvhL|`Qx35Q3LM3LKrF-ZFDv2+ncBCS!T!1H|E70lq?slL4ZJ z8nZkjsDriS1{MNw_1fcRDr*Ef`);_gJ^D;{kBQNXHdNSlJw?z5Xz#ZUe_NhMT&-%Rfckz0n2Ac5s zt@2aPE(*UEG$Tikj(NNl`2&P<`b!+6#jG4q!3}wAY?9a4nP`cJ#D}Ujn9-h%A}gZM 
zfIK>Y=)0h&8RZLN@_Rrs5bA_5=JZJ2J?@#>2JZaM{LIjQwW4sCO{b*k|IpaF?ddX3| zg$9QBNYOg_U&`6U|CspCPKT)18g=!vr&iv$O6b8h{nTO76d{U|lsf%Ns1O}yL7tnM zG`hN^;6ar7DCu9VXl%>4!nn-YKyIHi4k($3UVLI<&Ha%md_0S%mFA(BO{(Y(({?vN zfx4$T*1HRde&&9;TwY0KCRXAUf1{}iq{w&mAwIn2qbWk8X?7k==*%Oy)o=$1+_Cma ztNVWHDqk){L$jkY9zz?YR~t z%W@g9v1;Or&6S2(UTIv(y%=I`k;66?jtc;+2@mdT;eL~JB3;j-Ssve*>yzmSE+QAd zNEJfj;KNv%WHH30dvUl_tfFAiU?8&AU=wA>sMQgzn98CSXwF7VVi9&hj-PSiD>gfr z2NJ9gp=JR~xWlE6C3cFTr%I>VVJUhzYkw{KK$|#7o3x#%FNy~j%^oHqDwai&$d_h$ zrChgvyofR^yL?upX(^H^abg_=q3LuI8X^cQWX?hVURN1}vM;&Ri9Zd9db`{ehO@Xv z>%N;PSJ6n*@T3Wed#TRJcHWYVi0<0KJFY=cxN(G`t2*W8{cY2ibvpqg4{12EwjP}< z%9jzr{a=UZn8lMdG& zBVl2_w@h~|!V-kAyXs)eW>q1aZHKb=1v%W);RR**p@PSSGC1PF_{PR_D^1wunE12d zeAs)^j8Yc6AWhLPIA?nZiK*wLq(Iuj*0thVbex|!q+Ci{j8KUXc;6@BP)s{F2WSxl z+<+x-dRD2!v|;7h6WV^cIRg}tZ6(b|Y?C_pVuspsnV3N5f>f{9R^Q#z$Dk^;-{ z$7HUeHj96IU;7jO_HS4|rZX0H-s;OnXn(@%rNv&i-IIY2G+f7h0yGX~u5L*b8F~F> z^2L26yw9k#R_e+7@QpQ@(@oqRopgS4r$zZyXKKFMx<*T0G^a4v?O@BQ0qqnMgkppu z&d}IDim_D!nZAu7u>$$X!D}b#MT|+?4VSO7>)x8Jz(sFLR0{(*s)d(7%z?qOW#!|$ zY~1pX*h>&ZTtPBNNUu|qO!prk5~*U~T)N8E)D$`@<>Y|s$xPoJNBg49Oq2-QS$M8u zLV|*vy-VNZ?KGRc2v$W~l9k7c_+ta4uQ+MNulfVWF}Xm8Hic6n_?x1_-E)iDFxKJv z>!|N3Ww==kR>aSUO*+Xa)0T}}nIb^Y473gQhzC(_(Xhzf7{PL}2_e>Rv7Dp)&zm%e zjYP2~TSuiVqT`8jj3hoxKVO3u0%Pz9^r3WEkA`#AGUsPA^~}#m`4GQ+Ny9({!Ys^p z=lOFBg8$$acn^_o#1dKYfT{dA&!d(Fr9K~k|XfFGh%q`!` zoG!myM1BFUWdA@3$_uBw;vDr%#-M4HIVsgh5&+ z!VJ{p(_xPw4srBrMEKWwL}5z%gKp>#EgBjoYz^d|nVZ0yCJr*h6t)7|@2E!|{{I1B zX@B;FzW~gp#RGFYT#SF^ck$CM{}4C#O?RxwfAn=V^||EE&AVZ^EdUMr;`7b zf5SJlKHu02_7U`J$_y{0tNmH5e0cUel!yks$K~QT)qndza@_$HQm`xB>etD(6E0F? 
zZlOX*yS4LSdA4jfByVkhulw5lh%P+Z_)Lz<_MIIuSj5Pq%!?5r6%Y`6;NK7zf5JK9 zt+`hm5MhqxUDy@7#&>;S&PDw%(?9yeR?}F0y-=L7k^iK5qc)*e5GG^vx#0cU(i=UsqqX^7{*!Ng9(KXc7q3cPbaV)PW4a@ERF2pfQ z0k3VxvVK;wzi6$y+(*O`m>x%D1Ra*4{(ckOv5E&%820db4ALLM^|Txw&0~}6|JmJG z{sXA;HcYrSw^Hr%V>8cx`G}0GC{hbT8g=QQX#9SPc_ZHK_x$`1&;~`du2RjfbN6b# z{DnWjWdT5U@j{G3H5U#2ZY&}2s!2b>MZG`91 zYr#G{Mv^I)03?7Uc`GYQ^yJnE^qt-JLXO{P#wplO-dvpLbRuHuc5xHuWJv9u^rM9E zznbo=&N~=HnqLV=5Tt>;Xkd|{y3;fRFb{rIQ2r1RRTuKrcB2&Mo=hEX?qakAr?biv z8x4Ilxil#X^5hj=CG4{H1iAjt*-YgwnINDWa|Q6|69v>TRD_v!^2Tq_j%1xNu2k3_ zE!}t1%X9|Vyu-v$XLvPdPcNpEFIP;hRY9P!+bbxHowVCosO(e2Q>7x_bN+pArL z-D>OEOy~1IEzcwFgo}oJIPk}8A^dh&Tz24PE$K<@{3Fy^(or2)pBJAFenTLKAi8h6_zQO)I)#xVFX}_Id%%c@(?Re|IppKzudNg5GyFX;kt) z3M`ZJv{6j7D40E_mFt$xG&s@s zu^{gZfSDvuY|JA--QyyFZyAhSJR&dH86~`CrcS16gp<1s7sc~ak2ZrIb$Yp%Hyy{M z#v7E@1SyZjjb-#BRgEfHA6xzh-UtoaACVVoh^@`Kq#KHpY@wf1mDMDphdQP3Ts{`q zgFbpv!FP1nwWOzezH5HFC)kH6v|9^2s@eU70<90RNZ#wdVz#3FiSx6%ZU;>D;Y&=d zp~j8x`ih6D&}dm9?X`uyDa(3B01_}5xC4D4lS(Yx*HQZPi}c;>@$Bxx7fxwf^x^?Q z6|5uWeF~s?IlK9tU$AXmrQD08czJBSfV_vE%m{I&$6rE{hWQxFz*5_SU4uwFY4;#b;Vr{%CqybbQ zQ19oSefRzcL-}38O@$Exo5$uD%@ZyuZ}UA6TA5wAj@P2sTw|JL4Bw8|j$f3rQdts= zBUa(?P?IHy)D18v$aM~$>eMKM835-&0EB<%xb*i4o>{RJ?=&+D<2HgRxhXbt=EQ)I z+LO|+I(xC**vN!EFns7-chrH#hLt|(UMPcc9H4uh8mSF)Wf#-(8!yw}QM|j2a@HlS z8Wj6d#*PC4whxvw@r@VNy6x2tXZ74{D>o~^($u*92f%2cdb}nLgAC#~xnfdGn%Zi8#~IK7a$$1B`7A*N|LE zcPe3b8xTq5E?#T}|LkzmZJG_shlDBlvTFzE>B_3rn8rO*jsF=3;RWDwMiBpQ60DH2 zyqa-VjoFum!)_YpNR`Nb#@?tuBY^|KhuN`D-=^Z-5V-X`JGaMDd#Y^h`+(V2J3k4|SL=TeY^)~ieeZ*hglmFj9p!9aK=JB~wr2OP2wyp$942K-*GulhqOWvIS z36A~iE-L?(zyI5Twtpi>`d^;0{+pk{`ftI%o@Ygf5jVpaLducToScw$QfqtA5I*B_ z#Wr-fEpvh2k$DGik;LQ{w0ER2r`a6sHYK>pcBFsRIi=HohCt83&!OkqFSrZzIQrHw z1_!bBN4>FMQ*3Tb-QDR(c29jxzo0F>^U8~gCh+FEy!>#*{@AS6-Bde1?gytPTqxIf zK}2`aBKThXbN}vEMu^QOMeD|j!4qsd2#aeXKmC`tCRH*`fWD;R7mDTvqH@(P3LG(@ zf4a=3OYx!8e4mAa_~VPyRBGDeJdPI*({wXM3i@mL!wdouj+EoAinhF}z-{tHf2ElW8_%<4M~P=bsgEo{I~j zH^*@jVxPXX!0)XepM(`#X|7_-up>GdytU=yjiK$HQY4BEmhEF^G 
z2{xW!zkMZq*6j+@ulE@<=gD9DD&43Pc;tO?vYq6qmB-(6B)oEbXL-Q%nLJmGB-`SZ zSyF57EnjK;;?|mJAC7hKkBm-{0%`C)kD*64n&SK`2XTlTh^ZV6w2*=OyZli#0~$ItkBXo3dwmPMp5!N9oOi%MoN6TO6* zyrC9wF|sf3@;;GlGvMyZm%;xuXJ>D}vLN>5_VcANdI!pR*RZQ3XMKIPZz6DKl=i8w zdzBwu`L*=OF1f9V`6*{-Bv0bmwSKWs@8tLMPi;EZ{&By}k7i&99{{e?JNANU`7wT` zAI(REZf4Kpce>S+sXOOjtg>#d(FV(OEd!&&lIPDJ_j>idgxCM@e7^dJk{{O2ijO&- z`z=wjw8L;%0=Gtf{48L3KpnLI-g%^J(Wz zXZ}6+?eE9@ec-NaVf$Cv^^%?L!Bx>`*S;z1vYEF&E-0iL&t+CnsW8uTIUeE3UkA(ZE zx@_eN+q*}Oy;4dPdR%rjJn6Q>gXH;nTfYDY{66nLvYx-j;rgD`^y17VYAYALyq(f@ z_j#Ab(#?E9#!U~HWVg-9 zvsB=_xxQ>sgPre(^R0DCKdO$MyOqx#9d|ad=zD``6v9WZqyY`YxX5O-<T-w=YV*A-Dtx9w2z|V1q*f!CeL!EVvCi3@%Am4*>j?2e}1B6{e}IvvVW`nKV-tf z#>9E{0s|fQ`QOI=Bk@1pFVWC2o})ec=f$(*+ zOz(?jylkg()U!@kf*{U~)`c$f)670{^pCXJg&s1Ol>5tHbaSJT{9drOZMXL55AyA9 zj}e9Z(J2sy8Y1kJArp1N2b_Ji*0a<_X|KbyW!90E*3#uhe{Vp;{;O$p)v`GL*DHP7 zl2(WgpLPaMHI9!xgSJEf$vV7-94hg;W3hJM3dzHB)_bsZNYDlJl zc?woq{le9HlSXybBb%|y(hbHvmJ=?Pu{tB*_?sUZ#c3-O=X5`4NQ(g}O4ohLE^@aA zD_-09{_1jRUa;MPYcblkE9;rF(~MMny8#BPQvNNcD^X($QbGN|Lo432n-4WJhpR18 zu-#aXKxxM#=K{o(2nG=W_r2PVY2FMr4?7f_lGq@V@n)NSNP&{^0b^4nRSzAYWb~`( z0*}u7jI9gQZ{mQDsPScmy}cXr;7>`InvK%k{{BG^5bcf=qnVxXZBPLzBt1`u)q1tp zW}K?4SQbmfUc{E7j)B)xb0?E~aaEw~f8>uUM|32+Qe$(^+Ap7wF7#p_`e&dU%`EZO zjVK62_-En}X!HNWP&(~>iW{utmq{^T5o>>Z%y$S;%^x$0Wc z8Bt7c(7`~PC%^X0y>zh^f-m~h9+_G(KFET_yVoJaV@+U~DHBqyF+`58zjEjk7QSGj z_29I}VZ0sIAXc1G;@NHsec7S8bM5~OfvmVC=Um_+<kQ)c}?50e0S7ytts5-pll>tOHEa$laxZfGv5GVV4#_&6mO9f&C|tM&dj8K#pp zNz~_D_JiaM^GkP{`}3olfF|TSlKIwPnr}fx<#}7>$=WXS!%(bRf|vRsnxx|f{FH*4 zK>km?ypY87=04Zl_jGd@trCOV3$@C zc*5r|+38w7umz9qeRexc?wW{(7fw(u>*++ov(r|z13$S;`V4(X(!8(MSS0(HVLsYQ`;YFOv1_b=!wRB7ZU0x5;hzZEG|0iEW!4DG%7 zPS^4qVg-uUk2;KJe2sE33NiVPa>aZ>fwNBySCQ z2~|nRymKa~-6pUpn)P6nap%sMbT{sGHN z2mLC*H-BrsWqRLc`Qsh(`x#`TC(zMtcbRp;Z@&I&;SZYN#Xc9EqYfvDcCq=go^wJ4 zv81Z?OfeovNE0{}qd6H~kb(4jRS0l-*)w9AVQjWOLn7TL2%FL~?ytYT$}Kf?f2^~p z4r-YaS=nAch$^}>sqUITVJ)ofTJF-eslm+__+Be^maG$DKwvXFY zh@F@dkKivL6qg3d&?(8Y?p;MgKsOsI7mcc+?s0Q|6e_=14)M}OEEjsulTx8De(n#9{!H<%^qFPjTp)vv 
zY&yth)vM+W)AU&C=%n*mid0i5^=}^Z3rYsTJyP;pL?khdjwWZQX)lCGeo9u3p_R%z54XI|9*tgZY2w>}bLHYTds|h5n%%2*^2P?Ig{FNr zoPf$e1bsqk?Jn+g{*?Oh{)jtzR7M62q%on)Ov4vx6Cs8O_baI(7L~(A8(;ZwV_0&m zhL{z;JP~oGJ{Okv&=aUMZrr^V(}M0^bIuQ_K4K79ZDy>eizYb14pKOaB4*Vwk$i4< z!Cim-=Ez3wi^;Y_5sts*8ph}jG- z*APB_sHTyE@;t$j9D3!C=ATGL)ZA&30lLp3}*&aOPouIK`vvjs>KYhQM`q>DVrBNszJBv}BZ+uq9Db32w>16q?T{Z&X$LQcd#JvA64?^ubTOx@7b1_Z4d4%AH`3JQ-m@!zLuA2gio961HC+kg&_g)C2zISBqmRmYhZrZZe;e1uHk1X~%vr@3BJ zO3nP~*GyJ=g8^(oR@MLa+*pc6FS`XH>AlL*JOJ2)1+l#`+wf@P9(~F73JPlJl7tVZ zl=ZQlq0Ydo=|?l%61&`5Z&iNv_PNky*+}$U^5}z~6dueQ!77U|dHid#jrNPti)Ob4 z(fc7PXt(Z>H{n6pkdh#LF5sXV5H%cKHX%@4)+~=PEc5A$`yaFGaZOE-TH9 zml4Zh9G1fZq}KE7?hoZ2TIdYLz}7-ZqNt$cxgSP>HcqjWXW8rh1K94ebt)b@fW{ri z1CEzS>@5F1d$nWuo>*m=zM8Du;JcA%{B=p~xi+y)5>-ErptTqLS!|!F!Y|@Xy%2L! zQ9Hu6!;bfIrmd^S_H$HfaV4!qcL4hW^=1Ir0Cqaj)@n>o zGm0OaUC+xB;J2WBUY+z|K0r}~FJ8Qu8CbZ#V&v!LjALqgBRuWN>?zqiqqVTnubKq; zfmniFo6l3s99`K@s4}} zPqh-fGn$KCFA%HXsAhqWza@7qq_JXFe@S>5b#qW;HD;`3Sw#Xa9?oIp2>>_SggpM! zZ?{#RSg6O$xMymW*op-T8;_1ylbq$W0KfOAII6YYj=a+)zV$24N_u&gH__17h_xAwQPn?beSH4%xW&4#M$gJyJh3flJCDek6((( z%AJ}x1ec3FcWA8Q0qC8&4YuB#MgO)+65HPU^?jqs!R%lNaQBe!-P0Gcl8q_mI5l$A zCQ|NRaHMa@{turm~A;m2E^zrn!hWuJ+t6$2;FLPNu zQZl}E;`kc)k|Ar^Wj%AO3zxAMWlHVoSx%F>>tCXNCUy z8r*+cw9}*kIBqP2^;di=C_jzGSI9#;71>&_sLpi3Vm*?qGoU0X$yXU}5~8-R1#?fm zS#P%JH`wp`2Sq_?Hz@Ig+Y)Jz(TfzJ5=BAhrosJ3-j4n=5vSxwH__-!-=s@e7v?ob;tj^>h#=*%; zZG3C!vh!QM1JTsk+3RdE;4+{Xam$gcEU(2c3Nz6%DE|qv0veRhI+g zJOo~JOhU^1tW-{oJ)3>V4VN!F61C(?R4&J`EHxZh9ViUQpy11&H0xTSfS)7w9gPfKj>Yv@S2-y^^2lN+= zL0xD+qTWs4n#h)^GH4AN2)Xuvx#`eUc>&bYtJG#zFdmz}ebvT+YFuoTONX6Hgr4SOl2!gcu6GYruE0~q#R zbqS2?ZRAC+;_GzqDVG0EW6?H{3Rj_9tH|+pgB4s}@CNQ8CC~yjsNB4}Am10gi%&Z` zlLfZ$*HQS?5JBZ*vIb8W`s3%q$Za5R>&qxrg-Zry!(Im#wjuWpGLqp3f5v^2FJH_} zTo^kCwEIa`F_~vW2HAXT=-vcxDa>kSwZ+EYJ7yek=y&wXHo(OrQpFA{&o+DT^tG$} zWUQ5c{rVm*ZoaRME5WgCGvfC&Q0YcHj;h`+H}+P;>CQH|N`O{PRTzQ&rK;a1Ukj@n zD>ar0e~I(i*27O0SB4r1+q)+Mp=h$)9t&}sf6(Z7L}8ebR|L$f2TvPw39s4MjSVD}%EpLQwKhjnB9wH7EtYCCG}uwG#U;nIFY>1JRyyNS 
z^Fx%1HGC?U&tsM~Pf8RUuw@m7i7)ACDk_8y@M{i2hY)E_QIw#wFatk%60C>XV8W@L z-sPNC*VDy5fY{Y+#fhQsllRQe%%$X8T+Vuo5tr^~^=?9dNj>&0>4nqHbGeAB-)FAT zrdxZco81(34>a{RblC+Ql7UBm&_p_n4h3>~4fVd-I~zDSK>0sG2yR>}EQAg*GA7ui zmWCDd^TDWrdvUVp@NYwk!cx5k0LOQi6Oi9nWuM>uK_fqG&iAr^2(2e)ao6cP*)6z} zyGB@m-x|=4ICG%}9i2f%`Av6t+Z-u2#UN8?hBY}Xu`5k9G+w7zsMjS`g%qkVLrT-U zaR*n>gFH#z6_NIu5cS-LEF*uH-af0GO9a+ed`*qmhMjr*LF2b&3bK0Xw<1VsUt1tD zX_+uj`5JAGywg|CoUS)8#6)|J^9EAkyv+QJnVx##(cCvCDX9d@b|fEeb}}>!#wlbU zI@`V~cT}txZpBjr+9VsZ91HFrPiek^(@`7dYrFkH%UzMJK8&q79psgh*FTbQ^q$XR zg)8QHPf$8|)Xt5v)!!$jiiW880G7@1NW|F0y9_Ph+G9u0Y&2=DNG`i@FDGPVHu3)%H|1 zvZBGBG_a;`Tp8IjffN_**HP5ZI{xpu>u6|r%7owQRXv?ScLbeML zT5+KW*i+2G-ZDhZB2o+9dBdNYy#3_4-9)Jb7qvC!`Q|{9sGpHpxuUG( zN=XjO>-r!+plFsStGrQ>fx5Bk@?Tl9Kx1TmC>A%R3YfBX-ROxdV5ut0_La$SvYx8L zQm!hWW|}qdx&;LdOs7CuP(sR-Oo}O4NjSMn#V2S9#7pHP{0pkZf-2`lQAs=cSK;qs z#!sMv`2Hdkqva?|7$ABk?8_)XgvCvnW=Qx$Vjy-`ZK*`@SQ7@rWB@?i?;Kj1axQZ! z&%GaYXqH9AeXBf4bw+zmavsp+mZjQ@Mc8aUZq$6iXDh0b!Oa_|u3|xfx7y*|{{c!v z!(}sZ=WOWA6ghmX&aIIXfrHY8U)t1?HN1!sEeHcTeJW|gFBo&@&4kcl$O$ydg7vbjL zJBUV$Yrze<=}akN0UM!)jL!2oJ=_4rS_C<#-1cZ4Dce~^*w4epN_X3!&{TIlJ$@E?B^9MkEf#2|cob9EwUiP#cMeZY zt})^Q5rRfW7MNdKW|(Nrv<~fu)DKqv9u5+Y=SZoG_VI0(`h*?zarQ#OP&?$i1G}oc z(S(Cz?NF<7p6*(ipfH&?P_}lGtl89xj*x7TgWcjmfJh~0!$vZvd4SO9Il42_czdO^ zlDF8sA!J%?zvk7E0hX6>Gu(c5+r^Ee=wn*KSFTjPkG~T+7!njb@%bYASR|T-iVvSX z)YGw_^Kf(x=-w9R3iJg9r;EwjP}t);sLz(py?)0~TI*@91t+3t|8lOyU4+%zQ&r;v z3AjEQE3u%-+$jGdq&BSXcC^B=6_be=S`%_O6q^Z~(TxIU)|03LVvAXd9Mo3kUdyKt zKa{RgcEQq{_(WI&ZSg9bngd~?42>p57nowZ?K>JJ%^XA7dam|L7^9UlG$|Ptk^AkK zQ21z{bTg;7ODMrSQ%tbySD*X9PlV4Mf5nn@>OhY>+g+VmmTi4vg67&a6&ucTa%-lc z(UwVx;t8>{h~`MhfW0B+@f3S`K2?+@N~4LlCdq>GS+?T>Tpx_{{7Btz_LW#wWm34q zGv>-l1kPh@5IuSi{_j50BS~|KE$?Y%P66d#jbadnv~Je=rmKx{#;dQV_dP~%QABx` z2w_%zc5^p(CYS3hl~0sjGfmQJ;7I@WGBs9A98u(&jKx;d|Mto2f*L7`c8AVx(@42EO4&ZO#HEQvx$mvXcP z#k0dLVT*`j!H=||nHxwDYyy?B1ag4co8}?}a>_o}r6iut)k+`CBAE9zA%D=+-OA{M zgQP`n*eP=UpxM<$hiO}$o_3M5s}h11Z-U}0K|8=ycUboq%k4^j5ye^bdT}|q5hWU3 
zwbXr_Z-%de+-Lp6FR0fRezKviGbxbIMO&wQLZ0B<@&09q)@4r6qLM2Vw?)vb56&eg z!SvD$$o0&&4#jI|8Du#SMVEZ5Gmzn;W$~i_+b4XIjsBqKuOq&l;;v(YcA3r97a^?< zqx>H#jTRddx~eaO#4O#3buoCIu>8`vh}<&4B-bt~LaUjnyoJ-pA#(o8R zT5>DI$*X0S3b8PZdUwJ3UN#)H-mG}d)hV6mnHcuM_!#wHR*T-rni`RQBvmpmGC!mB zWjd(XJ>Dt>R$k|n&x)m)3~>f!Pywb2bx$tIt6*y?f*$Lw_Ax0Dz(w^)a6y&UO@!tA zAb<4d-+uQ4ONhboGZ*nePBTC9WpXOXj zU8(T4=zBVpu!a|@|h zTNt1j+RtSb8TmWcGtsVW1{kikbkHd|Re4QO%V^Hx{k+U}b)O|w9P$V1Hxye?te0Y( zamfjW@s8;5VX^;+nO1zB!DoXGaEkWj^C(_Nw?G9AoBMp~GRb`7HZh*dX0UPguL(@4b7Prf@NL9Q%(q4Y zZg#4h5;3EhKAFC!{}>&#Z?C78`ZV+b8_2pU;;;IBBC#0O)+$$;!iw%@lkS;B!qm;u z6@)wDzM2Hc7LTn7KXukh-_|}V5lw7YD)w{08nlG+FXH+6t*rC~>1heqzrDlhSo;~_ z;Fa3bXyY8(>n6h%f~jaXg!gQhp(npxD^ z%DDEF!NW3HceqG8G-^(ue4HLSnnO2Fs~*o^krx>(w@$~J%TS3shcV=8VZ`E4&i0{ay`k`A6N4$~Bkjo85N*mgzKUP`)F<&GMq z1-E;HEVgxwjAz1K#OfPqOn3%;_wf2_YU8hPbC(NDa#46wMKOHg!zGZlxTc1BuvpxI zKa86#nTnS!qr9XmLP4GalJ*BpDl3WYp}Bo6tfF0L6Dd>_RdEo&&F+GaZoX6tIS?DfpIKu&m^~#+R>Iv-8W0u2emwM}w2(-)c!23E`v&foX~YqWa{N`+ zRCNokm0eS2n&NqZ4ev3lGw0i(J=@ADEBbINe>3rmV?cgF}5cASG z!#~waB`S%7oEaN`cG0H%5}&M5Y@jUyf9^Vi!(0=f^mp$IQIXtnZv_&)B8Po$ zJ1N^#fDqGgR?A)7|LnCDiu_N1dyV}Mnj*Xs@dqs`GUw+G`U%vFNz`i0_W;MTJd*SK zT9i$vjqHHEffzeCdx6jIpW!1_dhxfMpQ2b#hmWwyXIl-+5M&+jIT7l_@2%vY6pUap z>Icmo5vAwa_Ln01inCS65k|+a7VG_nj#$!bEW+y-nQB76sly?Jmu4TVJQ&={tus+BV$))n0 zE(x!?0VqkJW!X&VFh}!z_3rBiFly6|aoT5af94OvGjZwzI5iAq& zA<@2{P!y|94&3D!HpUrlTM1w(#U8yvhme7#5S_xeW-CG~{{2nMhv>c}K@LkslN}57 zh0iwW3@1dr7V$?C>q2S)ZVQ?4xBW%t)k$P5^h=aTx`!th71rPQCwRv7qRxXUrujJs z*%p6AM~F;N`>WCCDv6!=o4rr=)l7B9D6Cp{q+=_!I*idPsk%M0j28Hwn$UwsDM-@2 z)%B0U<;v8)s%+(V&a8x?mycmJD=UnjwlA}0rGeg+6*TlC_$d!5dt#`LT#uL(S4s(L zYP#@5K~?B5i_Ht20(-Z&G@Tsa*w_VFshFNN~-X$`~rU&oIk*qpthez! 
zz(>&&g*QdWLxV`P^`Knl9`eVf)|Cdi=<%#mLkApm>!ggYx{h0M13hPQ>!;0|ar-Ns zYs)4tueHb)jaDrWo~%K+lF5-Vu-OE-5frhEN?>*E<;ZH6wIoqzxiMgY#JS*Ib5;9b z7xOaLsY2VdxOfRdScWRnRLf=(=2Lln&r8f8H znb7n*B@3<5xw&$+gn-8y3YfM{Id@l(xNORIG8cpAoIoYr9k!l>Et-@`UqjuT zDU?w!>7e2J25+x!`*g<>gWXhnU64?$7Ry9-kan52d3b$K7y$`CVbV}#^&$peXm@&- zH21Dr=9!iMs_QGR)4u|Y-BB(sy74_er&+n{aT(~T3lGln)$&o5cvk9=bSBrN%4_*^ zk60lzhU2iRNQWJRzGwA{_TfZT!n~;84KuE6$6C+Y%+h2DoRl5)ej8|~t-8t*?8opj zjmFsvVyh+_5#3?TP=+qUeB(`Dm#aaE>g(QRx(#j>=@gX{-mBP`o0)Q3@l=IUBp!u0 z3@V6BOTj}cVDMB`Fs8T%XRgAnERK3c512%91mopK<@xersQvM9wHHQf-@EDrm*Y}? z7ymEPdk7w_IGQ#k6Ski~A)|@e(nQLH12Oj@sF4d)n50=^1 zw+q{DFAGL_v*cwvPW&y(EQ{#3Kb6C)_}T63MinK@W3*C47#V)VsIx$Z%!P0Z+y^%Q zb{w*9ep#>-5yT<&)@@1M_z+cDqsapWBOl7oljMM`np51&j$56E_IkpN${z>#trv+3 zZGl;4$2#mG-56{^IjZyR{0)LZ!%})~o9YaQ(~h+@T!NN>`wDjl`4m9H?_-9_?M|IC zoq>j7Wht-I4hy&OVWA%Jw^sNeky^nPlQe>xeJA4+RM#Fi&852?ex5w;He3^~k4F`N zpIPD`Iq_p2`98rwgxY$CTJo)G{qpi9bBUJX2A0ulGVF5w^gRq7+_%R^5<&`A{Y%e` z4|ffkn#1($29ZC$oiijG$T`gy61`es->P!PT^Qt>uuA0*e&r-LFOSZvn)G+)WhJ(I zm+_py!)M4`1%L3L6-IXA<5OK_S_B;`VXAUKGu&=#s;*4H?z>58v@Yh-vZx-nfcXdfNN=att zPB)0$^k^5YN}7LEXM!k_hLWqZrF1F0Pc}BLz0=m&pRRmeKEiA4SdVWT9;F9VIq?}2 zty8K#s!vRu$pTYbX>GmA8$UJ#DAe58gcz+MRe-p`F+rGvgVZv`4KsB~b;`TU}03-7^xaoHy4X zlEXyz*W@f6&vtka+H})R*uno|&G>&l`ycTG<94K;>ggm5u0K4Tb8^Iep!t0&!3CA3 zsT*$NixKLeo5R)jvw{(0rmpiFIeYaG58XqO>9%UJ>cbp1-oHfjoT6d5#pu?{3oMS*4+*M7nRLxU!Uso$te1Z8lr@ zR_{E6@@pkv3zcZUP9u^-&iO`>^+nvVE zXMMhOKR6;cmxV!0m)Si_q^qop(u?)i4o|JzR6g}P*NwX*i#he}`R z7fRx0ea?OJS3L}pu_q=1d;W#h0y&@eXtW!zipm{X&dsz+JgjH1LFM{(GnWzad{}JNc2Yyr>{(d zNXX6$HrK2j;XX9rrTS=b#4vIrc07gqa;J1TlV3Q*oIo(8>9$f`KS%t|&yN$M518n) z;-;o6!|XIlmUuw3zRv=q(Aa0ueo^o_M$<35#O!Wmei1KYjNE%!z}Pw4hOSh&C8bah zp}WQG7Y<%%O|D*UYvv>_%aHeyRCBNUYD1fT`HcDfUqSzC>BA#-ud5G7t=K9Y1jf7) zp*=|0Ho&&$k39F90d0sc7QdS2OpMD>GFZPdGNdj&AKH?uc+&WTHcF1Oas5N>UxTH~ zaP}EG*Nc+kqt$9a*yO&Qt;&WQcnkS?{rcS8IiGmB>v-C1tl=bOov zj1t3b_RG8qw`ajjTE4nfT)y z$AdaHzUvv%1Frp<6j6z@I-u`I9ezRWSSLlY9X+8Li7W`y&6EFR+ 
z=*foB;B4)jH@PQ{b+yi@1>ZecvjgG=(PW)vi5z57q-~O?E3yIw)K2AH^dl>KH5d<2 zEr6bwU8O}BYtFUttBwOD3zuLGnLeR~7h(r(p;cpuAK(HecuD|Ai=NgMW`s|hzvx%z6 z;s7A+ba?x@?)hSbT6KjHi??sF2o&`NhbweSCK|$PmT$O@=!$lmqv)Bu>Hy|%9V$rDT1sUd*jgSZr>94eKTRCqTBOWJ6c0q**cB!1X#b`=XpjU{H37bfzj%4}b`cV12E6h zEK4-TDoYgDrK$Rkf~T8cV+$*8fcllw%Ngpx{xrcQ5uYPa^WTUkjYAGdYs2Y$H8(A? z+nNgQQg6I0yQtsaVfVIAWMn!&bfP+OzrppZIIsy9@(}60`d;grC!+FNZQ?Gpw5>re ztx6+ot~C)+xi@a8_DDPrd@=#-1xAHBIMnmX?1I}PBt4pOLv4L5=&+@1P5TpKH)dCa zn-<+Y$OeNG5z_YQ;nA|%^u`SqJkX{^u ztpz5-*F+Kiatw>ppptc(QY|;#HU3ukD9gBw#_4>8pPAj=_T~N`G&Y@vR+;zxM?3Nn z`Sy=W+0xu6X3gN6IlG&FCzdyBCM`FInu&^J!eNO9KyrSaj5a*RvX;&>_8sKOMwQbs zKJS2UX6~KSwPj@8@snAe<6z=JQ{)lwp}t%sO1kzk-G{iwzQulz5jm@?rin3JFfoCY zjZW8=hg|E(hvkY$s&YQS3wJx=zCG!;<;Le#W$^vNrE$l_9=k<%)s|}!hjNDuQJtXq zjGK?vt83N=TNmva9&(#j zy{EqB!NueD7SYg8N^PM@X~D08nlJ1i!s{%X6Cu{7FKOK5DMs)x?S+X;7#3|%IB)P zFfJ$CT!GBAT2sAr)^Am`5`s2TKNfI5g=i_xj8`}-%dS|HDs?-negcoSd#I8|p2}_6 ze~_llyQwVJwLD(ks1yX^BoLG8P9-#`vhaF*gSSPpe9n%7P`TP)rZ70)7xnbcIn06+t<=5Bs?4 zLQf?f|J%G^>o3#Y)`O>?OO$dqY_{?ZY?qUI@Ah$f3`ZTU1(zF7y=&a-L-I?wNpH6b zQ^TSm_KkMgnQZXfZ}SsWjatkN#=6x-y0Hlcli1C|#ydF(u27;txg{Koa9>`7z=Z z0r7{MrN{}t?Zyu`Q;F*f9Vj<{AID2k{EpgJv(1ia!`y9AL%qH)K zC1}05S5l*=$p7Zs2Ppxsv_-mvPOkbL_?wonutwUvUpmQAt22wt%#T_Lb!LEK*$%+> zIn`n?*PD$^Zd8;au`9{jpbcU3i(F!nA&w17+~|!HLVtg*MXITRu#~E`utTodZ{?8R z;!~BEj@Y5?lP&EImfCX}nW-S&@bcR13~%bURod(>F{H!wi!CmvE1-JCCa-d*iiZk% zR(LXg;1umUj~8Sf{SKh0b`Mg5YxTKH`-<{wQE%PGvCggOazqg-w;ua!Q&|usyMpJGHJ{T6x?~+CyDo+(5t)q-WHK8ao;}G-S z0*H7nlje}h?_YZi(CA3BJ#S>|=A=-)DAJ~=0aJX=%fpbUvzuAjjUKl9U`B|FCuVT=^Ngve&*hpRBg<713sO|Qq`WACrj zkPWxK<+Xw`D4@vEi+ox+r5G35_C>#5`Y$8eia*qVvaJ zGE$`(#c8bG5)HE13s%d5+$BeY3mFJHj(w!tPy$RbX+-P;Ni3=(b(0k* z9Kr3!JUE$(qmX{JiPONA0}y7pxbb*R$;HQ_%~&;$kUBw*A#HZ?M^js=@KU4#qZBW3 z&>iNcN@=i|=mhV{;!;8v6dAO`I>=O&Reb#SbbPUcKnIow~KoHE&uK zLInJy2s@)%*S_RN@J-^hLZcLi<@xZo^Nsq~W?r+Ppot$#f*BW0V>>lGPPDyQcAej( z1_*UZ>^^yC&$RhVlw~>IL}4T>yxKuscwhZNv;LZ#FXvBLf$vivNqC9l)Y87 
zA8~YXGnhW;{h}N(*rH=)W*5d|PUK>#-K$V6DN!l{R@wXph@0?GW6hU(D-ply(TbmS%R~|`XxayWN zR_^-AR^0`NZbn#3UVA$=9pe(T_?!Az4CY`8_SLxTRi%B!2MkFUq2*0I7M(5BtFd*L?+)kH&Np!Wzkh0Ac6*kdZ+0UBlRJ|iI zR32~^*z%9<=YKgJ{d$FEb?YkyEfK{SElN_(53z|AK{XA60V(Yj*~heH5B*Is&wCHwr=Ak(D^H zM?&IF&5HcxjDirgH%TXggU*UA!wQ#lG-c{%P^Abpa4!9S?ZQ=uFa-%c>c~i2Z}%TG zO!D0o{ipo}OR@fulivc$*vz}c4qCz=!Jn*a0)FxAwG3#Q)H9$s${|+O=nR%cXgH1h*O2HtlPXiOE@#lKeee z;5AvuLA1p!n#ys*&M=+2on5v(aMyohIEi@j^7f<0wb(ob3g9QL;ijm-LrTfdNX?F- zn4g{;_6!~p849Ns{(A++!t;tks37Tn$4A;F<7?gW`ymdrKpHODQv=NL`%e63n%HrCAFO}^Ks z_#oV!=YA!)(mE#?3owm%C#C-+9%hnI#J(odyq;2t50HUep21o-GkLL^TI;Ge_6;+) zXXQ;gr068`fTNHOVHGHoi>VVOc&5qW6I(CyNMW>|`y%-Qw1D!5e}2bFUPC~tm-zgJ z6sy^%8zcIqfS33=nNV`7<6xwDwteXG%{+w&V6=bTvh0K zhz|Ck#fiS5S|4~57$LchS^6}a;c+xu&lv<)zeUN%-6>Pz)3 zXP=-Z8|X6*>hyQ=w(ywveL6BPRQ!yalpoCkb((el${^4E$0~QOKqKz0_IjnQ7tdtD z96251(hBAzHQ=vaa%96ELFNuPsbi;fpb$U42MQjEI zQrDSZ<<_=4x%{nICkF@A=UZ=zJHJS7w`4kh#7?Ufg0?<9=y*8xCB?1w#u1ufu0{S zk|aumR3CAWkqT=hPO*-_vj@Cwy=|5|3u>Q&Vz5ZKE;??Or$0oPJg1Co%XfBJ&ny8W zKxTf*v}Qwpeg8feOmofU!2(FX;_Q60;XL zd_8A9W3ukty*BZcOU|+9W)n!hoMoh&>qHZP1+0X}X)vQonqST=nd{K#vG7c;Juq@t}cV+yGM#v^*meh%GqGvzD^(y_Io=>j)5 zyuBh3Bj&}* zbOeX&;PmF=RxghdL&>gr+@YvzN*`Vlc6FPcn@h~1#y+5Tl>dDs>n?#~nVkgVlL_5* z=A^pZi_j{ztVWB6l3NB+G7OS~FaIdNPZKuY#uEtn4ZGZ!ML(hP#GzUGa9I~ny?Dn7 z52PO*3gCB4PpUYo(>I(qA{yVT(;_R~;vgm!=HusJkHF9m1!ZF!o6%CqPXlH8^HwZ>+$A=vbp_*FEy`HR&H7_U#KSA_SY zy$j4|DWyJoPbVp!iL*+e&(+SB^*;EiOi3>hp$_6%QE6QmPSSi(d=CtgW)t{qlil?| z>3xhOwe<`B1Sf5a8x~cI7^wYvsGS#hKDZw7S2=JSv6ip3#UVSrB!D#+)nV$O;RnD| z*HNot&eun{_AzR24;Q%8y|mt41ljzLwIs5!r-L%)t?S{m%3|v&4B1+%dkWL{qxIj| zZ%_6IDgcAe5a_j%eUev%_8Dq635$pb5AAP*_!iBB-q3L5%HoC1Qu~<5n1?Az`#C7l zj5zL9lB^RRQxMr{lu@o_m0D+398w}a*&-e`-T~4h9MQGOW|=&qB$$ zfGI)h=F5-i#X=L0@*)h1FphcY)8cMhxe}~`m;J-!25_Cjaa@Y?tTJ3@Z%$mn+?}jV zlA#?I=~QNeIjp@l9;dWS<{ubQLoLpk!-_=mA8Nb%mkfEoUjfr2;y|huZnXyXl%1)y z$oNE?%GNOdp4r}Qf|GEg_ioyONRw@PBER)MGjZ9s6;-_ci-?z>1P0MSwFq{t4*@CDpzG`2df75Ygt2VZ&q<^;o 
zWA_TOe;oV4JD%qrL{i7dlUF$toPju!Z~ZH&4X+^*aePVa!3|qzwp7*Qc=fvOplp;) zjyCODn}Y@p=<~#Gwz7x`rKDEYMriF$wSV)+kA=fp3Wd@BU@<3st?dZQ)dR@czLO9~ zBs^b1bjY@iQK1!UvNs3%8d=(uXoAm>;I;t~o*!is@CJ0xo*KYc6WLZP-gr7X!INo& zLV?JtJUM54y=v=~+QXgTSzbA|^aK&ZVF9-xKPU;a{g3*`#2@gA5AulGTR8&WHLW2! zL|~AFRF9G1aS>jgUlXYZ*4BnqPDuW6dzzgUI^0Sfl>)E|bc6ni_IhgW9?`5lpRvM{ z|FN}|DLrQB2&ABRNcGTW%(i7Qf=q}%A!>Ap0^~*BsIJ1X?njj?)Wm)Y2QSMv&*fvn9OowEb0Ayp0En>980X5Y}zb8hvgchD2l6Czo zho;Oqr*J8t-)m*ABz^2L5$e@X{9uhVTncor7JGkn1=8mm6Ro}=0)uMejOBw_ffYEh zU&DL619|Kw#UlwLB-yi2=a+ZhdlZtR*pSvBV3a4<9lPOtz!nV8|6O$`;B`+njivd5 zF>p?f3#fsV_J(vxlOI0R(NlocbAm8En#C}c2tKU$#HC+=CV1}ON=W9Z?jIGbqZ*x! zGW30Uj9TO-4)vPKq65ieMolb3O%a(7~-JG27q9Dgs0tF^JaDwKGMgSdzrTb*FJ2b-tH2r{Gvi7mFxi zZ>SAU*OFq;-13ho=;!boJjp@PjPXlr>iIsy8SZH z;HLM$a3}CPnDwBe7=R49ySqDT>tY zVqWf7^lPQGL%$Bkkp-XP?QRy7aKhuf)6zH+mEZQ-MVt9 z^F~RJw}!kt1!fNDR5&xtgv2KX>UeXL;2%bi6*99a;>yRHi9(#SO8DWWjA75ZT~>I^ z-Z#wjK&BSN+Hc$n>MU7k^j&lfCBAEl-Imd#+5q&A0pFRWlX9ivybP6NXvcx>um37Psvf8&!>MJNEtZHH-Kqz=w#&jvGx`^TOvHP~7TKTe39-ZIN z)@8_4$syI%Vj1_AQA9rb3s>Z8?ah(HR%Y|ng4K=X$$;l=#vL|2{ z^>9#egD_89@ek02@Hk;ZfGv&si8sL2;&f6eHP&9Ek)IXK9|HN!U`!)BXQeq~-07EP zLKsE&4!Nx6_eAX?y)>Ps<`}F`HWb;7__+0PTBo{!lVoqP9rj5{cWGd)QBpoe(&g)Z zt6|azWl`c=nkPKp)Qm769dh%h+jroR+tc7YOci6X?WKHCjrm@^7*81ad%d!_Nf`_H%uD@H9&?mV`+BfF)*A{ob z5%}HX*H-Au(@^j7fG4230Lp7onB{A@z7ya4N;#R+eTT1VUBY?ZV$c4;UGRE^=nKTO zEeQT)3ZHrNsv)TV500GUC`_y4BxRAPGvF#85N57D4qIkv<(EMB*GObHwzu2ah8cOi z$aYe_G44prH0TBD(cMw1+Wvj*%JC0wxBthJL92z`9X8tSO+Z3VJGL1(jpA^(I&W0b z5GpBXrOWm`Ow!IP^X)(;n;m^Cd=O&3#7{AH@R(CL5i>(29b++1ooPAIsMib^;#}Z9 zwwzn7xL{-H*NJATD!ISwBZ~hk7NMA8AezMwhnBG5TU%4U_);2F`l9(x0h7Rq_vyD-+jGS#OxVrMUZV99M+gSjxS=*g6VR^AgOy zJDfIM#nOE;Ts_k+N2MmGSHq-;ypz=?rb;<7SP6^cZ5CU?B^7tSS~fKGADfbw``+4hN)|h-1PAczT0Y#xSG3<8L43{J>%* z$1)5V5P`|2`B`C`-f5p$)E>HzH~NYEO;;fz?z;^tIG<{y1a!B9oBi-zFk`f~-F#}g zi#xb7s3my$KBDBmqwW9V5edd)*V+%#TPthi=b0A*7n);#5VvvX<}Lp|G6E1K1%R%qt$;Wa^n#2u3gkj=Vmw=MAolc zuWNE|lp2y?5nI0(#GJw*SFYq8hI2Hf{ql94tI3<*%ZekmAiAWZhQ*vpv?(SVrG;z) 
z|78J~BO9CAozUD}DQb=b=bTqDL$dXvd)aqw!+)bgE=5*@#5rjX;WoU55`8aE79Ys< z4ki3C!NFnI5^M0Ip?cRwqciLV0JU6uUxF^P;GVCQqqPw0GU4j(sueCIzq~0mmrU$1 zmnszP6&|Qcsr4LVM$wwt&x|_R96xv4S}-|rnaV-119g9$BepJI_L{?I3y$+5JSL{` zg3bJK%5IBl`G`_?qW`zUcM<_`&Iy2yCl46j%m=rLB{ zPWLC61};48PESrCfjA6E&0caIoIl9%xsDQ| z^b^empS^McCabQzYjI)g-D_JAP?6pdlouiWn84R&Tr)NUmCfRFqKs9vY>2UN%DgXHlx}xu0n!nUiv6 zq8PK?+>p^h1{DFn7qTihZNq+l2KOH`n=-Reh29}$qcZMlngPWQ-G4~$qWry*;4fj& z$hX@Lbi@bkP|(PjD4ovsVDTP)K(I-AfY;H{Q98w`4$Ne;N3iPa?WxE*5N0NL^6jLyjCH-gS~!W2F~E5t~^XMtG}C1)uQIe-dyQ43c5lkrjR6_(s@Sey&DHzk#Y@k zjiv_OdF>K7yDIaDhT1eNOg2&KsZD?Vl9rh!8f_caq{(;6mWKmeMx;dKhy1^$Ycz~X zGn+zOmPIDL(_10UoyQ3>uQ-o3+Dw#DP#qpJcAoaV97;W`9ZfaRF`msH7N4-o(|kh2 zf3Ks6>gJ1=v3^{=dT;!nnhjXKtv8;(TCa8r7}=0>;^5c_BlYIks5|D@U3>?RwGFa;_w87)+=X`W zGkd^4Hh@Qu}n(Gcf_53?;tW}hr^RK9m-{i`D^UVo}bX*3_<^Wv@$PK zU}bK^Eykl=fs{M{&4`Hl8@}|EHfc+VShVpp))BW+kkJ1W{P|`sRJXh-8W|urg z7Q9={u-N51I&L|G<-)0#iSSbj&+E|^7EI&JjWFY$08{=`z7*F(3^4!jGY-mC)+{w0 zTHwR2;<9+Psjs+4g}#^_2U;|d?bhr&*$B^dO7kgGK7W5DkwEiue$^q?E&(haBNBAt zqs{sJn%y&8`!d|V*01iQ(q&FN_v`delQ{c1J>R5~DsUUQ)IMeZ3i*p_QVXtz?x95bI~`8SnTQP8Ufc7cXuoXk(F(MCI4=N0&OV*o^!NlKluhFw z^j0-CxrvM#X6&uW8O!J34`0c`hm5GqBqnS%akw*pHfcQgGS(mZJ~|gHzWuVbVsR6v z-R84Ged30hHkwo(^>Ra2A9hra9!gp7+*Aq;jpy2pR4N%*V1471Z?JbnPSLLeW6L@c z240EpG#2xBT6-O#6&(1^^LKjAKqZI1zr z9il%fkA5lGVy6S*7m;uaHtI7(jLb3% zag9+%+NQsN_jS!#EpP;$F40-|nf~k&m-;n>)&)uf{!hy9q2W~?$n5wZwkCeV$zqEKR+Tu5U(qD(j5IgER`S(!|s%^B( zPP*5%)ucsRcAh&U_i24Pt+LwefwqsxB#Ae_+BAPP4Zu)$Mq`d|G}RvP769Z^!&(yTHveTYq<(JH?Zl8H8BQ1tFoF{bjtdTLKTs&}l{ngB8R{>aN zpa=$W_v_ow3(U)Zg5-wys38ERve$E(tvLEflu&KapNXgS$SijQ;4rm5I(Lmq^KHt< ztv-oidn`p3vv5eo3^g)U1R=uGTI?F^Yx@yVB?wH?`Pw?g@{9ft+;K>s%f+UM&XQCm zEOm_mE0^&zZWn6(j^Q%tq&#!7eZo4`ypmil41{qns(jiTN^?aq#f*hEs^dHC2SccP)oSsR6+uyXhF~#|<1p zl1`JP^L+zf<3p4TW{uDBCAguwT`ZEC3@x7;V%3P0yjG<%ar*e5(1i~9_GbPy3VQr^ z>wX{0vh(i!aq1!K_wAOew2KvDeHTz82<3P(lJ;N@9P8hkA+2`CWXh6sc9l+u+Lck5 z`r-kdQt&>Zu8QiJhQ7M$uzRRJtNn}xJ#1%w0k$)Les2EY|Jj+Z^?!EeS5p4po%sO) 
z)>BW@|2t&n^%jaha?&*=~IcoO}V}O{oE*Hhk)@(3GPI_u~q{4*q%la z7$QQlfFBs_=jts%HsnTjU@RJEB_LFx81v@sx8t=SW+H>gzF>mS6zfp2u%x@kqm2dJ z=lQuS=p9ArmR6Bz{c^FCY4D{G)5I4em*gX8jsyTiZ<^kzbKaHhR#sMnddE>YfXbjE zi#npC4f+Yc-PYD+ty=opZ49=n9EohsZRmA2maI+ADC=3wonwG)U)&00R2aB4`294% z#Fg*9`O~CWQ~T2+7;j2^e=x0iG-1-X(s(E5?Q~%}2g_0ex2En3Txy?qovHno>i~9N zf(739M^w!Z6Ao|Y0LgY0CrQ%Ozr+X##&hV(cWmdyw^3}=H75CX^6kdeO~}6aeLHA$ zx3{wFnjZFCuZJ(DuypOKz?cZU{iKD@6pR}F+$VS6FVjLG6Q&7cV%qM*v_6oW`GopP zVd}W`8ODO)+1Qzv5~pJsf?OuXnXiIz3&K-y_&VO3e7-OxU51(t)HlwLX@H)m{K}wq zQHh!F5uW`1Lwm!%Fh^W4F#AJO{i_9zlN7?ZwD>~4lvUM$P&WMPyU<;>C0e!2&_=dV zzzr~%u5ohFdA2weV?UEpdssz;N}0#TrUHh6FHTsM+O7*;46tciOJ+Zgzv*xBJFY1( zDh7DN&e0Fa#e%WHp7!vXWPNhdQtwJh_g8ziY`$))KXHZD-x86q??&LqoFv@=9B z6|L$Y`j0K1yX^4fZdYrR)Xhn7w=~Yge^WA~m|Q=i79A(otPyV*>>pN9z$^?7^DH5e zHP-L)d2&98v=0ht%=Q=ZKe^lCN%>`}DW`4k9r7Eq&}POgMsJ|Ejo=#37+M2Uq)`G= zA_Ysdj;Pk;;`~{*b$NZ8BMh?9Qh|@&n^l||uU`eYW*riiiBac+?Y1+0t*ptb*aJoD zB|sskFmg^2dzYEVVVmYv4fx{J{XDT+_H0FSGSrjL1_({Z^H%yyqWFwzE(q0jO>sp$ z69V~kW9Kt>sJ!=|>UfvTNhQa)zduAK83}?bv^6&Rl(lmy#tt8+js2ohlAue>)OOO0 z8Ee4|9tODy7+9aw2q1K$xo&+6T24`ulC$FUfA(MfW$hZGyt=)8)O)|Z6y%CQX84(z z?jpFqGw-@EZgSVnSKmGH^fyh+@K`Oep7Wt!Y7rwFgEr_2CI(sK@Z0rCCF3N`)2fJv zIs(5leiPodIzu-v)e)1HX9M*;=BBg6hH=H>RRs@qyxRd|*vHdX{; zeBg?|m(L(iwkgheK6;Kl@Qy7RDtIGr>OJ>I*K4w8&8LU?`SMl9V{XxqT!&A`}G4a16HJZmxe?M1K_$C;aK$o8>4sgtZS|F^=BAz#x3sSA%ec;epiN$}R`s9G^;QNY0+s*)06X z9(XfBMVR3bG50`+y7&vh*yko8hVmbNz*Xp1gD(W6qaqnpw9W3mC3z*}7^kv8xj56D_K#ujIc%c~7%=O$hr% zUd$w=LA0cw#p6wzGJg$(0O8rfd|z#Fns$yh*3lmWbrSX@GbTlGA9kzi=e;E=1HYCw z*9&~n{Ys){sg(tu9UG4`(yY_x4{25%PQ|9gjh`twq`nEpS0`%2*1<5!D@j<*#${_R zq%js?3I;k8ygmxiJ9Yk~H)6-h_oqtFr}BHupXjgh!rr^_pL_22mlq8&+A~zB_aL&A z6+Ka?G|44a>8AD_53zF3jhs6qFn<=}Z@ zDs?~XtGJifLXst}v>!KvuEMzCd)p;ibDMRS1IPzD754d3Truu@szEv#^l|%P7SH`x zXvm8I;PBFkoetEeiJVP#zu9Lz3k-wQx(H%7>-7g7wh3gVh;!5g9amYG`qGh9y=1|_ zlj`mK!fmq#*x-Si?UK<-pvI~zp(Fpyp}A8W>W6}HagimfIfhnAmS+`7LO@>?Ps8>l zAuOFdhVH{TAXo-p^FG7x?_6LuO3eAFZ#y^R!r!`}TxhCsDbFyiv$;W6E6r%|QMza4 
zCX+hni!{b=fWp%&Smcn+Y(MIq1V-WV>Ye9ZY@8WvmmBcSWCfDzsVw7c_H7qFuYsUt z2V7VTzTa)KA&JuR%ztYuWx|}ve%B!}`xsK#`b2TIA*NLe&q)*3@Yo^cPJAIIB2og~ z!PE8`3y(rRqj^FJl(8EoI?hpetylL<6S71ZXF zKO=aIp595QlbN`kgO=EER`&JP_3Ku0Z*J*|5@`+%D|3ZOizgdob1)5dVCA>sAMM_( zSG05^oQN@hZI? z+M7+9K%Mm>!&6PyV|(S#Bm)k%P*47j4@d0g(V7~n0XX3w2N!g^M(O<~a4bOV9Y&co ztIF!*GW0Q>dEr~p0!$kmZ8jDP(}D3MYgN*>x4DnRk52#NV6(w^hw8ub62?TGZTqm* z%7Jk(3wEF7!9lTIPWrjbHXpHjGC0+d%)YCXmMh45+$D)i<_sdwoJCyPh?i|LW7ZOca}NDwjk62B&j41D7l~;}*8|ZriN%5oeydV5Fx9 znsr5?N8DqzU5kIxl{(wn`e_$ho42N-_DSjTveGqPmiv*AzATtrVmCNtf`{LAn$Fr~ ze$;4WnZcG3LCR32TD(C6fqqrW8Yp1CETBMpJdr%=`1ess%XbsSBZ8|&w$?a3+2&|R z)>_^fw!I}_T=v!II{j-;5r$SSZ7sb|^NNM>1vk@gc<}b2zj3m+-H=-$%?R`&ASz&= z;7qRyn9*oe>$Kn_*Tq5h!;S!kN62)FT{T7nRDd;=RYv>g*G_Mi^Rs{8?^AM83iq~` z2*%<1^&0?k2KvO^9^#-(73136OO2X(`<6nfd<2KW!T2WYQYgyDSJ)iztK`Q(b8+5k z!M-1T_2`KQ(>-*^mU@ZR=B4_=N;QhGQS#~r>r=S@eWZ71EH(BZ^1JEg_>N9+i1@?3VWDub$0DuT2!mu1MOTJS}Nf|SOfQ58Okbu+EE(Dv%2={E>X zqQ(Nv3N9?uPlRXzjhbG!UBgHHHE2ui+}!)231#L#1FO1I7rYJ2p9MfGs*Zw+yR4Vf(8&F@MZnFJOf-y%|=uqVi zc{;~%D-ZpGzNd=!lQ$hIq?fJ}4rn~mUL#t%*N0T!WK*60g#3f$Eob+Pw|*qbAzU;~ z^;3x%R9cJ4IWbHTQnOSR;y`I>=?b%W*vY1dKXvghAj;0;OslO1hMyKbbmkx2MrrbW zJJ*f;1zQVM-ZLDAj~Oa)x@TeD?21Vv`Fh--ffrnamYEr1Gfy2Ln);l!vbB)+ChH4H zA20Mx13%7b0)ICN(H4Y;JHAV+H*iMSkxjpu*L{AJUC6R52}-?;9$*;1-6gV0eMu^m zXktI_ZB74BF=Zs*7S-!(J_&Dd`b)!e_u6OgT+WV;(9|N!>x?M5YL36Vj6$*VLL&d{ z&T}7nYk`wgwIe~QTwCl`x@)SU)z~yVf~ubZ^@=|4TH$}eHT_pJCpOZVDZ^aYqsTc> zx4)MmlD)&jWu1O@S#ajtT=kryOP)5O3GceE-=6C7M83+HroTbgq`Fv7&Aw=*Y8)A7 zzih=~mC5qZ(qB%f*vJFjg2m#+<~atiTfRn+r8T@UYGlArH3Gv7@bzC!{kjf!=_O{! 
zRS)~~0)nM}eukxt|5ofo0IVW|cp^X{ZJ4;>(j`jUovEB@B}ZAqZ0!bRImH(PXC1(b zMfS9zYT#F!t-)Xk?>!pTt*~*uKv9%4OZrq3b#9bu0k0brJNwR}W7b42%R=+@$E#4~ z@;!;?4v=U3)oKu87Cfj0tnbN;sn7$VjV$a*9lH?q0Wl#YO`2@iII^Lqm}QBaI~k%Z z>Y2Lb*!iigHEPoEdhPVLvs7m%nZ>W+OCfvXY(USaf))m) z<{*bzc^5<5_3&TxYBlxCL9d&!Ud{q1x!OU3R2@n0RpDr=qK9hI+#5ycj(VzvBSnmW zZILO;g#b$Y86YzgmAE1Eh^}Y4w$7$VHNK0Dm{=cl$`~+=)5fa1pAde^{miIOtX*@T ztel_i4MYLz&M3B!kM<)>uu%$2;E2r8%(6$8g`^8)q^-%t+$iYGXQ zM2LY4Z}*y{KRHkgT+lKs$>Cd(s%Dk&)kCc*(H}t>1UN;A4BMk71b2lChLy9{Yiq#* zBY2Z55JlvgQy=fqejYE7t*f8t(EBtQkUj_j62Hm*sc2#$VT%rvn>~42b0BJ}eDhE4 zluc@Q3x@uX2zNjj=wtky$z@$+Wa#3lNx3xV*T5lNf_Qrh;owiI-^PV{q?4&m)3pkJ za$GnvTRNr>1hh-1@DP8(A)HDGv+K=Xu8z}*26?HLTa}vF1Acn$(5gN8w9}fGM~yI6 zadMg-L>@1RH2rYS2@=Y~;YZ{NqtBt*bi$ zq3wZ)^hyxA3q%}yxCUKjC(X4wi53-g0!T|ZUJpK_WLTArwN4jLE93Kot^@iW1^hEn zQBq;|SvlP%O|>f@m7{546{j@{lJB!AdFAZq7#&)amyOIYx~Kb+R-dxbiLH{N$IrJ4 zXLt%EXXa~+_c6$X2}hRGB!w!SCB;TL)v7pq^gF9rj>A^vZ5H-7gR$E77>bL>GqdNk zvCXrX=7okPSfepCFQwX5Si)NkTGy;4)CPtO@d8-!_kh~9PUB3!2Gh}6K!!J+Sv-87 zFRnOf=&ZWeI%*x$Ri>WPeEav&YhR87xqLdpTv9FJ!05py_iW1rFPuhU+K#&utu(D<2df-g>K;(!JSHJ-jWHg@VK6`*=~A{+?JM1d2bm7#3gu&*eUJWLYz|Uu~n7DB(-KeROEVY}#H%5V7it-(py;aQT zv`}l%BdF9Q7lXys<7C)mu1>-ySM4fP5fy2#%T&obtBDG#)Pp-4CC;L) zw8x}bl0>7Ag)qrnU4sTx@M)*A@r$$TnDU-6sA6m=s5fVbf zX^2w2&!oa)X$a*Dt;|WRJO*vws(i`8haimm`Cj8zr#+j5AZb?ziCvUHU+2G%GJLZJ zOOp(3heM;?Bg^WS)xp66NhY8FzRC0dI8&$gu5pyM;A6P&yQ=zfC!if!<(n;C@F-;s z%bFx^>hm~7uJNz9G}a^>l!Dqk-iP#_pHJ(k2<1K+X}AA$bXCCxDV+@pRufTeIkSp1@r?tZTqj$uO`Yh*z+6 zso{hvG-E4BaZiYh1nLjDRp!*TCdTOB#Z1{9?i z;y)xl>D9bDGf^s}KYMkE++(S95BsB|y2Ty`wg2#VtAfg0ZC_-JS*WvYCg)b|Qz?ZM zq(qZrT45|})n?H$wcmqJ^)0|L3ir*wkMOtnBsPC`NO;D7zQ*cn$eC^HHJF}P!&(fX zL~u7QJ%p!PExJ0FDLRa4CNneA|6K*uyKKb_{8Eo>ym<0SiXViw?j>56_G zX1ZOkroW(m$j(`&`Ap4;>9fY=I9$Xm3IpoX#!>=fpttKE9-mHtW;$me)s~=M;@q0m z#vhoN*k(WGdUhF>Y)msm%g9>$FsxdTZ~#5^leG%C791CF|udL~`&LecWGevj4; z0`pm}Pxj@m7UHSLoYu^30nYOnqV(X35=lclEyFnO7FJmRk1-zO(y}f9Qr-WhySj<> z2g1muZ|8bHa*PIpCR#HNtvuqu!($7Ly`qDHZQz(yvT|-t0sM>8@+K7u4qN_gcDy@2 
ztS@XpX;rlSL+Z423NvieH{n_u`a`xuQOU${M8zJFQ{A95tWc$Eh;06u%9lDA?Y

0j1XC(j{+wo9R-}AtY^T@FfnCAT=jGD2D#m=yfF%~EgD13*DItl zY%W=IgY|nfOZ(b;qlMA!E*;JvXS2eG7rWLjfYl40MA^J)J&9H)QZ)3@_usppN{Vz{ z2AO{?Xv55!e>}2w)R{G^aHzfD7y-V6pjQXg628P*>oTd_^XQ-0)(APIeX`C^pJ+D3 zXZ(}a6?sK2iH#mJOLip$5KPdK{RD+7(0%s|YrH?VQlYL>irTu4$I?9H$o!LRtxp?C zt3yh*;`WfgCIj_{3m$>bH?Ftj@-CFAv&Zx1+Diu~>YY;j?SCXi5I%6%BV4DBHJD+Go=F}&k+Z3pq7t8Yk6$OQUeh54oWwXS zCO64GF_?X?cKj;ke>}WDZzXG z0^l1?OfN}4WPOMC_4YrvGVi1YkT`p|C%9Q{EG@5<96LHu*v z-SLk=Y@-s5yUZH5@8k)FNrmsKZ5Y(=VE~%kyPKp{{R5~Lii-mJ)aLR7Zo;S+D`Iv` zo+bYG)aZZbh(w9lZrh<#UhUpp5ZA67JCElwzL01`Hme6l@X~nH*4|xKtN8uTN%C<$ zFQYfk8Lu==Ns~c;&@#!rKWk~#`|r)x!dKMD8x{f zw5yt*=t*_^y6@Kb?AzhSb|ar-Dsz8zxM-ZNM}B*asbJ!K#6tQ}Zf;mwkoD>EMsprH|qU#IUQi^xbMR@au``o_PF)c*HWMJ1^@hjWl)xs-R;!PJ~U-- zE;?%8)UPph$;7MwU@S`fW~+6L#_?_uRFttt%Gm&nYt!-^RF4m8Mr z$}KfwLU+=NgXW-rV|CT46Jsoqx$o<0>`GN{98O%}aCpO@*z7CJK9mEi&*wGlP@)5u4x&GfSTK1rB2_m$O5+kT}Wj@|AddAE?N!yCTH8yZ0AuAZhP_U42e7>9br(sG z7Eo`36bz3zj2HfPOtT^umuQ`^O+Xd@KI->$w>I^bjez)dCGphTsh2E>d*qS?5~Fq9 zBfb`gGVn~CD*OnjR5)IK-{C##t%r7rHswQFf`b{XrfX>rXrfmq(g04kEuW?xMYW&5 zc2{8z+iW}6pNB(SF2rf--Zps^Q4T17AAq;5w8lX%r2@TtAssOz zQ)cc{(9MC4143xsJQ94##Vb+(_Stqyn1EP zPAiNu6vm9pD`a-}w@ zE887;zAnk9YyrwbH%UnV%4C^h)9rIc+j63N-oDP!e{I#$x~?{({n+J0fJaYz?hXDObOd&O;p_%8r-%to#ou z`Kye;h>CX|s|8b|+I(Sfy~c-5Tw z-xNVBY$7yzUUDo3>W|il_y?VYRf}SaazYYGpTHt2?i53vv0E>*_eja8wl$FOwj|p+ zmhE!g{i(BsC)}yDw$_U(szNUHq=Bl>rP($<;-&2>&xfxoN3ekIyOjFOiXeH)Ndp*yi$0mSq#>(zp>S)5@rGW zA17y$uj)5(u@F!rt+68OB*1t|_^3ZcD4ZUW5WTH{ZmhBzqc? 
z(^B?Y=_+RfRkCDT5|ETQ&3YpWlPwcgPfq#v-|+u`>G$_nGOm4CX^L+m8Q1>LJF-ht zCl>_VL=jCvP~%)YS9g`%7T)K}K)o@oL^DG+7Zk&moK=zS`=Dg(Uq=S5G+LBeB;Rjxp0$*(plALto#8ZLMw!hgze+`KwvZ{Z427K4{zf^S;I^?y2Wwl}qJ12xgm z=qoE%weXqb8v9~)j1o;colv!2NZ-xN-taCu)7ly=p{2EuePqHUeZney`Wc)qFl;7i;bJovk`AebJ8Q(nVq zp0qekckMnUIl0^9?CPvJE#AmP-HMry&W7sxlB)Se9>h8f;)Uf1nK__*z&ha|nrCE5 zumvVy78Arb!#+i+Nl&h*0^%D}Mdp4Zivc~;V(psRs@|M6t*oBjYZ)GPbHv=vHkOH1 zS8^TeyRbiK&8I%o6Rh-IFsY#t0Z;BawTcars#SU;QZwn}Zr6*P|AJ7ViHAb#ll|_B zyEFx(OJY?`M}zw`-6fnkZmc;V5FrPzCB~&nw?c)H`E^e&n7Bf~bXYxx{!wpxpaN%$ zuyOe3`>9sVK)ysTrw2S4e|4Cb|BUKwMPUrU`D_XWZ5)Im6Ika{4a z=*Q}gQPo0|5~R|zzt^>=NrU++jjl0n#(cVh0g1hLV`c^w4uV;=`sO)&Z2s)B z1VmCty|ycTjQTHmel;r&mu9~e;V(Qr%pX3vP(E!OZ4axx2xK>Z*T$-9a1fyF#$Nnx z+t$fw8XU-@tX47x2G}_VTP7rAe6M2zd)|oKg%HEQMe@e;9wl@m?j`4W_6a@;97`*6 zxV7M#kDA~`9~W0QTSXLAzah*f?7R3~{ie#JWk)#bdHAP9aOj4}k>_vi4MnjV9KX8io3chy;7kg8F+*#LRz`!Hr_-21d5SGV=NlN#Q%K~Po7g_PYe^8nFeFT+ zl*kDgrrt7I^-=yE>EcZ~;mnYwLPsg70r);Bg0Jo=Lb*~iP6I4!_y4G6J~VgHk=SNd zx;#F4l9Ry_B00axII}`U{(n*TmQigsU%Y52HQE9N3KTC|q(Ff}kOD34?q1v}UL?@B zw79#c#VxoLZ}AY^f(42f*C0uN^St-2`|bSC`Jemct~(!+weqZIW@l#4-ZNW%+w$jo z84`g46S4PX6%j;2=&F5Q?s+ z6X;yU(qF?_5IE=aK#9|XKv5JvmJJl zv}TJNs*ur%MQju1-jr7k9z=4mMLNfs`p|Xy)kgq-JFIB(RgDZxr+jbH;$J6s!9y@_JCM+wRz zljlQ=<0_T8#m%iMxl;aKMrtdF^b>FBr$@M%*w4nG=q5udQy&GE1!ke*;h+EaG>TGD zH&|LK3E3Jo<8$y4xp~P{M9Tzhv*%kc;W7nJQ+HY{-GcYg3cc5z^$!9;_{N>QORoY` ze1mmQ4vsfnTesT5((B5!6o8>YJgj1a)wuz(+V?&Q?i5&1=ZJ2R*doPMyI;yi^^Dv9 zL&C?uxXG6)Y<3HJ`|beKkuc+Zkl3kno_Y~pR&7`BLrz`csH-&Rq`rJQG3FgzR>Wk) zX%gSe@iF0ckZ1fjFKH}HnZ5n8819s2~z7)|RFYIC#@5|@!W$S_BVQZ`srix(4 ztmzZWEa6e0xI^DNe+ys;T08omBa*GrYuG=G#4$Hq(Z{G>f6161jriBaG=)|ND%3#H zk>Q=ViswyxPNChpy1PIT8{>YP$eA@ko=5v`;X1Da*PDfuG_wAHK+O6Y=+GbQ)VUok zuKGzs-_LD#r|0if6b2FcZE^0Y{uV#q;H#Sf5-2lzV60eL(Gd?F0_qR08l|Uc=ZNt~P42GZkA8y4Zg7HnG?3Zrj5kKfidg_pL--tm3|rfP{4 z+cDX2j1WtyDU(!449nbZ_loK-5g7D=dL*bkf4=;*Q%L0&w6`=?a!sg>8=-OIe!b@J zyd!qa6grDkb`|1&{%MoBxCO=)C~{6F*ALsB(PI+!)Z*$Qdqp98@WeR~KgHaknx+;s 
zjX<#4ow&yDmV$G+8tEj6lk|{h&_Y?I+BI@$LwgbGL|;9&2%SG<7ITauA%A=esxKM6 zEAcw5N#Z2=El>_gX>@~<16QQ zLHB;`O@R?2s?#bo3sp}cn&5*CGiP`?cuYC$Nj_l^$JB5=~y+%#Qn;9TJ-pytdiC_AUN)4oEu z3W$~;bNeB*k$MYaew!Rqet4L9L$@n9hmRZbuL&A03$%7W8a{%A4(9%8aXE{f^33-Y zgHY_Z5-xTW&z5ZWk02iTR5a!_5PWe$x++>bpSSqz!-{M@S1_ElaAjz$H0uG6v|?vn zu<+<51K+{e>GAoZ(-U|%JnQaQ#PcGDfW4}@+HCF zk^z5?AbjnLjLviGCF}JO-3zN(s|VjtQV|1p!>G;YE_+ksLiSiIo)K3U+GtN9UOU~% z8Duox>R0!XQ*k;C^!KA@GtY3&n-w`u7B34>(U?}=TOJX>?E!!KhGtFR7DNbgjY6;= zQY`k5sA9Yqe6QIk)YDWK7jZ0zF zxU~7D+n*WkCP<1l#k&QuA#8%G?A$D78LA{rT}IR-{0R(ps%9m*a58!on(J2%cQ+un zAmvLgyH(1y0L0hp9{`w3@l{#loaj{_Vs6S8v+}rC)#Y<|H#dZp(?=Bv3ca$t; zs2JB&`DsIW(7{&FQCN9%C(|d6xCU_~)chwIJegg*BV|V6xG?86%FV9v3@W|2O39DYvU9^fYydVF6cY^V$&4zGcRd(vFg zQmC(((Tvi{mIvR0M%3oa>-A{$(YK(#;IS+ZL~uf`lHByg#c(hYI&<%z8(VRMPP=Id ze&I{oONnk9^^aykBXS2@#?EY*;RPQWMt|`dx}d8$gPd}%mTF$Oj?a3cOKx9ItHT;t z+6X|kziLi(Fw+?wRbNarnRr9(jSXzcz-2NUz#tX_|#%^O4K*XL#h60<*F z_2QVQKirRStdr4I)UyBaMuzqt4G&qGn#R?Lrogh}x65L95S0>@6+G>@Kb8p7OCiStR6L#&Vq%QigxFxBP(`Og~g>;3&|Mp7*Xv-F>n9@YTC1LiNE1 zgNL!Z^F|K_mt!!y*O6tX3dmP`6{CLe$Br*kqf`r~bQefyl9Zes3nBVE8N7T(hR=tR zhmXoOOv?-DzV3rJ2wPUM=GsTBlYObJd58P$p3_tI{BKXBVZ3MTemSGzA%KmTw?!U$ z-T-NKjxaFb%p{!hI4fy?cMHNFx32zJPs~elACRB{Ug_^r}4O`8~H)+p2PnrvYvL(2~r`#nMOf%3u`WTkEb{&}*x^cE=lr zxSD({uWZ{1UiJ2AwtvhQH*^apN80_o>#0Y{3rreNJXL-9u-SW;l=%JZZbc%h&hZ() z+u_(ji-r38S*B^)xK0zao0Ra{b(VbGM#Fh8+~w}?1!9IRAB1i>&!4mNSoLc3?gD%f z9L{eoVWVewpc;*nPG4@1uMU3lbUFTs+|=UoX;h>BJ~h0r^V>BYhRjvf?F3w3VS`y7 z4kq@_-2Z$F5-``lkO*EA=*Qlf|QeHb_F6JBE8UX%#i4C#?&c}7PAYzEf=!9gK zB(@XBcytRoyai?4f`Cjj->Li2!L~Q<&@DUjThP%m74*^`-^PvbI9HdcIreNBu@Op- zm~P+6YF-06;9Cc9d2$mgPsF5juW{t=Tr2T6cq!v-06w7#aDP>0R3|_JeXD12bM?1hLsEn$W76l!N!W^es#zHOKv*)v1s#7EwIND1zLAZLqgdd zSO4vN0}%pjKUP zLq`gJsyyqz3L=;TS`HU2;Y-9brHlEDcJ;6A8?T*`{wH*_{eR9?P}3D{+)rBcIo7?q zRW!Cc5!v>tG178B>|V|!r&LW%zbhaNQ`-ZA#X?7inu#+6Q_b0UAVk*Jp##9Nt8XaE; zJG!3<3E|~2%ynklUCkkHK{#-q-3>KpcA@)?T^c)#rheh-YjyXWulsqQ-VE=Lr(+KV zm31?Jm@pc#s|AF7uH5MInOaxR?vNRUlu?ddDbH$sMIOYh=F>)X>aH>rXH{f5T|H=F 
zZL69KW@HSoT*>d#rgmqE0u)s=Q!KQ#`j6^;Va?as7QQhGdD|{%rz4h zY7MNOwWLf(o@mx-8d3CJu}4_xZWbDuvd3e?)SV-nratJ2JSL%sTfRCZdVQ>?EFh2P zY9ug9oJ0hj4S*VY_J^<;D~*0Pa^xWVp6r&~;T#_GHD>87WY0yDna(nMI@+;ulk$0k@_a39iFUKmZIEa2UN1;2SiY3~a#XeNZ zo8$q(uC57(r6j?Y@8AlGf!@X&BI-qfEzdb)$^J!2ynngReQ4AUKli;GjF-wzFi`X? zvh!FOaAKkFkT2X@R8ohy=}LFkltCCztlds32fa-K)x%Z(6pn!8dob8R!hpOqgRgc?}z8^F1T@A`>q;aGEdu2+S+EiO0FT^c|k`O zG8Cj5!wYV5cRAN}V>)MMB|dHK$mw$%2gY@ac-TZ_KfvxSRh?KBG!8+ea6>2w8RP?>eQv;RM&6uz*#*bdlMx+c5bMnVq z05MqP6C1YyT_wvme5YB!Bw`1wVChYhh`6|6T#J6E@&5RseNE?w_k)TGf%CAF=&Fg1 zJHP^Jjo?!ro#Xan4j)uAoQDCz3Qt>i&Ymqks^>?5sDzJ4vMgXxm+~VJ7iZA0!XOmmJ}4z&SBb=<<9ZdJPzdZW;01exb7X&&S<9_I z@oe!q-fnkWmAqu1zE?V|oa7lyzla^b!Aet-M08I{s-bgSIS0%uzi$shH3mC*kQAC`|S0D5tX+AR`HQ1y@5vG>HOEU@x=um8mp^mJu$fY_`uU-C>9!_<_xTRP)90{)KvWBK6( zDqDV&?PN4%nxbBXvk5+5LWcgFwDHuxWZlFW2#^Y2mG2@bNf|#hx#fYQF~O zY42){^*GzXqYm;B@w^8$&1=*4zr6HImvb^Ys4pYrBFC5glS1qTrHB(Co4 z&6{vdQ|qJ~D|~E>^LBzOi7D<%kx2CUPk^w;?s}!V8v!>*;!Y@!khdII`5_G zwRa@(?_I`BjYz3G|GRLx`tFV1hp%pmz2^OQ3T>nl0k*5}%$Oy6_&awRCTUj8yy$cm zG@^^0fLITAC`qr2YjS*S3Hm2Y*zQMbF@(%6t0B;?A7u8jjI2pA}EzE3~Lm392-YO3rwNyD3VbW~CgtLg;mr%J(eUz!F3 z;J{lXPrNf5j;N#Z3tFV0<_o9v2 zQbnw^19D^2?uJ%CsOqL*al-|geJRY_&b-CQdVrPBw`f*a@W=pab9&tfx){xx$pG7J z`g(!iNQi1UOFO7zupd_Cex4kHj0tK#Sym4|ktAWVC?|XVgsWSe429h`%~qQ}9>(1% z)KklJajiP`D;9;Kbx!5#R~Mn(Q%CWe`H)0|^dU^=WqKl}=~QAlMStq`CIS$Xx>KdyLH_M|Ibw!s-X<}iw2>ueBbzWv{}x0_J(_-lEQH}%#QQ?93l6f%zlU+j>z)q} z3twcImZEuMwF8BAtQfbi9y^}pu1`8wwwn%`|4v1j{T8P^RSas;XnDDAVTDu_R%X~y zYl@AkXvY3SmZ6@+X<%4fH(%tL{pYUmMi9VpjlLCv4~lT&!33hJyUL;ySiqhGP~XuJ zzQf6SI=CP?0Sh*aX@o{EMMV|R=egE;D8*x>iU|{2j#$lrfJ2J8BH=;6Kj!Dm- zFmyv`mRdIs;L`OtH(;~v?Giec?Ka(l`iL@j+@>!05e5Wy4ks;nM07^_^BnT8C3_$# zk8VLb5)uA3ExA9!8-de_QZf4RwRRa!JgDYWJD@?7AP$&%V8b;WPI1(!?(P`71^>_! zMUX4W?3k`B%Z~jCOTrkUI#VipYY4Zut6BeOeF1mfvEo=KVlVVWymtMOP$~TXF z#w#Wh939+{kq_hoIoF;D3~`sEX^i@MXX4O%m5mbe1c5%C_*X+)OBzpsW8J@7ZsIQh zxG{c1-}w#l^m1lV#mf%&TV&xAg!t)E;$d0QHB+9R)d7F-r=dE>?s3-RGr(4Pi3kW! 
zIF28Hv`z*|d+Bzi5%vYW?u>hsz2UH!)3uFrw7)UOQujpX!HlMF7U!!;PxRcfV>nE4 zyV?nNua_SSU#yAXF>M0UqHew3_pbV@Yp2qI z7VW=6FCX__BSJfsy-$|;Ge0Rxt>JwU_AC%P3{-ZO%gJA}x+I8#_3P7)vEG8BO?juF zrRwY1h(e1POi;;}Fn2YqO{-8O&aPP_F35L2moHg;=Xt(2`XadutTwnaEV8nTMrLIi zm1^?k=mKU9HJ=$ty$Us1B%O&%=a0rrMC%%HO+mprb>Np+{#EsDDEX7*ShBu5dOoph zJHw1+mfVYwzRrI7F+cx^rR&Xzn^!@-z6-x^zHMqTh_uj89%3sW(KqWQM7$+;r?N#a}kqa$8P^{k+L@5}qxa zllZx@klX=H?yzYZ42X4J(el45yCnf^z1kKbVk0lgu&Na()V!kwoUskP`r_HRiFwQ} zdi_NX%7_tAtf+2Pf}D!cc8RU9bP!Xa8Pd(!R9n&qltt65ooLc*T@GlXzjQD*dNpPu zY5FEV1XGzSn|7A&U1g*TKfENcJrC2<*JF$QGcC}QEhFfhY0IWlmSCOHzIH-|J3BE6)K7g# zh#qcK{|$POiF*c}S>4bW4l>+2b$gva=9%1iFwGWhPw)kBecWM_K9{S>+HbA8SO2Bs zWz*m6EuC$60CV`@m0(>adDj(J6*CjFEm=L6z0uOwDXFqbl!2Ead1<7jsA$HZ4T0@{ z>plU2@E@F^Zl+7Gn!casP!LHC`^>aRI-74#|4jFzc?SD3t@>khwtUPz<$q}-kG3xu< zH}>V|XL_0L%uV*m1SRA}o2HALV~6yO;jkA2J%QJ~C9tF4_H{TP{#lK1QRhGD93V%} z%fic(R>^tPxAmmjdPrA7$L8S{QZ%T%5pC0v?I`B3$2Yq`ajFnASD~BKKTI!0ai0XM zjhlMw92g>*t_vP~_M^@IKhpN__spWrFx_PN+8d;Xr08jmyenEy2-b^|$^v3FhTfdAnk6 zUy>rrb^fe5RiiOkOEQ+Lb!1c|Pp1n4ow<1B9U5FC9!83A=5|pcC4K~d|BsaYFCQ?W zjN{R&(v|0(PQAj7S?;dlvQ_o4hPviZWo^=8Cq+NMvYqOquMx6XkraAW?l7*L9e5d# zZl{vb=NX^;83Ym@f$(JmR|~4qi_g-;^bn}Q31z>iag9TL^S+-E>IwJg8b_nU!z;xB zEgc{7CbMQ$>}6g`NXT11EzRHjx#Eu!dwGHbsPNtZWa_G?QA?_rzZn(m_gHXA-B}<8 z-0}9aGSib2f>+jO(L_C0?FH@s3|SXwRXK3HyP1=Vq?0jW#1<=DUVr8`0D8D<9I-C0 z{dkU&Xw!CcA!pKe3j=uP8jL<9Y~MwuaP<`ZiZo3ou~dBVEqbzcXGNba&?|QN9vcZq zP0~8+3!sDlwh{5+(r-cK4A<{yKmW&fdgurmZb9$QD2|pS-($Q`0f1JFBm##V6ukrW;D24vJQ!ycT-mhEr!O(I|mPW!f>NL zqubEGfKF@us*16@fbK9XFci-l+wPjHx7|$TL!5SQL5_ikm#oDIT>tMI&$(kY8u$M^ zIscK!e;<@XjEAv1O&bg4{)v8J-CL>|1B=-523#B}q4LJgavV0ZtsLT3rP2sfe!zFld64J z<50ZaV{3$ZZ~vlFKhCAPK8)QJ0P`Tit=%esh@DKicEpkMAo-=h!<8C zbaq;(j`WdtPEew`UjpaI=6`W6p%g~uc&GFIp3Ptxd z%7nY;E*#lw!$iXdbC*q?7IH{keFkRgf8L~dnVS>(<0OuUq`h;Mt)g4P_*`h3G#A3O zgeNGWiLN0sk=`a1txyN04EH43xjsH1W38~Nr>Rt^U+uD(nY-kKaGrLuo%PC1f0cs# zxz(E~%$odPMGQhDi7785JzGf*aB^^oOV;l`?cs5cvjsV?|IY~le#P(J+l+#d$)k!o;8Z(L 
z{Sh%c1aoDtJ+vz;<91vOAibbU&v1B{4vu?hpsei7mDC_V+d*VvG0XGBqV6*YM6y-D zjvEauAGc~rsBG}o3R@JNAFooZe`)1(%yICSsxxak3b_b}_p+StB?Fd}DEo{lL%_e* zB9jb&nll1Dfb>(G>R+AaY6%5{%9Z|+f=aFKL#9;D@3^fKTE|11U0Uxqd+K>Ux4h?KsBmm;V;j`78?+ksC9w1%7@rUACVg95GN7*;Hz~%xDr**0+&L z?%Hc0n{rW+^^Gv``Q4Ng!DTL=rZCv}QqW7W+Km#X=Dcp}dSmXCYw(K+D=Qi^Bmh9p zb5%L_tz2DOX~noerqp&8{3-Th=+i-`71`I}eo$IyVt-6~M)}YFTwirvHSXzi7v}G! z0a{yuGrsftLQeS%yO<&G8y?G>9T=T~9n+zu%{(WZBXmqzM6&sToXmFiZx0A+y)`P( zS&Ivq`wK;4G~Nzh`->6$P@%KYi2dYEp;w$bY?h=Oms%|u8`^z-8HoE^NINj1!L(cC zJAmMdN$UWN`FHfiM$2;IUf(kSoPoP0<940&a>#8bJi0|jF-3{-R{Ng zMKD3l>+DQ#{+Hu^v{gQhfsbps5*}toMNYclt4!cml-9zt`bd>Y#Z=ZOTil7V9a;+i z2$o8`7kO-X&NJ~x$#pd@bC-@!#V%;J1ty9bF8#i%{*UY&AGmNABMFvx^kvi%;ZouvYO?(vuk6C~W#&5SOEXu~(DoQ?*hf?DfuRNcNrPdH75BAH%ByoydlxROsf31Hb z$;3>hjE#~y5%~?e+Xo2VFn>i9e-sy9O4AOp@d;G%Y zJbXv*tV?b|A{{)?V{GiMq=t;lX{7NZE3Nuk0}f+~JJaHIKbQ@MI`ehC+hD4dzZ%tw zAG3ult_q1vu-JOO%KNIyI+PyR#L zatB&UC;+EBv72GE6?v|(1NHIb`B0+h?u$$rkwN zBBtvH+tWTQcPwez7z^cOgqFz-FzQCce?(~s7#aGo23qT0xkVl6of&2=W6ve&xV}(OPWMWaJr9= zAK@F=W;u;)E*|fD;XZSAyo-^Ji^^(t!Gy*6VD`V}#)04L?kBI7f@#>UhVlqn{G+{n z-uIs#m#IjExg8_;QKxNzPf>Bn;61ReC$YF!dk~+!0_3U?UAbg)cf<0hH%K)^BfPE@ zwB7ZI)2SuIOko&+o!A!*-x9vg_(cE+Bz&^Nvacgu<5{^OCd}T>;28B9$EO^vCNQH? z0h{6>son%czD2L|;$d#m18w1vhvp_g5Z9gPU@9p6wc?u9SD}&UU4I>MGOKya(p4iP z6C;neF+%@M1WM$G!*Q*WvJR`wn(!ik+e~2>vxEXnGp?&^syqE@<@X~X zluh5uNgIlU8ig)J?#wbxx~KNPf74D&#xuYE+AgLFUe(Wm^iI!t>!RNchgVSkgcf-KQ0# zCJngi;*|vKYn?)Kc#*WFsO`86UAiDKE(>eZk;8dRea(+VP|B$YgWq!MJowOb1LVi zHRa`xT*tLv#(qS}g_`PwCy+jA*xGgUYuQk25GBo-7md^TxF1#YCO2yulcX?15xaDf zpkIPE9fa@gwkMhWObGupqW2R3Eul*u&S~_>%#g*mT6qR$!h3>Pr-@gecl>n1vlcsQ zUMSr+psfjM^1SRVw|$XH%K#>B3hKfj7a}R-x1l*5nrs# zd0+5v@PJ)U)2rn_=-j{C4*vUPzuzN`-$(gSgCVmSsWA;Z_YoNa3B%BV!M;P zG?`q~UsdEDee;Xt5Qf=$cQg{-cH6obG%Nzi@lMd#a{Y{^s)--3 zC>}(@N0q^DA#Ag7=3@z_epROIjYn%$H_}0@d$^5iOeJ$ck=`VAo#lkL?2NZ5UB}a; z==ME(_*JGgzu<;o2OA4nW^}nid6)uAzTBm~Wo4ecD658eq2wrWWNQ_dnV1B)bd~As ze*3J!)XZbn%7{8nfrCbmhMDtCFQpvnUbWcfsylForrYf}bGcJ=? 
zC^fAT52jyf)t@Nib3E?$PI|XPM_&Ez@{{(E8=l|mtY%GNq%W`UTyiEKub!xPzJbdl zm2W=eQ#DS*1!mL>hE>w)LJs`frL;h0kQr^R`kJJ)m)B!?Fq?)zByjB&qX+x-5AfPM zqLL~xdhOF`4fRkyy*kZFcXw*QG3cv2h*McBHT6|!H6EGhr5;pmwnyz~N(xuJHyGzpdjVysat*Mun&oSgc)@N4C0 zf7*_W5eVeY+2?;>{XcK=nR{{ErITXxJQcaA{icmGUd6_S1_ot`#*6b2ej@z4tEf$C z0oHNU)7o|4vjQ_7AH}L;j|e5_ED?eyfbb2m8*3iD_}5|*0OB(4d0@47_(DQNq`az$ zg`B9Nwuwcr3t|^CqnY~9QQ%%#K!C*PgX?+a6t8tvM!B( zRRW}_d-(CKi6LDb+X%6OKWq})) zTyiFt+N$f-(zNr>HPyfrkZcOc1Ia3>pGuO?8pT+-xy>OTb)$6^a`1A4wH98)SR|dY z*Kwm%N918SW=a(3P8K4%%0u@3I)N z|M>W&<0S0&I`1g)6YtJ&;ulBr^15fkltz+O?@n{8=Tb+4hxOH62+JGiO>Jl2j~DHd zc@D@inmbgQm%gr;{N$FR+pTk?OW_qkMDDOIRxIq`*4y9TS2Z**Ld}M#%S?*1oct8ytD_Rd%B9ifw}AP{L2H)TX8UTlI~=;$icfxYz}W zo+UJaxL>sxxU@;$hO4S*zOu_U$9z88L9ncwgMuha&M0}3vQQleS;+J6l$&E_%p5Q*$ZyC> zJy+gmy+f6R)Ph$%eyZ5V*Q2Q2p7oFQ;Op!@nPPk`EL1G1+Uba>)hbijxE$7=g!`CeNfNP7Lv@l%pGm3J7b92 z#*IBD#TAKA+=9xu>3P9H-@~e%B-qz|)X@c~EQ9TI(>a1cx2L)J?Pbm!5a~A>EM`O1Ugc#Gsc%(>2pPAlKnwL zjjgdMNw&Yr`)VWW8eTdml}4!PtQL%UFdqV5k+i&IX+FI{s}Gu3K@xg&QADF=3UA#$ z$~)~p=HE79EXwy}i5&Pn?h*x-Ws2WXQ2@KH6sgi5^3sH&+wgw8ys`_uBF=UxulIJF zc)L<8SlH|HcxA|==}`(15!;MC^7-$ew|0x%*|?f(ah0$pk?ZfQvmnqDycsc0c#)r7 zVbeV^lB*`i7`$d7U47DF6%3b<5>HFb{EK6@fhZxdfm={%Z{cxbSHyk05{Vby z=5*+E3sOI#q(76JcBeSBK8OvR37|gOA>|Xc|F$=iImw}DV`Z5j8X;dN)-asNq-}SX zcdzfA>MuY@QhWFrFJPoUcfL3fJD{)mq1^2u8281M+Sb!iZYK>n-@i-ZL$Q^PpqkN0 zAXPIT5sa*tv7Y1Pq5nWxnq4p`qX7>?%`N%=6ethWzIyd#4AWFtmf749Tl06`buM$O zu$h>W=Ga%|;hB z7c_>=A-RNVXP!W7CK5j?v}{AfY>~b}VRap?nRxY zATG_vG}ftT(U^CGjn-TPRMl3;TdL^zlpK5xC)`3eW%JcPsdWzeB8VC0(UTH)Lb7Lh zM7iY9Bmd0P++o~os+HEum@e1FK3UJJ@iDAt&Kwr$rqSm8cG4rtl_C8>knjtQFk#8t zKtJ}4&X=Ms$fjm3*Ilq$p-FO{0cDGZ^BX&bS?x$Gfg^)en;JvqrYnMH(KggPOY^PGxGV0rz^zMe~Y`@z+1&g(`aNP@Nyv0&G&b!(uUN5}xZv z&Rsa?K;pR5pI0BBhmEGifhFGk#J7-h3a_{Yk>PL~#Zsqla9a0Ri_v68c_*sFY^*1e z_vVJHxs*C$>=zN_rM4*+`zm(airM$5KdDvkjf!?i1A$M&lNby&gTYZpDDv%OfkTgT z$Y8~mIzo#iX_1DY)g(`0BkmEbOi!003Ora-Vo=_zYC|4*Swy(}&RbyYwNJXC5-O9P z=eJq5685o5D=2nET!6T;b(3&PBv9z*rAVHxcR*aE5HxCwKTWw8wpsrF96 
z;0FoGmue51>`lR|UwTIxu%_mbm$K`H#1_Nj4av6a-uYTx^AMwvqqbcUN|KKOFXAlG zYeVqn9Md+Zxi{f+r+L)I3{N>F8fpZI^?e`U8KbTMplnFHcjMJmC0fVq=dnUNDN>V< zcM3V!V_2bql{AN!YuKFWC>?SuevlF@?$>PfOa=h!+PQ1SMQM5&*69`?n(GS&D1mZJ zn!bb1&#H313hPF1p>I@LNc7-l?~k4zPv%|Knm%|k-?de9r=2|sFQof6{a#4>xv55u z+3Qy3o@GNn@HS+6Z7u7f410ffWGJe=WjwU9TGJ0cAxJ~0QSsMauYk5XfU4eQc+wIr zy-8T!?6p$s@j;tpu;`KYZk)uDm=9&PB(-?HLBqa0HCKWlQb*I5Hvd}DHnff}cTTj) zUUO%=Yl<~;#6yv_*f~aNZ7K0l17l3eLzDR4Uv9?3IfNCMZkb*Bn8`R`5<^iHO^p-F z2IZcDRB0Lq`X8qS&Gq*Psgz#xr8x%MszJLAB^^uYWYjW4pDY3ASOjVlrB~{MrFphc zhUwIYX4xjyvghlX%F46?S1%S>9SSkeiqO}APuGQ%voSPO*Wcgl;Z!jFZoYDhC(AQ5 z$BTDEmlyPwxCMa{t0dHmq*sNzs;yFAx*rN5UZG-1t7aC+)g9`N$uAfr>zC3ANB)(D z|1a7*>IdZTiMIGNUO%qt=L#~1L?0*p=_(u5+E&_PyqC&rX1B5pX$@m|>KrIeS#r9p zo%IwlXp56#ZGmfm z>Ky;U>x%=CA088;zZ$j(;Bt~hE)M(gFu1Xr&B*j5_^-;!55o@;L>Wng83wZea)spQ z|9;Y>h}jwux{v#o!hTWlrAUsD5^O`Q0sNV?2o0?)<^~14vDKRXAE=wu;$jF^gikZXfA;fsiGxh#TOZ#Sz<PoU%)yrAqrc!FC!;=|DxPlG~&>DbEf)r+Z15#uQ$W zP=hPv^Njv{L-P9g=_{Jv_p<#`z4bf_GeWFU_M3ze+3R!6KF$-eEv)dm-M?CE^xq{X z|NexlC--D~641W>6dlEoYmZun4+vcZwr)aSXF_?7iGv`KNn;!6(IWR9{6josF&Ra4 zQbuyV)kM1gbRA_Pyv|kerE>Uy06}ctt&>;5SQrq|^}EpXGigW+4(2ne^LW zH-$+wO%o@Ac5}qT%1UI!GaK{yW-{fH=8b=ikb*+gU9CBe)adLOJ8t^Yh~0hu3TWO+ zZ3b^8L?Cf%a_Yf26*=WY`14u&84{A=sKllZ&xc#@t0J;7>m?12nF=f>KtRzq9uOhr zb3c|Mb=qGif8S_+;V-gZoKUDX)T$_Ff!e%nQOzqSd%*um=pj%wcSQUn{vp78cu(Jc z{XH~yN%_;sd&qy*`RzXwV;>E!5^K>JsS_7^B%iLjbNK6TK?~VrSBvRC)o+lM;vlEf zOE+g!Qf;8#Ua}9*?>6%Uq&NVSVT9&_)%s865F6{)KX@ZqwYvjkd3SP~#ofm^tPZUj ztlTF|@u`_qDAKul?GgzLjt@%<4X>6L3D1p;$%^=-MZRd#N>Qsh5}EnckiXgbm^F(> zi!(zt`g!vifD{|F2*NA~Im27HshZyB>%mDV_OnqE%QJO&_oF5k;yiFZwFm28rj003RZ-ht?XCoDTk0sfZ1c3KZg zL&qh#pn;5+;7p9~lmx>tE)KX5iyQ&jsl=;67sXZkc(b5WeGxckuLROMv_jd`W_P-D zZlKKhQN`2jMz~CGOHg0UleY3AO?!m8sH+}qXv5kV0Ax!clJd{OgVlC;;^Go?A3@h@ zmikr7XbVJOVH~k4G{PLwI%k9Mz0MnkK9sd~4Emr$PJhde9d zQF6BE72?-^HruCUxkaDR=MJ6AwgZW>D>Ya)oSM7pzp#+-w)z=xrVxHvm1QcuAxr6f z!aa8NE!NX|j_+DEdkZOohgSLBHM@q4sP66^=@sdFy~%GYUQiGaZH1hCTpJj%P zn%%*?l(C0Wgsm!4^r243?9d8(5AWDGg%(Rwt`vO9)GrCw^5 
zh}#=V|6B6V;e@V$Ch9R2ket9?Ue1}@o4 z{vIs1+CTri13|OEAmM!?XCv&RI~te1^b^kBsHS@T`H0(+v-=UpxdP1$T1HcP@#%S; z9FZ4<;$mmh>}9J#Q~K$zKY}GQlo*fVx%yC?|8%@z!=b}C7s5&h&BhmU834HRHWp{! zo9sd$>wZx)>eEf$gqNMb9ZQoB}2(cNzh zSd%CZ#-R0Mj#ElS<$G*%wMh#86W&zwO0E3Du(n5xe#YIgW!7}kjv1ePUajSH{9Giv=dp^sof$(9% zvY^+@D|md7>gj2kiC%M5wJD0gBKsAMKuM2eM$%nn>(3)&)n9=7tgD#bggZ^ zvxH}x$%hfXeRJ)g18{_jFYHnHL=&yxCj4njkEu_kYB5G1_<7B(wg;e`bh{m4VQd=iFe)poOOd;@R z(p3G^u02JcOqRqKJChSU;(a{mcFs{ce#>!nV>(puZ6aK@L?$tTv^$;zOU);wcr-^; zq|ec&AozQq{;gERMDq8RId@K{_G^M0q~5kujRmfMp`q3wH%^zcy>qKke4PJOM{PRgbPX$#ZNDZBhs}t;JrS zRKJy+*WgVssAi^hs)~FM(kv%G)Qd8Xve|>OR`Z!cua~Ph1f?8$NiY))*u>HF4o>y z3IYt8_0a-4`R&W{2n<0W+@W}Fn|0oOCVi&qOkXo=RZ@0;+*cFoE2G6nrV}S8-gY3D zJSR0n&?6~I3lLqPAI6wuOp108H#md?qCGz`*)XDYX~I)X&5<@oMpl;fi=1BGMP~+s zO1Zd15i+jwO_COfIkRuE$)gZehj|%FvFwa{#Sg#Be|(XQ%P%DVM@#Lm8?G5e@aDU6 z?HctL8*SU-T~ux$){7_ki|n&2DGrn?DCYLtrvm`0k-S`+M`eHf|NpMTUfY`PqLa6f zx!d=lTe+-kzc>KX`5ZsAtlvlHeKU)bYQ`q2%ymw(EMR{vf-2K)r5G*PX`z~30Hqwr zS4`a;G={gCXGm;9AH(kz_*k&KpzzHdNlS@2JG80rbXwY+W)MTees0ayX&*}o>se)G z;HP(PM6NI4`o$Us5ok+@PB3T}vFsdzUV0BK$9M{_Nu|*=6E?6z`)1kAOLt0lDWADD z#rlj?rI(LE=?{x5ht#)=%9q82cr_YC<@B~R)*0e6Sae*2@a}%VSZ00p&I||W1)%%@ zOt$+Oe%hxDVmh4&PL8YKMZA4Ut7iH}sN&MV)UG9Upr^VzUZk{MO!X|SQD_u>#k{7I z1RQTEp{5}zZJL5&YqO6&7c9nA<7wyG!_1x*&45Fg!|sh=qsszDHpC(h5UP;ekSU!7fPN{lE7+x;?EocDa$>m4Z8+SW=Ihd*KMsM0M?iH_!g_OW@?)97_XBm2C zcq9R%oZ6|nqJf5#Jct!i0s6(T9}b<-;oQuG$gEn!JC?v?F2-CNI6;@}yRWX{gVw?e z^}SNN^v5M7Y$c3(i=b_eFX~qjXe+PdLmfpNUyQIzr3Q)Zqx2p47f~Gx=HFprUGSqG z(D>-;;#sPCvn@RhDzCU(*(zJTAv zEgtv&6T@v+Yy2?&=9z|NXk@ly&CGO2HT)WBq;BY3=pGVJq$4+++7@HRbpD)0e&IrX ztjIJikp0ueEERS8-QpI`{=!&9$(1bWGF^6w!~&Zq%zx?q2(=g940g2 z>0G)84KieO=mZb9DO)854rkI76LxgVec%qyXpISc9e`F8M5jr*Dj&;^5|RwTg(bc# zxfYw`C$C>i9-Gh7u?US;Za4}$iu(AZs9*KXt;8*$C$Tg)_@=wmt&#BDfvmbQTM^1& z*ut{whCy+ekO3Nj&T-+~P+$C{iO}B-zxx1NPw!?~5(kIY)6I#*TqfKjQz#m$SAr-BNlr!tm$RxaGT z&J)QAh{hAgM+P_Ftdi;WFqXntym=%??{}SOGhN-P%|3Zd=jdMF0N*hvnc(sm=l6)X=6x6*z{>`aH+?AWoB 
z3)|Kz9eWK3tB9D&>Y{^&JaMDbVHt;TO^4!6q0pVaCM{v?gjcD%y;zCXXdTr}VNs-J z=slhMQ#$77--CDpy2mRi#|qMV-|=ZtCTI@JllI>P$ZeAK8x^0(4cjjA)=~35(pNO# z@Y7LVd!&Bfm`o~PQ)e#SNkbPk#`@@`3v{E1TGaOJr2?*EJ&nw61Rj7;{->GIe}Aw_ zdEnazC>w?=LPY$Hr_tflX*J3|$(o>Qp^DUSYf2wZ>p8Ml!D1VPHrM9zu2P84 zg;omi7c7jq+K^Yae!vVK2fFEGoHKGwaVHn7Sn|O(uscPS$f~}dto!7bJ_q?qOU_oc zrSHVMOCa1UK0;ZU9{<*`1qtjGZ&< z;<@ihv+9jowS^w+9ef9UssdWzGT**EyG{=f&rBwF4&_t8V_PAm7DrG1u7ySswG8Oiit(S&7tjkmw~%1;#_<=tU2lN1FjwK+Cy|SVP;$q z(lk)cSXN8Sy_(DmmQ?pH?Hy43;$oTh$df+1R+OC-AEC8C#$k$QB~z z8=QqCvfxkFp#+=|ZW+J3_W88L!ADla(NnIUX+&)ky=y70h`47sbrD-|i$&7^p?qen z(}{XYv7=sdHqCI-v!4bdC*~%4deL-Aou;DcY^pT6JXyV@Z2wH zinFssFTLR7KM~1UHe!4hV$O?7jal+k>w^^r+ci9)CqOytE%=g9BoVX5-6ROlu*M)`U8`DAuF+^3ldoG3se5{_HExIdV zN3hy>*(hJ@nHcxBGp5TFFK>acY_s>Tro_MpL5kCRGnCDksKVX)j zz5%Dz?O}H@Hb*NP35WG2-zFZQEY&7m(odl_|?Y;LKOTu_>% zvX{q1X}w9OU>Bz&3RRND2l}r|89I_Tx8r; zoatrWCH1Ow&e4_XISrHM<5<8i^w{GQw8ce7(=_yRsBvn}P2*I`{ZP7i_uxT#ZL5|Q zv6<)b8Ib3s@lXP-@lL<7Znr#JhVq-X-j`w20eyj2ROCMXmgO;SrJWg7(khie_rS+) zlu$g6YEultJGO&d=48$bAf#fw#;e(4#8=#SraCSqO5U!Qy!yOnAOQDg1y;lh$-CW4s}*G4Pjxjf3hNe{#BPunX*Am?BD*?Y znLUd+Dt(mRpmw%RNh(Sry}G|mOILmtZWolVpUe%2vIWF9nhzih1X;tE=bzu+kx4wx zN$S)pVyel{9na@*8b;zcIV(=DdiNGP93mc!nee`#IKwvGbU4JH@d3kC|Ax)}2h0r~ zL}z0K3FZ;bVwHc~tbEC~6Gz6><#kQuu1Ixa<9ph;0_8DrZl@E(o>Po+#>Fh6;M)B{ z5lIh9C2T-4y>1IEoCiAd=AGQqFLty<$=O$a8Sv5>gHLCw%EdIHR+J`4g?T)3R3@Fp zMn0XZeS%vi#8{vT!35UtfM6L6?N7uf%oEymr}x+D0Z6@7*)55Tj6RG7$tMnlWQSnH zk7&Nk?qP96(>Hzzh+8knJNU;f#RG6jeO-@{TUp)Ij z_sePsk=8-lBiHb`^lWdPANUb948Tj*VNK`#E(};A3#;$nrNBl}-}GvjQ-TuqE#1W+ z#fAiwM-r(-d5;v#Np~?HkY#l^KkK&RSiKPA??{z?*$n_aJYn2W;N9S6QOPu`+B;{> z#}7}()e+iX4jAEmC(X zM*b>O^_Qi>GsT^RYv}>f#L946loW*bu%8!CN0WupV-kM*u%H ze|US-@Yu}Si;Uh|LJ+@F+mn>!au&kuwydI@W9E>sRX_G~z@?++o+LB%i>yuC7 z1Gi)?KgEWme%Wp&M&U$xHK2}XM5QpAm7`~-wj+Sl^-etRs zL`bbsTl)uzka!|%>ch_b4?gp@qif@Y^ai`@OKuZqP+FbZd~etVT8rK8d>$>XF`8zb zdXrYg{Y6_m^V(w<`?z9AjZwy81#OdaeGQ@lU!voO 
zS5%oq=Y1nb%JLKtn5Ax+(*=n#)hTJcXl0R|SoqCenDHG*53yBJjx)=T$=q`TD(F)Hg2%u@0I8|`9uEle-lLj>PHhl$ixl^&Ot9GjdB0Y< zV$nOu#OqiSlfApA&fq3n(#s7!QVq6n(=$7?DJ>oUL@&~Tl&y=OlmEz1Sv$;MW$jLW z%Gyo-x2zrOf63Z0|3lX9VQ2+`>W@>=_pvNxR1Idm*+xpbTj<311MejWMahjwC4cC>+RncY@>n{m`R&D=V-g5R|`de1_iW-s*7ugzk67B12l>c)+>NZ#o*`C4+d6SsdTgw z>?%Hc2UkK1kL-yOTz&b0bq1+5f;~#eYqRD6Z*y2}=wtACRVHqQ|Ld4(1~2AVeyQ}u z9vD@VKN#wNxNLI=Z1Z;tHE$1Hr{P`YgW%|6(koZoW*7gDj?#+XXAbkY9hGGe1HYFl zvFsLUYtZU;(by3ju6RnL8=nwa|toy{==gZ-Dy*&WXd43O0@)I!dv*!%<_d~`4OKr&0C$KZOynX0l zvS#~-{qVvi7A5^yW$9`w7cquk@Iu$VidKbJwv_vnc8*Uq zBNAy1P-ubCQ*U`+w`}axlDiE^gQjM*N$sUgZyrUIbI-hRLd=I$c~ZIGW=>%tSKEQi z9o8VPIxopawXlb#G@x|5a1U2M?*S+V0t}vh0&8=#=i%cz>r&?owU_Dr_c#}gSFX1M zQis+h?gE!TV0K5Ab=XhSF@C@pUjKmcuD!hfmGAzyR>Ty(0@xJFolYLgjsWgm9S#_r zrr)0b{W09vyB9xTzT7o>YeS;sPHKYSE`NW)U#`6m4>8I8daZZ&X&cFp-%HHHmQEqs z;CevK6m`&>WAL%u#ApzfDTHX}SLkknn)Y zF&-Y-PAB7sU}b=YpvrMrj&= z)DylfBw)DUO5c=S_%Ce`@_{*JSWe+%wuX+tGNahBbWC4mYgDw3Q2n5r! zYqC@#EB>xo7oQ=Ma(||#hhCv{vOwcM9^kk3_)iVD`Sm&+_l>qrD~W_5%I?cs+wTVU z!guo93lEHm+bMnVE4s5hUejl&_QzmmA3#@=8@fHp@#wcb|Le!F%5$|Y4#nH?UL~X0 zYTTQy%7J}Nx8BptQe&@FRn!lq3>y}S-JOZcpB1y0%k|X-g`4|jrAYBu4b`bRHWWnJ!~ zwE_O>Vvc565=n9lw>jQ%YvaL zFyDV_-ha}A*|C4^eCM;jB|*m#vPeL|6f|>r=ZKTKCE+CdLSb@NZk06EkuG|)Zlqv% zKV^Mll=12hegBKq#~MYBTLrZjOOUm%xzO<=dhYyc=w3~|Gg?2MUe#edBO{8IB#}kb zpnxk&a-UMg=A#Nxw!o;3LSk+((~AfI--&Pm-JZDiWZebk#k>Pp6@F|BPc_Sv$U!Hf zMe7=MM}m-d{T$QQO?wlHC56d7x9R#-#cT7bWnF?pq6}tIE<@EF!_Nnb?SlN;zuG0V zo2)J$fQvmnrzMIqai!|A{&1KlKub^{_;$opxWtW`^N;S|qOrDn|6b zpd>b;(bj-=u6A5eVt&Xa%)DKRrUWB;6s73n0^7$Hz<7VB0N|HCx7-W>{8GJ^q9IdP zOOy$jQ=Xe$=%gqJRH)8wYMOFXbrBNDyiXYjr;O{21z#9UQKEdwm0suW`vv@B%%AY9 z3}PDzpd8?&h117LwtLh*9cymM5Lurvx7@DGC>lUtNY4`sodPXIsg}L4fBup;ktb=8 z;=5*1_@@lMrGQHuSSCS$I;uAgZ(C`$>XNH^!t zJDkn=8)#FMcWBjnoVq{1_FtWW`|R@qi>dUQd)F$0Fg#UO3=loyY@z2k}l~h}W@2Uzg+-tNRlt=g5l^jR9LO&N9 zsx-H?mNv}A@;WvMLB|EOGK?MdK1Et%y@I?JuM0F=T($o~*-%d@`MwI62%fG(aC^L? 
z1u6w@1jW+iH=9|z-5zP%6Z5O2PdPwK%-?_$VX1VJs#?N@2F+3%q++`+0uMrl{8h@- z2n#h!%TE&(+fjCajoHuKTDWJ<;bx-Wr<*4r*IIyATraDZCqUSq*QM_k#ceLax;C}; zP`PF^l{+xu^;T@>4;cO|mPoSt-lJ7TfTsrLr`Qn;?NUzGjZ#1&Vfz?MWM%h8GJ5#^ z$3j}WJ?#}u<=CZ&_71$ zJkkNZMB9+tdUT4VTQD^wW!c#-J4+RF;)_ChnM6lDK*-*l&_Bg%0(If`oNZ^4-<}{k z*d{5X{$t<&S{?C`;GqOBD2m8$Wy zl3?j&gzEb+$qRk@SQgD(IXO+=2Vm;`Eklw0s&O>*63wM=vqg28Zbcso&igt`cX+VR z`c>`KnD$6LlyB|zQ_5uWAjTjUd{zZw3I@KQjjjjzn@>N#Vt}5A9i7`sy%`%lmy}k4 zUA<;?dj5s4U&qj1aewJnuFSohn)o|jQ_jb(_>Z_pmW;Xv=#r}iZ2F4A7k|Zd3gR7` zNy=Vx%xVjugJw7azMTxC6hzK$met8i%G{5b%b7cx7AjRfdsm`X_6{Fy;1WH|5iM6? zfTD+Q7U}K}CWHIxO{4>5SQ3~fT>JsEztskJ`RNar{HFSW_>0bWop|g0xl@S;jxj;) zmEWBFHx7}}PJ)?cz3G0yM6kgkwh+Cu)+R~NQowq{!D|ZEQi>KL^>`gC-CzcZ8%SVA z*2L_B0;2V0?^~b!8*TjN(&t-^goCE%;NF-bnw(0Am18PR>9i!aa(3w@L#B|d^xPH1 z1(5r3Fco2|U%aO={`x#B_)e2lA0MITmjv?lpR#`c*a~i*P|Z4nUM;<;?=5rEQqPfU zvNCDB-JR>ktb{KVEs&q*T=eRTginu7h;Jw8nHz%XdWH}z^e4%7NB$ilu*s1OoD$y*OC#b9L6MNWjL&=;? zD)Q=1eWkMmYbXdj8rhJAGktsk-Dx?ksRdNBLA|ACB^p;f^5zl2Lu$=iv-a)PNEh=4 zo<>f4o=#4bZ>kgh9zwYuMLxQZ{ege~ZGjaL2xq*-u_oR}18BQ1&U9hj+kSig_s7)X z?TlB9gS8ijuH=Bmfyp<JBsukCW?r;gG1ZZBSeLysNH9Rx`uDtf|g9 zBaqZf2EH;Jdh$FedG5x7lbe%de?5Kn@%);=n+Ik6{nhSe&|L;r2GA7 zc!X^C6Z#^H8q-;%T_g(BbI9}Y0@+A51vz3&e>rXdlmDyd1aYeOmMR{438*=S_8^Z! 
zw%lJ!AvXYY12P;fS-2^lE$z?sZ&2TS$Fdiu3Xbsz!(mY6yehTHZ1KNJJNqSWr~07i zc5H52nzgjelziqoo*Qll7mCx| z_DD#3wPF7>GO!)g{QY(i$NcF3Z3qAS-Gf!a%gf+F?%X0Pam{Qu@Y+tg7$G`q8H6|* z(p<#ZjwMs>+tj^V*u-B4=_(rgV4u@J6sq~6T3z(A0A40wOr%~pOghyw9jZ)e=z z%kOYVE< zEfz40F26hfy9)gCD~ca5@YhZc%^>phbxTb6zv-KS`6lpEfflq0IYk3)Vh)68@2yJ2 zEa>)J6&CRh*@eW3Y>pg#WJA&d!iioBo5F4YPbQ4c%J2H}mn(?1$^c&6dcPUe{aofS zXF=WmgZbf6kE6HJcG72c086Sh6G_a2ukBAtHo#CIzk6Nlo?D6tRjnDDsWM#iT zIT>d#+-7quI!Y)UoXoCJj0j4#y_@ihesE>+8uuFg2{8UY3xNL*jemx~%bwtoIP|{9 zfgjVSgInrSMoxt&0?~8Q`eq`^^!*j6A9?r~P9`fH^2uG<{ zx2!th&T8#UZg6*h?6@JA|%iDTlIVZHLA`MfA(4n5`2jp@$I&RAkrjwwW z60h^9?oZJPU)vY@C#TgIzieuHz#|=<3)I#9q|AmZ^r-8(AV^Exs;cMH8jo#nbw7PQ z9G*%@V;Pvv5olR%<5?kPHhEdBT|ey)@(y_Bpx;{zfpU?2{S_T9(&xoF&6HZ}c>FH9 zwwvSrIAt%Tmr=u(N{7G3QYdek!im73$%4nxp#*Urk`%8*vGAIyl8l{3@?Ew9?)!GS zw%=I?2Id3$`rt$NeD6(XW9z-g42<^oKV0yeDl*x3fcF0M@ZMcpLZD}k8U>is{!W1v zY++gr35aLxx@)@9lhHV>u);>B0JX_Y+8EPCrRagx$r9V@>q1Q2<9t4XiSwRiXL zN?X0lKg_2JXVj-Q8F>0NTZ(hegXf_|8g}56t|6>5$yI=4>O-P<aZ?&Nv#$9?kb3N#7+$>Bak<&^KG3$P`H6SpH`nF&_uXe$ED zUydNiQ86pDOQm4eb64oj02}GQQy#;9bIMMI^rH)XH*2`}^^gtUM&)e5JBT@8AP}EP zJ(MC0gYUNNHW)*!3A27GPDd?9;Z`0ukjS7wl~*d{4K(@NNMPaj&_Va)pgS$QbmntH zS3eb-y&1~{@HQ;2u3uGonD$^Jhtl|~uW~BdeVyVT-R>hZa|cj}krJgNmI7$XKUM4g z3@+q5c(YpvrL}+@pBg;Pkxg&7a2kd5%`n=}-B)@-gwt+nO5Fw|Ta63rlj6+zZim<6 z)H)>|0c`C>hJ&}C$n$|d0jc+g5)#Wm>V5C(#Vx|5 zx_oju`GwXCM$4MOs*Z+*{@pE>io*SB%ZtZ`)2V^EL?2ZWLR}ik-`mt+(qQsSrRe@N zIT~xaw6VYfupc~tDb;!58Si$Ax;~iV{={&qP_eORE4Z~KwfKV+lX|v#EIlxc2Z7Jj zuDJs}o)1~dU`<_tl}38sKP;@IjjzyI0BAh3>$mHk$#Xa9>G??&PJytDh3*>hZbDt_ zx{|7-`r2dXsmDjbnu@ej1I^`w!J59_M`GU^=o1TT57frt?^m!D0Xx6}g6G^a47D}t zYXoMR(Bu*v4d41|V^cp3r}#9Dh3qt?xTIVv23M%t1|)~@WA>L2@hO!n9#Pd^28mBm z-~M39|ImWq8^9%BPXFO_@EQ&rO?!6`23@0$f7w46!!(T(k9Lx#AUErSrCGw?+RDvg zW1$PAjPIswFt+`n^MA1l?`jTatQ@~fJhyggXlQFLI9f6uZ=j_0_x1SVM9pofVOS~K zCnqieD5%zj#Y#E^W+u5@WwW^Uw;I{NbqWHl0M5WBd;Y#rywm+&E5{hHGH_?>N+f=+Lt~mY~wOlSoi{4a9GtPMYl6^+uUFF zgOn_ugt&O4i@}lJ7@=V;s1{9;Mv*(|62hjfrzTiz~txFoKGJ2 
zR;s+F)JqfMOrnz|kM8uKiH%~B*-SE~Vn$Y2xoWqwh?t?a_?y21a3vSk;?_k}@m3|9 z_1gePQ7U^nnf=ii1M`MPraN75i;qU!4Skbm;AVok(lbuMCGpZt>#Q1#&>Dcl3tNZGBY~oF!89R>Ds2@#?M(m(rt%8u#mY z08OJ7_k4>mRgTu|K4G<-S8PL_7Ywo*EwXF<&N!&yTIph&#NVW`*wraDpUnV^yO+6r zuHHBrm#3O7Q2*)@SCZ|dp1O@%n_GDE#Wd`qoEe^XhB;^*TVZ2jQ-nUoWgJ$gOUW$T z)EqretxeJFa`koFtIJRs8j^Fp>34`yc4io?fQJZ9&2a8n3LK4l;)sg4Ik^=VHp7AK zR`YQ%F%(3|u-cW1i~Ty~DEZsQ7QbDg0l%~+eAbonJdur)=TBwv;R!jF3qC!xRzOBj z#LEwMa^;5A?4EN-+~(XB>xCHT)DcG-45!>rP&W>#s|7cBeG%=_xdwcDJZDfTTZ4c$ ziEoHT1+J-5T2Dd{#+E2SUT-<5NPAS@B)!+as8ztx)ymp>&1}p5)#-6=jZL?t{UrXx zHr;y^k9n+-XzJ+7WGV(p56n$w8=|ZM`oV@BO`VG37S4Q|TjCNtY4Z(}*udP_yhCMw zXAclZ6R%8YL$@lZ-Uj?Ft5YAP9xSP2+b2vdzmZWWccOEh1;Cx>0MsG*S^NX@r$wAN z!&>P}=Chb%@y^=PbLr`TNtXGemNCTXVckpVuolppx%Y?3!cCE1t(I6$f%MpSr8pKs z(Jv*0qv1J(NWy#%LiZ)hTE3Y>o^S{(JVj8sf%|LQ0`oIANQ>bH>ke*c}q7HLfnmX{BHeb&g2)QB19Cr&9Lo(iZsC#EBgsFA}66BoEh zC~VmXISF35qxK)`^GppIgTcL}Lu98BJEAKEk(rj?g>}38L-dnL0`80)h&T3K4i~9Z zPVtAQkkN?-yt2JO8hU#-%G`Hy{bsxep^sMH6j&6&e@9T z;Q+(tLW~?nHhmsVEh549dlZS6+p{su)Qjv4K5Fofu-mg&7n7b(t1Xw3K94#zOeO{6HG*s)iAfB!)_NxP5Kx8wAhFGOrw?1{{1tyW ziZF0sf&-n#;pM&c_~ruk&ehG3rVI<=7q{kWw}c$EJ=C3vIBLfhtBx6V=UT4eo-Q=+zfc)=EYVl>8M{GlM8+1sH| zw~m`d7qZnnC6H*Jc*Ozb7tdw_F*3glIopcya^TH zH?!@n{|}hTh@}{WYJLM}>kZtJ2D@PRH6LMRj&NSy(ThOy)Gua@1)Hg?i?0s|zk$2a zl9Ly2N*LZ9?hL&SUJ0z_wZLt!P-5}OmwycC36FZmq^?)8Y3Vf7H)Oosn3a0Gz0X&} z0Ls))?`o^aoh8@vdeny+tD7XYeJJyigL9xtP5-$?Ov?hGzBgZokgSzBArSWzUB!VM zhvKw^w0mf@QEw9?iwvcF)kuebUP267Ul{raUo zm3gDwrh{t+=aQ3N4?0DH_L|*^L8}JrPeZV*)w}NHH!pEDMpzt9KRedYTKgfEW0G10 zeU`cHk)!OF`{Lg=jg_k*D9csKX7$kF`zC3WPAehX3~ERzl}4Wxoj6Du*%;qVf}T8c(cSwA#Osf{Ju@{oRmm`amc7GHRrF*PzPlZy7Q^5nD9n5zp3# zTKY~4O4zU~Voq2K_iq&(#H|n_Z;5!EI3nrKOLU-G^&R2rmCf^ewxU-xRr=99fw<1_ z_v7pEBqPxXbpqYqTjBTUCd7BqV4bU;NA9C|lh=`ptr12wy90DML8z#zb8ilgg{LV> zosbHT^5Psu96)7GKD?MbR@TmYRrUC_SxlvjUnmz2%Zw5F75h&!#Gj}4#=m`#cMcmG zbM)HV+Vgu7ZAgcaUJRXSXbBAS#(*10Hi!(oLVv}f^u?4oy_!vTW|?-(FhsOM{?Uq7Jcid^C$y7{bP-?&V&3l0aNpV zxidqu)u4{<{hc@vdnOk0`p^*doR4AEH)k|@#$}S|dK=-5lEE)jA-X+WGG03=d&ncC 
z*7HxWA!LX5a_FE9a<){X541N2lvXkeY#*?tYO4m{wsBvDFttb~2bjbPufsFgdBoJC z%GZNVEe_ChmZ!8W&uV)ZxV5V9do-tRUVubr9KM=R8_ttEkms3Tpd3&(a+La3o+t6A z6iQ<-B~rE75^S~;5HeMVW5r$Fa>fx0`1-iwFl*~reCKOH>wI>PM15U%>rF4dn$>qX zjHwnpQ$5q3+2}aN-B*rtiNnoU%Z=*ubDQ=z3IG{a-NQFlvUuS5>iWhLl3%TbVW+7I zLTJF6G}XXUR>rGp^87Tg*?5hm-AV749FxEz+QXW%J^71suf5r`{`^5l`oaT-!rl3J zhs+}t?N|pMGyP;&MATV$^HKE(87=4ATxoQbQ&N0Rm9vzXxXSu^cFG{wWM5_RgHn8f zgfS8c7SH;19tzdnslk2r{Wr=_z{4X>;Y{=R>z5p+$o6lX)TT0uPT$bleu*&8Q@zuBCbtmNXte*;+KQA^_)&bvNg>K5N+>(QcC*eLLNkbERZ+!f$A zcfiriyJ=rwZjYm#H|xH1E0V7UwJ3hxaVpsMaTYBz^e-o=c={xdc`PjwYzF#HwKE={ez*|g z(W4&TnglJ4hrX@Kev9*(Cgj^pX|!T~-WUIDoxl;)b_dznJrMQ1RN;fS)wJ_tPyF=t z?LzBU1w1vbX0yeYYFh3ZaSjcr{FBAA`(=(X$O~Vm^ziSn&4_8EHy?Sn{noC(JAVGC zlIwNU-RJ#1XxV-$r_THZUTO!?nYxh7WD{Ex=7Cf;mIgDn*@7=$7z@8=6t^Mu>=TKs zSN+1|;CvuK8+HyYIQ!V(26E6$iCuUj6St$VKaO>F>^O1G6zkN zDQU+G=K56fDVF%h43eN)==BZ!nHn>}+^K(Qs}^|@*&47{N8w9=bLxF4zU0B+PxVH zAA<(Z1wu^LFS^?#b{6ZCGeAyj zVf=df9F{jJU(r@=m1#`4;cd5q9@Y`NaXsnKVb=(>9wIuxFCi!D3x7_3#ZXlIAj*XRj=U%2@PE}12 zcQy~FLYCAoZCSTTa`kQ^CQ|JroX^NX&!D@voGvEe-mmSK+yESBbQ5JRFP{SI=|atm z&9u2)OTuC1v5RZOt8HeC#fw@(N z`IeZCWq|=GVZFeb_@*QDy;^r)(Z0p;%;K_fVkH&++nXEzk5=6jgBBo~^RBW=9WwzY zYwOBi^e2C3sDH=l`&jpI5n{I0!nofOoY@Q6ES-CjzL?>8+rqSTd9NKJ zo9t5yDWv>V2r-E|8NvGK)NH|>GDNopUMZpp*c9m^b)gjjqelD9|E}}DtN0U%E%2DG zcQz9;3vX%HtfOVw47YZEuugCY3vbF_ME?>PRMCp1x6(3hjKB%XW!-~I6Tl2>nB~)7 zi$nGA_RZ?Z8fg5uOu9cIpJkjClQ=!sD7mvw;bpa*I8Pr^!5I(^Iz~JhBoJfzKpxp) zG0rS&_{dQAcU|~x<>e0;Ao=L#^{n>;=tyU6BLWrA`^hv$S@E0Hx3Xg5T?`#M;mU6; zvfWOO@fzIm#|vMdL^yn5_-dG#yKqHwvU>m!p6Wlj=(GMAh5Q=@4rY{BUUq*YAj5@S z%d3N*O6<%(c^TD7(g_ER+(H(b6V?m2R`6WVR zo36Re)q4Q8O_%-wbAIF1h6NxXlP^ce00d;I({2y3)RNm0u|ai9*=;Gu$?1^=e!Bkr zN0duxk&r5f+Lm(uzNL*68sFw+6n=k7sb4XWMfq$2?8iZAtsuw87MTBE=xbM%G5VL# zm(XuRU$Osx3w`n53VY@;4huU1bi`JmMLjq=#y?*DWUEXHB0+`u_ znR2?GHn%+b2EJ|}_eEwQW11Q>G~g%^rFsk8g%E#L++h2-dx z!3Jdzul%&dnh}P(Qc_wFuX9+&Lf)6v-Y%L*j;ESlBkyMtfVk0fSERGZ3@-gz7f{sF za@FH3!fx-THpiy>U<+YU@Ts9PN4j(hBsxBS8U*%ZcOSoWSmL;W;$sbIP@sKg_50fJ 
z@wF4DVUJ?~!?{R6pe$%zrgofX2@dvLWN3J?WqO(|fH7x~v~`b7xim7aLIiJ68U!Xo zsGraOob!wZ9PO)7Ck}7q;&=DSwewruh}7KeStU2G%0N{x|->-cVLCG zL{M4Z@LI%PdmrMs9jxQ_zleLQsJNPSVV5XL2oT&MxI=J<1b26LcXtRH+_f7B4&Ask zG#VgyXuNR<9;6|-lhfbY7w21Rk2A(v`~2hIbv5f^&aPQKN6lGP@B2LF0q~nGT*5k zMf-TyowP+mC%F-|GL!f#&|%fy`_TCqz2Wq?FRz05#Wn&0bJ zwxhW1s8bw=z#7R}Bbpy6xT3GqOcw@FEaHySx=9zC6EGCauVm<_2@%AG!9#OwEF=f| zgV>1L(tNSR*mihba}sJMNN%WK$Z_=(DG}(1m@Ngv;%lWW4tcRNXl*|Oyn;Xa%%f!a zCl#v}T9Ce8#Dn#;ZkH9)u3Bs}gcZ!$a<~3PW z3eV}50aPSL7Ii7IuIUT*_QAybe6mumZtm)-G=KWL4rMIpl*gHd%(j=Z$fN#XFxHE1+w;B0G}Jk5=$lzISv#c~BpiGz@u!lgw`{sbd1QH$?^*T46S!t+!Lg;W5%U891D8fMaE2v9_6 zt`7@fd#Rskk|vEAY~h_CZ-uA*p=Me!k|W;H6bgGL=8~U0K?;@Du=e!cf#(W_0=B3| z9T)i+m@K&}ZKPBY=F}_7BG!5W(Q-u|G?QI;u7$FW={Y?fz?ZnYwKSw?aC|yXn9HHE z*K$_QOU^0Biw9~4P0LH-lVmxFwy|OpMP;cQi>JTxkQKow3vJk4g%okXuXwleWZ`d4Woy!1S__0|MMa7 zpH(J-Plu|o4k7~weK2}XthU5l^?2e{9K8mg9)YujCY*tr@+tKPtBpW$^JWw9A?U)z zus(@448NlqaBvn;!<{78FGY7WA=i+Io*H5BD=++p#P@7#U{$k?z5{xDM_|a?3~_dT z0%wK|Lo;g_^kH~c*;2acR1 zKv&PXJtKsr;qddIG&~#@MS5le68|cxMZ!V3IO)@ek58l(R-Aa6uG=y~PM-ro$fkEKS3c(j~W8yc(WKZMu%;F~s3a;5!<5*Yn$=yt#+1OD{E|s-}r^~Am9o3ETrVT0< zU|(f9<$9j@VS5CZ&W&~%T!L5nl%bYalDHOsRs)Bqceo~7Vb6G=;!-qzbz+sVZ8`yR ztULoc8PQymJH!v*Gnk?;r{R?Kxf7??>CSDI7FVc`PnH9O@#q%tXQKZ$#G+OEpeYuu zGNkbF_))NAML6zKh2CB{zv-|u9L3*Z$;d!AuvDE~s@T3H;`_6kLCGq<)K5}x0Z=>u z%>m&Rj=(qc^cn!4p+oH3)!BKzeJcuG?XL+ob%zFC`S|TJ;ngYF{M`4|AiV=w-gkdc zo`@G&(`5Etk9)U?JYYnYu|Fgp*{ zFXV3=U}8ta`C5A3YTo8nc`y|;YfmQ{WKnRY6u+7JNbm`O3liZ0cF+(+Rcd!xyC+n;MD!Y1?{A(rwJshoFAqvO87~E;J^y$q)D{~DAjk457^TtNCWL+2^^wa-8Re1 z+X3z816HisUcLfr$&bA~Cc3ecC3#C-Uaww-KkO0D6jn}O!-X7mZBT@Sv_SPzRk}xW z0i<|>IVxe9O$JVo9N*gq>stL#(~c@cRG$#bdWj(mDYu1|VY~OX=7e*YwVDKOB+T{Q zPB^mN($f|}X>&&7!*l!(dddOyyb`VGH&tteGG7O}%m}Exn;0_E_@gQZhew(mJHj3Z zzO;IW$!MS&Q2|Aw(vO{~_nx!CJShx3%GM?Hd(Fq{(&qqC(YF2MO^*EI%ibMQ%KY3f zLMWJOI4|JX_J(DSme(-OXFYan3|#{Ln3mM#=LIevC}R zmJhbd5AzRk(#k;x_($6Xz?@!m^Q#|s^6kjv-m zR^w2`8upiIu0hxDkj`_sYJzEmYm1?b}fuE zqUObQl?$OMq#9c!LoLbUsIHwlWBQF6fNn=7g@f2rzD=;kG{g=RJnbUcjA+GAS}{wD 
zm@20_d8D58j`w3>S%$FpOJQ^AGUF88D*V`uuKqmQ9$wY~_$BuK-CcVTLz{%di6wr? zT>9vxM!VVm>9oI_V1|3Uhu>ul$u8#W6A>ZaBl>OZP3ORFm!+x^BGV8+j z81gr=$lREO)$CNK-i-ueU{)_u?04&QgSYma<;9(0;g2z?& z8ZI_A^;WkljI$o8t1yi!Y2aiD--)<8OeNM$eUhfJ2j7!XRas=we3MiCmu~#eSOCxB zL+Hfz2mhd`qP|B#;qJMQ;pTgHutrBtd`t4R`DJd+esyP;k1(nYf<%jnm1(CptQPJ` zN(Ytj8>8uCoY&LYQ1?xvwLC2!k!dlR=>LRsC*FIm6NE(i?F2{9+-o)Z-l^+ZawD?L z5HZ!XwMN>N>KFdZR_o>&4M+Tf_PDPv%q@=wJiAk7i%mTB(chi37TWAwHT%{pzhk+i zqO3V3Zpc6OEL*SLeaG)xuEi7|FDkE#gxNJI3Z*l363(WHYlcUfbB4m34K0-O}P^ zaaqcYG$q4QPIV^9EICVA{E&${V>-hdfvudbC=D%~;YOtpy0r|P0$_Vn2dm`G0d#8A zS2pLl*I|rCrgr~1_4wD~2I0};(p@w&myZMFp7+`g<8&Pw&4pAK2xFNlo>_eD;V}}z zlecJIW;V5g;xkT`Qv;r{7sLZo-}RG^NEpiWsn$=O9}2LZX1^>4a-oC%puEo?IVk{_ z3UHa&Bgi%}Q_Zswo)$bA&IM$3my&qB@=VnwDNb*PIr(NT0TJ>ZW6UEtR9GGTmDHst zawp0ZMM1m(wK*^1&ZnPsYo~0Ra_>cy0ZNs9b+xnm8UE)1zS+bG@0X12xuM_UknCcCx)eN>T9D zP+2INbe7`pH0grg8j_Gl4RD3tFZrLogsunJ+&G@tyclj~{g##5c3G2mvThwv!|a`- zt$HV8#TTZc-<6SRe`;bsRT`DK(E7nC$?8E!yFBQQxF^sUrUnV9NAZpQogI^wV@pGw4x~4v}Pt6ZN*t8(mr45vsDnaU_CuP z-3X@D)1=^%6K_>nEW;onzhMUM+>G-F3tM+|%fo5;wdRt(9|{_)&!lfYs?~WJRwvwE z4MLj7i5Kx^4B!F@MhnI?0BT-Vmwm<*5R^__^_TwOX6KU~m2^n=TNhS6v}s5}NQ>7t zpSas0GtsJll9wWN>W+ zPk(J(BxAnqMRAS6GM_II19OO^;Eux>iL;izvRbuG_23E}gS}&fd3s;wUaK|CAJ2!8 zV4cWtccMoqu8~a+|^!xAWNIm zP$Q#XnjA!6;xEUyM)e~yUu^;IKx+L9@@iJemLLhO^qZ ziaf;2&fFEsr^T1wJJ0pq#!1G6eD@$2pY^w))>bK6mc-oANjm&lm=jL{JX#H!Ob{mO zYd?9(2WJd>0KKu0*LxcKhWZ2(or0dc{0|eqkEsZxq!$zjRT#3n)>h7+?4h8TCYCMY z+)8BMa(QYI^$qEJVSHIy5^w75T{&n)ojykoGHdSR0y>aT;!y37{fL*hbO-n-WeU>q zz5-dpFhl!Sh1-bt;6Se72cG(3rr#}Nh$0^NM2qB)VBGRU3{i9FDE8yMRZ zk?|}j9AER6ij$qdkE-Y{-p?L{5aCrD{ZpLo|{9g#upd-NK~q~4ajlN@CX1M*0m z1QiF$DFq7$OSVW~)SNXPn`_w#Eq=H}MD2(gI5-%LMa9`BZ87X)bB^!_$)>d%|D3(m zHpl*W<~(7TsMscJ#U0Lg%z9CM6%utkI3?ua2yL%TLg-B5Ik9fFE*?p{E3LfP>9o;i ztC}<5Y5rJN*FVs|KR6|C)h30cC#7*|6_5CI((Loel^axRyHO|3++)uHHAJFcqk~f4 z2kkMCxBYfZZk#X~5HZI7Oc$y=OHH`&&an_DIgU-fP|=zWm)ib)tma1#X)1!s$ zGWmm37B>wXRz#%02k2^Ypo0Vtxft;`h0-NG(s29c3h&j!VluQB?^a4YJ;X_Oc;!>A 
zdJ-*2M9YvI5roq$am_c7UPTsY8E+)bGc+4yu+RG?lWP{NWZp-DN zj6t)3p#3@zmTOoP<%l_ee<;{%WfTwm2Zh3U;M4&`B_L#SxI(>kms*;+5hw=N0(-Ff z)U#;+x^{3*(jxMf4;(0~THL|Q^D&Y=p!5DctNA6VUxO)Gq$Sk^H_L@sECJFIHM@9G z75vz7Sj}g5s!;Fn?n~|!eNC0)v3G=5M$JS&wJiTEG1x_JhAMvgjak@uHqG_-0WIBy zH=maUVNV~%7sHfME4#^Syx5>Al}o>b*mFwT^rPXE*j{DZ#vBS!4!5b3&~n;o*Fpk4 zz3V}jFAq;ya!ZU@D4x{UrU{X!H~T65p0hq(u&G@V4peXrSJ1TToMdq)ZT31GhBUI* z;CK%*^bKImGP&h|RM5_pNLqw61|Otwi_`t{pDL-n7bCeH%rbtz7Um&ng1QavX>Ii@@fxniN)x z!N;GEDeD|jfxnrz3XINtq7vLfj+Oe*s<>%Ms-2c?h68Hmr1Gol78h$oIeBHY6@~}k zU$@3EVmXzZu)rf?O6}T8%;myq-La`hns3t2h-y0|&PuOL{0L6i;lypbTgbdCQ@D4h zhT5LdEb5%r!)VZgPRlf8o9^P?^XQwKoM+QkQ6dRlT*amRwuKCz!^tw2T3v8tGK|Kk zuU{}biAAwJ8_q^c^<|sBBtAtCBrlr8{GHR1ECY~6B+R6^6>Hjc82lsjFcifvpkoO% z%eH1=4)8M3**`mTobr~!wFg&JWVtMrZG8cSA@d882Qb4(qmY&s2@51fxX>ha&hl{W z{oTRnmBo?$!<+ghKA!d%+@{`xSEHmzJ?eZL*a7LQ{3NxA(9wgJ=;MRJV4}ilz`R9J zHk8VbN-k>i>%Yth{>>_LlA&APe$F;#X+Y^iS;>L1bC=2FE$7p(6VpV{eqFDiqDcXE z_77Uc*x}!~!~MPwE&XiQ3Is-7l_~n$P+A~gDMaVz@Fd zfe(L8Z157cWomg_i0#>YJM8IZ>A4JKTK#nLSxQ>Ji{x15p?a8fEw)q>h7RRzUF8 z5_?ao&6hd_TG&X+Ewjom6V!x)W=~y8)v9}^z1&Y!t53tw0WC?E8*;r7iZNLfSYDU~ z8l($Dj)6$=5zbKpdP$pL;+5XFou zY+LdD9#u*&k*IqPQezp~o4Ynx{&N3~RG)zZIp_AG5~g|IMt8k^oglM5cl3Onlpg(y z=y}%QB>@+#R--nXYfh+l0hANz#%w*uJjUBR2s3RSD^Tt^A)xHD7wTA+%G?E0L2DHU z9ROa5DADpSgU4m<6r>x)_+I5kG0QkFg4&TIv10##`?JN~Q+j%M(~mU)?VGs#gN_?3 z?N0&tv7e`jcSw|BwJb$w^8DFww`R!9Fg-RikL*F7wTxi7Y!s4(mBs&R4|RjO-^nX{ ze(0Fg@FVy&>fUg>3)kqa{eTvEk&dKurILC12t$m1*^POTxMgLLma;l-E3aE}a=T9w zR3i8wx2tfaW1*P0rly$Bd@Al^=l+ORX1SCT5qX59y{4}ozs*~_{-iIC07=nYX{cP~ zhnOIo!KRm*@Wnv`JCIa}-ac7~{A}({*~Fp!IjfI$MCr6cRfcP8wJXvou z17AQ@GFuQtc=HeJZ4|_VUdV(`wCmSsP4aAJ=SAg4LC=9wpJ|CssW%TXxdJDV51B5c z!?fv#g5i$Qjs}x_)~oFUVEN%_Wam0C)C^%XE{G~4zgF&9lQ6v)=Hj68FxO=5<=t31 zLF=tlykofZxwJyn zf}i^Y))&k}ZXek2&Ij4qv!mR-<)kNBjO24M4g^@z?JW*_^SxEHSf<}i0m7j>0Q(dp@_>?6kCH_KSHQ_oa zN3Tuh;)+i3C#U5hE^QIMc8gV1Y`u<6alWMTN7!IfR%A|0@<)WpfPDWD)p~rq$IeI& zPl_H_8OVBA(Qs4X+bbNc6UL4G6kFP<%FXfkb;n<#@=EbwW4Bo%b@EToaDNGhI(Hgr 
zd+oj=&E~W=2LvXsHFf1YVdM)Du*JwO-84LxPM-zzM#1~zO8xJU_=j02moME!WF&PZkb(Vmm}jSB&hU%UDj%KP zQ#!UD$4pmGGm z6E5eQhp%LzZU?5`&hBudS=Fm%OgFHl+2)`@=+L`G1asDC36^uD3nU~bgq*6MQ2as3 zVt*g<^;QMx8~CP&u3u|Z3};Sn3uJM!FID~g1w4C@Jf?1e^~9)luq7rUGS>XFY&idX z#X!vaFv)>expL{PgJmc_-%uzAQ)Hnz;}k)&XojEqz;6dhJr50WtYb4!mFXPAvFxc@AJaUCRWKl$_rCE9p(k;JCo>H<9{xY>Ae*2u$aMsR^yvTxX3 z?IOtFlZRu5d$=BRGwydKZLEjSr(-##2q{`JCi6`q(T`5f$N`WLfnY56f0;yE)lvhV z1Ed*^=FwVK+==Fybj%v_e*_^RTc%a@r)q1$<7gxd>=C~k=y(++2#~K~)Y*Jtes^;HhA*Z_J62u?;l^#b(l%3=3AO2=GqoeP1~X>rC)W61IJt zoGuf#rCn5vY>lLBB90Oji#u{Btua1If8Ov(&z+l~yMal}4TTMD&A!?US#W;u&Vzub za-8HBL6GEB<@W{+nmD~msfu+8z67fXqQW!)jUp5lBr#|ED7MvQ86-kK*3k~@xS_SP zos7(vklACkt6bBauhWxxt2ZuG2sb5FXtG?_Afs6(5~uU9>_^7dZlSt02&Cn;;P=gv zxH$ZJ(5r_8$7bn=D6Ji(`(Gq3Rh0qR#um2rJR0Ijhdv53U+=)ac<<7wdoSP(BlKus zs<4kAF3|G1UxGUvo${)F+XvF{%nCh{dd99b*H8#rcV;3W_1 z1aI>W7i>|oyEkHrMClM+D-pN42U<*WCkFn6qTMc%o_*y@FY7K?5#&=r+>B|6Tp3sA zEb5Qs1ZTk^fvs@?SF|r4AlO<6&XApTBIYc&UB}&x#*eFiP~0VlC79%;29%|d9}Y6e zG}27VeNJsUwPL?l3;er7WZ?Ommu%BoV7LkiljW?OI1$e)$Mks^7Z9a({}0NSHT(>N zPtID$WE}en;TxcLcAiiAaqAytH#WiTckqmwnu!^gqO-IEl_;DX7Z5dnMO5a^LW_* zB--z6aPX~r#2vtf*PTyzk2zo!_8GoACM~0 z-#<$<`~2-|=ijv(&KLe(4Xos%e)i&v52vZ_$`iA$SZeAG3Q(g>=xhHD!M98e<48Qd z7b$2v6(k_gIkI(g{)E$cxo2#NUWs?t;4*yL2mN_38yh~C!aPs2sDh3}|F_bJ_e4FD z!0+TOFZ293v-WOmS8zUX$Xnki_OrQS0mQGfPs)Gr?Fgwzy0MEmjHSJ+2Yy?%Xudbh zC~G+E7kGPF&LgU4Gs`z1B|)FDsXS{~;z56ON!UO17m6E`qHd!Arpo>o9WHln-I)#- z;>@tvxQ=5L*H(U`^+U2MlHT7q*|E57t7%3a)LOA>wrV-d=6=r?!R8V9%KaVRUs=Qd zgYp+fsnUn1{D+MGDd}uvcmf{C_bTraLD!HMjMowGbYQy7hJ~NQhc8Kl?Jv~`=Xiyi zkYyx^HO!ShVh9iz`pX#eccyM=Fq?0U%AgObEe3DS{99gNenvva2d|*`T7D2mo%LE1 z+Z{TO!8gIoJU6l^hNak|(bGt7(yn<4YKy(P_Y*AT*6(@xgp%2VZaI>Zm0{4KL#H?~Y6NlJPX?|9tj)-_Tcnpp0pCS`MS)nNdtdD2EVqj)LoMzx;YGV^=! 
zG*p;Uax*e>_n*ptw5Rfv-&-^}W3NVXKi@eFm{g^7PQiCfMu~2f>AbzA<*(y#Kq7@9 z>aJ8QyuMU%o#nCR+F)n1LB@R+_IT;~^x3t@3BdD2qQ4dz_Rz$1=iEmPJr>eZ5D~}F ze@WD?V96>-R3$*7#BX#Kjjo_I0%8Wz8E%JIquu3izUT}6r)K}XGH{ogcwoIx2CmNT za>2hda|jqC1Ew|2=v8@ut<2S5wC^%f|7xgSWch>ghP32i6BjxlX8R`Rx5&oGACwLD z6ka|)N2f}arD-4MXm;3!PcOJ(|<@<%zW}}`NwqSA%_k51K z{tsw1e{aG+A3u@o60}zA&Ndms?#$d~q`K5ej7e>~&Kb_+z9RYh9dVk~NJwd5-l}1` zdU)5x=C+sQfioALgr&_)VP~xfGQXGQMo{D!FI|ZPQA8abN7ua9{ZjJ;ajF zkj|#VkgN~i70o$+Li-VU`rpfc!UlO8qPVy?XfAK~)7AhZ%_ARvHmc=Kif2BKF2Ez% z@U?wlFs$|aps*XIUM6S#!QDImTqXw1ALge0gB5Q>|Nf)@bo`Xm4{OQZ))*Bb@&i7# zk-=OoU=d_+km=2-WNx)lJ#jnmV*}`awp)|)UO@sNr&o+y&`&v+qK2DxAut*r;qx0h z&2pPGP)@$O-Gb)S2kHLEGwdVTcjuv?bV72X+ZmbX6EZ;FmTkqwDqKJ)QpcPfhuI#Vj~^k<~Lq=(u|3=FE3MKU@L9T68M67*hflseYOy!V$~S4!hIDyx7qQ{;J_0WI!NjtJVBqu5x^7HZY` zMD*XDmfOZzY!lwzGixWDrH>TUce$Eqm??&?%>2C-|NilFk@Ax~?3|@`K%1_2g_=Sy)C7r@dGB@E6T!dqkmd6)JkNQDm+(17j!^Enbwys0?1g)AALPb8YcUgZ-!Y>1UpnqTLLZ3w zI1zW-Z^!M7t=9XmSSJy+OYyN9HM{4exGs!(#d_*Ex6uz{NNmz_cmQpP20jKG3W^1I zUxhu+9w!d(uo5x11bP+>$!MLxmx|a$p&K;7v(;hfu=OyQ`shITRy93ai-zjhM0VoLLqzW9+R&zQ`AU9tU&I%&qKX^N9%^qkJ`Z(T5h8I*Ht6`tt~K2& z$eL|d)W9pM(7*kI+E!p^V%-8^poGKVwVHXWzz9U04F`r{@s5%H&N83+56T_z02va_ zBZm|y|0FREEu)~#_u)_L7o9G&d(2B%#l}Y@Ao&C8U%9}xuDq=!;GFs&v%@j;jRf}f zOGD(!F9wSwl?(%gaTj<+Rzs=9=pNOKx!akGdDz+aeG{|H>aRq;X1oaimsM3s%noxR z84?pT-+{(Md4a=i+vb)dg~ZJ94CZ&8C0B%n^>ygp0XU~#Cw+aL%*}opULVif?!=UT zNQ)cbmmUzOBaC#^FIo*WXeqbz9fy2WNb>3Yx|A|VsLU&m*c6aa6GVLja|O;sr-dEW zfj2+ug;pX@^D1|JXd0HpKO2F*5f>Y`Oxm$e+fZT!G(z<)xP;K7G$hA{B&Z{mGB{Fk z%h4cDK(o*rAEA*B%gI#WXW#r&-M8-mR4tNppaTRUx_HgibC4 znxrvTa|x< z`+Q_V2=%+k^UwP49m`Udi^F%x^ABw%UP4TkX#SeTFYh7~qLAQ(M|PBXKw?|Fjt>Ek zeGPbOSpOZIB$iy=xh!FdvI(z{E(Ko>1fJQU(vwr^ut58CWP7&5Z<&?{=FZ-c~GtTzs? 
z2d34K;+pU3LHJot?7uZqm=ZRDZyluC_tR;M}vsxv!UCYRRQN(!x@4Ow({Zd~FFAln`+;@I}?-pr)FRzPREUW+AdHZ>@ z@f7PP5)>2gFwT4C$o2=trY`5;j(o&*)5>9g%orlSVl4iWQ-LrwejcCCCRwPR#E~YG z#OCFI5-_`!9f1aW0@hkPy*N>;t;xK_ORR_Q>DuAP+(zs>@!*k7YwkFx*Q}SmTA1YB zli!9?aEiVvzVQ*k}~HJQExp<`lYeAGH1%aYvz%yETo_P zlA;{Ov~Mc8=~AnP_AUraaa#9;`0(>pV6yLY4_d=d!~1w)Ix_(W=e+**>+%p0WveLi zgU;;U0`7<qb`m_=ofDjYT3$9R(sr300@!LGn)_c)=AWRo9>v z$AiZ=-43k`Mn@6V{!&ZcV-8p>k6rmEZpzWgyrt@*TYEE|%8Rif%Jyl81A<)fMAJ4s z?Md-R=hc2}n+U&o?iC~7nGB}{j=-3xDig9#L$U(Ok=}X5UD@$zpS|1l2z}HiFtB@I z04v$O7gHmCOqP18x56~#9oG3U;<2RRrfHR+C9G|7z1OYd?t zq|FXQCNaxz0zox~o3t>rc-#fP!#5m{3-n)*go|AyLFNHnJ>7RHGSD{dnNcN}UqFVE(QdP}zQ@x{PebA)fXYHS%*CH*SOxHc zrSfyc=A6Y7P&P8Wd3`K4{A{o#F+Zclz3lPJFP|mR3`{iuz`)qSem1r?DocOa3q-`5 zI@SKBZLcq^U!FRn{UT(5=i@HG4?+;qIIno|kYvU0*3f)m zU28z=Tub?kh&EnM+7XSUafoas0fMc7j^sPj!4wS%E+cnQW1Tw{PQuR8+wh`3C<+kqrA&yUv( zQj1}^d>-fI-;~Yl$zx{j2;7ZQYB|NFgY*;YDin>XVtNLle&h@HJ?mfLeU_T8pYaT6 z8Pn;w(oA`B|Jy(R;?C8^-L8P@epYS2^XbU%L0uzj)`{dVhy47Dh+?)%Z8_qqk;EeN zaz`bD7Vakfc^NTI3o>ET1;!w^P%^P(=f}xa?wgfe<#TTDZz=Hlt2h0$8~*T@bi)?n z+RMe?sSK^z%=>knOM*$chVcYQiNr0pVK~D<0}N`Im&3v`e^9Oj=Ki4kvQPVi68DWa zXy%Ry3T?J-TE;z;H7{i&EU7oOvP4OLd-( zljt)Fg2k5hg+ulZ=5UQi17nse-f4JlaGv4*y#b`_TiJnrja4%(YrHl~78uvb7?aaQ z4nmp-44K)+z8+cU;mi##>rvX+|N)po;twR3k*vQwUKYA6+pvR$a@} zY~Qcbr%>%IIQteLXJ-^?xN*lDrY)Udf4%psVXdCSZz&l)RVE08@{zJ#85Dz@)te~;&$>!&6r zVOe@{{!JGdZbX6Xx$VY_zH8ltWvo9aeMl=;`C16*d|p0j;6dvSiTh6a`arzqa2wtC zz?*VY8-t{Rp)Du1tR3^!iF9XnVYT?ZUk~aWNmp98oq&u2yg8G-b#)vHb()60S@}5e zz!&X%8qPTV^Ov{2E347bQKxGvp^BkzIL#z^jSF#TD2zhU{s_T ziCNCIC_XsFI)Rzf^CgrcY{6W!mhzTav0)}RbFb2FwqM`9zWb>3*WKf}sKZyp=YAyq zTv8u4;TTk36}yNe{@NIqq61%8t8ecb=dxDj4ZpZ}+q)90khx&2u@_@z-8Bg-rg?#ZGp7E zc|qzKoPN!93g(w%!{>^(sXe8Jq7GdO@u5rL{X3p-xcaxutA6 z!DnZG2LhH3%GFpzCCmmiCHXdB5kLJu@c}`5XRFdB~9VlIU{ma zKVumr&k$^`CiBl|j4E+EGt}x29uic+YrlL5eP$25=nzkE>?;;cX(qVr7=*~2y^62x zhWuU*aymdfV=F+4TjgwS1s~rPKWcQbbe~TD#Ju9$HMtjf!g0~n0AF-Yc~W(O(OJA- z?WXCxUTR`;;15J6PBE}YoK^BUePzms!gBq9SRNKN>Wy$-;mq?&EOSc(E)4kv&5i4$ 
zl`_?z8^=xFhJ>h_JEA+Kcp~Pi83WY8B=#0$%oqcs6vmz6uQQ_(eZ1+P0us zLR7JL$tffbv;>+yiOcxmxa~gtLeh*2m4kdMQVo?=Th1~mo-k^_ZU3o5=ob=&!Ir1T zB+ExrD}f?IB;nSJqB&P={fmavrI#;``p;#3Zx8wAoZusxona<>c-K+!`uBa2qDBDC z6#8cFAwyKRvTKtVfvM%PZ@VzT?F5`aeKLff0|c_-G~ro56ghUM?$ z<-ry;>jeV_kS7jD955OBP2TmLxIuqGyx)zUz``2oG9?cm&-+t-b8~~_n_FpZeQl|c zBpRBnEpmt1yzb;+gaxQ!1h~&sc#70%!rY$;+aBrdh?+Z?TZ{#67vAh8^n7<{cMu_~ z@VYE>u=RQi_-;-*510T?yc%nbm6qxuA%o#@zA#e6q3s#lCt5~9RrybS-h+2(;=fko zD#~kbF%|8D5E9a68Ii=wTXS0I1#0ra0;mbayExS=g}an7A9|X?BPKl}TpJ{f#M9?{ z#^NK8@6&%BhLo?fvnyUz3^6baUF*u~UaEbVm}CLSNJ;j~@G1cVEiD7MCvAI0!X;%> z8A`2Ox0I&LRj_>|k`xT?NZJXrMfo|`GDQ_p7Xsp0Uc?YM9Pc>}lVy*rA<`6W z*F~_Q?AgLtCvcVswUAuHj}9=_N0i+ht2>~%*I_d^dQiSBO-)k^&k+{7;y}#kL9|Dh z@}B=N-=$%5#Rfiqmw95dNgk}N?Cmqi_ex1=OjbBEJ3DhrG&m(AGb6>lc=)w|0BY`q zjms8AyY7BfL%CpVMIcha(+CV5Cl<=|OUllj&ptB|unqRNaCIX-S{#?LCkv{~T_lsB zJ0Xnbr8{Rjf6C=dm6GBrJ?Igs_*qBe`LL^pf0PbHbH8P=Iy26{TIVPoP#SDq?pAI< zSzP61V5L1sx;N`qFP4h3k-vzGk)xOAcl89MkJc2Xiz<59 z2w)bt*rBsec+NJ%?q&qLeGcY0-_iorcrTt-HdNNon!~&-Xy8?b8`gz*$%$GLL@(l_ zG$j{cazLB+RMGe4fqEg^<=-fdTwvYT?oU1Dz!P|cuLMigG05w+t0V?=z*?YlBzEV` z`EzC(YO}9D*Nau1$Z#-mNEdjKW114TsuX8(B%?yS>4$;yts~ui%;xTxV7uMNFLx9N zAy&a{cAZ|3$MHMB9);UO_0@xW&n0rLhQg)p<~aRYjPI27k-hzAIpQt5@k!98bg>w! 
z1IYJyJ+{BBo`cA&TF+rQ?Q5aU!SwH1)zm0pcuv9`qQu;q%F=*ty)kp2t^tisBe|c$ z4fuTb2{!K_%I~P#_F>1d-G0&(=%sL~#jGX$c!g=q(TCOV>G3!{qrzoOnM8Sx36b;|&Aj3rxX%bO(1AJfpIQO!OcefVmSDOP%_#xFdO+Vtq2) zNT!d_!^ycyXNdUuYTHbKZ&2e!NQ>y+-BYN^T{z+oN-M>E(%8LA`pqr{+R{n-)yt44 zwc~L&uU`m%dla9vXK?IqTqebNKgLp&BQN)w&9@;-KTfFFeddUD=G3r z*>4XcVD~5yJn2A2U|?ZWHQ9mzSvvPlI>NWtcf0d37<+IqRHC}Awx|}$@FC#4-*AKB z8Kq4JZy@08N1@5zkiGCeG6W*pzLjR-d-cF|V^Sx8pD-&1?)JM60SE0h+3>W?7nnX& zBn<`e{puFFP^o4;>7wts5f2Dv4D$*7mtOqeaMC>u?=Trd{*DS^raMN zLo@JhRIkQR?f)Y`G5GzzwEW+!Qjd8j{rKT;V{!Oq)XCPkCJ*ijdYtqj5+UBS2ZPKl zx0Y2?8|O6AwADpHa#}0f;gR~te877}yno~;N+5sb@A--6CrS&KA(Vkn#zo@XOTQhA zvi*wh5=$?GThh?JUsq8>#=rWn@M3Pu>m0o~$&=$lz0SU10u#1AEF&G~$ZJ)(FbPLD zx3jBx^i!y{R`TY|uJny`Vp{R^njJFJG6-5fT(Nc%_RoJ;WZ3^!;&W)dyd_7Q{t%@l z96W*l4-~oZz~oI%x4PaXjfRt5Vtew`>}_fDlHRXb-i31sX2*y;yVv#$mg7u|u?9sj z_l5O^lo#Z8!;y-PmaTu85&x_YK0da85kaa!H0md}L! z*8|ZLRa?9G<&7%Au1Q;9R#rCCWXz`x;!hnLmQKHoKwZib0n>CMT*>v{kY>tTf19}* zvAv5&6_wkdzvPrD+t8pVwKTrk0J`Qb}LBRs&HVofej zWd-&Rli0XZVjnDS!D+jcl%fA6r5jvWRzphZu0TI4JWlTnkWxCpp%Ub+RhT+Y28Flr z<6N_RZ>dp}7HLcKVBN#@#hl(Kcjg0_#}J848$n(JMTg8H{#Z3w`?4UgwP_|3eg&hN z3q+y-C8sF@8e244UPPOB)rl}}zJ3n*XLzLTgE}l>2QWmUn67CY6e;i3ah|Fx^HR>#S*pB z1qr2~F<~W+HvF)lkN>Kw{{J6VBa@%Jw7gvgI@g0`XH`mQISEA@IJ9A3y%_~7&>Q6L z@|}-aQ4mg+(ClqiVbY6v0XFGkop2w{STe4S_v$qBGOHK^zCap8Y?xy>(QZ-MTMIy@f(s z+$j{NI0P$D++BiGTtWy0_qI?dUfiL$I|&ZOH8=zaQrz9OC+pj5jkEUJ-yP?UaqgXe zt?t33@_ZePox~87E#pk~LP+aWkK!G?J z9rd7w6){=?XZq3H=YNiq_Pa-;dO~r$QrYgOMk57VcWTOFbR|UkIZ5xb$%@c;{dV4H zDdyqz{Msbm zyH%BA*ciX_qv&S?R*o5M6$`B$aS$6+gNT2Kd-KuFzmM#Ho&QuJ)}S{Y$XF#a8i2+| zd6j>&l;2slqSqOp>!SaEK==GzM-Pr=WjTUP_m$O7_@}3^Cta__#roe*SJ{5y${I3k zbZ{QApT{4FA>IIB8kivk9{5kY0NMLRK?EdYq`1;*JF!Vz;U;ywJ3adgYa8%(&%MC3j8YeW`e=Q2lTp~J(^=R|=nVq?FsBdAW5^T4rcmjX z`MG3@e>o&8VLsjMcq3|!(-2x;b3f{EmRCJ{jWlftmy9siSA%GIIekCh5KyN%P%H13 zWt_w-up@e=@q|q&)eJfDRQ{jAdGPz(cN9sgVLW>dpKqFkmBF$l*@YdN<}IgzAcEe& zD{6c%7IykZSFi?G(Howl7_Uj4RmDWkA@x<>ThNu^rJh@r2C{Q8oDLb_yKP>C)grPqJ!y8}>#9TM(t+|1)&+B^SnI}UH{A^s% 
z1d?tjF7f}wRiT~O-TdTF{lX&j=MBFw2y>3seh-gcvFOt+_@OuCI8Xo{uH1>+ZOxtn z;ZO79F{lPbQey^y4A`RND=VA|EN}=T(9S7nBW8lG3%2I*i5G^S|6mxtE4U7laPIKn zzdrGziHWSSsNK(l5hT~g2eA;wbN|8M$Zk*5fcG4j(24GTPWg>~!gqrZ^{piPoa&=$ zGORPVJy8YV^+V4x+uJe6QZx$+MJ*eYtUQ!WgPdz}G!4_m<5p@*qZeP2z0lBQP)DG$$YJRvs)TW7jIHIYi~DRN{RyZ zy~LQ-JSh#2zDPw64=JFy{po3b6I0KE5I<4mt&2zPK5Nfd^;BcIq|%J@F!Lt!R9)v; z;LH#zd3B7k|6F|wR3hl{@tYSKG?>gu!3j{J9WBJ?L!f!)<-_Ul6*SyR=auMex#Ov@ zZ{M^KP5#fb9!_u_sff}O*1XZG9R*M}lP+AZPrWq0I&VRVpb=R9LtWm4KvR&pMexP9 zwM(pZBgkRl^4G34z9Kj;yqJ3Yx>$R6{wFXzbpUD8uy$AFh~rl}%1p2sGdzR>(7N`8 zLbrfusJx(WG~-gXvq4#%H*gr?)KkL~QgXJz?J~KRg%j~5*UF5Sf+VE|PlI+~dg4Vj zE0p6_L7MM){%a9Ji5ct;X}LsjJIdI<+98N=3M3)-0#m>&Ay! zXr$eQqfy%VM`c}>lQ^#zSrjH{JartW@|PHBj72kEuS>f6?eNjT_kuL-oxIt-MyVht zCL3PN&^)F?*J8y0FsuAFz@~Np1wLg?l{B}V%9CUx@26iIjTIfps%-~O6J(+RH0+DG z35ISk_+ylB;^fYbO3lbObGR%}tAk4IOd%SrzV^<%0vA6WD)@r9Z6K8kGS^HRcnWkN|%_YddbQh zKjyc1)dy@Fj<{z&s#V^^QJIjl0f!czmo;Vf_0`tSl|%q?m3ISb6#eyW!0}80i?h0e zZ)cTA9zPGskz-(7t(V-2!cE54%^%>Ih}iG7n`tdHp(mW7tqY5#@`?Ky+d%zM4- zs=ba}<*Xr&^=hD{^N^GI23iEZI>zcP>)%-_K6&BZ{od$9#r;Pah1HfC@JVNv-&d0N zy*}@oU}2r!qnsHL+Pbc8i;w8LU&P!(AY~4IA6x!zM#e{7J+_ssBGhylU?8pJy3yFL{O5(Sgni?jn$*xoCpkw4E5_f- zjQ9b_ILiBl#;M3co^o~A2Hk3|M)0eH7%y=Y`FV;jf*Ed z#C2x{kCBQFE93Y~_}9Hdos7d(R@NS6{^->*G5v>NgOL>R+A>WGzv*Z=>3mrf6FLB>(XE0n2Kf(!hgr^Q>8&n7~X6fSECfC zBK3Kr2*24Ii3qqRq@i0Yn{kP0-YJx=J7*oN(v`~N2nDhmWX3OXpS!JRtb|n3-*fA3 zQo?7MDxv0YGTaQO@JUx2q)t!0@NJahSglz!Rzw?vD&NPSb^B;^qjz8MR(uiH%i)2T z0T{O&e42FFX{P|cG#)Gf&;Ct3LpiQ^7a&wzDkxtqY7$hVm9@U6$K{^iX?v6s1U_uk z>|J_PfXmPq8M|P3B$c;AmXRM`VV=!Sc%rt&v!|;QSu?-K-P_Tc<|&QM@E)z`PR}_y z$Yn7n8e<)$TqMb+=(K{tOs0Bo+(PH-iS{MuVY0taX1pwsg9^A#{8{!)xdknesaKB1mk&FCimhKVeHyRJ3t2jVL$UHN1#0 zQhb(-i}Y)=6JUVW-#z)%Wq~Wk9VV+uGN>RVS4f#-Lmxh=i8jsLt0)fe|DAE$@2x1l zIIlr6E<`!-c&bz91DKIQrqof)xr03fp`b5a?bSeJ#$+f(zBVLpdu1N+#c9{tg-i38 zxNPsL(0%K3W9bvsjk21Sagtts%jUf%%JR}=xvFP8iU_S zJ)Lsgo-q$97aw}OUdy0s;Dz6=DfFL*wBzcO>+fhBg#gQE7v`i{%?&imU2@|bPq#Gn znyAMOiw@pV=BE}Q#sex39^|1Vxu}{hjN|;?&mH|vNVlgg1{HNx<(xp}Yn663w> 
z)^(a)EF$JkfL`WnwU&jf6#(Y-qDIZc9!_}?+3@~89x@Bv8`K*@I%S!QJB8e(1W5-#YuAJK(fAuwiF6(GFlPEq=)}- z%q@y!Q#X_&$@3^RQdV(ha&%m{4H-6Dk?GR6*ciw%*j2Hd5r&a^@s;a?#L;!-j?f0C zcVjj;254fz&%Z269}9n55qwYOy5P0eoHq;aX>pR0NTjtXCMDYqbRl(ZOPDUPdl@3z zuHw_Zt^R3~C61D!b|AK8qQXzZoc{HH1nB>dWCE|qZ68F`_A<5vDW50rlY~t6{OKHG zx5CU9nhP%dni*!`H!H=5AJFWSG2@@udb)wZ^CsZiHalHIGbX}_Bg&JZ3;4p2b%^xV zYX2t-e*au&>B0dZ3tedqO$)C-4==QE$bb>Z;S7H3OU=b`!GEd|IyvL)9eR-U1sWdPgk>~40N zqrLSLJL`-ge&^MhvMR_^&9I_kme@tIIqwZ5J$8sFEwPbn_z}xgqo3p_cwi`8K$LU97=GX{;$qcTV`3eYq`I zd5FQF+m6*KYfo+h}H?s-lH?Lu^xy>byNCSSTDqfVy=~)sY1!+@Z}eK_umy9 zX9ig?DQ1Qgs~Dq2Fndi~+h^I&{0h9&&V*;iUY%#b5K4q+_i}rN8%K{S&Re=#8)TL# zq`v#7Q09jV9PRV%^$9zrqFLP;tvu=3;Mqa>!QSu>pXh3lC zWOBNoG<8*yccm+K`UUUlYM5e_)oTU-XH3+G-+;@dGEWA2$9z^N(PAyi&G{F_yd^7e z4>o}n&M3niMB|V3P4|Fi5HgJeD%zR*j|L!xL?0-xL|G30_c=YXM5v5`4+ncA_ zioN~z>3D%kEtq!9(G>3;=Z#VrnxGS7hOSWko*CXn69G&O+OQ^!q<6?i zm&awK-^8&S>hleY55^XYI^rKAIE^3-UMN0On0<_MbODHzLGZvbd=tA}Y z_2+*u%4+wQ;U~C8f-bun^*Z5`Xtc8fEG)t^3rgq{F_ywwV8cs09b1^SRuhAEmKq`e zPFq>S`KsrllvVFoZ-d>~vE1JtW&Es@0PpB5WLGOW#TvzzInh{!S`gIi9J-hS%WtT_ zaKh76GPMt21rGS zb^-S0jsTdRTNIY{Y4aKoS_6Z3A_nyW^KggJ%##dPJbLV{A90I8;cPN)9+JEJmGZf2 z$heT9sJb;b-5g3}kYJ+?0!bkl$ufP0+AGs(HUgHhqn@qc=1CU>iJ?$`znAvpBke+z zLQ?2k5tio-s)zo+s>!}gULK`cnpV|eQbfw0s2(nWJ!CKvY|ge~j==8_zRKi^1< zb#YDd?Aw(nwQf5$25g#yT7mxiMReMT^Okk#z2Xf~uT!c0HPkG58ibicou8}2Z(1@L zN+xfp^!{2GH;z!rid;2$CSL7j{x{tIo;C;Zn~G0+t(4qExuUe9^gd+nj^FI!;wiw8 z5~&u3DtXqm#vEHs*=4wr@rIg(=Fb+#H_;oE$8XxDI9n}!+N`OPsPutnG7%vVX3ivY zpD&XszD32?GsZb`kI&drJ>%QbE7^K8-n7l)ne|N0zB^#HDYDSwREgo^o} z-04ywupW46vb6u^Iym?Kg7|lR$K^oxT5bovC*+nF^VO(P&Y5pFk+_5_~nw6?b zSoAjo`C;Li5>=kg)%P94FP7k{fqw2Luc1TI=DF0<=<7nmQapz`{uVl&vO@Y2eJpiH zWzw~qfMnXDKJ!GbKr|%je;UQJ9$8Uur(>@R>-RN?b-@-&(J;F(+X!~`-TYG9ceV{W z2T+fM_{d;?%fq#ln)-I#cajd09O4^K`=DlzbI?#otj3@k7IIOsiP{}4HqYZ>uroYj zJ#q8;Ho)5{F&MF8bEC&Rc&3Y+M7R7i;oha4_TEF>x7~LHPZIrjPc$_d@35NLPLnu?MfrL$??t^648mi{qqPn_SKtm(3sk`+L;RMx4w1ZG1VgJYZw-8+n=RaQ5g{m&8^gji^+-DK>{ 
z;~r>W8pIctosdlk)-_-UD4y>9WUbI8A?iJ_yO{ZvFLuA6UbnOq`sz?4_&`6*9XRl63-}qrNSB0p;}TFU)85Q@Sk$!}ZH%S>mykcs*;ym)V|{X{OTN zThyrhQc`>q9ih#QcaYA3M>kl;48aTxVesjv_fj2A7X^A`l ze_1hYd!%8)E<|S*ql>B?zYy}yVq>5;6g24R*mx$`eack1sY*6(kKI>srP34Mxk$F) zS}R&7@CW0^d!cu>!&ADZBE^;{&jVHdR9IuNREfVg{Cm|x)>NmHL5|`Q-wV3kZa?Q| zYt73k?LeO^+;Kjm`DMhUCQ|5~9OR&qntX>_A5?XX_-JTToKo>wGqK?KsU zmVZZ7l$pOu6ISg#Bp9VO4*8z0IJ1Ol?m*Uflux`#iM7GZz~+=D2`e>|Hf``2{Q6yX zdY^mDC`4z!w(u*jO(=H5$N`DgN?1-b9out%=O4Bq*kiAT&&@j7(<8CwtLq>}3`OVQ z&|Fu+7!ID4Z>&E06~u$KSHT<5NkQrPDU|S#A-oyXPCu8QbPegZfOv*ec{3=ZKC#Ah z41W=6&)ALoP2zOqt{&LuGvQrwW4v+(oSbHTSXSk;&K0 z*+~6%F+FPL42Q9D77#0$fqQ7)p+)tm6)S-oE*W4bkj{L}>g^U)bYw7126n$PE~C24 zcdG8{hEw+VQr(8@j11i|51DjV-UI7#dv-ueR73ijki0sOl(>|Ir#a5HQM&Oa&ARXp zM#wcU`P)>JF&(t-0{-KP+gNWQ51d7xwjA#;hf-nI-SZ%&skn>TExgg3J;8;Q{C#b# zT)n}es465K&}3wNK_NTgjgBn+15 z2eQC%Kx-r{m7_Axne;!bvIb^WWw2Pu_9%5cD;-TuoYuaE zpR&fynbU%jhLc@`3H=hD=creTkbU-VifQ|Uktl;}n}~Qa;4DDFTS%(2wkta|yZZ-& zEM8iPes?HJ4{9-&o3~?E-Ppe&8z=y+nxV1z9gH{rLIM8}gp zG7mNE>@!w2Mi=e^qNPzs;YneHn`T|<=YSN&wE_1&ZrTZupP`n+3bhPu{nY!)bg zhPq9mQd7Mc&qM0oN%!%Zk2WyYeCop=l})b`n80Saaq6f|QlSu0haY&mhnFkwGP`cp zEgm>>IiuyC+CCM+Q73Y`aQcjyhFTNGQ8Ef3;f4YeVYPO2`1iJA24;Q`%_4~+Ei#h* zQH!LPQ|(T=ocH0hNZEbhek+UANW6fa;_hABp4j%-gu4Q-zJWK`tmL%R)MRGWN#80X zG11qh|1Oypx+n9c87*J(KimH;ri~1V%1~K59nQb=MI+j@+@&{KzN{Iej77oG5!m6Ch}grTq`h z^lSfA9zLasvpe9=L`(5nv3bu%r?Kd+D&@>WKucwM=YF%>NhOpsRPMHE_`pY4q9Anm z=9%K<-DldL=qG$mZM+v*c11hAfR^A=ya(>$NM*aIY#%K6QEe}Jf=LkIbB1YA+jqxQ zO4sZ|cN#ki-jLu!U)A)*kWXp9{}m$Y3pF?El~I3t=bc{dS^C`f(OG&-`g|+6 zNWGnIJwLG}r>=4>5#4E^M@)5vluRU7sAk002ja`K2OJ;&*$;5o=SpgMn1sV~!LQOa zAg{rymhQcy;2m+epD)@<)I}A9v)^l%EnB8S$oV|S?qze2tGNi90nrPuV~v}K>L=&t zNMWH|-UP925+hc3zYOl;Tc$7rT%i z*!|J+zfx(Wht99!T27H7Rxo7f9m|1#?(rh|Mr z9{TM^rXDE%snTy~%$5L67k!l!7X+{G5eXWdnGnh7<=^BL`px2l>*i&1WS<^%v5L5h za|^Dc?P?D#X&*&QZn%ESPD4_{=9G3hr5t_-n4D@Z5Fb>GN-BSTZanPk?!D~t{>j*A zPLl@oE*{JH^yLQ2%%aLUTgg-_*3aTnlXSW5OG}ify@uiJ#_@tro<1cVUnv!dVq8@* z2`|UTs&wJw(mBJ{Wo1x<^DByNRHX8|YSzPg&pP 
zz3;&o6u{>1pFd>hV>A=9?us(_3K!Tm)Jv`*7^~+c`(9fXa%CzL=iI`jj0+G~yqrII z=dm@EHhud5b(>(iWVDTRo^VyPtckC!Zc#gLX}WY=_cKLa5ZaM&K(G0@=u`($?94N+ zHf+^y{YlxYdQT29f%TkuNGfZ=eEv}LbVgc2g)CZX1-pVOqYrIRj++E&;8*;-_8 zWe*^*6*4mw(F@G`1tGC%9N^MhdAP#FbyYOM*ZxBn-*5?SD}ngbG|Xb?v>N$i`;8FG z_sbZ)GwI4ARXAJ6CK-6ceex`2&-jUC*!%0y$|4mNZF4ZIR(Xx*>Ux;A*n;xn@3{QB z&i*eHw*pu1j=`%+qchi~6{}GZUkwB>I=c@t2J5us8l-^u=+5J*Xi+oBW*i2i6bBP`0BVQ8!s9qXhGx z)=6mq(l)skH=9_^v&D3#={32}Gf8`pvvT~<(=A>x?mENAu`lT4Pxw{=cT1*_elX~( zOFNR0Wxl)p4~FhEARw2%#z-bhJ3~(Anh+Zi$3wq2K>aDiVX{g5neDPzl=4vm;?wu6 zU1|dV<0mIT?e*siy%|TwgLVS@W~x$<=|InVB3DmgdnXQW!qti2oVh*WG9 zr&zb+gE-xS)j9{sR>)5<3gIOVbUVrWI?r8ETI}TICBNOA$!%zv{Z02*|KetacKvhh zqDCgeqFL6t$`R_gZhTISZly?LRgwa7X4{3rYWn$G_2II(g} zwFVKIkFtSe@l)#sL}D$}G&$4H<#E<;^|R*9`xs3Mu6I#ElVE3c1>aO6u<>LaW(c}?*)&Z7jBy!uSMcz?VuCO_dRod{tWF)#Z z6~OhlqZZ(oh+-=p_bx)_INrM}W-G;|mcL8!{qcq%HfJ1eWu&X{I zi#soZ4*JXj_~=M0OV@1-5W|Gw10S0qt2uCc|7op8Ui+u&thq@qFi)Avfh1PLHo*ec z^VJ`!+J?M61#G=<+a8^w*^PhS&@x7PsDy0KVyiv*2F(vH%^6xF+bAtdqIbg6ffGN} ztIA+k44X3<4m$E%7HL9PeAD5O1bMH4qu6s#eA-1y$7vPI{5Fb7!e z*&~VD5=8e``2WkH^qr;=d~LlMY==@~QazfEo@J|*t}R(uwK$in!3Ph?TUMmTJ=@tP z_$EPQg;N!fKaSf!)ZBa9cGvpq-vo?on_K7_kH1_!J7O2%I@pOoY}|(eT~5OrQ{{4b zeWV|W*b#X2b$ByTS6V+l`&FRWnXsI4ByD$Fq^aXG!WP|gxd{DVP*y@I;#=kBpD z{LPl=^V6+d>s*sTn3`}QUn@DHy3-)jmypd>?rQt2O8sGZF@43T?qeO9(gY>CSY3nb zYB@Up=TM5!J|d>8hN-z{P2M4TH!DVV_jL7V+J|P&H(QMNJai+2l|&qXcY zt+Mng!jFr_0eQSN&Ns;|brX#i^K#t_Dk_uIq{#gnR?oDr7!^@cwqed+2a?75**Kz0 zQUiRKMd|Jp3R@)IyEBaz@1=w^q@ABmm)f>ul$;?SNS{pV8*w#HgC;F+aQyjnTkKB6 zyC+ZhmrYFb)=7#9kLZ=4&2m%npLI79(t_58ga%qkPpBdyeuFg~$F7gZeVnrPlg9#3 z8e4jS8Fs^XUN|QXdZZ}(={Ii3(siw?_f=w&pX;tVdNAtZ8HKa)=~QvOXg%byp2%E%Y9dMq=Sf^3cCw`!`}boFLm8!beJc}<{=%c3xiypUBudy=a*0@$?^SQgnYZptvHP?7y@RP2Wr{PaBxCOq= z0F!fA3`NB%v-wj7@>ojpeL|Y$LTw5eSIL=Y70c8+E~xq7Jth}^F(PA$N0Xgod3-&c zR3e)cRLx!ze!Ut?uG$BzDPAMRd_^taCp3j{qjt^6K6dMq-PwAVjX?dEN!TL-U$e+L zd{s*Wtqv!rU6mePz?yBy=SuCM2uY0S$PQie8>xeFgnmtb#c z$vZ1(8~a}HuTaSGP^{+L&Ae{Y%=s{<;_m$iBL#LbBsk$g>RVU^5r*Ujm$*FIw#ci0 
zKn-x;^Ydw?UBIDn0~uMWUW1;yRXFj3XkCw(&9^gXEy;dd-p$`o<<_}HH{e00k|td3 zw>A)3)odebhzSegfV1ul0{*>SP0xKJfw17@fk9#0Q41~IVzujHg@#9}A3gBvj|@^@y#hKDM0%Voj5tcQYPk(GBbxH5ukO~$=uOEuV=m~4dc)W$ zv!ibno5b)|mQx=8@@^rn^<#0|ND*b6L`1EAZ)VrNr*yfoJQhYV3dGzvcYA4*j{_R306(1znV z(d(>z6%aK+qoZuwc+Ap)%P1s){G?}R*ZN82&o!OrNXY9?Dc|ArV!+aV@!xHp%>!3t zn-nQaJykPKYxZfGt_veutcxAjY~R#j-MzgAXHKik0E*xVMP0F}t?ls@FzL9(2YT&( z%PS#?RYhQ#vR&d6cCh4)Lbp-V;mizBu9ZAslKdMtM=OWOno_PXYlCO8`g~cX)6E{s zQ!8`B?uBK_brfSj_4L${O(#3p6%Eu4lN`h9LoH~1a7WzBczYmU3J$2V4ppRcF>DbE z=cO*q0a=Jrq!wLuXspkr^Z*V`ysezt^z!VKm6&UAgC9k)fdGz2ygLyI6X`%d+A@{J zTaj%IOKU2s+x*()i*m7^DXX1R6m}433IG!D@A!7Fng?ImJxGyNj92!^^16m&^aHKBj&YV zukb-{OZkokBP9!f;vw+MMS6Fw>>fX2n1T4(w6^zpSL&a8hmN`m~izdTJ*|C@?Ca$x|t9zl<4re%wfBc{N z{rmSZC#F&%)rBouo^JKD8`hw0@Ob|$gN#=3Anh*Zt~4RS#I|NgrB|i1t{7IzTh?g9 zk2aN9FdUD=XFAM!K}5{?wf`3e2KUxhhVkkjjC|ykFq#?kfV2*+m#1-0?vj5fb**&E z=A9dKnt#f@*_vz2aN*;>524JU_3j36y74+?lOdrnipI5|5`k0m&a26Uo(QV zbM?c|of+ciD;FX*tZi+v7O_CSX5UL|V=Ac3;Vt7AtdS;t1AP*OK>4z8F?U_(Xt8z; zEMe}f#C%&xba|c-oyPgAJ^Hn0`vOo_G2PIn;Im`QP-#8Yz?Fb7{&b^aq234j>hsvk zsI}%cH3;RVcV+B7r~e1zb8Ww{9Ir3QUCUfxUH|?24-FfpRjB>jkgG2quI&!tGboo7eH z|Em06Mv&@qXfyqgLHl9{d@<@d?6@uWY*&yFmy1km%ne_^u0vw$RvIMjDu3%*CKQl9 zt|QksDD|ta!u+sQ3yNq64y2!0jL2u(^3~Z9k;^@+jgloay*_4Q(I1Y1aPY!T~JY0au$lqKsodx`DGt5LOO$O>!HAd-TGJU z!p{=zvlYvnAU;B~3YJm}) z-H{2)W3GX<=T*W3rk}3P+QMVVs4GMgCP!x#)Ine|Jlj?yv>7IIqnwq~Y5sQW%z~kjL$8(mAdU1dnstt- zh8P{}2acBN0PKn`+VQi;RZgyrek%4!*a`J9@zDE5&d2+$-k>)I;_x^c-@JBd>SDkP zZmDeNl<;$PBw>VIbg%$lQU2+p6`nm0K)wY} zoHY#70fo{4gaanMQ#MPxUUn5H074A_hCQ8~+>tC?d3W2> zQb`F~?Mp08uI`VH-qgV2^R_L?%csvm0(P&F*FE$XQV}!y7K(Km`MI{=XUh!k708?H z0>LlZy>~Pj=oqH^#04QocwAT{?*k{msGp;c^*F998q^kW_T4w945T@9| zoDjK$C;#Be*P%=8o*4H(X;v3;9JLbcb4LIoqH6^U;=1)!9V{!>w9u6X59cS^_! 
zX2|^qcMIDb&B5jfwu*8(YhIR7IN2j&*}kpy*ZDs!c~b}Hz?Qs?J#sl(bD5Mgyf00E z#y=;azv*nUfZ34V=0ntj3JuElo*|S-`=F-Yy!6FAX^$heOR8*AYMkwSIl@#R!wL0>Z3Dn%M3M}wBBkVS)w`bk&e)rxP zvSv@Cjen}f#?PnRw&p*Mr#O#;4S78yKc#A5mB1d5nlM8FQ=HaORa^=@LY*lwFX9yO z(2^!De=wL935~C84xinmoRr^_e@;Fvl494J0HIkH1{2Jum=uB(;hx!YxgVvpYqiO zphBJOf+@TnRIQ~>ENT_Oh;_(5#NOHo(XQL!Bt#`73+xmXRlYV045rO;zw&4OrY_4| z*P61(dA%~llNv$l7gmiaem$j^{Uv0;-9A8P(hy%Lyu--0;9H91T z?{>m9!NZY(PyN7Gr0(3T?0l`!0$!jj&4JYCvzGydl!2j>qrjed149Iq z0@(UH$M$Fvtk>QQt|tI+<0;K5>(esvJbPKrN4&jiubdS|0~8xQh8D&>50OuKAdwLf znVNomyfo^yois8*2`bq_aOV-v62(p63r8B%P?_uPk5uy1^{8tjYCS^+@rou4gDs*v z#Onn2qWTD!(1v_uo*3=(L?fOVu{HK1n}`h?u;L`?*IYVi{cs&qyc+T5F@AweSEY(5 zC;O+gAG(B`C9m63TYo@9o#idGsG+2<7yn?;QSsfSu)tk6!r2&Hp7mYZ?0!9t*>yHd z@zT{7m!b>i)tsqS=NIN}{?hpZ^KOC%i-tecy0Y1zQSqCn!!-BN%1MyW8cKW5wrn?V z!rD;aCsApAErWPxwVrlY7V3mP0DJu*p~YbK_FR~Ej^Hj`_VL}Zqy}pmZRYcXHNC)| zPk%-8zrFC@9DSZuY(eYUQp?yb`DL@$)v;E;OoBK;3SsLWXBLN{e!&?5Eu>8rN3ZsJ zI@zl!>(O~nz~lec$zK-_Bq>e2FVGvrpaHbC`^`ILuuq@Q90A(;O45SBa~0bYxB3~! 
z!t;I0-6_7rzSSIae0}WMYRetEcTOa2BkkY4RdXZvTAbKVi53>OgwS8;yyQvmWzlW$ zWlTT3>&Iv;BRD+A?ZjGug$GcHQbKzR;#@s@{*}#Y4z#0XYpKoVDgK};&tY4j-;NgA zM`uX$6@jW(!$`YV^HeDiU=M`{?yaQTC2Y_C!7y^uh{(6q@myaB`@oylQc+7ybRL9$J14Rn$#e1Y<}Wzw{PUI+>Gp-Tgf?SEOxPJ_TW^R zHb9MDFQYdE+B3Dp)<8nP+A>sMYBr(LPb#)61_d5L61kU!n^2;+q7t9~iqaeIh70B0 z4SR`2ukuu9=N{Pu3NJ|)-csirET&*xg|4uJ#Gah59I%PWDxKE>c4xZ0D`t+tO!T`E zlZ7EmnuyPnPHnXr!}PxL-eJ8^6$zVtTO@v(Um0Nj#IbwaMSC&NZZ;(uZ!<*0`qA=H zJy;I8`VOvC#hL2wc2rbHjaekn4zhao*1iPeVOrDpmD3a@t$@k?U^FdV$Io+gcAk7b zf7PILug*XpAURwLaIM@=md`HPe9++)pXGgo~oHqm$L~GL6qrCO$$98xym&Riwz%l*ky!K6O?Ie#0oN1%e5ip-VvDo z2%`A{|C`b{CVaBlthO}W@VYiUkMd)Nye*s5#(bZWX#nYSogWRmAeRQx{-$rAi`F_W z*K{Q^1N0LUnWekY{^w|Kcyy3g_|Li>@8+zi7Jp%)g+96o{U}o(41aKeHhkO_VoL~X z;fSr*He03ny&h?g7WxR7mwrj7XtmdmNP_`3E|bo<3KFO9=6?Sr^bu|-R^{QRqj`2( zt6eG~o$+bjvmvf5yn-pOQ98v_kD;PRlL7Nr1tEFoEW!l+FX;28<>-QJhzN?FfBwrv z9Ic7CgWb%zF5}>cW*E_r#xz zb`iI6nT~cOUTOy@$uLkSIM{hjaE7O@{sp)E?>+wSt1QvSt-o8@7Ll;km|Jkx=jLDk z`@Mgik+S$ggxInPa_YR432|Cr`xh_({h(go_r#T1vm(!|tf``f*W*c$B-ar<4YD|? z!*R@|hW&TPc?vOE2EPU8u{NF^jjl%!k2C}tOK@x&#uN)|=&6<-Cao}ocAmDk0t+fe zU6CiQ%_(@#jNL)lg+y!=f`;4ELj;`Pr9_1*bVv9qpBS%k6; zb`VZFY0QUbjZ@1F6X!3LQ_Fdk8u@T*?!<}>Xp@JC>=5ru*4Y_p?QTs0uSSkmx-0i6 zM7@^?!xG_!D?Rr3%u2H`yQp}FB^Q;{H#};RN$6-@f2u>PzMY_MfwTO(8JsQ3AA}-0QTA72iY5YpuV&htBV+Y~TY{l-;v(8Q^quEF>9LSVF%!C#8Y?H{LR_p9e ze$=~3rE`+WIr(%FK5vum8DLIfUuh8T#3?MyO|!WLHz9a~j{xEjs&>)H(VLO+cXK@w zXFa-l;bDH9FoKM*JrvH$qj5Ro*df08aVBU0@2hI7^;~N7%8mDO@y<&eIU}>wV~o{! 
zVSWN=!P@VbNFyIi4&a|{QEFvi$MVuQm=oHXYw4RL>T`-Bof-q@11Jlp18ic`m8OLr>v^LDQ1G5{c<0Hj1?_o)wf^hxg%f+;vo zq@79$dt*t~Sq9H=)%Q|9d@SNpuskq4EFz#|qc_c?)eb7m>l-I5rd7;48Rb)c`1153 z4qaIu@0@I76+)6_*}^_e$9rAyCP@rv~0pq>YuM5Ds3{d;Pwr3S`fP%&!Lg z`DnJ*I=IL;$)DaBdW@y8h+SIG`Eb?}M1EU18^Uz4s=4DWTv|>#Wi3|W39%m%2nk^< z0X+h5ovKMuIYvZVwsEGWP}PVE7?X*pr(9+qIgSI2K+~sn5NZnD2_9x2UtH;eg{yZ` z8%pd_db-_+7&DqN@g%}^`K>D2Wg1-sfmYfETWStM^GEwC_UERLr;s(5DO7c$8|7S@ z?#46ya;^ICY@4}RQ1qDs3m`#StvXa=+xdxJsQALc>aI)YlroHtbYv9NWY`#ouNHwX zB2j%iEGWOjP1Y$Q1gj1L_1PE4Ml3gc>=D&1F2+in0VEG70O#Q!SKkLnmgaQ`ZZOte zygNAxCuD+VJ>fE`&A^8Smi$ha9skf(nkXqg?%drR_B|;<>~SLQu$5E<)h>4Ehr&Ze zCU)oQqI2EjB2P0+0ZxUl^y;rObDk|N5`Ww2g~Who%1M}B!kA*%*M^U=%Ps2VWt)-h z)pRaL+%fNM_Dy3vZ4=^#D+}EI-sbZR0fs^- zLH3H!pvuZkcMCvZTz-H46Vd_FK;IDeW47c~uWCZE!F_w3E?R7v9Fwp3RMQ-P`3)5C zPO4gWun7Eow1Jko{|naM^U2>>YFx(z8;jOgc>@LntrSV!}X3+NTocVFC)bB;Tp+Q@kk52klQWIKQIm^DIu2fHW zMf7X-ub^4pNgXexPXb$lSM@rkD0hsx1E?skdn&KeL z9DiqAH4CJ!cuojojzOB1mgW@`o*WmE0B3co8F5e9N9#<~|03?K!{Yj~ZPAJl0wf_M zI01qO5AKi~NTvyttaYo@k#%@>qbfj6qL#$6O@+|w8M1%PxiuHqb(={<$b>fqiL*TD)3Oo>+0 zPq((9?Dg`Cn}|2A+buR~TQ05JgAIzMq)Aak*+ewJkxn)6`xlA3?@ab5{x z{B&}wVg!Qh-06HwEDSWTJn%K0(>{r|(`tuJ#f5sW1!62G*Yg0QEZa3@?CP=l-!)vS z&`?>B%^gW6T#8W;s9oXQ6nl%xs7KCuh4``)*s?WzR4lQb5S(w@Q3l0ZvZ>3t14+&C zy9=LYw4RHpzYhu+jX=p;f+l+$Gbebu;Vjh))q-M{IZJzP@$v{!U89)H#TS*vL^A~& za4lfj(~PXCiMk=~=wJs%x%c&{QruFHhMLqAf)b>VDsOyked%t}#V-#~Xqh)cX;5P* z!3lc1$_#Nq8lyU|2KNx37D>!Na)-12NNo?7*zu&IT9e6uNUXZzPHkeAb*4cG{Grxs z0(7_r$d85Pw0`Fn{H~=neY31RPZwwfTU3xZhSbfzaCS|s+iDUrv>+bN10N=HQjWEn zkwOR0=TqgxbZSDS=zsQj`i}0CQgtn@*7HYxsSK_GR#{@Yjl5oc!zf~1%YSa8lMjWb z>UGc@%+8w>PbQF^&;{l)R61uCf`&r3L_k){0S+;yrVdSV;$Qx_boy za5{R2EQ`N5yzK1^Y<)yhp~AW6*S=rzoYpMd&T2kh4sN#3z+N~(NlDzp=2^IPlQY86 zDz{&?ge)P+n)OGnh0N>Z0zb36Ja#{MUMd0Z-odRPLFW%qT>JDL0H8p3Kh`yH+*=lU zmkh%`pW=@#b>@-fNsO)G0nzg!C3Jjufl68x>7N|yEKNjCF$L*o1gw1#FI4UZF*;C# zyf4^)&XS(8D$eKSMl;+S$-;gT*fy)&-698$nrFfqgiC zvB=7ok1#l*SY(Hyvj~U=vA_KSjBU%E*u}F732Ig8p3$a*9fWI5zPNPEFC1seH^F5~ 
z3^Eruyk2^D&`tzqTt9HZ&l>`Vs`Yr>PbR1g%fY2A;Jytq&ld$t_!TPo(*nJ)gN$mxVR(#Y=9pM-84rmsF3l z(zq2*BDVquWaGV4qe8*l#{D0TNb^6UPaxasEl5fNTEime@K;PVUA1< z8na;L=G)Jz^h#DvnYb;Z@gYn5B`_HgOZ&Ga`7i3Sa`)3M^MtNS^1y1Fv zHYl44mj;F8G}@Bd>rgj{CDAYQBvrtGthJGSQ+yc}kB_@Ugq4P(?xXmlaap#H+bi1a z*e2@*YNS8KWCYBUFwAwVbGI+#Rnc(^q-P4uylHINkA_H7FuFL>X&Dpai9)o@sPsQF zA18=K=I5_H?WB)o5p11rsaJKMT?am))3hOBLFO;+&oO^I=$xi3h8Jw@;YK@E?!T21 ziiYW6o(j0h306F8ogF19ETAI&$nq`I9zI?jwj@Y$xpFxHzv98H=fsE2%ya~Ba-1^b zd%>&Bnq2Fz`NuF4@s02047gvp?rtw+jJ${FJf!sreq(_C`SAO3`n~mmSgz{Y9eL+D zj)m=y)0&4>ZZ`8t*>3JNhnrNsd95W(U>)aPuE5PoFFq-^1G6XL>5?3(5sj)pkUn`^ ztrdfF@bT6bnK&*UsU_zUlfy95Ry>0I>^=N(9%$sb4N|W++Xi$m=}dwn zs4he0%{1g(qnB7o1M=0va70e_j!3B#k0+hro+(?iDNnB&d4w}6_enYQJJdG0T^RM| zviX)J8kWkK%F+@o z$4aXBIkbCM7=v4Y7#1AKTN>!Z2G(=iCO)t2qOCaOHi+qylIWl0k$tQzDBTvbA}`~K zWo3QEu@PwiY|bWVIX`{cY+qVol(i**w@^FcqeR*bXFiBe;6EFmsFe}dAS<*s)=E^P z%I08ak@Tjm16WpEu`E+MD{Lsh1APyT1y!W?cI^oE!y8Q_TyN{R4y*T~Q_W!U%*x_4 z4$QAdD|l>^tx}#e)}8PUiDc8t5)5pA7`GrjJ-Tl_T*G$eTfDMN0wrFz(A$;FW~|%e zDZ%~pH1g3FG0;)FaJPnzczQvN5yZ$FDt^@CLdS8kL!F{1w-=HJ^QG?)W?oW&XUnC6 z_kil$7bZF;7Xp`-4`mk|v6R{T?eL1flw@Qz2}q$kj8pFuqHqbLQ?>DUVHrCN=^rc< z{|J956PE%`#9QfMa`PGQBeO8X=cqS)&@dvAwGKCxIc?&~J_bAarDZBm_>y+WUL_Y3 zPBbQy`>x~XKQ#1;u#eu+!~%`pArVEP`cTy+ra={E(u6vzN^(9vH3^%EZb`hJ=%|=)LDnkj?Cf5=3>m^V5%?$%4`h8c*?uGOMRo z+dhfqchQS7v)ugn+%nnqVdZ={rNz^SJBt1DdW5si9XQCW4%kKa2S0qR868yea^IG z`!&zXYMI1zmYX8GwRP^8RW++~RDNnA-0tbC1B-8%Yi`d@rgtDx6Nc3dTP*&^+n4@b zmXVy!Ga4q=I$O;x&aWtBcNw&!HJ28_SQXOFCw$zb^>L;VTxooe^kMg_zNJQ$N3`-P z!I5{Yq_OZo{mYtZ;sA2rA5}^n6xIT?e)(^ZB!((nE8XcZCxUP&dsCB8)tDV_ z$!~li+B23uCEIF|m7wUTRLvUWjSb0lrG!vQha(xJKkrRKYw9~Eg@~Ap?cLd>=?Q^7 zyIoIp(Cjr%@^BqVogR=h+P)fkz@dcFd)nAPps(cerHx2TsH0+4eH-&Mb}=Ayo6YKY zetIc$C`)Ej9%!J5U=^~>EKhV$M1;N4t#BFPdSIqWJX0pH70wjEcB-m3@={!FPdQ#3 zzEOaumRP`}B_{b@M8OYN-@eIUusRLua9$N%-k>2V-!Mw65?iSuJ?eS!(ymODc1xL2 zRb9U+(#LOZE7T^ncAPRJcZxGas#`9b%XPj<6@6m{jy@uiShZJRvt#DG`)s*}+vbt~)pT8SNS(L2X6 zl%4JPqSJF+R_3(#;i8Zq<8X#Xo`;_$f1&IYlV)pvkCC$TbRl>hbR0azxSidUa%ok; 
zZs1g(KHPK>a8K~&EpIHuid_%44pZX99PuGGFuk@U2R)2dO1^o7EUW404$)O;4y#ez@f zp)2HBh@|yKGq<#2l0N{z03Uf8TfwO9m8Iv%;R9OV`UPm^<0dg9P?#~Vsu!DZ^uqdX zGgG<$eQhgCV(+Y?6Jm5dR5)K?x2k43f_$g_yHxVA7goQsr0_x8#fX<2*e2e4D6e8Fr;V*{bZrIiF%~m|%HNOUH`|B0 zV9^!|f-GxJl6BtOfKjZFw)t9sY6g!5yM^-9LB=-INa zzZVT%>243n+@zsqmfNKb^un?`!aF)(!jB1CIP=}BVs8h4z@V8-Lh=iS4_yVW(zSyOk}%LsN_Teg1ZnFrPENWk<;Ww zwn@i!^gJOHr40~iKc;=Z2aREdPIje1i)KPg(9?ouD8g_WGs7pQn|o0I{vG3h^^huF0&1GkUj(?)l`FlR)pbvrr0aNBKmnoN9MnWRY_phwcp6 zrFlM(1&M{z1ti|%zlm}G2w$Zmj?CulKgwBdxvmutOJy7v zh8${RQN9nF2L?KHlMK#|MEOYCyn zD_8lKuh8B|dOJ9V%za+UBpU*I_V+0y+HfeA9_&xU!D80+=rnF5tUThCU104EgphNX zF;7yQ!}DT?+^9Z-Wkd+Hj|HVq*wiJhYgk0@@EbiN#1>^jUQgH$_$O-c_x%M8d#XcI zV+TdQnkkQ13XONa-Jy#08<2BnF-`MaQxlR2x_dDMOXw2f$hwYOLA54?dQbfN4o;!qXAN^XCL>eeeR;{j&D(Vu=31X1}WxI(vfWSW=Ptc4dBc zs}9R6{A4`q2xLNKm)bO6?FjoA48kIvD5mpTw8%drN1&4!oA^$MuD|AJ>w-@M=#&5VxgirSwC~-%pM3SXP7Y`Pis+_!-TlbsOr@_W&j)2XcFZ_ zL-H3@ewKV+6o*LIsXGtISY<3lm5(PBNz%QCMH#}Xz01-x>B|V;(C@Qb9wKg-oq(LvNi{uBru$@F%!zP?-AdJ+2kCLG zH$-^jy&ytLX5J;kP8?#Qb*t*7Ocy)k%BE!))?Sq6hrE@x&HouX;9~(tW((Ty(;qDB zO}%Q*HteVaD>9!y)+@9W(s`df$9Af&CFSE~R4^(vZ&-PPDMNB_mY@c&ubk&@MkM87 zH-#Qs9D}>{$g8g-0m zR@YW#UOA4gbB^WI+9$cVwfdVTQ>5fFU7cy8 z7SpU-lLv=koebqLbHMl#9;YHbas8)x+5;%X3)V)yLD_!{2mm~r3ShS8;z+0Z0<$gm zWYeKcya$3+8Isa4$`7SK#OK~~Y{k(IesAgWCEwN}R4s=(=k*ZY5>?Q!3w8jU5K9Ub zW5$F~sm?17B#M!_@S)E7*oPTGl5d`kiOzwMs1RJLHBUJ6O224cL3>(a#<^}~DXOB~)-cLkC^Ip7;gh{t^hw)t3mY(QLL5<`D-b6aVVi8UFF8UO-1V2* zA*z-CI{q`t7f*e?4Tc3ZzF}`LW4d6aUEX$eu}3R+n|6hr7-TTXk<%wT#D+PR22pt% zHjhc`Ch`pw%jGJ<2>+|!|IXP%@&ZJm-dyKJHezJjn=bHqq~XaC0*kj#qY)RKW4cloojgAQCV(A7*65@3Y#5< z;`GzC$+$qPrz7D{M`gJO^m~wf=HrfmIqb<8VW)9}QvBT*gr%9({N}Qse`mR=#w8tB z()Wnl@v~bi#`oj+;r4qr2P!~v!;I?E0n=T}&L+#3So{H=eLY+8pxllE@hI0Z2V{9% zABrmAcar+C$b*G=rw%a)K`aP(aZvn5us~{84dPMBJRO~Arg#N|4`s<;vb-XH0V<*hM6SQGHvRLK{%@Z?^r+q?a|}gobhPZn z<57EtY*y~5RwZ*zjqS{gZq-yt4Ye15Qp<8|T;ml-i^y6yEKChQFOk!-sHrSQ8UA+n z-=Jng0H}4QuqZ4p`;;{IrxvGwZO@wZnICGso5~Jwc#J;;Y=2+t 
z3vX^VU2{%m98#qc0lGHzKWRq2pAhx`Z%oVMQ<74Tp;Tq9i!4)d$!s0IqKfMUC~tK3 zRHMsTlsH7ROE20>@!LYwg2sy`0sDM~W3r5hrlG{)tuA}nzhpE1)?@d?L}FQy1a{J#Rtb4waSv`H|3ga>r&lOw@p0lBQ!H5!OYH`dD7RG%~34iofOA`Qirs~JgN3B=`O^F;H+Zf?x@yh6p^~Pip>3fdvGq2 zaVb*2-NDm(ar^am$us80iOGCwHPwvg3*@^B)v5#mlX||?Oj@latheS$V7+B`q<)tG z(U}%6_}DPq;s!2Q?m}W@fb30T-M?pIh#EJea#cT)|K*4+?C7IY0~Z(CQ82iZtyg_< z2!;*PF|b6Y4AgFHZsbi@6lq9&X=7@Udc8&;oRdB&s{*EU?+(FY{KI+wp~v7+XUByamPSah$hYW4?^;ROGFMJhHt+3Z;e zj;*m_841xC2kI3w;WuS5e2!?~!XeAg55NfR;~~_#@w7)5Jf0cec~HCGqR)QWt-Prf z8y6XvA=GNM0(t2|_%@j>w~>+Cb$T&(-C}w;-^Ap?$sZpzmEGrZc{H?nYoszawX6+( zO6(Kpvb(F^!u_>4)w<9hHqvo3%90xbORT5~n9 zOr`7DKss|*?~(qyObO*1zrAPSfreSc81paZxvN&q^JQ9`jJuod*I%DsHb`g6CO)KW z8amF^^>2v#!+HNdvz0bL(Dsz;9eafbIrj>@{}vT z;Mm}kTpMKj-Kn=i^v(fm{dAY#>wn(X|Ls#MHDDnpb7DEfkNNAMw|wNRXLWdk>qEN* zM>$~wJ$*AXW3K0>;S6fkqq6}J`FtiGaD>_X$n=1(1O4UH}la@RL;^^%h+FE4Q!qV8r z6rXE3MD_C=En<^KA;9CJtRV8m#j0hDZmv940ZVF0->^G1wSRQ} zUo;=G6dn5*r!FtHwrVe1ohR#G>}`|vZh&)Ja4u_&DR+@p1?DA?@Uh4xB+$(;o9;UqWa(SY&$OLD z3ybCgtMRBz&UMQp=`NdDf7FnJ zC%g?b15fVx9Gz9yk1VmaHOpM>L69i7D)a3t_m-e9VCrA zUmRTxq)YDdUHV;EzFMt)*o_+XJvF??D}}5XawCjP#XrVL7~xQLwvxhY^#xzQfp>K6 z%vf6E_ny@EDB~>f6%s+nn%>_ZBIa33{;7}_WVw;b6>B`uTr4(7hrDG zAMNOGzy9O=Ux7XU@hAV(B))V1n(W$`dNio~d`n@2s*(JWG=v2siZRqla9nV5C#K{i zn}h1%>Vm;7s_OX{VD?@uY7YJH9FIjl9!R#<_#N3kl`-gyO_{t#SFsD#p^?FI! zF{aB(r5?h>ul#E%fK9hc)={>%tG{U>h``o5Y}f1D9h%!F`0SX{3q4(RfWB(C50FmZ zkY+ak_vpgWbaPv?zCZjOzVD%b_flqY8%*gwM#%ZzHuRwnwb zebnkNZmrnSSjyXXTWB6w4Ci^}_!+yD?>d4&xDpA(2VMf@`wtyo)GV73;U6>W3UWJ& z6KFi_l74}rK|&`QFXqTSG~v5L0N#lZ2$j)od<<+rH~qGUAOIn^F^MFYzi8}Se_W>enF*lR!0cv4h}n@OK=&e?05Y! 
zASyJYN^C6UyLf}JOu;K(U?L7nOCb9e>Ew-aoLq3LuE`@^)Sh4z{5A7y)bJi!MNZyK z_f#EZ;}8(tmv5~7(Xvtskty-YaWK;n@767>1VdO{Kq)Rjhe?d3oMwKrizWJ-l;%~k5A6p7qL}@xdKY?C7fMJ$5JJGdW`jVPD?O$T#p^c1@HL8h-t|>%- zQfTb`R4mAXSc}5gxOx$xqeW`1j7uYX$)+C%xr5Y#qIt#q9hv)H+|u`TKRq- ze0gSXO-e0PesG*2TLPwWcT9tY1Zcugj=-F*aOfBdI$4AWOfDTfm3!~CW@Gow0S|KG z`d~k0Zozl6Zl90O9sbUt|B=j4&0N$& z>d37JCwPw4uWm{8db%XphGMMiza{!w)Y3Bsy%++C^7Uvibf!Ob!A8q?uFM)_ubSZ9#BcdR z0Gja(|0z(;!*4N*{kUC*SUbMU6I!^IP4(<7)yZ1Gb{8~3qv*wNX);iw%Y-YfB~_;x zugrTv&PYOp_|Ej>54ZhiRs*8m8016LnGgT%?^V# zlQr*>Mc*U8w$+z#PcNDM^S1_re{}s{G*Kw%ANoGEXJZ3f5x4PyIZ1QcW62-{jTXO8 zdRCM{TRJ+C%G|N1&w$8Wlj*#thz2odS!V@M9bm9y zIWTSC|;2B?#Y)-jMEizal6pJCqGgQ;^&>>}$#x7i!iFAIbp(;AtPnLN_a)Eua*7pOjg!AhJK9krC z_B|vub;k=L)7YaMXTJkL1L}DMJa55{+x69jGRn+@iDP& ze;RDGin(yuuAGtOe7`I6O?cf;>G-$S7Ry=w8L$8DuA@rxLytFq0gMmew?@mWcOBLM zR`)-C<8RD=Ao6_JeuA+X818N8o8U*dNZf^zm(&%$z2(2gZ*4R1t-7{E7%Z=HAxx>q zr+h!0-)Ega6~Ttzn0Sl0^@udQ?g!oE3i8<;B-XeHQGKbwuFe!bR+&qF#BxUVBgX1v z(j~`8+uv&I{W3FmrA|{yqGYI+3La-+L!>dQhR=m;u}KM0 zDel{9>F^Y<6d^Pk5T{=A$hYL zbtVuLw$>8yx>+W$*yK5EOw{D^7vN;4dqv*N)tioNe-G_iL1AFV%ucSv1$-TPwnf0h zG5R{y3f$lMY()mnTg^v596siJi_f70v9l^= z8X}jrD?)K!;AI^r`J*2K8Zu|?<)Z1OWeIp;W%ID|zKE4u0t6D*MjPYd>+(quXw27K zTa7ZUPX?AQBx?;PyeCV(@bzc%EtM-evK6&Gaq=1sY1v*UxvCwib|hp$y>6mw>tpwE zk@!w;$?1oiazk{aW@M8Zbyn}tmsx4%o*zUZyVgzGlp=p{bFy@c(R|zcER8=M#glTq zedkYt1t^W#I8BH94M=R6eetZZTcFnwIw!+6D!{-fb2Xc(W!%9=7WoLlw@ig{p zGcM!hygCfei&*5Myz2bPs8AU{3Ty(>o{1J@D&M$^E^}+D#qAuz#ZyD}K)L_}8&rm` z7ix5QijZ!wX2eb^pU!15h^iB&hjyEuD{iug?)fW@A#RdxB%N%x7KqVpk7WA1EOr+A zBJ~X@h(9ei^3SqQswe0(XjNFINoW2@=iX_twKE=Mavn2~g^Qd4Qtm`EBEYo5vgZ?j7>OD>+lKoo2gsg`?^QRU2k&kt#wH#wax4LsxU5Fd@vg&y@z3o8cC6$Y8}H zlk&nZ>+DtIwnp%X+`&X1S+Ay0!%$ zaEp#^*PYC^gD~UD#R6xKNsM1mzEraYj%aX(FA8<%l28|dMO#^km%E|UQq-WOT|7s; z7g>E>xU{l_@c;B&QBQP6pH0V_BWdFAk%M7LAs4fRK+b1$D7iJe0qmu4Zo7^Z8#-*Tkcu;!uDD>`Y=Oy_es3_gtaVH<*X^JIIQ+h!A!6u{M853O+||+Q#bo!0^tnt}6_S21*NLs;tkSa;D7G zfn(B5#ZuTvyh>&;4aLe_moX}1-7}zFB z^S3_x)^De=kIAUcdy(Adlz0u*IFN}iSRSFN!BhAA 
z+(0V+u_@k$igrX}|A(}Gw+}htCorxfh^<7f-sQQTY%+mdlIm->C_=Y49v(MA7Guur zV+v08LHroF!)8OTx=xu!<}5VRWias2Uep!@ov5VEvnd?XI|OWW)&9-u@ZUMbY@F%r zv$$DqX*}$lsrkNlDY3meWl-T`ab|HIdbsDdOSeFd+JJZP**x=0yiWak=lA_aBpfLO z(0=zDuo0_kLB5RQG5LwZ6XFinyu91glNtOCwl$k=u}RL0Hg+-}n%lBtorLIHUtGOZ zkW#Zc8$_3;;sDww5ZMvVp;?O!Tfe$w55+J?rDkYdu}y(wZ;F77E7JiT3cWVApP@^u zdNwt1#uJci4GCxf(XOs;NkW=mI@7K!^b2q_$PzH7PaS(8UcPnv^~E>T$kBABJ0r%g z1`n|-?oe1Sw9XS$9-G|UdwbSTtTW7R7AdkUB7X4;aELu^SLds-bw_?Tb3%^2jLh-> zE7AA2&il9C%TEy_vQLRC%v-e$d|2ufYQ8O4JNhvkA3?tWkH^f4R=ty)+#H|XDGTLC zcvHVnGZX5Vdfq?A7I$;lNmk`B)A6CL#n~@`S*sC&Eppt4U3M+wHV(92D4;ub%-S6> zJpkL_(Y;qYIj>*$EWM4GWn_qtNEiHCd+ixM*l2k2mZy04@B!R*1BrDB*>3mi5Rtjt zxfMa~6kHfs-5wyem;Gd}==Ocd=Y%JtFYk`rYKBU7P`sjwQD?E}zBF9zQ+ajtuSoM5 z=cMuS@9!w)T4FI&lnS8eBMLVj_Lwezfgr#xMfRtoC zaX>@QdO#<($b(MRmN^GOMtg60YL)!Wx|#bY>LgK0!QF=BPcb^R;wpH8UP2C0cc`j? z@o_R*Z+l1EiHK#v%|YTo2azv`Rl+H z^ehcn7GvZ>-Vl-jX2JbUN)HNIGarRO(?Ud0ZtK)8k}}n9&duEwL9Bk0zJM_ZBR{#OCWtX@PW48dvxZtxb$o_uFVa}%tn^kp3psN75ZGiVyD>o zD8My8c_ycYmerLW^JL3HCj_jz5hPeSN(>Dxe=jXkGf2gV*}Ao_&n}Q#Jk%)HEzSoL ztxU(>Iiz>;ufe{0aB1@kFtYg0?`Qbx-728r*6UsIZUh>+`d-GF&qw_|Z6EW`V&!7- zV(S&S3)YMb2m0-V8RZORRBJ|5an>S|oqwRz0_V)v> zG4x3seB$X|(;M7+$udw&RCRpfbv!r+x!{gcE>nwSvyC#`@ z*dlp?qL-7K%XO+NS+@iHfoe7jJafKN9Ww<*aoL}d8DKVroGU!tk(PJ4s+BM@kj7Ga zyOO=P2~;@!tL7zU(|iuDx9Xu>sOut$Oqg@uw#mMi*z(k|eXfb%q506KIuU(kbL01Z z)p5o4=ckFeG!&BQY+1O$^lSm&snHqSFV{QBT-8)>fB&6 zn%U3RS3TP`DLw9+CD~bp3e@ybHDy1olX51K5yUrKp!<$hCNXxb`k)@vZdJbC_kJM# zQ}!YE9m#DP`Y;n7&%Pn3Fk6-d4Y%JXOq8TSoJh#(O?w0SFTmsMUx0(37$|K4@2asI zk7|z6=13*z83fD~pgdb7nlz-YB%_6@eh(^yMzOS}KBFdUjJ8dm<_{U~dCzV#QONR^ zEYRl%Dc7`i&RqUAy8axZI=}4}>u>?D2yZSR2WdWnZU6adBa_#v`7YJG2IQFv7QJR# zN*}Kla)^WO4PvkN!gY`I@aBZ^5lkW!2IqH!)nmF=7YCm|xron>!gFwrFVr5*STS#P zCwd;LXr9h!QAzdPkL<<4|avF@ltZ@y=S}C`${FpypH!OAwSv`dUbObur9Y9)avno)np6-9FRvOZ6k$e`=kS%>#xco8)nB)1`xr3#5aznH1)d z($nG$_6q5E6t}AE3Yd7@U0sl-V2Y$b z4VBu-BPYsXzT=$1cKS?+w{@v50or)$UEE7DZTg@Ml%`}kITsNd8G=E)u}=#-&FJ;K 
zMC|j+(P{B+5u%oU)@F{R>~>hg@0zvVRCe**Beox|4lL7(P0~*N(Y`T>H1bi3-?M zi@0598ALZ3u?(sQHFar^7iXMaAHJw~1-t61H3*p$^z?-D4SGex3%FRNty*p4R~kO~ zB$u;B!?ktaoL+gH0Uv0FmoDOZS>CU69vzOJNDs5_kP3dQ#-4)dO_^mF*RA%=(zFjHTqxuQ$vCIS}n;8UMxS!gV&fQ-@}4WoE9v#HovDVJ2u z=t0mn_gj-p;bK%U48Kl|^ssceH(vOQ)I8!LxgfsvElEV)M8$E;#AM2b!J6 zA0+YFb_a1jAWy_*RnX2ZGN&nm6h3!i+dI}JI_QfuJka9xc%)o6<{Qe=Ino*HmuHwF6dG64-N!wB&K#_v}Z{=jozuie>_LDRuIuC z*PX%2D$pW?+mO;8X|t{R zA?s4h!pHY;XwEZ`W$s!G`+;ev!pZLQJXk$4%^%Q*J=onfXlbRrabZ|=_q0g7dRQZ) zuRw=7u@1))X)pm1!*7g+EqZitTKx1n*~)}Bj8w1lUouY$1m=In%1R0z$a6pf%wn{U z>$-L2tU&sFGHEU$F^xKV8|7L|s-*+bn`RZ3gW%L&#!!Xa4nC7n+9ABLbfGl?RvsN!)i|X1y-7mKwI;#Xb9^Mzu!M z{w>PeP_<7$T=wDombw#+*B~s?=Gk{4W0;QH(@nQuxQI`1F01C&S&6sH1etYE-5|>; zvBliX<%+}~ZF)OB5nr?XWknSJKAn^Ka8o+2T#7LrU2%isGPQQM&3rUom_*{)eMfhr zv89L;rEP}vHXA%(^>o#OpnVO9xnUR{QKUZ#GUa2D5I#1ndDrTGJ1zU9xA30&C)lpV z8m%2n*${<(8+!e>i`0Mp{Pit4nDVgw7ofd~i`n;1Sm)ypQn?ON>trV8rH`5!Hd0rY zxQsR^dSvl$z%-Swn;)7vW+FQ9y6n@1I+`)Q{v$T8A{UE)v6KrtDn@5cC@cB+Sz48t zbrK{%hkHXW(1WV^>3H4do}YdbzVC!qGKXwj%DktuUO&xbDpU9GZ2(eh?o5(`kMs%>T~xmUW%hR7WJi_Jl7ogl!yGHIi3cRhiA+B(AStCa%T~&zI4| zsgEqBZFfZnY`u4DXT8s4BQ?^o2%2Jsq@$x(yi+&MD=_D}eX0MBtN?!s_b|OAMTWPh zjKG2;u-G|KjQO9iuKu5nqBbt(>NV)v>O77`JKuC`aU$jbGMh^}LAf89rZl402`TG6 z9+jwNCK8LHt>Jb(vyR)@Uf$XBCusUVE&O{w?{ld)_;I~IS07(JFCD~fDZeQHk@Pf| zA^6baHhi|CY^;WoFBr~(|w8d7jEN~g>c%KUvtzp z5_S)8ql+lajVF6HL7K=>1G5?unjvxb-3=x(asZkbjDGl}cfM}Plk4*G;rd7H_|oJ_ zka^e*2W!%CpkWhRfUiFQP#(*o3vO;1kv-KDkeo}?+y)PLov_F2ml!MF9X0SXZ?7p@~jc!ET(IU!N1azzvr1h z&ys!U(U)vPfZ><`nuRsb4r|R+4T#Y>dT#8Dlstj=Fz(2}@L-DTicQv2IMLxq6wZB8RvYtU2Fm7V(YoR!nfd5^yd=+I2L*8{wQKpK z!GF>G4X>a$unI-ND-Jv?@Te@82TFJDyG*9rTKnxh7Lt805(oIG~`(!O<%<>ht zZmhbJ%Kswnt;6D2)_viL5E2NG1oz;85@i&Bm}kkjGG5ITJOMh0`2_w_loQv9@gFVKE{zW{{E{nk3d> z`R22$nSOatj*)#p7pclnK`lg z;!I540d`f6ctV6B-t%PtQI5eP<$Bx)K&!OK{ZBk@WK55%Y%)XZa(y}wqg$WX3bU9! 
zR@$sbVn>uyy}X;;NoC!%9Z$e*ft=D*;}t|`+Dz0NmC+>&-eHNdk>1sK|JY4`iHG+A zi^=eApOq%({wFCiL4UGe6`+E(+VB8aib0MUZwwC&kLi>G=Fb4naF_K7^P9ZlhfL2f zhs$ijIiuRaBBmDg0?GxZ8NTz%yf!^VHk1)xZ>RImg@oo>Yl{Q~~^=$gN zpDy2Ttdrsns}uukIk-zQQBi}I8@_;hS~61g6FCJmCr?HvV@z@rY@jHajl%E6zPY3p z4t%U6^HaKF9K-;c8ZWnshT->4)BIexJ5uxoIzEM?&L7PH0@-goPvkKqP(Ba{bGG}%esX9ngp zxjJ1 z-D=&}>K%3R_ggtfha$@-V!lsS+1lu~L>)Fp72mu~DS87q3Qv1! zJSwsY0Eiq>E8A}idU(__)|6#vN3-lnH8Ak7Jzt61;B?!~>;JF^sy5duDbGxyrHJ^7 zoNDpj1#nH>GWg%XbWf2}B_w?E0dU)VQjF2@0bq97U6Zc2|whyY~qlUbapWQg_qa%qu8zT_n)}5ZNb|i8V%?M z{??!G*}zTtC8(rCWi_F32YIi&uC@A&q7}mBRkhVmchm+=2^t4JV3Cn&G#(8yS=kmL?-SJ)a{jntlPAn)Id|L>;&fWm-GW@ia&tc<|%hotu% zBKDSw<0`A_g~l_B%3o9=plHM4G=6;sdo_?$i8z1Y>%CpP$ou@aYB&znh;f@u{AVvu z*SCl0s69!c-U8;IFxePMvaP*fqEY{)bsH!%zWk2KV-X!qaPq+NW}Hi`2t0L-cfZ zkyTsx=*^OZSD=?0W5K{QlX~ZDTy*7wN=Ch^-i41^Cs-Cjr_QHYi}E&JcJ-lQWm?;1 z6T13X>BVCb{Yw=@Tbyw=b)%Ro;5Ko%zoBvL?_f5xR|%>9lzyN0eDA<~Tj1_?#bP|t zDk~8%uIqejhibqKZUYN3pIex2!MVTl{3Zn>V#>xb^|J&fww>IqxbL?l@#n-(0D#?t z_fd8~u#@@+d8MNQ$rN6%m1Cwm#M?4J+%a+N+~W1$@KQ2vQ$q%vS7!T5Tw-RQXaf7! 
zztwHW2p^rg4eB_%m}=MIl{9@K5$E@}8kzx>PhysNJ-(`CYE1T515d8tyTde1ZYc>{ zb@hznbYDFHUW*W_hD-G?!rKhAkiWjCE z55VTtcs+@&bDK#Dq1x$+lqe2-e^UX=ymTX|v^aB%#mx^wvbR&mu_sqlR&bMdjK>)?Inil% zjSwNwz#ov<=g?_xVb!wf^V5nZ#}d6MawN1Kqg=MpX1sn@>~bUX^5?Xy2H~ifTtn3C zi`fo4fitOuOooo_(EY+{O4Od#DBnjPgY$!c6kte9Sc?=lo_TuGZ<4u=?iu ze!(kX6ae6a-*&huDh!{xr_SRVq}l6irQQvJid}pr?M?Muw=_M56b!Rxc>K6?hrK0U z2L~zBl7F~}3t?YoVM};d^-HOV1!X;lF`d|DP+vfE5RBPB9ZkD!`+jYe6nb** zZo?#sZTatevgW*l^&z!5Wj{VkKd(PBwk})I_2G%;pj>^EW#VnJXt_r6MS$x8kdvJs zcI%BSILtD-$%id?$X`@qt88R-y$>p!Z$O{h9oDSp3aopz$4!WDZQB@3%5SxBK)E>k zYOIPoGP&zT(4$k|{&wdx{D{T;n(Gcoxox@Y=9MoWA&j3O-6lplYNj#^;p@vLodivJ zz9SJ$m%m!Sp~k^OXVO*XrpwD#J)tI7h0Lohu)T+44K%E^fVpkmPnzR&4Ej5go7f`3 zI%#u>ZREvt#iJXhFi3tOwK3DLDu23QRlazRn%MT>Pd)@3*$ZF4&U7UE(?V?B%Z-Y7 z9HZ>xeXr+V;G|oO5uzKzJ;7S#K{jH;$M7bZmyB{O>vWgNxVcV|=9C*KZJsDlKWT28 z%q2%Medc1%IR?;rOxDhcy$y$Mv^z__+ypX}sasA6r93(rQ)N*21r}@n)^e1pIQPDP z#c~}r^JKBIHDIyn%^d5xcb?j~?~^hJUv)ybZ5cf=nWC2nbQdNmy(=ZjlgwUqmNukz zs9iiLO&dXQm;r+7ffl<4ak#LN>ftByw!ik*x9xZQX*lrPPj{n7XZau5>!OeHqkwuE z6dDa4zhPB_FdV*~CJ~U=x!N#2k)tBYNUNr&42B7FP%z)1n_{$YMvw1|32^hz4D420IVf#T1h=aS8MGQtLK2V{Ef z=CoPt%6XyPiYBEYFIKT=vUS}QBk$`^Kf*{gZgQ{ovdFEWKAmGc<*&gd;r8HzKIqJK za%hJobMq2y8;?>R^eEwIeYumzwW+{Q{6xz+F5ay9i|W?x7w-xzeU3dmG8PCII{adLNj$(d~vhn#QQ2n>7#vLBikLGZ0i>i}oRT-Z>vzTFGUblv{!@R}J zh&73p^%L5((&qJHjyYbLyZMW+wp^0gMEkXjam2{XXKk%drq59GdGoNIB%d!i)ls<7 z#%#3y+PBy5_d9}t5Y%|Dh=>ge!N``Q3ftL5#xfKBH8e?y-8Nr}3a*c3ahMm>RaK@U zBb-^xI@((*RHe7Iwr2);!v$uhrO9tzoWkK#y_dlmV;?-xI@IC$v-)xPnicdaWeUS{ zm#iXE`=5EQ?<}f577q_g^(xs|uO?bsZW7c*hx6~$C?DzzcT=lB&tC%u@H2X9-^O@c zCXgcC5fA*dt=~6KO2+2mWR$h#xrDn~*2+oP1rso00MUPxb9%_lF-8(5u5*0fTpvr0 z^#f~s3}{KI;<9LkqTYUPrPyf{I&Zc2YTwUgm0Ou{vYymSDr4^PHJHEfZShHSVi>hG z^1qrsJ7FAX(SJUd(kK`nO)VEG`g4$Ub$k*Sof9ZTJbKnrx#TO{MvCb?HAAh-uRxJK zUQ5H_Fb7wT+fac}?RzaGyumbjg*3u;IqShwq&M$O!0_^Mu&X3&b=>Uenbe6W-SK9@ zWK6lJGc3L)h@IK_mf>%9^cA7C(BI+hP~0icN@fYu;w1<@B#wJ5&TRwkRn=rscyiVO zcocEznq3(@dE;g3K}S}f5(d4z5^9aCl?%F6X>JQv(T3jf@xDuTX0K7+E~P#~=d@e& 
zz*HSomFLKuWf6V)D;O)n#%EQo;TKsG?0P-p5^ROoRg=}+-`44Ryq?i!(>|BAxhWxo z*~HmpVo`-k6rLjtX_?G}QbQ@6v(W=me*(C8Z(Hk9#I*$NwcX9h^IsL5R&*qOXI~Or zv$xhyHFTGsYoKk6p3$Jbh^TY1C`vI=^8eDyv+h z&cT3_WyUM^-6JMp7s}tZ?F7HFYf9X93*Cdj`nHd%?cnGpm2^K&%hre`rK-$M_$$dt zV5aO2NCJ%3P!L<{<<+S30a8Z)P>r zGpd7QHg8_c+Zw{&TE)WJzRFqSN9lS*2e2@qtJiZhCX&*VlWOwK-I_~zyhwx;6RTTi z`;Eg}`V0wV4x{LbFAlQnR~MxPDmL8x8j1{alt3GNqoBpxUbe+jBJExSzh_vaJ9{iWN-klQ4iBPsz?ITa%xOIN?5E&DO@28Jn* zi0FkPj)tyL8+SX}$saJ&H?iOCw#sR6Q$BSry*MivJ*NfPpAHY?r6~z4Q}VYoa97gJ zgY_sTU;C_2RcYCpe`(>H!4)X8M=tM74mHEfEk7$butWwQ+Kr%Tmr>q*?xI0lyc5S6 zz+kG`L<-mW5aWNB-AEpFe2aF4eNTE?^Z*DR;mE4c)cQK??m4!5RZ7yavH@}H4XRrc z>^`-qK?1qEQZc4|KG}>;Px~~eRC*jqR`0e)Cw=zTPll6M+UVSs(-fRBy}5$EFiDMg zmyt`)Q39m59G-};99T#B{ZyX%PUJIS$|kh6wF-&1$rkeO&1%@#B=oa_r{~)`jBuFv zk2I^4h%HEQmFb);YY4aw;?dh1o4_hf$z|TH)Hqf*8Kvgsc`Dco;!ssa0pGT3;f<5R zH@)+`!xalZ8^(;amGb9nDx9{L!(`p{Ls~G;gIPAZbc)nNvnP}AxSQtpgA;A6TaCrZ zB{HTqX4nGr!mgfr$mbi%yOE@%^WJ$(KGm`4ebo6W^ZkjT)D~M1I4QplauG3@SBSV5uJ{%&=-y|t;AzHRur=0T1?rEqYX==ygHgizmQ z13D%GvAcHDHM0F(FxapZF>$~$5CNXSDbb*_u)9ggrFLs2@%5|G`aW%8L2XjZ#rbQH zv*sjHr(c}ZRlE3-CC50PAxQa{{3j`HYuUYI%MB#DJZ$;`$W)uEZ%s9z6fO@lnBeQ> z=a{FZ;ueX-4ao_f)kqfr*(LS89CS(>cRt=8k^-uO-~QAenoX}Uu=#o~Xz=PxkL-Gh zyUoyTJO%X0wy}1XMFxhqCR}OnExgz+*O+9<^#gdj=jDmJpaXm;BYE~Jk%IcHyrL>TaBqp?#?WKC9 z-mZCVYc_fIduft@>SA>d$lp%R;+SOM1jV?K#P&xse;vBI_*-_r?e)B$n9s)H zs9j@*$?g!SZzY0;`|1MG{$2Z`#N}m0V~Qef2Ew`AHiAYjs$V0?==~|re2`XN z{E7L8KhISD_44b|x>mkYm(qH_fO6@dV5pkBrdY?p?}B#;!%XsS`qdg5!ru->O<1*T z^>d;P)W4Qdkt}IXg;?|{2GJ`#Pwlp`{a@aZ1=HB?$PFU{18(TQa{noEk0EEIs%8%Et_ zl7hg_Y-oCEe3VWvhM5p3PD#VlLEJd+|IcW(Pf*X6j>5ML?01CYvcAUFbh&KBz=_*U z>fWr@@aewPD9#;QrhdCl+S;eX6`O1gaNjAeU{AwQea3UR{+6OYtbNdFAYP^vrF&8y`?tS98e38hs06=l` zls$$@oveIx(CE(>7gYUY zLPC>LlAISi za`A#gWDUFY-{{Nq$dTB|nhnY_IOp)7kR7qMDGj1MlQ;&i9b_?PXW zzol+OdIM~u#tfq+g0_#CE@NnsPD9r7s{0E4^EwHMiLlwI3hTRxy1X?4UB|M@v{@D9 z)8RMHW$oSZZ#}rl5oH+cvJh5G;BAGRg`F zWi%%9{#=oHz13Qj)aV%dSTBxA)%$(G`IgPwUOjG#xC0ap+lOa*0|SRrzF`UV20Gmq 
z5mCt%ChpwU!*-rfX?pJX0zp|W;Q}|Q=4cVlNgqB>jn;!Ub>=x?H;?48sreiusN)7q zheM_N^Z?f7BA?{Q_!`@#b;@2I$K@r$Oo)ufug?)}eY}SakJ1+OI;H?f;xG>h z2+?I%>9nhvEwWcZn7-yuwV3lL<+4nIvA+PB!yw(}%#8h?J%;8L|I)eK5w6C02%Y^g93t%~=L|k2Ang`vbQyzuhtcnPI7`tqQFbqdb4d5&6sGT+akI}s z;po?m6d5_RW4Ymv{}3(y_yC}4SJc{5F)PkI7u4w_J<5F?IiE3?zxm*~EfXF@ zFU%uaTMuNR>vhFE$@OVxd?s;P{^)I%_n*$9gW`KCU~TI)@z1&J zrD7LGU)HW1k4vMW3$`Mu#>QlZOz>&P^|fipkfuE6m4iLR}sx{Cd~lG0&=!Y?7(`E=w?1K_tA=YDoYgYv>! zg8gAfFZ3C8A z%ErY#Feex+^rA+qWrAa_=r^`oz6x$f$;T4vqdB);2v!_`@6xjhq+ zqz(<3SH#5#eqw?pP5kO=di$^=q*-d9k-a$hYmK-&g6R9^e9Xf4xu<%<>eR2)*LDk3 z7!YE*vQV!4o`Q;9*K!&yV4pGl#jwa>^7A3)P=nr26wVBXH=` zAuWU72%0E30&<%r*XK4Ivu(jv(XywWe$dRa5l^vQ&Y>a}Wp3fVnIEhh`U8bea6Jq* zF*i*qTh60BUQ3oYu!BZbEuw|qE9DR`cWyXJd}8^sDO5D4dp$Pk)8_5bP#fbdWzK$R z1DbO(Pp+a6EYp1f!z7hJ+oXLPphxBQl_asQEm&#`9D{E*Hb( z_U#qg-d$Zjg3Qn-Hy?H_n&a-~?+EqmR^PE9%P!VuRTsz6X+ODSR{T;>?Lw4H#&3n- z5N-8qbt%kf@EThu_>GYd0GQs@JOFko%^v{Q9Zmib@gF!XZi!#p>7>Lb4F&qlCkrcz zyBN(4rr8Bqijh{lRP@M|QrBZfjDduZ20{x+xrJr3e|@s@=OdBQz{h&}j7Vu+J_|L*P$j`aqYWAP-r#GzB_61N840S zHZf7u(8oz-t2vTI5e6Fnc>Ko<2noN^_0D&NA+_K}EmP{d=gWnT2AU7A+k}6>rq5yN zO$Uk~yMZfihgCXhQbTXln@VzAPBojHBV4nZo1cHv@p?-BWv#K1l~NadfP(wUB?tbY za>2>u{7>lCZeQPfkR7BvHVY!4Koh+?1wjEhj}KS(1j&`$$}!n(&jn=u`O5rJ{wqsW zPoJMuOd!d#_}NmaRd%cU+pVu>V~==*R^QfeqkwYSDrgJp&11>))+B~1i6~-lt!TLe-A|CF7KAr zpvx2y!+&o2u10=Tm#fc@=&AIZIQ9@!#p4gh&FNt!*DXtyzb)zSLUyYYK^Ga%E>a;% z2}+GTMIdKYoY@CJJ}(kZT`feGk6*d#hDJhka+qt)<#i`PI(*v`9OBud??6+)>2>OI zGSr-unWCOgNjTKcP`o-g#~x15&+QU>{->ZRQDajx$3C%s>P!}rHkCU;l@&{-r`;3K zP-N#f*&BPk%PCc)J{iN_(9K}pC+CwfMSyfu*3apF(+1Qz_G~AZUD;whN;>lWj-YjM ziZRJzfGT)y)=F)1AB}%BZQvguNfl;`-{|2M@^y+Ob`JQ{FXxjD-}-*eY4XC`pM-mu zxgB6!{|@$iQC#jc?ult`kfeEilHH2l1V5o6DOB@C&FTS zEOd-qodtsLc5gWL@28~wf4?IbzzC}yEiK<^lP>4%Q==l3~G z$Pa1S>w(PlxSbbGvT)%QVJD)ozmeRfZNfgMSfYd9LE9s`qkhV_C*!q#Z3TN#(x^WK zJ!kfaE`ndqw-{7pk!d6~jhP5z3RS-@(}9dECwpWTW~OP%R=A`Pj|n{wYt&4xE+S`!=ko zptR1*>`CG4w7N#=mkp7&f%TN;+?YOuqCqvtgb}s?YBg63j!Iq9kl|M+1fXC%~hDgTc 
zh6gGXiF5W3T)y(($Ep!_*$3ts_V6HmP!4nQZjj$Z-Q$_58~t}$4}d+u#y_<4pVeoqi*Ol6iCy2e=513mT4T{Z=7YV`0&|4Vkgz0Y4Z+s>U{7ZO9Iu; zCfxw~4Ll-Xb#3ricpD97T`QN+$J@=rN&wn_QS4e3NHB|(TL(ye|Igq5hZ33JoJDw{ zy4?&RwB6uMp*iIUSpb+xJfqLOHp>+MI#vDte5 zl9I5*4RAzJWvYZDeJ`!n!H3i=2&BE#H>gFSCuWs#4@zc=%v3<@g^}}3Z#pNxh&1L* z?&G*wS|K+2hampdYVdUXc$vOHi^rz~9ko+Ni}(rt8gCgx{MZwLLP$X(3WOxCO@jqS! zo45?1Tn4-;RHQ%-kT-dXv+Yo*sYZ1DpE%D>5pC9K*BZ;7ixtiZg>oR1@P04ih0&2h z8h;aT{kv=Smw2>*Dcio8jkHh6C&W|jK*556U0-S2vLoBW)JbnD{r<7eJy;9v*E)MO z=|y83B-3OrD=#=|PPyml`nFsuF7Hu_OFYXaHVF@ISaNtycw|;!LQHy4dW<St$diZ!m5QQ6 zKoL#~HxHMT#erEtW@d~|Z@v3AEks>kC9&|Gl+hnhsQ<+GKW(*e@4xMB^bm7*+|AiJ z+Bb7UzNL3Nmi&v6*w38m!}Oz7-_?sks(qmHjp(+}vaIzlKhsq&^3_|_phM(8gvPy= zXNRxrQua1ck?{NL;P|hLnHf!oC3kbgO23KnwGLP-4(R*^=%|PibB09kFQ=y1BU&E- zKg7mzW*<-fae|VmOJ3WvO9eIOIQI5|`asJ`=pCCyJ>IH?j`UJ`CESep$159U^(wHk z$w*z*O>hKlq>3VpuR3lai+xq_|Im1VGk=@)`K|J1de&yz;l-Dng2TPaj1Fqtd3-Tj zI_2XG%8qA)$A-yK#UnJVyvu91h7C!W6+X>~HVD9LY zqZ=R%v~H`Y7v+u&kHZL3SEX3uST&oaGtI1Op}d~asKKuc7Tnljsu~<`FZGrYS)l;F z^y6C0h3LC|Z$x`$;cYBJ5eHdT|Nf z+%VZ*Wl!m%rurs6U8mVXBg1T;^nThnsJ2Ig|_GEpbHR@p@vKPVSQkr@URV)Ld5~3np>_%7}YFDv!8|y!OH~1 zLw%U_i?z&)uqo0{msSRh?$>we)=}+zQ28=3^i<1n>AD7emVNVKwlViF&8f~BJ|8!*VprmQ5WsGn4IPY>3?Qh#+%LIW!r_WEI z4hqUc){u;+)e3KHjpd)*T2^2rP-U*%r}e#m-7y>!z1X%fl)BSQC3|+;Pp4z!y;fDf ze4wRuBbZQ9{G8OcmVUHEhC;1zl3Hp>LgE^Fq2OUd^;IH;P^u4tnQE(g7jJ+xsu<1t z<2D;?$+>?U;3%LPh#UlJm)*{c`L-B_1YRwFIa%gA`?iHdfpMKI6ZQ|?_7C0M<-EEd zGgNeH6LQAE-iqiZ7rhdm<5##T@E)8PIB*LlgDB;ciF7Ve>%d86$GFo>1@lXz#|o%I zF_gQaPSO!cY%(evrQ6RSGPo1{9g!(kE{1BSV;`0TE8&+Yl5&S=c#tZaE14yjL)R&; z(vPZiHv@f7U#L-}wbOI+~VQxrcim}>$vU3 zGI_^yO_miAFgK=!bbX(8Wp<(aMXK3=t3hhmNXvjpgYip?0wHj)a(}uqNW$L#E#`?3 z-+BT#$5nXegwAp=y*fR6^`mh)Ef|iHq2Y32?FH_uuU(wKXdC21eeErx2==vk&_WZQxAgXy+K<~Z&Z$?K? 
z;!Qlk%cE6bs7f5va06lB_2m3v+H& zws6G{FuH$!kQOgh)e5cnD|M7D&F@z;3T?0LWi0#`HjpZUNfnw@#v3$uN0|9FzS3}x z^1YHskS~%$nwc?;LVX23el&isTy(W(D0L~6AE&jO>YGFiL`TQ{oM7wf=IJft+NUfx z?JiG$7L8&0RQ2qxtGE zBZXHEfo7K^N>G|Y)2To#3z$T=^J5Si@|*W&IY-$Nc>ov&HQS*lRgi>iu+Ok~lng5#8+D1mY@7d^$a? z-_8bQE+W>o#G8W{vc?ZxH_UxsB9ak~=uHq_t>$*A~ z2BGlz__`{jA+(1Y;ayy18Rw2I*o?}Il3K$_JuXfD`1vASJ<^OVkRh!ND`0Z}pYMkG zmfjrPEtS4#pA!o6>=unML|Ex@SeB6|B%z>fDLgB&dC{2lOiv~Hyvv(flvx)mc zz!Kt>0{Z7FDUYVE3|F%fHh{5oP2H+zV*T&+sU2~=y-5qviG^r)@#?f`^aef@N7*dO zBulq;Ytr_qQPto-lPhGG=&Uw*Rt}t_IjX$fV?|O+z)cT;wdc?FThGf2T}J~auKnD* zFI|^Prmm-S<#dIy6NR^v@+MbpPVmSBkGuP=?yMKI7jD$m)zOcAU%C)a&!K%eHD8nU zyOO;pd;o~HsB|JlVsrh=mR)_(7H1W$=>yb;n zq$K$qt6k_+@#Q&QmaHYt#MJ>%$wABY;~BpX-s}&6DgF*fR!7E8ed*m}G%V%bZo6G> z+~r#OD%>j`DP{wB5`k-b%X>)o`fEI~Gl9s|Exut>Gl3W!;m6JW{h@|GUHxWHK3eWx z>)Df?D*1vfaxdge)faX0vF*wE(_!)@u3_pu0-()NYypuB8l^slLZn%I$mCDXydA^d zmL?DGyCvd4^kJ_1{1gfVYHW%e>R5hTxp?#J^7tNc zCt`hc<)3tM_r3#r?K)vK_z3X5k{}jBu~~WY{D-mm%UaO!>a02OMq!sU4a(4Ac0mf&90%$=u zw8DD9t-N}U%S6NA)iM2h%a`shQmfS)Ue)0_i(hHwObG|H|6tWTKOI!_vxHqgF;Hnl z)Yn;#jN4l#z~pOh&+IewXeSV*?lQd>(;H%$!!Bo0pFH1M63P!YQVo~NZ9ij?4T$}T zq%w>_u&lIy`)lsBoNFXK@${^3LCt6?l@RWvpdgv2#QMDG2TQ79*Wd-Ids-oJNTUs6 z4}jrf|5^Xp$K8LNrUtWW>_shW%3agK7>QaFK2TON}1DN?<-Q$Nu z7_}E}_eWWpASCl1DiGtu9_AF|E;1VMM-zYA{sOxffJXI8_8$ENap!Fy4eXnLhyL%n z&LOqM9}k)iqi!&``FZSs76frL2>6nQ{h{c?7cYlyOE_`@G@Gp4f#fuOSn)pO9u@Bu zKg{C%DeAvaM6t0|7?_)3WoQ%VcK)Qs$;F+#X^-7%1H*fr{Is6x#$v2a$7p+fdoxpq zR3l4Mhx4n{@TB7_=%5Y3m9m>`CQh!o5dgqDigcm$_dps?{d(NwBYj-B-#-?^;6w1+oi6;2-j*eQDk0~htw?yM0DvM{c!S0i8BLk6FU|84uauFp{)H>7CITptTj3LvRc&9h2 zXgavyawQQw3K}JcM1EG8oj$fjQo&XB;6Ie}|8C18?@zglXoxuv_3*#G`T=NL`MceJ zSHY27Nx8+}gkN8@#`{OrD*fB+`#?bxxg>w)@peB$F|hCopQq zpte-VOE_0IG3}-0;;~bVoVUM~;lG(DMkCzfXJ2wn&c~ED!9gU_1DcuI*qNsJEQW3Z#d*-$R=(7hs-oLL z$CqwWv&(IXiAZ&_Kg`&%=nhv0&_$)?)5;;^y2Ge2n3I1B|xy6+`Oi3Q{9#^1av|Jz`b`uwVV|#vSTbo> zmS--zk=sB>3_Rx!fHWOE56L|gc7kOo=4LBO+1US>?7H}VGV-j=!O4sf?*@fTlXX?E zw0c-hdT%H%;TgB@u2E6{0XN_eL?W*8=8&VSL;9594Bzm{#jLk_QQH;1{<3BfjA%Z) 
zc}i0#>}({BoOWb^dY1}(Wb^=VTVDf(N2%Vel_mRs{tw5gufGH}@liHpjXj^z{`A{2 zxf|rn*3s1>YF*@=?ZX*K^1wj~(z2*X^$* zVE?b#4LtyUcZNELrsNDr_-uW78&m%l5cRNi1R;MPKDlL^d%aS3&s>R-stGqH^@k&nc5noShf+7 zz)Hjr4RDR0UY@2OBt(165Cw<`%9KvKWZ}nVmtHvwc0ry0x0Z@Z8Q$niIdWzsGiVD< zM03mbgUWmr)z8#xGm#+s097xY*ER8v%`MCem}{M+8132|0=MFs9J|?ry0lq7==|fG z^#9RYfb0JFE7PtctAA|b>t87li9-X-WXH@`vxY37rgZy^;dezHH0)XukFYG>>zOq{ zYBINH$iDn|{x5g?-yQf|UlxJZJJr(Q@q(*Rk-grU85_(~TX^jG{G72SPC&-p{YB7A z9~<-Q1W$rOqv*Ur`pg37l1TiE^|!R&kcy@l`-riklWWVV`wqp~$EB@Q%Gaj4zneGt zHBGqZxISr-{fwl4!dV-6nI^ay`cpRsjk39LLk=VEuSty|q@;z=ceNq^@01k0=IWuMaR zt}G+UCr!8=>$~tN@guKU@c(4K=TGml`6L~^-wnf-@WqH3>_SLHUn3|h_;aRhgsV@j zestxzWeuj?ob9_Vr+G`w?RK+9T|Ua-QS{8GUK?0A?ZSZ!+`llWt-Ju?ikBJ9F|H)_-y>Z=JMOSz<8l>aXAp9b0(5hU2S(ij_3S$or$dc zwG~cf@<791hAEEw^C!~x+!G%RR{pWQf31#&eH`_+Q;~^0_N9sqK6~Wl4EBJa!2KF+*+G7L;6)*H1M~S&z)#41lhwIvn-f;ssFZDqD{GrUvN26q-aly-~kXi;mx5w z!R7ul{-#=bdDym;v#hL8s9~;Dss#7-FEewJ?d>-<=@q`dAif`ZrjP!R^KnAz+VI_P zapW8sJ$-YW1}8toTU~rT*8%d+?pp&_>Rz;EJm-@k-F(lT@|x~sSHmQuvO(zxoC%#Ka`W9z{TO*A zQqg;Y`-7n)_tBoCs?~`7`1KmgDt<*h!XKa6#~bP?Qy%CSMtQzPKjZw9Wpilu`o61z zdfuHFvmL#cVne5R_1RFC#O||C|LvXgNX+krf2W^*N2M)QTG4)kh} z<)u%OswHn9+s@<356EUEDuA$sj$+S#9oy{pG9jTHuyidID8!aAt{#J+Y=imYg*W-Q z18Pn!k-6YP0kv~e^<96LOVXRatxf;UeC6~+bRN%-9D2=ir(XDB$-us%bL-G1F6M`n zlqC1JuS-0Jb%l1rNzwF4209oWr{xpCQwH)2r0dR)pZ~>a5lQ=*kO^+ur{}160;gVX zP+mB44Scj68DY~FNWEIHxr(4Avn35o404L|niSQvn*$~>21SKFmFQA;cBbuVdS~6e zdKv1Nt=h~#8_s$6U)|Hb}d#YxJgJb5yM9t*=F4f8^Sw256&m=#Cc2t<1T`1kE)|jj_ zk6Ic>g#ufe-Kek+J<B3^O&!~IY*bbF;M>y&!aM*j>=Pio|7D^<2Io8+^;r&%kLNgKmXgX{>L2} zpjza_)%gTG)yhMMA}XqC?VvFs&{uA3OlTx)0<5>YQ3qlA3HxdH=0dHZZuwH2kt4Ln z7@n3#g5c19ayBX@VJiL`6as5i5h3Jzt9KuN zmv%h^?vVG_@{2fAoEfY1zoon}-OsrJ?x8LiuK0g_0My=&-7WtwWs3i=y{`_6Yist6 zLJ|T52@(P%xC9RnJPGc>-Q9yb#DL(zoxzFmezE^ejA0xH*TB~=j-MxGD>i+eAzg@nL)#(PVlk!uw#Q6`ZdzWj@6ML73 z2e-DS9uCi-$M=728m6c_#2~un+9fsgbE94WALR>tO{4yabHwbdXsDBF6Qw30h%L}r zonEfd;?H%!3G*!)r9zIV=X)$^Zt-926s1R;wX?+zE88sg;@SO 
zE&uuduTw&d{Y*o8hf5dt^=cRU#7!|PElHy1=|j4&&Uhs(<<6B6F+WB@#(ec|KEcbB zRL28Nr(xJn|4GQd@+)2tx5(9^6{tHd_Ao7K`Y`34!-1;LH`JvSYyr)j8i)vK`_`ZK zbibI?r5-6{m31DEhl_ds>K|6b)@6SS$6483pl#zVEDoNp{S$f9n$}?I;G!h;>$3!+ z6riC{NOwX3nltvMZY*(?9v96YMwaeOE-6%rrJ zY0`iot5wliUGA@;|KNx$skB+nP)QL6&KIL$y8R!Yc-re)iO(IT#xyu>w=WDI%$GL3 zPCYQaTjpimM`k%vJbK3BQ%?0>!^D6F5*OQCa`d1!MIcO*{kx|P8P@X?rQE+VasRmX zuhUK+;$v(z$}cRhKCNqV_@#Mwyh%w~Tk*`$k{dW&9*AOJurV%*vi4>=V%ECHo(xls zNuPW9@k7V2f0g3@bN!Q$_&#?pAP&fFX+8C4=u57)*F+`b`{4SNP4rr2wV?6U)ggu4 zsU`H)rsBxNR(-|-bt1F$L-V;0HyD+B{}>d8!x7xN;KO?UaVzhh{|0x_ltg)+;Fg-_ zM%ZESJfI68-tPcLo@_2*C;sF_+VsTDn~QagrYg)M5@| zmV?E=jhN#=s^tY?Xq!1y)Cjt~&U`P3`A`Fr_O&=`>z)uL zv>a%gWqb699W$hoO7ck>5kpDM?cuy7a)PV8dFP){_D@dTp_{_;M|z@{M!QEiW@jP9 zN)EI(ZChvdL{ubvM7p`fN z1VDEH>X+A2=8sx_gN%2F({P^A8Xr__J*I4}KuFOC6uae+*^1Vod$Wk8U9uBekO(&o zu+;IGG?udf>iRTlT7A>vR(elf{15l&zkd96uQ&OAfw>uRTn%LRA!>0bc=@@~$pP=@ z;c_c$5w~BmRQn#Oqy_@5vHo95zorju;lY%QpSSRu{Ye4CIi)wM1$1|E;^9BQTj(<& zeAcU4)(6CHp6O1@C<~wT@Hp}7KLO|F;)(RIPNLHHKfH~u{%VEKR~KV-`{k$qy0W|V zO;Q-EDS*qk-o6fX@!J(`p4Hzqm1`LA6!3TR3Gx zsP(!8zFN2NeR0Ee_OBm*l`D{(WQ~-n%-{9uOVyi?R&-jvzZdy3uf)yEQt{L2Iv8U( zF)%edk-GF2o+i5Gu$arOCG4H+pZ_|&|7fApSiO#J*E+<^v?uYx?qzDZ9`Z=;v^uS` zJ~8cWhZ`?cPYG!=ac(FRQOPq6TRblKH87+j>sz29rJXTvXJZ^>A4ZCDuP2lNN!?Q|J<|1Q!`qDECP^9s?h5$m8orm3Wvpb1P@B^9v=0 z?LGhod=KB|><2V?U4Dc!m0+F?XW3Lb;%mW5Unz|*4NTdUmN-`zT2?}33z-s@v-eG2 ztjNSpV+)Yr?Op!rbUiP;#QA53zf%y353G6=TXI^QX=IeZ zn-dOFsy&vC-NUacSlEX(Y49F;kBJ|Y_{*zlUf{sI*s279xp0?_`6+gi_|0Ofu9p%P zX<%r{S1fsUFEmD?Rt@6e4BRkIhFTXNvph1!v;Q-*^q0xA;MKwB6SIrXkoN$Z&6~)} zfi8gh8TLkgx3fq5Y*x|?e$knAp`LJ#1hns+n|6Z6@qU{B8G>=!x>|-la^jLMS1d6lZ}-WNm+LOkAt~ ztB~w{ToCi0I^fv;$I3-P?KkhWN{7vunOposnLBtrZGuF@HXWMc`3piu$jfc0k0mm? 
zKuLCZ{kYxUfbgF${?u5vH~`v6QJb^$I57OwPVCiU;r)R&j8x^k;?JR%6^ZkcaDuOa ztKewTgh<&=Fgy7YkiJQAzM>%YVG@h69rd8$&p$UTuGJ$$#2>ffc_Pq5AR)<>xy8iY zP)l^>$(X$}^>F}eMV=O*-I<0dr(;FzyAtx?dyR~(Z6EIj%!tn(Q6QqnB+E3_rTFRh zAGZ)Wzca2MFXQ6q@GV1opWvU`1+j!m3*$&aXCzdQJw2Y){Ye@+&Tqi;k^pFKxKT&4`r(z=AQ2Sg&=ekE^* zRPx8Uj(Z~Fcj|%jse2%z?fF0-7#BX>iH> zWuK;S08NNYV_2Z)sqmYdTIY&tJYoH}2r0cYSm(u~*RtVbPH|r^C9CIOf5$iDk@841 z+thU|?714N9nN??`SOkiIz()XYZ)5v2eEgmX}Ktni&mel5o}t5mzl*Xu){Tv%Y6IctbkQ6oM5S$f#Dso5E1fi|LPm72Cg}?aeeXskwMV7x>LTYl z-QN_lTFF(Fb~>A3lw_{C#H_<5s5Z*=F5B3( zZpO~I(u}Z3n;o#jk%kb*M(JJ|x^W7JAieWls4^&iG91L96h+s&|^>Ff5Noizt-WgU<~Xe+oSb9!{^8007@uy zfUvD6PciUm>lX_;mAH+)Zi6+S+aam3E(sxSq|HH{T_*-M{NDWLJwbe{sIVYsN_(=X z2Dj}NU%ql%g#NY4q8R1Aw9|+YpZXYZZcezZ=bDi+QvIg(!YuuwX6uHvM}8%oqQg({ zzzGykt+oI;Je$4rHv#A@RyZ%?CM@>MIaBN0Ky);IZ!os8$INOc-f3j)&@Zy*eg|H%y$hc(nzUf)g@Zz_(05ot(Dhe*4mmq2>7bq-Tv} zXI<4P_WiQ>F+!?GV`ozNf{dL2drMkU`M1OBRJgFVl#Is!Pe__88&&jUa@C#_P=V_* z`%}kQT^9eE1uUh@LIw2dDqvF`SxHuwR?{NzWUl{hpzHkaleMhDj-M4S+(TP#vz$u| z*-C__z9()0IJ&iyGE?hFS$8r)b{Z?lOBFRyqqSz&O2AeyfNV~aJfC@g(HCY!nH<|I z%;kL&XC_ot>7at5htz1?v<|1y5E2_y2k$(!v#RYpbM_7{2o*bVE93edJPh=t+T^t` zr9QChx)Gv4Ko3l#uBr4qSk;uRX->r4_sp87@}7+cBm_gOEKH#!$4SQPN4q_Q9-Sk9 z+>$tud}jKyCrh}iCfcNL8V1p_Iy_r6;Xkv`ZY<16yF^Uz(UW3j#YuKS%eGxE&8@d3 zQyr&XU6otb4uyWcI^+nPKD(EP4!N@)&yYFUREnk73aLd^#BQVx)#!y_5t-PXGyQD> zLX&Lx4A^BjK@@{4m%W^i%fHvj@32Bz*XNUo+@12Er1mt!+i48K<-t5#KCaY|pR;pO z51KA}ggXGiYr0<~EVP6;3y(u_MQqct+aV zFC8FGXy?5G?#}8QGLH*6I9uAdu1v?`9E~en(`;8mQ@MCw=krK>=VGb?t`-?_;6liY zHXna#$keHn&CwW&78a(Xo1~R-ZGbd9+Yt;aM#9RekI{M{wgdI{$$A3LgX}y|OyG61 z7UE0VZKhXkzA!x^RrT<_0MxE{V=AE9Ugjip30{)Br264=RF$41AfOHep3AuOdd{dV z8OVQ;XtWYI^U?dUXF9Jh;d#X`0F7ItN*aZI=tKm>yI3c#$LK`b)7cDH88a1NTA$W4 zk0m`$R-by>AUW0&X+-T7zYWyxB0I%nT<9PSGQ@=BhawjA(mHl`43w+IEF}^2xckOG z*nf>TbL!0=4?<c&FQOfe^)Qdb@p0f$FI7`=Ue}*A#0-E*!7tH9Ci@&~kPcHU5+QkwsoTEo(@Q zO;fz7-tnM}YAGdLbcsJSfg{x4nE+m#*sm*-k$Aa33|_2ix#%8Xyn>kq-Ma>i{^%$< zc@R2+BosGvUw-%gL`uZtY-jcyn7ddcfx-!y%Q8#gB2%9S9MH&Ppp-ietnYPk>&7%J!Ams*TaP1}KIm1d 
za6CDoZ_h$Z%#~b-?pYGvgz{e`YatGZ)`!;z2F*ig4`hFa2-}=8VJPj@Co)WGXiXwJ zMXxoNXvk0#C-VGbFKDg^YwBuL3vS+xtQ7_#Na|+JJEE-5*?B+oi8bM*%rs_a$aHYA zt^F)sL3N&8Pf;}%CuM?|%z9LF#X7SnE&>U-@HM~i*l##F!0i5;dSUm&n~lNpY3+fB zM)Ts#!@1IeGIBrmXbmpgD}U8!h;eY#o%Ijo1y#>JA{{V^%DkMdX_~C!ys8pxd`F+E zftaV?l-N=jP-T1~34623ZT5>$1{kp(Y;~zzq1QNSno@DSQ(KdZ(g~~5Vvke@Izw$> zgJ#)IkET2C67wRY!ggh#N%SlaVqv+sN%L@%e|J(Nb$B*Mi*NL!b@=@rF;1t0gOaWz z&1z>{Ij_sT7kosO_WVRj5+c6EDxPXe zWo8n1VclxcDM_gpycEDwlILKgl!q)lt9!LE*EtH;;0y&^cFqXw9Pn>(TxR3lUcr!= zO0~I-E$>}fKCCD^nb{BzJ0rcyr-nvj4dqzZPFc5O z{byc|^n`5POj{f6s1zM??X8D)h-^Wq={s*uT55P^=ZUAT+~Ue98AlUxfi%Wo1; z0tqg;!)R7$*CbLsIj*iL6hhIrZd5AqQqYEIVhy&0#%jZ@HxW-Q1Nz_F)Ic3`;^pjEkK!3Mq}o?q z$<-0t(&Q~d{L~knbe>6S%EFI@;bz3CswK3ZD`4LxcwxXUehRpP$Wbbg4(1XaA8BHY zz6r%;Uuh52^!8+P#yr0luu4p7Dk<)6fYIaT+xp|y1cYVyb4=Beb*D~~YtQBMbMY^U zbyXqij(b_q2bk25y5#d}(sx#uSQSsosat$aa(=H59A<-QL}uIXLYG62pT=HusM+G+ zi*T}LmK|Kfd2$IB2?aYCxi%@f9s6-q-jgVoE0L$ai|io(u={+48$$oR@C{zoyAF;6 zcrXmIqEYE9)rzG`YSV zRyQIIy&L+uLWt{WTj})kcdv!pcb@0)gl*?4ex=8Ij3L39y!VG3fA{LPUYHt~sf-@2 zwnFw%lQ-yhR@I$lD$^mSo_0o-X$;g4Gh^AL^$8v5Twi5!5Z6>QvY>aOaE0m*pc zJ2g0g6Qj(tZA~j|2|S?;oQu^E@B7EQ!BOgs<0~dvpGrU@Tb65`n%i-iJTJu*y5x z6>o*KQN_%ToG%7Kk%89({Hhc~$}wBx_pbIs1s93ekcIg?kXfu-M)C2e_erPff*>=g zosCX4SXblZLQ`F^p~psac}P*-00A5+_VHfKtLbC#SEqjSxM{6t2*ct3F3;3QogHds zgEHKO4Hw!P9@RLL+mNOIhGp=T zA3O^#Z>sJ-@IX>DH`o<4xW*PWBgFWq@I~~iHf__NUI~V@BIUWpQona5I3KK9^OrFc z$cTnL#&prC`ArR})z>-S=**WHASRabdaSkf>h?Kkhz%fq@=$q8j8@tZt(-hmIL5|N6 z3@+xM4Cx{*Y(xcBZhw!Y@K9t<(J$K&VDSvso|j4lM02SYU10K@g|Ay)R$saR2YoB8 z%^PwMQ>E;?(Uqg29!l=`=)x!ALBoTCeX4ac0x2}K!c^)HDl$fB)u30ILHdqYFk9CV1uJ7jlZhK|w6&qlN_L}bR`T`elAwlp zr(cFplDkIIEB=;r=m03m+D?_0*E2PKfn}%Bm@g>Ww<>LzBMq52R8nDk!al5J_2#F~ zEx}pjaQ9e#0bx#;A3C zc7}$_yg{+?d!#CSQadFi%c|W+g)N_^{CDK8r`3I&^^n#f6SVw0K6*4uemciM3dG@XB~Je$pwgyy724gO7+`YZcZQyN^W`J zeqF;u*wHe8g#2;qO}}Tn*?7jX${M!$B)LCW0GHGm|AA8aXUIGMsUh9i!11dtX6KaI zgEPdn>&nb<)owmdw1gx79a9YkS#gu@5?eVR-4+HUMmmdSpmhIJ)~A10oyF^>9tzTv 
z4!w+BFk5|mP7GU4E;kKoYFax|op=t;Yay`wm{L(ZuHl|Yr7JR^tE#=^CeeX%cs?rR z)8$t4OtEiC{dKm|gof%CnRSv@Jzwr6hdC~tcUD5HHIxWq+z3#bn}o=&@jF}$EJtmMsN+p&Wn|eYN;>sLuyRq#ppstQP(lIs&JFtSQu_f4 z%)ETF34$oIRh>A!4jsm)?+K|>9=jCmn6+=8dT$R#+BKqGN?%yc3sq@gp1eYIIM9L2 zLafH)Jh~DeKRRF~``rGKsKZcGh=@bY(dNj0(C_j1*cEKK^;2T=i;aoe@Nl0 zZLB7LgrsK-c`?lM>eFlO4VeF-MxSvt zP6l<QQIo>rSd;;@78YWzi5c57+FqL%kwFIo%~i%?O*|eDb!exmgMe?E z1zPbz`NKvb7J;nW=wKxoxgjs()pw6Ak9!;y^^X8gb zhHE9({KQKdSbX<~muw@=9pgNtC)LsGE_*TNbA>*1;WqD5^7+1|o6g{1}9x7_2+u&IJ}0Fn_DBoBhx~)_fuK-sD(n zt)(Xrm2rXnk#|aO;F%_M)TTPmL11!QmN#6>>^tBQ!eQcazi%F?xpi`-Y+vH8?Es9Qnrzta|DnJ#OIQ_b zb0*01XsAKJIx6cu%V(t}{$J(4n)`Y0bhmx><66HtU;J$%>2&taqelB(S7Q~=xN&iv z<}5tW#r>-+W1%H?uP?!yZnK>_s7<|kCaI)0S|M5|2WL+bPCcGj!}JP2^IL<{$VHYp zQK%+}b)@)}zIDmL^3a2Xd@zbxwG>-|y-*qwVgdPD(f8C|3Njww z66+C+9v0*(?E^0Mgl3lPx<5D^g|s% zJIG9@mP+;3hVa(Tq*PG%oKSm>p+S1JSt!e@g!Q-u*>hXw&oQ#)Ag;}_Uqwa&(_wQE99v9UL+xj@Z>*YJwmnhQJ)LMYy&%XqEj zoH08z8gDBAcuP)|wkq8BbQ?eOk6Q+(dI?JwW!3l8Pk3}@n%b$`m91Ji?mIT*d|{SQ zKoa&Q&Ds}5^?&S}6j3B4k0hhI?KXvE6^WOWS`BjDRVF&zG$FwMzQIrxglRxd`aUAg zGSqHSQH0Eqk)&piiZ$yS0~_khKwe8QO1?*l)(p%FoKA0VxW)ZPv8nieC>ZR}$H>{V z7wEijS2%~KX3bJaHV(`c8QzOfnre{M9B{CD(UPLN)hf$MkhfwPWVs@3_mZJ4B1Gy6 zupVDlaUBS5sywLi8<4w)5KY~w*ZV`ZnxLu7 zg8Rx(BX5GoO1IRXj-pk0UjJSO>#0dH?AqN-)ZbYOvhi!2pyN16otT+tG{>l>&@?qx zF7>(ByJ2laH{9JnCf0Q8nbA@Q_;kQyXYA2!q)z2&s*9}}lNH&}Ej(P3g7&|wj{Fb0 z?w+?eFRgjci@l@dGGVB+p_1_9WyZsXG z#Ved6Z!fNCcG7=~JXtCr+EK|7KOM#`a0spm-EfVpQ|+n&XM_&{fQEYTf$fK$vGm6P^9rhnJJj~4qYP5U>M+b|9leu%KIN7OZDR;z&3~&LY zg?)Dq!Zo{p@pZ;a3FJy#W%UWjXEr$p&?ro7{Yc-6&C3$SyZEdMu@e6=G%s$iOUx=zw2&d0;R7&x3G4HICvt_ z4hpG>c>B;)b9(0+<}69VrGvUM)2i6zdBX!DP~011a+5#d^cwb*i+Ssrr=)5ajFAlN&? 
zcI!Z5ZiBMv!(l<3!B9ULU7*FLJ(~TCgT}$vZCzDt*k_F6a=_$cN7nkYjXUc396?E- ztmW$9hnAafwsrOiF~V6g28MdkPRWJIiR+f#rjIwQoqvrgZ16XIq#g+Iw?kv)dVee0 zZfv9Ps!Vq1s8(yw8R9~3%$k>kQxmEf);DTxvtE>uHz6CU zw;{|IiD&Br`D;bAhNoKkCRum;GabuPmPXhc+beE|Go6v|alV1G^E(eq0G}&}#UunL zYcQy;E`57@V6lZM^Vv#VlIX^wLJm5eQNSOS#z-AsDHf;eDWJ)&@`hZBO9utmCU)T& z&Ngk%iVT8f@8m}%D`%$&J0pW-GE^6MaY$-l@hJZhxTHn=+ELsIhRRKW0G6c}on`f@T)^QHC*e!$ zGSV5n*)eLV5ywFrYl>sx^7f3M1`*z;Lg0g^y{{FwmqH~PSgfA!#%XOV(d%e{9xl~1 zZu}ZGoXNZ>?0Zo0WKTP`s%q!x3)6}=6~EEO_{X(hP1@|N`B==@Su9JdaE;hzOV;C7+ce0gc5>d3k2cOZy>$*0|7gH&jVl5>+I(C?v~6d8cK74`DgT;aV0CtrWaJ&vTEOwBqeVG zq~d*+C?8ZLd><*cT~&F>B%fi%%4=-1(OukFDV`;d!H|(<4lQBK%jtuY$7$4!_MW~{ zUxQcZF=nTJfY62T=T>~^^J?(&+_0s#@u_;8RPcoFIXpB(8Mi1In9&3-Jw=dVm1Zj+gSd*7> zYBsznoYt?!3elR?^H#|elmujS^>q3%$ zfwl9eySV)+pfC$OW`2~rxk=lQoe!SH(qj|pMEg# z7_fo7Cm2h1)_2N`tuqLHz__{wOLOWcV9vMz&k?oFr4V88g_Ap>hzAa_)i7kr51#Ur z%C@C!Kd^*<%c3pGNM0bub?~6qWFF5kGDIa|nB;CAnbn|K1gc4P;au3|H|7KQ*J#rk z4DU#0f;+<@YIYs|`*IhnUhTBS%hw>L4WWAAo6;HZPElQP0Le zLz~xx-&z&K*BYH0IV;8mMCYCE`P7v;L2Z0p;Oo@@8(7l}m_4I}T7H`; z2c`7mx+pVa|0hUMJ$LW@1BA=Ip2S!^7{pa^3Ju(@{>=H5>`k|19kFv>Fh5J{O!uT( zZ6)XhQZ{I~{lhD>=&L85$eZ!`JB|bE@FobLzuTsA59YHG8_OdCJns9F@Wd_l{a1uw zM*21+HnGr5qdb{U6i4Ka6r=RYv}8R!ak>R0$HFWtzHrlBkQ;>bJpODRjQKAZXOZMK~Arv zh(l@Fq2!lRl%5C4_Q3qG_Q&DRgzHo z*27<{et%l^{q+rr1tgS94Ru`Gb4v~CswM=AW*oMuTD*;z@@u`xT#LTy;mZnSH^V*} zuTjG!JnoJQyDk-aJ$wC*qS}Zd%V#vJNXDq?9jV3Zgm>h_+LN+O&@aiw{k+CjQ&h*d YuRs0u^#0xB-yHax1AhSrLjOSgFE$m>^Z)<= literal 0 HcmV?d00001 diff --git a/docs/source/assets/logos/vllm-logo-only-light.png b/docs/source/assets/logos/vllm-logo-only-light.png new file mode 100644 index 0000000000000000000000000000000000000000..7aaf1748725945c4616e838484fa46d4bafba46a GIT binary patch literal 54209 zcmeEui93}2_doND8cVVh%GmcU6sZhl-(@FTmMkGu*g%GsVrM>E z-9*6Lk{mnV`Xufdb%l}9m00`_E^ND=gpi26on0$gNqmbo74F+EtQDUyN2?8)11tGX zjH4I%gHG;mjCOK#b32PuqOmU6rGKK^64A~w_-`MyBZ?_-9yzsG?3bjN%(z^#>Dus{ z+1X(NmxHkwEhAAa+{07B%1(yPL28%J^Ge+@V%|R!GkcopD$UiaJJ@hTw1z7RbetJi 
z9bD)fFrjp*ypIXQ`Zzp(c)LpLokYAnp7x)A){HMErEvdqA@txs4Rs35*{V(k$JmlX z{TM#{LndgiW!Y5FjsnZcJDF=b>*|t;z-J5@d4ww&8a|Q3|LpKT85t@I_4{worCI;{ z`HcLde*zvuMusEPJgjUSK>n@YD%M&xLFoAP=h}xIFYe~gt+*^U zrgbuDQFKPCW9FdbjJ3g=oB6IX$7O^#ZU@T8RG8#uMU&x;vX=NKlZP1bRMA-e#zz0A zbG7wS?s=`x-?s9X^Ct6Vak8mf~Iu;KmY!f9t@kaQ>VyfhJV?D%{g$(zjgp! zLNC?&;;sn;|G#Vj8$k1~oBp=}e@y=01|aVAUj%GK&VLX@GR1!ov;pe>z$*ZDIM)Q6VJH0m?ET6 z_-%Ixa6cyX{3l;{|AmjS7My8*=YGEVt~4Wo-$%vf-1T70O<;o+!UudM6(?uoRQtW{ zkC+Aq&-jR}wRo}HKX6SQij7}>s0SsX@8~1Tzm{cJh+289Y(9CElXGHKnPzXfn)a~m zmWcW@^w_D`Q2TCumO7rzw;)G_VV<%z^c1NCj9V*N*D4N(rzOf|Bl5~T8e9x&zp ztGH}6B*uzL*91M{s;f~5X6PTL%Q#?vWoW^tFsB46;P~piJsTtj`H+-K7_8@@d9X$F z!NC>3X{Ob&D&_7^L52Cb@NE;tJc+-*eT6V=CB1x7>giA7*jKMLNl6{9Luixz4{os|Mw(G|NmQ-lAs(+na zmXu_7%r;|wKn12Z`ErZ>ue#XI8CAVix~7c2p+1D6p-t@r2l^r`o! zI+zo4o}-<`?2_b;d6)D4b+7#twAZ#+Q7Y~95MCAvKU3ZK)_de_?BGWt5o=u^eBgcU znZ5aig?hW5$D_4`?2=5L+|{`JmrU@2_aztS#o2*IHk&nUc&b%t^H}|UE|ns=)jbXH zpZCY6DDS1YaW2MjziOr6acYza9avR`wQKV-$WhG{4p)c1nQ_=30=j0BCeQvVX3v3|9pQmW%Wma;+e60OQ9ab48VssX{AFvL8(zQg>C|XX@WQn7==0l+ z9J5pN!S!n^BKbpZ`9p7&Lc}E{BZk%j>2qQbpQ*af^0yag5oabH78>mMyYCI^F+1m8 zVck+qZ5uq&%pIOr6uz~3dq*RC;ftX6Ei4Vsngf(vAe_^x1^xf}5e=TH*@A@?zbD)4 z^cXyIv1>C*Va2j($9K-K(>|E!uby9_a}RjF)M+CsDyqM9DUaJLL9dN-6|ZuAwzZKDj`m$!UGX08Ei10CSMo9V>=dOld|?MA*yD}Z z{l8Qc9Q$u%30V{izg?p9VAZg_C)OYhQ9vR=x8=mz@)R(viXs7YDr=w^7swV@AI zGBo75EDg1!2C@V@pD4L7a6PX((Bj$sfFcnjiof33w>gRpj^I0f0oLl=i2<@PcSTLA z*1-#|KMcVB|hrG3iYGqw+C8kW3yZ6}#a<|nr z$H&JTy%kYF2?I3%a*LRnllQj_nI!vD$$FgpI$ zwwTR16<15K`Ycg;9jpp68(Q-EG$VKIT0Xd6rsWE*3!W%34OoUprM&!DEf8({H8mZlNkCS1gIp%1P}gqv+WlwW_&s_aEm z-skuuRyTXtCR3iB>y;WixqF}}1-!OvU+%8WUK?lr$1SWs@VO-Q?)dD~X3$rV%L5P| zua|paGuZI6RQP#$7cZ?lL`r?1*$0Pb8slM}DN1kUt>epzi=~><&&A>DV1F0q^hP#2 zlTa;Te4K4yK`6ZbEPKNZ;cng)OZS}@KUcN41fd)EqSfH3*vXsKH~R$6ZvSSdARe!A zADjL4T@+;8m3T|Lxesg^^VECK8NUzaFw%#z7d)Esq_Wxv_w#($X~gXe_3 z_4yLRxtW<5_~08J?|R}7NiM>QYBF14OYSFT{mg+YwXy~cCKe5N2;nN&;;!t@(Z6g@ zj#>$(`NL5Ftv)%%q?w{qP&g$zhE~@2etOGg>5yUj%&|koN(TLABEBc 
z=PN_Sy+gLUfAAOxLP+lOwJO+bMGfrKTT$udA#0_xfxODaDd7Qua&1GCZO$@cl9F$Y z4SRfY@zN9r>sd$`<286K@aFY36=LJSfaE~xrzl22JsqE9cZ{Prv>$wz z`1ChXkw+PVv@n3oVUeFYz{4Fe(&*t z3m5taxj3hGx%hUnzyI9oRy9-7JMOpbX`BYp5FSeXz*+Upx9O#RKX7d*>bk>1%ROn% zWC#9`UEObArzLi|m8B)d*C?V!pa&9vDDjiV=BsvKJjNm42$J~B<5x7E<_%V|9Mb!% zV@7JktZ3X9$b=WqK(AsuYV-QsB#Ql|*zAt7(Ucs#X8$>BgpDF`tvc4q{z3AHK)8h~ zr)sl4q}lq6=D*UcCYpBZ8O-3KM{)L41KY&70HyHrBj|3%jKr?5r!#!^DhU|q!jrkp znV4+eu7G@8Jy*k-3y1n`NtEaMN)i_n)?46Jphbgn_md|o#EYpR{6ODX_H?@`+y zwHZdjsI05!iFWhzTesFcQ<>yjkVn;ze&bh^1js)5>D;48u!qUF+>}jFkdyv5dYFFw zSzlKbD<&9GFL;|t=m^+ehePnh=JnA~Jeq^zCzfqs;FA5?B~1&8cD{X*dD8OA!GVF5 z`A#pc5Oe?qoS%FX*t}&LqQ(BeKv=h2$iIH1D&EMRlscU7<#*w2ASH6GkacryjAf^ohQj zLGb&we#s^b$r>R+$TnKvkd+H;o}OKtyrt7`-@Z;y?D8y^$+M|KzXl$gy_fmMW;<$9 z0NrQ+x?w+~_qzCen+CCU)GtPHTgMBxGD}SdN)_z?K|Dpo5d^8($L4VE8+|e?6dY=X*9UEY1AkC)D#Y(y0-RF04ep_87 zhR(~n2aH%p_pJ#PA2d&n{~))2;oQkrMN`6Jjn8cfA(V^r1{9n2%TdJpjAFP_2vb9K zW|OyLWV|aYSLU2iDOYfRPssAtriIJm;m7P2j}Jqp6&ZA(*u;Ac_TUR@*#?&R!prwM zyi*df@|)J=pL%h>;HAf%%iZICooI$I-2Y*Y=m^4qGL6@72*F-Yb5f`z{B%nZ4{x9e z@1v>3&;=k(Tg^>LJrwsC6>;If#mTdK-We^})A;*S*CkJ7(+4~#PvlD+Vrg(LP3B9$ zqI2OwlQ4-~{GWZs8&Ul@Mzf|M|7K-&M*96b5vRx)5e=fFyV7r{qjrla?g2P(>f3jD zQ$cUwj4!oqoa(I%ephp3L~PhJj_|WrujD0dh`ySzfhw}kuQi-D?=vkvs*P>c8%pO2 z!)s?9rMd8sYd<^5u}8ghD;qg-v6?~XK5Vrn2e0_1t)6D62oMMAA^JeVx=qxphlSU> zDs{yBp-mBM_&?&%9S>%ZYW>;##B9^H(Xm+(Q>`wHU;Ml@v^@Mk(pOoPB7=*jMDO9X z7-JU_%@Gj*Q#=S4iP&U{9*lMMUio(rBLk%Ta!T93`LNx&u0V_AhgvDghz)w>T{qc4 ziVLr(_)9Y(v5vDT2GiAd0S9_K!iR^4W6VSkph}@7pHf??xXCx7ZxXs~XuN_M8u~b- zzuoImPPlnKE>nW8;Yxq)y%3L!`f2hf9WLrGV(>qqpz136EV4-!v7^D)!V0UT*c{Js zHncxxWBqwtoWf?8hqQAGy@FVEOkPkHAojeR~=@xY{4mfzF} zZ~WP*%(9^P;X~usBH5~T2q~MH_1|2CYVmo`<+0f)zTKYcuN4<(TtaVt{OQztt(2J( z59!vs;|%Fgq;+$??2p;B2R@V@9!*mbr7Kg7Q9i@`sWs?ee;-jX+0IYzf5!|GYwwKt zH?dl&6hfmrppN-v-=Wdtne)OZeFlxPM(=>@e$^{?tQL3REdk)Vs#YQeH@KXEKHkey z>78D{s?UKjGhb=cecBMNUCVXF#RfVtw-|E%57bd(tnhqK~+0GeD@Q> zfV-!pUb|$@aa+-33VizP3;UhB#F!QQ$BE_WDoPm}IJlaoCP);dE9+yaXcW7qo-@~P4nbN`p>1| 
zY9Y!-yS%#oxK>UWVc@Y{s4sW;UZrdHf^F?ZmKDN2K6utM)v>nAFXL-%BxO@Cukw93 zbn={LmKKrIadW7Z1){_2)8bCHT-PXIG^jba#jgRNkN22iy#+OR=aJw$bJ)w4DrM#gyCMyl(0vsvB+u1 z5cY#_jNG3b*n|aF#l64xUWhkmExdN_J~f7qHQ;_ZqiMoV%@4f}Nf!;H*%8j2)vWa= zUm8$=XFQwgB~eq89JOsG^yaqOrBD>S`g%I|$t_w`7(fB@3sLz^LDC7VgS8>^V1PH_ zOxm8LF-krb4c~>OseCc0W<9TY$0N+y!<|`egBfAH{ss@zfwuhPb5V2+GoojH6MH#e z0lAvDuJC*BM@u}#2RRNIn^Na2(O6K)@-qSZYjABMsc>GxWp8^=}MJid8&Hyi+ z@RYDsD6AzD5(-xLsY??n-shT6$B(> zcJ=7{aFM9J5idA9yPVbiKil+z$9n>>;sAmbFJf-&$6OVhTKZFB4ozN<{eBSt5w`c5 z|M$%$0R_gxgO)@dUPqzMQ)}9VgVVLQq;Qv8(FYtRNR83c4sK3D7YU7H+rz;dSm=-H z05%6aNF50+C#6V*CDi*HxF(2+9eedhH?2)#s%@NYRUWWn!ly?jo(unqLa%JTp~&p@ za}e6jd)tJEzknuoo1`04I1nTsw>Fae=fsLMH2?>X}q>? zF<;xm<~V*wLFLQMFLitd2l1?1w${%lbn1AVkQTEIaf*w8rS3Wx!@uG0cq2;AU5!%{ zAaQVj5a!=0W+ZHKE{1zb9uL)9#COH?_K%()&!Ny6z&K{noJyM-Hq;W2L5g|K3(KjUgyL8#AlkwaytR=kJxX5Eg^ z<6og2*gR!)zLzvgh23KNcV%jK7(oh~1AW^QJ+B7fuhWCz`BDS(JawEavhS7zC*N0{wKb`>2MLf`^mivNUIHw6k9Tgn# zs5J2~fut+FF^BZs)MmEtkHv2i($m>im!Um#mZiZklad_ujIzeZM^Q}n`cm`4q{8yK zvodCM0dV1%{WjY_+njUbFYoBe@y=#Q!Vv;37_%xb;*?>56pyL2H&7`nA9V(r1P9pc zR{HFs%T5_|iw&!0S)(BAfnVF(+uyWqY!1gc!*$7WI#qw7M1WcT+cPA;)kwcH3h?)T zEmX*BpoE`$VkqCGC$s2lW4e;T#e5Hvqn4Kl&!6Wqq6GdqxqZDhDIAnEaf{)O4kIWj zmBx$d19skK0_%iZ$a&Vi-iWPSNfKHBvoM=5t~X_75YoZ|ON&zD1u^f|Yo?nP~;zEq)cD4cjiVN$|!WV8>35$PEgFgN{uf=#7Xfdi0XLQk(QanU?-2%qM}V*O${@T4>3L zKWdaJpJ*v>g)5yVuD$<56qXO!O-ey&1BI-_zaEP=7WFS4 zgg2t*oI8f7Fw66g6P{GN`YFa&*)-D&#o}^jN)i$i8>ZH3FmxKosA`kpCe>V%;-@sU zF`K4-620fg=3~QIu9D&D(J4`E8DkSsQ_c569G#qozj94eqR?tAAoL}_*t+2>m{v97 z`*p3O@O^UI%KI=fye-wT-w8!Axz6P%Xk4tkeyfPSi`3g{9c6zUnt=*oqN8?~e7^=g z$x@-mO5Gs*gr$uR*W??=eE3%t24$x9TP~Uhd-i-zyVS>sow`lxf&yV5zOHZ$1o zFO>;|@34K(ssj=?Zi(|jxX1Eh1KR-7k_n!kLNQ>YH2@VZW5c)&kj8~_@smGMu%K*r zMhT&mLj}~3VpgXL*Z6S&75C0mYZ`H^GXE|xG0S{__pxC`e;kF6`JLj`q4B84*s)l) zN1dGYv9d~!F?0*rg!^;BK%J*rxOAqjiH9R_M8Lp8d9jO_Kc0OOO$$#s+{w1OWT!pU zFH60@sj{|%0V@?oNG=-}E)4!*v@ZE!<6+SvnkP5z>?Zm0x^ztlXz5!cs%`6S&(M;g zOPI`U`!!?yk9nUzo7bhqpiPHD?*iTv^rpf8&&~@l$j_V56s3{*VxdcN-5pB001aZ}gy$D= 
zt^+^XKiSf551Hq|Ww8PMaj;h17#+^3AY>z?gd>$%(l`+9#y92jgrsRkeTvhvdjvKq zo%P%M8!Paw6ov1v4PA%Y_M2HUD-UGZU~ZD=r>(8s-tsf7a4+T#bd0rJgg2@`To?tC zcJvYHbZLLM`Ov<2wX8x^)Z>sIM0{$omCheKa%aK+LY z@}n*m=i!;ylXS2@<4w(HdB!q1WVgTb(-b+lV-mZ zB@4GJ_B%7Navg^N7IuwJY}5x*M}UzWTlfgIOHYw=kkwvZ9QGglV5XM8c}Bya%<&2) zQEm`EF7FXJ|J&|Fc{~SbrPU#H4QE)7v8BySKm~xe#iUti&wSSAh*CRVW?6q}avSPj zDxqCF%HPjnVMblLX2z99eMLd=;~d0_CGoV^Ud`ZNO$~BXBQREGuWz&)PbS0gZs0m; zu3$`sX36y$A=z-m^AIwlkYUwBomU{cW-c%3zH8^{jw&d$y2@rofA6-j29kBw*l3DZ zPe?uQ9Vjxe*W$)y836AZcl@572!$pVrH=%($c^nAv_H<|e_Z5UYDt0!NJmt3NP|O( zbG^aA8htYwjo}2M?3)%j|JzB>TH-Ic(neNSvmXylfS+VcY`+m-<=jk89A;ejphmOSu?9TFGf{Hy4!GtzK`H&+w(&;U5bh@|4QGE! zbh=?=^9;j9HtsjdBehH&w#CBt>!W>?OoHA5LxC1Bvrusue4fY(K~s^YX)84F$NWDL}EzykwJ6J|%(2frbsHLqPyu1F#ndVOkpcs6^bLa7+ec3vWZLQlVPs&3 ziG&HN3z?u$<1+b;e;__Iby>#U*8t11nA`Sg`bFs7ixo@2LHPl4n_NmP5U5}5BMwLW zP$nxXtYr|%1Yi0_lOuFgE#%OHTu zawPRDlIN6T@;wk;8H0a-a3XL~8{f7@@7c4hbObed7cC+%HT|on)Ob4X5EfCy_W$J5{)NA|aSDl}MF+^q zP-q5B&1r|d%5ij6{hr77tZ1d9kIzBEv)wz zLv#c>p2WlYa_-_zEjpTisk3hTU~HmD4pnL4FqMG8CCFfUB^5BOVj+eHx7C5$?%r#r zD_nf)>)N)ZR4?5fjl;sV4 zTt9HE$H7C0gHEcdUoJA;a#%5g>Ct{c8v%Hm(o zMUmhB^Trd_^=%|bqi92vwlrEnjEhe>c=>;GzgBHmA(GegL-fbVJp;T_?&NFy?UwIX zcF`B_+oZ;<(IH8ouUa0gC6!LF90T&%Nne$ML*_8FdJ=x}Ir*BN3o0-o#f8*0!q&u{ z33AsGf%|#a;BxC7`zITR0&Sl+T1_&8D;9SV*Paf*V>v)w!o((8)B;) z+9UQr!7rps-Ivo7D~TgE7k)JJT0OPC^X=?F#~Tn=GPOh_2jx$lR{W-4wgjyrMR-$*IH zJ-7V)CCY9wVbcjJkmF3D+3$44zN+rhVQe-O30k@jVkqSmMR#OFVgl=8 zIbh`J%2061pi=35i(6>S9oB{M=7Qp+FME$RQN-K;(e%>$K5bzB8J-y{eX(u66Z+`dVvnl@>GFnAvEaUP#Nfn*lEGGZ%jMA z5QW*Fz%#ruC1wtETqUfW&U#Ahlq_9z*qDcjG0>~6aeU^#V)nUrXss}hMq;VpYlHJfI@ z;!KEU>9vmoEnMeB`y9->C0ec!{(*@KeH9jnlMNLHe;FBJdtx7QOZ(_yctS3UZZrSj z<)B_k&eekjS!0w;qCge$i@sb&=eKL2g=t8)61o{WQ=;{bNtph*0U`}GjE6z2?4Z&} z#+`#e9_<|aCR(;nbcsb|UHRe^CD!C0v#8MwB_xrwuGyep-S}%37p^m6rC_|N`?%gg z6C_-;UZRE9qw*;-@#-<4$!RQ)Lc0liS0Sy1*Wb(@H5U6_zkXhZ33~CxuD*Ibqxt*% zZO0LJGf>5EufN9KBw~730EKoIF0M;}mYv=`nU**GkmM<0bpp6k&CBpKDVep3q>P5= 
z0kB}9H_<_%M+J)Os?zU#8nUoxZ_!c_#^6-|FR6x^YcQ~~NCW2eda6~OFf=)H(%%ea z!f^Z|o6VOAe&OczTq%Uita&@^)eyJyHFY!mPQoL?S@gtsllRK%*@^K&(2mp*CpEX! zD7Ed8%G=iwrj-j5JSO9-rjPdJm5TIPh9N`_7ll4pkv5Xp!G1$Ho(z8sg29(2vK(WK z+~LEvLbpIum){eK_wz8J~d1RfXqnkGLJQI!e=`xEmxIh-Z@NaBB`|vA4i-T(L^=E{DR8pRZv$i|d33GqqF}v_$XuWyA*iQ=Y9o~8ToRz1%i$yzQ ziS^w(u+^#$z>k^7MAt=JAr&CJJhvh>LviV7y+?D^H_(~R)k16zwtDV0|ZcQ zj$GGcBk31D1kr%Fepg69*^rX%xe8KvTV#-0>~#2Kgb!Zq(T!v&-j{dW{`)%o?E|us zUCMMB@Uq*iJT`9dxU9-=slyox0yz#Be_?ZYob&jwb&g6&0dr7vM}$yvECF8+r|@TN zGz~r>Qk;H~<4-2=tqjzsXX|+djgoHi{u)Hda04=_1vL<{ZMDO<`Q+Xq4K+HH4AfsK z!sV&V!r)*xs(^hV$Rvx(JjeP(FJ&gBlaG%IuA|B?+9xU6(xjP!>3gZw;^e4!`hdXV z#^N%z*f$?Zee?}#QG6T;FBXms?nNcJ}LTVQEmK zBtbw6;81Etw&-fAAf#cSqxpblq^L_uCOF*Yf-+g#N9k7>NI)2zviI+_z?i4YQ9I2e z)+R^Yru??E35H}|9UMCXzW_MxrSBOLcDTOLue$$PKVsE5YwRnffB1`xg36e{gAz`d z$s*VpXH5L)rNWlBM$!T9qm52Ta|*ojEWh`nu@Q}Fh50r2;JJzY|A0N5L%*%>l^vQK zlyKZMJ#>6TkWS9f<4J02YW>~Zs-}M-bdnJu)Dcuw8X5kj&3>wm)3h-f zzZ~cks8=Z*wHzHtYO~O+-cEvac^f--$#Ad9U28Ivz>V#ULDqQBJ_{}Bv<473KK%og zI$+f3oQ6agM3FF4dQh<-}5$lNxkE_Q$fC{G+lvk6gT6_e| zxj8#^H|(ktz}cO}+xpKvX`UNeVBK{pOg#KqR=|~wu1PF%ugY}M^=x_vH8|jF(|yOU z5HvDDq=l4Ompm=nkS|PrR4)q+>*GIuo(W>tx7#DFKa2!H`^Ab+cID z#vWZL%?t0NPE1S;nJe@Hw9th~!}4p*{vxnbcR!qxCRq_CW1ft>ORPYAq@GR#+A>9N zsT=+wLT&fl3NDyjZ4TCf3!2a~C)WWr8+`9B`m_Q$YAAa^?~B-M?8ZI;mqQd0GknY5V-@kvE(S*_VNjHSTMvf_VR95RA{gKplKhP>$r!j% z?2F24WMD!lV|5xMT$~gn$LuX^8K0ZMng$1&&r%@|K{y3J0ykkZ1OOhIVjbs|sI042KPJ|JV-OCbT(;zu zB&4`*`PlF2e~Z)la42pO;rMSEceH81RXdccw;^|XV}u;XsK|7dRux}if(QBs#uQ|s z%lWEze2*y#ErQH4-%t}db`~+)@3st^2|Uzotwm09WEKS`0lcHjDk>I`VH9%I?|I$3 ztU%Xm?_WafhO-kQ)`)e0fYC`MCi$bGNO7q%d~r1h)KU;Hajvj5txl@Ttq4Z`9?&s9 zngy!v6%LakqjOLjich_0E_iwTPUV%ElGpN)Dp_LSJ5FCO?jI2V-+BL~pEPtTA~IF( zb?BX9{_&G9c#sy**oB6{BejqNw2U}W!w7v<=W>)*ho>32G}%qsQ%wkO(H>J(`lL$W zd!8|8Xw97DQiEr8&jSI8!7NBt@kK>9okhZ-sJ~kMH3P|Rgcc)ht(AX=Q<;-dOcRaZ!GQRvEglfWda7E zGf|?u%{pP=^6O~^M?Ux=C%`xnEA>w@{)h<%Q0vM2E{ZF7=LKJjLo| z!$Ec|sChro$U|8CeYT^}xnj|7SB75Z^n02mYn?#5A-Fp>C%UQ$n{z<(SqBM2VO*HY 
zTCX(b!>gP+UR;(Z%w@RJhIo}(+6Oo%0JrJ_a-8dIYZ8wGk49foK^QgkZK0q_H5HA+ z*`uzsky_$>b-y0Q5+3IDBQ*yndn z`{j6HPjpVT`9NO%-EWsi7~(O5n)T_K zT@3Hu$+iYvYDffrfPHDPpXy%kRz~Z^7VWgeS#8Eix}0CfB{EbH@_Nz409jSeB^YC4 z7CyWKwpEu1CoGyL#%uB&<52WMFz9URYQ_6tHJ> zw+YF}_|#sQr}Qk3eadi=3_lHpWqkSG-J;2a7n5t(W1RSQA~(WHg}^hn)oo zK?HIE&`9HyzleUW#7)A7(GbcO$<^|U^uJ2dMQSML*LLNG5^5+i#=nJ?b=$|t{t{r3 z5#sndRR*zG@EfPmaBC8#4Unkf&ze%i^?X1OB+XF~r{>>hZdqhxgB0a1E11bMRSOsG zTA%QpiY2%Vh84$-T))1;^u`PVNe`jDnr(47axedr860Fc|A|`MoO%xsh zkVrrogdvavFE9c`M&8(z^$>wh5;-K{ih4olwn6 zfl%wSf3PE#>Fu~*>{EJhg#7@7(_do6v_&8aeoxp68QV=_!@WG2gQy=<(&gX@$&aN( zyO4P>Dt=nXQm`zimwdtrX0WX3IW=d*a}3f5&&)QlLny}YR;@-{0qwrAv5~88wjMSN z2ld|;T~mk61yGzm2$s{cht<$NjK{?}!)$%$V(*+-FMk{XKjTfR`K;7nH($JCRv=ZE zHxMdsWafdIr3)DT5^&4Q8pmNbY*90WI=hZXsc2p`@Uh=gr7dXk7UIM@S#}u2v)w3gXdY$B!)qAT4LwQk0vJD zZR^eH41oXHZ`HZfNKJsVzDdHsLQuC-MC(%-b1o8s)_c?ZI!wx;ga!Ea$JEO6gd>pq ze>dHL=O%hJdAlo?-pK}r{S}QIjc|8$Z4R`^9xJ?vX@#Sv?ta}9#t6>3z46F~g=GnV z(g&fcnHR5_C54Am17W~37FHvXGiew{(0JlcjPcp{Ar3OYy@)u-CK*be1QAF}`l_Pj zn7sWYeGXtbA|JorfpG0#$BjWNk$?f%8Y1GO#Nn>fXW|vp^^lW8p@feRA-e=&W{nbp zT`N>*J-Z%7xZk_p)HCd`M(ioP%!OawXfX+iyBL1x!~Gr$|I7`M^?OKezXf=O;L-xn zRee@-w>daq=X&=8l(YSPf*9kY5FVpDO$Xm2d0EXwt#6(EbnA1VJtnxs2~(7w8=Mx-I#NttHRmn=b0D)fZY zbp4*l5HP4PhynVQdR&0U{yq}sCNb3ZMAbC8$2NrrZloci{{#{7hWWZv9%6dCF=a6X0$RO&=mh?E1m z&pQyO2lmVX0HiJYNg3w+U^3aUa=V#xAb6e~oN^jgz)BqhXLP2SAVC8R2ek*&q=M}I z2IfWR77XnUz%eYSf#Kln;UrQT z1dR7rqH*Kk$8K+ZC=yixxJpi|>_ULcI}M7sYe&ArIn|FyW8WVc_}}rl`yhm8BSLrw z?1tkv!F*0Np+B9iaImQQ{7-1Fm_n?iyB<0lvNDwqgfyG)0UEmQ?C?BYUv7)+W`-)J zvq=*X$Cz0crrhs2nbb7CK<0i`K&zOX5U9NVi^2(myJ(dik*-7>!0Op2(77P{c~}>y zr|0s)YzH0&aT^cUpD4`FXF}c;fN>O#{*sv}=AI~vHv)abByx`iX>ef|!@_b(yv6pD z3o(dh3RR z>7#;JWPDtN-}}2|_6tK$txwUJ+v>u`RQG2bqVNQnum?5`$LB#1SNiDRXkLmLM3Ua6bVCEbIAJ$@y9 zznq_l4B&h;u2HwU20!0MKUe@iZ*UBm>3u!R{*O>P8J-VO(AhmDt!-@dVq&4wY7C-L zUeEy(!x6Z|X#n>XDn#rNe0>P(lXm61&Dv0@y~fi7nLqZ);0 z==UjPcr8qt=IMo{*RNwhfct(}=sfNuczt6}q>kNYS>-Bs@kzOh7j8>SOc_ELFY9FW 
zKmwZ>=om=`w_IrgUP>V_MM&z2u~p2Hl>&~$|JXV4djJ8^>vpATUSoY$m* zPa9g7aEAZ*ErwAf=NK672T@o(6g>qmm!*zYAUk@ zGoX3yYfWDS`Z=NIYq=322070R+hr@wNp-rq|17toUMN8aqSE=rxHI}LBczsnN@j@K z0ktl|^TrJ62VqXMaQ(R6`Z)4QugMvp8-Oo)ccaYw>R@Mv`xKMmDwqL7gg?{dpWLjd zN~)UEJZ)0$6(a7)8x!Csg}?;2g%)Y>3`os)%UJ`d`F2n*h=!AnCus3Zy?gFO-829VFE@XOPc<`Mi)z`~ zd+Ln4bXz_DBR11%dPAn)ZLKuutZsBsaz&t6BfUq$Sr9q`o`Ej+I+Krnm;mXHABpl; zYN~Hcz=x~+Z2AGilC0sK84@$x*upueNs-edE{cTra(H(jh00TNV;?kxePPmW!MpY@~YU zU=AHfsPeo=KKYqz{7AiHZ@M#sO^!iB67pBpkIR>VVSa%Ji9%os(6t)Sb!-)#lS%65 z1G%;_gz9%=2w95<45i7=4MTSz(-U*JU}~LT-zW(FWJ79a!sG}GinvzZ^6PC0X z#KVhXgf+?0r(QvJQWRf*besKJu_?bO^h%zh#v5->=|J~*{cycXRs!NH9@V$<5Ltw| zI#&ShbceaZTkh^QA$D?986%WjrciRl^{*ehBg6M1_ytoig(a}}CKH9da2u&wL>%7B zgva$$K%c;aM!=!KG;bnpR1jyU0_LS0vR2Ew3eO$?K-34E8m1`bwjWy5}Mv+RAvzVz$cSA0q5>2hv{h8%EvcbeY}DZJd~&3uuKG$t0Ma&yiK*7hQQ#0;6G zuXvZFj}Q-7(Hww@NgbrG1;aCxaRzV^!yZ*F_?8eJ29Xfu9OEc~^i1pn>?_rlO{1S;GT4J!HoT&vNqN*#tg(1-rX`vw4z3D>C+*J-1!T}9{vZ8ApE0=G$!$BL~!KHBc&5t1UM2U{fpmziGiQabBjEBFZK)@9# z;@%ua+=etDoZBpOTL&ZvxW#?ZJ^0P!QVZ+tA{ah4uN{2}hVq`9lCs!f zbQnhSYnBAhfV1g{7N7L(#XxP6H351BlzvMxFaq*h7d@0k26znGEs~6qG0Q8}vU$S0 zh0Rhx=f+A|!_jRxyWa5OgF~c&1FY*5tSd!{>_t%oIOpt_QV{_DsZ+=-Y{i4w8UJ%2 z7sIKo1lFHO3VS6P@I>&0^jXw3WWfm_`k7lF)WO=q4X4M`TM-s;pU`yI#3HsAX-aHG z)m!xekz?N$QGXAn2g&es@CsuJv>Otny6go=<9R4E`qIX{@md#A=oTVEUsb5%;hjH_ zT`d~of!m*;0GB#3#;k`;#=Q^^hsJR?3(T$kIwSuEX^y1GSyzG=nt~==cB1wPB_AWu z&{H<1zeeN}5^)e7q<30FXaY_0sEPrtC?2QOt-ANe;-ELuNcp>aytkP2Dig$%&;g|j zm!y273Cml5Madgo;*&1(@|Z4oCI_g+(jDoymBtuS9S$WZ0r2mqLc1ZtmM%w;2h3q` zL-}d>(BcM~iW%v=z>6@pjY~K4E`vuM%;Tr99~=C z`Us|30%(fM0k(dP=3zb|qZR`zu}A?IECGsphw54VP?$npe`~?VfdOQ+e99P{_#VR| zlo1g2Fl>n7Sl3J%Kpbc_ynDeOURre04>T?uam{5?ube~inyRuO#(^hS zqq)S%T_mA@zOH$tr|8uR;oLP{w<+vSZ4>_Z@YjU<8WRE@+PV0D!r$RW+PAw0UPc$o z@YWG-K9zqEE9~mL$ONL1j>{q)LV-9?f*q+p7R&petFoXm9%^=H#&*}%o?mfGXN?;l~ zk*s9q7p~H%VXCBdk#avCP&5b~AWHqTcmn@!Zi$r58c=a$~Uy9?o^izI(TJjv`<3ORiMWuS`H0qQ0Z|`{{9?pH0g&RlQLf)3o zVYgp5OAXc~XL__9jXAAKtUu^)ZffQxyd9Ow0IyzL5cY77sE;tTyhMg~N6VHLDt~9f 
zr%=Fctj|@#sA&n|@yD3ktcv6)1uL+&k- zTV_z1XI*!@&zR7W<4Y(8szVrJ2p%Ht-7lvR_HG52oM^U8gf~3gEv1skcnLmiL~Ned z{W%>WawHKtsLH7N^t8LRuL7UDwYN~&KLo%n#~gR=KmU;*RR-qDEt{T(k!fT61@*fn zS_SD9l%IHt>-{-dB4 zj;?t0=+SF$c^Zor9@wl3O`Ob_z>W^a9Baa9(_t<$DCrF12+yFWl52CGKROc^JFpGj zYBErJroH7w5rK#2r2^QtJ>FOa{Q-8Q(ZUjWecuVn<&M3&K`j3M23Gc#iDdYzU;zIZ zfxbj;`l?2$i?85b@42&*eYZv@Qoa$h+m|;o+5#1(5z;2o%6_VewZ%V&Qx~zCb(bUI zP*^)WxbNLK2JV;FLciwwSr>r9gcrhJct#s?z2qS+-jZT&-T~NFdORHBvQ$!?7AUrX zWz7clML}VAw=_~TITQ@6k>#55qr6B&%N!#8BK40mx<4C!$T}V^XA76I1T|d&c6|gl z&4MbU2K%lsVv*(s^TL=3@7TRk>)VXb-QvgWulOk%-e)Dvn9=$5;l;Q{sMfZ^b^8ns z3)3C90;}~s=|_a;QZfs-6gH9Stj`be)O zeMN=G>%n?YC)X>GNiGv|LT&^rUamCjftkd!$M0xj8NmoTRNOSMY>2O$S`Hw_u;90I z-8=1Pqy11eJ7XJC*@A3lZ@r)QC-g}(S< z6>zteyoSNsyV_eKV;({?VFq@l?e?bw$V1xg8)K!iMSTA@gmAA=eH-Oz72-;NZ1jHT zBNkRx!DTd$5jQ?Xxo}yAHFhtYx-5n5%;aB2#%EF)I4BN3D0cj8c zr&I#r5Jf#i%+D^;+eF@747H-!d(%@N)z9#5@QQ+@yWg3iHI4>kjM`ai6@V!--37VWXa*@C!Rs@UxB`Ifs?yE3SA|TmE2Vp`67al0s&A! z*$br-U4Sa_OTOmyx--L?g9xW2#}Df3zdr5#fWW-BzDqoOZ(jiLjX79oZ)w?Xd^`Bt z=-JEs;7j4CwUxlwRwpNOHg0+$4dRgJdzhkVzLV7&rWJ}agDa|xGz010g@Wa_W%Yqf zHcCNTNE?F82N6V2hA?lu<)6<^1xA>+4A|m_q0Zf}ktu#V0lR%IpAkWNj2TWB1l5kK zWx1nj^LpC-CmPcdhglj5&+YqkD2o$3#>b7{2;G%-yC1RM-~XMv+sSCy+TDc8j6!MM6%{fX=$4hx z5UI$@xGN1R4I?BPMs`GH&ikkHKF8~RJ?C{j zuE+Jbu2fX9g zt{|P~6mui(yXO;$&t;RmyZXLM*t1CKPBA&o*WXX0(H%t=M7qp5BaZ2(UEo;eo30j^PN~xEUhS9!u#%wKN74}OQk6>fv&p_A#uulhX9$U;4Bqsoz+nJQ~z{*hZ z*i+!JApCh;(fp}2cXB;56t4km`LM^G$Zxfj5)^v6HM%ipgU_YM5XQ^OyzSyca*m_5LCj)m+O#!Ika@JArw-Ner5&BO?aB=&!7$4Tv zDs>DHQob6m$_^5QDlM+)|#@9Q>qbRMea@9dgo`Qzb{=Vzb1{imx>Jh!M8Ah7%jCgquBj50V; z9G_I*ev(1CgA#)du^CE&45v8GU7zy};g{hgWT`wV=bbeIcJZ z;>@6}yu7IGS>M6j7u#@^oQ`s4H!jx6YnZ5{*nhjAQJgY0rsCx#dm>+ zlV&NQ^$bDyO6@~8E@LHrD?!i_$T>ffIS*cofzi}glh>xi`{Hw6BCZ;hA{bQlMn3x=B~F` zJnf9>$#M)rV5=T(t12Nl(~iJcQRalXlN-3O@^y{QX-+o%8laSZoQaR1GTPRq$hm(< zEaity615GHVlRV9Xkm$O6lA2a*nzk;uW7F$nawnxOIL10KCoRc_H#N4uy2{Zd$a*5 zVBS^pgI$5uz1TknVzr-Jf11?3A_Tn)?;2Y*y0faY-QdU^ad=!3ucS*DUdFiI=*U>n 
z=%ymj)CSxQkpa9Zi2j=X_t90;CY=b5*TxRK#N>4nFS+qL8v=HX50J%zgkbWW=s3~@ z-5)2Pp5+0Emv>QM;!lVdf@e&RWFor#3KtUIShN(kDur3Gs4dmfMSW$MAfx;~XMA5^ zMcV8Nk*Jv?p9okpgF!fueEKfW7I@Lb^dq`JwAFOGGfCGos;_(_8Rhq4CzmPNtAv&0 zrK%HuUyrTq5W6$`&)c&_$_XIQKS9BW_K{~BXi%2rAq&Ml9^=HTBT52d-_fzHImU#lIF9Hz4z*{cajvOn5^S!UI#>j27}l>w8%my+{oE^14@eIhvvptg2`0 z;4q-7d?MT*y$T|@hk~+SKbC`MSP#p7yvae=7nt^PXr{8qbm0%jIL=O2aQaWYdm~mL zur>fDARVWxm=gkeaX*fJ0F(!)dEwSar21OD<+0KM3`(gxPTiZ2fAvDS(=S)kd2Yjd zR7$4AAW9_O;|o#rMav@?ns2_ywoo2rmtTb3?z+wpGEA-3efxQ_jRM}ojMl?VR&z;) z-19e(_TxpfkhCMbp#Wc9^?|$X^x`TO+3#{EH{TV1UneKEf+mXNl5n?w(_F*zoCiC0 zutbBmCD0#od*3-2{%P`#kr@fwCKMF!ScMZS1Ovlcm5%GK2$SowMh?;U9gabqo=1;) zgR(z@|6J~Gu7M9}m-Xz(JEG}$hfspl+CSdGM!dc2!o4IuJ_RkB0$=BNT+Xh(3a7<; zl{e{eEy#tND8s^CFfGGwuk^k^e%(JA4p`34Yf3ZvT z)~_ZHIymm!8Mll70nk(wvpg_=u*qhFFJ}?nHDoge+nhQ^7 zS7Dv?;tCY;&WxJM68dlf2QxHbzBo-3ZYCl6=_V;dHs`^d%`DL%mOtH8^i|^-lx9~w zZSVV@{CE$MR%`O_veODMMP0u!UXxj5kAGPuMWCR)-alCq<=X7e-?TjJzbund|7ny| z@ksL2ItUJa9~l@|gZp)zBZq+K_5#s;-rU}6eW1L=fB$VDiw@E964TT>^&rg!7DSF( z4K&86Z2V*o2DFA_km5407VT-Aa5I~4h#{8HBT!KIwgZpz zmL~PL89k5tmq{)CIhlzt`wm?7A#|QcShBI<3g6A$`x3ym(fber3QwS6WprbfSs1CG zGhS*Ae3n3YNy4>~3sU8JcuRd!ViJ!ttLbgx(ME$Ef6Hc#x4nblh_e&s)~ zBt0WzvtO7LadlV$S9`gIv`X*}2`TS{WDHMpKC>>43^*t^^93Of4ckNzLk#M2Ch*Hk z!MVVonVu|;z%^uH4azD7r?a!UN50I9k+F-u=D7Z~aQs#)AEo!*eV^SfgRzhc2|{U< zId-&twP*`)nG)8)`)W7t`BW5($z8YR@nf=S1tVH;XRP(_e!8>iV&x?@PJit+@MR{! 
z25dD*6l4g@!RC;NEZ(Ro&;%qx-la~}3~S=ZICdoF>ez?Yl&0AHN?L+6e1U1xiHkDJ z2fW*g&5>PU-_CE%m7hHh*D@FWqW9YIRII@<{JMV7Mj^V!oNmh~s09}m2Jx^b^5*)i zj13#baB@w@i*{`Ts^=b(;(v5oge2GjxZ}hmB@A;Apw@7|1JNr>^Ss{Lmi+9iQAmG_ z>76MftDE^z^9F<|o9p{Bf9AO-P zOg$lc=)&A`2Xg#VQagEq7~Su_%Xszf;63kuoN1m; zHXcOq)eY8C=aM-#-zD=@-De3!{hY^f%wwvU)_TkXcp=ei$5L`@%iUR-A&8k*(?@z9 z8%T7Nh=u9LZMSEU1zv)2wQQMq3$s+iJnXM$39=LD`cdj_8T%4+Sb$J;{!ym=rQ5Pe zXUh&CdU?7A$R|+aBmm+>Y-J5swgRuSSyK~ov;nbiwRI@J+b|h2utNdONmpA4epduq zaJiL$OtqCF-g8jd9iO@2=b3T0(CIIQeK;OQ>^m|$ckgo^X|K@r>Jlr6c}2>GBrqXA z5uHl}CSXD{O%jFTjYvMt81u0vK*m}u+qAH`YkQCw;_x*%V-o@-VKJ-b0xx-09g-|} zL24qRr_j~>`6h4t%>1-`@rsg~ni`#g=Co2nz)&vs84S)LsPR3id9c!uJ0W|L%^aZl zj6B@VYBAaPH(hOh`Hj9$QOD>`T@A}YovkNK^{}bLPg{lgv24B^Qn5=wMTx z&dKT1e`)hw#YX(+Wj>8_siJtc9PwWip6F%Op86O3`*sv9N#tvu*`!~I{zw?TiP#(m zo`x)pOB&-!^*DK)xtrEFQ?;!vFh)zk0@Sz=amWj%z^2DN^hdN}jB9DSJrgHR8k~Xb ztv%Kf3TV!eA-yk;J@1+O{q!?GiLrpn=S}P!IPs0hh&8NpAsx64oJPiL+GpS zt_BtX*je;%eP;_oAPbxliR46B2g;3u4_Ma8LZ#h;eei;$c%H$ zI_oJE4a5hxCmX$fHBeF>Okh=qgE}pSf5x(^64|`tZ|0_HvsZqa8DpU&1J%M4D#RPY z#(YUV50v|h(}j1`tGHosUw(m;gfPMNBSs75+Ld!k%gKi|RAH7X4Q(O&PD{g1`aLsa zM(ePB4oAKWj5CT=L^yba-hfXvrN8VHD*%#f@UtRErjvWi+_^{F6)KKo8ziEG?&<%A|YKNXc3o&;Es@DlWIRk5uX!n&Ia$D$-mcyHdEZqvsFrOyto7lrN~saNrX^bt{Vx|MPCvwSIC!nwlH-Z*>l5QFl370EcolpEM)@YOsFx8GzyEA~;_IWQN~% z)Lj!o<*GCxryHo4On$!0EfVzG;-O$(1;kYovA}!c(`WFOC{f6-c%ZQ>60>pJNSxWH zJWPmutLe>4l$821MX?B@1d%2zn!n-gzuAt&IHgVZ3*yE-Je=lv`S|dqZNc@{cdts; z08RY189`F8OgHwN3TEa2l<%vqZl`A}rukHs_(KZE9b^&Wep#y+ra@}sUCzhN&sw#-0TMpiG5A;_{r^y5&~_z46fFr1zVX9T1HBJ!Xt z59KAh>_+*GFBn@esY#4iX(PYlgHmv|HLPLGP}^}@q(}oeANPwgCoTZr&Lyo zl-d@=)`?#x`}QP?_K)1vHn^Xv=5-1hdj?@llg=Yp9w(Vj$0J2}L^jn<66D!9Ok}Fa zDjvETqr1^^vAq4b&Mc|&sqDy)Uwa~+eLZ(Lwqo~F4djb`nWwNN(#CovumRvr!*ldPFY$oZcP2^)XTEv5%y^E>kA#658|V3Dj<>?0e#NtUY;_lky{ z(*mEEzdcXtp5FuUg+~-kC+uN1W?rqgY)y0`&EWpNmld;f!XwczbL$DjV+O0U!t`uQ zrFEtuDi{Z`nA0Ba)Q(cMjV-gg<0)j@BbvhRomE-4Xz<2r6Z7J3Yiw{mS3hi1hat ztC`$U1ELpqyDNUM-!s3_0N$&^>Ux7K-p5C5 z;w4~EhoCGvM$Xk!YgEQw{gC6NsF6Lia&7{+7L>4<+7)Rt*$&|Yz 
z&lY*(^Ecb|SANC^eu$DqEtt))h=F+G@E!>v+6!c4W{%8q*s~njqRuV||6@Ih5&L|D zQLRzWLMdS|u2jN}@|Q{aP3gN|&V?Q+<&%(%06g(@e9nvF)&-`9XOigf2F%@gTVdqh z?>Q7VKIh_Ma%pR>+CrH2OqDm1X}O%n`#Np?17E6hz*c5+59Ss3oJg#lFlUyd@@d@K zKZ72o8cYBcg&yODja^Uz&T5Z{8*Va~_Kue&37>_fcEadEGjYqK{^b>{B{LF_v?t{x z<;Xcf5BW*V#ExFY0K<-i0V_V7Wk2td0(NvYcJHJLFGg?yI~u%2;g--{^QNI|+CH8h z-#v3lX1tbAa%nGh1b322%sm2g6*nwnIroMxxAmZ5TbLTJ5-?h(64#c4Y zd;br~;X?<+F5I{|YX8dxVbIQsY?}551mL6-3}_mHauOah-erg1&g=D17J)anlSm|7 z*6(e7x*2}pii@BqTZ?E8GI>J#gc#laoC0FbJxpfadhKwW9BN?Qze?q!kU-(NO^liJ z)?%Tp7ddTTWxT$XhO1?uSUXb0?MJ)P)V0fi%|WmyZaWsE$|LaMxNp?S`uOWa&ASEf ziuDr|LkfQqV6DqY&Uj)jgxq#Ghk$8Ra`~7jey_0GNU#x1P!87OV-(i_CY^wTWbCe> zA8o`QcV|jgJIEU9m@f=gJ$RtsvwEchPPx@atbHX%*t^*QO16E^~mLo~g?ChroP zDtPJFe|3*8MIvIlRO(ZfOnaOL)9aZFfT~7@{0h=d*_rd$I&|-q%k`EXEk3umehhJF zko@zlBRa73zo_}U+D+Xr^ithOGg?>vWfP!S$cS8&@#)*QNvK(bMNCM2k93078oWb| zzHb{%muJQ@wXjTjgf84~Ajcbi{pbV6#1m^$s2%dhu;du`k)MNbB{rkxLZ%{nD|=sH zII_a8RFe_xET?VDlgT^SB_*|eGi8{V(MQP*mrn@7_LyLM?q>>4Wk+$3d_TCEwQ?JA z`2qa7J}%wg(`J7?XmhBa+A*G7MTF^Nnc#42PSq}j4U7%Q?!DRsj2*q29gnP|GxFi9 zSIq|x+T@6>$vKltaM4waj6_}xGn{K>VbNwb>l`5>$z3?JcDJWz&dJN%=%~qdA#~fe zIso!5Bv2U)q|#VB)(5 zPo`L~3n0B|PrhHmo21zNBr)ImZsj(w$k%)jM^G$XJz}VKJ9AsK8GDbxsVJT6P5^vN z3Nv!}EUk~xKOnfSIX7vZb_YmsExba}*hn9o6ubzzCKX5E>UI*}M0!stJ%6oy(VlCU$U zRCKO6u>+8r=uLchkvQ||qHd#j5c#a*A>_L9)X75sO!P#%m;)UXC#XXJw z;hv$YJCvJIN0a9R;M|oQ4||;#ud}{;G6aUj5I4s-IXF}u9cbW(iV|iVxoOx0a!u42 z$9zuNJMxiO9ch&47`Y2R5w17az1lAWara5kU0>C3x%h<{$iK; z_S#W3INZ-R4K}QXDjqRUj6bX+NF*yO?<-21?guv>!BkRh%k!KovAwjcn5ewC*g5b+ zpVYqK*6}21fz>mGTB1pnJvr)zv-dzv#TAmaG^2G{sHU&2RbHtH>RJ3AbTLc@RC`3O z$C$&8{L%ZvM&yhk&p#4QNPcSYDd^G^css{~+T%A^m>46;0w63yu;!9-LkL&IhW)Yr zN96IYp{NU+1(vekKvZO5_Kd!PA;*qKFV2GP(+nSXJvtv_^RVRn_Wo} zg0U>GnR3|tI07vGnLK!=sJY9<`?w7-zyaE-l^&tBb--?5pz?53JIt5qGE50U@Zv_* z9K&Ev0f&-&86CPngaEQIABpI4jhn529i>t#7J|^OP=?4#g8kaY_|cAKv*Nt-XWt5f zKlq)a{2hNPYB#;!fAPv;%T150!QswjKl`RN`0<^*l+@3-*dGW~RyGUQYo0Ew6suRr 
z!F??HHqv{9S|XB2=qP;jw!4o~h8PCope@e~0TIJZg50!Ss}F?rIMM>6NbG%2OBx?z)Dd($csQhCO4NyC9bf(XtZF#2n6Q@;08}8b7z(SS zH&L9>9r3ekuj`1$5UQdDr#a*f>9<3#LMGJS;UBdG&8D|Xa@t#<-@eTNQGwb2P90~R zy9rK~agN+g5~brg3q*OneCVDiKc51LBR0#HRHnYu0=YvwY6hnB2^f)%B?O-0B;DKexo}N^w?#~MQ!^G)YEP$ z5Z0TM26^C}gVQbq{9NHe3`$hovK<^Bd|G1X;3@B6+U(`pZ3BgaeVJ%zP%m%^iCyHK z_V;_9zHWpF#02QGp{5wqe27&ha6Hw#iKB4bUj2>)TM-a^mR}57Y%S2?heNuFeUI@A zMeI;de83rvBJ1U}6UZG+zHN@&5hd=Wj(vf~pz?XKzi9SC-J6~D%CkraHXRnJ#xT=P zoLm2jdRxW5>`P&nsh|kG1w9TPo|PW_a~ds}Mz9ap4I~Z*@K90e)d@A?i`{}=+E$FL zID{>Wp!po0^GR$?X_BM&S{qbvcwl`gYKF)_FO~7l&9oX%2_POM=$zsV(jimsq`Xo!(5`l*;p~b>^5gJ&#R) zsHd<#S_X>UyWm1DqNfJ&k=cM{?Z=IAzL;W|c>27f3>A^Zxf(MSQOV3#6h~pE zHlZ35@#XMiCQ4Q)ul)^M(K|3M z4P(Q$B&qIJC2Mb9^Z7hP+Lkn$H~@~@IvyUC$xz3ws*{x@pP5a9F2`Pr8S1J-?nh)o zoDEOXwlVIA1z2iT-&`Ar;YT7|ZQr|F8rFkIuJ)wdud@{C|8VMAk~q~qYlwC<839_` zvmx=iFIe9OqMSx@9J=F7dN_bC<6I7|3bOn6U3dIQ@iDHq~tEM#e=eHy4_aP^8m)(@%7fpT3B* zuU{#^51U8B-*Q`lEX4{sTO5U7-E&4$`vx5b+G>~H_pA>x}K~#zq>n9q)Pmq zmWZuCe}E9`@!S26HdW#LaIr}q$%4~OoIo~qDlxz)xWb$mr|f{I9l&ss-Kz{2+N%W< zjGD47K95x2#n6Xr%>7GA1^rJi`*9CVJXCM6!z3}ec@KgT(f-nY`HyGH)^Po#Qh9O_ z+4N33H5bcSz8n{*KJn&hN3s5Q?H~s)HLls$@CXVMXJe9kOr28Z3-)GbB&w&m*m{Ig z-_+PZ4lv1esan_WxlS7oiDx`C$w7)FG1mF*VYAa#jT8OELB(LRLTm+*pQ69PW)()5 zDjTrZKvl<@vQco}>S*mQ2r!Tt44=u>g{$xv?K2krbF49_NM;tfc?xGZN9bi`$iJi> z70ZAqvywM=;c~r;AB)fB{tVQ3%;0cc6awbrjla^|-lF~8+B)LO`6ouQ+ zVAGihP5fONiEms2sPjG@?k2OD_UithvFfYFrIWPxQyjeJOG!-lE(FXS9^z=H%#6+; zmv5WtDRXiir{LG{VOb=RyCUU12Xw0u4@*-Y(PBVRDR=pwBX}Ag%XBs#Q59-_LtD3# zXaoZ+TlsKlO6V(IE!U~trpNQRtW1BDB;#Utz>KE2eukI|hy_QXq9s}FUj8zj`R}XC z_wY*VNAF<|N28$r!KL z=AKK^zmBkQ?T5VVi$0B_z|D_|ig**a-i=UG7-LwJS7SIu;3_z1SP&LwO!6uk)7PrB z!PaPt<4qwRak9+YD%?leVPDP}Y`^+TH#TPBLinZYW#-SnG`0U!-=z8UhJqCu6;fDh zZ8at4_zrKswr9jyij84oCVc*8tNdp#R}M2;6&ABTT9p00ptbGHXZNQyKM61TC&Z80 z7oSci7r={LfYkk=3l9$^Xlr>mm0PC zWisv`Ql}is$`YEBHt9E}b2+w`Xoa>Yfw3K4$AuBnN3e;-bFY$;5@b#@5518&ca$?e zNBKnlb2vn;VeLmVtR4{?@H?5rNYlr_I{wKrxq;3*Yg6o?5Z@yN`h5|h-{&JGOVRHF 
zmr%f9uPC#R6pPNpv22e&7@9uR83Eby%b3&ekj~fO{szBup#Qsu+)gF@2(sv7B^W%p zz`z~c6D-9fh=NYCU1~k*s!0Si>f1_G_4X4deGt16DR=W2mMe{L2bDu+c@`v(nkruB zNY{``tF(XV*|(#t_&~t+6Dy`{l3=3GNokT~5q=MD#{lu!VA99i;Y7H+V!uVzo|7pu zhPODLeP^Kt{5oC=fx-ww8&v8UB5^rZA4@I_srYE<6|B$a9{IK?=56aD^7hMcZzWr* z_PVy(DwvEj!QiY|c#~}lRl_uioJyztOM`0I=vuHv@20@ke9p+HjZb4o^?)amY)O(^ z;F1B>Y{lFzvSuz=vjjQ!{k*eL<$pJv_YzZa)V+DeNi<(78m$ldL#n|*oYI!$vB%sx zmI#-tQ*}<8fz5u$fY_HX@aM-Ec(5>-jKZ~+JiIV|114k3tu(JbkOZhKL(Xbk7r9Z? z0q)KC9xo|L{hfefT4`hVA+Jwzq4+b$z2j)+Vwd#nw3a~~`71DETK$FDbEiQ7hNp4u z6K=H+v_hsiWRJiETe&wX?v9MI4<9Nc?isVpVAaimGdn~>AfTih6T%WY@d646X6$wx)-u^!-Vi1J&z` zW(As@e%yjb7-^?*^Huhw5X5PVHI|igd*3z-SL!PtZy>XQHk*zTgq%XB(XuU)KLmF~ zNyh}xYuFilafeMglo>WGX)P-S+CIIqm!Cc?vr+m5=YHqjdHz9KIPrdX>!r_==jsi< zVVymH3RHv;63K6ICi^{kMa6-+%?Yg8W7|TM=wi6RkkCH!qmO)fx}C+(ru;Hc_RRdA z;q=BHO5+}yR|hf{Q-Qjv%HLO!_;Wo>WlzGPEB9tujA>X~=;1vhP*LC;A|Z*siTHZ~ zgiNB+yGiG`EvVgrta)endp5o%lYBaj=bcjbr@V`c8tzZv{l|Tj&?Ayb)KXO3JBRgk zA-kCIX^G8nsR7XgPC9=Q;tC-sr%4(a=RO#Y6(GW#Fh`qITS!J(l>kV{#w>*TzlxU? zd+`B#J43Fw}PL13k`zCg2*0uU?AI?8jB{Pl;;c-MfiM z0UPdRmo9jNt};AY_ISoh73T}rodl_S;J}Tx4W5x6c_Dm}HND`DGD4LVcacb1`aym6 zohf1?+X?*?DZn#$zJZrtn|kG#db%ojlm{#v5=;-?aSQWo!D1H`#PZk6MhJA5wXUIV~INpS!C27h)V7UOF*gxkcyi}Ie}oZY`43oum9e3 z%dYD0R}R|I!nC(zN+`wH(V7)5b}$^PvCM6}pWaS_OxNM+^TW0kHv*%QcFaXUS+2!? 
zZHW-YS_S!-Q5Tk$fZb0xl0tUBg&gD`TuBnc2KoAJahI<8t7_UqnRGzdZ3=u|SHc{% zv0O4_wLrrI(@qjlQ%K^0`^Q*wosfV_>X@^bc8~wCfU`xFjfys^x4eq-8s2~~oAlSp zGad06@&v|=^5=>81g$%((&uk3lx#MgdGQr>TZbv>sryf8IvuB)stChFGl?s^X{#T0 z*;1|TV(J0^_40^7r52#u$uBaLY2`w;bYe2x&0jPVOz7~08iD?f(D{*t6O3*PUQtE% z-^|LVZ?V)vDx*AaRN(8&X~zyFPB$gKnLYyDM)5icQxc(4B-453MIvrTMs1QW)lnH~ zF{Iz)kIut}mh)m@k@I#KYtPUaTAt_uDUmj9U)Nj9g->ql<7RJ20t2u$ zr!z#*n8AUhlkex{MIJBMPL)hz^dZm^n9@q6$>nw;p$h!EMil4j*Yx*CzbJxB5vJG) z&$+3~^Gk$r@5by{4p!jfpegx2j1f6+>7C8mvmw%`1H{`S=+-r|f7-W*bI!kT=`p}% zN+yMDkj;)p3Yxh=VB2;+St;w!O~W?v=^Phzn)HP^Qxgq zd%c{XHY(YmHGaVj+~9{}`Q$}Q7Zwxj1F-;BAaT|;VrJt=tdVfpUhh3rQTDxAQ^6Zd z_#WEUS5a{;=ok1u21Dz2(UjPl--D7RoU_(5WMUu{1J@9fY%$@4-57`yfLcRT0hvIo zmzy@6IaJSKYnpPd6FuNrbq^AXr3kv=Y};>HMy%_vdIJpV5Vj>U)N4STaC;gp?5_d=nLFaY22tTirck2G|zB-rKxLSJbQ7W5U z+n~~C4;|%bjyH|KvdoLMWIDYtgUG)m+6*-d`#fwVAcBpv~EIVN4wX zyO{_{s#*lei?WIOXldr&g%_VZ5AP{8aQI{e;d8cp+U$SENO#vT9Y@T1o+-TMhcCP& z?b8BWDjR1Uak>Z9Q*~1LdAuS@-K)x*yMC8#2|V2lhxt7FB<S#7{5kk$j#|uY{ZDQeLor*F>X71&!C`NF z%;hIN_jDCTN90iWQBksH@vrz}LHcn&$OQ(oquu2=E5!DJ8I} z+P#TY+~RDfO`{-C>^@bO34u*P-X$c`0+41f zB+z*RR!*f|N{2jSjAwT0E_JmgOyIKjf#*u|;!xCr$`UZBPu}i_)V-cfic9Dk#1b^W z(IJ*#tit%?V<7ZHUdj5=#k7kbA&z>+TJQhMul+fH#9Xjw&e=c03mS7QcT5%9rEB#A z+iRg@*W2biE#n;MMvRQOfvQsulpAoxDah81&qDq%&pCh}%S|?t7CWYFS%3ZVMz!Up2C(*pETMfCD z2`*r&L>jf803+g3WnM*>q&G~7j8T8wDyS>{3*XrjNy_xhGt>qVb)hS-OP5QltxIaG zie#E1dpHjum6eNAXj0ip*y$;l5S2;5&yylvu17~g`kQZPTP~BEXLS{6q+pbB^qqVH zg5t$4kn4rB}uM5yJfmVaj!0NcGtZ4GTZN z>0QwBbGjKwp+2xW2fwD~&P#o=?stVbkQ<@yswUij{}DoDJEvN~59c_>soGujOL?C? 
z-7+d-Vf<-5>F>ddss?Vl*R|im_+F;)61LuG8aaA2L{l@@`;Pl;McNXy)S1D~gH{In zocZ^=i4LEeFJ7|LR2RAL0Kri-(%e`q5rECD>$j0-epT1vnUC4GG;UXY3>@0(n8B5ym64WfS@q4eJxF(=K*-O;LVBdbC#n3&)!0=3yuG6+1tTT zEMM+5@Ju>?VQ{9@BzxF#fd)Vlcj$@mueA^dWi|rQPRZ z^AT@-9q6Zh?M&NM5N7xN-P%_A`RlplU-)6~2E?g5delfWeuzC#B6cmf%M(4}Npgwd zihA62+a-XgsJbnIS5OsJt$pPHp$e&iMo6$$A4c$W&Vb?&Y@3z?j(JjBJAcg%Den3Oct0&PAJhX z<6Q&`$=>N49~g1qsQ40+fF^RO`so(O9wDzfN*=*US^SiP#}43nEeXnn`nH4Kkmq^oox_Hi(oR$uF9u!R#NRLYy^!SSF*MD%X#wTLTT)QTrUS1ZQ26vT1g@geuHC zPBz4v>A)GvD&*cK!2mb;{VkuH_GfE+eoSCr0RMz6l0CjysFeGjZ}ZPGdOzSzQPaiS zDYu)?eQ2cbM}Xu(RJzz%Fq#D+#T-i>+iFDGUA_rD2Ck+cJK9H8K0OAhesUZZvp;_S zAvdvcVC8;()A?pdUy#ZqLnvj(+9ga=KSdbO4s54+YT6KfgYwmf1BU81A|D*J@3163 zp}LRIVc@pm(%k3DX^^eC1a+LfqwGG)Uz;J8Y%vWBLiD|xHDJ8XhWxy#fGzJRzk@_kX8g5juApO z5mO1qg-L!?T~j!Fg>qQsv=l$c*$Jhco77;mZttOptCTdt8HIJgfI-CVCJ@C&^}43d z88Tj9H(My5g8KgeihYKq3r;>k!3S>ga={S7)`KMSPH5f4yPup-P7zG4vCIT>j-~R1 zmfKSxwI?OW=rFUs>=)&jPJlYZ6YzvwY$HrRS$2x+Fj#@N3z9XXRlw(ha*05mdG@R} z*u$Sy!5CFL>~iqBPRxkmYjt77rlCSXd0YwLs>AWMItMGA0KA) zJx+TB$C|*v#Nc3(pOz&;=X;Ymg`fZrSX8~|v)W1X<0pM6rc6Ma9C@=f{p2n>MdB{H z$P(Ee7h4}jtY*X1A0vxmD!!I>I+)4}&ib0|*8@~Ka>gl#-0q5Z2=AWpzc=jg8hK&4kZe=bF1? 
z1P$IttHu9Jw&P|YcBp!2!*5BX%Le(Zeid;X>n2MQYnH~QXJrkAzuiINIz|5e`Hf=% zD%}D(v0kBd{pu$IH}4seC~z|27m8oiG}29V){rv#1>{%=IBm^mJs^wCynz$8zO=d; zM}R8CL)p`bRj63Zim3hL{vD6DJ)o9g|1rmH?t!3Pn zdE2^>E-=2#YIw11_4X)QD za%8j%!h=YLLx~SeMf7L)mqGX%BAU8K3mJ$Z$5Fcd+0WiezuVXl2XhTOqZJVA+&#}* zPY##%1R<#ujxS6`v^hSU@HmF|07piM=Pwn(MTMkt3CEU3E| zeMYq(>?LA@ZJt!K(>?g>{VhM*Pmo%*IuBBVIE$TZaj_wPHkuwUKcGTHgQOguig5rc zO#v!53{2LuEo7Sdp+7}e!%}3AOTs)Im6N*1N!h}_;g}?3^$3w8jbB%e!bdHLT%36Z zHCp>yZI=w?JvKOZ8RCdvdT7jY1XU{by^|lFFtCo5H4c0PbQ2^)RK5YtQ2D+N8_k&; zp1@2AoFyw+NAe1PUs@71GL$2;JYc}wxol=0ibPU)D8FTL2a;0|5~Gn=&>R3g{bWrF zw-7~=>8x+0*si75Ogq|<$`^0Tn*tT-V^Y?E>^fq-;7#)(48sBtF=t2|r0Jwi-7tsw zj_T=iNFN$hX0$A9RYH*hi`c5Wgt*XCQD5ovdQsc;x$60!NVk?cY9eF8_I7=eTKiH=L~UfNgupNB?horOn_zZ?y0B{G!p3&w zj#XP*a$f_5v{$P%$;FCzyW1$2mPgz|d+Zne#O)I1$kp#q~IO&Tc< zt}gUs68J#MMW~u${GXEqrANI*q3nu*1NZwpDD98vrJpY9C!L;+ceE$m*-O1+5_t#L zaBGVJWY(S5Kc5nzGQ(%fED!H#OGOF9qDazn!w{{Fj9;_}Q*rKX*t!l!+~o#`cQYYO zRuXUtXx@u3(qmYLbd`aP#_e{=&b>exY+IPp`eYa$N;!%&CM2Th%Y%wUi~Ljg`X%Jk z*48d1n{9;6PBAvN!0$8C(y`ecypf?Uc>x&iU8p0v`ah|cem;vp%kIZloH+b8>=RLw zlj&HI?NeGa>{f%CkMCa&-p4X}s~GKu_w$lrK-IZv^n%u+iwKHellm zN}1hggAza_K?;W+D0EeoWOF~!CCkIaJB~=|l9~a!F~m^4iBScOKmVM#+G%Gt&aZ4^o;7%Ob-EgGhJJ%N`BighWW7qAM8x{vO{S%v~$Q_Kc7ZAo%!+3 z_ec2!!!XU)M_9yc*;NYb4s!2_Eb|yjx~H+wez@Tf8de-;*EfHq>gNT2Iif-wRZ+{; zWZy_u6pv`Q)g@hgjjeY^KQIfkxpmE5GY74x;ry;q#lmime%zK+unxA?yT(?}%7u+s zUodc#9=D!P5+%OOW-N`LxFLmN(!ALJ z;lD8#D#lmU+d2IF&irAwZ2ARAXyPrJK1ODMMVPQ3{Xi&bF`>~*N9(J}QFUa8$7W1| zO!re@=rE7-TGH+%@@9-C{@b4#86FX7!(!*Es|Ae#gJygoOnp)#-D-Mn<)AMwTmbXK&VymkcT>* zMFJlNovRwJ#5#LetX0s2ddcFa|FK++PdmCx&z(kY@T+(dqYgOWbLvDUz1wh^MPb zmlw7r1tRhtI=xc?PGY`iHWz@e ziqbu_l89|_({fTnu&m4 z23Bl0)0T!D2)+J9J`_1u5kwZQr&EJ+rcK<}V$)OeKy~1m=pA@GYn%P zOTT`OLvh%{68=;H#UI^UF(HN$`*K`HxJ=BC7bQg<a!3Bgq}KRbbOsXw=MT`6n%-;+DEI=SrLUz2zo-goHZ1r9VjHrInF&4 z+HsuU%9G3lBa0&=TXH?N%;0JgDT8t9bL-VgF}_4~99vzLK6-U5@v85tA=Lia%sW@% z90HxekS{tT3X(D?s<_MU&XlsTtn8nCMX@W<-0T^#8D)wdZJ!O$D*0+|h!hG0@3`t$ 
zlr^=N=lS9zcjHLwJ#=fwvW=?wslCKgBLf9e=4!DLUj}VZz&@2Wxz8HD&9dF|_!RW0 zt=w!?QNfDx%D*`GG&+SoHH~hxe@Ut~t)0gzZ5W2Z*m|Ru5TcUfc{eZZW?RzR)@^OY zb1DRuc*{J}1VQTBH9KxqJ}+;@uRNO(Y=}|twsfp9`2b*SY{rW3-UkXHa+|SI*jn;j z4yV8zUpKUv^BHajsTQzYDJgZ-@TRc!Hth65BE@>eY??HxmPZ;ku3rz%y0on13+y}Y z(;t!_JS|Ng81__2j5#K+i!QWqKmk49+jg(_{^fuYV7#ejF~q!@aaS#g>@8re^zwX@ zn`E|<|Bd9i49-#Xv$%8QlVMd!(Y=?}uujbT7}Y@4T2vYk6VB0y%UIzBp{DV-&leQg z3)o)hWxztOWh~3@vR<;jqHGtvo%agUg1@kWAV{7}N(wf}>;|hf$LR1bDUwmN`s>Fx zp0O+}7&j~P3Ls_67@ZVpSbONh2TR|%^Pr0&)zBK9Z~%o9Dxz{}^s_#@QIC#pr4E9QsNaJ5vjCJ=c>3y1HPh7C~-l1Z>Yb$)tB(|KKW@V2&rE> z+TaM6>>Nj>f)xkj^334+k#rGi&dXjc)z6Dw8WdNtTXFX(N{qqxt3eTlI7t5#Ue@lA zHsrz zZ&9igz)d^N*{dS=MmcQ5%PP4u-%h$DQ+Aa!>#StC!h7MNX4)OSoxR_cest8kBNo}# zkM?ifk3UfA??bLX?IyEnB~9r^wIY=;!Lj6F=b(+0{+qbg;hx@GU(>z6bsM0LUi{VM z@S^aKAG-Sn>hq}2uzx~neRl0r5;fyxA>?JlGrD&^ue0vY@be)Z=7>6Tl$f5Q*IU?= zM=Eydj<9jrdQT{7g~`~#cE@H7d(VA4;TVY1`>0?~qYB1<7wa+^ItVRvUQ+xE(`p=9 z3P}3LY2A`W^R*#wk8slOJnWa+fYE&fQ3d~$`y{o1u!6TvEwZ=%*&&ZZgI_GwPt>JzJvMz^&*FGxm~ zVR#D{ps+9$9k_s4*EGcvpo9#rx~x1uy2cS!nz$!5)t{_1T2%u(iwaN#CE?#wBFesm zPS8!dafx@YMN?kl+Ny8H!a^tQ#-cmYq_s53gn3w(OoFZ>;^v~!(e`4&%*cJAm;5hL zjU2wFtC?}tQXy|7?4qlXJCG*1V{4iU+vM*|3E8-CqtMfh4D{?l@1(3ds+9+mZc%y) z0+ZTAMMxthst_4*OVkpj;#PF~k*2SOUpmp&CS-Eo`_#4xmZQy&c^-jhqnb&OXKW-E zM3iOk9n@YZ7jS@weAR36p}@_l;=7_i9U-4Qi|J@?epY1p)0J`kA zGORx!VY;m7iJMMbT!J4-FM#Vv95aADEDrX5I2=M+m|zh^d1pkK-{=Zd+*i{7`R%&4 z&d!0%)X67~PPfeYhOdM((9S0E!9b;@irMlQ)|`$9N5>~B*>IB?GMvu%3QtYNhXzL1 zhh?SG8Ta~!qeo{?;|9yap9?dU0@n^Z*DTHdrivdT^_BohaxGru*|gJ4&W}dhFb8ua z;>h!Rb(%(ozwiEL@|c90Kia;$USmI&E}GZ5@a%PPK%O=C zY;q@cQG;<&jZEEkQx~-!tYv#jekbqk`3oY7+KB*MZCobGxri_m{%CI-o$^N%i<221 zl}NR6RyaP}0B=TCZQZMjF+jV1~RYaMCB|NR+~e968limf-Y^7O2oeW^dV zz=X`Tob+MTO$#Ah-Y>0*wUpsm-R_ zy#rQ_l@lzY`SEUut9U3WMy*s3>0n}A`3Plr81xZvq})eM&tlOEniu>=P!4c_nF@O; zPsT7rADar6UWJH2vH(X@}X=-<`Y1L94Tu6>$dJ0@9HNe_&N!Gbn~~{e!fOAK$NRkcr^u zl#w4a3}$wua{^H^i5a?_)opi9)w|DIBV_z6^ zn@WoGQWpGcwejzpOgLe^Ig$Unsh52h%BkOb_&=L%`Pa^>u0t38XG;&z%985d@%_)g 
z_vXIL%CFV_pACJ=yEx;(MY++N^v8zwi#pj2JI?)|bNSx{#52yYZhH{b?@$b7W)a>pxFCW-kAO5a7K$%X%EYw2sIthPGx{{W}{3;qBA literal 0 HcmV?d00001 diff --git a/docs/source/assets/logos/vllm-logo-text-dark.png b/docs/source/assets/logos/vllm-logo-text-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..959a42fd36c72152254000630dddeef6a84c62bf GIT binary patch literal 88342 zcmeFaWmr|)`Zo+qP(l=GQ2~{11*BmqiiCu8he(4U-LOy$kPrz)x)qe}1|=mVrBO<{ zyPh!@!h(JF|D5;pd%4fGJ%btJ&fi^Q;(jSBeHRah90v^z4NqL`wj3H7Zax~?2^Fl9 z;EFG`F+_$xRE37YPEPDnR9%i0%Qoft ztum+vH(yT9=M7?+lB!9Xm7Y;oN9P+_W^jWf4t$z|N}ZbF!Z$6n2c+niupU~VXS7|O zCe-1=U%iwb7C>c4wRIw(F%-jq12O$e@_9NY+$_0-it6{3NksUXEB`DR9Mx=+)_0ug;Uff1bI&!;610^3?-F{0CSb z`0uY)ln<2!ArPZyZ+(joMd%{VoH((k@bqoik}%_&8QyuTkr^`v4V}>z`n!bba z|M41hXa6;%T>In%pw0MXtc>*m!w_3S*94FRj$K2sCjjljCu{7WZ>DnF*j)dSHS`V# zJ3G4o+rQrW*H=|hukvu+IQr(1JAd93V1pEOL~;98L0$!B5yBB*J2bHn&XgBvCmI?Y zP5kyv1qbx$e(WGkLsznS=fQ7A8Pt*?q~al@b%8k!oIRqgM%1e0>bcAffvM)4VRr(r zWd~fex`myMXWmMY$=N;{&xxptx5WS+d&zS5j@Iuqli z#*w!so_kHS;XSuS!C*;Q-QzKq!NtA3UH`R-rmb1m`y;!X3zU6_r#6S00K)7mqcD*^X8&ubZ`seX!9I z&=69(a6XE9VLdsBBFXoQ`RH&?JW*aqU_95$;ZB~N2#Uy@SGTWDSkw78wCd%`N%o`# zQ8eUyxzDFY6xupmxtx)0ND%5j-da_*Y-CAoZaukLH#pimQB`J>8Wb-)I$ke4;WBag z;if!e#CF7hOcno%ij!Wvi*UpvYQMnOHciVdoD}C@{`zTEmaQ2`p^>=r>nG>HvK@mf zt2yCaHlZld(0G}kaj_bi=TcG3!8PZ_3CX~}Vt1Kpu7}(4b>xP(*KLjdY@?XCI6B_& zI{rianoN!iZ|l}oaXzuYZ0f5l;(WVxTM3IpS4X?02iW{Wo1!-97AzgZmbIM9g6i?k zhSrb?Iqpp)mDz3o$k5wMy7v?M><1k?DA&Bmb_J7EBcL?G@5_X<`le`jsbgY;$c8za zYY`LMiHQr^lA8u^g`!Gv&nB(4CT~~UDd|q2!YuV$FD4DKT+14BOeA7&wrOO}Wr?eO}ueL?EeT^-a4%r)1-7Q<3 z>0JC|^D*1KFgx63c4SbpK|aWu;Z)SBF`iI72e!H|!h*S<}YZ(i#RGHYa}n81@hOw0-a)6b_5!B&EC; zZGgb>&!e#J>eL7Mu$4@?&TUEXoHM-I%I3b|;P}M(hUYm<=+SWbawmJ~*tQ$#0}&x# zpMaoPn_b@iLEg}H=)~NvN#YZtK5+#1ZqNI;n{@9T zf!q%-zm+w&y}=Ofe9|8H;-A2Z$SiL#MEmv6M3c#889VB=NbI`LJ3<3jWzI@dtL_|l zu$&1)I^4+i)f$dJ^Wa5tzWE+MKR?{ei*Y}|7hm&?2#1=cOZGi|yNty4*awn(;-^;# zzPOGZcpUHH1Y%{E*1o1~(A+|PDYnR@Q_^5zt}hDxR7Q_q^IET1o4RhgBuCRU_D&b` 
zR^zvm^z1+(#S%Jofx$kTTWf4?9aXaghx}vRDnGeKRZlGj9y%Zn7M2oJyY4rH9ax}i z@LkMQ`;D}W(-3<@K^}Osx+MgU+#4(Ma`Wwqj$G@_eBUi4PBWmkyQ{D9`@^F3BR53!+5Kvdwcgr9^$X?Upv zUD|mJYWefqU6>@fme~080r0zbYcQXmJ#&VbUjM_P6-H#sDeqaSC@?_oa-ca6xEp4x z&lYb;vxVcnf!Tm0#Y9N`(ZeQ)DIAY zREVE)(<(yIPT0Yad>;wZ2NcH>Ng?FA)#52De85!A$Rz-Rp z9mY$d;`H-dFSl0F<(l&nz6Z*QriTd;u}LJ1ilg)e?iJv_kXhf{)F6^0fEch;0X0PG^g$S!%TQd8SP%t>4o1hw6(+G(N+cEGZ(8DZlG$ zM>Iq~fCYU@Gk5YbIrLC+6x=v|WT|I!;l{&}of2nO&5Q?{$|AHJ*nkUZHvGB%qT$bV zZ)hWSRs9d)f{z8^1;!i}aOm5RBBS$R8I5PZvXp)Umu0B{6@)j7BA`N~LB-&^9`dxib3B!Us!JdX-aE)r_5Iz%5xxgnDk@ z{H_mcVRDEFIr2^D4G(x5rVIVRG6B}14)+%dtSy1r`ByYXHPj%n;nA?pA~-Ic8Kg6^ z9FI4JqEaDsXE+SLy|C-goSEh1shNJ48W4>&q8wdPvg92Tvpc(OK+kbt2ShB+k0Gu0 zdQ@CSlJO)W^V6<gXZjgJcn<(J8q=99j)Yi=PgNz*q-TZc+XoEPK$M$)#L2sS712 zS!IN;;M87!u(F0$7w>85L(Zem!*1kri}Z)1v<1l+ErI@?^5fGsBz8T4k6|jZRzO0P zlVOCnbDb=HWm|HkU#D>6+0;O2M*33`54ayD35%Nb&zP8sfs0A>T!&J?aDfysPWANir}6Y#Q(Ofa z{e-FXxp>T6GDJguO4s-9p%hevKnlay)1Lbg;82!e?4Uq74OPic`*zC*L@Z6|tdItY zfDAGoywa(xEy&iBZ(LQBNs+V;f~2<2L~FvEGndF6tk>Vw9@6kApTIP;6fW_(d(447 z;Ix>czs}VxzDs=-jZ|F-prU57?=P9m0FungNrgYoP)rYY43s(<*p{qZ0ix)`tj9EV zHdm&Km`qcNfkyUKgFpkmMWOTk={*!-ppvh|MlUrgO7c5GdBLOJS9MC5q(31uWh^dG zmV26qAI;47Fs_KQ(6Vn2Z+pO%FnW4>Yw%7}f4c<}^M`MwSWLHbR5{_)yBgowZh zlq2XeHmX7b7V!;BW$wB>4g%@!5t!laiiSh9c)~XG>uMjg_H7L_2FtAWf8UWHr?2Y{W}s1Qlw1ppyVhRL{} zW8-8~vR-$+yW?z#8n~sC4J%=BEh(WiGB>|ues>1NHU^E(6KB~`0_FqJkhF+2%g3I_ zmcd43Kqh1i$N-kr4is6UCht+WM-q5-pGsU}f$Vw9zCSWWqnz#sMyshA(l5`TaWqm zZ+e|2k#i}mhG1JFnQ|xP;;;s&g1kd)DW_b@G}zio0F@7Oy}(~b`Q^LMQOQ~${QNqs zpPxKq)9Fkx463s~Sg7>|WvuFlr#$=G+gBFA5++Ri1CeseJwl{j@v}WvSbr~KM?%y^ zF9Va@*TL*rPJI7}xiQ?)(LMSn?t3)ED=f1@x8Wj#=2Fh9-?5GWWCt=yS;qN)&}hU6 zq|T@)F$g;e(2Ish1!Y~P+WE(j%Y(240Al)m+PABxhaB(HMHhF_-(6Oy`-IDLStYj7IU zatjZrAh?t@w*453V4f=1PJqh5f24Vq=PMCN=*2bjorNrL&`0y4IuC8 z>Wk#C`y8;poyq+UmFA$Q+%@S!LvsP8F=8x`Nzemn zQ62}`h>8g|f!TMd!7zag+2J7yu~p_KMNM^}7i-@FH@>bE!1s*9u{Az9&3*8-4NlF9 zt<4QigaG!pC}H>QA)UdwaR$1A1AhhejY4^ab%I>aK{(s@1jK19>iZ)&v*(ka;6qoU 
zOM%ulH_4H4AoD8F+H`yDx!23H{an^7le0O#ZyPQG)38}~DB_+q8|Wi+yK?=|lc{9A zfGaCTPwcmZ`xGGU)3>yKDUjL+E;&f;ra{?`NNOJd>`fTmtLUO`n5$(w&&@6>_C*vA zSh^AtU`a;j<>e)mmCco~u$=L&{1|R@Xa^H0N`9jD6*-npFvzKfrlu}M(tF>X zAO!s1ppmN^dmd&?j`XbqF&l}=#0jFhZscL$MePJivijLg_bvRic~G;4*TsDqp@Sje zY(H0OTKROL0)tr=O{w0?+o%tn+5=`KeQ9HHsV-yqT6FleR%E8Bw08 zZ~B>HTDV21P)iN9QEilm@Y|8J#OqLqLQ%YkOD!I#)!um)d8sxJgvY^^;7ia?+Zxdd zjJ(xbymw7-xRkSL4;C|p$OsJG_h;Y`meab5XV?`$OQ0guEaVVeyP zE@kU`JAQE#RkcUUL4k`g0m&oELbV?7z}fW%IN7azmDz&e*b&?{1ThkD83afDWHKqN z(J)hzyP8uLwMo1;Q2YMuc5?Q{sXFgZ=}vLZxKRY=_JFEAO>#M7Qi*EFxSFERh*EVLlc72kbXgfEIr96ksMAex8Jgk7$&O zYx1(q)3-V|nv3jxEGlyTMPFzn;Q?k#PEJm0r#8|2rLqCKvV zNxtWOxV>}kS1p+E| z6NE$8?2Nrkq<~}lC0kU0lf?j%kjB(oQ6Gy?eOO;;lr5X3uI-OV4WUBxbu}SUdmI4Q zqp8tcVurnq>eXlO4%>C;(2D@T>mrDm3Q5)|f0yu8UB)?_GI_w|K?P^QSXPJfVpIr0 zu>$bSfLIu=xqIr=)~t8z012ooMwEx1y}wa52Xa!Ah39q9a{e(XyC|@94OTM$as3#@ z!><$YhFDPN7a;M=07`XKm|6y-tu_GVG7{X5%u=ua7et9-;)$$Ypn z3er;Ar`Qx$NXUIUI9I`}uQEt27Ig{S3pj<4WAI)IczzN2V0kwHv|-!AMTnCTabN?- zj|MYV?>Hxl_sg5xQR7{H9by>QueOxLOkJ`bZ>qZ|07F%o0e8?(eNj#IetSrFf8X;M zfnT2Y8b&X$BX<@9h>m+8;RT7(1oci?i$t4suFiIvt=$E>nlb07WvHIRW@cvEEF{?m zBdgluk3`l)5Lvr|+H{Tya;VIMMe;qseK0R^Mo0UH@w;1VbFA85`?(8<7ci;D`*4`G zGd82xG<#n6Zc`u03`)?}60JPPw~j`KJ)V9tbCUhNCsOy!pmMT8zXzH?bQm6C`WV+M zzS@_obAufA#^XK2H&3z|!7$v!qqDM>iYpK&EOrv;1P-_Y#wcgQ{xP&GNlkIoTfk1; z#Fn;Y)19IZA>y+90M;|izJJ7i%4GTd4(T0dyGMJLVThFc=jx)b2_002S|!Iz?W&&H zvJKj!nhZ1q2bhYu8C+;Su>HO<5Z|!#3$@22u24a6OMU_>jWfy5AgmznK|_%@`gIDj zJEvCdK~G_IKhcxmGzeEDVrc^dox&|!O+11>7LF8p3L=T>xPJYGCjS95Lw7^HliIpj9rJ z_BpTpY`9TF;?!W<2s`ivcreVgYt^jJcB;u((XQ)A#JCJHkXCJ`Byys5Nb?r)i#Ku? zO{RbC&v=7Do@NYgMapxF6f`ftiOn>&Pdz`G%c9lCoAQ_=40QVB*k6k5=kiTk>b=V-wRy>=Zw}XX=POq!AwPbo13eREX*EX zP%%w`0n`@3mW4+Y2l=eCa|wWKMGe8&uTjSTz){yW{T3e9yrc&J?&6YrKv@BZ}NdY3v|%@$7^!Vfu`|> zpC~Fe{iiwr7?Yu@i;>k=9s#n9MYwR;lnWVXMzn`E@dVu+Hbz7Bg=Np7n5>Nksq8$n zcfbLeeR}2ZFajTb$=H5pDS*tY1)#7SO$;LVL)?Tp{c{nMTB+55x`{yQPXNA*@@--! 
zRv{Nl9cAC=T*z>XPhd#I7n8C^EX@N9^e3&;H;y#WIo9cpkmqwODPzfihvVafKKp3F@$$gB-AH^MN z5G-o*%b(#*#j0kF7hfLM#qo#hVuHyZ-PubQ*K;aiVyIaR91oiOp~qf6t`~R}Mj-v8 z9~!j#wM8SC^>RrL6hkChbRdt>nGe4gTKXuq_Y7YDHTT`^1zvG+attdPcK4fG7dmb2 zM;YnrP<$C>1MT~ga8v$wzUx7uyU3R;r?WH`*{ZlidRDPplN$`Wr~|Z0T>FOLgZz`6 z#kweGClUqd`Onmdi{P@>hf)GO*WWUCi8^ zP({~)>!7p;q9!8a)!N;o=LR@k-P!1Wo5Fj#@l^^kP4x$N1w#$4hMC=77jFWNXjEwgit9lH-0ekl#YH@vE+#i$MOG&p&w(Nt`eo>IJyD|_O6~djj>K&gv zacu~r5PJs8=XM=jMdfB4W^fcr=@VIc=z{;cNLB)VMqKB*q&~P!>b|1vuhgEK?CZ2P zQ(!e(|2g{o>asKC5Mfy+UW<+fS^;mQLBGYoFHQySIvNUektUJ_-a< zPQH772G7jAb@^TR>OkC@JEvdZ)pw_*We+8)0CPKvdmk{s!TwPj$|0y9GtYS;WBvvy zh{k2r*O8@;JO~aSe+LI#r;(u$UiG6ZT~F}tA*}>b^E*UrGoKY<>e@$jg~2n^Wn&| zC!~Ts3Fl)RENKXlb^OuTa!mb>_?B{Azdx#!GR_O$opm;{De>;utY>2NzcH9T;qMci zV}{MCr2{^-AUt{MOmWEqhoOD-DGG;qi_*p5X5lyeSa%MKa{NFL^Diq(g6}bokdt*l z{OH)h*&|1T%91P4QH-oU?>=@WF@$z%4c%6{4a?Lu$32(H!JN5Jry{c(Q(QR=BkW4L z&i$I9l`eCW`aae0<-5Y0gWqd*x8I~m=}BgiMWH&ENPO(J{!Q*#Uo1!OY1I%s*ZMh^KYVVQ*QZF zUJ!H6cXA#zL!KuiWekVPNNY65j!zZ9d!s3cYC&T*5P8|V*x8YOOieZ9Ncjii6A{O+l?@1iCgvG-9@g%ixj6*()kK4fWr_Xhc)HzVjXEBaH80T+DdL5*MJ=?w26V1%Ai z0H2ekC`k)L7Q>P%&kK3==|Ms(Zq(+IjLYJlpLvkWnpWN;b58q#SnWFdkj$$sWC#}Z zN9)7qISe=rGSLhiM_%vkR{HPm7RX@Wf5bnio-klU09@s<*zf)uxWGByxfjEPbrV)Q z%SpnnzpkzculqMBpIfzd0(gk)J#)7InqgBoc9O-<`{0=9oJzV*_wK%}F@sKzmWC(U8 zD1a(e&~G;7MLEsK$n1Qr{c*Q`4y$X$>=g;iBv1I@X?@Y?UDyTZ&b^A_l(A0tSh&G@ zU!4D)nzH4pttO(`chdLx~;xY_V(M07E!yK&xN+S zn>!w+7N5ynNxaaTC{^TOp|$je6r4L`+ow7-c#k$r^4^iv6W;aG`yLv#zU}-3M=kx( zl`BA%Dv%$D{of<128gIBvDd2~yL2{W32%(N*|VJceri|h&J*J7`|9fIoILG&F#`hw zA)W+c@8OEDP+NPMix+w3b3gKpXSaQKo+}&vkQBN3Si0>E$$_uKMWGJ^#V$WZo_iiL z1bpU295@iAk?n!h4Q~O3Y;*&i12RCh5FzNwHoshw5m-n_{C256ooe*cStPM_`WxVcq6#r_^sv9lI`auu6s!;5Oo8QW0~aU+H_Kf|*LsV`8IVAQVkmTYWNGe(cl&6v0@`3C5hI|yrTi-4GNc*|0B_t} z8yM6Zf_4!cYTfsYh}#(7+kBZP1nPw5?;n52%=P40Z}#L|xIvd+$XG@J{xZ?TLkzNy?F~+@YG2z?9h9a+)(S%_`fWNsn6-oR{5j zT|JgPlam0^6>y4jVD|mslAHm*Nar_dYGwj`nvP!^3B22x>m2gyJbbJ>Lu`~A-!AOV z&PC$U^d)}jF0xn~ZBR~A?+B`slAyvzaGBotG?-;Lo(-@3Xb+Ua;V}CHf|p 
zTfD7zud&Q{({yXfyw$diGf%3GT@dXY2jML&-|i}`3wo!-#45$gr+ssncOI&dQ>oO5 z@bL`~Z%y9e8{cq?8h3QR^fYyjx3kB)jSYrH<$?BJ{;0j>;Zs;#cKZ2QQuiEzwCFEe z94Us?b~8OVSxGYLld~_1@>2{|C%zQH!0~?)^P}!qFMC79c{5C=dtVb0oMjmEF(vMz zp=02~p+98bqFv}C&eaL>`ENA$Ig(BBFdcrbKBiUdkkN@Ijde?{XL|nk@5|>@s65K; zF->-R#iSzo*NP0Y9Y+>NnJRm@rp5}2?2Y9;KA%GpgZ|jQTz=BtJ^2mc0SABXAWq`s zV_cdWy3}1CV;1^ze!EW0V zbZ54-z^AB_B+Bg$CaqNDdY<1Ue(!2yn(~K?0!nh3b1#%O6F|;7;KC6LR0#r|2*$%J zsC&m&a6Ont`)C%X{U?UL0N3GayeJyksxRj!K4iRpT`gqt+lvB!J_LUIX@;SaftqLD znEa*yx5WUF63OQ}yS4jixW1Flx`C|`$&`oEmT$q#EZ0gIIIjG|%0rUq0}Wepp#?}? zja(_8iqKH<v~tJrm||W^ZV{K=&Q6tUn<{$xrkYooofDHQlp`Nz*sNtv~O)vOi^ymQAsUr zi1vJ!bzQDTR8(|T_9j!d2Rsh*_b8Js{CF$ufc29I7IyB%%k%423J;$eY!aI;@0`8S-;)t%{#xbYILh1b z=e>Y`yT;Uq{|{7Ad3d};D00p=HHk%mbiDJTXDeP9 zLjXH}?}E|mH*XeVlgL;iQZ}5eRcGtXZn7(K$Im^}dfOhL@#-d0DCiH_BtX8S;Ioa_ zp+b-biB>`nEq8Xz`OM;a8M@%YR6f<8M6!=$}A48v$M`Kk`?Ch^>ibKc2FAsnp;3t5|jdfld zpWBB<9fq{qMmImd-E<&x6anGaGuLole}A$k`7%eI1LjMNg*|c2dOQEx_O}`B0Dk*f z62>#yO{S?#!$)jw^a^bcZt;pj@5r?S#2;q_aT3?^zVefa$!qDUsneZ{eZ+g3{+Gi} zM+mcx*Ua*(=Cwau;{2qG2=Zvp4l2VudC~-&@^pqW*X=C4B)k){<-d4(pKIi{#+W~b zSU+)h|1U)Yxv5EboJLxvPMr;r2v<~{Ab3`dyHk0^Ejsn%2NFazHZ`@zy~rE35-HV< zv60`mR*7+miD}#wW$xN|9{NwT0{V@7Cfm{E3rARh6dDJ<=WQ^x88wD(w1l9g+4rbf z6y8B7U31P^;~6_UmYwsKSdwW{yOcqt`wX4aI&gWKL<)weO&jW;WQ{ zeIhJmH_eg3E)_+|dtDsM--utk4U04FJmRJZ!D(wbgO!$hRabmNLfskP`mB}G5>y)> zu^9C^ohL=q->6Zz+Ba)CKWtWJ*^zDTl7s>BUO)zr`nuLd36XsttAG&`e+`xl9XD69 z#1&%GOzXn6w!Xf1@!YxZU&iaiJUrYnZTw{}?n!U0&TOlDy~<>3W>c|Qpa01;Kwqa$ zO6wR$GKTUX*+eu-Zzk*hWb)&hN76d>2N7%e9nDs@p*d=vTuvzTcnQ z%n07SNh8k)TOoPj?d{6RakWup4#9Y7{%fcvQl><291BWzOq}!zO@xnV-T#bV zXs{JhdZu3Y2OEnn);o7i3~%e`L|JK$u?A4FZ37Lx2g5!tiBIb zpVxKVD7DTxX$dU1{E ztVy?g^edXUdM7Nh(-~>c&SIt1VA($Ztde2U{JpjH&A@bP1F&!w=h&a13s{hSfo35} zoGBZ$FA~b7%P*ma`mHUxFd>1EtnJaozmj(C(tZvf(njwp%$Xb4D08bSQ>+!(0>6sZ zLrX2N`u1Bxya2`|4L(W84@Nq=rEVuYxySQgB}1RCObxhK_Q!I&%Rlq}YJ_(vJ)99; zh$vJdOyGZ-ySxNMZQHbG@%}zOd2?J~$EoiauurJ@YnJRKSK|I9&bcaVPdR%Jcq@6+8vT7coF;PhuB_ 
zgPX-nk^&ls_O76Mbk!XUZ>caIvf_g=UHH3qo2KtV8B`s)ANAoYz3((EAl&>l zr&6~`bq+;TBCffw2e~??jnb*=PF_kn#Dq;|4-f8UXgOJ0Qe;$met)kEyOE!_QOPyMv^K}a=P-B8LU8rF zm-qmqzaz@Ih^We+W8kPzOWZz&VblYMD@B}n$4``@ImyYj8CTp#-<)|~<*!&PY&#j= zz4PF~uecFy2I4ESMQ{lT3An*#co_x;Mmt4(38lzl%+N~&&5To1%PbTBl;CN`MTxoO?L`j6N>WC7Km&X~3SklRyvfk-Mx z|K<-cy|6$*7MdpUX}?Br`98X+ilZIOI#GQ7y!FZF7k-J*NJ(4=JVwjB6EuC?mRK?H z{(c2V$9AW8Umr2Ar_M(u%L_q%hk%N(;M-w`j<_#V8VOhb}VBJ{;c7A!6d+()o@>eOI^4X=Qt>bh;;$ zly)QY(U|C=X#@PxgxOy=tI1;jlX5I zN&?*`Ds+L0kkUKFT5|#iWs`qjJ%p0TqH1)rU&vmk8S87Ao7Yq0o8wBn?(2z`Uao4T z$401OR^lzhfOh0u&Pk)`FMbh4PZt*hVZJ$1MW_IRLj5D)yna11i?1k-i3{i6MXtq9teUslf2ciiQp@I+3F1;+M$cXcef>6vHN6&ua4zAUcLKWGk7^x)d4wlS?`}+D@G9zOoZ!ZYH zDKvZw%3iPZ^mL`k*}&N#X7+gPbzIS59h$tMjvj04rO@Dz5UrKT_$4WK`%nj~RHkVocjE(gUDg2S2fB9BB)$o$sI1MGt_T2A&{sLnIr^yuC zmmRmhEeva8@#nfPM#-%*j(Um9t=?fpnRWs+7OW9m?+!yU@>T~nSzuD0qNe%jc^>}ys7eW}_ubCE6gF7&r11Y%TH zsTfznG(*V`Pq2HHmwj8}SAPKn!Adx?HA zdTudZZWDi68x;H6SeExg*%V5yCzE3q##8I#nwaCstbzM+*Nr%WBAm}*nk+O5~TDX?Qy0h$3olf~lRA3Z}_% z9{?r6djW=rlkO#ZT3?E3lK7vViBgb%l+%AjL6L&1q1c8v1eBJ=ILg5xNJ~)=4Xw=H zJQw-L)jdXF((<~xeqSHF7RU%B@VmqzLD~ArzA(@vQArW^xBS>QjlOv7E{*oo#jif3 zUTfTL^YOw;eUu>h$H`csCN1mIz;v#L4d0G%3{3Y6^Hp^XzqFg~=wfn#7x;voOjtgt zZKo*7lx`JUdVD5r&(-79u~#4<$jYFzPJX0&8~;<2a$^2_&&r&#c$k829qhyjuYzt> z3pzTwQ;exL^@=fYv*{NxSq9RkX_cMi?XC5SUc{%)&I-)xFC15Fm$@!`=Z=?@--gcSA?cAGLnTtWjOJgk3Y&o&@#-2}}czsLFPVfvMh^{|rEO8F@h5i_9J!k>Q$&2}p5qf0=0(n)VN zY)~uoec9rj&;|?V>5}iyn#bQl?fLqbr8t+!7vR(8E9?f_zsrOc#gf#TOr|Uq6@$}b zquCq!a|U;REslGM_o$iwX~97PXlDWk2a}rWeEaR`K}~WS^OlInle_V|)53uq%7bn; z?LK9vO$L-~?n#vgv)N(azrQ2hMG_t!e(~+ww`BKw9sRf^Jk}O{f9bJ97V)@+;ZP!9 zd+R>!zJ1j2dA1r~+UL&w_~4~@%!<_s$`cS{@6(jcKC|i^j3QP~Yl` zfp8^7>m1`YwYL%1%~c;Sth&usZPhI!@6m7eCtda*PvxE`HQ&tgW$XnJW{ zMy@gGA;95Yu)cF}FCPiXV?JCig0Y-2JT8TQa%bdMIj_ZMlW$|Iy`j=rilI%@pXzkk z$v}TeyHj}A-=GWpSEn}T8fUo1nL{qzHQ61(J>2}jzwl)X;l{=j9W;{GqzolzwY!83 zn^H?pR;Ax8ZK~SF>LVuOa{d!B_G#9ZKFT{E{W#r$G6ctsm#UhKa^yI4X(u$dL2;nmn`UEtThS 
zo1Vw%;JkemrP(kNP!~$E2{)oDC;%UCOq**}Nf(lKM|RjwFNIpHb&jo)vzh9DXi2ax zQdZ9~*=+&py{4#`E8MYNM|!A<1NqybIz6YicHy3*<3>S(v>%Zk=N&+AMkcq z|78-dJB-r_^Vm*in@znD(bX)nJ4Y+?Zke06{TALNvE^WiApKR>Cd%TCEEyWpnL&5> z`u5UQ3#NioCP$D9eyOsVpJ3K^}&1xMO=L)HqDOicjx6Rt9&)% z8^mZoVV%Cj=kjIQSiaXJK|4bzludi#5r_tg89Yr#U=U3W^)SXc$;8LPhb8XQXVrNW zuU9AEi)7-mtx~K!Fp#x27HW8`*o)cwwfS~e`J+6ub9h${W6hs{33f@2^UzAO@3^M$ zk-clvb}B1PyX>-}(48@C+$lC{Uoi*G*I(tc!@=a);G0T&>>X>~E_zk-C|5U)A6ZGi z_3H=%v5v6v5-R-9aT*^{BpeMPsrPp*=l5J>{(lMYL9TXkmw5` z6B#cUd(qV>#?T^v(#8S}GOnWWn9XN3&9~+mD)DuREzBoHvhhhNSFF84WV){tkqPT= z5Axd>u~pT_vrT}>vzNb6-)8voySgJYdyhFxFm{^rtvhT2kZ#2(hHO0Sri&e)jFa|} z@r?Tl=S~Ym-&}Gq4S>+Xn9{LY_HRyz24lqkJ&@2Al4HuP^dXu!DmNfFcz36zWxW&X zoxhJk<+|IH!x?ZL;8tIL*^nvdw0qR%-RN>H}Q9eza&e0?;TLzbaf2$2T zKO?%m?PSwANSZe*#8W#c$Zwv9vDJ}wyCGdgga7suER^3Jt|l)>w@qlQV1+WbKfJD( zreUo1+meU6;lr2P*doK2BKHPEgH-sr8YFev=SPHBr@KY8?reMr!zE?KnG*U@TrM*q ze&kTxqZw*=tsf8^hoCJ<%US8L>rOo~V-einn%T@i?#tryUw*99aXyPGKb#^fUxW2$ zON^kQ;K|3W7hs^~e0Ieexks%gs5uML@K^qvu#{gyWEskQ-nCr18|iN38Jw=O+oueQ zEo*vwqh$ErZnd9v7DpntO>s~4CykuVTHU2cJ*yeX&!n_<9X^F9V3dL?6JwC;C)P1{ zr^@kQb^4vZ&cK$+cg>+<(x3m##le9cbdPo}hi`@-m=_#M$Kt;la1`tfozT3`07LD! 
zzgo{ji|X2af9u^@Mn*=n4adZBQ`YVA6g$Mvm)N&LAC*LX5cK50av&kQ&bgU%;?y(e zGp)7Vrd^+L6S<}``)3wO{Y5*&ZybVy1!w*QArF^O5Ota3r>2$Mrvp$q|;d%!V5S8jY!Z4e{Hzxbdd+k3Kd=*_& zvgUZcVV&i60ZgO&e%HN)TZ2*fIhJt){oUsvgeUx5!|96T&9@k@P?NSkI=fMXvI%j6 z7~xSG(VPge;q=mBlF}U0d3n>b_WljnU8m~TvIJ`yB`O~8WYaHz;Y!)Rr?`5IZw5;< zqXn2{a;oZD7KY2eYnEMaRb>?mT_gOcMV0OTg`zR<3M%eb9v8=wTW1mZiT>WoG%Ht0q%>kSNQte}O_=h7RLoUXbgBkSv;5E+zhm z&bX~LZ4x}{^8fCBuCS;}g4oe)r1rB2!t;I^x%x7d8; z`1qFWl3gq5_wrXE5)0t3I3VfepPeW$l|A8m{>Y#S8KqUxh0+x3T0_AcO0$sD;osy9oSEwF5f%PZO7@ZT&UdnC=a(`NvX+J*yiJ-7~>;}1aS%j zM(NNbk75_OV#9grMUy*8c5QiYRl;fY6cnO=eQTA{=hbEn`(}{wiRfQboNPup;#WT# zrJ$2pg$KGW<80>RPh7^Ww`-g3sc(xtSMs8L^P%u@)-|;q7z4WT#G8RLryrV1HmX?+ zY-cFWWx7P~+9rt{VFd$`qOrkx7{HLobD?Sxyd0xiIbx`NO<-Yl1-(s6etOKVcXZe@ zYtlQprSU!D`M<|Maw?s+3E~Cy;rfP#W-!Su%|K@{F{y2@q08a19&6>1;*!e3BX}Vr zAZ)Uk&A>v=siObnV@itGS4rzARz^mv2XgTwJRFp8TRelw>e)@ z@~;)dycx_Fv}}rU`<}Vv@l1;P&Ha$9gxKI8Gk!ibelvv(&4R5PU97o2?M?A~8qJ+}-LF!!K!**hc*0*%34fPr->B&E?$N9}db@=KFkK z6W{UbnbiFFajuHE7tL!+gNl|`h?d1xNjZv6PRx_0VG&v9r`)PfxOc?RZo=0WlT|R>K^f5(kCsOFmyqwK!j(rD5k|D0_6# z>Rs92@F628RPT{K%ls3vqgi2}&khV`4^?{kfQhQvy%eQSPusIqx$Ga?YmZlu!UoT^ zNJU=X=+8CpbsK+5a(22uPn*YeaeXlR2mg_HY@`fEc|DrWd`B7pP;z1ajvcP1lXBAS z=zi;!GC0R+d5bARS~IFfwOJX9b<5mef;Ch!-zs6_n*Yu4|2R@ZJjb!T-zB12YAu_T z#W=}b6C8Xgxt2|3W5b^OXSpkkAnTT14|S?5R}w7@3?`}AcNx+h7BaQp zH5xPvCu1J%af6|jNgr2&|A;23hOE*1qc2}Q`Tyv;%CIQ6u6;xr2}uPJR2m8C5ExNJ z>FzSs}@8V79r?kb+fY z|ESgFSqkosqa2v(`}tFkkT*(2(*5{^ivQn-2275DXmx?x*ip$q&P1nFBlcpx>#<$! 
zm$06YQqT02?3>u%b$-E+-ZXr1N>Crj-lynY%zICrR6Cw9S0%4@ELO()(y+4kFL*G3 zbE0Z0x=#PPjU4n3WY2fV$~HRcJyQB9R*6H#lqqhl20700eV)raK(z~3ETPdh_yILj2~9W zl=AcbAT!0;9*D`;gliyJP0A9{XZ}80=MQ)5zU5jI?~mFp3ri7{REc@4z$`0~=+`_c zOpw9i8X?@332Al7f$p{3H!&E;d#f%b!98~_S#1!g5ANl5x4Q2j>ITR2*s88U%eO}z z%D1Dnte3mGSia&Ijb}gVp*LEMDk~~W7wBKLDnX8Fx9fg`N9#%%J5y1Ps}P0{_nsg# zAPdlBDw2wJ;Y-1oizZGXLfhON_WR6Hg_)XVx#@8;1#HX@tfgEO6ig_xcEn=}&O@K} z_I8KTJ#^EkrZ&yoN`ST~FDy+~|ML())N@LYhmMSRQlJ(RhQvvc_~98X%)Vo)mvXNJFsrdCL9h2 z=PP&ItsS)Vg{@EhtS2z?o+{=-j>mQd-y$*YeV%t;Gy1Wu+Tf2l3L!jRJWZ4{~wcrk~%Syha6l zdbJkG`y^U0IqbI29V$`DznjY3Y&c zZY+0Seo#X>+9UUSw4D3R$TUaGHHsgsHCSvpL$FKw?3_;hjVBohfUIdVaiHCAt9l;b z#zK*rnwlVwn$p5vu}_rV4m#WyDp^kvN4?B*mp+;vkUwu$r-q5F@6Na$peoLFSY9w?80& zTrqf<!(g3$e)WD&%xA;1)(Q-CX5{hi)7Mey+j2J21VSp`3&H@#9D9v5K+@;!T8PtGgY_fs96(vGf9haWRku zk&tr>8g^r4v^PA|fVrhW2-KEr_?Y+{R)?G_M`sSh18^eVq+JD|CuIS}c)XRmJPYX% zf{k6#g@AuRDh8)(unFN8HxUN+2QU_E!7%?}sL|@`N)c)a{QZYg|us?~Rx% z|Ivd;ms;V!vjC9s{KNpmEcY_tw^_Kj&=dMt?^`E>v7bAb1@&A?Eu2Yywu;&ufx!I_ zMn?BCXg@BiFNmy^*^1K=P^H<7FWMQA;IBq(_^d zE$Q?09>*s~lMc6JsTX}zWSnTR^Id?hVT-)IGr*5ma?b80t;Rt7#}}Gg?TrMTs7~=} zi$?R>FL#+;e8ke6YMp+#ZFS1X$5;9hNc3l*A=~t%NT~Ie+Ge2P7;i6mV3XxQhqjw= zOzorx5T10D1wb*!0>IcI1Ov!$b5V#cZX~rN2TXXp)6n5WBk<MALyv&tKLRkM275dv1yQqou|S!vH?UmzWbKz_sRV{L4R4*1kyvLZ+>_z zJ4@`7yoX9JbjR^Od9iuuCZ>2e8w*l*yvZB-FghB<3%wgD2AwfmGb&+SG7~zvtmUGt z0H`-{bE)Tu*Rl~;?S9RY{CP6$v5;BZ(u$~P1rJxMBR_?n{6H@NWxi{r;+ZxZ6=dOG z_1w=;ic`e{U)Ww#w|ComACMxIk_`W zJHgz3Dnem4{ z_TSr$65ie{^cF=)SxZiR-{+L40f0lqU-sKnL)OvnFfj?_x;UOzu18J+Agk#?z=!XU ztc5XqKiYKs#R;%X(Q0dcSGrL;q)kCt8LK6DUO-hrVb)-h@Wx*c0=Q9IxMv>o@5V!T z?qU4Jae?3A$wP-@r!&KoDR`M% z6b-MQB?#Q+Lx3XZHXr_vX)P9IHd78!7=`x7zbr=@0)@y=vPNtrWcBTbp#UF}u4!lI z^tF)abf}_b4nLT=#ktE>{$@n@<6gU0{^9)|T7R8L5}aC~XB3G-cy&q(QA%oRYDyYj z>VmUYA#LAww|(%)&Zj30SV-AM5dip?ut`o=@Arwnf&d0o&jH^eYv}u=W!cu>OD>XC2qaaUz-EAtON4v7;d)F5PDFao&-*$bt z`!>Sc8WD6hQc2u3Ut{0*dZl&hwHNoY#>luc2BrdVtM7WCxgWyIFvd!4=ZXQhE_M$n zXr~WsIxqbPipjLrPb!dR&tnI`&JH|IOVkZsn5@)1mQ1eQW@8k$o0AA*%N-iZ3!!Ps 
zX2mr64%&xewEf6q(=yvYW*{rJY1#td{`?4ou{WX*HpmC+w+uP%5g_t#?!G8CvX%)1 zG*#c1$$rYOyAB#oeo*3zYHy@3;I4ohogTZgnNFZ-J1ZXYza%+{YXh`W_!+F$tqigAUUuVce=hPO|{d9(vh#wE5_oP7v``2|sb*tc=wAeH-8uWE2G8J*j%2qO{bAWR2VI2Lnm2 z2;+>aoG=h3+2XST3Eq&LF%=%?y)W;oL4u_@*jc(Q!L^b3swKyJrYq2|mVLyHmWNoy z8c%$s!+8W>oCY0L);j-b`R=Epun(j@!e>VuA1VTqgl#kkhK~ei>KV%R?@hy1C+~a|d z=?L%A83=mzO!)YlVGc8QySjaHM<|6Py3x3lJnXnkRa`gC0D~)H?R|Ke*Jo6Gpn7tB zE=h(44SwzJRX|d6dmq#9cN5TUpt;4VLwUX>wf`tsPFD6+3kxgEvn6Smctw1x^8nt- z1y7003#K_6{DY>;f=9y5X4&w?a7|Y`6&VGXlO{HJw7CbA%86iJN((X}ob~n6**=U|Q*(>PSyy3ZQIzx51Q#$PuR2=5Io&FRt|Q6jg=<>GwxVtZUKM&E1-51K8;iG=c#icy2G^040RO`IWhM8Qkz9XJ2o+CtYr5 ztkWD(Wp$;nf5w*9q;+j??F&Skhc{z-&wso> z=0-RzeUVE5`N8zBb#L6zIrUfxh878F&gakSzRRl7tjG?q(MW`E3;-=_{hS(omO!JP zCocK$SxI_CA}_)Vm&40&SSZ<05Kayd?k`@x1j^qc*~%6YC&oUtpB_&P?{I8s)v<@Y z)n^K#jxTx0HG*&NC(q$`1vsdtMN$+DQn#y%S$<{&{vdE|jsq zR&mdAS;aV}gd|~ME@Ie|24%pN?wcICN=Jxya;`eLL3`mMR2_yac zo}5^K+ilAbH%B#twG+DY~OT#ui6JgdAzbo<^hUlN_gSgB8lx4roUy$G`vf?thAg@@;_h?cG)K&jj4 zH&}113D`}=*x!oQkRKNiq zqdat~&~xLm%+KGc+nPH(cEL=4*Yw~%DrPRmPjJ>dC4~`T8yg$%*34cK#St9=(mb^+ zVY4SS8vA(q0vSj%Jh%}iadasKE2isa-{rebI z>%Fcnp>X`($PWM6TsGT`TBp6qixXvcp<_}^ z3nlD2Qk1Y$`z;~2hv2Fb%|bIKt7u-=*L~NYJw`auKi6(v8wXl*{#Ee-C*I&)Cai}; zA%OE3y~PSEpx|EbBqK$^QfUwVq-37Ub4o}0>{49*{x$jLT7}fYmL=wpB-mmi8%85P zN`!4W$%k5kLASek? z1`5oXB=};1AcXleen7R}Eg?Oq%g@pB^I)Th>1l1j^Vm|is6B=nyTf7wZKVv@gR2QF zWS4uB8U^XBDd}F?*|Z+~=5Bi|W%kkrG{lXLhRD+%QDnTX!+}cOZv*Uu$IA9G^`*eO z+2t8KlD2B)tWDhht$&r|Dh?m?jFOsC?``uJyO%;`cf(-NcrF2-axmUa`DVkGq4&@c zG+u#Vb!+=L_kS8OuHx_kW3UkhpmlN~U|h%y6VMXfRQEvi21)#ls2G`-{9@)$xt$w! 
zMA{;_m7OTG(!gE*x>X!Njb8=Z?Er*}i+yy>jW)1>IMNP5#(_zef=+`oO};d3oVMr` z6Iv-feXr2(iQ&TqFW5MD(7ss6CD;2(dW~|D0ABLydbs5S5T_k<=m2pF8}{3Es$gZ| zU-6OGc0Dcf^1@U%wY;-m;avJiYCO8~X7*n&4T~31gBl!b8r|SCikG zD`8eZWVeKnZN6E)@st3nu|l3kbC|C2SRh4=z;ZG1XW>~Sy<9bN^UtU^uShGrxFcMD9W?szjgr#(Wj6~e}&$hB8b+sG595K?Dg~pyYte{=uNRH zV>RQ$K1F5Z#$9tG0r38ZpO(&zV~q&x$mMw_DInqe{tF9P2e=_Eb1_JS{sr?m`>I1> z>p;}8{a#Wi3t$IU6I8|0C=N>t3EVef89HNEC8D-{L|V!E(#UA*ujMx)0??J3 za&YBeZw%B1y$2mkzN%`?f0CLjjeJ!HF99w+tq6A zbM(szvfH;t<-+UUZkD;jqe^Vz2!Rgrx|~YlpsSdTjf%hUv>V99M8z5Q=-gv`;F?RB zh)h>TFsDcXw3WZGlWI_yQ-s32s*aXFY%~cKRxQH9!Vr50OyMam^%cpYOVdqC!U_TXnUp>?UgB;77YX#;4V3mAB;~?8Bxm0)eXYdX_YFUP%WL}HOb4;qM z9;1O7!L_?txVgC?+LUxRtM}EdJI7)N?yOn)E4dvfCqX04E#A`3|0$yS1n*7_{1@vK z$fCnR8bJ$qk2CBJRaa`8>5k|dX5{fF$&0b$NQD`+AC^Z*L>d(pmk#q4{OSkD;M*H$egyX4iV2PUqMMZ78e(rAKYa7Z7s`aEW zbCl~B5mUHBLh}+?lJw8lf$EXU)8sd&yMLFHo?l-auy^PwtpM!xVkC7Dsswm#Zl2uf zef+Cya8M5=|GKOWCr__V3sO?1ftG5fW89o(QxkcELQVQJm+s}}Vmu8}DZ@9ud#i6| zC~nhI?BUo7CrKtNIRn#}{pbn-`#l82P6`h#|6Lu?%X025xS92#la7wu_vI#zTwDAs zcDW@zszs*|_Fcv6f+u5+WEZtQJkIyL0&CTsB>cK@?)A?TF13)|EtNvAZ!gqi^ z+u55c%(#2MQd6X11YM2WV7;KnbYHEm+9DP@&oJ zR8v3-Rj;yMIxl_R5$TQG_kpSvR_V|_+J~mCi;aQu-*Fi^v$U+YIw1nnRl+@I_X`tT zV7YefnqYHmq@nqEIen&@YUo;asJG>5fr&W zB?kSPSTav)Stk^F0dndqANg5p&V#7r+J``l1{*f{+5^?LbrpwOJFE?gk}75QlXG#E z1ID1xeq`f4H=eVU)yC0xl%BleG#UC~I|VSMgh~ZFb9#v#w+YDw603H?3chWwWgBmW)}_%g^5`o{HuBZHfN)6@V{aRX)e1CFy`9qWp$ zD=79xMgmm{x(Y)*m_NJT4E&UNSMHq9_zR>iJO)r@D<-x_-qgavv$v6Vr-HFF%UJnU zZ6e0{sB3$S7ExzPYji;|VES^T((9Ulj$s);^YjwuGWpNDz-Wg{lV%0?6B^1M2&HUz zsS)nkk{yGzE5u@fN+=qATs2Oc>|J|n#EoeGPtbUvUR_|emRvHBt+{Vc{-Yx%>{~eD zUBTjzRV+mnh{xcohQJ|qVM3)s>1LQtkEdc3f6>@#Kof!%z!QjoS^McNy>fZD=5u>H zyhGZpbr{tNVL^e8R*V_$ht|$nVJNNmuslY!JWnDGA2I*^cp~pP=Fu@K>Et3m}1pFT5gWgztlFcKS8?oy4;j%PVYMCXv}Kn?Jk z+F0!8h5SIBokLqdix^!l&VqBsKzmkty4>qE|Nh8;AVKQKqk|&di40Q2W}k4|UP$}M zS$f-=nYJ{ZStz+#H!Cb$Lq?hL@AytoXE!qc+&o4J~TJj&)2XR{V{^BmP!I0G$8)q(_ z=YGws9(_hJ%27A+qZOgdy;=x}1>5bB5ag+usIy>=_ieOQONtdea(WgwP%jz~pJt}0 
zE_ik5xA7jHh&a5LwJIMjk-U6Dlo12OPCs(`CQ$VBlk~A~;SJPG*wP7r+oCpr+-(D6 z1+IB-mGy$zRQE^rb*DGX^9T?04`dy1j<7PCI|wCIpqogrV>15yB_guSPp|2_&5?rH z_3X_$Vy^K=F~`-8E|66by?p)0eIdva1Qlbn*s0Q@C%LU16}!JPHnaxy9v9b^B6h)Q zfRZPUH%zh0&DE8ysGCOf$HV=z^XSabR$P$<@QBoscmI6l6*RwlGs?S>0$=0d!m{SF z#EJ$?6$bHVDc{xBj8SS7m`GpzN88W;6M##=&=&!2@Eoota|bt%pJJCAgWmW_Bn7-& zd{5q~K4t9ylc zM|e#Y)1^XRTG0&Zb+$eD=7DyQE`*I?`SamWPUOk*p!ukMWffDxkGI-laK@IMUH8q6GHZL2w#_o_{pc#^%=r$_`f=yen#O{*ZJj$E za||2Dwc$X2X_geZ?Z=PUIEG_mv0wETYxUtUsjCEFWR>>OoI(Kmhp35v@Qq&U#HdZP`7m8m=V0U2dGYei)_I}cyY*pcD7Cfc9l#D7-)UkN=CfE`_reb2X(Ceo zHIIgHGCLXnb-+anKg0vmWW?5D;K)^2_h}DXL7hMLR+y{V}hNLtey5;9TfDDa&N*L~=_$KdI17cEH8+ArJ4c zG0TY3+DwUGTRF7qTFO!~hFp~b0KEGT5;}q6b^VyTybQ;DFwM!(F5ji&9gLPOdvdWR zb9+?D@W+OaNsx<49pYlWvm+`xddh!giaoc-aJTg-|g0a*W)A^I5Xc?{q$-l??%|Ulzy;gZM$J_ zUM{d+>3uxR_rZvdij`I7S9HW~FhJaQb37XhA4q8r+cXQHkz6Lunob4QQ(=;EgEs_- zie4Ba*q~qVDv%#)I9|W$`viCPoL@~qUl~5&(oNA7l(g2yOOGi{ImAP-iGiMto8-nt z{LFXx3s;g2D6i;e`UlAi@8Io9;R3MJCh3zJOm#L9W9FTDp^4B~3@6<^dlQq|&LQ>` zdj;XF%IsX7`gH#XNlA^aQKC%S*0224l>7z%2FhnZ;5P=|dwanY1M$;{J;l)=09IRe z@BaD+`GnFBfTxq5`Qa~yevDLiJ!u;;j8_gfDRzgqYkaTQ1OFtiBkz2cbX9jm;601k z7fnsNEn9Z<32JAGs@9@gnVCEFCwE3>NEeeOV+vChGnDSW&$rm>PHF*$`B6Ny*l+HT zEBHRSbJiM8&4~(uN24Oa=10J0#XZuk zNZe#yF(QoONSqn~p&N+W8SnD_KO`6-^>^Kt{*?A8p`6rknCxnAT7Y__ptGC5kQL)S z{kN^1*Om;eYXfC6a*Q=}oz^qDyT?3TqC5`~*d)-TI;7g1V(U)Q{_cbvH)N0 zsBe_{iozAWsge+-tZN` z(c*iV8k(Uqf(>IM>mR2O+wKI3C%=XZqUt$a=y^GdPI~*9Co_=ua@G#MT|tY77fbRE z={m(xmHR(?x?InPv(1N76bDipBx+Q!0aPz8QkFkNF8cr1z*5F?z~>r8%xeM6Er2Do z)-zRm_#5i5#e*N`wh}xS6}9hZ7L)dO4D$~x(p&TJo_FQ97x(`{g`iuM8|Hv~oJG9* z-9Aqz8fzduAxXAZW=5Ct8Xh&7W8|;!83=P2bbzarIE4f>D_xmWV z^HKGT$`#6Kzow}lmfEf;2|G2fQbPkz`n#M*3Gn^hdKS9I_wRJgKlQG^KbxcSXkbjh z{$OCf=pW6?Zj%F9KX!)zk((jC(_uH1Y)l zzyREfuevZzsd)fuTagfV9(i#5sZ>4x$znoQ`!>)jMeIv8{R&AVbcdDb>t(pzD+;{qZr6R{3%?(RVY57L z{?h}7WUu{gs8j|AsS@9yPrxo*@cZYSWPPn5xGp^7&mFu2Cc3J^3m~L6k+`#?Sw{1h z9+XX!ZCYN}Ed78ke>Y|)o8KpSeR@1eeI4cH%g=)S!Dl;CDe@}e5ob2=Ji%$793u0x 
z3}=4$27X^ZZ=oc<#ka~0$JPQ7O+q1?`B3{q|(WLfc!`Rl20<8AQhF@8C(^w zpZ0cPYaWQjE=%Mt_Cj&lqkzeJ3nEtnt+)59a~_$F3M(jRw2c&+iq6nr zcL@O0F!y|ndw8J}%ha8TZNu2dAJd(D?g7a@Lh8;^dtomrirCa}dL^f8D83Jt>l=j0>bm>84HHqi4*F zjBmh&3X{Lf4bG#)SYmh*3hC#oixUM>!G){bln*+cohXu^guF}}YuwXQ{N3&8r~YvF zwPs~u+dk^9!Zk(3=^{sxsE=2a2K;l48Uv`K9@QwN+Lxs6J2@LH+djx#=9fL8efSOw zIZM%)D0Ns4OOEipS{YB2lxa$o*#oG!eM-w(tIaUK^}=|G{XFMBx3#``3VVgAe)12h z50VYV9k+cQ+Yckx7q%V2J%s+| zg3?(=70}@f<_Ip+;iiIS?f2!jA+E~-+~m%yk2qhDX9bdRo8oZg3mg@7rHgT1&I0eI zpLBQT19B$tkNiN8n+c69?DYZ=vJ7e@SEPkV>-G;A0eU{~pXg9RB;T`1l)(uf}TW2IXN4wCvVH%W~@LNoMyi9YjaMU*NIW< z=$km$W;$Y=u%b3yLsoyGr{?%V)Sz!DdbBO>3DW=03CCyrzEu+} zG+GJM|Gbu?;iKtLlR3GXHvvnTQ)5YADYtI;(lnrDJ%=Kjl@o5u$*hw98kk$cpAja{ zjPL_-SSH<%r%XpLR&nJL92^qv2TyJ%7dM)%5J~_~S$gytd*BA)qLXFw`fpUr;xX_X z-d6+LgrrFAhre{07f&HU1~A^14r}i(cT&t-eOv?fPEDmu;=+nEE+yjP?jYe9Uj~KZ z74l{=BwQxO7_0KF2eOgvl*WUZ-8w{S!t|qIXmgsJd>Iup*%_4Jnz(?f&dr16?_n@V{5wj|{b;g{;xPb@DoA0YOAv(>0o7#`#MeZML024ZW%cjCJ zz(Vhm6e|)%yv@f;-Cz%-6$ElWAUwqlIzZmsADdk)k#NAwFD#soP+zVz@N#~99n>jQT~K-Wb7_-eJxpb)v0ir0;ZF*bF?drlY-!X4w$GMTd@1BP{iA8 z{F@e}##Yd3_Sd8bS@x;f1Y{!tb^qDp`0>-dJad;0XXY=WxkQ;;eaibN-XUX7lGco}Qk7=&znbOQM2XQ#Wa&U51H%x;(b>-KFTS zT25y?ExY?rh zNm8OWL|W_}f8~QdBSc0e{AKg;?1VaC1vc*PU3|We5aAI3`^^Utz3Z}ly=b)5MSGc) z;`L8pA`DJ;)m}PSL(Q&ESbmhCvT0a69|XINtu1BqKAitGWPCN_$!XA9+{#;A zLM6{Dr7W!4e|%oVJDY_NlYYf{aqYCd5q!KjKK_k@;wV7Y`Sso}#+=d<&xGgH)0w9? zr7QQaEG zE;=9`xeK``v4I-p5G#f8f8!aIrCz~G&G_o=vg5b1QY#jUQOMmxvM>jQsG;FWHwFHxV5)= zACsPZPvqv1`HyM>M~g>#@chd|k^KC@kUk+g{U<%OIXk#7H}rKN9WF=qG#nZSZ#_Q*4({z4g1-$vI5;Vs%os`%9C1d9{B#mfsad4!!+X`%S zOkpwu?A61{4s{(msXS1L&f{4x$7(Oz@|qB^{SKqy%sva@oo`j&ND2-P7M64VBCHKK zPt3Ad)Zgz4Do1&Jh@0aOkP&@;1q#|a+IDXP=%Ib~A>XRMq2Ce8onjQ-D%THS@dNK3 z#%`$D8zkiT&}Cy`tC`se@froJ>op+O!a*^gV_H88~;EQuxxDdVJoG!zDdJ6 ztZgwCwVO4%F-NoIm4l@wFZ=w#J1SxCS&40PQNh-5dGjUQ~RleIRk2dtm>gN&o^P7=E zTCm?Jj_uzaWgtpkz@yXZv)Brss}(BY;xRCC#ZYmBNPZOUtyKVs3VB(Ye?0-HY4>@? 
z?|Q<0 z!=B_Q7ntpU3sqv4!f2(TS&#N^#mXi`fg|JPrImO@0?7J2S*pwVN34Vpft>ky7HyJQ z&#f=O((QB>R=3wbxzl458+b5g_a5IS%**iz9^GF4BzYwwL*K!$vB`8aY#&V)VXZ~U zu3X--PLUQmBcJ+K;LB4r6<-y~MopZgL}>ieL$YSx#}}q45}_2DvVBhUbM~7Vs+Bgz{KUfnT?}fMfb*SNxq3eZ+pV0yU%=L-* zl5ukJaUihNM$>0olnYk;nZ(Vg?w9PhEa_JTBy_v;??<7u!dOiUxDRo!G7F$dN(j+@ z=1FtAW*JWD@>~s)IruoNhML0VOo+lofFXw#Affk$jkAwhQhViF_`<`9$0~thvzT7R zQRJ#n1rd|9KfV!z2oSG0vJ9pR@$##NDsNc>NR|Z6%hLBBNH}_LUyR%7dLLC@V(!gf ztHXqWPI=e+)fs}`2{`XP=G5_vKzM{dzZ>V?_as+S{RzmVq-XJ5_Y+VE?J1ig_aywp2@Y4&LRL4okcjt9&0^c^A4iiLxRbV zMkw{U_7Uz6nkS*+(^$YoBO|8Q;&ttCw^SGk+yybtZ-4x1I#Q@Bd>!S_asK>P;G_-- zAj8A87W2X5W=8Y!ndTgfNOn^r)|GeLr}G2M&nN4tLp0cyn=`Txs$AvsAIM0WimQCL z*bM*rwLhk>5Oe4fS>xSG2cGsR5y8a*IQ;wH7y4EulZ$EJvKM^21$oop>$?AlHaaZS zjYIA>#Khitv3AG=T6UFE=H7JRTJBL-ctz-JXrAWmZm-~-W$%yj^W#lCtwJB zlWM%+ZA)=_&?v~Km)b$ibXrehk5)8hxxWz5AL4fT3lrghpJVmM-Sd-H-$RzL6hihA zM>cvWb5`ihoRg7}VM)|2su~+QLNTl5Mw4H69Su_m<7z3}0MV zDE(~J8Qv*6bNs;bTw^b6qQY=>us6mWmzYVbR9Ai_JnQCg1YyuL0P*vO)Tn$xYj-;x>ya?-SWN3dAZtt)4sk*en?f~4M%2F^59;^=kmq^X220`N)WL*e_wUC0m>tn%%sJgHPb zwrnyZWL)Y|*v3rbc-cx$T!Fo9l|MYit_VNSZ_;tGsGJPB2vXqB?}?DKW41>-9Z`sY z;-LgFCySCZjiS?;u9CcFT6Qa6cULHGoOu{YW&|SI)}MfFb>q1`aQ?eNV#v`GCF{v^ zEAJqU*HY?)Ban^gfx)8(+l^8@S;5r5$ge{rl${NNhN4$-h2Q^gsQa3V^>rOOL8x)Q zc9PuQ*Ru5z7WIUk+?VRg$|NpO598AT5fV*>o_jPsDKTeXjk7DKR|l&VQ#*l0m$8nU z{>Dea``bB~x7CCHf;mtU3P26yf8V`0U>$sqSss?!yPSyWyPH08TGJ?Bg|;hT=0VH2 zke`@@IIB3AIkx0`r!3}ocsh?aScLYpFFo7e=McT$c_sz%k;JN!d&oz7XZ`?!Th0<{ zQQO4rQLngl>gl$^eYQIrAf5fzOC{6wc#D5uaBDJx;*&@&22(c+4Ykdi0+HWXI5i$n zO2xY1i+6pdIQdcY*bRV%3ihI!^fzxka+_|=kn7Ii@}&10+EiH#sBjfBTwQdqQN9Z0 zTtj+K=-{&5(#FKJkITka@#=x-*BfTV&}DEapY5`2flMgd+^Bp`6)@T{Qv^yk*SfG> zQ{M2A8@a}RoUSj47~!XS&;TK2XZf@9pb+v56YezC6puXUp`S;!B04D9&YKXLQB=eo%5Gj0qNb*XDVr%R`Bxs0 z(Qjx0>RF@y8NLSh8q#NC)_Q@ik&Kd0SiRCF2Xb}d3R*rWRxCmUgw9$xS@&bs4X;Gc zAmB{E3a6a5u#ugLdxF8u8@}M=+JWOgvjA2T-Z(jnx&bGX7SM0t?%=n7&ME(#ZD6?% z7g<`U#L<$XBu0F-LsCIp!@$N$Sf1pTMO7ER6GlwGqW#@NQg$(;e<5i>5H;7G~>Se%)svh7_<=PrrBm5vvOj2_13f 
zWFN!Ixy|kf#?xZlke%FYjB#8!=#l8ZeP4aKmVImu7)xh;2wuWs_=t-6KO_aPDf0$^ zq&ZpIq6Uf$Zuurdze&3;6U^vo_QK+fI0RvmDWe;Z{i-6;wEIyr-wIaVt&Q-53775@ zA<|>9fSsV=2m+lS%MS-)%2K|w-ZFqvZ)1}oj#NN$%ek4~?aj_30we8DnD-fF81_(y zNPg13(RH@s7A?g1cn&TRt{xekN?2#^>HZlUBNB9pQ%CSX2j9GBWQh^TVG$MSc&h-t+&#r|4?nJJ|p$ zCIpC?lXF7+Tg*GaV=DU(g=IHWcY=#60x1p_*plSvQ!^M%=#`~bt^gK1fz{_|(fssX zkZ0+_a1K^$UvTnj8~yoV^n$aaCp;%A(sv}~N9BtT)w0t;xelX= zmMPKleUbk4(v&?!Zq)(-Enr%=sI>oG6H56NNqu<0j*D5PnEq>la@jnip7vg$b$i)l zx!7!!V~z8~c3I5nEJlzDzm&0=>sFf1+)vz-BA&iXrAx6#t_9=^mmB}d#Vyfq011rG9$w`Py8x189 z6-`+F99O>4iZe19AmwDek>(3tO|C4qlD5n_Zlx7^v`{?r6^rrS=04d1_r&Q=gHIX$ zL@MefQc08mUNF$K*zWwJY8{k9Q{$zW=~J}iVq7JOJCHamiBOJ}m8all0z-IMi}obV zzB*VuNv?BbWP8>9$p!8BFWJYVOQ?S1n`Nk-Z?I&otHG8luIbu0w;&N=H_qQOG`X|1 zAYHNCJft6N-Nuu>n-?QMaxzQHRM~O2rfu8%<;_t->OcDQ8t_BTty~f%=OPyyj+9)i zEYELRq8H+?SBGwa(y=z%-6Prj z7L%YuC5c6`l&HP_$%HD{H!!2VGsWSW@5yAFxnORjc!O0|@_aq6>G(;icK)Y1DE%dy zSzQfB`l~a{=fC?)F|N-MNLXo&qFHsN9GDLN`Ic)((<}Go3>|LS*znb)DL}Z8!b8 z)P&}W%w2q{&WIs0Lajqp1JUFC1?4MQLrNJp$aL*IbxhmBHjt2c#Tf0_F!B!9b#i(= zBq3|R;%vGq=CPw3Z?M-csWx$aCuWAqnW0P=r|RYC`)M zG-QSctgp5X&yv^})j8>r!pO7YvIxEFb++f5_^%FvQr$smffX~^W$SH9I!kp-j<5n{ z*SF1q*3tt04daLHs^+E8(hNO{CeoJnf^vBN-FI}Wqk846g)qPN<@XH$N#>k?B0NzL1#s4w;{N%4OGCiPk^R> zuTHn1gPhVsC7Ei9fUq&}t1+?%6%eH5PKiHt61G@IW*eJY&mI{WiE#Z?@vfBk=r7>3nm`SzLCu8J#aRGgn9p zijR+X_PTLqXz@ILzn`{**r11z=kG6*xelDp*qxdB{B=KNRCn0*o@LNOEypT=S`KHs z{N*$~PGD+6Y7Fbr2}WNlz48e_UQVHnvWK1c9`UhPJaik$c#YsTE`aVdG9MqGRs?&_ z5(UA43JPiI zh8yc(7Lk4d1xx$CD;&J-XPMnzVnOU*aO5z! 
z@|z1bQ09LCa;2cL4==F?s;1JhfXtfTl9BUR##3-;3z41a*imQY(+S(4%P;(6eL$wj zZlzl_=7Ptog~L?7vi&z7JV)l%0Iy9%+4ifx5pK?ofGtV4@y>@n!Ewe_eVAQzJh5xl zKH6YzXJ@B3GyHUUdC&W0+K3hQpEO-F6W|$3M6xbWtQ=+M;b_;Hk(V&q)iC}W!*3wM7jFA5vl@c3Cy{K?4bYifa0lVU^p>Nf0 z94=}AZIx>^gpOU0qx^WQg@qsX!nz)I$myGjrR4w(1A9ADEF;)^oej8K92cXy_|+R{ zfSB#4fpi>@8hs)LS?y}oJ@>z-$la4!ZbK;P?lc+2z(a&H?IU@3@9GuL$mK`ofMeXzzeSN0v$++e^)87Zw)YeIeb*_lL*<>_+u9 z`8G%@+yPpMVO1 zkfdBo0bDI0!%a*Y*+Iew*v?>#t5OM|ST3X_^^gfi*m#Arae%;c)y6Nk2{f#C`_3LY z@2!4A3xS>DmM`$9M9q^wAtQ?bAGES(<=k1FvCT(0c`cW}FKT;nMgIVKS)R$eo|uBs2i;OksNyPn>9Ai+_?_mUf$-X*@M)go~3sLwQ*!%vL|4Tz&jQJnSLD?dt5uq6Ct zSL7KS)iMvhMf!j3eRW)vTi5mxK@mhyN<~5u=`aY%F%b|6=^8|lX6PO`BL@owl~Rxp zq#J>u!9WD0ySrg%hUVQj%qX6N=jHpp|GwXO{+j2g_r3RCb**cyweQnE1KH1qT_wEk zd(OVo7MtJl*q3tT98c)ln>vHH_C2)8*`rT4C`T^G%ePksi@1vfbgdP0ynpp`A%CDA zlK>^Jd9HwKhVcQ4WZBA{(Rf+6o&C1Bknwny#(V$sX9zsjc=vfU;6Y?=pYfSqY`3MQ5#(r2rO zYSZ{p#L!AU$|@Y$GPj7L_3_7(K!ZwxZ(H*ddW8y&nKXQ3mUJOHC&EC=U4TZb{f_*D zeImT>bwXKMO(xbjdbYHpB3=U{r(E^fY@v*khZOwL#r6`)>2}~C+%*yT6FCLuB%QEAPnqu^q4h0gNsvu#i47u>OcPibV#2#H5xqrc0tP`;lEQ;ypul zUFsk{P8PwS5c0l{Ve3Eeo!$dH_7@f*76Mz_yOpA#yvjbJ67-q++j#qlWkZ1=vx7U} z0Z_bu0LVRoRo#hJNoKV-ufk8KxNV@q97|uBtQeom7n7!o*hY5pQ-HpPeZz`dC9n4o z*9lLFn~Mgu5!*-e{3lSBK9z+w;Ci3Wj`io+4}Z&_vyuOvccRq{fe>){?mP^>l)4R} z;)@9Iu!xAhjEmMm>#@sk^q=`aB>=aPnn#mve7E@Hw?PC{8SuF}$5i9D-d|6W=TEoP zF)*gsR`3;c>*%h2d6*39oLgYy5Jc2bEo7k4g)=!NE;s&Z8%!z$W@C&?UG((cC2{wQ zRpi1Z>ABp$4i3^SpQDKnP(Jfh;Dq5E07EF*U)SDr|4dOJyDqWm^VL3#^5n@G9$uFE zP?_+pmqr#8MLAdhbmj?K(Sd*N^cOsvn{;^FW42K0GU0MmR^bJMTX(IOg`1{z2{AcC z1>-ScSj@*SPk&|?Fs3CSVpcbA+pWdN@#sEvZYX7W7E{Ez|A9S&{fzHpmFNUEV&A9v zmllerVwKVE>m&%woHN}$Jf&2W72=Id$PPUY6-Y~#sB>Nk%#Gh7%0)MN9INTv~ zgZ9x*nK9^A6iE$^OidM0?VN4EV1kWIwW}Uc3^PuhHE^wC1lJSwAF^y?*(NDPNZ`_f zI>=7*-3>DH_eo3bddy&})g@-!AvmDR?wK5h{YG|9LZN$C5KJO}$gqsjh2vvZP@A zRw}zJA*JXuxx|ri2TaRGjK%SCRc&C_%E4Nfajyo>>4b&ck3{LJ6ey{&O+3_yE=Q45lcohgRXX%lBTHM8|;91?7i zQ_VSv8)}34+}lWHKqiA-7o9OiZN{yu>d>wyy;?cgK#n@H%ByODlwpN2)iuzxUK0p{ 
z*I~;qFIB_-o;^eVwwFa-!)~^siBZUcNk=(8GlAf_Y=y-5xYmlXI&H>Lyz>Nq^$GB0 zVu#yew%j^}HZDC59U!eqRM00ZW$2`V6xsb^cgRY>J=;XOZHj{5 zaUq0ARv5MH$|rcIZ$uaMX@(FpsI3ie+%UlAY^!}UK_nvIN$eI}Low4dhtf}bmp9Q* zpQB--phGE?ga})Mu1&q$$(Q4KtJ`L+sB6N>d zO^Bq$(FM_PInR_fc1UJEjsA%XDQ-Z8ZK)1*ty9#+}Wx}s=1yg}>r(d1U zBoNb1e3wBBXr{ie89UNUM|d_#)lZr?&77H~--t|RjfI#R?@?@B{7_|1s*>n3MXuQD z%F()DD?;6tfNyw#U4vAMEx8!CaX861G7%ek({dy7;l>#)-K%TwXOra|yjw*sgW9L& zFv%NJS1Gvs*&Q0r0H^etWRp`0q*>G1NZIdJ%bcS7F}xv2P-0mNbULwblk*kO27!#x z>&3nH&H>Z*EK9e(xNf&W($7xqR|xf(`OI3=1-&KKMBS>qD!dw^s>dszzk6ccvg&2dB8uRjG!*c z?4s@J$oaUp$~i&@e5H$-a^soP`$Iz7!*{l%Bd1f|O{5mK<}FPV+P?Q|BF)Qmb4zik zNgG>EIyOG-baZ>`uf+`zp2$mkPi-liycF0fXROJD2FVHJ%pBHR-03p6%nOlT0+7JF z>tQmgIjpnSf_AD>fM%i>MPODzCq$$$mHiU8(SR)hy3=U|14iK_=5l24)X%1?DY^sf zNP_D*-Bx6WMM4afZ-T7}x4cAl&sU8mCb#s>ei@>{84t`l`re@(65$(9iiT?LlyTPoYB{?KGuxWaWb1+!Qd>N$G!B;N>|oBHR(YoD+;$YZAlRQ`iXrNCK|Z9N&63 zs50NQWCT^_Q#^dugWqfBqXqMLd-XYzzk)nzp_(Kud)59o`wc7d=S~DIQnc*>MU9Fb zwZZls9q09~u6HUi8*a5%Zo&SOplw3@G|xxeX0r*Gqq77veAh+cY)wxaPs(eeOa)#G zVG`?1H2)fZNdp?J9*lzqE0gh-Q0>DzCq$QtFv}zP@DWVsn`%sLlgc}J;=wA-DL}o^ zNF0{#S{VCT*(yo_XU!-OydUqZshd|crY8fEMQW8r<(Q5Wl;q8K6!Oo++znTF#&HUC z9%QFZk~z%oZBzkmRBJ16%n814vfJztA1n^gG`F`d->iG!L3gBGFHU2nv`o1;DQQCH zc0>C8Z4O&V59pZp>uTI1iY%Z5PkK8P&c=7Ab_wpSr)~Vy3+j4Hf9+C`@(yam+^Ko^ zPnZ>vG`hnHOXiHl$eKT5A4zZvD`|I>E$phPc_|}R-nuP(huwsPe4zFmPRLV0!5Bk= zqQhDE9B2Ad=Y4oyA5V-^qU1+?JYfO423&cKZR7F0E)LT}!h>ZHwYD;}XF=h=p^_8s zNW|b`C|f=MDU&h4UO9dn(0c(<)paT5PFt%uOtT1jD02$G(Vo(JUt7lWqT>EoS%D!L zt!UJGg#iQB-@YM*AU1-hI;exTA8Ss^eLjFQ@i*Dc5d{C39abwc>_&o?W=Wb@wG`L2+Z(=F zUoYU8)6KI|h6Zy;KAgs#Ic&4Lfj{GQulS3X&NVcz`C@yQ>#7z0mMiMkM_(exMdEUfeV`iy zsmo{|!O4eQp1V87%Kw(3gG8Bz2xW@>T7AyR%7byo=?tb9r%*Y1m?RNQ4r@@jKV^-v zWJ%x0&*b6@22TvHZ#DUCrC;?>!&qR7jP%z3Jpu6L%EwbWCD+0(7_W+Wi;d7*va1I3 zQ+%6jJ4~fBMpE&%rEBHJ&P*Bm5h!)JP=P2&RpW|^8CygP7XseJM=lfN<;E(!MV@b- zXP64m=q70$ovp2RlYuZ;i2C9I8bU*DJ{pXUu4V}D(^UR&rh=n8 zTT&UJum~f3u}(SucB8FJ8*;G4?Bm{ex8+CE6R?4w3PpVNQjyeKw($qYw`?92O8UAN 
zLwWKcfi2~kie=-2V=7kEL}?v8prn!A{-(f>jW1S4kzOO@^A99rYPIZsjY1v5!Ef83(W#ZP_ZciqX!pHW|Fbx^=?)&#@mkuvW9YB_>FWGQHh!Y>r){3E4CtCT02uC zIMpnxf(5@u>-qgG(J@gd12c}uIX}8Zi?CCJZh`_`+o^!1%hG!kYwh02$H-icF9{a9 zvs{A+eEXNmocu}Ds*K$J;&IM*@wA@VQ)egN#S`PVzKtn(aTB#3N)KixHYQ)SXj|nN z>!dq;_Fq`2?6-MOxlNyqAuCSoAHdPthd5K55g0@O-4`o0M!|5XnChW=J^HxcRRt(x z)KKZ-b;+x5ZCy6r*^Jm}nzP=`CkrB6MT-e)%#l?g0T0^Dv)3LLaLCjoDdDML=zzaK z&6z^uua0jzZ&(@dnR1^W%H6u`V>|D2yx_%O*XQIf0$jVe2Wu*@kzYWvS{-fGrn{}* zXH#xWg9KH@d9DKilDc(Kvb&gb!dSrY*It{Gi=t#p0@rZVNYh(p-C``}42!pu}I)p%b4BY27;UPf*X+;=Yo+ z{N*P@T@5LDCfp;p0E{j5pj++RDkM8Ud;2(YVjoyj)#lkX27M%_F^$EB7Y ztYTF0Z{nvJ5EeR;t>{`y5uS2&oz>G$%!5RUZ9gVQvSSh z@j$0m;#SIdn)r2Z$Y0twb;u<{ts1XM(yv7I^G8Lb9e^kSqi*|tZa?Wu+S(lpX)GB( zMvk@}qAziJhFF$mPk2zMtW@qxv)RrS6`I_fOy1|FD3UFnXUh;Wfkv0ujZtkiZqzb^ z9Xl-u^8_#e*#9K^7@6qf%~kJH%?bCp&Ai zp!8CYeh+S%dtPwqS~f?d?{(W0 zvjWoQeQxI$7oP#RAMW(Oz_>y3jwk&ykqABB*bZpW2)#+-r1HXY%`oTi4u4pSc= zxj%Q#anlMmP5UAXgq=0<+6jN?HBjT9lXLhfT(qKqEQ+8wGLYD`v)!?EvF?8TY4rPP ziea%SML-RBRO^v?>?Qwer-3%mSYkm?>`@^j5me>u^X2m%<(Xw!>!FRRp{9h9*n5Xe z@Fh}kQZIBM5SEJ--YQe39)Wtn0Hr!h8 z8ltNWwBgb37%Kk&jl%T}0~ig<60w%g`?3@cqe?WI_6G@=Zrr*?gJ#=q9?Jm#V2yC- z@ym69zOoC|3wQ(uIBb|ub^CLUO*qd}R)NoiaVlviZl*UZ_pFwp3c~bucHM-BIIC-G z71gTiUG8qc4!Fg!TGZuzokT!&=TUv%t;))#!l2S2sb|tR@oq^y6-fPTXWn1F;cy)h}Myr7N67L8Jg^d`h$4nG|56;HYj{TKwcEVRqm-r91;lHb~@wEjeN_bU?j= zV^gVd6tqw}aG8(nEOE^C;~4P9!yT`EeH%nZn@dab^^K)-8U`&0t(?*O^TnJmGfs>g zt2w@7GV=#Y9tFN=XL`5p69Tk+vUwJ*ejQ|y6HVn=4GPmQy}zA%u7wy)4SSugEd#?* z*oQb)=G=8k1_q|1L$EzBBTBwTYS}h!a&3Vb^1~fnHK~IGJ9BNs2c^SiOT8Eq9Pdqp z=@F5^yGNo!52%PY&pi4J|LC)Q%SYdWExTJE!NHrWH$Z$MHoIOXeN19$74benf8r1b z$xzwdr+lNUKwsvHmWI%F{lMGwqr=d*-PQ}(F>dy++js(`!eWhj%2)+W9p2q6SKnsL zU<3^iKWix$%yDi(fg1FH#)+I?%(2SW>gbM$IJ`O43_xz4*zty#Q%?yZ^$?rmgy}As zDW`X=VeKKf0-8FsSDJzZ><^9{`{+*V(Bnb>G1$Iy*4f?dGOn7xrNs}S(e|kcQ7Ihq zl?Pis-Wz>k-ja2$pfAfO`hB{y8ToZ4w1J6y86z=S#lXS}yJpO*omxq*P&R{p<8tev z3j&8G*lXOW)lzgVH6=@yqxb(pIPD>jJO$qRa@Y~qwh;3M=oWv3oLTR3UDb*M;Ob*1 
z;C(&d(t*tCFw>-zh>eL?zww-W_fRX7@L4#Z2HIYP&Vok1T~#2%-ox4l9g)+eL0kcs4%x8xgIzE;QNe*_0qesS4Fe1w^% zMiYU8h>v2Q9qO6)5_9Re%zhYaO76G-d#t%#pV;DoKIspn;|N`OS%_we4aR8Qj=m+9 zNmPL;-J$_ULv-l?}>jinSFe zw~hf<^`w5%4MuZtiBpq;6}m4< z0D-0O9w6Pk#~az6ZQ({RjmHj6<1vD% zKrm1!gqgFJd{mqrs)JfBpug0Ufsz07^2y(k1cYTA5DqXj)&V%+s+HS5UW>Z7j&2L9 zvnH#0Mct*Kbrc6WD468$18pupXnb*NlVO=)lTD2hMlB`u*i7%5 zM96Xatya%~LD)vJi6^aBd9>d1=jS1S>QANp8;QxGids&JtlK@omMNi5gArhx zewqvXZX!}*%4q@F*2fFcwqsXTINp42-R?k%A%Wif1}>z5{w0$g*lLM+`6>0O&4C^; zcPx2I{cZTElk$Ma98Z#cmvQgh8;>1PE-^*S>!RCoW=lPRl8q{$#hfe8vM*}$7M+*A zu5(*`{F}>zxj=%Q?2N_D#R)wFf_;Rtbi6^((cvsd=eC@E#p$|54xtsV7&SEPEU-2j zZ%el`gUlp<7WJG?5Q8u^UJrW8a^u^l+2cX9V$)x8#10rq;>ht$1^S`}%=9QCq!wqU zpCNN`ptIk{%7;n+;C6SeX5O8Y#!niW^l~^z|aHrX6zTNFnOz5bNZ}K9^%s)QcgU($KxnE%w|h| zo9jCQk$DdUZgXzQxhNUzbA60KhfD|GjFP20CHMGd5G+=Z@gBVWNUNQ=W)}BvY*qQN zLyCSys3;%-luN0jPghB7T(+1LlUR~|YplY#P4!Bl8yi|y;zc%>^1>JezhzlPtymGZ zbcYx;0xmqWL?E$zA#*o02*FJKeux5?fF?mB1H4?Vc8r0SiM~EN)<-{c*^ltxb=`9R z#w15;x=Dtcrh$5afwURso$bpv1DzU8^od+Lk`j^|@QITVRM92NO_1DLi5^tZR z$`qbROX899*neR*&FiA=*GOh>Vc{~CxI|o;VrJ=6BXk$TZya+>U{O9cY1K(p)xKfo znplk|FXj+N%W`OFWpvK-v_cox{LiRz<0Zf)?5(DWn{EYlfdmv-E3_q+ha)7fe@KHX zDNBsFAc~Iq1#9Ko7T866#MrV78&Uq0MARV)NN|}9a0QEEBG*`p#eKqEE-7(( zCFsArM*?nUx=dGo&ze&c%pv|xx*o8R!ALZ`5D)|!{a#lqH%DYYzFHrd*ef^t_-e+} zp8*2KWCq?FZB{bBRVGm60547#0L^A-9u4X>1O9m*1J`*8_|oGdIxrZ~v%ekGkjc9i z-{Va3r+a|x=jt`kbmjjjpu=hm17`Urgw5fuL+f?7M5b9mxrA|>i4pE&2B==w_Y3X} zo=|{P=5U)@&6z0h$xR6EvQx|IO{k=pZWTSSC0L3}+06B_3_`yuw{ZGM6ht0cX3S^8Z$C&@^Ev^fD( z5V?{&%uVB|siA5AaA%?|>?=Wb?qt?1CgeD+!u*_lmo?nAt5d8zE<_FA{jzyL2Nl6P zFI_%>V}o8n(ml73s;FHr7viX{9b9-ywM_89*nJ?}wr}@mpCd07|0X}JR|<=ajI6HD z>;Kx>Id|~fWKh-N@4;Pj&mn{(JI(R~$paCCYrk$y9>ib!#q>fHD67WA@Qv}A3|#i6(7-?{cK^?BVRt2(2}gq`_tDRpR|`V8rtrpBGe zk6=tkplT7g*^|aq93Z3Qkrek zbFqOuFx`-yCY8WovdNpSd0d_>kPQ-;AlspS3=Rx-k1)i^xSX6(=Yznxd8N1)877Qm z!U7q{WW_PffbFV20ETPxwO%#iaI+o@cxystJV)^@hqi?H_*XYx0d_d2?TX1{>0m+E zZ!Uq^e4&7Odsd;*rP;2lQDG$1GkuP;g>4dqF_+7?4YGqb>jQ9;Z|X~fYtQUGUA;lp 
zP89E6SV`ThWNT#%^wjq7Sd{sv_YcQTsrXo(I=wlis!jin8ceCm{Epwbh{|lVb^Bmh zH~6hVIq3m3=Q-A7yQ-hv_~FoYYvq8hDrB)ul-+_2S2e<%v8Y0wRGwNEjwES=+aua`D-Xt4+QZyvI*|Pf`6z~0%!WUMH<`-R6?3FU@8iJQ<(UR%w|{s{|HjFR3HV83r30XN$$8 zM1=EnMp^k?%pK&MIJh_4t5rZf>^E%#Ocd&1fLStksKP8yB#r3MhZG`N(#e*b46_nG zM(pO&)?|S)d~9^|)=gE3l@{xd5FXIbJ6=n?P3*A{`uN%Jq{{?sdVg)~_x#Sz&YZFY z(K+R+K=HMDz{AijyNBF(4$E}DOF>1tcLBO_Meoe)_um9X3P->sG9WWkEqfwf^4h#0 z65!``wV54M>B>FvK45#ZPxwi&K_6g&BCkKQ&GCB&4?ZE9s5o6$1!ERMrZn7I? z_`3jHpgVOszjrcilr6X?6-*DTuPrqo@l1`txzEW9+%V;q6H zFFYA%{bL!A^ABcSx=tz+vDvpeVJc9_MeINKn~2SHi=Y_cIvX_BtCXUF-zM~?7gVUj zQbt!9x8C{(%8GbZ@2T@em9*<8z|FM$?MEL8H>ioAZekx+_rJM0>Gm6h`(x9%A4h#5 zmRRoyt}){MwQkzm?PKd4syAdxMkZzxK2#pR4e)IC2z9mNYuI|jkbCD^?x#-=6xr`S zy}dJYid!N6%!;}}oq_SK58IXoGF<~%S@hixCC{Ae-t^rH_V>DPdy#9i{m?o;KfeXw z1hL=X#=pyM9}9qJVXp~sxq#6T0MY1k#n$VO6^iNAzqYv$Uw#K%720+sbgXo6Bat_T zYUi2>ex>*Tsw)qSi~Ok+u9)^h0D=mIeSNGU8x8&Tv7dWLHv8JF?+*7ZnY#XF6!CAT z@CWJ=7|SgpLng#`(|nMFn?Q*=W6T^k;*O;MC_l=<^mMbLVzQeBhcA))4h~S} zNOQ^^_YKDHuL$$O@GYv6WD(q~*J+0@iq3`qxd*Ewsw{@nFm6f&B*>rjR+Z##P z{#d4reiNrMtX#=yMT%3&emTJGbJ#$oXn@}%A6P^ zqg$~zAI-0{spmi~HoMo%y;#xOxTj7-FHta>ntd+hXWsjFYxwD~xwMM@T=`UyhvR{K zZx(T55+$Z=6X8~!oJsLXj%%%+@bi;-x=VzwQfqYUGBo;nSVK=dbHDU=OZY|jxNV?_ z4ji?BQv~2j?ef?6ptG=zv55BNhm1)|9Qh5t<{BIn-rj^wo@4Wx?k9KmOj7VnzBja6 zn?Ri-$tun_xRtL+-@HUAR`>k62G5JeGvTdIc!`>VP3y(!4sLgizlM`-r_H4X4&p&+ z9_}yrDnJJzHh)1j*UDp=ziGU{!ng=2Ym%F)!F%A#-8wYdz&g`6O5LU=$5yWD*vyL^ zZD8B;fi!g$2==wp77ya)7{Zc>n(9>NmtXhdmIe=nw;Xv^ z)ta1Qe8*WKq2|{j1KXRXe|_%HlP}UEJeb9My+UN`FTmJ|z_`@8RC~VVfxy(pPazvb z$puSrnoI`|`>RY0bYvLHrsIr<9{%oezwC*IMYjm4V5);d?PVMmtHrGwK*{gO6(a6# zc%i8>ojlOmVV0vUF=x-xv>RB3uP^CtBaVN0+D{GK;b0Pgj`Lzl5nTmiwWI#Q>+72B z%%;u*)6EtZ-uKVTdY{OiRO4g&tthv-x)ioOd&NEVX1B)DAPO0s#k)55!Bkd%B%aIE zLzx!g*jGjtz%|Y?q6p*~@zK@ibjx{a>0RpgHAugyf&%cmND6fEbu@gUYraxXC2925 zfiIGb#OAg+jjM)yIH>!}&VPyycRDGCd-h^Hqh|44%g};pC+52HOpS}yoIO=CoJ3gV zo!6hRJr5-}Z+^^*A>XC+xPkO)Y;m5jmsl$)i-icN;&CSS=m+y=!%jFSjyXP!r%HT}=#`;tKhF+?KS})Qs 
zeas)l`ra?RiVv|NaeJ8o(aINAhqA6gU_?~813sHM-$YkiBqeV&B z!Q9IYwzruQ<*sM+!4LXdGCMuH{1WWFO&Hn@WcwgzF zKb>Q}G(X_{R4!qAZzoSnWMZ!E7?e*b>FYkl-3VL zcVk6{Pk>JP^J)Br*U6$eZjAGO&W*%Iw%jkP%W`$;=L-9473l;{Cu_U4jQM9iMmuW& zhj?Hipneo9yN1Zyt(N{OS_%jC?nTd11covEB`a!vuOJfMl}7EzA;o=>@^OizJ9WdW zeBGT#u6yE*CHxyVW_E-9TUup%w+@JPAxo+(>H?_2{#k{-;%IlXP+?^W zirvM!^sC_bWsY*=tNad|8o4CZT!36z268T+z5P;p<_v{*2O}4|Y~Aq3{b+H@-I?u< zkG{AprDokT)~M~I>_ws|-AULxU85@{Rl9hPYQkjSPH5|&5$W&8p>x>6bA^-UgKNh9 zMpkZT`Hr-){hZWp9cIT2$$2h>g5E>()jQ;wN!y18f8%;#=0rDF`xgwUaI`&@GBD6= zq3klEy@DG`JRAyFd5A`NU0IAnTzS;mWYk5ec6rP(6%SEifI_j{5mBjU4ZfB)UC=CjN z=bL<31H|~ou8{w8{laHb5b%=Bw8N02Nrx3pwsFJK3313WtRFxynf1M4xXNl+K7bmC zN^xBFT0@di8x8H;q>FPK4celIDD6JCbO`cvvt+*O z^1|+LSl9rx{#(YEaES?6KP2RcimrLH_O!7Iv(C>mL^O}R8N9;N#SQkW`(G>ZMmU&O zzIgO^)x;h5FNbCZIW!s-PkhL%qyZM(!Z5Ln8I4D@YfW_3Bn>3C?IoCAypiJRlyBKz z%DTUAmk0m%g)eO$JpHqEnMBL=59eH8=^I7TBa?GiGu#lxt)#7~>HN-{2-wa_IJV>X zF75dmIDSnvZrpSPg%G@3sTQlb@~Cp7oi&j%OIH``jW*}!oDYPZzj&-6)a_KWidD{Z z)9&IfOcP*tv(R^{AiuH=Qu-@-N+1LQ4N`n(X(cTX!qS<<<$J3hv`Ot21M`U*mBM0dkoxCJ8$T~R ze&U2F;qF}Y-vxZ1=5;~wTISvsx1?jC!N&{_gqr8gExV<-G%T|ubT{bGJ}wzL+s#nI zIAXhhZ5PSv43}<^SXDIc;{&iTR-!Y#`i#vI^*)Iexv>{yIcE=5omZRan#MR@FP3Jj zel2vI|IAnS;85|7Jl3Olwfm>X*!Euxg8*{KtkJ;FnDKqsgbtXpbsx=kg9Y{P(QRl1 zT%A74u|%(P{?U+M&|AdPDTp_3eT^B-FdyIVRA=H_#lD}GuTt5lTz~f_WYM&MfLB%}NoOO@WRNa^^Bqi-hQT74C4>A{%{`X> z8eC2A-;z%zRp3UgZBO9uiYPl{G1dLCv~3?}AqZ~8p)MJ2Ls->ie{R=3h=p{Rzd(3@ z^GjWC%l5^(UHa(>@Pf8mrhD;zx}5-Crh-f{MQ0gs)|It$%(`jNH?P&r%E|c4GC3~1 zV>3Bpof)jJt`vKLA>@4jZg>`$BydOef3=+XDc^!o8f=Rtfm~9-a+{#iemkAxj~%u2 zHkmVIe&^!M)YR0-F8%f2HULu#dxp+>r2OfLUqeGvQvM=;PuNu>5FTrEC#4iuCU$u& z%-~o=KAGZ2j*8khqd-3Aihah{_G}%;K>c`*#9{q9|6oLtl17>mXIbj?O2XRZkr=yG z&yR6uSn1rTcc-N`5Q3I`ZH7nC#H6Cre*7EO8}dS|(_|=sT82alr-ONk-!fBpGd>OH z5*w=fP*{=1rBRT#H1I)Uo_?nY`8Q34p&=2-bQa)6U`<%C+z_kTzCR+A_cJ$!^Mg%k zQxe?1!!{^xZ*EiY=V48PE((xm6i&~Z!cR-Bbx%!MATx)=w-?fXL3Z0rWHa zHYjtDt4{joA@RBDd1BF}eqEO~-8n=#2tfxYc!wiM_nEu4J6AfR^XZx2~>1QtgGQh^=cZ+t$DNK&1(@X82H#8RO$4}-Y0C* 
zbr*zfhP!vkBvqdU?vy?va}!oVuH1D~xh~2tHJMw6c8s-}BgMMSYA!6N9BSf#JuUvB1 z(AtRDrCCHkPcAb%|KrJoQ3%T}XVnGkd?!z5e|^6tsqb%F(`}pNzOR0XCJc{R@2yj? zo^g25+h00}++{eB1mVEmx9vV&SC>dd#TGto1m+Zr1#i&lOBFb(+md z8tAIltL+KbZ!yUPDY(p8R5ME%hZ66OY!c=HVW+QO0MB83heM>6*nW=5c>ckSTgj|~ zhsIr0ld&;{#iUTOQ#Kksq{UR8#wK-!;q5NnwkqT^JUXI&%HbJ3vXmR+R+6#FX03O0 zoplo{O#`PB@zHqXgtrYdxxM(RAhQ(SujU6|Bx(VdE#T*2fY3*^&=TWJ9DE_Fuv2_RqEcbM<=&w4n@y+AatW z1!>OkAw5?@_SRI*m2qR#{gr9XOj$YI-lz;!|L)w${s?9@{*g_3%vwNX{sV2W@ce4u zE(xgNK;Vj!YTEEHGKCM78o|1e@*SOp0AFZT21%_g>1`h#AIMLbqID1_0@rD;?@|a! z#6Sonxw1cmK>M1ZtixG#AlkWy25UO-31I=x#wfQn!ZVRQ7M*O*VWQ&4$N9ZX(EE0I zOi+%%??i_OQZzgktC8>1(OIf1h*ef(xj*lT3c5%N!>_#80l*_((1@4qJhscYDGTCe z)36!-`dYQDBA|Z8XtMj0qaK>=w5>>hjjDL&=$B@QzYi@eT#$_bpOisnh;nSd3-gz> za+^6*$$_U&SKh>3>kca=3UN+dF990l^zvAm={5b6^M&j4$#7Nr=4}0wli&Phm>3uj z2)wN~UIABTH67d?9g+jlAtrMI_)CMhZb4R@Z+~r)r;Z!bGUJ(~Za<>zxp!8Doe;R{ zkK%nAv)G+#TPRVIdyZVvo1b6ci~q9=Z4f#AV{5wNz-tdhoW|dz^6gF0bbv7<=Q1~j z^khfMd=z7h(rSb4$@}_c0=K-bJ%2K-Tb@z+#C@0UT9xSL0b9SWLA;wNK5(XI2A$*biRT~%zk z{ZO+VZIw2JTItOyqW0F682{|KhWEW=q zE~oP>!p_flBPrB0G|bNvR0O>VH4QHuXN_7%9|?Ito&=%@>?ILT;cD72QUW`x{?b`q zWDh|x?P6QcN(65^(jam%c$g`fU_GyVtTe_GmQA?osG2m}$o+TM7)DQ!e{o;KA$AkI zW3=M15RP?m*^pmYkytdViF@nsprfwtUttn66lh`x2RXL->w-#?`u%1ghnIAB23XVm z_Q@-Wmw@xm=e8(ip_MFLmD$KZ2h{_hn@zju2Va{X*@JCwiY@9TS)5JEGKjV!$Uog( zm8&BxoBxC0!vDx&u5Jo8o4^w-Ow}BlXVdAo=8#wuM?L$YvQ>tsE!((naFy!)E3E_f zDx7>zh#W~6u@PJPusc-5L<{W8+VbF!{$VcJFhoO?+&JUYGXa^>>z$oKDYGRrs(w#r zPQ4Iak@w~Ixmf;YIhp8c6*qi+`0Yt@y#L+ifbi@8fvc^U0c9a5`HljQy?TlE?cY!5 zyf5MtsH*52=`(aJD5J++%0;bCv4HxU*S}3vFEvA8G$>;rJb7)AI58 zt}rVy;gkjkd&T4(ofFR%x(MD@_a_4J#&#sW&3xkB&WdoNIf?u+P^>%s~WQ~xmb!(efJ=bxZgNNnTgZ;+@>e1bix*mvYytQ=Sx$>Gd&Yf4id zo6!ny&0(t+TGOg`7%}-sQhT95WOr%QH$Q+~wCEq{;Z-2v-#qM-r#NWM5w?4pmu1D}s7h{7kSf>c5M_w^< zWnqFdUems--Mu6{l4cX4`n}4sY*S(*+g)t70!<2HswMA^;_MAJMktOIY!hD_SqZ1r zKwv8Rcf{7vOu)q|L(Q{qBxipu(}_%vL#>`FX_06 z>s)1-cf;|ev9slAWv0qzIpbd^M>GqpGE2mHvtbh+(cv~TOJSE^yT$BB2 
z5?reVar&+9+5PZYS^#7y6E#!UIL+E~X2mADiVs}_KI0SkoYV1&+n{F8)9#iZ7Gn?& zI{xv;`@0`7PKdr!tkb}2YWo4Olkuc7@4;yr?3)B*Q!|4Wu)!~N>E^G`a*~Tq6~*o@@n;Hvr0vM{^oO*ym^z%pX8cz{ zgix-WumDCe7QdD&5W12*dEhM|Uq0`}SSGrTy~%^`CImfY1xUuhAW9FX6h+~L;`UW* z?&vPM=e(l>hw{>0D&lp}3%yM?x~!U13QT5S6lU5ZKGu#|_Eqf!TmNp*fZX}V8Cjuh zCUxhS9kYtX$}0;$BKwLe1(i95w8uNG2uW#fGhK8NdRqJSZL!>%)-@@s8JS(?Ro^lo z-^A$Mi&yVN>a3OSZyhU>&qItR;<(ilK^NFsl7GUzmm$EmB_AMJ=9j0S3}crZgE_E@ zp`Vpt3PVrWa;iujVaqcuLX_NtVo4fuWN)XHCa*btN?G{)RTA;et$4E}hO z*JbHD`op@@s1nkm@e#v6zy|(Mw%I4uWe|`~df^+8|3LYZymxa6n_+g$DbCNCCqtMS zJ)#+7M=iB?hC~_7b-QbZj>LZD$0r^CQj=tR2Sjm%A;%xDgYd;qYgi29(*j@0CuLD6 zZA6t6ciA^UpNk();E+1*MssZ?aF9qY>bPXYg_x|q=@DJ1{z)148zhuWJ>D9-9VLw}+ zSbsNO)0mLCs4QUSFrNfiH=R|2&T%8ua;ZW1VC>zG44f6MUAsj!5TUdUK82YPw1kKR z<;g=RD02i|gSg;If_Qoy25TX)Ul?{wS&2K?}|uxUt_Cci7;@qpo* zq7Jw7qG`Z4-6*HgTD**GSl32qEShwJ@pYGd?W{h_ld_KY^v~ApFrEB&yABHgV!+Of z{P?U01F3#}YB^BdA0pi2syNdWJ zc2JNH6%ktP25_&3y_d%S1*lUwWgBnUb``kHKcq=Ajnz346S}DA+j!;yx9m->$>AzQ z3LcdH+Zp|Vx(4-&UI?beE2P9}opmY|^OqFTjf{C)tcI$KmYX{(yBbQxCX~5Q z4qPCLHjSch+4@n(pKrjYTRRx;599t0*aoD)u~0l2Q^xWFIc`1DCT#1uGQ$=BELbRC zA5d-qGV17Ans0#_Y@pnuTM%+VVwaT+DhRUP@sfUr2LcNuF6*<87;=$pIl1|{d1+4b zGbV-3El(SgUAj35x=z0UiIyyRcGbL`zkbbhUpzw)XW!N(cK9>+vG!knz)~S)9m`X~ zH-sBT!xm~j7TDYL(qOqX8_`C(Sm(BuXBnOVd3*1pi)o-EmjES#6FWg~Ds3iCr@hc8 zTVSPxs|DI=dz-dCVFsEo8h}VNxsAneJ^+Ry!ekpP$^h^r(%NykF`!dvqRSSUi%oU4 zBpN)wDGwU-6+ip||En;dZoxCuq4;98G}ly#dyQQB3JtbkAifK|o;Jav(zvWeU}SbG zj^52Q>048%tblPS+3rXO5`bRw$*ykuFGUBz0U5xi@s*6Qae~mJH@jLY zHQPbwUD>UK4XCXB@?uEADqC!mga~LqCf*(T{>SP|#MrL=Ib>4fIUep5ulUdBS@ulpr^8}2A5$hp#plc9qbtN5B8!uett)m@{I<8lgVi!#eTiUC2pq&*_kx0o16$w4M^cNv~{ zgS6@GkH5SvnEzrn;}3rs7n)s&6;S5hOQFCVB6*|75cTD9Ng zmPy9SE`j1@0H%5SQ-6Gcu=*nE1#h#1z8`MT{L8%Yp$`z_pVFNR~Hg z*S3UDs2oN0XFrO78jlg6@i?;9!C{xpJ{~l{Zif4Ai{e6im^HzEa)!5gNVb*iV$ATu zLUEa2U%rFsg=WM2P8VYbheey+^_Ix1fgIjLOxOeOG=+JmV&!hSWOY6s^=xVRb49Pd zZ1&bUKyD;A7&EeYKfZ+zzW<`m10a>tskFx3#=bc&;WoY4R|@c0u&1MAL%dGm>!!Th 
z1gLcJV=Pr-N50K`Uqw8MU^A&~u>^;0Z8cNA%iM%54*=p%jPLg014uCSQP4Zp?ur;NBW=m(#!%cP`FjXD4rldEH@O&th78Kt4&?kG-5{MG zu2sd`q=MZ4#r5)kIceMDh`3^{oZ>Y5+*)iQp`Tz{&7nxl%SVFkqR+xElsgj8n>v9Jjrdy? z!?+1rq94^V2AU0&rTNvw$t1m*NUf6Lk#`%3SD2l%zU1+wz=myijNR}7aL{6w&jRrP z%to#|xX^CcGvoSPzaJkcPxAQ`xtJpm7lKNeuUuih@6=`RzbgqK^dSNP zTd5?8R~s)=^OrR=W*e*dG6D4t*``GK}*Omj~$+ALEPLEwAQ1`!HTC=}$ zX>JuE*L2*?YCT#U-^Yd5w4L5HtPD;m-zjYw&$HO5byr>YfeU@m7v}HQQBM`naO}xE z&$#4Qq)w30=Y7q{tT&Jf>p|@KhJ9Q`-=C?`cXe@NQ=XK ze3B;zvb@q|OqIX@qBjkY5_N)Lrmi^xe2>5i>O9^C)c%oe;PRYdS!gGYNjX1ua;@9* zk$|hv)3jvr)5)DKcU6s3jLvSdQSYQ`v-p7;$zMjDmI}WhE7P0}W$f}UsM6BXUcs7o zhgKb%P8zm>lM5pQr6SFvaT2qibbNd)g-&nI1ZGn{zFX+VJ1D=ZZG6_a6L->13SqUL zvLiq4=?rrwkaHXTI=T9}DnWE=Q5k)WQ;^IinL=bp1)6zGc3G8G@NFDQnEvvFGMSNq zL0oi~>|4kY3wLKZ`sn+3k#^8F&#W=q3kGOgpnDcDq1SXcZ+OukID|IX9DM9a&07fL z!9$|Ir?lG=MbrW)qa(cImGQVK*X1+0FAc2k64a#fI<|>UGQ`EI+O6;c z+fKLfZF}P8%Z9m81NjYJ=J@3j&M-~2O}cG7aa+L1Y$*8ZhXyAZG{jLWM4sbc7N3_3 zj8cGyx>e1L9c5XQhz=2^7#Ec?-ff{?jCE?pSD`8+@+5x4sJFy($T4*1V#iox^7=SY zP@`M^dyV|ViCR5fAHJ6^^=Cetrqyw%=vl!ypF$IH;O=n4JB}Ug2e_)FN>kGKYxt9b znqQEMJueHzksM2v*CP&i6y*`tUhw46Kx0N6Jv@Hw{a=1KLA97oxqcEweE8!JYBH&- zmy~etU!rmtez!6i^7=7$@Pn}OfGiV;mryY6Pvn{3agBcyLpmHhedYPY4`bqEv6#8n zhcvguM({DBvKkksT&!>(Q23pdZJo(KoSznn=*+k3X&=gHY=4^2wE^jFCFZ5eNhFFt zzWnwt4}_I~9N7L@>-8T_F#62shPYW26Y+PBYSX9@F#IKHK<4mc-T(5Vx{jdSIY*=d zpNkUrOLzA?dQ@VEas3>ctdY?hT8U1%P!WECpGpFr96r>WW!I)eekgBvf2=EBUH-*B z{`;eUIs5J;v zI`*Kt{6!^UzJh|^bL1Q~yjF&V;j+G-UNu%`NR=c3Vw?)DdGQiOwGP&8a%@VxI~Ld< z8qSZC>Twb;P-a8~!wuo9hk|QDaqh7r$NMFHGH?;5NhNUjNZ7O37w@oUN*=w6kN2kY zb%*0gC@@MOvTfY2-Ic%ReAD;A1-7>eCk1Ao&D)asANBV?a~jciTz;LG_*{(tZA0SQ zc%H--aUfk;e}m+Enrii#UJip{LNHic;Qh@uGL-3{Ho8+Mnn@ zgmvCkLN!)GxCc<~#$7e>d(zTam%%k277Y6Y)**0(4gQDaV}yl+xyHhx1OJbO1&f32 z+#$@5JG66sNDKPWSYt>HJc4Vaq-LuoEyZVGVaB2lQJ=-p%o2J4OTdv2T$KS(wA2arJLo*x3rw&_EOI{Gnw0LZwqXY75ats3JV+SzrA+X z(dhqp4fEx(gNzElcQ%0A_*%_x6@20FA;YXXJR-RPBb5*(BD|1mCcvFv-I?*ghc*p=kHE=F@j zzioIsWtAh%ZkCwN;dbY$Swn(&Km?a$)nKV3&IPg?@`p)~^?EOED6w6W>xKv6Eq1mI 
zKQfV@8gm`)oE2P{djGj_(Y42YZL`d6DsOqR%`jHs+e0jD7!DpW9O3!*;x7^YO2S_) z_^SnfwcxK7{MCZLTJTp3{%XNrE%>Vif3@JR7W~zMzgqBD3;t@sUoH5n1%I{RuNM5( zg1=huR}21X!Cx)-|CtsHNX12EIeJSTtY^L^S1WOz5C8QgAd=%=b=cI@#6AoKIzSi)*u`_+G6y=MCULFP&vmg4F!_f2D==6m}PYE6!eBD*5KCKC-`4_`E zQ>(E)iX}~Q31>?e8cUmNz0(EhgIY=#sD<5UR%48F+FdfGCbVQzCoIQrOsq=8E10CD z*Tlrcl;+C398Fgo`xP=J($+1Pm|mt)q8QT>xR_)2eR^ZKX*4q21V*7^k6d!&n}x{wPZi#Z~8p)(-}iJ}yyK zujaGLnQJJ@r%P9h(Q9*x8}nUb-qr@?Wew$mO=0b2Xt$nqouyLyx)z5`^3nxCwi)-deD@NYph}%`XNz(f)OoLxZNGMBxzyz&@suyR*nU%$ zU#BRP(C2cctkIkFAj?vfQv0$c2^acRhNj7rBGDW6tCJ)m{BHBmGAf=An^YPnu1ub< zSakcaNKNRo+CJ(*HA%Yqy*aR@)Tw5=)LKhL8(IV5?oezCY& zcBz!?d(fTI(V~r(6~4QrqrxgCc^0>Md3&Oqi%%)^yb(@&F1q@?QD&}XG}dCJ&PUPB zraHOO#6~Jr#w=#kLn=mK`J4E2I?ANaz1Oy-@3DH^!s}cN7x$IegGrIhnz_Zpzo<8t z-bHyuhO<>%_hP)x7Icpej)a~=1;d7l&O03zG%BTd@dY2qz#SEMwUg>2m*UOPHZ0tQ zC#Xc>_0b@2`pu>9*gfDC_z{>`5LF5K5cLx3t`8Bl5NCTk#an@^mn}QY^0XQ1zT+98S_3k4~Hn+ z)i{;%<%wI5f;O1xlM)D^@!ww5E0sKKQ7SoigPljg`gWF(q96{-pu3P3l{#;hb zNs>~Tj22Az4%R2lYcW1K853em%$KeimzY@Db7f3LackYg46i+LCq>UG^OEc=1zHnA zANO*|_pow0`YT{QVz{LO%5_)_^U7@!w&6jOw^IefF&z~TU3T(^GYa0u-jy8J)e1gThoDwqhR)y zbuIbn+C>ZFi80Q5~wTn0gf@6{tLMy3#dres@l}4)d$hCcT6Br$Iy9?PcSvso9~M`<6X=R|ber z(_#~Lis7qd9gG(AIB=NUPajh@I{|T~8K>Gooq}WwiSp>Ok2e1_#* zT83w1Yrx?QE_~wqYI+R`(pja`0TyaRjTv|NjwxL}IV;IG=e)hUt8~4>*=Hz-`}S4< z$pG>dj;rgs9A7OY5eRuOo#+{HC(Lw~u%Y5UuS_TR&reQTv}GCAd3?KP`y=0lO^jHp6?G1a-Rf`|8qg;zjDWumo@g6TmL8D`pyEIscb(N zp-S0CoF`1^@vORyfe22MS{Ca-YoJM6BR&>E`Sn}*XEE(*5?E*)9A@yv--kE!A{ z3H3xIYKtYbKyB#usSVq3d~%Ok&*n<{CZAnTshe=U8ly2Oc;5+@!`)@;;NX$$#BA)l zyLf~%OovRiPN(i5f8xxM?UhGcw8e2TVj0YiJ*!=NSSlZSDp(B_6&l7e|D-aIlF`o2 zt}DbD4ud!+j_fLOC>z^Yo$_EO-VtYPm?$y&wIj(*`QrzsAb#!{{ng;CSpkp$xDFV; z*nrgry!44queNIXagWPXNvOB?q@s1ygCF?B48f?i<=HYvKtR?|O zqUkW}Ay-ob1$Jq;bzhwv4HIh?yX6W^{NpH?gPkk~W1XFdfx_#*IP5!(?yHCo3=CvF zYTd36X3v(=+uT@JG`1)-QupDb+2IMC?K|vLe$ds7p#x0uM6lz&YkKFhj1w0k@%i;r z9P@X8Rd8l7(^V(%FLw?^t^^ylr=?J0jmPPJVJs1uILcM^?avp4&@)GA^=c_3=&6Ux z)4s)Ln9S%7T+om|0hhah4ApTSJu*}m>tR4*^PF^NJVY_rO(p-rRr=ixf(zt1g4S?O 
zpNDjJAl@Zuf(f|82{0U2#J16qIWozq@8ca$4czgMf^^>TOJFz+8uSklqV?|X5*N+- zP#LhP?GdvjfbmxmAM@FjG|UCZwYa>zmJ(OebDS7Pw4Gw>x$EM3Lk!|$4Jq3&XWiAB zm-Z9w${GdMgVYIU9+u)0KURcSUogze%iGk5Y9nVK1JRWA753*Rx3?1~gw6V-WjLN= zXM0;y-qJ)7p2NQhFK(@5Mn+x+`?w{Kvg5}tU1q_K!2GETrXR*0f~fZR8p@Tt zf}!duv<72fS{|&O6WCmvjbEKA+jJm&7RZ4|{1u6CRIC~8tKp3Oel6+x#a$lAopp#2h8lNO@3zG~5k6s}N zo4S3@ju&VcTm~@z!?0(CWkPuU_i`NK8_L?xK*&#O}zYo6qEDAfYkQR?# zbM2B1bAj-~*;19}QvnwdicnB+2~6^=FLLTxL9!?iKMve#C!^m%GClg_i)DC(vUR7U zn{y<<4^#L8>vIh)LJO@1hSLl&2ws6Fp&|Y|!_N~s$``w{bME|okT=^#75*c`Q`Gbr z>Af^$O(b_{bGq&(VK|`_0tb%_NQiGjeOsH8q5g?yc)LVFVuNi^V|_jkv2{nPfzB#p z`%!1|UG0Z#1M(i&K}2#g^VTzQO|aRanpG_|Prlry_1{5QNFE&f$nO=ZBw4t$uy^ z5Xog0!THt52NkF~~eyqI*C3 zO%_d+I(&Ya=elbT`$>V7cQs%!*xBPQB@bSl7Dd{PHAm;%t$GrJ@aLI?etS#w6Oy5h$z<`~wc>%Q$>E!`3vVW+n486F4M~^=VhBg~F3W$~P z+&CC|fAq56PDD=35a!97u6Jl9!d4|+XZ?#=N3OI-jlL|z zkXw4oGlL$ky(r`jJLNqO63h*()Ues7Pk$t*c#X`E<~>o%i`cTY+(5m%Z%P$ITbHv% zq7Oe(aB*1;tzem)6!#Nqow)#fQ6HbRjG^H$YoIlE#GVZgJ_BMcc6Kt@`7OhNkpe)F z3c?o^21^PO7%@h=5{6C92)e(9-a|eGa&^-@q~#f-%6rGz+Gu{(DwDN-Is}{oMkKV< zYw^fLoa+I}UL=Y<1%lIV*d6VCDsgZ!jJD`uVW+no_f&W*A}A;r4{Su&UGhbmfOlGW z*(z`?;cjA~WBoxI9Vy*AO9TSP*h@q=x-u4hfXR4`jJ-OgI#&iEIfDlQCJO{cgl!2O z5*zm=v*yAD)?#!y|6zeqIAHxF616h-^*Zym%)4OW8~yd|Zs?^%jRBpE2T)9}Py_1N3!0m>mRz!tk4dIpai zKbN2Ml&g^z0T+TS)aKlyV-&UitTtzMVjR3rEWr7U3uDh5SUpk;-%C5PvufD*6AnhQ z#6fT?-q;9<08)SeSPdTIZ?}c42X2`Gt17Fa36%RbSyBy&wzjszr?8@^y%jpjch(&w z{(+@#{dD>+Qq=F1cAjBf%(pk40U#b?c#K+@=wS6&>hq~%zQtF+t5tBhP{^NIU6MMe zxb-KKlr9h2UifM_)npu^DdZ#5h=X?nD3>G`dZ_4*xiJXR;nKkAM69Gvt@eI=7!PcP zTf)5H*i$TIF}_zmoC9(Wu4|oznNUzIjUn^ulTlYLmV-PZOCt*rrk-y_ttYz zP}RxRGW0%a_7 ztWVT(ab`X%88a6zDO=B=Das4O24CRJd`GP{uP|vaGi&9egKQ!_Q6PcwMfHF7+gun7 z)ioE_<=a!#U{PQm46T$t-8e|Bf5?qD*~stTE3F&rtMsR?@`{rH&xU$>LCoFq=WEdx zuFl}Ydv6S=ff(!@Q4ENm_YnaufwX?1&K+hOuh7>V_v$wg+WQ|o$LwL2v)V8YW$=1% zDTr9tuiSUS0k)q>aAo98S=UUU-7QZ93(iX>^%;7f`vWum z%T7>OFiJ|w*zt)8N@3I`AE0V#aSNlzqoboY1#iQWcZHpJgfs)yMFiV#@IlNG>glrV z8W4IPng$;3xy%wCsR1cnurLspjW6#nkyv!(rp}=&h&G)S&o%&W?oFarWItgs(VmmS 
zwtVZYJJLAkcZ2qCL&fd04-9dh!eL~Va`FF*?ro>SKz1GLt=-uZJi#C;=7~tbF>ZN? z9zDL9qP#x37v~~gLioVU$m4*6%2a$42x>a-z$hNhxGM+jqA0ZP^T60=eq`&RwNZo; z03)EV^^LM+Qxtio>*IM$xD z?pdL|$A}-x$CZ?RJ$FSKl0eR9})ssB_4N^o*I^k96r zUxFaMUc&d1?Lj-6Ni3+3+Jl_jfZAhix=r0z>>M!V7tjOGWj9(Z1$iz+ch7?mZkvs8 z7ubg0vyv+$BnSC=S_#$fbIm9Ul2RlLKWvJCBTQjiHy)kLU!5!}J8f@4#cp~H7mt|X zEn;as-AmRcza;pZ_&al?wPWK>o(hx_8&IFMH%?+NAl;hXF$WcP#h{pN&iF(fM3*>E z;7nl0-@SnT0N}?l3a+U68VmQ?b@_{oAP~@@M|axPX%{%p|K={8^g6UhAR353{U1Yj zEY8;U24vx;y}xvXJSymJL%c{*1o;zvSU>b$uqF^sbDy+PWL-4>Vp=;(t+a*OLjde) zz}LD%iwX-T@pRcK6MOggm92p~W}WhR9+MaCO}+QP4nJf_90#6?(Fx90v#Z8hf6W7y z6rSNQ)h+pwRoC4LCs|@I_(Ny|C2jc}&vuab`%J?&qwc36%oc|h78Y1r>}|}=t28bk z;8ieSb7O_w*BoDpkWuW8fK&o8rmVMi;=wh-KX{TClKxOmQ8vf##JJEWu(S?*^dBA- z&Q9`PtH62&@*j0l2oSgEDd7g5RZ2!tK>)1_{!2}2BbUY&pkBjCkP?K?rH$wpF*3^inxPjEdI>sg@MIhc2rMg={!Vm(P z+;4&|3vv7Qt4EB@dg3VT*PpzUJBI`%2NoHwt6S=G2n2hF?X6y57BC8nM##2@CF5w9;hqsLf+fc-~oWhr`c65E*krn)0B{3V!BE&ZF?4 zg&E>XHlRz3f_e1zn$D1^6+2Gn=)YPey359K9(X-ZL~DtgDF7S|Dehla-Wy3Cj7{|q zn*URAW@s!&e!QxGfJ?~qpr*>x_c;m=m&?4<@fWGAz!iJ1&nNf5B;r&cdOwesd^+ge z5CX6!_Fdn;*b9=tU@Ihg@pTvv(g8mFZqZX=#s9m~XA+O;SGYH}N zSNx}pLnJptT6nB>#GhnTS-aV8H4=-3yaQWZm?&a%Jy8)vpOOj{EVenv1LgHLop%rB z_5YlKmte1DF^?#m$$T^AjaX2O^(SD(cWHZ+9$=f6*K7aV?CII! 
ze1FsMUd4|t0|d+U7lH(4e!d>0rG|-;<5@^K{Q_cs@1&sY@*WrO=K`UwIn_k9yLimS zX>EXSoLG89PFg@~xsB4~o71DXduX>n8&HzG6$cU?iwai{=fLZyE+a>9PqFH(eOq5$ z6q~}*8vshtw2uB-@M1eWBwzv&3i61m@xD#%?rz_O+)-pDwMX%c#pZ%VmfzC=COk4- zc=7qWt{~Ah`&_5=Xz%?9RxsqObk42+6lvsBJjW9IixS_9X^8EuHY+pkFJ!A8UwVXv ztR@hc`yo;`w>Dd6$R&i_Bg)rI5DngOvi)@PKSu_Xk|B7nnKxNYAL}}^jsTHqF2eD` z_tA-o36Bs=bhsmD@FF15h7P7T{uuG_2}@St;O}gemqR3+!f0e5_9AjY?uP%RVyE5$ z1YZPMU^DE_%M>%e8GkM?I`|h!YrfSl=AM=2ytSAw>;o9ngJInLOwUaz?lVd@dt}-Z zqJ3mW=w;(HzgK7P3pb1@xR>4D0;*L3_+H#`{`f_C9v?oaGzR{^DdoXz3YrzDX>M+= zN3NgCmm}hiKM4fp8x>X40hz|{V_%fnrDz;JAPG0N{I<{Ap;VkQOsCj9B-@W%2C_wM(=UcXFJNM%O+QXa!&}n-s*1SwzH(;%@ zQGt~F-8t5S-?@pjzpL+PIiU9=Qgr|1|I|vbc7J>(-O@EP*3zZ^_%no(JUFjUCYAKP zEh<`FXs3`9*vF##QoyW!S!;y0jN#}X_Ia1D%;Y>E3`++4mMO~5i_-DIISfJ?9;dNq zw36r&(dSM6@<9U{ z_X4xkcEXj8L|pYZ zUi}6&lK6Os;$M{B-^H))k~#Hc2C5{d+j%bzI5^4ngGfG{do>X`se%1JPr8l9he`lo z#e-bC{{&qy39`GpJBy!>&zBt4f0|*r}w7d9DsDQtJbYKZ6Uv*Z|!vUB)h|>gx?pPoksdo6*TjTJp zvk?SvhzNj&k&Kl&Rx#(T<&lI2X7mCuLQ?W2)AB5yV(3+*5^lNOnmx*(xPYQ8e}zHY!s|!2EN6QHYD~?#w>8*P&nMBu=^yqk-3eR$q+2b=2CCf zGY@Wh|GZzb5d~U8S6mjxgYNNteetj($3)c#WFuvv)~5^Ui31RhBHzMlYisMoo}=+j z9E;7JF6wihsJV3^L&0ljFM9U@4(qdEg$dsNscO4-uEN>O+OumfDJCef6HD9^0l-cn zGgF};e-<`@KLUwUx?E$SQv?A2EP@vbBFV+0(iWGsJ=W(>t#Q9i+Rj}7Kgxh3Uq4a0 zPwO$;^M+tHq4vCtH;kBuhY@nNo_Wb7Og1ih60ZnWCSX__)x%`$Uh~xigfsDjRiQo2 z{<+!LJW;){zJQB^fQ9@=SV?-q!`Tft-1240$aIIU?wY65$Dqo-h;rn9^6gok9TK0` zL5<~9P?Ws;G&kBX1=YfI?W&)v6586^Z^)~;x(uZRIp2f|srKWo(*`LjsfBUjWR*fQ)k zDk>KhQ&AVP+Sh+HQEH26$ip~miAmT3%73_)KZQ6h2d1jyG-yaK9nsh>W7DkJ`gwix zk?6C;VJ6eQ6JNbiY1&0B8o8!zK|?gHr*L@9JM-HoCpNQiwMTdR3g9$=z8B7#CLd%h zC>yU(Y@@&o6Q`5pJY-THh>k4Lk*;RJ)8vBGYl0u)JMi)7Pw|0{y6Aebx7{dA1G z6V0!GZP0|s#r+8avg8#My)yiR!4?4Pl{s4 z&_xyJFS5tQ!@wZ9$(^O&)sBC^fcwFIRR($JfAK$~TXox1-S^UXultvwnn=Stv{Y8a z3=dGi3vOIwvSz)0S^8U_+muVso8c&%+UD)r?sg=U2(m`@Ga|4CnI(aw*v=G!{Q&?1Vq&>A*&MA;19Aw{xgD*9OG+06> znuQdyNnQ}f43dy3?idF-N5EA=;ZG;_z|$=;EoK`q#F*mSKzcv3eYd`dh`SF<>k0@M z@xmv~)pqEz8>29ZZ_Q~*)8^N&ddV!TKF)MkERGX4)||RJuU%xHlaO_8&bmoiRZ!4r 
zX24<`Glqs=BC>Z4 z?mxblusG&;Mz*TDIQ|g=|AWE2BuNugt~H!30+G)x74dDF-YE|YE_T`o$4CtG(aI4c z#i*PHL^^WK7;kX}BwhiwlN_r3RGzgEC)_7U0G@NxBNscc(*sTp^DO)qET`)suz<_2 z{C&cW9YHEjW*?I_(XS)JNgPf3H5swg#hVHeomt*v9!ljgf-d%EiwnP#IMU>Ij1e~M z8Vcqhe0Xne%$5*+xW$ikm(IV0LuLU7dp4SSt2aQo_q893^V}20v{nj}jSJsgK+BP0 zjK{{ZL*U!@An+*G=bL*p`euZzA{$p$FBcbBpT4{eJ07GPGwIf6fwng zlY1pzGIV1EP>_YHOD9_RC4j<*p8P5`Q?}03vR|_eh_^P}(`K z_DtdmyM@q-`31)+zQ$b?8j%gcSuy^(69-Cb*f4o~kS!=xFhi054Mx3EKl>3{8BatY zu2xYbk0U_6$D*#@mB{gxK9!aX#h4`!Dv)96EB+Y8fCoa`U;V(RZVt=e=L?mVi4iE6 z>s~BfoYXqZdVfcRke!e}n|vgNIv_;E1)$LJ)dTA-@Mp+dApClIG@An>&;X@!7E@pu zzq~dBSyG#ecMX_r2EIP6e=X{_!PUGrC>=+IL_Z2H6SnHVA*f8Cz0j;_v3?`JmWt8# z*|Xhf@2kHjIsVmumWmIo0vIObO`HVTSoaoOa;+b?NWIfno6_CwzCcBER@c`jF8=UD zO?DM|SVAVg4MFPt&Uvz6_+|I{q{F5NrFg(dfVf>Yx%5vo-2-cmZxUN5y zm|7;m8-o$mMR1NaEc@oqKc(Waj3YZWxgk2{(tH8Ah6iOcM2eI>a%7dNX+hN_XDsQ#1!kZOmj-?OkONX(&B0H78sb9t4e11nx5si>M z39dc+L!SPiE@BARdCb=m^t7#3+^$YPOxTo;SNDXs=O?O8szGutR;b=m~?{{zapk^c;)C7QY zL;6p`9^7A?4l*OSek^DHbwI}2()2G~yrSc*l={;Qeu7X(6i^M#{Ii22lFn>MA8Pld z;j-E2siXFMg}?f0u1i+T+Zx*XCWcwIGM7c&UM$=RJMY{?QA_=c;8qZ-t+5sVM9^Li zc|!i^@B=G77nIL|i^hzojoI8hG>1(i!;S-qG(i+17f-s_8i5@nHqz zcx8l%cDA;zEQ`JzmyqrAHUKE0Hc_yN*sIh2zb4eor$(gfdv-nd^TvSqS^&^W>R0~O zeL}12bmFuG)XF;5Q|vU``s({9+sSs5bZyKm(*~l7pnF`Xqd2+l>nmA#rfoB& zN4;opV9caE)&pM$K+$!ogn5kZ7iGz6*Obt?v{0gZ0#bdu8@K@n7%a|1Ceb~~fhGo0 z7_V@t(j4vq-DfBTj9W5b62UNz1;DVvB=)o$p9h#f$hwW2l-HTk{@%-+;}Km zVBXS`dUkhaQP9m(!f@dd(ZRj`V!HLc?aA3P{$+WN5JU|*kaVNFw6fH z$A+V1*FMR1T7ASNznF4PJoN~bV=ptm{kWs*#Zd`%1tgj-&5{9U!fiA9 zE+wsS*pB|(UZw+TZ@80}K5o^U_Nl`CXn=~&a|btDjbRGv3Mbfl;&IY7!2&txdUfKj zloguz1Xyi3X**T_BuGAux{IBl;_KtXg5^r8XUjhmfY85L%`>cpURfbb3c@c$5``{4+H7?6TY*FxRHX?z4^ zoi24h--xkR8;swkV+#Yd&yxC|Kdn~vh7I_IK-RCI*1IwMVK92owIcG^clAJ7({17X zk08iVpMlePD*_?}wM*tt&6tkr0Uc0@>fr^a4dM%A?~p;(;1-bLiP<-;-U;~GT6&%NYjJz)b!1vU+*gir(TY9CJb zbSa0bFkU>515@GXI8BcIg`-2SknJWs^&;v+Ky;l|P3Tl?{^vl+6P5P2`F#y2D(xAp z7_Mu>Z;(EwalSz+N_PI;*`&D4ckHvm51%r7Wji=<^4nC&)>XXea_Oymjk)J2E4HN22(n}Vq=f_&6qQf7MJu^W9we14dk+p`i-2U_8 
zL%WGR6wm-27M4ZI|!R=hp?GIs( zl!^o@L0)!~nVDI4dD}$O(voY6z z`~spBcL6jrR?6^=z7<_oxnHyr!l-()Ma!mUu_Ms>B_b17B~_!zwc;4}x#)4Gl+Z4t z#OMW-C-xzor0&Nzjnfg1vmvWt6Q56PE9V^zU;?FN)BFEe!%mpJZ9jsYkqWBHa^0WZ zs>7~$G%K$g#`k=CPVYfm9G>Q(@>^bP1E)Na!@Ur+p{|`Qeg1^v?Ah1gz@(1a#&X{C zh7i8S5I5D!VtZM_MW}5KM>eqWzo$jSncyDh8;NrX5?p8v(dL&ZbkQxeHL`7x=5}N5 zQvNEG4l`+fe?|Z((#zuJGJ`Jq7{`8g5uf93)|J0X_cDXvi$GPLb5L0C1(|zn*s+n| z4)Ps=m$ttjunBp7F8(VZh2LZu@9(Ckt2To9l7@i>k-X+K?a2IMcvOM{TzSRNaw8&9i9ECZB~wEh!BJGV005 z9+177B~2!co}B=d8UM-rUxBO%j3#YE+EB~$dV*B=%;sR1zUF%6Ih*SG^@R?L?%b*M zJs=SdRrl%b?hX9W$^8#HE0f?^kae5|k)4+EVaaJYDCEqSd1-gLB;?~H4@>p_6r*1E z%lIm2s618JIg_aD73=U>TZ~811c_#E)=1NkeK5VXW8@9IrFaIC1~_z#@;_B39wjeE z$+;Z&@OUOVD?r>49I*WjGWu@WT#|k=x^dJ0^an0ijn8JB zG8O2(k=tnjb&JAqc7vpUpAvKkmZyD*ET8=1@qF+sqkP9(@#icAgg!;8uUXxQMtvp3ge+FtTy3Gv^%HI;2qx%D35=r~Mu>$aiT zZRlyaOV}+%JI~E9S@!$VZBUz*RKHh2jVSizlRLEG{NP-lUm=JK zwIYhj9pusA1lSuQw-FK5iT><;y&xwp0bbH1iYn~R|L+%agvgAl>EFOP>+1M@Xy6z+ zsj4--ni1R@@sLAWL&n+-R6B20NAo+BMZKWVytkzEB;7mKgu>tu?#1hIYD~?vG$i6X zVq(nHOC8A>gH^GIodRGs05+%{oFJ%lR_NEqa`O7HvVA?I&(+I!=`@Z1mI2_Mr-g#k0&RaVfz<#eXj0T_)V)iHvr=%b;nuj&D!KU=52 zxG+y?tQ4*{#g^p%MrTLD3P5o{Fi$bvazE>{b>r6JtS8yy4N8ULm#RaE8tXK(K1{9; zHXk|eYi{nrO0#SBcE%Yc;Q@GZSv@%BId@U(p_E10`oa*ctd!YiaSs>$Uw@(cY12`<$o+@sLck?4VL`0bmou}ON{aC&mrnT&YIkmSuuO)$ zY|=Hv0T!Kq07S0*DePuht@j#}En1TSHz(pHVeUAUpN+FrJc#9so#sG|SxGI3G!@odyxG zU#-yUxCO0{JpQB<%B?y&nP(gHACN(neX`*mB-jCRAjw6g;eIFVh*j6&TchVTcbn-k zzudXG_M{%W=!uZ+R6VP`UF|NwwL>}tF0RDM@e~p5iJTUlplI8AaYFt^2E&hB~$K=M%gv1vsch8-sO};?L%7ap*F`ZoW=tscXeV0 zR#Ojs(a{Xu?!x(})VjKk=;XV0Q>H0t=6$-)SBdWDmq5GUQQDmgBXWkRej)IE3E9Mw zz_i-Um9cbT0C6`Rv7hHzq0eAMQdksEOM=FO)}~P{%BiLlRSxI5k@Uv=k%*Ej$zSX! 
zcF&W?T?c+r6;FYCzs0$gf|_ZO^jR}E?~Cz>QgV5DdVH< zfM_>-2)>7W2tt$?0}!I*aJ2krDzNCxzY@V`_ghq3FmMBZTncX5{nG{jkiy+<1p-vy zGYk!r?ciyhuLcvwyPWzO2$I9LaLJJUTaFWfQEKg_JF%@Z8!`aKU$ImcvK_LV0@M>{~E-2P3$e_TTbcIJ-gfzNRCQQev@o~_OD zb2-`s&|FyyGc(Flq(dCYCxo32J#N3CMt~=akzbA%*^Qlw233<-NrOq~eEs&A86{wq z9VSZ=`{&>#R#UxvELtF)XCgh3OeZWW&e8f6Y0zULN)3n4flB&`Z{79fR~JEioUIS) zXbqr{m}g#jx`#9=1o1)5mgud@(icp3=R5rTZsmY>a9|MAl_f$XIG3#=L zsZ(Tc#sS8N%_%}6U?++37B5mjel(K6HpG`jXys>+5a<0Lak$|OH}~+2f>UTq?v{ph z$9*K3xtNilbk?@(g7@aekg_*zi?E7-q0K9sro~Ar(bc2x%RDs4lv7lWIRc!0V=y@} zMxzMy`%RTOtwjK=*3R0?OmOud%`w48ku&sd0o1}9tk0fGTW5hj_%BNty^faF>@U=t zaX?Y+sUj6<-|tpWNnxM&idB?f%Jvl|^l-Q>`MlP@^obAEc;~oj?)2oL^EPNqB(*)J~kVhHIH{#{$9hc;2$op}7cB$TH$dNzNQ@opZ=q0xy>rVx7P% zi|}^{nM{-?N@<;Tmuce_G$Bo)+Li!>3}neiKQZoOA#$9+>&Rol;S#wO58ztVJic|- zFRHvxH%)cREk2P08t$5mtd`JyRUHJD&^H_Q^8y}x6G`N;R8iIi+;DxEbCdP)*adQ^ zg+w>K-ShDGctrjY4Q~B%oLHLWp>cr0_oehK*E)2+tRjE-de?bgg(4xkL}mGXLl4Nq z1sX>e2AZ*rvMFTCy`*vuW6|IT=Hq=2{mP0P#t!1Sa1xXbyIJha2RBW;;VWFx+p z3sourWx9hDLLEa$V+5RL^1yk8?)2{!XIaEx>A;MC%lO!)u`CV!t6<=y$-cH=1 zEFw~|`ROqJek1}j;9I3$=yc6D017HbxY^aagFdd;%d^9q(sno5C8%%{WTIF6uD=Sd ztnX2o)%T&yHTv|JguTbU9H~U$#eKOk{;HMuPRMo{YIpU%X15D4L8+TDzL)pJe0s48 z-(aW>L6wKCw*K!2@$yq?R7C~Joc52w@%cKxVKsOuCt|$BG1}B6KdP_>`vQpYich=AL6s1|tRU2=FK)QbfM}qQks>I0Grx(h8g0Hl^^ABmhc8W#obeBz?V63dk#*9oF z3Y4teGxSB)=%wlM9(=!R51P|%0-&Q)Ib=*;c2A<7od;yQI&|&S-YsxA>?Cl-x&`J9 zF6sH{OG^%hWWekd^t(5?78a@Y1jzSHnt6;{e4WnI&x4egF@MnETB8ce^1>?s+tlfbl7jqm>Vhu$uwf+c-Fd1XV6GdK25qtF}EMKtpx zKMii1(~I}*-xGwBq~Y8QI6EH?ke8u%LxHDOmwpAc2(O_l$ZfNu9brW$pc7A$_LCn`)k8jdgr>#3_zV%({!LUP7Ike&PTQH|@Jv3cES4abF* z%GYnW^GcA;p)d126irMwfdn~naNWW}w&Ud=QJ`>m1Jy$ib|B_^H)h5GZS`vbLQ%2d zH8@gOTg5A4x(wysqTocn`3EEn59Glu^6B6^N9!U|YUVO|+1YChEls<$b=x{u<&s%r3JQuU(7I_eydSF%hMNAet^~c?XG7ufklhWc)Vbuke5B>1hKHR? 
zAN9-PnCk0~Kt<)(1_w7c>tjEw7~nmBkByBPbLLuMwus2V0b)mMb`GOqXqBy4^;aOj z`JiUdin$mPoQ{a^(+T4w;4fbiQi)qD1Rd(SXDPy-djg%l2hMrF9DJ*Fceh_;KnW#G zWy3f9eJ>_N#O1>JUP9&Y?H?BB>wdYM_{TZsuL&rCXgD{l_H1snQo=IT-B#aSKIQjQSykK4C|`I; zvAJE0?lRsBUDoIHvbip+k?D^b09(_5lPMed{8Sq`!|pY%l6ZB;u&v^*DKxq>KHYDn zMzRsh*Q#eYs*B=Vk~ZYX&qFT#vBZ30>^LEldgg$3UYC6bf0nM1t9MtSo!`wMs)=DKfe^gyC1D|t zLOC{4TxtIS;BOdjkdwyI|77)v?0VN+dGsRF5%0GxgiJ{T0M+%cSGF80CFv6Ch!ER!??WOM`}fMFuR+d#0#Zu{cZ+W= z#Zxjlpg6y3=d|eI@p|O#a~WrA00!!ycAGOj5nCVEU`p9LUx|f`z=Pbuo!<{XA1UQ5 zLH~$Kl<*;|&&?D}njg*Ogu)`8GAAEO`_`d+_@7Y`HY}R#)@SVyAoK~~-xfY~1M`S3 z$6o~8w1Rq#&es4Q4blq7}&=O%{I@9;+JL~O-IQy*` z#-CclEz9xg*X}`};kEc^R*+}}`~=OstiN4}G=@a=zQ4F6+?{QtsPo+{s}@pUtJaeB zk6aI*+0|cPyHn!>J{GfOEs`Ms&rXp}e}~OrUC%BP3u!o^$8o*!drEWB_1Qj863riX zEeRxttX)b61`T&IARxeIqp@6dcWc`h$Tb450IX&okPq;d!6s5%hpNM=wR4P{&D}e8 zQL_MGTu?d9VyxkNra>jS-rNrJ)!*QQUzb}AoQqa~0A{R2gRiJfjHV)n_IAey9qF#D z$=+vTJy8^pjh%90B`aNwKI7fOTADH!a&jySmpy+mA<3gI)k?I6fd9zJ{s&R%Nr(@x zO9m6!7ZEe;EtkO;B`83a5*;blQdC=D1bt{ngCKt>N4Bi7G~>>VV-T7o3H~dB-@ein zr~~wi)6mCz(uJN|B)mx?&b7x5aJT1O(~>yFA~0)ZY@HA7N^Z4(`nn4{$4TOLj!!D+ zMDGN^Tb>5(8DIH4{YB}_!!d$X@-CH?jb|-cYIn&vxy>FkgQHXCWl+5PXT0_`90diF z;PX6XQOaE(tX0!>D4}#kctnlyrxS5L)LC0(J6bRB;;iN`m!8#b>AteD+lQW$2fyQ{ zqYaY1xl6@U`7X!yOEQR$0Ri_MfjeIj+HY7jJ4;`p;t_YwmwVlI)MVyp_a7f7=Z*gM0 zP3jif^F+LujUm&>EK9~DwB0a-M+Opt0tp7$4CJns_XA-YXCWxTl!d%`7IO9i8W_5m z9Du@{>+}df;w2J+!qrHaL0~vB^(7ifSl_W@lLq&Ut~XuZ;)pF&d6Q`{+D#llkk|~c zIW!Y>2y8mDD7(M0pZNVhigT~+>PF% zVe$4~XfHFF9z|<`!`Q6y;RZ}hQiKPrfX z1>jYjxp}$z6^}%r<9{BxAiv)=1jzT$w?cBxC}X+_whJ-y0l005(?@4BRN_or798*2h|UV8Ij~2RG=@5>++3uU`i@II+f}i7@xJXQe*G$VmnY<-*EF?haPN+m< zqAq0D3A~nJOdy`489fj;4e~zn8ql@m$IGDXTSN?fE~kf7=g^Ufe%rL;L{tJiD`+3# zwdYzVQ`PpC$Pavd>7mE_d-1=9BGi%uN1AcHtIFKW`UZ`NPRaIU(CW@ly>FkHoanl} zk#m}=R!7MlQIxe1_|J0Nns_hj5K-|eNCPyN6EBpuhoeA^&Hcn=(|l_iZ)GMcvAxBn z^ar(S_g521Y~$VT-$37*zRgp6HT>OT$k#MP^)v7BQ)xSa5+uem%*0eLIlJdg`+CZ* z#G~6!E`|twNSHWQW#iq{DZKIAVs&nm{gNuf{senlr4-<%C*(|lK-43xWYuy%Xvh;adngV!b8`zC!qhy_V;fg^g73YJEi(p-F-D=4GsHi}?bC 
zYy)=t%qK`G%WL{{h)UE}njP+uHEUU`j;5=;uTx&0xSU)3(4{k$O3+z=g#6b(QYudr zHBR?z%wi{cW3kMGG$hcxc+DmH<&sr4KZ-&x$rMx0OHd(@J z9`zZN!{?hl0Y*ehOKe|dLv&G|o#qTA8Y2XtY?&t4M2`{OB!5J{j73TB141Fy@b1ix zDuBN>+H9;X58unX4i4NBPj(zHTxd7Zv=@k}oB|(grNp3}vuZ^VyVe44oT0aGGLU9p z=^X)A1W=%O1$$Lq?%G^Jd3$WP*3!i55TS4HQt^fK5GG2hEh?WVhTW+s&j0DE;Bvf3 zg~7si+Okmd>L-!oyl>Ttj@Dn9X;Rf)@0bxRvAx4s)dkL3EqIuK7`LcHnywwH^EiO6 z57v9H!x+AQs+;hy_P)AfiIT}cbQID+XSAmoIQ=-~A{i^HDxosVO;P}dhB@{OI1XRCfRHgd3oe15-zV+KTN;*_&YMt%vPSj@>gH&@evGg-E|flWxxA~0`8 zf9`7(BHyRO;df)QM|43spS;}SWY-&T!vY%n!{x6YYK%?Bb7{*CPp^a1%g2nDxt^bd%_l~FffB(l{i83-L@9)D zN~L6HCM!us_MV+el8lV3>`fUVdn6LF$=-W!$M(CP=O{kkcfbC6-P~S|=XpFIkLz(= z_v?PYU%Ds5(Cuk=JEK~LpL(jP4A50`&hXFx#DI<_@^!k+eBtaWF2ISA0MP$H&I!8) z;q!QXg_UcNj|(dnjue%YG`A}`vM*(>2z*lAIZjgjjw4-7qB<5$y5Q+fye2IBfsSEQz?wq@tHG>ga`(0P`D+@s!WOB1{T|J-;S5J0Mvpqp#9%CY~WwTK3Y zP7YpxqK}ZijS{R^o)YF|Z7VLbPCUGYK)`X6-Q9cwpZ}Z!{4Z>9sauXHXz$sCu;ZRj znpF8X-zzdx)vRc8iAv`&s4z&7zW@;B3PUIOQLUNE`*5{(;Fo;o2$PxRp+1OwAfrCg zkPenvLSAigGXBE_OBSEa0Uhv9Zvl8CU47J0E*snkpMr3dwxi5}mx8i=5y&NiCJwU5n(xWTC}LHYo~qn_`O(& zDKNUfVx%D2;kt+EbP!6dg^N{se|gc2Ub5)a)PnUFEHSkkf3Atga}(z;FcawVVtOco zwZnuQrak_V8!^^#pqWHoU*iR`vP(Pl$AR#PO*G!#r4tik z_1&91KeScA6H3s(oB$^@-LtY_Ff0ULB{KuK10=vFY!@YVzOha>bTKhAlRF3+ZbdE) zqC=@0;(qz|tMmJx)P_1($G#nZe9490^v>L9lxJbCPc`>neGa3S%|3x_9mlu3*gc9- z5Gi3ctCH@W-PDuRT=h(56Hm`B;5W-sA9x)vDw0tBt)%nei4(ar9fwT4Q7;H81zxdS z6->ZxTJ&2&h-G}=s0p1HSFP+ILh3-_9vC%X zDLTKd4duL1$>#6sRch(KtM?!JE5N5l9p#8N3L7~gG9@xftG!tmUu!tX0UN6mqeEqA zGaB<$gZedhX)s>3#OE{`z}i>CS{e84{u1uD(aDFOu7|9uUVb8Pdjb0BkkCrWYo}6? 
z&jZ{cifCG8-roD^Fb;Zeh9kqXpE^bGpF3c>c=_i_DV>5%+&R(wU1+cKeSPKu3{K!ffehDZg%&bHAD0 zJIULzSY>*Ggv;#hS4-e6G|uz#>mshpV8c&i7)=7oL>0FG=}{AxjY0eFCr<*D(aRIncID*^9ZZ4`^m$C-5=>Or|QY; z7Ehl2y47Np%*GzFCO!9&R7KDCjb_iOIwVy$2C<&N*adfctda8WZ^C|*mO-cXT17gcFWwIEgX)A#1RQiTB|&tawQ>X>eoYSZ!QL~aY{yt>BGDa9hRDl zBvGu~QV~*NLiNfzAxnzfQaABjX$f0ipr`>NPr>`b^f#Cg4&G(FmNB~A zTBRbq`CYjzvnaPjB^mlcX1zjOcH@@o}XqcuyZ;w3}O zMh318z)idIQ&ea^?E7odB%2d=r8F9%?`|KN7FoVy8){Q)1Q&hW1*Ozt!}UJri*S_) zfD`M=qi%J|@!fl?PHi7=UI!b?p7uJLhoEGC%$klGoBLW@wR7{fo89PN@0X zk~~3BiE;ZrJ>_q)d8guJ1k^M2NVCg-LtJ|okzwRR*t9wvk9RN|B;WDF8#+kM`I$9j zHi^$!TevX!;${VlOphFm_xEVbvrYU|Ew90wz?e^@d8d6M{^nNPN7KrUZ!ZrDtGX7-Hi;T`2lz(vy=UFo6pZrl zU~zi6%)3s<@T`4(GF?eYyeCa6RLgX>w}MKZkrz;rN=jOCxl*>K*T%-Di865*IJR;d z+<)k`u)9V7><2$eLX4Jzb5|&5%0mFx#@>sb7(2FgJfwQOK~;XW+r)Z*3Cmb=Bv)HY zON%IYy%b*DaC-L5O8d^z-}Tv%OV?NKhBoQDXzt_pX7cB|`}2*|RfK1zon!WQ2Bt(C zTCC|!_s|JQ181OC3sCVpuBg>-dSbGEXRu#$G~@Y%YTVcr+()^>@Q-Y#g0!?tCN?Y= zB8Vu@Y$$@*j!D;MNHDBL&2GvIWFzz+tXxbtPpZd`jf%7ZBi)8tnqd0x03SV?j!$5 z+3J??ayrx_jCu<1LA%*PtEDm4?iDmjxrM+@JuW8i?WDFa7zMZyJGV-C{!;4`NO%f7 z-LPL{(^EO?IR!prR(~hNXz;@6>><4-u_wOW(s-7TG4e^&&8P5E)NKE!^0z0u^0_(v zbkb{b1KqC9 zArZQ8V!ZEkytf7O$_$H~21JF|WQ?@qOYD#CwsUq0=+w(-xcx@}cX{VxX<3sdOw0x_t~*z#a>r(f75h=>d`?E+Qf#dtdyfTA$jJE4mHQRV6(Zu6NFr z-~IF;Qx63W7n&YKTS{wh4xi@92gAU;Ezg7Yd)kR87yN6TQxyIhyD3~74Iw$&_S(vg zG0q}^sMM|gJh2{|XR2Q(s`V|ZH+f4xbjJa63eH2*O~wtU2Zl%=s zJ00$h?x*A`p!;*;@d?dk5g58ech@)q2(x{fdRgVP{z7x#C)_3+EmZo1X^KK)#+$wr zpt5FNvqjc_7*?ob({!VrnwI|!B+Y+$nu<b4$9T&5txkL z4xu9ri`VR#^RS}wgc{HlVZbLGCKSeO(%-8#xVuBT7!`wLDNj~$4=?`+2gM2gCgGwl z=EJpyte-Yof=T>^F?tCITe{nj_zK{?bdR=Jj!uVd3vDQNR!3afy=eaKl;_3!4$>R9 zfDc5`7d%z%TsIg@Q)D$%>F4q!_a{EB70h*VV4aK%>phg@{{Xr->MrX^!AZQ4?|0Yk zu@vJIWszlKg}*Kbu*YTOx+|W}RW`UdTIg$52&<`NPglgqoc6aTyaGKCns(@3S7C!) 
zAao|4jzsxZ2bvR6{499R+hNhq(rep7bVUOGgtXnhmi+eG>;0DrTUH(^U9{8er~GX4 zBAD06qlB!#=(7o34!uyP(KOAZ)BCD^t8=zMvv!jNu7A&zaqC`|pE)9JCVN4&y97X4 zoiAA)nh-n9k(0O5WR-HEbG5Fj-s?)sX^aQS6^y5P?+IIn)B!>LJsTN!dPlj?vi?gS zy*+AS`3YuSr4B(_YlEnX%_+SU*|3DqRBH*fM zGF`G+y5>rGR!z2yiBh1+i%BF>%Y$!N$5kRV*VP7JM%5_l4+6mjwvcwiv2xdUH+VN| z-2+zA;CYR?Sfd*DYYr3lV@{I@t*m`U@vTiikNq8=oILdcnj4PCP6XJl)baSF@BS`s z-=Gso=z$DMNyazmmFOA#bstj5URF_2af!JwlQE2FC{ALEw%3dHHwHPeEC8guXmkMb zC0myDCPfZ{N7MZy4L9p?pNb+wxQu<#Y=Jwm6$NHG`eHHPS_B_t&mX5G=By1;zh<%U z+rH^@r7Fo47lLUMC%WUu<4Vh}N`dB~>`}d-*A>Z+Sg`|;jgzey`j6n80+b64S=h;GA2LbR{`2>&!BcOh5L)D9 zV#@8R-qZ0iQL(S9MbxIeYZ}`U!aBx2^8TG^bmTO=;774HPVK%VODV={sbdb9bmtj+ zGd;~}YK*8;&Xa-Zr8tYP_y?BT=}AYMmwC1{P1f%r2yEErO8Nh(ZGL(b!?ybEew?>v zdhpWyT<|tL$8Nf1*6Z~eIZQC5EuEH=`SZ)3)x-%~gW zXTaCWXxnYdb2X^gkXWfIGx&MX&c0|v&1Ynw6KQt-e{>M>I~W%=0R#x{Y9`W`QZQuD2+XG(8ocy_Odo>PZ#YO-Ee(Q9 zrxPDa@eTQ|f}^2hY|>f_=kDU7rVi zRx?Q7o_%0@ZgNYzDu&1Jf6)bIk?_B{B-fX_hBRPm`REEbD5Ws-em8%@1IVBe_?q#A zR487YSX*1eW|L$X`Fqrp3VX>L_;6I$6T-ko zVZOn5?V|pA7qRP4*(2C>0b1e18OvHJeIzp9@qDhYW;@L48u*%ETAd7I?j7(T(v&za zd;6Th^nZyr(vPsOO9pZcB~g!aGxr7zFr}s`g>}jHqqq@7%;HhVE zV~T~a_Pq$f_mAe;>i`-0x&egE8dqfZbm2jAX1DbiOBS1g{`r zrO&(_D>;SC>RbcZVx&>^>doZb!lmCT!dUDDr#kK2572l$%yN zUl5l)a#LID=`Q93oza*rC24YzD(}kc`O$g}1I-bCePx8nfa-f zz-bNBefso{WhBv05R7iQr7l=w@T0xQsa|M$BAJg)0!QSHij!u)F0qkc?R}mDOXb$J zlGC-S{CUFu{)InRSR0}~7BEXV^5;fGe}pWRNRXEO0z+W>7r&0{=*@MbH@Pk4LiYlw zPrO-Ue@@m;ynD0nBsWbDG=6$9d4}50SGehgh&Ggm2|GDrVAO%Rc*BCYts^okDF;}I zUuu}eo51Tn{nt1VB%AdI`94Lr$#Mm8FbRE|GKd@%?a7 z5vKO^+TrZ}uN!MEMUGfL$=|5-C)_*+*3;KAggAaCunj2C(3+cXNTtlNIKuGpgN6dH zcBO9>e=ECB-;8Zb-dj-yi>6X>nz%h=1EM3NAHp9-?-bk_OtbBiKAvyC(qIyj*p^tE zn=$98-2^Ab$Mb0Km7UPE`)b&Cz|u1}H(o_9QJ;{PF3*2Fa$4rRUcB2lUF2FkQ>j%_ z3W>UC-vmX`)J)x1p+q;j8+mKO^qkEtE;BvZ-;N(3bTDVTH56Q6(Pz9OV1LqiprfY9 zg#9C1m}mF9b&+E+6x+9ZhIouR*~vCtOu9F6IW{(jTbO3u7nnSSOQ}U3uGW4X7;@ZN z9C1$(`P^~RZooiSCGT09fNPs&WsfRb9j0qry~(a9{rkP*u;e3z+vLlu)9*^It2P%o 
zrZX9%8ehk6e9!(Y+_z@Z26@ruBb7@vEce2d7TZWeJ{}IUsmJN(rAeTdEt!8jTE0yX-Dp^Wfw(Djc4-B0QbdqSZgo{2`*YsD8SkmOBYjv#YPIP_^nD5D> zGg=Q|C^9ta5%>xNVe!SB-J6e{E04HRLff+gme5Y#l?JQf6jmi+vzs>CYF4FRB92IQL`gf4ChBP6nUe z-2Kj8f#YD}qXmVsTU7f}VlE;z3XQd?@^?OF!crn^)zA%`GM9lhpdsG|u(0CiO;+p# zn0BaJ^?k~5lfS&4p-+R}IT4!q%pJEB!gB?tv$cv4eW)@z*|ffFeyEB(dv!S2#<`6Q znwSpOTBWZaiszrwKSR+i-rr-v-N?J+k_AHAA}SVLo_nXv;f7ee1yfjYHX#_vE z*-XnSr#8 z?yyX@e{QUsONA_ET0lpa=MVLaA|C0ia-(>eaX8y2RJMu2q|k~|UbYpDv;f|*!`2qh z77MivqjVngl92@6m?-s&JN44Xl-+nkUJ;(c+ zcb|PkWJqjAI#AzbS5!OBvd5WkMgR3y@REe2s*%s=3j%th=}BH4e_A#zGT9$G;E!Sz zQ_p+yECX^5PnYApjy<1d-w`zC61YXx%4Mr`X^U&z36qd1I^mq1NHYbw(PxwEHD{&5 zH}X*PU%YYPLHs8GJGBLuuxGl0+PM1ObZ0bQ`fTn4KB&8@8Xt;qoAzXzXkD=wZ&H`n z`gFzPaQA<+evrLKUs`%Q+?i$6b1q+Z(1^7?p@uMniNf)9M|>3N$sGd$aPm_!_6^HX zz|Q-7d?$Q+;cPekw85tu>APYT0jP=Bzn_8?Xd_HcPAGvv24WrCm=Y3BUoHj1%&KE6 zzvg9o^vLz{&4~bYPf>xf80)o-@_q(?@|N9euAL1*_>Hdc@O9FO-Ss3v4VIEfnq@TBumVv#ZN5r3r8Ax$$354PG8dZ86 z66Z=g>aI(*T{SwkDAu{kKQqgz$5W)V)dy%fKv@XkD6~n$%4H2CG{bN z*!D+hqbKfYpTlA0L^QBDX9w@#23R$rG(dK)P8>|%q%mktiWmc>AzfJS=fUM?7&l|~tKufldITg-w$yQF;~{%dZ-ceYo2+u8Z^KTHFp^PMN_*dQ({0LJ(g zue(lO#VHEOn^8+Z?P%BOTCVn3UV5JwpL)@%4|v2UD^>d#*sWpj)JyPoEs`@Bd}x~m zSzp*12(BVkM8DqVo*7$-Iahr0LYnQB{_6;IC3H~ zKI1E>6g@yeMRdq!IrHbtZ03Sg+0XBQ-!4ox#?^BED<3C}eh{92F${!pD*twSpM?K?JA`$SGlcSO*tf)I>_rRxtH6k3|<^*C^VBNLN9w z=}bvh+Y<@lR8<4BO#FR(wlvALs7hw2QjBzM|ul;eu0QJM34); zM&X6pe(R1@vsK=bnhJ-3)9J?!>!k!kGd8B}Q?iyk15`0pF61X2oys4$?$rT#Ba(XA z`|7w)JAl_0YaY=NTHaPxaA5g0v$O+)% zSd>4jh5c5zbH9^p)e!p9j-RmVG^rA3fHqe0WE9Ie`7WZTxl-dJL&BeNx$=fSJC@UZVYiV{ot53k*=YdOrqTX>WT16~QJMiA=P!EJVBR2tzxDhHZR^%=g44A?p*1mj`0gLQyS5|N zEs*_IK_kMzm-6ywU?IMn23HFbtr9LP{>g5kNyMD(=`iuc&FdqO`<|_;?c%SOqr{^= zOqf!X`_?>~a%c5Xl~CwNt>TM?VNpc`%BTFclZ&$!8@~%W=vIOupb}l%66-Td5L5ah zPdnQ6nSyjOlf|%80x%Rd5JyPmqt4GA6Z=0-8L%bp0D{Gt!_D5gDeNl78c`t*7lAPF zC%b?H<{xbVmQI`2-w${OkO?l zdVW)fLI?!t)koP*ba`%fR2;5iME412ZQto5`Lln~HHdrbG45yBO^3Qc5ahwYdm;LX zoBOL8&kVxJ*Tu6VQ#l34?N^_+15cvd3FtqG_D|!z!6WKzS)?7Kk&X%0B#P@yudRO? 
zkvy&rj!#7?W#{n$z@P|0!>7v`WI1dG*BZPk`Yq4tL90g{k>-$zE?}k*#~n}m!PAO^ zDW=rByY5cxbCYiH`zz;HD|d!D?w}vY7Ykp*gK0t=ETE({S~@7%_D1s{rd#tY=JNdymM)E?ZYy?D zL08697z_ix9YEaANR05>0=_l)?v8!6-d%no%A>9NW=&0I`VSRSNa?Q{Kl^77yA3o3)+!JSwmsWEr<(df-Y)O& zOwb#scHac?D9pB=>#vN6$%u;(ak(8AV%csa8&sHa`Ha_6n82}Mj-j258`XbLF?B zRC}Ws_P)Cl`rM9=`lAn`6Vw3^F_r~dyqkiifHxTBFV@{mRL~)!E&CarS6lwxXQ84!+*$*`NLW%3t{s+#PIhU%2(14iH+PzYHcUHZeDT z1v+-~BbAX59CV{hVO&sE!k&mmVp03)-k_EfESFT7kYQ6mztK`O4@nRjsR_yJ0cMW9 zTu(kT9~ZF-)5X2_rIcY!A>p5VH$n{y7ik zsBrZn$10$Ae>~1#$Lt?b_UKa3Ma>dz&Fj{&mL^VMb|z#FS_JZ+L(ztc<}2Hb{OgnH zMPIv^u!t2l)B-z_0%{COuEPzukM*MgzngBYx-=)XF-I6w2}xLqh>xQb2gkHx!C`mb z+i+!-&vbVKr`zeC*0ViMEq}J(C_mcMX=6Zr5=fgZq+ahsVLr}p5pV@r4t?i)AFfM7 z$b|j1V?w(UPHzYsYbgyDnp>^a?ZRHbDg#imK>y*+jL{=Zbw%qsQ~jZD0=@9jHAJ7< zC<^RW+iNnLS%tBOz-_7{Yo;0@_dQlG7WA5|O(VuubZ;Ndf9u+-}eQ$ozt z@{SK)F$Q0SdT`t0tf}Of9x7YYtl!$l7g8hqfq{p>wFc%BaBj>!=Okpl8D3Z@bU*Cq zdkO{Jor8Lw$~axX+2@QlHul!3p>xUYHST$SD|bjPm;+bCJ0FU*Qm91FvYl>YmI#kX zrkd(9x^M6R85TwIPX?kX3xyETS95rg_PLO`Dq2{@wgwv&uEqmjg1#y6>s|fJ0MhkP zyOl-9$S>JT3U4mjRT|C5uN5D_5mj*M4>AZe%k3@&?(C<)0TsT?^!Q}a+NA=P%8#fs z5Ks0A=3H5mJ>zy?t_**9riacViL-3UYUE(a@lfO%^+dIPE2sRqDDE$gEFic=g2eDu zBi7$X_H1vEfA0CJhb7R3a_INHFR6^nDnX1;m6xq9*Jqg2-IXYx^tuNeTtyWfdS8*Y zepboC>ZTXwgRdR=5M+*f8Q2H32A3u)P6FKZXoAkoB{vx384ak_$rm3w{4)~O*jOdz zuFNhj;Rmq{WoKe{5?y^6;0&DPS#IjlZR4TfS#@ zE8=ySAMM1*X07Yl(3M}|*I&{Mc15^NV9fH|&=!BB;XiW$q4?u-_8*$@VsC(O5m#sM zSGyF&{lMK|S7mTlN)x(8bT9J^-Vh>zU!3k*ugN|-LNo{j?08JH5IN=N+wg@3AA-%| z2$F zm~O6g;7y7}>@fjE(IU4yL3}Rmecz=-4(Y|&)}lKP)aGHnY73qe2U}>?M0yRyo5)a7 z=cjkVs?(?-j^YFWmZir0xsHQ)W zcr>3Ne+u#RzmW>vm3!L}l8oU$UK(R75 z;KOz*0h^lM`2y97w~nYGa=@hT0zE=ZZ(QFBzvM;FXOK&;3+FY8sn5pF6DBHA+qm(#-OwY6amhu3RE<2$5&IM zb}+8~k)eH|2Wd?M&{U9+Fy@pL;y*{oEYHYJXF|@L;B%T2`PKLUc(Sr{7P!R5^gNbE z>IiA1rp`n_o6}OWViYXSf{|9q5i|Y`k5;UlMNIq#xA-99V<2aJSv2DH`J;PWwa}Kt z*4Z7B=QOdw1ngrhz!4{><3zdRC6df|G`L%yP@ zsZRY5QxCZ1zo3*>@>XAvaI838=_f#Z^BoA7=CTGdwnMpFhGik zC%+wUKmK!zz-@%G!#Cab3|MA##i{p{k37{|o8N9~zLl1ADqSx90b%sr@ce=P7iIMU 
zSBBVI#z`i7n&76>!YK)-$tE z-|hEYZ@{J|sDAv{9QGIH?QHvFS(J%LXNs73BEYIa>K*A3FNJ+4sY8DU`e2*4<~PB? z;Pit>*c+=FC{|O!f=2yDVU32O&K7>3!5DV?o1vBOOESc#R7!K7O$xJruEw_)_mPHD z_uZN_x*P_W4JTv0@I$`&Xw^|Cs#&vjPpZN{SpvoAyXVfALu49pK623y!xOgs9gS!) zzEY3Y8nf0*IfiH36r;$W%^oAkt240s!ItG4Ro%`o*ARmXm25N&L z%@55ux@xXc@5_@Ly|o9(Pjr?=MHj6Do%9ye&E;WdGri~IoDtmZOZ2c}bv}|;HqQ>b zyCY>dc7NfzS(Bm9gS+xZ;<#~FR!s3pu##V{Lf}_?E{@sK{t_K7wa9tP`v7JlzCg}A zc0}YUAIOezJys3AF1bs>`}<{EU5lY$P0K}cNddm0Ix;-XeXB*zb|zg`S3kZn^5;;s z9qi08b2)P=knT)ctKZNtfd=RAhhw6E_VR>=H&S=O$$nc7B4Q$aSc%?S(*Iqdxi&n zz|k$xvcBtCc97j7!vsxWO%B~|d7_cr!wLEj9fz3Db4uFm!0ild`oCfs26D?kT_}H z)u~xaJV;;Hp4lvTPmwimR3T}HJxmKasjzTU?_)9eXGK9$yP5q%_h=}v3rlJ#^>g@{ z&jjEW3#K5bzx~3WK8}mGQxvIgb^>0nDB>fFwK!=dyJh41pT8ChL8-2cx@)>0*IGEi zZ$6q1iBJQ(w3yj^W~Ja{le}V5nmZPd4LJgwNc`acS5Uy!>RjBKrd4#5N4B+0*(HnU z8W<}TjMRp0r#d z2#(6de+^pp9}rt0dt3_#HjZKG%}q z%Y|O=3?A>H0HSn|SN1N52m6eST6hvKg$-#j@2V3_8_v#_HjKWDEH@tu&Srhm2~wJ= zW~=*?ZxAyqzJF;L|3h06`dXP70mb&_N*+V~Dp?NT2pF5K)e&K(d(PWo1?&I$Onf4D zc6wrWGE-Igiy~VeU69GQTVMCRJpMK9f=addjY}F0y|29n&+yuKliwgB^5UfJr^sIY zS$65bI}xM48&6N}yJ3+3h4kq0Nx#R+9Nw1AmcyYeR23hVVWdD{qoeH!;r5UP;hFXA z?e4kI=0K~hh1FW8EelMKlau%P=tt8eF#2o)?!|lo>Iy1eq7-W?tErtT86v=&=cLo5 znpX(1;Sz=ukF~mC%sEsygA5dz?C{!XvY(j^HMb?BmaAz3#=5&{92w>s=iJ=agG1cr ze5l_Dn8RB+iv0XqUqNDfI!^0zRdWy3eQYaSV=O}wm z;rC%o2cvucxZHz(Lw=>O1sgr#;D*pBlUSbw9JJZ~`SU$_`EkY(7l7R~%$;nHWL zf~(VvVW2k(S_rJQA6@|$ex4ZC^A!{UFHsv$0zLR%k>GkNSUJxDOee=@fY;$H6?fu` zm7mCqb69!T^RYuY5{4bVf|-u>bCl$~kC?Rvl{K_GzX>kt)JNV@Coj7iN`juvV=$G8 zJ%#*scrUCZffL?21j8hN{ykgWBb}V40Z-vAGq4Tw9?{o03^PJbBE~1%b>ke{rq}tCqCEaK(55E!ly$w@&Mq;;rjaendnQrSdZbIME)W#@SH}5S#YW4-~$vb z<{@WFzbt#I?kQXCw~amzi1ANkF?JTfzs=3dCuu`on8)nN`gT4BW@O)&vPFoZK@*3vGm`~+8WXl+g6YtS~i0_{lU&B{OQQPc>cVY8R`rZAr zXU|4Ct@=ByCOuI+&MST1Ves@x8}$vV^f+A7!s{!>nX|89a{fnnmSSQB$>+d=rR_e?2w!f8x!Nd{jHiI!}q3f_HK54HPNYX&waPqW5+53dX$xtjvopeWT*Kyj0EG zV?PAbko<)Lmaxn3xd)erZa zB()?Wav#`hgfn(H^_!reJ`=8?;l+xcIz5rnSSBcc?^K{YF?+VzJ&TKBF$sw>cx+WZ 
z-y(oUDRkt}|MD&1Z0^H@=x_fNyd`FgiluW-_D7xE98D>Am!#o|s!K>n@Pi#z9@hRY zE&a+cXxbS*s}-!hRIXG`-rf6zuLul`$&b6@zMeBxU0H!wRq`keN#{Dsd|sq)xbs3y zrTN&A_}ijn(`2!Tws}s&>v<<(Dd$@S)BEpk;DVth5HiX34`^a5)esX zljvO?3EO`C?RtU0jdn;fP{?qMO^ozI!EykthONXUUk7V7UAFvCRM#4+7nc=jG2 zEwiF<7Ri^ozbdUO_h7F*si4DgB?eI%Ph;Kkgkb=Yghu10-w5wB&4M4+msko5Rria@@#jUVVALfxQ^qIhEw3}-@ji4d<^ zPYrNPr?~h0YN)f?4{{VOI1H;lwHL%MEW~lD7hbcS_X*pu3go=G51)+Ujgwlor_FO0 zPF${s?k8S}iO<$gF-86iBL&sD&-_+a<=&T_1dg7A_`pX}%6sl!mc-54;00I7bH2q& ze4YJ8Fz<@N`G(A2N6(#j23IR9et%6{%J|sW-CG>Gl8+ueIt8=Sgf~^(f6Y~?Z_RJF z7CP9ScKJ1^G-oy|ar*RWfy;sU(R4lDYNPk5BBl*hFG)`V|S+`hQ0f1DHD-MG+)ydPbMz+{RAQeEC4qNYwKMO-r>&5n z7X*hHrPGVazeKj$};gOuHQ%k%ldT5eNp%5Gy@?QMIq6uEPQn(zed_w?(^#iIzdbDfss;fi#uo7Z0F8y9K_ z`l(m`^umiMj`D976|B>$BW3Il!cM;W-(Iwr>3rN}8qTFYddfOZxZ!qT`uAe>*=v17 z-K_6_5EvLrHV>19)z-85iS=&B+yDN|3XAtx2}*R2GP8oD{nh7@9j8{z&AH0$_3aw| z2Ph~fY(_%j_KA?b-rlI)-tbs)R8u64a3OWAohgK@yW6_;`3w4^^z`pI3Kko9srGtX zQk;u^PHRPyGx-)5fz2q-`6-O`3-NN*<3FK(qyFk?5oB~PFoAi55t0I6Hxs)h*_5E2p!V=^_F*7jGm zTqrIs{_v10>aRT`d)srPb?bo3GgV>$u~6A z0U|)JGz{u(vZ(3abm8y6gtyI7*C@n5ikT)gMTUqL{^QwD%LIvSoJ>X3rCl9tbFT+z zhqFq4$S;R&uZ9(5b(&ViEWNWvdUlI2Q7t_$)NH}L<`%8$;!<~ax81zleM$W}Gl4jg zB=(&ng-XIVFgEhbu)&@V6d69v$GNql>g&hE7`?KKSB6LZT+cG0T8qP!F$8staQzY& z9r1KrjG76q;=qbQ=N z&(SqAmvrwdA!bcAeafY@+S{wqr(Q>Viii<1_>I?I4f}f5u)=G?L0N#a{i_oHQsGxH zUYp>7=wx%oTjFX3SRD$G^czl&)Udb%-XA)bvJE5YhD{|;XaWDEa}J#ByF$y`n|6D! 
z&x^m|@noNz?86ZeP>hV>Zg&6u)Wq9|7L>{8FDJ);LC=w2EF;qQL~(O-bK9vef|!`1 zNajcRI*s|1A9QZfryw_@*3E1NHWK;~N%ku*tx?fFRd=I_p&Bn&Et&8O=loz5#U>+>zn;!ALZl%kizCU*RC|+FXpsjDdUp~g{ zsBDca$B8t_}!waNoHJ2L7*X5jhY)cv(vhiZNpuQw@GeUQ{l%cag{#5kf z-oya+I#5;k{qE&$z2JK)PiH(d*bJxW6cL7#j3U!tT?a7#}d6DohUSfxT~+5b*(3x&CVP~ z$k&>ia5|qU0#bR&FlcWhVS}=AuIA$En#`kENw0)sz3ue#-FX4h&*b#I&-;C#Jca z4rBL3dw`=P6?$P6vY#}>uHjePqqoMB2~rN@BFIQ8#kbN)bbGGJ70!LfI|V6m_Z2!9 z-`pg{t@6jNGv1^(|XUlNyvL6#nT!20CWbF0!zuH*V5U1(dWkRI!{f zG(<4fl^~4yf>_xV>^2J5g$h$5iW^Htjur>D`7nWr%I7DEgZl^Ef0BPz&09X{UCX?A ztn_%RRQuOw9ceC==H(s?r%$fcxg=k0N;@UE9DP<$S8Ai@>|21(_pI5QQ?O(tf0m5k zfT#V|whE>~l35ys?%+{$N`J1+WV~l6orEr3GKm*)wl#oEx1|&# zf}u?uwWpdmVJYv<@+}xS{#jgg|E8LQc zw$}Vlt|%QY5*iNBibQ<+)=vFS9_}YDH5yIzPs*;^$plfo{{au2;d$Q3s# zwvW9**daehaIN>CuyvjO1*q7zM`{GR4m<=SpPABN2TZgR^&Z2B=cp)1_l+{*al~fS zH`wr;tuNTJ4AC<^_j4kreL|E7(NPEDB9oVUm@y};bWn5H6b*Mm`%SreBpbAHbc>ugqY{jfYk^ns6y z%k=*TBJ(BuWp3grU?MEMCBSklhSwb^5~2t(aC-=94O3em1DPj^Fbzexv^LO1v!$Hy4M5zAtA(aH*L8c0MiAfyPDt=@;#}?%L4#wMs7T} z=}x+(*fYmnqh&R;ZCC?Z`pew5FZ??Y5fgZT1f@0v#BzPX8s~qhihZUWotknFUHCY% zaeLy)6-r3OaeHzfo-PzMf}h!NZC^$&nSH9AmvfC941K_PcRzjl*!c=>#S@9L9_b3w*Tj2qPq)u3?|5P7fVsVWMvO<79@d+QMO&V6`$_ zNzxD$oZrBdnt1J)cr*Ze`_*R2xc+w}3&owZVg%J8X8>VbcXn7mw-{GuO-;?ysrF3H zdt3+2DDS+4(ZrelY@{Si2XTwQXo84t3h{S9pjFCqDb`~G^NN0N34f_-44)Z%hEYff zT@S1|=UuEsdc1SMb9l4-R3MCc@`28!?vYQL$VCVCHOW>956CKZPU=D8Qd0WmAF7?| zR=rw$gGTJCMP#kLfmBYUfpGh z^^upIA4YI>v;vU(GG#j+B3t-Rtd_jl3%moTDr`Cxd?f^LF8D;~#EGZ;Ew^#@C)lv; zeFQezd7A~?Nyj5ZPI?Q$x%;k=!AgHt=-_?k!NEUxo&3m3PH}79W zk$2^oXzuUaiN*Plc!bi1`CvccKgg)fO~NF~)9?uCSEJTRq+^au(44!~JR1M4>)Qe_ z+bmW7@zNQ4@c6q%3>-MKdtdZ`K|`god}V?tGEkcHEoMPsoUes1fo9fBptqHH;2g755O66=T3dM%9KQg+|z?0lB!n|En3K%w~nfmAGAamt?d* zbdfkf_&j>fbu=h?n8%0*sVN*Lpo=y0>o)h7-EwIs?WbV8$cw(mS+LkFC@5$qaMwH; zAH)u`^V#3Zw;oc*9Nmp)_;jb>Vi7q?idBu^LtOwD9r?EH*8z{iKZD`$Pqk;T@Lx8v zNQg@VoFSq2IW!7#X$ky@kM#~XOeig{l*wXH;3ZxP!XCg)-gly`nmTm;(Upj7L(l5|x!@ z%zyvz9l73hD^Xg7#cnZk(gvZ`w-E{Pf*UDmMrFKTFtG(RtN%j^eZbgjs6yi96YvO3 
zDY8PXx^0BT9~qjRIeU?xpH6V#SZy{(&Y@qOc)%W4d2T?wvk<6If)W+h`G*DV*j4xs z{eft-zne>sG6tp5_@Po?*6THuEIYL2j{P#!(CAhDHv+}Q4hWP363O3g*BDdFh9#k( zjIABuChuVmnJDhZ9VToL{pk*OdXBP~GjA7>Dl zGrsNZ_v92_0YhWGcXz?J9Po@XSO3Rhhp_jKq%PuE9Mawk`I&OS_N$>)vcYaYurJQdj(c`%u<XD;vyxA^Wix^B1FV+9)dtiSDzI#4lsnK$45$J~$2V+iH&_BQ?Qo4{xO+ML z1}{~Q(?9Od%SSFPK=0% zFnXMEO_%ac`g1!*T{VUrWZR>npg%#!qKlg|gbE+RhBs0$qJANjte<1Ae`a!G!Yw&> z{Qu+ZzvH?7zyEQ(9uX2rRw`7sj7TWs(Lj+AB4qT;mQ6N2(v}e+WMn3@kR27-p+ebv zZ$jqx+#f@p)${#%{eI8C9+#eVzu#}Sv(LGnb6$2{uf1-BgOK6QIEu2oVeC$fYsMgg z^0T+=c6x;>VKVWlb90NSC(U-yswrdcNVvulODvyhoP1EgAVIam_Fub1a3@Dc67}#+ zU=us}pf7^;$i$bw*R9DFR<3dsyOeI8k3l6`2t0o;2iIi*#rzn=k0=$k{|A+0`Mvn~ zt4f7uWSCdatrzCuzlD$d1DAv@`m=80S)5T%-EEy7C;aI_oo$n*u}%({{?b{0^fluA z{CuPQoZ`Y)v5dU^C=bg-H%uI1-W+sYQB0WX9mMGi%(ES!-@n}9wbpDj%g!>584n_c z{aO^*tgf1;=Q;@lsXKU;lhg`D)rO33B7&>D6BzBEluT~qdsrS`nH47|-G78i#N!*S zqmAd6cQEIB6ooYTEQB1}aP3FVsr z6~7Xg($A}hx<|b@e5OH_iE>g$@@o zDniE<)FQ(lo@ITkg?Tk&Adr)mmgYEOKx6Qi+MvH3hMX_AYXO2KTV2DWRRv2U$x)M$ z#z&j9f_{A=BN9a(7A9=ZsL=!$LM%}1;2jju`q*1B7n>cbfhrFnbbHUcw!FM76&&*< z9&d`V${0^JmfcwILF0-|S>m41wm6DCqA*`DCtWLhq?7S@GzMoVuz#8J5pdTXO|v^V zw`~sLu8wTee~jYUeIrWi=L*kRG+gw5ID2UTsY27Gcdxl!if7#9$!M{`3zOq&@g}W? 
zTR_luLQ$v9n9Wuo9yMKlF2|zfie~udxws%|U^X>@z_)MT){*>W&I}@MB`_VyN&+Cn z6Wq#`d7M9>w>Dk;N0L&}LHaVzO%oO|XCWi4bh|wcM}d;(;3`W!d076LPe7QKce#7j zv`)q2{0{qUCE3I7uD7@zJ$lr3068kU9c5E4090?nLQqHzq@l{d!^5V8J5sbdBBeGQ zQ}m^t%`OjfX-IKSRM@z}DQ=aG_y+hR7Ny%(JM{iMPQdyu0>g`^nXQ@Uk9H5G6iFHg zuaiso?a=tx-r9;vFD~PscRRRNCuGX>Ns)j8`o;GZi6zi7@YVxbF|OAcJN!)M)=eKW zRdQ{CpBJPzFvCFmtQHBt|(o1*w zMkz|!<*Pg~Yt-~Bc}d)@)MH^~wT@ni@iSJDDEYL>JHQJ)=c5v67IFGO#__C6QcF(5 z?cFWn1_bwTk4r&GYZE6JTmVjZ#QPK`Dk5IVMIt)!iIyCicy$LO4ShGe;g&HY{#kX> zM?oLwuD_zqd-`W%<4wTV_ptAF1#hA_yq|IA*>XRWxqG|1x>m7epkL}XOA+4J zq#6DI$9Ye^z>Tmgp$@x`gief6FCw2*^s(1IDjDih6KDn3Cx3_ zsk;9)H+EMV<-x8nM^c(yyG5TNtJOL)!J+tYVBYC83f9myWJaxyHM2tmg|nnM=6w!4 zCw4`R4I6xb>j&ySB&f#t0MxN0mO4mgHN#&r9u`30K|^7b6mMd$@JZ1i-V^37Y$yFp zCF6y?s4AG3r1BR18K3wV;GlA}>CA@pOCohvG{z!YU7zLoSEV2yvvxkVT$l-S2og5I zD+Ouy$g=K~Ua(2|9GNpO&V`E#P<7NhyfuBm4#oyO=9w?@y+`CIYH zlPA}YdtCgaI-GHufDzb4NVudf7^>nrJ2f@+Zr2juQ4APkPLD6|c&Oz|a3u&FZ$LeC z`@$8zgu!xQUj)IgF0)p6-G=`kYc@PSABiG&)-8O(iIhOABcCsV6}N> z&!0E>sc9igKyJBr|NcUu2G$FkQOgscVC&igxN&29`{Y5y-BOKpUp|mxdhO^Ilh4in z<3;Q=D*grI0bz!YuD1r20j-Qu1-mzqjG!<2w6R64Gi~CBI;D-G^TE;N^vHbY?Ivfq zolgGYugef*2V3%y?m|BHkD~8GP(W|f_(f;-Oke|Sl}@l78L5kFp4a1Jh>?iw55=NnNZgd)UO3`0Js$X1QVXGu*Sj!@%S2z{Tj%WPTN^LmKZTYjLVEbdlhsp~%;edH&eIXE zw0-J%S&0qffUEouK|V$`i$Mjimnb0xxZG_a=o0s&q0smi+hx2!5LtFxhPqcD3 z>|QPK$~*#ov+Q3#frxa!ihhFKBCW=i>CpoR4%`!;csH;k1yG*S(~N3iC%qdsu?pu) zkZx%mX9ryG+D^oodna9ewQ9k_FEYzrgOXfTaGsZB(TNbB2`On{JiMis6hu9o+d85O@$ojX~d@9+jCz}e6FGH-iK){0H99>{23AX zBr4=+q@=k6#5eb|D|=!6iyMf4k$zC^+%_Ub(eB$-$V#N=zs2RG`_)0seE2Mi&#=WI zj?J+&x#zZ8*5$!3a+GO#ZYZMM4=l%)6}fs1!t{dOyAV}Rko8SJsA!%#$5tS2Bh5~l zdMXU0{MT+=XL?K4;{Sc;>1cn{ye+NmcUt@i3vsOyerZ3tX7;QPfc5{pP_mhi(XleaAmPOwg;=fz9T?`j<8C6 zpNPpLblatsFEC~PVL^^hxA+(*EJu5Ug|2?sPBUjImps9yY3(My$_Mwt(<1JTjrZtGV!D~Hd#|ju2~LkGWMvpzsZDf9nfFZZW4pntm1R<% za*C3glw9q@K`a9OCVBvwL0Eiy2XcB0BeIGE+J#tHsT}D#3m`YsNl_-I8&!e`!o;`5 z$4j4XAo13QoD6`{|I}7^7AyH%QCvvLPulz9O*M~vp%d2@-lAw;mgWVv_;1wDBD#XH&pE9B>QJ8s$+3o^=@5UlQG1>#O## 
zLva1##Ze6r3>|r{-}is4zu*vzH=6 z-J`$URLn6l{$jr*izKz9=+gO>&Itek{w3><*4IT%OXOk`5g2ERo<6-DS8OZU)>{B5 z_(e~$YhykLLg*|sup1pqI6izcNh|4yP4|$uGZt`ZAf9k}v0%Dv+*xqNKIVOMag6xh zV?!e&E~=31#4r*iBqk;c-zIa0f@#kiR~A3PX=Lp3=ObX*J4c{8KRAFu;#j%Bvd6v6 zn>~V{KMLPo;Ty~=4gT8H-hKd(VH07&7mV=66?Y^1Ss?koO~4Ib>NQZ+cUl}f5aC_t zbnSR%hyxwbThGq9NV+P1u@K+26C0o~*8p`;dSDM6pd3^!q;OiUv*i22K z0Eg88oAGq_d&>E*uT$3bRdf@JTTxo|Zj@Mo$B!dCj%zoBabw_U4C}?p8eVq!x{$Pb zqb|~;n>R$8P1V@P7KJExfzkvMW{LnE9`44$)jT6xhIl~ zV0zf-%eXhC9q^xj6MdIRMn=X`3`tZ}DOV+(`vEiH+uoQpzD!RzwDmEes_dv=0i}bAlMp;8u!Z zYM{Dlcw?9qYvSx^pey9?Q)`qR&C_AD2ZA%PPB@$=l}#?bkXIKm+%dtVHg0g}*O94y zQ#k%9KXxq&pi4bco}j)2}M*O zS5`Z7q-F8N;I zoQ28zk#6gs7`^V$0xU!fnx0}~dk;*L!%AipSAHleQnmK0@NtBhQQ2tpUgjWBnuC_#6&{}@U64>)*4I-a_5af z@{*H_16NH~-VU)<8XxjHM*fKMj^5}4J*p*-FmwNX1PR_mBc?nuj zSf_(klFpu&R5SiHM*TNChIgBUBxhqzzEN8i()F4P=M?yy!!a`XjIuVv0xF?_&sp#s zvH7*PP>uSxx1fYcODVW+SbWRqP8GgQ$K40ZOIKRI4Lm+goBGX&97bkezq+v#;NR7< z=piI9Ufrv#x4Ne_7D@;rgK1XQ#{H-DQ}%b{bTl^p7+91(C^BvLs^=lWBmJe4D!kEc zNh?UFhclOPhrc}P4j0~P{ zf0I4){@5T#P9L%Q&O4o3j~+WV_n=QG=oAz2%jqcbrg(NNXIn?#9Bs!Jonf?o?m)k@ zLu0TyCYP(s3+6tqqP1qt9dwa&|Ggg9&{i#p^q-qQ0U~Y+B;v;JZ0S13~}}*mYHZ%8NWXLETP)J1VKn>xMpDH$89)X6z%1ffB6^j-f@_!vmpF3 zOx!o#p*nSSM^RsZ{9957k9aAbQx1`^It^Z=*G0parGuF|)LB7+~{*e*h@ zsShMELL3(Ip9XreIY*&v+jXJ6y}d(ikJ?`fIcOF!jFa6$Hq^8$+5D$!05Q>E2iPwS zwr6*TH<=0+Ys6&js_Z$1(@iXXC@rz@js7INm46XnW@%m`Tjk$a%C{kURG8%3x?u^)G*W-?;qUkpG0dytyV<7 zDHlgRG&gZo`AU;soZ!*(@zPxt%$5rB+%xB5OCILBx@vdLlC-q6So-QQv_ySe>3_0s zE5G=F5f?4Cyp;*~b=D`_eH5}j^QKWmITgjJW_ z%55Y!Yl<+WwvuUp8&LqtS_RAg@@4_~*UabUioPVBmp#Nq0}&~4CiHhXTm2Owuod-y zyvAOjc&3U+k47Z*oOp;j8&$LTm!j1>`8hdqZQ?`&>{RQ;n$MWe*~RY0N84gpf^CTSkrpp( z_%XAY#oGpFDux3GR!3lHSf<3T?5`>xAnTixdihn4d3Nc>24<{PP=UgVf?Nk4+>8DP z+vgRM;-UHv*;COV)U=HMr|H^;FRG!>;#3aSH@}k7YAfJ>B5w%#w)O95G!}nKOs=+V zKhmXSbKP?W zDT3|(wOa{{+7gQBsRSJk%iy@bLO;nrlWsXZjm5BY&h6f3l^p-9)EoCy;R7 zj<96Bt#f(;AL55Ht<8yVy09mbt>cNffQO#pvHe^zFHqIn=g&$U2-2+f6QN!;0K9!6 zxkuB*zvd)ZOfiePonAKOv%6R$WMc>X%zuXGCLHi#tP_Q1PYo82_A1@90qT7bINz`0 
zKDdv9ua!UJ^BZA2kP)$S|02G0Shb-yCqFxCfK5(3=hgE{eM-=B|CRG$*@Hsdm(l8e zTXx$B-b!#kWJ7dahNEiedKotJ$Y&(xWz>*X#M_i=h&!#soXyw^e{@s@C7qW!ltgn` zlhVq1a&mJ03U^=#u7T$_2hNV}{>exs*kU^%WG8vlkfU_pk9D!Me$112wga1qO@uUK zQxl-j4-diFE~+nSIDBNDy_|1hXJ4DnG);@~B0eD(Wb(aa!i!nF)Nuh8-L>OVnsM?l z(a=8i-gU%2M~i`SskITt@8pE$9H?jXwLo)Xjk=_$D1*+8?}q8s*SAt{CUQOMz@0SR zX}q{$whEpM-@NA@YK6t&kk9DEj0_w`5}XdVb4cAszp5&=i$pm; z`!y;3S0W4MyHxeh_%)?!FSPS}OnY<0^JKQX&KrvHmV{NEj#d^8xbhf|#4P@N;L3c- zW_pcJ=~Bahk{(Mh*Z1D_?}tUCXe11m)94gUUe=JM+()oxr8>_#=pNcUL%c(!s29g= z#jjyC66P3$-|;q=`mo)EwJ?;y{O-%6WXEKngSfWMODykbY_`Xfg`!FK)TpnI{-t7I zd@B7_FLpt~&KDSQhBPiC293_P*2y>iSfA9}_}51%oGT6%VX;uQHuQ0xeU#Mno;A$Hx1%tT^TQ@_ zR#w&t&}Ds{40#C)1+stWDs3M&g;NXT+Rfi&OmB7weLlZ_c(mM2(lWfwN)7Pb_I34V|V@J(k>G^2#9;-^`ncna308*N;Ttex%r%ml!D&p-KbT#3cPBaB|mq{)&VB^RY(20pZ7R(3dOl5!pV-7sMs@s@Al9gxjZ$FB%6HbUmW*1cNpskV7@{pP`u@N zRxI@uV91I&9^Sv{7??4n@2)*N5Wm&D-T>oXE9zUC=8+fm+_Imc00>qx4 zY)AD^xMTY_Sr>LvP)IOf3)NqtP#q8!?~l#U9lHpv6TkAcSU(v}P6VZB;)(?P1%BoS zt2vkccqcwUxJ`8TUR_g}#KC2{3$CRR3$h;cmRs%B6Cs^dkOMvLg~@8%!>@xIrGqyX zc>QrmedHbF}vf)9g8N=G^hO z!y#FKNH$cAX7r)Fb22w4C#N$w?=vfIbsym|o8@hTwa(=sStmV~VZ2LGi4;~=7>@~h z0nYaobRl0nTz*XbEs@O9yo`H9kn+XWyZ7!n-T{xIo^@-x--wsPt~f~%WBAuBTmV8` z>eHFTBgFzB`zEi_+4kFOAxMC>!T#6humA_hNCtvgGu86B+h_aLZ^EiVbbDnIGOMAe zC^>nwHeRKYyk5+mk`tc>-uUi6f1E?07ky*3_(L01_by4A4p@>UFm^Zmx z%a6SZyy9VVu9wt*)IJD&Ly+jFnQ;Gh>~?tQt+E>UR{Qd2;_v#Z_v%CP)7;I@-UVp{ z=d|wYQbP**mn1#LUX@Rf!AO^z=n-TIcYDA&Ec^g^vs{{q)c%!1{`EezIb6JDY7V}w z1hW&Nba&aZ^T^408NUK!*ZEQ9Q?o?s&?-YR2~B_U{}CqrPEQ>qJzh4zio43_7r>mx zuKk|5V)-u^uYZadXEc%{eM`T3CkTmNMEAHX<&)d0ak=r0QFbkP2r=hDGkYuW|Pn^J!Ir# z)iy?+!VvbTMJ}`XB6pA^(|V5QJf|!f)wv`9L38Qgw%Vc3K*f)-%2eR*Tz2j>B-mNM z))ML~T8PeV4rug@96l7FB$?rpJ545;#dE@=ryCE3gnmr#;qmbzzW-6FTY^#_#SM`H zC7^78d0t+$R4bh(<(+NSqLL@>?OdJbfE{{M2>|7{e+N1K;voFxDdzyoI(!ksci?fp zMw|ALopb;*B&6>KQ>5Mq23znDFP((h6fO&Q5>k%nvT=k1kamBeJTNGkh~Pg#Cp z9!lM#a`7thL+T?O{+D0~jUG%<%XgYV3VE0?c$wLTb;Gh2tbr*_H_h`4tqQ{TfV0$_ zfs|)!wYlM$!V&oPR6uN%XwistfPD%1ha^UN> 
z(va1c*jIP6HAk(<@UW^TI~y|VMAQL*-Jbse5Fn@ug37BwX7e~ti{bGoocW+Y9i+C& zIYfz-@9FikJ<@(dZ|=2NBzqwmJK6M zA&B?ZSua~J4~*!KWIoaV2X#%@9%(Rhb4^lv$sQc{fNVbtRk2OE-SieeF}Go+iv1b4 z!6#rb1(T4k0T%k-XyMJ-aYW!CeyW|YPxl+ceF$bJf)$&P&R@U`^~v5y#NwF%OR|%_ zzP;TYsd?h0^!JMay6Q8bY%&kH<_&ZYs^Jxd6ciGx>a<=G>UYY(LqyZtRIT!hXI9Yn zKM*6$VkY(iE0p(h!&}CQja2Y)?ytwgIiie$q2~&j9%r{_%FfkN7v|653Bm;FCj>B0 zaKvB8B7zK)h}Kgg?IOO2W1LiNTP^l9%!Y}0JKazX!HZ<;&#!!TkwVcAcItu<&UI$!0ri1{6S7G{TeWeaF44UJ~K z{%5D0iFO4Ur zD=cDfS@>2iak`5h#dD9{bGrZPIuirKV$IcE_zvDbzWZPLw_X_((!LFah)vKZj_$Qy zKHDn)CFt?>&#d(g>JNe`#wZhxiHo~A0;M7PSJOTjfiqg@Ll9v-$Oy;IaiE6qLVip8 zlYhMdyr+DmOK)MR0-rjncnabn%zrs)pyI?f(BQ2JwL#S*;_arykZODOu;s)0p(XVs zwHdK3#7>f&o4ShsbuFyC0<%l%pT)2ofV|OvJK_-#!U337CVPY9`gJcItd<)c-^!iVcav|032n{KzEcg2KlQ!qy0AS^2SEk@k1FpjsE-Z-xovot zlE{M%!0u^)o$WFCsR2HXom-5pWi`j~t47!!brsWjd-?d!B)#iylqF*qeA!v3)*mIa z3#4*kMGP$u%3G_-0t*Z{rwqB_Q*NqwZMt#PB%}{JTmJA-ywHz=7gCph3zJ%w8M#k9 zm*b&&TM;hI_V?`>-nM2ZXPUJcGDSYCJvS&@7gPy`2tR zmr1zr5?@IU9V;+l3f~~*uZp-L+TRow)INnbb&BQ@SCrK7+)cQp31b1*UH>n2^~<(` zuK;r*I1}Fyh(`kMnMa0f^4r!dHkV7u1AE+6zrXB+jn4A)jcP(;3jW{+jSE`(P{usQy|P5 zuxDz(`@~qUNNO&L%Dr$^c(6ZI!*mWTra3x9V<${hLP3nJc)Q^5-=M;dbcgcZ$1g6x ztdKqN6}1WBn@H7eZsmwE&apf$WT-@liL$fZrChTKc(>o51@qo>Zx&tItA`%O^OnzU zfI3~}HKt;O4 zNfj40gPLK&^}tkydagX(#$@sW!HVyTdXZ)}6urGV2-@b`TMG&b)&*hx>?YXlzx2~4 zviKhaB`cJ5Sp`?nCwk3`ZDBMnxOLY^&uqv<3Pw9s4mH1-F!@;VvBn$06I_;4QmQM* z0%s~>z=74gA}og7Ldg{ny#a3a$iEw06%K%cjs%}Qjbj3|E|RKStMc%Sv6Y@RK6*x- zxJABkr~=deZq9L{?dOO8QGFypp1*@e-3yykfKyWP0>1XS4&$^*wia*u@!duFyRYp% z9EFladV|Irm2G;7ZcN!ecG(NMADVb0C&^4l#aeHE7B4_*G z!JIgy5q4OgU&HQ-58ls#CIo)zz1jl9_!E7nzij=HxkzI9Qe65iWOfMivkQ8Gj(7Q(eCGgb+KpJEKF$nm#IAqXQQ1u}$+*kfm9XR4Zvq^aSuVxb}lbh(IEH@SU zg%@A9RC(Rab(c3>J+*7`Zpl^%lfE>3UXvgZw;kf|rI=V?TDlLR%bybQ`=4kXgw!n0 zlUl+EXnaOGbT9{$x_#@wi2R_~qc3(yD>7|1afdoSGLkCaDYqHRiTz%KOVMGCrTxgX zuvjc{8_u`gDznlVdw_U|ev}<-H6H-{}6@;rfSMM&}`S*W5frxI0ba9UW!v>fu_u z)RPX;Yi_K>9$HVD zn)T`fKFG z2;k8UNPy2sAv-}>f7IfOwHq7`k_6K<_`Kur{B?+ICB=JrYD9!4X&|>rC3fUM)Kd@V 
z*pGsWYed0w6A!lMC!D!6ei_zs9sVm)g>X{}bTEgivg9s4zyBK9zH&V!@CZ(}Fwe-J zD7k((X{Y;-YE`m&@_6r_25E)8ZP8d99V8xa+<03AiO1kzZ(0r9B%9v_knkepsQwP1 zv@+k`n!&%#_MkGikD>#gpe+REwQ~9WNPe&dQx5sr z83yWI$Uld~%9Tmm+!t~LN0abO>`~ASFv)|ju($OfZJ9!U zvPPnKcG2B9*Z7#-0Ab>6v zfH>OGoT!d%TvZovYXh~&E>CI=bN*o!u-t)pOf_Y+aPC6jDZITjq&}x)!z^V#rWeFD zSk&h-V?Vy?nv|$0Mr5gd*_(UsdiI_*qQ$6>HiZ`{cWV;dhRwqATWlqBhUHpyxe4o4 zLhy)*AsZ_m-KJ<`bu{VfwVAmOiGjsUht6KCBo3xm7a|@Q7?6Cn7iL!dm(TU<`TEg7 zhh0y!jd54D1v5|7v*;_|OQNx6=-SZlo$n;(GM*Gt9g_2?mHu*Cn&I}kPoLJ(Yrqqp z;qvdZyDWn<6E58PMWbAaACXhAlmpH8EBQVr1XP5heSpE%?xGVecu$YiV8@u=&wdg? zFBuV*38SITMa%K>{G@ctSHwo;>1Xd5@fou_Pj$Wf5{lHwVRF|3`r)x~TO`oO7@?X9 zj0Dp*$5r}JKNFqrbT}-abQeFsW=^lfJ7#U)C>~ABlEHy+S>>5>PaCBma^2y+q@l=L ziJV1zM4!L3_C88ihOuI+mQI0AnckLags=RU3i$0xhrA%S=#Fk=#ou(a28tY3>IN2> zC3B9mo)=uN814&>={xM#L4HI-Lug+_RMh$vqrXnM#S>tIWEc#%-ICU}2M7$(LVtf! z)W&oLw&^u^_&k14BdS;6%)}q*x3)x19~Q!M(0cj37W@0QwzeokgoXd#Nc^(Kih(GXhcqiI`Cw8j@YV2B*&YwKANmyh<-h-%eJ78R)^wK12!qIlo$ z_ON2p!l{jG`}dCuq+BEx#qxn5x^XCStCe`AlVz(TcF&h8h7F>OiGi6wzZgktH!qC0 zF5F2anfPy#+a98#bbTGd!(n%o0Y|YCYtS-`X~i7`4QD}CI&I#}5#qZ#`{ow;{<#|Y zhJ~nNwJxIgrg$`U`x}4vTg%_Reap!oJLmBK_BFO(B$4z4$K_fAAGq*lOnuZ_@j*rO zFin;XcB^JbeV3PTs5)qEU?~x{H{FoU)yULz__VseEebq9WiA7Ce6_C?Xwxbo{#V(Hr>DbvhL;T3pZR`mgEtk@6%(c zT&MEE4Y0E31r-yfjP1A)I0PzYcPQtF-@910b3k&lE9D1)sfM&zJjTPDIVe-xzBruu zV8t?FPwMHw=klRNCNaA-L^_(fIcD;^BbQM7VOd}C;Qvc$3J_sLeaHJ5Zx!(2Z)kEe@Ov7?9ZWJJqjMyiV9#miRvgevQDZeP4?hSB~oFhBjOi6oNk{7mZ9$0?Vj=d4Q6 zZHRSQSgA%LRm0sG9YKZ({rI}dzj+UXB6rFxC<){d#=Rr@DavLgzdFdPyhEzzyMz5L z*Kbe7Z_KET^B#`O>|VI2pO+u@eF=0x`v0l}kQU7|{muCLHd@~bqzplk&-Yc6RnH`{Veyzl<^3xPAN&fDKOXfAo%u@Cu@Qs&BB7R?|eWD=uo4gyt(Ng%O^b(LPncMrhkYvw zXY;RSwcrn%QpP_%U0RKJ<@4i9!sqht8^V(=*Ik}pT6NJ(y)kZX4o|r8d8u37YSxUI z;;2a0fq!y*ddsV%|2RI{eELF>pM}Ts!nOV9(yjKHOBCCSpKMK%N?Qor z9Z1()o}cPhTU(oIGk!f&GVwdAe0& zu&_f`f}Phqx7gwlMRV&*J-_tzo$2InRz{j{tX)`6FCRaWEM-X>7~I%<w~K z&aP}eHq|uW!jfywZPf-p$lTm{b>Z4#j?<5Y3#&FeE)^GRYp!aMCQGf4n15-E()=-M z(cJ8{)T?Zj?6K;xx^VVP4aK!qG2g6p>tw&-5r*)u#ailhiAUMLcB-dK`et>1TvRT1 
zflZtRjS@NE$JNyrQr0hAOL1(DUol~`{*l+njo$ljGYkImbr69Ni;w;IsuGPNi?3d> z&BXJC)m|R{wWvBDWm}t**C?1tl{yxd!#xXdCOtnREOc>hZq5oT3ei(2nWmyC>zv_GF-Z_lATcEx4usLpT{Y71%K=@OR^Z&LyM2r_+p z;zB*Kd^_VloM}ey0@FNlFd>1!dLO8P^CCOR+|zd@uY3G(?S9|iEN+*kmuj9viptSw z7X0!wJbc;ZmrX0#LiGN@M2Fvjo0)vbL-cduzPp!3@nFMW0;Wu~mH7qDRl0t>+x0}3 zMWW1oroM`whN7pNp4~IoM65E=5p~bFDK_sxZOV2%%_~>Lqi8n-NPrZhl{&Ec?G$=dC7donsG>U8TiPJ;h|i6LD?$8!pCVY`*!O3_r8#pg@t-}{Z}6d#WMv0^so zZmhc(gJJAZhA)3UmNQZ9@v_slns?VO!<#4A>SZw*nx%SdL$T=za9j0CxQjfFNp-Y1 zjdW#0P@m!$q3}>RU5LNox3{Ckh}@!8e4T^Ya2%~n4Mslw!hZ4j*~9*p&xAk9My9Sh zsXv;T+0iEM$rYAjFfr{ez7k&1L9C}M6NJS*glIlgH8Lu8mr}8^oSE#@(VE*0D zj-A0b8x!~fs)@+hnr904y9qCJIjF4TIZA)#B)w}xlR%5p{Pj7JPln9(5|5q_3z4Aq zT+1FB_$eQHSj=s4YR@KHwT*^UuPjhUY@-BK1~(Rx*Mhi{TvKC|5tlC zLV>|rfko1$r)uu=D@Wg#<%f;UtoAm(+UK}VjD*}DU)M>xbNcrBtoh08BD#CXe`mZ# z1maGkn^D)gn;Z`Jo0hm}MCJu^!Ll{rQU~$pO@+Rd*irPG72QKVkcgPNj1}C;F`swT z6nC$!GfG$T#BQfj{Q0oh{e<)63!hde5)SLEF!#dsv9U2(5|Kj^1X{Q3Ax(EEVs2#$ z!nR*ATm=e}Dn&lW-?=l@RL3bMqjwDW7d$6-x_E4kOMoD{XiuW|!;Rs+_a@fz*0MU9 z)fc3|b8c>CIwtuURozXrkfQyqYmGzmC~lh$T$dLeX?jBu^GOr?Rpc8=NM^=@!Z|;;bKkI{qYeGTT`4lBD$S+UI zxNrACzz+YCZUHyY$vZcNp7k1-vY_Gx{cj3uDtae}FZK?TL~}$bbbOB;IHn4_2|!|XsM^F4A11l*Pr;*skrDb5kjusvqu zCb2)NkkW(LfMzi6gU}C%dlpl}_mF6Kup1K6p#yJ)z)|x}<@^wmxJ|^e~kA&F;p*wpnKYP2M_kyA1sU~_lx=*xx&SZEN zy+Z}6>@@2kGVEv6gMn>E$!?yGg+Gxt@QbN8em^`I#w_BVoV%)RyE-FsRNJi3?tN-A z6Z`n`(Ovl^4I^d=Wu>L1+q&a&S%s7dWYQ-^g@=a+9kfrzZ9WiC24O(wIpDhj391A# zKXKsy-g%~uO)et;+LGq7Q*Zg2Z0^!~AGR}j)Ra#;J>&!8Y$4HBo}>5Uen~Q_n%q2H z(!|!v%xr{Q;o%hmeRk@wxYwSXJH>$eh@s4zKho+-!Zsn?addow3^$;@@S29e*-2>8 z#{A;A4z5H+CXWbSh)VcSHg0;Uud`<6bslbaHf<$?O`r|qN)ZPP7M2*< zFw?no?XUS=y_O#zmVGNE=e!r+>IA9csWQdM@?fhLLolzH&k$_NiS$zw_=s=|q;pqF zNVp|98d>nVhh-h*y}p`|e)mgOo9de=dQGzT_@jV(#JGEb8-F_ zTkok$oWK*_@OjK-V9IyD*vy4Z{`(ZM+c}99F#!~EXG6ts#`TXb@bX_ggS?#v>7hEU zl-@^hX;O*leBFD@5BBhD%l^ojYZx?>YUyOklB@zNJ2&q%TDaaWM5fo|B(O4BH4&di zNX^-@>3`5mOZcFVJSSQSL{RdbS@6H#1;AE1D1}uEewiU8uf`4`@@HexpXkWN_$@xL 
zu55uaMd*OfGbB}DS0f}ORQ&i?W(wlzsioGt#rtc#4k8ze_Jn&}{n7Xb3@~~p5|dpG z?k9|MH#w+WG>VbhRmP!xdoGppd_dkkrAEt9Z4yQ%XG|;oYmryuGwSaWEI34s;y>>o{pyiz|Pe&dyFSkyu zW_0`r(&)wN$eZYQ#P0`hbe!Z|+%Jz;VJ1qD=%0Ufmf&aY;LTX~VP5s@e(|E1!9IW^ zDrxn@sfDEChTY)nMs?^WsY&kX7)$k0RF}bn9-dq&dGLnK6Lk)8BiN9XoIyb(8(>3OmVJ3G7e9Q_CKxNTfioEhlFfI%nBmq14B_vS)OIeYfk zO9)~xgbW9>ByJ>_L7KBj@H$daRY$=ckyt0~IlB^n>PJG<#XXn9iuTtke$}EpDQ>K% z1rcng;5kC|0QltU2R$~~4@YsMk?ZLpOxL?C-)XYcVK}^d3qc9seOqeC2T*EW3{g+>cX#lT!)ublyB0)@NkRkbz#^v7#;=YaUelKUJ=uCnmMRe;b zlIGrvmdahIpHXbH%UyOzK`<8i5_}+> z^nrNXfjVB1fGXE+y!4|diRdQZu2*IhExzl>&Mk@$ObmE1t|5L1f{2EZk>p)rZQ-!Y zL^<$K2;o_Iw4{3V-gYNNx8W)?64W5!q$)xi@4)}SP(u|=x2H0MpxvT_N>(rh9jz}5 z!(YQ!gpZ8SUFn*?Gv@UjAyY=z&d-PI)mX@K67K9ksLL{m>UCz;*ZpW*R9$_6fdp7) zo)!G|o3&xhTZ5Hxra33O> z9Di|o-#evZ@x2z;-|Xg$dxFXt*Y^4NUW7-`a@QRx$G4An#}brw!j$!WvYy1W@BxkNr%N94vKj6>}8!@zN;eLvw zafD*OfB){cDUH*oT5`A8?UVOKtj)GUZ1#_@IDN87lSJDiH)mPJMo31$%`MeW>y4zy zj1HC8L5sQ8i?(=Kh_JR?^;(YFR6giaW;sv2Z_QSe|1P#@Q-tXHCROu4I6)9_p}HS6 zLxk|UH+mc9PQirdQMnkU8_MyfL;ba)OA|bS3e~hvwDm1}xzRPGZ03Ic7$_vXqw^Y`x1@>K8QLJcsL9o zh&Jqf4k@#QO07+vNU=S$sM<~BD1xdbY7u+q4q2hPutrBh!V?@RMQdivdBL-Y=nQFr z-!J@pJ3)_b(5#;hWV3CS-<6;tT2b;GOu1CDmBkhX?|n-eeO?XyY^*$>A;D^`&{Z_v|_h31|5hSki*o z!`z`-raU}6o7uhITcoa$-i1QgZ?EEo3vPnGy_*72mPFL~fNxVFU-FJ(SFsL7fSc$T zto4Tn{sh^X{Q6U2Awi3|Qss^>-fMnO_rvqey_XGvgU0hJ3K0RMZErJzLx-UXM~`Ja zOv&P6F2BnYEC;|ATJUP^%r>%;V~ zG_gmtWhg6~L+{PIBGt^-rR5i?voqhVCWKoq8EIbV!@Dgssw^wQjQiFeie&Vtzv z26@}v0{lxbl*DG)TJaya-xH2=2N@>l|Am_X*+j1s^)$CTie;@uD!PakOEMK@Nq#?1 z`-+WPRIr#y!KYcvX#ZN_&*7=9Kko#e+Cg~CPh`W_x}CvoheFzlia-+Adw5@MyjdGX(zy~? 
z%*<4n;qbgHB2hQe;^NB}HK<<5cLqbglStNg|4&6kT-kP?d!NdhBo_$MCrtyPdw8Z9 z!GW3Kf&=O6rZ^waCnHz~!u81=hFddD(2614TE~vkY4_|GbVY2*jhW_q8tza;nHz}( zWv?{3n_pyGE|z<{ThUXSKlw49X)!&dw<)ggDXWxW$38;e5wb25d>bK5%kHUW>z0eu z_`&~APO-T1oZm~+M{wv749yv-`9aV9+>%W%XMp9kXnA<@N@m_`C6quVM;76Dyq=Lc zQz9ZIdoA)2cJ*&Ql35*0B*11jLa^vLM10q{wh;a#!x3Zs@N7Wo`D)d+C>Q4&lLuaC zI)N)@X5U0XsE;V=7V%u=kKz3JrCsZM0H;e8*+fA@E*+84 zN0He0@71AnKqzPSUw*s(F(P)EYfoixl9dn6ibGkV;0CJ^en4;#`D-Gm4q2(qlSdjc zIg?Elp^cS8pN$N24mFdWh2UJK`)8oTve#4yD$M}Yow6Wi>c>HB(t_b481*+&HvfM` zTSGY|JyMHHd#*;!OpcEYR)!cm>drhZ2?oP%G3t?dGBYzX%1nKL;7jR$B?@4*i5Y*P z5H^h%A2a~B3jUJ!1{r{S<}xMKE`o>?&9e&}m0VRC5iT!JA8&OYS-G$p5JhB3b(xYQ z*SI|)A%Ust;YC6`$3Tmj3NbBYKFJ+u_zdB?3}axr$YC`?08C&QQmR}?vC}GTOrgX? zlB#+3J2t#r{TOVc~XSohVtB92_*>v1QtcaKNC4G7Y-0Km&n1eElLCHnJE z$`J2W)Q*dgm@cU&=KI9EvMlO*{QY^Ih9vJV4KlUWwa=C06iEQ+h{YVa9TyLIuff*# zKRlkO>X3kL@Y!E*j(ShbNyU{qdE>R#T`SjzhG&(E#^&bNy!|Y9$$|+dk&=SV=U6-O z9KDz`JwDa%SFEihaX!E)XSw_TYwz4+n!3UOZV)GP3uGA+){1UIhS>(JQwl=?2kK;O z3_(F4B817K;IQDMl+ub*nM!$thXxAFDKrDy@+h=CYS9SP(_dok)l`ygYJ5*O)veZAtEH1Svf#yKu0R`htrrmqVYiapEu^f3xd@afIx>4+Fqlr1tW6k>VD#<(I3=E%dQaObo7Uy&V-9;3 z)ZW*B_(KC-h$J5solEtvV+(v+1!Y-{2fC>a)w|UZk#9=xa*&nGc{~F-JH)xfuST`| zTAWN=0Sm|T>Zqfub>)nol=X6o=&o2t1*KNSpP;s*+#3{j6s7A=gd)D%PBzN)cW&dx zK$$4Noyr4?^le!=F<37hXGrft97`f>1<Xhrm3BQ+7m%Qb0ZfF;3glF#jhl`xt!` zo}GHcZt6a=~xV?FODV=3{G_>64*MSQcuhp=n7aIs_x;~pUMHW`mxvcU7hh1}6_ue8*O1Ef9 z&(NAltU)1v+?$$GsV0K+&2FL1R7^kiB#AnsaofgTcQUkZ_IYpO?DB`_jnR)TC`VPl zzp!A=yEdYAX}YkI#d-Yh&j9(jt#eE%Z9Ml;7g_+A>d$e^EHe3(OjK{6Y^$hB#)cor zWHRH*)wW$M6D@z)2xh6B3?K&Xk1+`C(Y_x?UhKvMllOw{-U@Ep9=xeI$-}#u}Idq7HK^)%npdaBJ#?02}+c*zL-0g zIW7@xp7}l_F(~U8(2g6<)Yxfjulv;6QwAPB4U=?UUDV~~;M}Uf)o$LeR5)&UfpV_^`?1}Kw_EZ1m-|i0pT4*usO3c)X!I zR#PADo(qn|w!2-{l=dIeWv#IyV+A#fDYMqQ6}j|Jj}`jUb>PMaO#8LL`O_ogRVPBu zhMT;0d9MmIk0@T%0L+bK=*r1=Rr5CN#lh|NajSUwW0H3d7U{$Lbyt-2&CCZ4M8U;F zZ5Hir{&{)b2=~PO)f>#U#5hsY4SZ)ALU5@Ke*;LjI~f1~ literal 0 HcmV?d00001 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 
0000000..9da5a49 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,118 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import logging +import os +import sys +from typing import List + +from sphinx.ext import autodoc + +logger = logging.getLogger(__name__) +sys.path.append(os.path.abspath("../..")) + +# -- Project information ----------------------------------------------------- + +project = 'vLLM' +copyright = '2024, vLLM Team' +author = 'the vLLM Team' + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "sphinx.ext.intersphinx", + "sphinx_copybutton", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "myst_parser", + "sphinxarg.ext", +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns: List[str] = ["**/*.template.rst"] + +# Exclude the prompt "$" when copying code +copybutton_prompt_text = r"\$ " +copybutton_prompt_is_regexp = True + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_title = project +html_theme = 'sphinx_book_theme' +html_logo = 'assets/logos/vllm-logo-text-light.png' +html_theme_options = { + 'path_to_docs': 'docs/source', + 'repository_url': 'https://github.com/vllm-project/vllm', + 'use_repository_button': True, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + + +# Generate additional rst documentation here. +def setup(app): + from docs.source.generate_examples import generate_examples + generate_examples() + + +# Mock out external dependencies here. 
+autodoc_mock_imports = [ + "cpuinfo", + "torch", + "transformers", + "psutil", + "prometheus_client", + "sentencepiece", + "vllm.cuda_utils", + "vllm._C", + "numpy", + "tqdm", + "tensorizer", +] + +for mock_target in autodoc_mock_imports: + if mock_target in sys.modules: + logger.info( + "Potentially problematic mock target (%s) found; " + "autodoc_mock_imports cannot mock modules that have already " + "been loaded into sys.modules when the sphinx build starts.", + mock_target) + + +class MockedClassDocumenter(autodoc.ClassDocumenter): + """Remove note about base class when a class is derived from object.""" + + def add_line(self, line: str, source: str, *lineno: int) -> None: + if line == " Bases: :py:class:`object`": + return + super().add_line(line, source, *lineno) + + +autodoc.ClassDocumenter = MockedClassDocumenter + +navigation_with_keys = False diff --git a/docs/source/dev/dockerfile/dockerfile.rst b/docs/source/dev/dockerfile/dockerfile.rst new file mode 100644 index 0000000..a074633 --- /dev/null +++ b/docs/source/dev/dockerfile/dockerfile.rst @@ -0,0 +1,50 @@ +Dockerfile +==================== + +See `here `_ for the main Dockerfile to construct +the image for running an OpenAI compatible server with vLLM. + +- Below is a visual representation of the multi-stage Dockerfile. The build graph contains the following nodes: + + - All build stages + - The default build target (highlighted in grey) + - External images (with dashed borders) + + The edges of the build graph represent: + + - FROM ... dependencies (with a solid line and a full arrow head) + - COPY --from=... dependencies (with a dashed line and an empty arrow head) + - RUN --mount=(.*)from=... dependencies (with a dotted line and an empty diamond arrow head) + + .. 
figure:: ../../assets/dev/dockerfile-stages-dependency.png + :alt: query + :width: 100% + :align: center + + Made using: https://github.com/patrickhoefler/dockerfilegraph + + Commands to regenerate the build graph (make sure to run it **from the `root` directory of the vLLM repository** where the dockerfile is present): + + .. code:: bash + + dockerfilegraph -o png --legend --dpi 200 --max-label-length 50 --filename Dockerfile + + or in case you want to run it directly with the docker image: + + .. code:: bash + + docker run \ + --rm \ + --user "$(id -u):$(id -g)" \ + --workdir /workspace \ + --volume "$(pwd)":/workspace \ + ghcr.io/patrickhoefler/dockerfilegraph:alpine \ + --output png \ + --dpi 200 \ + --max-label-length 50 \ + --filename Dockerfile \ + --legend + + (To run it for a different file, you can pass in a different argument to the flag `--filename`.) + + \ No newline at end of file diff --git a/docs/source/dev/engine/async_llm_engine.rst b/docs/source/dev/engine/async_llm_engine.rst new file mode 100644 index 0000000..93fc310 --- /dev/null +++ b/docs/source/dev/engine/async_llm_engine.rst @@ -0,0 +1,6 @@ +AsyncLLMEngine +================================= + +.. autoclass:: vllm.AsyncLLMEngine + :members: + :show-inheritance: diff --git a/docs/source/dev/engine/engine_index.rst b/docs/source/dev/engine/engine_index.rst new file mode 100644 index 0000000..ba9ae55 --- /dev/null +++ b/docs/source/dev/engine/engine_index.rst @@ -0,0 +1,13 @@ +vLLM Engine +================================= + +.. automodule:: vllm.engine +.. currentmodule:: vllm.engine + +.. toctree:: + :maxdepth: 2 + :caption: Engines + + llm_engine + async_llm_engine + diff --git a/docs/source/dev/engine/llm_engine.rst b/docs/source/dev/engine/llm_engine.rst new file mode 100644 index 0000000..0b8c1e2 --- /dev/null +++ b/docs/source/dev/engine/llm_engine.rst @@ -0,0 +1,6 @@ +LLMEngine +================================= + +.. 
autoclass:: vllm.LLMEngine + :members: + :show-inheritance: diff --git a/docs/source/dev/kernel/paged_attention.rst b/docs/source/dev/kernel/paged_attention.rst new file mode 100644 index 0000000..ba4f7a2 --- /dev/null +++ b/docs/source/dev/kernel/paged_attention.rst @@ -0,0 +1,525 @@ +vLLM Paged Attention +==================== + +- Currently, vLLM utilizes its own implementation of a multi-head query + attention kernel (``csrc/attention/attention_kernels.cu``). + This kernel is designed to be compatible with + vLLM's paged KV caches, where the key and value cache are stored in + separate blocks (note that this block concept differs from the GPU + thread block. So in a later document, I will refer to vLLM paged + attention block as "block", while refer to GPU thread block as + "thread block"). +- To achieve high performance, this kernel relies on a specially + designed memory layout and access method, specifically when threads + read data from global memory to shared memory. The purpose of this + document is to provide a high-level explanation of the kernel + implementation step by step, aiding those who wish to learn about the + vLLM multi-head query attention kernel. After going through this + document, users will likely have a better understanding and feel easier + to follow the actual implementation. +- Please note that this document may not cover all details, such as how + to calculate the correct index for the corresponding data or the dot + multiplication implementation. However, after reading this document + and becoming familiar with the high-level logic flow, it should be + easier for you to read the actual code and understand the details. + +Inputs +------ + +- The kernel function takes a list of arguments for the current thread + to perform its assigned work. The three most important arguments are + the input pointers ``q``, ``k_cache``, and ``v_cache``, which point + to query, key, and value data on global memory that need to be read + and processed. 
The output pointer ``out`` points to global memory + where the result should be written. These four pointers actually + refer to multi-dimensional arrays, but each thread only accesses the + portion of data assigned to it. I have omitted all other runtime + parameters here for simplicity. + + .. code:: cpp + + template< + typename scalar_t, + int HEAD_SIZE, + int BLOCK_SIZE, + int NUM_THREADS, + int PARTITION_SIZE = 0> + __device__ void paged_attention_kernel( + ... // Other side args. + const scalar_t* __restrict__ out, // [num_seqs, num_heads, max_num_partitions, head_size] + const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size] + const scalar_t* __restrict__ k_cache, // [num_blocks, num_kv_heads, head_size/x, block_size, x] + const scalar_t* __restrict__ v_cache, // [num_blocks, num_kv_heads, head_size, block_size] + ... // Other side args. + ) + +- There are also a list of template arguments above the function + signature that are determined during compilation time. ``scalar_t`` + represents the data type of the query, key, and value data elements, + such as FP16. ``HEAD_SIZE`` indicates the number of elements in each + head. ``BLOCK_SIZE`` refers to the number of tokens in each block. + ``NUM_THREADS`` denotes the number of threads in each thread block. + ``PARTITION_SIZE`` represents the number of tensor parallel GPUs (For + simplicity, we assume this is 0 and tensor parallel is disabled). +- With these arguments, we need to perform a sequence of preparations. + This includes calculating the current head index, block index, and + other necessary variables. However, for now, we can ignore these + preparations and proceed directly to the actual calculations. It will + be easier to understand them once we grasp the entire flow. + +Concepts +-------- + +- Just before we dive into the calculation flow, I want to describe a + few concepts that are needed for later sections. 
However, you may + skip this section and return later if you encounter any confusing + terminologies. +- **Sequence**: A sequence represents a client request. For example, + the data pointed to by ``q`` has a shape of + ``[num_seqs, num_heads, head_size]``. That represents there are total + ``num_seqs`` of query sequence data are pointed by ``q``. Since this + kernel is a single query attention kernel, each sequence only has one + query token. Hence, the ``num_seqs`` equals the total number of tokens + that are processed in the batch. +- **Context**: The context consists of the generated tokens from the + sequence. For instance, ``["What", "is", "your"]`` are the context + tokens, and the input query token is ``"name"``. The model might + generate the token ``"?"``. +- **Vec**: The vec is a list of elements that are fetched and + calculated together. For query and key data, the vec size + (``VEC_SIZE``) is determined so that each thread group can fetch and + calculate 16 bytes of data at a time. For value data, the vec size + (``V_VEC_SIZE``) is determined so that each thread can fetch and + calculate 16 bytes of data at a time. For example, if the + ``scalar_t`` is FP16 (2 bytes) and ``THREAD_GROUP_SIZE`` is 2, the + ``VEC_SIZE`` will be 4, while the ``V_VEC_SIZE`` will be 8. +- **Thread group**: The thread group is a small group of + threads(\ ``THREAD_GROUP_SIZE``) that fetches and calculates one + query token and one key token at a time. Each thread handles only a + portion of the token data. The total number of elements processed by + one thread group is referred as ``x``. For example, if the thread + group contains 2 threads and the head size is 8, then thread 0 + handles the query and key elements at index 0, 2, 4, 6, while thread + 1 handles the elements at index 1, 3, 5, 7. +- **Block**: The key and value cache data in vLLM are split into + blocks. Each block stores data for a fixed number(\ ``BLOCK_SIZE``) + of tokens at one head. 
Each block may contain only a portion of the + whole context tokens. For example, if the block size is 16 and the + head size is 128, then for one head, one block can store 16 \* 128 = + 2048 elements. +- **Warp**: A warp is a group of 32 threads(\ ``WARP_SIZE``) that + execute simultaneously on a stream multiprocessor (SM). In this + kernel, each warp processes the calculation between one query token + and key tokens of one entire block at a time (it may process multiple + blocks in multiple iterations). For example, if there are 4 warps and + 6 blocks for one context, the assignment would be like warp 0 handles + the 0th, 4th blocks, warp 1 handles the 1st, 5th blocks, warp 2 + handles the 2nd block and warp 3 handles the 3rd block. +- **Thread block**: A thread block is a group of + threads(\ ``NUM_THREADS``) that can access the same shared memory. + Each thread block contains multiple warps(\ ``NUM_WARPS``), and in + this kernel, each thread block processes the calculation between one + query token and key tokens of a whole context. +- **Grid**: A grid is a collection of thread blocks and defines the + shape of the collection. In this kernel, the shape is + ``(num_heads, num_seqs, max_num_partitions)``. Therefore, each thread + block only handles the calculation for one head, one sequence, and + one partition. + +Query +----- + +- This section will introduce how query data is stored in memory and + fetched by each thread. As mentioned above, each thread group fetches + one query token data, while each thread itself only handles a part of + one query token data. Within each warp, every thread group will fetch + the same query token data, but will multiply it with different key + token data. + + .. code:: cpp + + const scalar_t* q_ptr = q + seq_idx * q_stride + head_idx * HEAD_SIZE; + + .. 
figure:: ../../assets/kernel/query.png + :alt: query + :width: 70% + :align: center + + Query data of one token at one head + +- Each thread defines its own ``q_ptr`` which points to the assigned + query token data on global memory. For example, if ``VEC_SIZE`` is 4 + and ``HEAD_SIZE`` is 128, the ``q_ptr`` points to data that contains + total of 128 elements divided into 128 / 4 = 32 vecs. + + .. figure:: ../../assets/kernel/q_vecs.png + :alt: q_vecs + :width: 70% + :align: center + + ``q_vecs`` for one thread group + + .. code:: cpp + + __shared__ Q_vec q_vecs[THREAD_GROUP_SIZE][NUM_VECS_PER_THREAD]; + +- Next, we need to read the global memory data pointed to by ``q_ptr`` + into shared memory as ``q_vecs``. It is important to note that each + vecs is assigned to a different row. For example, if the + ``THREAD_GROUP_SIZE`` is 2, thread 0 will handle the 0th row vecs, + while thread 1 handles the 1st row vecs. By reading the query data in + this way, neighboring threads like thread 0 and thread 1 can read + neighbor memory, achieving the memory coalescing to improve + performance. + +Key +--- + +- Similar to the "Query" section, this section introduces memory layout + and assignment for keys. While each thread group only handle one + query token one kernel run, it may handle multiple key tokens across + multiple iterations. Meanwhile, each warp will process multiple blocks + of key tokens in multiple iterations, ensuring that all context + tokens are processed by the entire thread group after the kernel run. + In this context, "handle" refers to performing the dot multiplication + between query data and key data. + + .. code:: cpp + + const scalar_t* k_ptr = k_cache + physical_block_number * kv_block_stride + + kv_head_idx * kv_head_stride + + physical_block_offset * x; + +- Unlike to ``q_ptr``, ``k_ptr`` in each thread will point to different + key token at different iterations. 
As shown above, that ``k_ptr`` + points to key token data based on ``k_cache`` at assigned block, + assigned head and assigned token. + + .. figure:: ../../assets/kernel/key.png + :alt: key + :width: 70% + :align: center + + Key data of all context tokens at one head + +- The diagram above illustrates the memory layout for key data. It + assumes that the ``BLOCK_SIZE`` is 16, ``HEAD_SIZE`` is 128, ``x`` is + 8, ``THREAD_GROUP_SIZE`` is 2, and there are a total of 4 warps. Each + rectangle represents all the elements for one key token at one head, + which will be processed by one thread group. The left half shows the + total 16 blocks of key token data for warp 0, while the right half + represents the remaining key token data for other warps or + iterations. Inside each rectangle, there are a total 32 vecs (128 + elements for one token) that will be processed by 2 threads (one + thread group) separately. + + .. figure:: ../../assets/kernel/k_vecs.png + :alt: k_vecs + :width: 70% + :align: center + + ``k_vecs`` for one thread + + .. code:: cpp + + K_vec k_vecs[NUM_VECS_PER_THREAD] + +- Next, we need to read the key token data from ``k_ptr`` and store + them on register memory as ``k_vecs``. We use register memory for + ``k_vecs`` because it will only be accessed by one thread once, + whereas ``q_vecs`` will be accessed by multiple threads multiple + times. Each ``k_vecs`` will contain multiple vectors for later + calculation. Each vec will be set at each inner iteration. The + assignment of vecs allows neighboring threads in a warp to read + neighboring memory together, which again promotes the memory + coalescing. For instance, thread 0 will read vec 0, while thread 1 + will read vec 1. In the next inner loop, thread 0 will read vec 2, + while thread 1 will read vec 3, and so on. +- You may still be a little confused about the overall flow. Don't + worry, please keep reading the next "QK" section. 
It will illustrate + the query and key calculation flow in a clearer and higher-level + manner. + +QK +--- + +- As shown the pseudo code below, before the entire for loop block, we + fetch the query data for one token and store it in ``q_vecs``. Then, + in the outer for loop, we iterate through different ``k_ptrs`` that + point to different tokens and prepare the ``k_vecs`` in the inner for + loop. Finally, we perform the dot multiplication between the + ``q_vecs`` and each ``k_vecs``. + + .. code:: cpp + + q_vecs = ... + for ... { + k_ptr = ... + for ... { + k_vecs[i] = ... + } + ... + float qk = scale * Qk_dot::dot(q_vecs[thread_group_offset], k_vecs); + } + +- As mentioned before, for each thread, it only fetches part of the + query and key token data at a time. However, there will be a cross + thread group reduction happen in the ``Qk_dot<>::dot`` . So ``qk`` + returned here is not just between part of the query and key token dot + multiplication, but actually a full result between entire query and + key token data. +- For example, if the value of ``HEAD_SIZE`` is 128 and + ``THREAD_GROUP_SIZE`` is 2, each thread's ``k_vecs`` will contain + total 64 elements. However, the returned ``qk`` is actually the + result of dot multiplication between 128 query elements and 128 key + elements. If you want to learn more about the details of the dot + multiplication and reduction, you may refer to the implementation of + ``Qk_dot<>::dot``. However, for the sake of simplicity, I will not + cover it in this document. + +Softmax +------- + +- Next, we need to calculate the normalized softmax for all ``qk``\ s, + as shown above, where each :math:`x` represents a ``qk``. To do this, + we must obtain the reduced value of ``qk_max``\ (:math:`m(x)`) and + the ``exp_sum``\ (:math:`\ell(x)`) of all ``qk``\ s. The reduction + should be performed across the entire thread block, encompassing + results between the query token and all context key tokens. + + .. 
math:: + :nowrap: + + \begin{gather*} + m(x):=\max _i \quad x_i \\ \quad f(x):=\left[\begin{array}{lll}e^{x_1-m(x)} & \ldots & e^{x_B-m(x)}\end{array}\right]\\ \quad \ell(x):=\sum_i f(x)_i \\ + \quad \operatorname{softmax}(x):=\frac{f(x)}{\ell(x)} + \end{gather*} + +``qk_max`` and ``logits`` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Just right after we get the ``qk`` result, we can set the temporary + ``logits`` result with ``qk`` (In the end, the ``logits`` should + store the normalized softmax result). Also we can compare and collect + the ``qk_max`` for all ``qk``\ s that are calculated by current + thread group. + + .. code:: cpp + + if (thread_group_offset == 0) { + const bool mask = token_idx >= context_len; + logits[token_idx - start_token_idx] = mask ? 0.f : qk; + qk_max = mask ? qk_max : fmaxf(qk_max, qk); + } + +- Please note that the ``logits`` here is on shared memory, so each + thread group will set the fields for its own assigned context tokens. + Overall, the size of logits should be number of context tokens. + + .. code:: cpp + + for (int mask = WARP_SIZE / 2; mask >= THREAD_GROUP_SIZE; mask /= 2) { + qk_max = fmaxf(qk_max, VLLM_SHFL_XOR_SYNC(qk_max, mask)); + } + + if (lane == 0) { + red_smem[warp_idx] = qk_max; + } + +- Then we need to get the reduced ``qk_max`` across each warp. The main + idea is to make threads in warp to communicate with each other and + get the final max ``qk`` . + + .. code:: cpp + + for (int mask = NUM_WARPS / 2; mask >= 1; mask /= 2) { + qk_max = fmaxf(qk_max, VLLM_SHFL_XOR_SYNC(qk_max, mask)); + } + qk_max = VLLM_SHFL_SYNC(qk_max, 0); + +- Finally, we can get the reduced ``qk_max`` from whole thread block by + compare the ``qk_max`` from all warps in this thread block. Then we + need to broadcast the final result to each thread. + +``exp_sum`` +~~~~~~~~~~~ + +- Similar to ``qk_max``, we need to get the reduced sum value from the + entire thread block too. + + .. 
code:: cpp + + for (int i = thread_idx; i < num_tokens; i += NUM_THREADS) { + float val = __expf(logits[i] - qk_max); + logits[i] = val; + exp_sum += val; + } + ... + exp_sum = block_sum(&red_smem[NUM_WARPS], exp_sum); + +- Firstly, sum all exp values from each thread group, and meanwhile, + convert each entry of ``logits`` from ``qk`` to ``exp(qk - qk_max)``. + Please note, the ``qk_max`` here is already the max ``qk`` across the + whole thread block. And then we can do reduction for ``exp_sum`` + across whole thread block just like the ``qk_max``. + + .. code:: cpp + + const float inv_sum = __fdividef(1.f, exp_sum + 1e-6f); + for (int i = thread_idx; i < num_tokens; i += NUM_THREADS) { + logits[i] *= inv_sum; + } + +- Finally, with the reduced ``qk_max`` and ``exp_sum``, we can obtain + the final normalized softmax result as ``logits``. This ``logits`` + variable will be used for dot multiplication with the value data in + later steps. Now, it should store the normalized softmax result of + ``qk`` for all assigned context tokens. + +Value +----- + +.. figure:: ../../assets/kernel/value.png + :alt: value + :width: 70% + :align: center + + Value data of all context tokens at one head + +.. figure:: ../../assets/kernel/logits_vec.png + :alt: logits_vec + :width: 50% + :align: center + + ``logits_vec`` for one thread + +.. figure:: ../../assets/kernel/v_vec.png + :alt: v_vec + :width: 70% + :align: center + + List of ``v_vec`` for one thread + +- Now we need to retrieve the value data and perform dot multiplication + with ``logits``. Unlike query and key, there is no thread group + concept for value data. As shown in diagram, different from key token + memory layout, elements from the same column correspond to the same + value token. For one block of value data, there are ``HEAD_SIZE`` of + rows and ``BLOCK_SIZE`` of columns that are split into multiple + ``v_vecs``. 
+- Each thread always fetches ``V_VEC_SIZE`` elements from the same + ``V_VEC_SIZE`` of tokens at a time. As a result, a single thread + retrieves multiple ``v_vec``\ s from different rows and the same + columns through multiple inner iterations. For each ``v_vec``, it + needs to be dot multiplied with the corresponding ``logits_vec``, + which is also ``V_VEC_SIZE`` elements from ``logits``. Overall, with + multiple inner iterations, each warp will process one block of value + tokens. And with multiple outer iterations, the whole context value + tokens are processed. + + .. code:: cpp + + float accs[NUM_ROWS_PER_THREAD]; + for ... { // Iteration over different blocks. + logits_vec = ... + for ... { // Iteration over different rows. + v_vec = ... + ... + accs[i] += dot(logits_vec, v_vec); + } + } + +- As shown in the above pseudo code, in the outer loop, similar to + ``k_ptr``, ``logits_vec`` iterates over different blocks and reads + ``V_VEC_SIZE`` elements from ``logits``. In the inner loop, each + thread reads ``V_VEC_SIZE`` elements from the same tokens as a + ``v_vec`` and performs dot multiplication. It is important to note + that in each inner iteration, the thread fetches different head + position elements for the same tokens. The dot result is then + accumulated in ``accs``. Therefore, each entry of ``accs`` is mapped + to a head position assigned to the current thread. +- For example, if ``BLOCK_SIZE`` is 16 and ``V_VEC_SIZE`` is 8, each + thread fetches 8 value elements for 8 tokens at a time. Each element + is from different tokens at the same head position. If ``HEAD_SIZE`` + is 128 and ``WARP_SIZE`` is 32, for each inner loop, a warp needs to + fetch ``WARP_SIZE * V_VEC_SIZE = 256`` elements. This means there are + a total of 128 \* 16 / 256 = 8 inner iterations for a warp to handle + a whole block of value tokens. And each ``accs`` in each thread + contains 8 elements that are accumulated at 8 different head positions.
+ For the thread 0, the ``accs`` variable will have 8 elements, which + are 0th, 32th … 224th elements of a value head that are accumulated + from all assigned 8 tokens. + +LV +--- +- Now, we need to perform reduction for ``accs`` within each warp. This + process allows each thread to accumulate the ``accs`` for the + assigned head positions of all tokens in one block. + + .. code:: cpp + + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + float acc = accs[i]; + for (int mask = NUM_V_VECS_PER_ROW / 2; mask >= 1; mask /= 2) { + acc += VLLM_SHFL_XOR_SYNC(acc, mask); + } + accs[i] = acc; + } + +- Next, we perform reduction for ``accs`` across all warps, allowing + each thread to have the accumulation of ``accs`` for the assigned + head positions of all context tokens. Please note that each ``accs`` + in every thread only stores the accumulation for a portion of + elements of the entire head for all context tokens. However, overall, + all results for output have been calculated but are just stored in + different thread register memory. + + .. code:: cpp + + float* out_smem = reinterpret_cast(shared_mem); + for (int i = NUM_WARPS; i > 1; i /= 2) { + // Upper warps write to shared memory. + ... + float* dst = &out_smem[(warp_idx - mid) * HEAD_SIZE]; + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + ... + dst[row_idx] = accs[i]; + } + + // Lower warps update the output. + const float* src = &out_smem[warp_idx * HEAD_SIZE]; + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + ... + accs[i] += src[row_idx]; + } + + // Write out the accs. + } + +Output +------ + +- Now we can write all of calculated result from local register memory + to final output global memory. + + .. code:: cpp + + scalar_t* out_ptr = out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE + + head_idx * max_num_partitions * HEAD_SIZE + + partition_idx * HEAD_SIZE; + +- First, we need to define the ``out_ptr`` variable, which points to + the start address of the assigned sequence and assigned head. 
+ + .. code:: cpp + + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + const int row_idx = lane / NUM_V_VECS_PER_ROW + i * NUM_ROWS_PER_ITER; + if (row_idx < HEAD_SIZE && lane % NUM_V_VECS_PER_ROW == 0) { + from_float(*(out_ptr + row_idx), accs[i]); + } + } + +- Finally, we need to iterate over different assigned head positions + and write out the corresponding accumulated result based on the + ``out_ptr``. diff --git a/docs/source/dev/sampling_params.rst b/docs/source/dev/sampling_params.rst new file mode 100644 index 0000000..ef3d150 --- /dev/null +++ b/docs/source/dev/sampling_params.rst @@ -0,0 +1,5 @@ +Sampling Params +=============== + +.. autoclass:: vllm.SamplingParams + :members: diff --git a/docs/source/generate_examples.py b/docs/source/generate_examples.py new file mode 100644 index 0000000..79b49a1 --- /dev/null +++ b/docs/source/generate_examples.py @@ -0,0 +1,61 @@ +import re +from pathlib import Path + + +def fix_case(text: str) -> str: + subs = [ + ("api", "API"), + ("llm", "LLM"), + ("vllm", "vLLM"), + ("openai", "OpenAI"), + ("multilora", "MultiLoRA"), + ] + for sub in subs: + text = re.sub(*sub, text, flags=re.IGNORECASE) + return text + + +def underline(title: str, character: str = "=") -> str: + return f"{title}\n{character * len(title)}" + + +def generate_title(filename: str) -> str: + # Turn filename into a title + title = filename.replace("_", " ").title() + # Handle acronyms and names + title = fix_case(title) + # Underline title + title = underline(title) + return title + + +def generate_examples(): + root_dir = Path(__file__).parent.parent.parent.resolve() + + # Source paths + script_dir = root_dir / "examples" + script_paths = sorted(script_dir.glob("*.py")) + + # Destination paths + doc_dir = root_dir / "docs/source/getting_started/examples" + doc_paths = [doc_dir / f"{path.stem}.rst" for path in script_paths] + + # Generate the example docs for each example script + for script_path, doc_path in zip(script_paths, doc_paths): + 
script_url = f"https://github.com/vllm-project/vllm/blob/main/examples/{script_path.name}" + # Make script_path relative to doc_path and call it include_path + include_path = '../../../..' / script_path.relative_to(root_dir) + content = (f"{generate_title(doc_path.stem)}\n\n" + f"Source {script_url}.\n\n" + f".. literalinclude:: {include_path}\n" + " :language: python\n" + " :linenos:\n") + with open(doc_path, "w+") as f: + f.write(content) + + # Generate the toctree for the example scripts + with open(doc_dir / "examples_index.template.rst") as f: + examples_index = f.read() + with open(doc_dir / "examples_index.rst", "w+") as f: + example_docs = "\n ".join(path.stem for path in script_paths) + f.write(examples_index.replace(r"%EXAMPLE_DOCS%", example_docs)) diff --git a/docs/source/getting_started/amd-installation.rst b/docs/source/getting_started/amd-installation.rst new file mode 100644 index 0000000..61fcd45 --- /dev/null +++ b/docs/source/getting_started/amd-installation.rst @@ -0,0 +1,137 @@ +.. _installation_rocm: + +Installation with ROCm +====================== + +vLLM supports AMD GPUs with ROCm 5.7 and 6.0. + +Requirements +------------ + +* OS: Linux +* Python: 3.8 -- 3.11 +* GPU: MI200s (gfx90a), MI300 (gfx942), Radeon RX 7900 series (gfx1100) +* ROCm 6.0 and ROCm 5.7 + +Installation options: + +#. :ref:`Build from source with docker ` +#. :ref:`Build from source ` + +.. _build_from_source_docker_rocm: + +Option 1: Build from source with docker (recommended) +----------------------------------------------------- + +You can build and install vLLM from source. + +First, build a docker image from `Dockerfile.rocm `_ and launch a docker container from the image. + +`Dockerfile.rocm `_ uses ROCm 6.0 by default, but also supports ROCm 5.7. +It provides flexibility to customize the build of docker image using the following arguments: + +* `BASE_IMAGE`: specifies the base image used when running ``docker build``, specifically the PyTorch on ROCm base image. 
We have tested ROCm 5.7 and ROCm 6.0. The default is `rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1` +* `BUILD_FA`: specifies whether to build CK flash-attention. The default is 1. For `Radeon RX 7900 series (gfx1100) `_, this should be set to 0 before flash-attention supports this target. +* `FX_GFX_ARCHS`: specifies the GFX architecture that is used to build CK flash-attention, for example, `gfx90a;gfx942` for MI200 and MI300. The default is `gfx90a;gfx942` +* `FA_BRANCH`: specifies the branch used to build the CK flash-attention in `ROCm's flash-attention repo `_. The default is `ae7928c` +* `BUILD_TRITON`: specifies whether to build triton flash-attention. The default value is 1. + +Their values can be passed in when running ``docker build`` with ``--build-arg`` options. + + +To build vllm on ROCm 6.0 for MI200 and MI300 series, you can use the default: + +.. code-block:: console + + $ docker build -f Dockerfile.rocm -t vllm-rocm . + +To build vllm on ROCm 6.0 for Radeon RX7900 series (gfx1100), you should specify ``BUILD_FA`` as below: + +.. code-block:: console + + $ docker build --build-arg BUILD_FA="0" -f Dockerfile.rocm -t vllm-rocm . + +To build docker image for vllm on ROCm 5.7, you can specify ``BASE_IMAGE`` as below: + +.. code-block:: console + + $ docker build --build-arg BASE_IMAGE="rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1" \ + -f Dockerfile.rocm -t vllm-rocm . + +To run the above docker image ``vllm-rocm``, use the below command: + +.. code-block:: console + + $ docker run -it \ + --network=host \ + --group-add=video \ + --ipc=host \ + --cap-add=SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --device /dev/kfd \ + --device /dev/dri \ + -v :/app/model \ + vllm-rocm \ + bash + +Where the `` is the location where the model is stored, for example, the weights for llama2 or llama3 models. + + +.. _build_from_source_rocm: + +Option 2: Build from source +--------------------------- + +0. 
Install prerequisites (skip if you are already in an environment/docker with the following installed): + +- `ROCm `_ +- `Pytorch `_ +- `hipBLAS `_ + +For installing PyTorch, you can start from a fresh docker image, e.g., `rocm6.0.2_ubuntu22.04_py3.10_pytorch_2.1.2`, `rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1`, `rocm/pytorch-nightly`. + +Alternatively, you can install pytorch using pytorch wheels. You can check Pytorch installation guide in Pytorch `Getting Started `_ + +For rocm6.0: + +.. code-block:: console + + $ pip3 install torch --index-url https://download.pytorch.org/whl/rocm6.0 + + +For rocm5.7: + +.. code-block:: console + + $ pip install torch --index-url https://download.pytorch.org/whl/rocm5.7 + + +1. Install `Triton flash attention for ROCm `_ + +Install ROCm's Triton flash attention (the default triton-mlir branch) following the instructions from `ROCm/triton `_ + +2. Optionally, if you choose to use CK flash attention, you can install `flash attention for ROCm `_ + +Install ROCm's flash attention (v2.0.4) following the instructions from `ROCm/flash-attention `_ + +.. note:: + - If you are using rocm5.7 with pytorch 2.1.0 onwards, you don't need to apply the `hipify_python.patch`. You can build the ROCm flash attention directly. + - If you fail to install `ROCm/flash-attention`, try cloning from the commit `6fd2f8e572805681cd67ef8596c7e2ce521ed3c6`. + - ROCm's Flash-attention-2 (v2.0.4) does not support sliding windows attention. + - You might need to downgrade the "ninja" version to 1.10 as it is not used when compiling flash-attention-2 (e.g. `pip install ninja==1.10.2.4`) + +3. Build vLLM. + +.. code-block:: console + + $ cd vllm + $ pip install -U -r requirements-rocm.txt + $ python setup.py install # This may take 5-10 minutes. Currently, ``pip install .`` does not work for ROCm installation + + +..
tip:: + + - You may need to turn on the ``--enforce-eager`` flag if you experience process hang when running the `benchmark_throughput.py` script to test your installation. + - Triton flash attention is used by default. For benchmarking purposes, it is recommended to run a warm up step before collecting perf numbers. + - To use CK flash-attention, please use this flag ``export VLLM_USE_FLASH_ATTN_TRITON=0`` to turn off triton flash attention. + - The ROCm version of pytorch, ideally, should match the ROCm driver version. diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst new file mode 100644 index 0000000..ba8b064 --- /dev/null +++ b/docs/source/getting_started/cpu-installation.rst @@ -0,0 +1,87 @@ +.. _installation_cpu: + +Installation with CPU +======================== + +vLLM initially supports basic model inferencing and serving on x86 CPU platform, with data types FP32 and BF16. + +Table of contents: + +#. :ref:`Requirements ` +#. :ref:`Quick start using Dockerfile ` +#. :ref:`Build from source ` +#. :ref:`Performance tips ` + +.. _cpu_backend_requirements: + +Requirements +------------ + +* OS: Linux +* Compiler: gcc/g++>=12.3.0 (recommended) +* Instruction set architecture (ISA) requirement: AVX512 is required. + +.. _cpu_backend_quick_start_dockerfile: + +Quick start using Dockerfile +---------------------------- + +.. code-block:: console + + $ docker build -f Dockerfile.cpu -t vllm-cpu-env --shm-size=4g . + $ docker run -it \ + --rm \ + --network=host \ + --cpuset-cpus= \ + --cpuset-mems= \ + vllm-cpu-env + +.. _build_cpu_backend_from_source: + +Build from source +----------------- + +- First, install required compiler. We recommend to use ``gcc/g++ >= 12.3.0`` as the default compiler to avoid potential problems. For example, on Ubuntu 22.04, you can run: + +..
code-block:: console + + $ sudo apt-get update -y + $ sudo apt-get install -y gcc-12 g++-12 + $ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 + +- Second, install Python packages for vLLM CPU backend building: + +.. code-block:: console + + $ pip install --upgrade pip + $ pip install wheel packaging ninja setuptools>=49.4.0 numpy + $ pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu + +- Finally, build and install vLLM CPU backend: + +.. code-block:: console + + $ VLLM_TARGET_DEVICE=cpu python setup.py install + +.. note:: + - BF16 is the default data type in the current CPU backend (that means the backend will cast FP16 to BF16), and is compatible with all CPUs with AVX512 ISA support. + + - AVX512_BF16 is an extension ISA that provides native BF16 data type conversion and vector product instructions, and brings some performance improvement compared with pure AVX512. The CPU backend build script will check the host CPU flags to determine whether to enable AVX512_BF16. + + - If you want to force enable AVX512_BF16 for the cross-compilation, please set environment variable VLLM_CPU_AVX512BF16=1 before the building. + +.. _cpu_backend_performance_tips: + +Performance tips +----------------- + +- vLLM CPU backend uses environment variable ``VLLM_CPU_KVCACHE_SPACE`` to specify the KV Cache size (e.g, ``VLLM_CPU_KVCACHE_SPACE=40`` means 40 GB space for KV cache), larger setting will allow vLLM running more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. + +- vLLM CPU backend uses OpenMP for thread-parallel computation. If you want the best performance on CPU, it will be very critical to isolate CPU cores for OpenMP threads with other thread pools (like web-service event-loop), to avoid CPU oversubscription.
+ +- If using vLLM CPU backend on a bare-metal machine, it is recommended to disable the hyper-threading. + +- If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores and memory nodes, to avoid the remote memory node access. ``numactl`` is an useful tool for CPU core and memory binding on NUMA platform. Besides, ``--cpuset-cpus`` and ``--cpuset-mems`` arguments of ``docker run`` are also useful. + + + diff --git a/docs/source/getting_started/examples/examples_index.template.rst b/docs/source/getting_started/examples/examples_index.template.rst new file mode 100644 index 0000000..1b34ccc --- /dev/null +++ b/docs/source/getting_started/examples/examples_index.template.rst @@ -0,0 +1,8 @@ +Examples +================================= + +.. toctree:: + :maxdepth: 1 + :caption: Scripts + + %EXAMPLE_DOCS% diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst new file mode 100644 index 0000000..0c81f7e --- /dev/null +++ b/docs/source/getting_started/installation.rst @@ -0,0 +1,88 @@ +.. _installation: + +Installation +============ + +vLLM is a Python library that also contains pre-compiled C++ and CUDA (12.1) binaries. + +Requirements +------------ + +* OS: Linux +* Python: 3.8 -- 3.11 +* GPU: compute capability 7.0 or higher (e.g., V100, T4, RTX20xx, A100, L4, H100, etc.) + +Install with pip +---------------- + +You can install vLLM using pip: + +.. code-block:: console + + $ # (Recommended) Create a new conda environment. + $ conda create -n myenv python=3.9 -y + $ conda activate myenv + + $ # Install vLLM with CUDA 12.1. + $ pip install vllm + +.. note:: + + As of now, vLLM's binaries are compiled with CUDA 12.1 and public PyTorch release versions by default. + We also provide vLLM binaries compiled with CUDA 11.8 and public PyTorch release versions: + + .. code-block:: console + + $ # Install vLLM with CUDA 11.8. 
+ $ export VLLM_VERSION=0.4.0 + $ export PYTHON_VERSION=39 + $ pip install https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cu118-cp${PYTHON_VERSION}-cp${PYTHON_VERSION}-manylinux1_x86_64.whl --extra-index-url https://download.pytorch.org/whl/cu118 + + In order to be performant, vLLM has to compile many cuda kernels. The compilation unfortunately introduces binary incompatibility with other CUDA versions and PyTorch versions, even for the same PyTorch version with different building configurations. + + Therefore, it is recommended to install vLLM with a **fresh new** conda environment. If either you have a different CUDA version or you want to use an existing PyTorch installation, you need to build vLLM from source. See below for instructions. + +.. _build_from_source: + +Build from source +----------------- + +You can also build and install vLLM from source: + +.. code-block:: console + + $ git clone https://github.com/vllm-project/vllm.git + $ cd vllm + $ # export VLLM_INSTALL_PUNICA_KERNELS=1 # optionally build for multi-LoRA capability + $ pip install -e . # This may take 5-10 minutes. + +.. tip:: + To avoid your system being overloaded, you can limit the number of compilation jobs + to be run simultaneously, via the environment variable `MAX_JOBS`. For example: + + .. code-block:: console + + $ export MAX_JOBS=6 + $ pip install -e . + +.. tip:: + If you have trouble building vLLM, we recommend using the NVIDIA PyTorch Docker image. + + .. code-block:: console + + $ # Use `--ipc=host` to make sure the shared memory is large enough. + $ docker run --gpus all -it --rm --ipc=host nvcr.io/nvidia/pytorch:23.10-py3 + + If you don't want to use docker, it is recommended to have a full installation of CUDA Toolkit. You can download and install it from `the official website `_. 
After installation, set the environment variable `CUDA_HOME` to the installation path of CUDA Toolkit, and make sure that the `nvcc` compiler is in your `PATH`, e.g.: + + .. code-block:: console + + $ export CUDA_HOME=/usr/local/cuda + $ export PATH="${CUDA_HOME}/bin:$PATH" + + Here is a sanity check to verify that the CUDA Toolkit is correctly installed: + + .. code-block:: console + + $ nvcc --version # verify that nvcc is in your PATH + $ ${CUDA_HOME}/bin/nvcc --version # verify that nvcc is in your CUDA_HOME diff --git a/docs/source/getting_started/neuron-installation.rst b/docs/source/getting_started/neuron-installation.rst new file mode 100644 index 0000000..62bf779 --- /dev/null +++ b/docs/source/getting_started/neuron-installation.rst @@ -0,0 +1,136 @@ +.. _installation_neuron: + +Installation with Neuron +======================== + +vLLM 0.3.3 onwards supports model inferencing and serving on AWS Trainium/Inferentia with Neuron SDK. +At the moment Paged Attention is not supported in Neuron SDK, but naive continuous batching is supported in transformers-neuronx. +Data types currently supported in Neuron SDK are FP16 and BF16. + +Requirements +------------ + +* OS: Linux +* Python: 3.8 -- 3.11 +* Accelerator: NeuronCore_v2 (in trn1/inf2 instances) +* Pytorch 2.0.1/2.1.1 +* AWS Neuron SDK 2.16/2.17 (Verified on python 3.8) + +Installation steps: + +- :ref:`Build from source ` + + - :ref:`Step 0. Launch Trn1/Inf2 instances ` + - :ref:`Step 1. Install drivers and tools ` + - :ref:`Step 2. Install transformers-neuronx and its dependencies ` + - :ref:`Step 3. Install vLLM from source ` + +.. _build_from_source_neuron: + +Build from source +----------------- + +Following instructions are applicable to Neuron SDK 2.16 and beyond. + +.. _launch_instances: + +Step 0. Launch Trn1/Inf2 instances +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here are the steps to launch trn1/inf2 instances, in order to install `PyTorch Neuron ("torch-neuronx") Setup on Ubuntu 22.04 LTS `_. 
+ +- Please follow the instructions at `launch an Amazon EC2 Instance `_ to launch an instance. When choosing the instance type at the EC2 console, please make sure to select the correct instance type. +- To get more information about instances sizes and pricing see: `Trn1 web page `_, `Inf2 web page `_ +- Select Ubuntu Server 22.04 TLS AMI +- When launching a Trn1/Inf2, please adjust your primary EBS volume size to a minimum of 512GB. +- After launching the instance, follow the instructions in `Connect to your instance `_ to connect to the instance + +.. _install_drivers: + +Step 1. Install drivers and tools +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The installation of drivers and tools wouldn't be necessary, if `Deep Learning AMI Neuron `_ is installed. In case the drivers and tools are not installed on the operating system, follow the steps below: + +.. code-block:: console + + # Configure Linux for Neuron repository updates + . /etc/os-release + sudo tee /etc/apt/sources.list.d/neuron.list > /dev/null <`_ will be the backend to support inference on trn1/inf2 instances. +Follow the steps below to install transformer-neuronx package and its dependencies. + +.. 
code-block:: console + + # Install Python venv + sudo apt-get install -y python3.10-venv g++ + + # Create Python venv + python3.10 -m venv aws_neuron_venv_pytorch + + # Activate Python venv + source aws_neuron_venv_pytorch/bin/activate + + # Install Jupyter notebook kernel + pip install ipykernel + python3.10 -m ipykernel install --user --name aws_neuron_venv_pytorch --display-name "Python (torch-neuronx)" + pip install jupyter notebook + pip install environment_kernels + + # Set pip repository pointing to the Neuron repository + python -m pip config set global.extra-index-url https://pip.repos.neuron.amazonaws.com + + # Install wget, awscli + python -m pip install wget + python -m pip install awscli + + # Update Neuron Compiler and Framework + python -m pip install --upgrade neuronx-cc==2.* --pre torch-neuronx==2.1.* torchvision transformers-neuronx + +.. _install_vllm: + +Step 3. Install vLLM from source +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once neuronx-cc and transformers-neuronx packages are installed, we will be able to install vllm as follows: + +.. code-block:: console + + $ git clone https://github.com/vllm-project/vllm.git + $ cd vllm + $ pip install -U -r requirements-neuron.txt + $ pip install . + +If neuron packages are detected correctly in the installation process, ``vllm-0.3.0+neuron212`` will be installed. diff --git a/docs/source/getting_started/quickstart.rst b/docs/source/getting_started/quickstart.rst new file mode 100644 index 0000000..7c44a96 --- /dev/null +++ b/docs/source/getting_started/quickstart.rst @@ -0,0 +1,176 @@ +.. _quickstart: + +Quickstart +========== + +This guide shows how to use vLLM to: + +* run offline batched inference on a dataset; +* build an API server for a large language model; +* start an OpenAI-compatible API server. + +Be sure to complete the :ref:`installation instructions ` before continuing with this guide. + +.. note:: + + By default, vLLM downloads model from `HuggingFace `_. 
If you would like to use models from `ModelScope `_ in the following examples, please set the environment variable: + + .. code-block:: shell + + export VLLM_USE_MODELSCOPE=True + +Offline Batched Inference +------------------------- + +We first show an example of using vLLM for offline batched inference on a dataset. In other words, we use vLLM to generate texts for a list of input prompts. + +Import ``LLM`` and ``SamplingParams`` from vLLM. The ``LLM`` class is the main class for running offline inference with vLLM engine. The ``SamplingParams`` class specifies the parameters for the sampling process. + +.. code-block:: python + + from vllm import LLM, SamplingParams + +Define the list of input prompts and the sampling parameters for generation. The sampling temperature is set to 0.8 and the nucleus sampling probability is set to 0.95. For more information about the sampling parameters, refer to the `class definition `_. + +.. code-block:: python + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + +Initialize vLLM's engine for offline inference with the ``LLM`` class and the `OPT-125M model `_. The list of supported models can be found at :ref:`supported models `. + +.. code-block:: python + + llm = LLM(model="facebook/opt-125m") + +Call ``llm.generate`` to generate the outputs. It adds the input prompts to vLLM engine's waiting queue and executes the vLLM engine to generate the outputs with high throughput. The outputs are returned as a list of ``RequestOutput`` objects, which include all the output tokens. + +.. code-block:: python + + outputs = llm.generate(prompts, sampling_params) + + # Print the outputs. 
+ for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +The code example can also be found in `examples/offline_inference.py `_. + +OpenAI-Compatible Server +------------------------ + +vLLM can be deployed as a server that implements the OpenAI API protocol. This allows vLLM to be used as a drop-in replacement for applications using OpenAI API. +By default, it starts the server at ``http://localhost:8000``. You can specify the address with ``--host`` and ``--port`` arguments. The server currently hosts one model at a time (OPT-125M in the command below) and implements `list models `_, `create chat completion `_, and `create completion `_ endpoints. We are actively adding support for more endpoints. + +Start the server: + +.. code-block:: console + + $ python -m vllm.entrypoints.openai.api_server \ + $ --model facebook/opt-125m + +By default, the server uses a predefined chat template stored in the tokenizer. You can override this template by using the ``--chat-template`` argument: + +.. code-block:: console + + $ python -m vllm.entrypoints.openai.api_server \ + $ --model facebook/opt-125m \ + $ --chat-template ./examples/template_chatml.jinja + +This server can be queried in the same format as OpenAI API. For example, list the models: + +.. code-block:: console + + $ curl http://localhost:8000/v1/models + +You can pass in the argument ``--api-key`` or environment variable ``VLLM_API_KEY`` to enable the server to check for API key in the header. + +Using OpenAI Completions API with vLLM +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Query the model with input prompts: + +.. 
code-block:: console + + $ curl http://localhost:8000/v1/completions \ + $ -H "Content-Type: application/json" \ + $ -d '{ + $ "model": "facebook/opt-125m", + $ "prompt": "San Francisco is a", + $ "max_tokens": 7, + $ "temperature": 0 + $ }' + +Since this server is compatible with OpenAI API, you can use it as a drop-in replacement for any applications using OpenAI API. For example, another way to query the server is via the ``openai`` python package: + +.. code-block:: python + + from openai import OpenAI + + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + completion = client.completions.create(model="facebook/opt-125m", + prompt="San Francisco is a") + print("Completion result:", completion) + +For a more detailed client example, refer to `examples/openai_completion_client.py `_. + +Using OpenAI Chat API with vLLM +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The vLLM server is designed to support the OpenAI Chat API, allowing you to engage in dynamic conversations with the model. The chat interface is a more interactive way to communicate with the model, allowing back-and-forth exchanges that can be stored in the chat history. This is useful for tasks that require context or more detailed explanations. + +Querying the model using OpenAI Chat API: + +You can use the `create chat completion `_ endpoint to communicate with the model in a chat-like interface: + +.. 
code-block:: console + + $ curl http://localhost:8000/v1/chat/completions \ + $ -H "Content-Type: application/json" \ + $ -d '{ + $ "model": "facebook/opt-125m", + $ "messages": [ + $ {"role": "system", "content": "You are a helpful assistant."}, + $ {"role": "user", "content": "Who won the world series in 2020?"} + $ ] + $ }' + +Python Client Example: + +Using the `openai` python package, you can also communicate with the model in a chat-like manner: + +.. code-block:: python + + from openai import OpenAI + # Set OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + chat_response = client.chat.completions.create( + model="facebook/opt-125m", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Tell me a joke."}, + ] + ) + print("Chat response:", chat_response) + +For more in-depth examples and advanced features of the chat API, you can refer to the official OpenAI documentation. diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..4022c59 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,113 @@ +Welcome to vLLM! +================ + +.. figure:: ./assets/logos/vllm-logo-text-light.png + :width: 60% + :align: center + :alt: vLLM + :class: no-scaled-link + +.. raw:: html + +

+ Easy, fast, and cheap LLM serving for everyone + +

+ +

+ + Star + Watch + Fork +

+ + + +vLLM is a fast and easy-to-use library for LLM inference and serving. + +vLLM is fast with: + +* State-of-the-art serving throughput +* Efficient management of attention key and value memory with **PagedAttention** +* Continuous batching of incoming requests +* Fast model execution with CUDA/HIP graph +* Quantization: `GPTQ `_, `AWQ `_, `SqueezeLLM `_, FP8 KV Cache +* Optimized CUDA kernels + +vLLM is flexible and easy to use with: + +* Seamless integration with popular HuggingFace models +* High-throughput serving with various decoding algorithms, including *parallel sampling*, *beam search*, and more +* Tensor parallelism support for distributed inference +* Streaming outputs +* OpenAI-compatible API server +* Support NVIDIA GPUs and AMD GPUs +* (Experimental) Prefix caching support +* (Experimental) Multi-lora support + +For more information, check out the following: + +* `vLLM announcing blog post `_ (intro to PagedAttention) +* `vLLM paper `_ (SOSP 2023) +* `How continuous batching enables 23x throughput in LLM inference while reducing p50 latency `_ by Cade Daniel et al. + + + +Documentation +------------- + +.. toctree:: + :maxdepth: 1 + :caption: Getting Started + + getting_started/installation + getting_started/amd-installation + getting_started/neuron-installation + getting_started/cpu-installation + getting_started/quickstart + getting_started/examples/examples_index + +.. toctree:: + :maxdepth: 1 + :caption: Serving + + serving/openai_compatible_server + serving/deploying_with_docker + serving/distributed_serving + serving/metrics + serving/env_vars + serving/usage_stats + serving/integrations + +.. toctree:: + :maxdepth: 1 + :caption: Models + + models/supported_models + models/adding_model + models/engine_args + models/lora + models/performance + +.. toctree:: + :maxdepth: 1 + :caption: Quantization + + quantization/auto_awq + quantization/fp8_e5m2_kvcache + quantization/fp8_e4m3_kvcache + +.. 
toctree:: + :maxdepth: 2 + :caption: Developer Documentation + + dev/sampling_params + dev/engine/engine_index + dev/kernel/paged_attention + dev/dockerfile/dockerfile + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` diff --git a/docs/source/models/adding_model.rst b/docs/source/models/adding_model.rst new file mode 100644 index 0000000..cbc8099 --- /dev/null +++ b/docs/source/models/adding_model.rst @@ -0,0 +1,123 @@ +.. _adding_a_new_model: + +Adding a New Model +================== + +This document provides a high-level guide on integrating a `HuggingFace Transformers `_ model into vLLM. + +.. note:: + The complexity of adding a new model depends heavily on the model's architecture. + The process is considerably straightforward if the model shares a similar architecture with an existing model in vLLM. + However, for models that include new operators (e.g., a new attention mechanism), the process can be a bit more complex. + +.. tip:: + If you are encountering issues while integrating your model into vLLM, feel free to open an issue on our `GitHub `_ repository. + We will be happy to help you out! + + +0. Fork the vLLM repository +-------------------------------- + +Start by forking our `GitHub`_ repository and then :ref:`build it from source `. +This gives you the ability to modify the codebase and test your model. + +.. tip:: + If you don't want to fork the repository and modify vLLM's codebase, please refer to the "Out-of-Tree Model Integration" section below. + +1. Bring your model code +------------------------ + +Clone the PyTorch model code from the HuggingFace Transformers repository and put it into the `vllm/model_executor/models `_ directory. +For instance, vLLM's `OPT model `_ was adapted from the HuggingFace's `modeling_opt.py `_ file. + +.. warning:: + When copying the model code, make sure to review and adhere to the code's copyright and licensing terms. + + +2. 
Rewrite the :code:`forward` methods +-------------------------------------- + +Next, you need to rewrite the :code:`forward` methods of your model by following these steps: + +1. Remove any unnecessary code, such as the code only used for training. +2. Change the input parameters: + +.. code-block:: diff + + def forward( + self, + input_ids: torch.Tensor, + - attention_mask: Optional[torch.Tensor] = None, + - position_ids: Optional[torch.LongTensor] = None, + - past_key_values: Optional[List[torch.FloatTensor]] = None, + - inputs_embeds: Optional[torch.FloatTensor] = None, + - labels: Optional[torch.LongTensor] = None, + - use_cache: Optional[bool] = None, + - output_attentions: Optional[bool] = None, + - output_hidden_states: Optional[bool] = None, + - return_dict: Optional[bool] = None, + -) -> Union[Tuple, CausalLMOutputWithPast]: + + positions: torch.Tensor, + + kv_caches: List[torch.Tensor], + + attn_metadata: AttentionMetadata, + +) -> Optional[SamplerOutput]: + +1. Update the code by considering that :code:`input_ids` and :code:`positions` are now flattened tensors. +2. Replace the attention operation with either :code:`PagedAttention`, :code:`PagedAttentionWithRoPE`, or :code:`PagedAttentionWithALiBi` depending on the model's architecture. + +.. note:: + Currently, vLLM supports the basic multi-head attention mechanism and its variant with rotary positional embeddings. + If your model employs a different attention mechanism, you will need to implement a new attention layer in vLLM. + + +3. (Optional) Implement tensor parallelism and quantization support +------------------------------------------------------------------- + +If your model is too large to fit into a single GPU, you can use tensor parallelism to manage it. +To do this, substitute your model's linear and embedding layers with their tensor-parallel versions. +For the embedding layer, you can simply replace :code:`nn.Embedding` with :code:`VocabParallelEmbedding`. 
For the output LM head, you can use :code:`ParallelLMHead`. +When it comes to the linear layers, we provide the following options to parallelize them: + +* :code:`ReplicatedLinear`: Replicates the inputs and weights across multiple GPUs. No memory saving. +* :code:`RowParallelLinear`: The input tensor is partitioned along the hidden dimension. The weight matrix is partitioned along the rows (input dimension). An *all-reduce* operation is performed after the matrix multiplication to reduce the results. Typically used for the second FFN layer and the output linear transformation of the attention layer. +* :code:`ColumnParallelLinear`: The input tensor is replicated. The weight matrix is partitioned along the columns (output dimension). The result is partitioned along the column dimension. Typically used for the first FFN layer and the separated QKV transformation of the attention layer in the original Transformer. +* :code:`MergedColumnParallelLinear`: Column-parallel linear that merges multiple `ColumnParallelLinear` operators. Typically used for the first FFN layer with weighted activation functions (e.g., SiLU). This class handles the sharded weight loading logic of multiple weight matrices. +* :code:`QKVParallelLinear`: Parallel linear layer for the query, key, and value projections of the multi-head and grouped-query attention mechanisms. When number of key/value heads are less than the world size, this class replicates the key/value heads properly. This class handles the weight loading and replication of the weight matrices. + +Note that all the linear layers above take `linear_method` as an input. vLLM will set this parameter according to different quantization schemes to support weight quantization. + +4. Implement the weight loading logic +------------------------------------- + +You now need to implement the :code:`load_weights` method in your :code:`*ForCausalLM` class. 
+This method should load the weights from the HuggingFace's checkpoint file and assign them to the corresponding layers in your model. Specifically, for `MergedColumnParallelLinear` and `QKVParallelLinear` layers, if the original model has separated weight matrices, you need to load the different parts separately. + +5. Register your model +---------------------- + +Finally, register your :code:`*ForCausalLM` class to the :code:`_MODELS` in `vllm/model_executor/models/__init__.py `_. + +6. Out-of-Tree Model Integration +-------------------------------------------- + +We also provide a way to integrate a model without modifying the vLLM codebase. Step 2, 3, 4 are still required, but you can skip step 1 and 5. + +Just add the following lines in your code: + +.. code-block:: python + + from vllm import ModelRegistry + from your_code import YourModelForCausalLM + ModelRegistry.register_model("YourModelForCausalLM", YourModelForCausalLM) + +If you are running api server with `python -m vllm.entrypoints.openai.api_server args`, you can wrap the entrypoint with the following code: + +.. code-block:: python + + from vllm import ModelRegistry + from your_code import YourModelForCausalLM + ModelRegistry.register_model("YourModelForCausalLM", YourModelForCausalLM) + import runpy + runpy.run_module('vllm.entrypoints.openai.api_server', run_name='__main__') + +Save the above code in a file and run it with `python your_file.py args`. diff --git a/docs/source/models/engine_args.rst b/docs/source/models/engine_args.rst new file mode 100644 index 0000000..bdf566d --- /dev/null +++ b/docs/source/models/engine_args.rst @@ -0,0 +1,23 @@ +.. _engine_args: + +Engine Arguments +================ + +Below, you can find an explanation of every engine argument for vLLM: + +.. 
argparse:: + :module: vllm.engine.arg_utils + :func: _engine_args_parser + :prog: -m vllm.entrypoints.openai.api_server + :nodefaultconst: + +Async Engine Arguments +---------------------- + +Below are the additional arguments related to the asynchronous engine: + +.. argparse:: + :module: vllm.engine.arg_utils + :func: _async_engine_args_parser + :prog: -m vllm.entrypoints.openai.api_server + :nodefaultconst: \ No newline at end of file diff --git a/docs/source/models/lora.rst b/docs/source/models/lora.rst new file mode 100644 index 0000000..2278640 --- /dev/null +++ b/docs/source/models/lora.rst @@ -0,0 +1,104 @@ +.. _lora: + +Using LoRA adapters +=================== + +This document shows you how to use `LoRA adapters `_ with vLLM on top of a base model. +Adapters can be efficiently served on a per request basis with minimal overhead. First we download the adapter(s) and save +them locally with + +.. code-block:: python + + from huggingface_hub import snapshot_download + + sql_lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test") + + +Then we instantiate the base model and pass in the ``enable_lora=True`` flag: + +.. code-block:: python + + from vllm import LLM, SamplingParams + from vllm.lora.request import LoRARequest + + llm = LLM(model="meta-llama/Llama-2-7b-hf", enable_lora=True) + + +We can now submit the prompts and call ``llm.generate`` with the ``lora_request`` parameter. The first parameter +of ``LoRARequest`` is a human identifiable name, the second parameter is a globally unique ID for the adapter and +the third parameter is the path to the LoRA adapter. + +.. 
code-block:: python + + sampling_params = SamplingParams( + temperature=0, + max_tokens=256, + stop=["[/assistant]"] + ) + + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", + ] + + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest("sql_adapter", 1, sql_lora_path) + ) + + +Check out `examples/multilora_inference.py `_ +for an example of how to use LoRA adapters with the async engine and how to use more advanced configuration options. + +Serving LoRA Adapters +--------------------- +LoRA adapted models can also be served with the Open-AI compatible vLLM server. To do so, we use +``--lora-modules {name}={path} {name}={path}`` to specify each LoRA module when we kickoff the server: + +.. code-block:: bash + + python -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Llama-2-7b-hf \ + --enable-lora \ + --lora-modules sql-lora=~/.cache/huggingface/hub/models--yard1--llama-2-7b-sql-lora-test/ + +The server entrypoint accepts all other LoRA configuration parameters (``max_loras``, ``max_lora_rank``, ``max_cpu_loras``, +etc.), which will apply to all forthcoming requests. Upon querying the ``/models`` endpoint, we should see our LoRA along +with its base model: + +.. code-block:: bash + + curl localhost:8000/v1/models | jq . + { + "object": "list", + "data": [ + { + "id": "meta-llama/Llama-2-7b-hf", + "object": "model", + ... + }, + { + "id": "sql-lora", + "object": "model", + ... 
+ } + ] + } + +Requests can specify the LoRA adapter as if it were any other model via the ``model`` request parameter. The requests will be +processed according to the server-wide LoRA configuration (i.e. in parallel with base model requests, and potentially other +LoRA adapter requests if they were provided and ``max_loras`` is set high enough). + +The following is an example request + +.. code-block:: bash + + curl http://localhost:8000/v1/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "sql-lora", + "prompt": "San Francisco is a", + "max_tokens": 7, + "temperature": 0 + }' | jq diff --git a/docs/source/models/performance.rst b/docs/source/models/performance.rst new file mode 100644 index 0000000..0677576 --- /dev/null +++ b/docs/source/models/performance.rst @@ -0,0 +1,38 @@ +.. _performance: + +Performance and Tuning +====================== + +Chunked Prefill +--------------- +vLLM supports an experimental feature chunked prefill. Chunked prefill allows to chunk large prefills into smaller chunks and batch them together with decode requests. + +You can enable the feature by specifying + +.. code-block:: python + + llm = LLM(model="meta-llama/Llama-2-7b-hf", enable_chunked_prefill=True) + # Set max_num_batched_tokens to tune performance. + # NOTE: 512 is the default max_num_batched_tokens for chunked prefill. + # llm = LLM(model="meta-llama/Llama-2-7b-hf", enable_chunked_prefill=True, max_num_batched_tokens=512) + +By default, vLLM scheduler prioritizes prefills and doesn't batch prefill and decode to the same batch. This policy optimizes the TTFT (time to thefirst token), but incurs slower ITL (inter token latency) and inefficient GPU utilization. + +Once chunked prefill is enabled, the policy is changed to + +- prioritize decode requests. It batches all pending decode requests to the batch before scheduling any prefill. +- When there are available token_budget (`max_num_batched_tokens`), it schedules pending prefills. 
If a last pending prefill request cannot fit into `max_num_batched_tokens`, it chunks it. + +This policy has two benefits. + +- It improves ITL (inter token latency) and generation decode because decode requests are prioritized. +- It helps achieve better GPU utilization by locating compute-bound (prefill) and memory-bound (decode) requests to the same batch. + +You can tune the performance by changing `max_num_batched_tokens`. +By default, it is set to 512, which has the best ITL on A100 in the initial benchmark. +Smaller batch size achieves better ITL because there are fewer prefills interrupting decodes. +Higher batch size achieves better TTFT as you can put more prefill to the batch. +If `max_num_batched_tokens` is the same as `max_model_len`, that's almost the equivalent to the default scheduling policy (except that it still prioritizes decodes). +Note that the default batch size (512) is optimized for ITL, and it may have lower throughput than the default scheduler. We recommend you set `max_num_batched_tokens > 2048` for throughput. + +See related papers for more details (https://arxiv.org/pdf/2401.08671 or https://arxiv.org/pdf/2308.16369). diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst new file mode 100644 index 0000000..ceb658b --- /dev/null +++ b/docs/source/models/supported_models.rst @@ -0,0 +1,200 @@ +.. _supported_models: + +Supported Models +================ + +vLLM supports a variety of generative Transformer models in `HuggingFace Transformers `_. +The following is the list of model architectures that are currently supported by vLLM. +Alongside each architecture, we include some popular models that use it. + +.. list-table:: + :widths: 25 25 50 5 + :header-rows: 1 + + * - Architecture + - Models + - Example HuggingFace Models + - :ref:`LoRA ` + * - :code:`AquilaForCausalLM` + - Aquila + - :code:`BAAI/Aquila-7B`, :code:`BAAI/AquilaChat-7B`, etc. 
+ - ✅︎ + * - :code:`BaiChuanForCausalLM` + - Baichuan + - :code:`baichuan-inc/Baichuan2-13B-Chat`, :code:`baichuan-inc/Baichuan-7B`, etc. + - ✅︎ + * - :code:`ChatGLMModel` + - ChatGLM + - :code:`THUDM/chatglm2-6b`, :code:`THUDM/chatglm3-6b`, etc. + - ✅︎ + * - :code:`CohereForCausalLM` + - Command-R + - :code:`CohereForAI/c4ai-command-r-v01`, etc. + - + * - :code:`DbrxForCausalLM` + - DBRX + - :code:`databricks/dbrx-base`, :code:`databricks/dbrx-instruct`, etc. + - + * - :code:`DeciLMForCausalLM` + - DeciLM + - :code:`Deci/DeciLM-7B`, :code:`Deci/DeciLM-7B-instruct`, etc. + - + * - :code:`BloomForCausalLM` + - BLOOM, BLOOMZ, BLOOMChat + - :code:`bigscience/bloom`, :code:`bigscience/bloomz`, etc. + - + * - :code:`FalconForCausalLM` + - Falcon + - :code:`tiiuae/falcon-7b`, :code:`tiiuae/falcon-40b`, :code:`tiiuae/falcon-rw-7b`, etc. + - + * - :code:`GemmaForCausalLM` + - Gemma + - :code:`google/gemma-2b`, :code:`google/gemma-7b`, etc. + - ✅︎ + * - :code:`GPT2LMHeadModel` + - GPT-2 + - :code:`gpt2`, :code:`gpt2-xl`, etc. + - + * - :code:`GPTBigCodeForCausalLM` + - StarCoder, SantaCoder, WizardCoder + - :code:`bigcode/starcoder`, :code:`bigcode/gpt_bigcode-santacoder`, :code:`WizardLM/WizardCoder-15B-V1.0`, etc. + - + * - :code:`GPTJForCausalLM` + - GPT-J + - :code:`EleutherAI/gpt-j-6b`, :code:`nomic-ai/gpt4all-j`, etc. + - + * - :code:`GPTNeoXForCausalLM` + - GPT-NeoX, Pythia, OpenAssistant, Dolly V2, StableLM + - :code:`EleutherAI/gpt-neox-20b`, :code:`EleutherAI/pythia-12b`, :code:`OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5`, :code:`databricks/dolly-v2-12b`, :code:`stabilityai/stablelm-tuned-alpha-7b`, etc. + - + * - :code:`InternLMForCausalLM` + - InternLM + - :code:`internlm/internlm-7b`, :code:`internlm/internlm-chat-7b`, etc. + - ✅︎ + * - :code:`InternLM2ForCausalLM` + - InternLM2 + - :code:`internlm/internlm2-7b`, :code:`internlm/internlm2-chat-7b`, etc. 
+ - + * - :code:`JAISLMHeadModel` + - Jais + - :code:`core42/jais-13b`, :code:`core42/jais-13b-chat`, :code:`core42/jais-30b-v3`, :code:`core42/jais-30b-chat-v3`, etc. + - + * - :code:`LlamaForCausalLM` + - LLaMA, Llama 2, Meta Llama 3, Vicuna, Alpaca, Yi + - :code:`meta-llama/Meta-Llama-3-8B-Instruct`, :code:`meta-llama/Meta-Llama-3-70B-Instruct`, :code:`meta-llama/Llama-2-13b-hf`, :code:`meta-llama/Llama-2-70b-hf`, :code:`openlm-research/open_llama_13b`, :code:`lmsys/vicuna-13b-v1.3`, :code:`01-ai/Yi-6B`, :code:`01-ai/Yi-34B`, etc. + - ✅︎ + * - :code:`MiniCPMForCausalLM` + - MiniCPM + - :code:`openbmb/MiniCPM-2B-sft-bf16`, :code:`openbmb/MiniCPM-2B-dpo-bf16`, etc. + - + * - :code:`MistralForCausalLM` + - Mistral, Mistral-Instruct + - :code:`mistralai/Mistral-7B-v0.1`, :code:`mistralai/Mistral-7B-Instruct-v0.1`, etc. + - ✅︎ + * - :code:`MixtralForCausalLM` + - Mixtral-8x7B, Mixtral-8x7B-Instruct + - :code:`mistralai/Mixtral-8x7B-v0.1`, :code:`mistralai/Mixtral-8x7B-Instruct-v0.1`, :code:`mistral-community/Mixtral-8x22B-v0.1`, etc. + - ✅︎ + * - :code:`MPTForCausalLM` + - MPT, MPT-Instruct, MPT-Chat, MPT-StoryWriter + - :code:`mosaicml/mpt-7b`, :code:`mosaicml/mpt-7b-storywriter`, :code:`mosaicml/mpt-30b`, etc. + - + * - :code:`OLMoForCausalLM` + - OLMo + - :code:`allenai/OLMo-1B-hf`, :code:`allenai/OLMo-7B-hf`, etc. + - + * - :code:`OPTForCausalLM` + - OPT, OPT-IML + - :code:`facebook/opt-66b`, :code:`facebook/opt-iml-max-30b`, etc. + - + * - :code:`OrionForCausalLM` + - Orion + - :code:`OrionStarAI/Orion-14B-Base`, :code:`OrionStarAI/Orion-14B-Chat`, etc. + - + * - :code:`PhiForCausalLM` + - Phi + - :code:`microsoft/phi-1_5`, :code:`microsoft/phi-2`, etc. + - + * - :code:`Phi3ForCausalLM` + - Phi-3 + - :code:`microsoft/Phi-3-mini-4k-instruct`, :code:`microsoft/Phi-3-mini-128k-instruct`, etc. + - + * - :code:`QWenLMHeadModel` + - Qwen + - :code:`Qwen/Qwen-7B`, :code:`Qwen/Qwen-7B-Chat`, etc. 
+ - + * - :code:`Qwen2ForCausalLM` + - Qwen2 + - :code:`Qwen/Qwen2-beta-7B`, :code:`Qwen/Qwen2-beta-7B-Chat`, etc. + - ✅︎ + * - :code:`Qwen2MoeForCausalLM` + - Qwen2MoE + - :code:`Qwen/Qwen1.5-MoE-A2.7B`, :code:`Qwen/Qwen1.5-MoE-A2.7B-Chat`, etc. + - + * - :code:`StableLmForCausalLM` + - StableLM + - :code:`stabilityai/stablelm-3b-4e1t/` , :code:`stabilityai/stablelm-base-alpha-7b-v2`, etc. + - + +If your model uses one of the above model architectures, you can seamlessly run your model with vLLM. +Otherwise, please refer to :ref:`Adding a New Model ` for instructions on how to implement support for your model. +Alternatively, you can raise an issue on our `GitHub `_ project. + +.. note:: + Currently, the ROCm version of vLLM supports Mistral and Mixtral only for context lengths up to 4096. + +.. tip:: + The easiest way to check if your model is supported is to run the program below: + + .. code-block:: python + + from vllm import LLM + + llm = LLM(model=...) # Name or path of your model + output = llm.generate("Hello, my name is") + print(output) + + If vLLM successfully generates text, it indicates that your model is supported. + +.. tip:: + To use models from `ModelScope `_ instead of HuggingFace Hub, set an environment variable: + + .. code-block:: shell + + $ export VLLM_USE_MODELSCOPE=True + + And use with :code:`trust_remote_code=True`. + + .. code-block:: python + + from vllm import LLM + + llm = LLM(model=..., revision=..., trust_remote_code=True) # Name or path of your model + output = llm.generate("Hello, my name is") + print(output) + +Model Support Policy +--------------------- + +At vLLM, we are committed to facilitating the integration and support of third-party models within our ecosystem. Our approach is designed to balance the need for robustness and the practical limitations of supporting a wide range of models. Here’s how we manage third-party model support: + +1. 
**Community-Driven Support**: We encourage community contributions for adding new models. When a user requests support for a new model, we welcome pull requests (PRs) from the community. These contributions are evaluated primarily on the sensibility of the output they generate, rather than strict consistency with existing implementations such as those in transformers. **Call for contribution:** PRs coming directly from model vendors are greatly appreciated! + +2. **Best-Effort Consistency**: While we aim to maintain a level of consistency between the models implemented in vLLM and other frameworks like transformers, complete alignment is not always feasible. Factors like acceleration techniques and the use of low-precision computations can introduce discrepancies. Our commitment is to ensure that the implemented models are functional and produce sensible results. + +3. **Issue Resolution and Model Updates**: Users are encouraged to report any bugs or issues they encounter with third-party models. Proposed fixes should be submitted via PRs, with a clear explanation of the problem and the rationale behind the proposed solution. If a fix for one model impacts another, we rely on the community to highlight and address these cross-model dependencies. Note: for bugfix PRs, it is good etiquette to inform the original author to seek their feedback. + +4. **Monitoring and Updates**: Users interested in specific models should monitor the commit history for those models (e.g., by tracking changes in the main/vllm/model_executor/models directory). This proactive approach helps users stay informed about updates and changes that may affect the models they use. + +5. **Selective Focus**: Our resources are primarily directed towards models with significant user interest and impact. Models that are less frequently used may receive less attention, and we rely on the community to play a more active role in their upkeep and improvement. 
+ +Through this approach, vLLM fosters a collaborative environment where both the core development team and the broader community contribute to the robustness and diversity of the third-party models supported in our ecosystem. + +Note that, as an inference engine, vLLM does not introduce new models. Therefore, all models supported by vLLM are third-party models in this regard. + +We have the following levels of testing for models: + +1. **Strict Consistency**: We compare the output of the model with the output of the model in the HuggingFace Transformers library under greedy decoding. This is the most stringent test. Please refer to `test_models.py `_ and `test_big_models.py `_ for the models that have passed this test. +2. **Output Sensibility**: We check if the output of the model is sensible and coherent, by measuring the perplexity of the output and checking for any obvious errors. This is a less stringent test. +3. **Runtime Functionality**: We check if the model can be loaded and run without errors. This is the least stringent test. Please refer to `functionality tests `_ and `examples `_ for the models that have passed this test. +4. **Community Feedback**: We rely on the community to provide feedback on the models. If a model is broken or not working as expected, we encourage users to raise issues to report it or open pull requests to fix it. The rest of the models fall under this category. diff --git a/docs/source/quantization/auto_awq.rst b/docs/source/quantization/auto_awq.rst new file mode 100644 index 0000000..bbbb9ae --- /dev/null +++ b/docs/source/quantization/auto_awq.rst @@ -0,0 +1,75 @@ +.. _auto_awq: + +AutoAWQ +================== + +.. warning:: + + Please note that AWQ support in vLLM is under-optimized at the moment. We would recommend using the unquantized version of the model for better + accuracy and higher throughput. Currently, you can use AWQ as a way to reduce memory footprint. 
As of now, it is more suitable for low latency + inference with a small number of concurrent requests. vLLM's AWQ implementation has lower throughput than the unquantized version.
+ llm = LLM(model="TheBloke/Llama-2-7b-Chat-AWQ", quantization="AWQ") + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/docs/source/quantization/fp8_e4m3_kvcache.rst b/docs/source/quantization/fp8_e4m3_kvcache.rst new file mode 100644 index 0000000..fd71c00 --- /dev/null +++ b/docs/source/quantization/fp8_e4m3_kvcache.rst @@ -0,0 +1,49 @@ +.. _fp8_e4m3_kvcache: + +FP8 E4M3 KV Cache +================== + +Quantizing the KV cache to FP8 reduces its memory footprint. This increases the number of tokens that can be stored in the cache, +improving throughput. OCP (Open Compute Project www.opencompute.org) specifies two common 8-bit floating point data formats: E5M2 +(5 exponent bits and 2 mantissa bits) and E4M3FN (4 exponent bits and 3 mantissa bits), often shortened as E4M3. One benefit of +the E4M3 format over E5M2 is that floating point numbers are represented in higher precision. However, the small dynamic range of +FP8 E4M3 (±240.0 can be represented) typically necessitates the use of a higher-precision (typically FP32) scaling factor alongside +each quantized tensor. For now, only per-tensor (scalar) scaling factors are supported. Development is ongoing to support scaling +factors of a finer granularity (e.g. per-channel). + +These scaling factors can be specified by passing an optional quantization param JSON to the LLM engine at load time. If +this JSON is not specified, scaling factors default to 1.0. These scaling factors are typically obtained when running an +unquantized model through a quantizer tool (e.g. AMD quantizer or NVIDIA AMMO). + +To install AMMO (AlgorithMic Model Optimization): + +.. 
code-block:: console + + $ pip install --no-cache-dir --extra-index-url https://pypi.nvidia.com nvidia-ammo + +Studies have shown that FP8 E4M3 quantization typically only minimally degrades inference accuracy. The most recent silicon +offerings e.g. AMD MI300, NVIDIA Hopper or later support native hardware conversion to and from fp32, fp16, bf16, etc. +Thus, LLM inference is greatly accelerated with minimal accuracy loss. + + +Here is an example of how to enable this feature: + +.. code-block:: python + + # two float8_e4m3fn kv cache scaling factor files are provided under tests/fp8_kv, please refer to + # https://github.com/vllm-project/vllm/blob/main/examples/fp8/README.md to generate kv_cache_scales.json of your own. + + from vllm import LLM, SamplingParams + sampling_params = SamplingParams(temperature=1.3, top_p=0.8) + llm = LLM(model="meta-llama/Llama-2-7b-chat-hf", + kv_cache_dtype="fp8", + quantization_param_path="./tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json") + prompt = "London is the capital of" + out = llm.generate(prompt, sampling_params)[0].outputs[0].text + print(out) + + # output w/ scaling factors: England, the United Kingdom, and one of the world's leading financial, + # output w/o scaling factors: England, located in the southeastern part of the country. It is known + +Note, current prefix caching doesn't work with FP8 KV cache enabled, forward_prefix kernel should handle different KV and cache type. + diff --git a/docs/source/quantization/fp8_e5m2_kvcache.rst b/docs/source/quantization/fp8_e5m2_kvcache.rst new file mode 100644 index 0000000..337252a --- /dev/null +++ b/docs/source/quantization/fp8_e5m2_kvcache.rst @@ -0,0 +1,36 @@ +.. _fp8_kv_cache: + +FP8 E5M2 KV Cache +================== + +The int8/int4 quantization scheme requires additional scale GPU memory storage, which reduces the expected GPU memory benefits. +The FP8 data format retains 2~3 mantissa bits and can convert float/fp16/bflaot16 and fp8 to each other. 
+ +Here is an example of how to enable this feature: + +.. code-block:: python + + from vllm import LLM, SamplingParams + # Sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + # Create an LLM. + llm = LLM(model="facebook/opt-125m", kv_cache_dtype="fp8") + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +Note, current prefix caching doesn't work with FP8 KV cache enabled, forward_prefix kernel should handle different KV and cache type. + diff --git a/docs/source/serving/deploying_with_bentoml.rst b/docs/source/serving/deploying_with_bentoml.rst new file mode 100644 index 0000000..4b9d19f --- /dev/null +++ b/docs/source/serving/deploying_with_bentoml.rst @@ -0,0 +1,8 @@ +.. _deploying_with_bentoml: + +Deploying with BentoML +====================== + +`BentoML `_ allows you to deploy a large language model (LLM) server with vLLM as the backend, which exposes OpenAI-compatible endpoints. You can serve the model locally or containerize it as an OCI-complicant image and deploy it on Kubernetes. + +For details, see the tutorial `vLLM inference in the BentoML documentation `_. \ No newline at end of file diff --git a/docs/source/serving/deploying_with_docker.rst b/docs/source/serving/deploying_with_docker.rst new file mode 100644 index 0000000..cfc462f --- /dev/null +++ b/docs/source/serving/deploying_with_docker.rst @@ -0,0 +1,54 @@ +.. 
_deploying_with_docker: + +Deploying with Docker +============================ + +vLLM offers official docker image for deployment. +The image can be used to run OpenAI compatible server. +The image is available on Docker Hub as `vllm/vllm-openai `_. + +.. code-block:: console + + $ docker run --runtime nvidia --gpus all \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + --env "HUGGING_FACE_HUB_TOKEN=" \ + -p 8000:8000 \ + --ipc=host \ + vllm/vllm-openai:latest \ + --model mistralai/Mistral-7B-v0.1 + + +.. note:: + + You can either use the ``ipc=host`` flag or ``--shm-size`` flag to allow the + container to access the host's shared memory. vLLM uses PyTorch, which uses shared + memory to share data between processes under the hood, particularly for tensor parallel inference. + + +You can build and run vLLM from source via the provided dockerfile. To build vLLM: + +.. code-block:: console + + $ DOCKER_BUILDKIT=1 docker build . --target vllm-openai --tag vllm/vllm-openai # optionally specifies: --build-arg max_jobs=8 --build-arg nvcc_threads=2 + + +.. note:: + + By default vLLM will build for all GPU types for widest distribution. If you are just building for the + current GPU type the machine is running on, you can add the argument ``--build-arg torch_cuda_arch_list=""`` + for vLLM to find the current GPU type and build for that. + + +To run vLLM: + +.. code-block:: console + + $ docker run --runtime nvidia --gpus all \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + -p 8000:8000 \ + --env "HUGGING_FACE_HUB_TOKEN=" \ + vllm/vllm-openai + +.. note:: + + vLLM docker image is currently designed to be run under the root user (contribution welcomed for changing this!). It will try to load library at runtime under the root user's home directory, e.g. `/root/.config/vllm/nccl/cu12/libnccl.so.2.18.1` . 
If you are running the container under a different user, you may need to change the permissions of the library (and all the parent directories) to allow the user to access it. Then run vLLM with environment variable `VLLM_NCCL_SO_PATH=/root/.config/vllm/nccl/cu12/libnccl.so.2.18.1` . diff --git a/docs/source/serving/deploying_with_kserve.rst b/docs/source/serving/deploying_with_kserve.rst new file mode 100644 index 0000000..7f22766 --- /dev/null +++ b/docs/source/serving/deploying_with_kserve.rst @@ -0,0 +1,8 @@ +.. _deploying_with_kserve: + +Deploying with KServe +============================ + +vLLM can be deployed with `KServe `_ on Kubernetes for highly scalable distributed model serving. + +Please see `this guide `_ for more details on using vLLM with KServe. diff --git a/docs/source/serving/deploying_with_triton.rst b/docs/source/serving/deploying_with_triton.rst new file mode 100644 index 0000000..5ce7c3d --- /dev/null +++ b/docs/source/serving/deploying_with_triton.rst @@ -0,0 +1,6 @@ +.. _deploying_with_triton: + +Deploying with NVIDIA Triton +============================ + +The `Triton Inference Server `_ hosts a tutorial demonstrating how to quickly deploy a simple `facebook/opt-125m `_ model using vLLM. Please see `Deploying a vLLM model in Triton `_ for more details. diff --git a/docs/source/serving/distributed_serving.rst b/docs/source/serving/distributed_serving.rst new file mode 100644 index 0000000..4f36dca --- /dev/null +++ b/docs/source/serving/distributed_serving.rst @@ -0,0 +1,38 @@ +.. _distributed_serving: + +Distributed Inference and Serving +================================= + +vLLM supports distributed tensor-parallel inference and serving. Currently, we support `Megatron-LM's tensor parallel algorithm `_. We manage the distributed runtime with `Ray `_. To run distributed inference, install Ray with: + +.. 
code-block:: console + + $ pip install ray + +To run multi-GPU inference with the :code:`LLM` class, set the :code:`tensor_parallel_size` argument to the number of GPUs you want to use. For example, to run inference on 4 GPUs: + +.. code-block:: python + + from vllm import LLM + llm = LLM("facebook/opt-13b", tensor_parallel_size=4) + output = llm.generate("San Franciso is a") + +To run multi-GPU serving, pass in the :code:`--tensor-parallel-size` argument when starting the server. For example, to run API server on 4 GPUs: + +.. code-block:: console + + $ python -m vllm.entrypoints.api_server \ + $ --model facebook/opt-13b \ + $ --tensor-parallel-size 4 + +To scale vLLM beyond a single machine, start a `Ray runtime `_ via CLI before running vLLM: + +.. code-block:: console + + $ # On head node + $ ray start --head + + $ # On worker nodes + $ ray start --address= + +After that, you can run inference and serving on multiple machines by launching the vLLM process on the head node by setting :code:`tensor_parallel_size` to the number of GPUs to be the total number of GPUs across all machines. \ No newline at end of file diff --git a/docs/source/serving/env_vars.rst b/docs/source/serving/env_vars.rst new file mode 100644 index 0000000..0ce1374 --- /dev/null +++ b/docs/source/serving/env_vars.rst @@ -0,0 +1,9 @@ +Environment Variables +======================== + +vLLM uses the following environment variables to configure the system: + +.. literalinclude:: ../../../vllm/envs.py + :language: python + :start-after: begin-env-vars-definition + :end-before: end-env-vars-definition diff --git a/docs/source/serving/integrations.rst b/docs/source/serving/integrations.rst new file mode 100644 index 0000000..9387239 --- /dev/null +++ b/docs/source/serving/integrations.rst @@ -0,0 +1,11 @@ +Integrations +------------ + +.. 
toctree:: + :maxdepth: 1 + + run_on_sky + deploying_with_kserve + deploying_with_triton + deploying_with_bentoml + serving_with_langchain diff --git a/docs/source/serving/metrics.rst b/docs/source/serving/metrics.rst new file mode 100644 index 0000000..15e57bd --- /dev/null +++ b/docs/source/serving/metrics.rst @@ -0,0 +1,13 @@ +Production Metrics +================== + +vLLM exposes a number of metrics that can be used to monitor the health of the +system. These metrics are exposed via the `/metrics` endpoint on the vLLM +OpenAI compatible API server. + +The following metrics are exposed: + +.. literalinclude:: ../../../vllm/engine/metrics.py + :language: python + :start-after: begin-metrics-definitions + :end-before: end-metrics-definitions diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md new file mode 100644 index 0000000..c157d8b --- /dev/null +++ b/docs/source/serving/openai_compatible_server.md @@ -0,0 +1,112 @@ +# OpenAI Compatible Server + +vLLM provides an HTTP server that implements OpenAI's [Completions](https://platform.openai.com/docs/api-reference/completions) and [Chat](https://platform.openai.com/docs/api-reference/chat) API. + +You can start the server using Python, or using [Docker](deploying_with_docker.rst): +```bash +python -m vllm.entrypoints.openai.api_server --model NousResearch/Meta-Llama-3-8B-Instruct --dtype auto --api-key token-abc123 +``` + +To call the server, you can use the official OpenAI Python client library, or any other HTTP client. 
+```python +from openai import OpenAI +client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", +) + +completion = client.chat.completions.create( + model="NousResearch/Meta-Llama-3-8B-Instruct", + messages=[ + {"role": "user", "content": "Hello!"} + ] +) + +print(completion.choices[0].message) +``` + +## API Reference +Please see the [OpenAI API Reference](https://platform.openai.com/docs/api-reference) for more information on the API. We support all parameters except: +- Chat: `tools`, and `tool_choice`. +- Completions: `suffix`. + +## Extra Parameters +vLLM supports a set of parameters that are not part of the OpenAI API. +In order to use them, you can pass them as extra parameters in the OpenAI client. +Or directly merge them into the JSON payload if you are using HTTP call directly. + +```python +completion = client.chat.completions.create( + model="NousResearch/Meta-Llama-3-8B-Instruct", + messages=[ + {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} + ], + extra_body={ + "guided_choice": ["positive", "negative"] + } +) +``` + +### Extra Parameters for Chat API +The following [sampling parameters (click through to see documentation)](../dev/sampling_params.rst) are supported. + +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-chat-completion-sampling-params +:end-before: end-chat-completion-sampling-params +``` + +The following extra parameters are supported: + +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-chat-completion-extra-params +:end-before: end-chat-completion-extra-params +``` + +### Extra Parameters for Completions API +The following [sampling parameters (click through to see documentation)](../dev/sampling_params.rst) are supported. 
+ +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-completion-sampling-params +:end-before: end-completion-sampling-params +``` + +The following extra parameters are supported: + +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-completion-extra-params +:end-before: end-completion-extra-params +``` + +## Chat Template + +In order for the language model to support chat protocol, vLLM requires the model to include +a chat template in its tokenizer configuration. The chat template is a Jinja2 template that +specifies how are roles, messages, and other chat-specific tokens are encoded in the input. + +An example chat template for `NousResearch/Meta-Llama-3-8B-Instruct` can be found [here](https://github.com/meta-llama/llama3?tab=readme-ov-file#instruction-tuned-models) + +Some models do not provide a chat template even though they are instruction/chat fine-tuned. For those model, +you can manually specify their chat template in the `--chat-template` parameter with the file path to the chat +template, or the template in string form. Without a chat template, the server will not be able to process chat +and all chat requests will error. + +```bash +python -m vllm.entrypoints.openai.api_server \ + --model ... \ + --chat-template ./path-to-chat-template.jinja +``` + +vLLM community provides a set of chat templates for popular models. You can find them in the examples +directory [here](https://github.com/vllm-project/vllm/tree/main/examples/) + +## Command line arguments for the server + +```{argparse} +:module: vllm.entrypoints.openai.cli_args +:func: make_arg_parser +:prog: vllm-openai-server +``` \ No newline at end of file diff --git a/docs/source/serving/run_on_sky.rst b/docs/source/serving/run_on_sky.rst new file mode 100644 index 0000000..bd33c76 --- /dev/null +++ b/docs/source/serving/run_on_sky.rst @@ -0,0 +1,310 @@ +.. 
_on_cloud: + +Deploying and scaling up with SkyPilot +================================================ + +.. raw:: html + +

+ vLLM +

+ +vLLM can be **run and scaled to multiple service replicas on clouds and Kubernetes** with `SkyPilot `__, an open-source framework for running LLMs on any cloud. More examples for various open models, such as Llama-3, Mixtral, etc, can be found in `SkyPilot AI gallery `__. + + +Prerequisites +------------- + +- Go to the `HuggingFace model page `__ and request access to the model :code:`meta-llama/Meta-Llama-3-8B-Instruct`. +- Check that you have installed SkyPilot (`docs `__). +- Check that :code:`sky check` shows clouds or Kubernetes are enabled. + +.. code-block:: console + + pip install skypilot-nightly + sky check + + +Run on a single instance +------------------------ + +See the vLLM SkyPilot YAML for serving, `serving.yaml `__. + +.. code-block:: yaml + + resources: + accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. + use_spot: True + disk_size: 512 # Ensure model checkpoints can fit. + disk_tier: best + ports: 8081 # Expose to internet traffic. + + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + HF_TOKEN: # Change to your own huggingface token, or use --env to pass. + + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm + + pip install vllm==0.4.0.post1 + # Install Gradio for web UI. + pip install gradio openai + pip install flash-attn==2.5.7 + + run: | + conda activate vllm + echo 'Starting vllm api server...' + python -u -m vllm.entrypoints.openai.api_server \ + --port 8081 \ + --model $MODEL_NAME \ + --trust-remote-code \ + --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ + 2>&1 | tee api_server.log & + + echo 'Waiting for vllm api server to start...' + while ! `cat api_server.log | grep -q 'Uvicorn running on'`; do sleep 1; done + + echo 'Starting gradio server...' 
+ git clone https://github.com/vllm-project/vllm.git || true + python vllm/examples/gradio_openai_chatbot_webserver.py \ + -m $MODEL_NAME \ + --port 8811 \ + --model-url http://localhost:8081/v1 \ + --stop-token-ids 128009,128001 + +Start the serving the Llama-3 8B model on any of the candidate GPUs listed (L4, A10g, ...): + +.. code-block:: console + + HF_TOKEN="your-huggingface-token" sky launch serving.yaml --env HF_TOKEN + +Check the output of the command. There will be a shareable gradio link (like the last line of the following). Open it in your browser to use the LLaMA model to do the text completion. + +.. code-block:: console + + (task, pid=7431) Running on public URL: https://.gradio.live + +**Optional**: Serve the 70B model instead of the default 8B and use more GPU: + +.. code-block:: console + + HF_TOKEN="your-huggingface-token" sky launch serving.yaml --gpus A100:8 --env HF_TOKEN --env MODEL_NAME=meta-llama/Meta-Llama-3-70B-Instruct + + +Scale up to multiple replicas +----------------------------- + +SkyPilot can scale up the service to multiple service replicas with built-in autoscaling, load-balancing and fault-tolerance. You can do it by adding a services section to the YAML file. + +.. code-block:: yaml + + service: + replicas: 2 + # An actual request for readiness probe. + readiness_probe: + path: /v1/chat/completions + post_data: + model: $MODEL_NAME + messages: + - role: user + content: Hello! What is your name? + max_tokens: 1 + +.. raw:: html + +
+ Click to see the full recipe YAML + + +.. code-block:: yaml + + service: + replicas: 2 + # An actual request for readiness probe. + readiness_probe: + path: /v1/chat/completions + post_data: + model: $MODEL_NAME + messages: + - role: user + content: Hello! What is your name? + max_tokens: 1 + + resources: + accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. + use_spot: True + disk_size: 512 # Ensure model checkpoints can fit. + disk_tier: best + ports: 8081 # Expose to internet traffic. + + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + HF_TOKEN: # Change to your own huggingface token, or use --env to pass. + + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm + + pip install vllm==0.4.0.post1 + # Install Gradio for web UI. + pip install gradio openai + pip install flash-attn==2.5.7 + + run: | + conda activate vllm + echo 'Starting vllm api server...' + python -u -m vllm.entrypoints.openai.api_server \ + --port 8081 \ + --model $MODEL_NAME \ + --trust-remote-code \ + --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ + 2>&1 | tee api_server.log & + + echo 'Waiting for vllm api server to start...' + while ! `cat api_server.log | grep -q 'Uvicorn running on'`; do sleep 1; done + + echo 'Starting gradio server...' + git clone https://github.com/vllm-project/vllm.git || true + python vllm/examples/gradio_openai_chatbot_webserver.py \ + -m $MODEL_NAME \ + --port 8811 \ + --model-url http://localhost:8081/v1 \ + --stop-token-ids 128009,128001 + +.. raw:: html + +
Start serving the Llama-3 8B model on multiple replicas:
+ Example outputs: + +.. code-block:: console + + Services + NAME VERSION UPTIME STATUS REPLICAS ENDPOINT + vllm 1 35s READY 2/2 xx.yy.zz.100:30001 + + Service Replicas + SERVICE_NAME ID VERSION IP LAUNCHED RESOURCES STATUS REGION + vllm 1 1 xx.yy.zz.121 18 mins ago 1x GCP({'L4': 1}) READY us-east4 + vllm 2 1 xx.yy.zz.245 18 mins ago 1x GCP({'L4': 1}) READY us-east4 + +.. raw:: html + +
+ +After the service is READY, you can find a single endpoint for the service and access the service with the endpoint: + +.. code-block:: console + + ENDPOINT=$(sky serve status --endpoint 8081 vllm) + curl -L http://$ENDPOINT/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "meta-llama/Meta-Llama-3-8B-Instruct", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Who are you?" + } + ], + "stop_token_ids": [128009, 128001] + }' + +To enable autoscaling, you could specify additional configs in `services`: + +.. code-block:: yaml + + services: + replica_policy: + min_replicas: 0 + max_replicas: 3 + target_qps_per_replica: 2 + +This will scale the service up to when the QPS exceeds 2 for each replica. + + +**Optional**: Connect a GUI to the endpoint +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +It is also possible to access the Llama-3 service with a separate GUI frontend, so the user requests send to the GUI will be load-balanced across replicas. + +.. raw:: html + +
+ Click to see the full GUI YAML + +.. code-block:: yaml + + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-70B-Instruct + ENDPOINT: x.x.x.x:3031 # Address of the API server running vllm. + + resources: + cpus: 2 + + setup: | + conda activate vllm + if [ $? -ne 0 ]; then + conda create -n vllm python=3.10 -y + conda activate vllm + fi + + # Install Gradio for web UI. + pip install gradio openai + + run: | + conda activate vllm + export PATH=$PATH:/sbin + WORKER_IP=$(hostname -I | cut -d' ' -f1) + CONTROLLER_PORT=21001 + WORKER_PORT=21002 + + echo 'Starting gradio server...' + git clone https://github.com/vllm-project/vllm.git || true + python vllm/examples/gradio_openai_chatbot_webserver.py \ + -m $MODEL_NAME \ + --port 8811 \ + --model-url http://$ENDPOINT/v1 \ + --stop-token-ids 128009,128001 | tee ~/gradio.log + +.. raw:: html + +
+ +1. Start the chat web UI: + +.. code-block:: console + + sky launch -c gui ./gui.yaml --env ENDPOINT=$(sky serve status --endpoint vllm) + + +2. Then, we can access the GUI at the returned gradio link: + +.. code-block:: console + + | INFO | stdout | Running on public URL: https://6141e84201ce0bb4ed.gradio.live + + diff --git a/docs/source/serving/serving_with_langchain.rst b/docs/source/serving/serving_with_langchain.rst new file mode 100644 index 0000000..6440c8a --- /dev/null +++ b/docs/source/serving/serving_with_langchain.rst @@ -0,0 +1,31 @@ +.. _run_on_langchain: + +Serving with Langchain +============================ + +vLLM is also available via `Langchain <https://python.langchain.com>`_ . + +To install langchain, run + +.. code-block:: console + + $ pip install langchain langchain_community -q + +To run inference on a single or multiple GPUs, use ``VLLM`` class from ``langchain``. + +.. code-block:: python + + from langchain_community.llms import VLLM + + llm = VLLM(model="mosaicml/mpt-7b", + trust_remote_code=True, # mandatory for hf models + max_new_tokens=128, + top_k=10, + top_p=0.95, + temperature=0.8, + # tensor_parallel_size=... # for distributed inference + ) + + print(llm("What is the capital of France ?")) + +Please refer to this `Tutorial <https://python.langchain.com/docs/integrations/llms/vllm>`_ for more details. diff --git a/docs/source/serving/usage_stats.md b/docs/source/serving/usage_stats.md new file mode 100644 index 0000000..a1e4b1c --- /dev/null +++ b/docs/source/serving/usage_stats.md @@ -0,0 +1,57 @@ +# Usage Stats Collection + +vLLM collects anonymous usage data by default to help the engineering team better understand which hardware and model configurations are widely used. This data allows them to prioritize their efforts on the most common workloads. The collected data is transparent, does not contain any sensitive information, and will be publicly released for the community's benefit. + +## What data is collected?
+ +You can see the up to date list of data collected by vLLM in the [usage_lib.py](https://github.com/vllm-project/vllm/blob/main/vllm/usage/usage_lib.py). + +Here is an example as of v0.4.0: + +```json +{ + "uuid": "fbe880e9-084d-4cab-a395-8984c50f1109", + "provider": "GCP", + "num_cpu": 24, + "cpu_type": "Intel(R) Xeon(R) CPU @ 2.20GHz", + "cpu_family_model_stepping": "6,85,7", + "total_memory": 101261135872, + "architecture": "x86_64", + "platform": "Linux-5.10.0-28-cloud-amd64-x86_64-with-glibc2.31", + "gpu_count": 2, + "gpu_type": "NVIDIA L4", + "gpu_memory_per_device": 23580639232, + "model_architecture": "OPTForCausalLM", + "vllm_version": "0.3.2+cu123", + "context": "LLM_CLASS", + "log_time": 1711663373492490000, + "source": "production", + "dtype": "torch.float16", + "tensor_parallel_size": 1, + "block_size": 16, + "gpu_memory_utilization": 0.9, + "quantization": null, + "kv_cache_dtype": "auto", + "enable_lora": false, + "enable_prefix_caching": false, + "enforce_eager": false, + "disable_custom_all_reduce": true +} +``` + +You can preview the collected data by running the following command: + +```bash +tail ~/.config/vllm/usage_stats.json +``` + +## Opt-out of Usage Stats Collection + +You can opt-out of usage stats collection by setting the VLLM_NO_USAGE_STATS or DO_NOT_TRACK environment variable, or by creating a ~/.config/vllm/do_not_track file: + +```bash +# Any of the following methods can disable usage stats collection +export VLLM_NO_USAGE_STATS=1 +export DO_NOT_TRACK=1 +mkdir -p ~/.config/vllm && touch ~/.config/vllm/do_not_track +``` diff --git a/examples/api_client.py b/examples/api_client.py new file mode 100644 index 0000000..70ec8c5 --- /dev/null +++ b/examples/api_client.py @@ -0,0 +1,77 @@ +"""Example Python client for vllm.entrypoints.api_server""" + +import argparse +import json +from typing import Iterable, List + +import requests + + +def clear_line(n: int = 1) -> None: + LINE_UP = '\033[1A' + LINE_CLEAR = '\x1b[2K' + for _ in 
range(n): + print(LINE_UP, end=LINE_CLEAR, flush=True) + + +def post_http_request(prompt: str, + api_url: str, + n: int = 1, + stream: bool = False) -> requests.Response: + headers = {"User-Agent": "Test Client"} + pload = { + "prompt": prompt, + "n": n, + "use_beam_search": True, + "temperature": 0.0, + "max_tokens": 16, + "stream": stream, + } + response = requests.post(api_url, headers=headers, json=pload, stream=True) + return response + + +def get_streaming_response(response: requests.Response) -> Iterable[List[str]]: + for chunk in response.iter_lines(chunk_size=8192, + decode_unicode=False, + delimiter=b"\0"): + if chunk: + data = json.loads(chunk.decode("utf-8")) + output = data["text"] + yield output + + +def get_response(response: requests.Response) -> List[str]: + data = json.loads(response.content) + output = data["text"] + return output + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser.add_argument("--n", type=int, default=4) + parser.add_argument("--prompt", type=str, default="San Francisco is a") + parser.add_argument("--stream", action="store_true") + args = parser.parse_args() + prompt = args.prompt + api_url = f"http://{args.host}:{args.port}/generate" + n = args.n + stream = args.stream + + print(f"Prompt: {prompt!r}\n", flush=True) + response = post_http_request(prompt, api_url, n, stream) + + if stream: + num_printed_lines = 0 + for h in get_streaming_response(response): + clear_line(num_printed_lines) + num_printed_lines = 0 + for i, line in enumerate(h): + num_printed_lines += 1 + print(f"Beam candidate {i}: {line!r}", flush=True) + else: + output = get_response(response) + for i, line in enumerate(output): + print(f"Beam candidate {i}: {line!r}", flush=True) diff --git a/examples/aqlm_example.py b/examples/aqlm_example.py new file mode 100644 index 0000000..e7c17fa --- /dev/null +++ 
b/examples/aqlm_example.py @@ -0,0 +1,46 @@ +import argparse + +from vllm import LLM, SamplingParams + + +def main(): + + parser = argparse.ArgumentParser(description='AQLM examples') + + parser.add_argument('--model', + '-m', + type=str, + default=None, + help='model path, as for HF') + parser.add_argument('--choice', + '-c', + type=int, + default=0, + help='known good models by index, [0-4]') + parser.add_argument('--tensor_parallel_size', + '-t', + type=int, + default=1, + help='tensor parallel size') + + args = parser.parse_args() + + models = [ + "ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf", + "ISTA-DASLab/Llama-2-7b-AQLM-2Bit-2x8-hf", + "ISTA-DASLab/Llama-2-13b-AQLM-2Bit-1x16-hf", + "ISTA-DASLab/Mixtral-8x7b-AQLM-2Bit-1x16-hf", + "BlackSamorez/TinyLlama-1_1B-Chat-v1_0-AQLM-2Bit-1x16-hf", + ] + + model = LLM(args.model if args.model is not None else models[args.choice], + tensor_parallel_size=args.tensor_parallel_size) + + sampling_params = SamplingParams(max_tokens=100, temperature=0) + outputs = model.generate("Hello my name is", + sampling_params=sampling_params) + print(outputs[0].outputs[0].text) + + +if __name__ == '__main__': + main() diff --git a/examples/fp8/README.md b/examples/fp8/README.md new file mode 100644 index 0000000..84ad76c --- /dev/null +++ b/examples/fp8/README.md @@ -0,0 +1,96 @@ +# FP8 KV Cache + +This utility extracts the KV cache scaling factors from a quantized HF (Hugging Face) model. The extracted scaling factors are saved to a JSON file, which can later be used by vLLM (variable-length language model) during runtime. This tool is particularly useful when the KV cache data type is FP8 and is intended for use on ROCm (AMD GPU) platforms. + +## Prerequisites + +- Python 3.x +- PyTorch +- NumPy +- Hugging Face Transformers +- Hugging Face Hub +- AMMO + +Before incorporating the FP8 datatype for inference workloads, you must adhere to the following steps: +1. Install all necessary prerequisites and dependencies. +2. 
Convert HF model into a quantized HF model. +3. Extract KV Cache Scaling Factors from quantized HF model. +4. Load KV Cache Scaling Factors into VLLM. + +### 2. Convert HF model into a quantized HF model. +Note: The following steps are adapted from the [TensorRT-LLM repository](https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/quantization/README.md). + +`quantize.py` (examples/fp8/quantizer/quantize.py) uses the quantization toolkit (AMMO) to calibrate the PyTorch models and export TensorRT-LLM checkpoints. Each TensorRT-LLM checkpoint contains a config file (in .json format) and one or several rank weight files (in .safetensors format). + +The detailed quantization toolkit (AMMO) conversion guide for FP8 can be found at `examples/fp8/quantizer/README.md`. + +### 3. Extract KV Cache Scaling Factors from quantized HF model. +`extract_scales.py` (examples/fp8/extract_scales.py) can be utilized to extract the KV cache scaling factors from your quantized HF model, however at the moment, this tool exclusively supports Llama 2 models. It is also important to note the following: +1. **File Structure**: The utility operates under the assumption that all parameters, including KV cache scaling factors, corresponding to a particular Tensor Parallelism (TP) rank are stored in a single file. These files must adhere to a specific naming convention where the TP rank is immediately identified after a specific keyword (e.g., "rank") in the filename. + +2. **TP Decomposition**: The utility assumes consistency between the TP decomposition employed by the quantizer tool and that used by vLLM. + +3. **AMMO Compatibility**: Currently, the generated KV cache scaling factors for AMMO remain uniform across all TP ranks. 
+ +```python +# prerequisites: +# - Quantized HF LLaMa 2 model +python3 examples/fp8/extract_scales.py --help +Usage: extract_scales.py [-h] --quantized_model QUANTIZED_MODEL [--load_format {auto,safetensors,npz,pt}] [--output_dir OUTPUT_DIR] [--output_name OUTPUT_NAME] [--tp_size TP_SIZE] + +KV Scale Extraction Example + +optional arguments: +--quantized_model: Specify either the local path to, or name of, a quantized HF model. It is expected that the quantization format is FP8_E4M3, for use on ROCm (AMD GPU). +Optional arguments: +--cache_dir: Specify a cache directory to use in the event of a HF model download. (Default: None) +--load_format: Specify the format of the model's tensor files containing the KV cache scaling factors. (Choices: auto, safetensors, npz, pt; Default: auto) +--revision: Specify the model's revision number. (Default: None) +--output_dir: Specify the output directory. By default the KV cache scaling factors will be saved in the model directory. (Default: None) +--output_name: Specify the output filename. (Default: kv_cache_scales.json) +--tp_size: Specify the tensor-parallel (TP) size that the quantized model should correspond to. If specified, during KV cache scaling factor extraction the observed TP size will be checked against this and an error will be raised if there is a mismatch. (Default: None) +``` +```python +Example: +python3 examples/fp8/extract_scales.py --quantized_model <QUANTIZED_MODEL_DIR> --tp_size <TP_SIZE> --output_dir <PATH_TO_OUTPUT_DIR> +``` +### 4. Load KV Cache Scaling Factors into VLLM. +This script evaluates the inference throughput of language models using various backends such as vLLM. It measures the time taken to process a given number of prompts and generate sequences for each prompt. The recently generated KV cache scaling factors are now integrated into the benchmarking process and allow for KV cache scaling factors to be utilized for FP8.
+```python +# prerequisites: +# - LLaMa 2 kv_cache_scales.json file + +python3 benchmarks/benchmark_throughput.py --help +usage: benchmark_throughput.py [-h] [--backend {vllm,hf,mii}] [--dataset DATASET] [--input-len INPUT_LEN] [--output-len OUTPUT_LEN] [--model MODEL] + [--tokenizer TOKENIZER] [--quantization {awq,gptq,squeezellm,None}] [--tensor-parallel-size TENSOR_PARALLEL_SIZE] [--n N] + [--use-beam-search] [--num-prompts NUM_PROMPTS] [--seed SEED] [--hf-max-batch-size HF_MAX_BATCH_SIZE] [--trust-remote-code] + [--max-model-len MAX_MODEL_LEN] [--dtype {auto,half,float16,bfloat16,float,float32}] [--enforce-eager] [--kv-cache-dtype {auto,fp8}] + [--quantization-param-path KV_CACHE_quantization_param_path] + +Benchmark Throughput Example +optional arguments: + -h, --help show this help message and exit + --backend {vllm,hf,mii} + --dataset DATASET Path to the dataset. + --input-len INPUT_LEN Input prompt length for each request + --output-len OUTPUT_LEN Output length for each request. Overrides the output length from the dataset. + --model MODEL + --tokenizer TOKENIZER + --quantization {awq,gptq,squeezellm,None}, -q {awq,gptq,squeezellm,None} + --tensor-parallel-size TENSOR_PARALLEL_SIZE, -tp TENSOR_PARALLEL_SIZE + --n N Number of generated sequences per prompt. + --use-beam-search + --num-prompts NUM_PROMPTS Number of prompts to process. + --seed SEED + --hf-max-batch-size HF_MAX_BATCH_SIZE Maximum batch size for HF backend. + --trust-remote-code trust remote code from huggingface + --max-model-len MAX_MODEL_LEN Maximum length of a sequence (including prompt and output). If None, will be derived from the model. + --dtype {auto,half,float16,bfloat16,float,float32} data type for model weights and activations. The "auto" option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. + --enforce-eager enforce eager execution + --kv-cache-dtype {auto,fp8} Data type for kv cache storage. If "auto", will use model data type. 
FP8_E5M2 (without scaling) is only supported on cuda version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead supported for common inference criteria. + --quantization-param-path QUANT_PARAM_JSON Path to the JSON file containing the KV cache scaling factors. This should generally be supplied, when KV cache dtype is FP8. Otherwise, KV cache scaling factors default to 1.0, which may cause accuracy issues. FP8_E5M2 (without scaling) is only supported on cuda version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead supported for common inference criteria. +``` +``` +Example: +python3 benchmarks/benchmark_throughput.py --input-len <INPUT_LEN> --output-len <OUTPUT_LEN> -tp <TENSOR_PARALLEL_SIZE> --kv-cache-dtype fp8 --quantization-param-path <path/to/kv_cache_scales.json> --model <path-to-llama2> +``` diff --git a/examples/fp8/extract_scales.py b/examples/fp8/extract_scales.py new file mode 100644 index 0000000..1eb961a --- /dev/null +++ b/examples/fp8/extract_scales.py @@ -0,0 +1,367 @@ +import argparse +import glob +import json +import os +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple + +import numpy as np +import torch +from safetensors.torch import safe_open + +from vllm.model_executor.layers.quantization.schema import QuantParamSchema + + +# Adapted from vllm/model_executor/model_loader/weight_utils.py +# The main differences are that we add the NPZ format and simplify +# its functionality drastically for our purposes (e.g. we assume that +# the quantized model exists locally and there is no need to download it) +def _prepare_hf_weights( + quantized_model_dir: str, + load_format: str = "auto", + fall_back_to_pt: bool = True, +) -> Tuple[str, List[str], bool]: + if not os.path.isdir(quantized_model_dir): + raise FileNotFoundError( + f"The quantized model directory `{quantized_model_dir}` " + "does not exist.") + use_safetensors = False + # Some quantized models use .pt files for storing the weights.
+ if load_format == "auto": + allow_patterns = ["*.safetensors", "*.bin"] + elif load_format == "safetensors": + use_safetensors = True + allow_patterns = ["*.safetensors"] + elif load_format == "pt": + allow_patterns = ["*.pt"] + elif load_format == "npz": + allow_patterns = ["*.npz"] + else: + raise ValueError(f"Unknown load_format: {load_format}") + if fall_back_to_pt: + allow_patterns += ["*.pt"] + + hf_weights_files: List[str] = [] + for pattern in allow_patterns: + hf_weights_files += glob.glob( + os.path.join(quantized_model_dir, pattern)) + if len(hf_weights_files) > 0: + if pattern == "*.safetensors": + use_safetensors = True + break + + if not use_safetensors: + # Exclude files that are not needed for inference. + # https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233 + blacklist = [ + "training_args.bin", + "optimizer.bin", + "optimizer.pt", + "scheduler.pt", + "scaler.pt", + ] + hf_weights_files = [ + f for f in hf_weights_files + if not any(f.endswith(x) for x in blacklist) + ] + + if len(hf_weights_files) == 0: + raise RuntimeError( + f"Cannot find any model weights with `{quantized_model_dir}`") + + return hf_weights_files, use_safetensors + + +# Adapted from vllm/model_executor/model_loader/weight_utils.py +def _hf_tensorfile_iterator(filename: str, load_format: str, + use_safetensors: bool): + if load_format == "npz": + assert not use_safetensors + with np.load(filename) as data: + for name in data.files: + param = torch.from_numpy(data[name]) + yield name, param + elif use_safetensors: + with safe_open(filename, framework="pt") as f: + for name in f.keys(): # NOQA: SIM118 + param = f.get_tensor(name) + yield name, param + else: + state = torch.load(filename, map_location="cpu") + for name, param in state.items(): + yield name, param + del state + torch.cuda.empty_cache() + + +def _kv_scales_extractor( + hf_tensor_files: Iterable[str], + use_safetensors: bool, + rank_keyword: str = "rank", + 
expected_tp_size: Optional[int] = None) -> Dict[int, Dict[int, float]]: + """ + Given a list of files containing tensor data, attempt to extract KV cache + scales from these files. Intended as a helper function taking in the output + from _prepare_hf_weights. + Args: + rank_keyword Matches the number immediately after this keyword in the + tensor filename to determine the TP rank corresponding + to said tensor file + expected_tp_size If specified, the TP size of the tensor files is checked + against this and an error is raised if they don't match. + Returns a dictionary mapping TP ranks to their relevant KV cache scales. + The per-rank scales are themselves represented as a dictionary of layer + indices to the respective per-layer scale. + """ + for char in rank_keyword: + assert not char.isdecimal( + ), f"Rank keyword {rank_keyword} contains a numeric character!" + rank_scales_map = {} + for tensor_file in hf_tensor_files: + try: + rank_idx = tensor_file.find(rank_keyword) + if rank_idx != -1: + start_idx = rank_idx + len(rank_keyword) + stop_idx = start_idx + while stop_idx < len( + tensor_file) and tensor_file[stop_idx].isdecimal(): + stop_idx += 1 + if stop_idx == start_idx: + raise RuntimeError("Did not find rank # in filename.") + rank = int(tensor_file[start_idx:stop_idx]) + elif len(hf_tensor_files) == 1: + # Since there is only one tensor file, we can assume + # that it's intended for TP rank 0 + rank = 0 + else: + raise RuntimeError( + f"Filename does not contain '{rank_keyword}'.") + except RuntimeError: + print("Unable to determine TP rank " + f"corresponding to file '{tensor_file}'") + raise + + if rank not in rank_scales_map: + layer_scales_map = {} + rank_scales_map[rank] = layer_scales_map + else: + raise RuntimeError( + f"Tensor file '{tensor_file}' shares TP rank {rank} " + "with another tensor file.") + + module_delimiter = ":" if args.load_format == "npz" else "." 
+ for name, param in _hf_tensorfile_iterator(tensor_file, + args.load_format, + use_safetensors): + if "kv_cache_scaling_factor" in name: + nums = [ + int(s) for s in name.split(module_delimiter) + if s.isdecimal() + ] + assert len( + nums) == 1, f"Could not determine layer idx for {name}" + layer_idx = nums[0] + assert layer_idx not in layer_scales_map, f"Duplicate scaling"\ + f" factor corresponding to layer {layer_idx}" + try: + layer_scales_map[layer_idx] = param.item() + except RuntimeError: + print( + "This utility supports only per-tensor scalar scales " + f"for now. The tensor\n {name} = {param} \nis an " + "invalid scale factor.") + raise + + if all( + len(layer_scales_map) == 0 + for layer_scales_map in rank_scales_map.values()): + # Note: this is true even if the rank_scales_map is empty + print("WARNING: No KV cache scale factors found. No output saved.") + return None + empirical_tp_world_size = max(rank_scales_map.keys()) + 1 + if expected_tp_size is not None: + assert expected_tp_size == empirical_tp_world_size, \ + f"User expected TP world size = {expected_tp_size} " \ + "from model but tool is expecting TP world size = " \ + f"{empirical_tp_world_size} from model instead." + for i in range(empirical_tp_world_size): + assert i in rank_scales_map, "Expected TP world size = "\ + f"{empirical_tp_world_size} but did not find KV " \ + f"cache scaling factors for TP rank {i}" + print(f"Found TP world size = {empirical_tp_world_size} " + "when extracting KV cache scales!") + return rank_scales_map + + +def _metadata_extractor(quantized_model_dir: str, + metadata_extract_fns: \ + Dict[str, Callable[[Dict[str, Any]], Any]]) \ + -> Dict[str, Any]: + """ + Given a directory containing quantized model files, this function + aims to extract metadata from the JSON files within this directory. + Each JSON file is expected to represent a dictionary in JSON + format (referred to as a "JSON-dictionary"). 
Metadata extraction is + defined by a dictionary called metadata_extract_fns, where each + metadata field name is mapped to an extraction function. + + These extraction functions are designed to take a JSON-dictionary + as their only argument and return the corresponding metadata. + While extraction functions are permitted to raise exceptions, they + should only raise a KeyError or ValueError if the metadata field + cannot be extracted from the current JSON-dictionary, yet there's + a possibility of finding it in another JSON-dictionary. + + The function returns a dictionary that maps metadata fields to + their extracted data. The keys of this dictionary correspond exactly + to those in metadata_extract_fns. If any fields fail to be extracted, + their corresponding values are set to None, and a warning is printed. + """ + if not os.path.isdir(quantized_model_dir): + raise FileNotFoundError( + f"The quantized model directory `{quantized_model_dir}` " + "does not exist.") + metadata_files = glob.glob(os.path.join(quantized_model_dir, "*.json")) + + result = {} + for file in metadata_files: + with open(file) as f: + try: + metadata = json.load(f) + except json.JSONDecodeError: + print(f"Could not parse `{file}` as a valid metadata file," + " skipping it.") + continue + if not isinstance(metadata, dict): + print(f"The file `{file}` does not correspond to a " + "JSON-serialized dictionary, skipping it.") + continue + for metadata_name, extract_fn in metadata_extract_fns.items(): + try: + metadata_info = extract_fn(metadata) + if metadata_name not in result: + result[metadata_name] = metadata_info + elif metadata_info != result[metadata_name]: + raise RuntimeError( + "Metadata mismatch! 
Originally found " + f"{metadata_name} = {result[metadata_name]} but " + f"now found {metadata_name} = {metadata_info} in " + f"`{file}`") + except KeyError: + # It is possible that a given file does not contain some + # of our selected metadata as it could be located in some + # other metadata file. + # 'EFINAE': extract_fn failure is not an error. + pass + except ValueError: + # See above. + pass + + # Warn if we cannot find any of the requested metadata + for metadata_name in metadata_extract_fns: + if metadata_name not in result: + print("WARNING: Unable to find requested metadata field " + f"`{metadata_name}`, setting it to None.") + result[metadata_name] = None + + return result + + +def main(args): + metadata_extract_fns = { + "model_type": lambda json_dict: json_dict["layers"][0]["decoder_type"], + "tp_size": lambda json_dict: int(json_dict["tensor_parallel"]), + "model_dtype": lambda json_dict: json_dict["dtype"] + } + recovered_metadata = _metadata_extractor(args.quantized_model, + metadata_extract_fns) + if args.tp_size is not None: + metadata_tp_size = recovered_metadata["tp_size"] + if metadata_tp_size is not None: + assert args.tp_size == metadata_tp_size, \ + f"User expected TP world size = {args.tp_size} " \ + f"but found TP world size = {metadata_tp_size} from metadata!" + expected_tp_size = args.tp_size or recovered_metadata["tp_size"] + rank_keyword = "rank" + hf_tensor_files, use_safetensors = _prepare_hf_weights( + args.quantized_model, args.load_format) + rank_scales_map = _kv_scales_extractor(hf_tensor_files, use_safetensors, + rank_keyword, expected_tp_size) + # Postprocess: formatting to the current schema. Consider pulling it + # out into a dedicated function should it ever become more complicated. 
+ rank_scales_map = { + rank: {k: scale[k] + for k in sorted(scale.keys())} + for rank, scale in rank_scales_map.items() + } + # TODO: Expand this with activation and weights scaling factors when + # they are used in the future + schema = QuantParamSchema( + model_type=recovered_metadata["model_type"], + kv_cache={ + "dtype": ("float8_e4m3fn" if len(rank_scales_map) > 0 else + recovered_metadata["model_dtype"]), + "scaling_factor": + rank_scales_map + }, + ) + + if args.output_dir is None: + output_file = os.path.join(args.quantized_model, args.output_name) + else: + if not os.path.isdir(args.output_dir): + os.makedirs(args.output_dir, exist_ok=True) + output_file = os.path.join(args.output_dir, args.output_name) + + with open(output_file, 'w') as f: + f.write(schema.model_dump_json(indent=4)) + print(f"Completed! KV cache scaling factors saved to {output_file}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="This simple utility extracts the " + "KV cache scaling factors from a quantized HF model " + "and saves them to a JSON file compatible with later " + "use by vLLM (pass this file to the appropriate " + "runtime typically using the argument " + "--quantization-param-path ). This is only used " + "if the KV cache dtype is FP8 and on ROCm (AMD GPU).") + parser.add_argument( + "--quantized_model", + help="Specify the directory containing a single quantized HF model. " + "It is expected that the quantization format is FP8_E4M3, for use " + "on ROCm (AMD GPU).", + required=True) + parser.add_argument( + "--load_format", + help="Optionally specify the format of the model's tensor files " + "containing the KV cache scaling factors.", + choices=["auto", "safetensors", "npz", "pt"], + default="auto") + parser.add_argument( + "--output_dir", + help="Optionally specify the output directory. 
By default the " + "KV cache scaling factors will be saved in the model directory, " + "however you can override this behavior here.", + default=None) + parser.add_argument( + "--output_name", + help="Optionally specify the output filename.", + # TODO: Change this once additional scaling factors are enabled + default="kv_cache_scales.json") + parser.add_argument( + "--tp_size", + help="Optionally specify the tensor-parallel (TP) size that the " + "quantized model should correspond to. If specified, during KV " + "cache scaling factor extraction the observed TP size will be " + "checked against this and an error will be raised if there is " + "a mismatch. If not specified, the quantized model's expected " + "TP size is instead inferred from the largest TP rank observed. " + "The expected TP size is cross-checked against the TP ranks " + "observed in the quantized model and an error is raised if any " + "discrepancies are found.", + default=None, + type=int) + args = parser.parse_args() + + main(args) diff --git a/examples/fp8/quantizer/README.md b/examples/fp8/quantizer/README.md new file mode 100644 index 0000000..8f89a74 --- /dev/null +++ b/examples/fp8/quantizer/README.md @@ -0,0 +1,32 @@ +### Quantizer Utilities +`quantize.py`: NVIDIA Quantization utilities using AMMO, ported from TensorRT-LLM: +`https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/quantization/quantize.py` + +### Prerequisite + +#### AMMO (AlgorithMic Model Optimization) Installation: nvidia-ammo 0.7.1 or later +`pip install --no-cache-dir --extra-index-url https://pypi.nvidia.com nvidia-ammo` + +#### AMMO Download (code and docs) +`https://developer.nvidia.com/downloads/assets/cuda/files/nvidia-ammo/nvidia_ammo-0.5.0.tar.gz` +`https://developer.nvidia.com/downloads/assets/cuda/files/nvidia-ammo/nvidia_ammo-0.7.1.tar.gz` + +### Usage + +#### Run on H100 system for speed if FP8; number of GPUs depends on the model size + +#### Example: quantize Llama2-7b model from HF to FP8 with FP8 KV 
Cache: +`python quantize.py --model_dir ./ll2-7b --dtype float16 --qformat fp8 --kv_cache_dtype fp8 --output_dir ./ll2_7b_fp8 --calib_size 512 --tp_size 1` + +Outputs: model structure, quantized model & parameters (with scaling factors) are in JSON and Safetensors (npz is generated only for the reference) +``` +# ll ./ll2_7b_fp8/ +total 19998244 +drwxr-xr-x 2 root root 4096 Feb 7 01:08 ./ +drwxrwxr-x 8 1060 1061 4096 Feb 7 01:08 ../ +-rw-r--r-- 1 root root 176411 Feb 7 01:08 llama_tp1.json +-rw-r--r-- 1 root root 13477087480 Feb 7 01:09 llama_tp1_rank0.npz +-rw-r--r-- 1 root root 7000893272 Feb 7 01:08 rank0.safetensors +# +``` + diff --git a/examples/fp8/quantizer/quantize.py b/examples/fp8/quantizer/quantize.py new file mode 100644 index 0000000..92ff4e4 --- /dev/null +++ b/examples/fp8/quantizer/quantize.py @@ -0,0 +1,368 @@ +# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # noqa: E501 +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Adapted from examples/quantization/hf_ptq.py +""" + +import argparse +import copy +import json +import random +import time + +import ammo.torch.quantization as atq +import numpy as np +import torch +from ammo.torch.export import export_model_config +from datasets import load_dataset +from torch.utils.data import DataLoader +from transformers import AutoModelForCausalLM, AutoTokenizer + +RAND_SEED = 1234 +MAX_SEQ_LEN = 2048 + +EMPTY_CFG = { + "quant_cfg": { + "*weight_quantizer": { + "enable": False, + }, + "*input_quantizer": { + "enable": False + }, + "*lm_head*": { + "enable": False + }, + "*output_layer*": { + "enable": False + }, + "default": { + "enable": False + }, + }, + "algorithm": "max", +} + +KV_CACHE_CFG = { + "*.query_key_value.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, + "*.Wqkv.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, + "*.W_pack.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, + "*.c_attn.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, + "*.k_proj.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, + "*.v_proj.output_quantizer": { + "num_bits": 8, + "axis": None, + "enable": True + }, +} + +QUANT_CFG_CHOICES = { + "int8_sq": atq.INT8_SMOOTHQUANT_CFG, + "fp8": atq.FP8_DEFAULT_CFG, + "int4_awq": atq.INT4_AWQ_CFG, + "w4a8_awq": atq.W4A8_AWQ_BETA_CFG, + "int8_wo": EMPTY_CFG, + "int4_wo": EMPTY_CFG, + "full_prec": EMPTY_CFG, +} + +MODEL_NAME_PATTERN_MAP = { + "GPT2": "gpt2", + "Xverse": "llama", + "Llama": "llama", + "Mistral": "llama", + "GPTJ": "gptj", + "FalconForCausalLM": "falcon", + "RWForCausalLM": "falcon", + "baichuan": "baichuan", + "MPT": "mpt", + "Bloom": "bloom", + "ChatGLM": "chatglm", + "QWen": "qwen", +} + + +def get_tokenizer(ckpt_path, max_seq_len=MAX_SEQ_LEN, model_type=None): + print(f"Initializing tokenizer from {ckpt_path}") + tokenizer = AutoTokenizer.from_pretrained( + ckpt_path, 
+ model_max_length=max_seq_len, + padding_side="left", + trust_remote_code=True, + ) + if model_type and model_type == "qwen": + # qwen use token id 151643 as pad and eos tokens + tokenizer.pad_token = tokenizer.convert_ids_to_tokens(151643) + tokenizer.eos_token = tokenizer.convert_ids_to_tokens(151643) + + # can't set attribute 'pad_token' for "" + if tokenizer.pad_token != "": + tokenizer.pad_token = tokenizer.eos_token + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + assert (tokenizer.pad_token + is not None), f"Pad token for {model_type} cannot be set!" + + return tokenizer + + +def get_model(ckpt_path, dtype="fp16", device="cuda"): + print(f"Initializing model from {ckpt_path}") + if dtype == "bf16" or dtype == "bfloat16": + dtype = torch.bfloat16 + elif dtype == "fp16" or dtype == "float16": + dtype = torch.float16 + elif dtype == "fp32" or dtype == "float32": + dtype = torch.float32 + else: + raise NotImplementedError(f"Unknown dtype {dtype}") + + # model_kwargs = {"torch_dtype": dtype} + model_kwargs = {"torch_dtype": "auto"} + + model = AutoModelForCausalLM.from_pretrained(ckpt_path, + device_map="auto", + **model_kwargs, + trust_remote_code=True) + model.eval() + + model_dtype = next(model.parameters()).dtype + if dtype != model_dtype: + print("[TensorRT-LLM][WARNING] The manually set model data type is " + f"{dtype}, but the data type of the HuggingFace model is " + f"{model_dtype}.") + + return model + + +def get_model_type(model): + for k, v in MODEL_NAME_PATTERN_MAP.items(): + if k.lower() in type(model).__name__.lower(): + return v + return None + + +def get_calib_dataloader(data="cnn_dailymail", + tokenizer=None, + batch_size=1, + calib_size=512, + block_size=512, + device=None): + print("Loading calibration dataset") + if data == "pileval": + dataset = load_dataset( + "json", + data_files="https://the-eye.eu/public/AI/pile/val.jsonl.zst", + split="train") + dataset = dataset["text"][:calib_size] + elif data == 
"cnn_dailymail": + dataset = load_dataset("cnn_dailymail", name="3.0.0", split="train") + dataset = dataset["article"][:calib_size] + else: + raise NotImplementedError + + batch_encoded = tokenizer.batch_encode_plus(dataset, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=block_size) + if device: + batch_encoded = batch_encoded.to(device) + batch_encoded = batch_encoded["input_ids"] + + calib_dataloader = DataLoader(batch_encoded, + batch_size=batch_size, + shuffle=False) + + return calib_dataloader + + +def quantize_model(model, quant_cfg, calib_dataloader=None): + + def calibrate_loop(): + if calib_dataloader is None: + return + """Adjusts weights and scaling factors based on selected algorithms.""" + for idx, data in enumerate(calib_dataloader): + print(f"Calibrating batch {idx}") + model(data) + + print("Starting quantization...") + start_time = time.time() + atq.quantize(model, quant_cfg, forward_loop=calibrate_loop) + end_time = time.time() + print("Quantization done. Total time used: {:.2f} s.".format(end_time - + start_time)) + + return model + + +def main(args): + if not torch.cuda.is_available(): + raise EnvironmentError("GPU is required for inference.") + + random.seed(RAND_SEED) + np.random.seed(RAND_SEED) + + model = get_model(args.model_dir, args.dtype, args.device) + model_type = get_model_type(model) + tokenizer = get_tokenizer(args.model_dir, model_type=model_type) + + if args.qformat in ["full_prec", "int8_wo", "int4_wo" + ] and args.kv_cache_dtype is None: + print(f"No quantization applied, export {args.dtype} model") + else: + if "awq" in args.qformat: + if args.calib_size > 32: + print("AWQ calibration could take longer with calib_size = " + f"{args.calib_size}, Using calib_size=32 instead") + args.calib_size = 32 + print("\nAWQ calibration could take longer than other calibration " + "methods. Please increase the batch size to speed up the " + "calibration process. 
Batch size can be set by adding the " + "argument --batch_size to the command line.\n") + + calib_dataloader = get_calib_dataloader( + tokenizer=tokenizer, + batch_size=args.batch_size, + calib_size=args.calib_size, + device=args.device, + ) + + if args.qformat in QUANT_CFG_CHOICES: + quant_cfg = QUANT_CFG_CHOICES[args.qformat] + else: + raise ValueError( + f"Unsupported quantization format: {args.qformat}") + + if "awq" in args.qformat: + quant_cfg = copy.deepcopy(QUANT_CFG_CHOICES[args.qformat]) + weight_quantizer = quant_cfg["quant_cfg"][ + "*weight_quantizer"] # type: ignore + if isinstance(weight_quantizer, list): + weight_quantizer = weight_quantizer[0] + weight_quantizer["block_sizes"][-1] = args.awq_block_size + + if args.kv_cache_dtype is not None: + if args.kv_cache_dtype == "fp8": + for value in KV_CACHE_CFG.values(): + value.update({"num_bits": (4, 3)}) # type: ignore + quant_cfg["quant_cfg"].update(KV_CACHE_CFG) # type: ignore + + print(quant_cfg) + + model = quantize_model(model, quant_cfg, calib_dataloader) + + with torch.inference_mode(): + if model_type is None: + print(f"Unknown model type {type(model).__name__}. 
Continue " + "exporting...") + model_type = f"unknown:{type(model).__name__}" + + export_path = args.output_dir + start_time = time.time() + + if args.qformat == "int4_awq" and model_type == "qwen": + torch.save(model.state_dict(), export_path) + else: + export_npz = (model_type not in [ + 'gptj', 'falcon', 'chatglm', 'mpt', 'llama', 'baichuan' + ]) + + # export safetensors + export_model_config( + model, + model_type, + getattr(torch, args.dtype), + export_dir=export_path, + inference_tensor_parallel=args.tp_size, + inference_pipeline_parallel=args.pp_size, + # export_tensorrt_llm_config=(not export_npz), + export_tensorrt_llm_config=False, + export_npz=export_npz) + + # Workaround for wo quantization + if args.qformat in ["int8_wo", "int4_wo", "full_prec"]: + with open(f"{export_path}/config.json", 'r') as f: + tensorrt_llm_config = json.load(f) + if args.qformat == "int8_wo": + tensorrt_llm_config["quantization"]["quant_algo"] = 'W8A16' + elif args.qformat == "int4_wo": + tensorrt_llm_config["quantization"]["quant_algo"] = 'W4A16' + else: + tensorrt_llm_config["quantization"]["quant_algo"] = None + with open(f"{export_path}/config.json", "w") as f: + json.dump(tensorrt_llm_config, f, indent=4) + + end_time = time.time() + print("Quantized model exported to {} \nTotal time used {:.2f} s.". 
+ format(export_path, end_time - start_time)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--model_dir", + help="Specify where the HuggingFace model is", + required=True) + parser.add_argument("--device", default="cuda") + parser.add_argument("--dtype", help="Model data type.", default="float16") + parser.add_argument( + "--qformat", + help="Quantization format.", + default="full_prec", + choices=[ + "fp8", "int8_sq", "int4_awq", "w4a8_awq", "int8_wo", "int4_wo", + "full_prec" + ], + ) + parser.add_argument("--batch_size", + help="Batch size for calibration.", + type=int, + default=1) + parser.add_argument("--calib_size", + help="Number of samples for calibration.", + type=int, + default=512) + parser.add_argument("--output_dir", default="exported_model") + parser.add_argument("--tp_size", type=int, default=1) + parser.add_argument("--pp_size", type=int, default=1) + parser.add_argument("--awq_block_size", type=int, default=128) + parser.add_argument("--kv_cache_dtype", + help="KV Cache dtype.", + default=None, + choices=["int8", "fp8", None]) + args = parser.parse_args() + + main(args) diff --git a/examples/gradio_openai_chatbot_webserver.py b/examples/gradio_openai_chatbot_webserver.py new file mode 100644 index 0000000..8ceb8f6 --- /dev/null +++ b/examples/gradio_openai_chatbot_webserver.py @@ -0,0 +1,82 @@ +import argparse + +import gradio as gr +from openai import OpenAI + +# Argument parser setup +parser = argparse.ArgumentParser( + description='Chatbot Interface with Customizable Parameters') +parser.add_argument('--model-url', + type=str, + default='http://localhost:8000/v1', + help='Model URL') +parser.add_argument('-m', + '--model', + type=str, + required=True, + help='Model name for the chatbot') +parser.add_argument('--temp', + type=float, + default=0.8, + help='Temperature for text generation') +parser.add_argument('--stop-token-ids', + type=str, + default='', + help='Comma-separated 
stop token IDs') +parser.add_argument("--host", type=str, default=None) +parser.add_argument("--port", type=int, default=8001) + +# Parse the arguments +args = parser.parse_args() + +# Set OpenAI's API key and API base to use vLLM's API server. +openai_api_key = "EMPTY" +openai_api_base = args.model_url + +# Create an OpenAI client to interact with the API server +client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, +) + + +def predict(message, history): + # Convert chat history to OpenAI format + history_openai_format = [{ + "role": "system", + "content": "You are a great ai assistant." + }] + for human, assistant in history: + history_openai_format.append({"role": "user", "content": human}) + history_openai_format.append({ + "role": "assistant", + "content": assistant + }) + history_openai_format.append({"role": "user", "content": message}) + + # Create a chat completion request and send it to the API server + stream = client.chat.completions.create( + model=args.model, # Model name to use + messages=history_openai_format, # Chat history + temperature=args.temp, # Temperature for text generation + stream=True, # Stream response + extra_body={ + 'repetition_penalty': + 1, + 'stop_token_ids': [ + int(id.strip()) for id in args.stop_token_ids.split(',') + if id.strip() + ] if args.stop_token_ids else [] + }) + + # Read and return generated text from response stream + partial_message = "" + for chunk in stream: + partial_message += (chunk.choices[0].delta.content or "") + yield partial_message + + +# Create and launch a chat interface with Gradio +gr.ChatInterface(predict).queue().launch(server_name=args.host, + server_port=args.port, + share=True) diff --git a/examples/gradio_webserver.py b/examples/gradio_webserver.py new file mode 100644 index 0000000..54e9075 --- /dev/null +++ b/examples/gradio_webserver.py @@ -0,0 +1,52 @@ +import argparse +import json + +import gradio as gr +import requests + + +def http_bot(prompt): + headers = {"User-Agent": 
"vLLM Client"} + pload = { + "prompt": prompt, + "stream": True, + "max_tokens": 128, + } + response = requests.post(args.model_url, + headers=headers, + json=pload, + stream=True) + + for chunk in response.iter_lines(chunk_size=8192, + decode_unicode=False, + delimiter=b"\0"): + if chunk: + data = json.loads(chunk.decode("utf-8")) + output = data["text"][0] + yield output + + +def build_demo(): + with gr.Blocks() as demo: + gr.Markdown("# vLLM text completion demo\n") + inputbox = gr.Textbox(label="Input", + placeholder="Enter text and press ENTER") + outputbox = gr.Textbox(label="Output", + placeholder="Generated result from the model") + inputbox.submit(http_bot, [inputbox], [outputbox]) + return demo + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default=None) + parser.add_argument("--port", type=int, default=8001) + parser.add_argument("--model-url", + type=str, + default="http://localhost:8000/generate") + args = parser.parse_args() + + demo = build_demo() + demo.queue().launch(server_name=args.host, + server_port=args.port, + share=True) diff --git a/examples/llava_example.py b/examples/llava_example.py new file mode 100644 index 0000000..3d22b49 --- /dev/null +++ b/examples/llava_example.py @@ -0,0 +1,90 @@ +import argparse +import os +import subprocess + +import torch + +from vllm import LLM +from vllm.sequence import MultiModalData + +# The assets are located at `s3://air-example-data-2/vllm_opensource_llava/`. + + +def run_llava_pixel_values(): + llm = LLM( + model="llava-hf/llava-1.5-7b-hf", + image_input_type="pixel_values", + image_token_id=32000, + image_input_shape="1,3,336,336", + image_feature_size=576, + ) + + prompt = "" * 576 + ( + "\nUSER: What is the content of this image?\nASSISTANT:") + + # This should be provided by another online or offline component. 
+ images = torch.load("images/stop_sign_pixel_values.pt") + + outputs = llm.generate(prompt, + multi_modal_data=MultiModalData( + type=MultiModalData.Type.IMAGE, data=images)) + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + + +def run_llava_image_features(): + llm = LLM( + model="llava-hf/llava-1.5-7b-hf", + image_input_type="image_features", + image_token_id=32000, + image_input_shape="1,576,1024", + image_feature_size=576, + ) + + prompt = "" * 576 + ( + "\nUSER: What is the content of this image?\nASSISTANT:") + + # This should be provided by another online or offline component. + images = torch.load("images/stop_sign_image_features.pt") + + outputs = llm.generate(prompt, + multi_modal_data=MultiModalData( + type=MultiModalData.Type.IMAGE, data=images)) + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + + +def main(args): + if args.type == "pixel_values": + run_llava_pixel_values() + else: + run_llava_image_features() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Demo on Llava") + parser.add_argument("--type", + type=str, + choices=["pixel_values", "image_features"], + default="pixel_values", + help="image input type") + args = parser.parse_args() + # Download from s3 + s3_bucket_path = "s3://air-example-data-2/vllm_opensource_llava/" + local_directory = "images" + + # Make sure the local directory exists or create it + os.makedirs(local_directory, exist_ok=True) + + # Use AWS CLI to sync the directory, assume anonymous access + subprocess.check_call([ + "aws", + "s3", + "sync", + s3_bucket_path, + local_directory, + "--no-sign-request", + ]) + main(args) diff --git a/examples/llm_engine_example.py b/examples/llm_engine_example.py new file mode 100644 index 0000000..a81c4b3 --- /dev/null +++ b/examples/llm_engine_example.py @@ -0,0 +1,62 @@ +import argparse +from typing import List, Tuple + +from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams + 
+ +def create_test_prompts() -> List[Tuple[str, SamplingParams]]: + """Create a list of test prompts with their sampling parameters.""" + return [ + ("A robot may not injure a human being", + SamplingParams(temperature=0.0, logprobs=1, prompt_logprobs=1)), + ("To be or not to be,", + SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)), + ("What is the meaning of life?", + SamplingParams(n=2, + best_of=5, + temperature=0.8, + top_p=0.95, + frequency_penalty=0.1)), + ("It is only with the heart that one can see rightly", + SamplingParams(n=3, best_of=3, use_beam_search=True, + temperature=0.0)), + ] + + +def process_requests(engine: LLMEngine, + test_prompts: List[Tuple[str, SamplingParams]]): + """Continuously process a list of prompts and handle the outputs.""" + request_id = 0 + + while test_prompts or engine.has_unfinished_requests(): + if test_prompts: + prompt, sampling_params = test_prompts.pop(0) + engine.add_request(str(request_id), prompt, sampling_params) + request_id += 1 + + request_outputs: List[RequestOutput] = engine.step() + + for request_output in request_outputs: + if request_output.finished: + print(request_output) + + +def initialize_engine(args: argparse.Namespace) -> LLMEngine: + """Initialize the LLMEngine from the command line arguments.""" + engine_args = EngineArgs.from_cli_args(args) + return LLMEngine.from_engine_args(engine_args) + + +def main(args: argparse.Namespace): + """Main function that sets up and runs the prompt processing.""" + engine = initialize_engine(args) + test_prompts = create_test_prompts() + process_requests(engine, test_prompts) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Demo on using the LLMEngine class directly') + parser = EngineArgs.add_cli_args(parser) + args = parser.parse_args() + main(args) diff --git a/examples/logging_configuration.md b/examples/logging_configuration.md new file mode 100644 index 0000000..75b4b31 --- /dev/null +++ 
b/examples/logging_configuration.md @@ -0,0 +1,178 @@ +# Logging Configuration + +vLLM leverages Python's `logging.config.dictConfig` functionality to enable +robust and flexible configuration of the various loggers used by vLLM. + +vLLM offers two environment variables that can be used to accommodate a range +of logging configurations that range from simple-and-inflexible to +more-complex-and-more-flexible. + +- No vLLM logging (simple and inflexible) + - Set `VLLM_CONFIGURE_LOGGING=0` (leaving `VLLM_LOGGING_CONFIG_PATH` unset) +- vLLM's default logging configuration (simple and inflexible) + - Leave `VLLM_CONFIGURE_LOGGING` unset or set `VLLM_CONFIGURE_LOGGING=1` +- Fine-grained custom logging configuration (more complex, more flexible) + - Leave `VLLM_CONFIGURE_LOGGING` unset or set `VLLM_CONFIGURE_LOGGING=1` and + set `VLLM_LOGGING_CONFIG_PATH=` + + +## Logging Configuration Environment Variables + +### `VLLM_CONFIGURE_LOGGING` + +`VLLM_CONFIGURE_LOGGING` controls whether or not vLLM takes any action to +configure the loggers used by vLLM. This functionality is enabled by default, +but can be disabled by setting `VLLM_CONFIGURE_LOGGING=0` when running vLLM. + +If `VLLM_CONFIGURE_LOGGING` is enabled and no value is given for +`VLLM_LOGGING_CONFIG_PATH`, vLLM will use built-in default configuration to +configure the root vLLM logger. By default, no other vLLM loggers are +configured and, as such, all vLLM loggers defer to the root vLLM logger to make +all logging decisions. + +If `VLLM_CONFIGURE_LOGGING` is disabled and a value is given for +`VLLM_LOGGING_CONFIG_PATH`, an error will occur while starting vLLM. + +### `VLLM_LOGGING_CONFIG_PATH` + +`VLLM_LOGGING_CONFIG_PATH` allows users to specify a path to a JSON file of +alternative, custom logging configuration that will be used instead of vLLM's +built-in default logging configuration. 
The logging configuration should be +provided in JSON format following the schema specified by Python's [logging +configuration dictionary +schema](https://docs.python.org/3/library/logging.config.html#dictionary-schema-details). + +If `VLLM_LOGGING_CONFIG_PATH` is specified, but `VLLM_CONFIGURE_LOGGING` is +disabled, an error will occur while starting vLLM. + + +## Examples + +### Example 1: Customize vLLM root logger + +For this example, we will customize the vLLM root logger to use +[`python-json-logger`](https://github.com/madzak/python-json-logger) to log to +STDOUT of the console in JSON format with a log level of `INFO`. + +To begin, first, create an appropriate JSON logging configuration file: + +**/path/to/logging_config.json:** + +```json +{ + "formatters": { + "json": { + "class": "pythonjsonlogger.jsonlogger.JsonFormatter" + } + }, + "handlers": { + "console": { + "class" : "logging.StreamHandler", + "formatter": "json", + "level": "INFO", + "stream": "ext://sys.stdout" + } + }, + "loggers": { + "vllm": { + "handlers": ["console"], + "level": "INFO", + "propagate": false + } + }, + "version": 1 +} +``` + +Next, install the `python-json-logger` package if it's not already installed: + +```bash +pip install python-json-logger +``` + +Finally, run vLLM with the `VLLM_LOGGING_CONFIG_PATH` environment variable set +to the path of the custom logging configuration JSON file: + +```bash +VLLM_LOGGING_CONFIG_PATH=/path/to/logging_config.json \ + python3 -m vllm.entrypoints.openai.api_server \ + --max-model-len 2048 \ + --model mistralai/Mistral-7B-v0.1 +``` + + +### Example 2: Silence a particular vLLM logger + +To silence a particular vLLM logger, it is necessary to provide custom logging +configuration for the target logger that configures the logger so that it won't +propagate its log messages to the root vLLM logger. 
 + +When custom configuration is provided for any logger, it is also necessary to +provide configuration for the root vLLM logger since any custom logger +configuration overrides the built-in default logging configuration used by vLLM. + +First, create an appropriate JSON logging configuration file that includes +configuration for the root vLLM logger and for the logger you wish to silence: + +**/path/to/logging_config.json:** + +```json +{ + "formatters": { + "vllm": { + "class": "vllm.logging.NewLineFormatter", + "datefmt": "%m-%d %H:%M:%S", + "format": "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" + } + }, + "handlers": { + "vllm": { + "class" : "logging.StreamHandler", + "formatter": "vllm", + "level": "INFO", + "stream": "ext://sys.stdout" + } + }, + "loggers": { + "vllm": { + "handlers": ["vllm"], + "level": "DEBUG", + "propagate": false + }, + "vllm.example_noisy_logger": { + "propagate": false + } + }, + "version": 1 +} +``` + +Finally, run vLLM with the `VLLM_LOGGING_CONFIG_PATH` environment variable set +to the path of the custom logging configuration JSON file: + +```bash +VLLM_LOGGING_CONFIG_PATH=/path/to/logging_config.json \ + python3 -m vllm.entrypoints.openai.api_server \ + --max-model-len 2048 \ + --model mistralai/Mistral-7B-v0.1 +``` + + +### Example 3: Disable vLLM default logging configuration + +To disable vLLM's default logging configuration and silence all vLLM loggers, +simply set `VLLM_CONFIGURE_LOGGING=0` when running vLLM. This will prevent vLLM +from configuring the root vLLM logger, which, in turn, silences all other vLLM +loggers.
 + +```bash +VLLM_CONFIGURE_LOGGING=0 \ + python3 -m vllm.entrypoints.openai.api_server \ + --max-model-len 2048 \ + --model mistralai/Mistral-7B-v0.1 +``` + + +## Additional resources + +- [`logging.config` Dictionary Schema Details](https://docs.python.org/3/library/logging.config.html#dictionary-schema-details) diff --git a/examples/multilora_inference.py b/examples/multilora_inference.py new file mode 100644 index 0000000..6aa25b4 --- /dev/null +++ b/examples/multilora_inference.py @@ -0,0 +1,124 @@ +""" +This example shows how to use the multi-LoRA functionality +for offline inference. + +Requires HuggingFace credentials for access to Llama2. +""" + +from typing import List, Optional, Tuple + +from huggingface_hub import snapshot_download + +from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams +from vllm.lora.request import LoRARequest + + +def create_test_prompts( + lora_path: str +) -> List[Tuple[str, SamplingParams, Optional[LoRARequest]]]: + """Create a list of test prompts with their sampling parameters. + + 2 requests for base model, 4 requests for the LoRA. We define 2 + different LoRA adapters (using the same model for demo purposes). + Since we also set `max_loras=1`, the expectation is that the requests + with the second LoRA adapter will be run after all requests with the + first adapter have finished.
+ """ + return [ + ("A robot may not injure a human being", + SamplingParams(temperature=0.0, + logprobs=1, + prompt_logprobs=1, + max_tokens=128), None), + ("To be or not to be,", + SamplingParams(temperature=0.8, + top_k=5, + presence_penalty=0.2, + max_tokens=128), None), + ( + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 + SamplingParams(temperature=0.0, + logprobs=1, + prompt_logprobs=1, + max_tokens=128, + stop_token_ids=[32003]), + LoRARequest("sql-lora", 1, lora_path)), + ( + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 + SamplingParams(n=3, + best_of=3, + use_beam_search=True, + temperature=0, + max_tokens=128, + stop_token_ids=[32003]), + LoRARequest("sql-lora", 1, lora_path)), + ( + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 + SamplingParams(temperature=0.0, + logprobs=1, + prompt_logprobs=1, + max_tokens=128, + stop_token_ids=[32003]), + LoRARequest("sql-lora2", 2, lora_path)), + ( + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? 
[/user] [assistant]", # noqa: E501 + SamplingParams(n=3, + best_of=3, + use_beam_search=True, + temperature=0, + max_tokens=128, + stop_token_ids=[32003]), + LoRARequest("sql-lora", 1, lora_path)), + ] + + +def process_requests(engine: LLMEngine, + test_prompts: List[Tuple[str, SamplingParams, + Optional[LoRARequest]]]): + """Continuously process a list of prompts and handle the outputs.""" + request_id = 0 + + while test_prompts or engine.has_unfinished_requests(): + if test_prompts: + prompt, sampling_params, lora_request = test_prompts.pop(0) + engine.add_request(str(request_id), + prompt, + sampling_params, + lora_request=lora_request) + request_id += 1 + + request_outputs: List[RequestOutput] = engine.step() + + for request_output in request_outputs: + if request_output.finished: + print(request_output) + + +def initialize_engine() -> LLMEngine: + """Initialize the LLMEngine.""" + # max_loras: controls the number of LoRAs that can be used in the same + # batch. Larger numbers will cause higher memory usage, as each LoRA + # slot requires its own preallocated tensor. + # max_lora_rank: controls the maximum supported rank of all LoRAs. Larger + # numbers will cause higher memory usage. If you know that all LoRAs will + # use the same rank, it is recommended to set this as low as possible. + # max_cpu_loras: controls the size of the CPU LoRA cache. 
+ engine_args = EngineArgs(model="meta-llama/Llama-2-7b-hf", + enable_lora=True, + max_loras=1, + max_lora_rank=8, + max_cpu_loras=2, + max_num_seqs=256) + return LLMEngine.from_engine_args(engine_args) + + +def main(): + """Main function that sets up and runs the prompt processing.""" + engine = initialize_engine() + lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test") + test_prompts = create_test_prompts(lora_path) + process_requests(engine, test_prompts) + + +if __name__ == '__main__': + main() diff --git a/examples/offline_inference.py b/examples/offline_inference.py new file mode 100644 index 0000000..9b758fa --- /dev/null +++ b/examples/offline_inference.py @@ -0,0 +1,22 @@ +from vllm import LLM, SamplingParams + +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + +# Create an LLM. +llm = LLM(model="facebook/opt-125m") +# Generate texts from the prompts. The output is a list of RequestOutput objects +# that contain the prompt, generated text, and other information. +outputs = llm.generate(prompts, sampling_params) +# Print the outputs. +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/examples/offline_inference_distributed.py b/examples/offline_inference_distributed.py new file mode 100644 index 0000000..e4f085f --- /dev/null +++ b/examples/offline_inference_distributed.py @@ -0,0 +1,72 @@ +""" +This example shows how to use Ray Data for running offline batch inference +distributively on a multi-nodes cluster. 
+ +Learn more about Ray Data in https://docs.ray.io/en/latest/data/data.html +""" + +from typing import Dict + +import numpy as np +import ray + +from vllm import LLM, SamplingParams + +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + +# Create a class to do batch inference. +class LLMPredictor: + + def __init__(self): + # Create an LLM. + self.llm = LLM(model="meta-llama/Llama-2-7b-chat-hf") + + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, list]: + # Generate texts from the prompts. + # The output is a list of RequestOutput objects that contain the prompt, + # generated text, and other information. + outputs = self.llm.generate(batch["text"], sampling_params) + prompt = [] + generated_text = [] + for output in outputs: + prompt.append(output.prompt) + generated_text.append(' '.join([o.text for o in output.outputs])) + return { + "prompt": prompt, + "generated_text": generated_text, + } + + +# Read one text file from S3. Ray Data supports reading multiple files +# from cloud storage (such as JSONL, Parquet, CSV, binary format). +ds = ray.data.read_text("s3://anonymous@air-example-data/prompts.txt") + +# Apply batch inference for all input data. +ds = ds.map_batches( + LLMPredictor, + # Set the concurrency to the number of LLM instances. + concurrency=10, + # Specify the number of GPUs required per LLM instance. + # NOTE: Do NOT set `num_gpus` when using vLLM with tensor-parallelism + # (i.e., `tensor_parallel_size`). + num_gpus=1, + # Specify the batch size for inference. + batch_size=32, +) + +# Peek first 10 results. +# NOTE: This is for local testing and debugging. For production use case, +# one should write full result out as shown below. +outputs = ds.take(limit=10) +for output in outputs: + prompt = output["prompt"] + generated_text = output["generated_text"] + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +# Write inference output data out as Parquet files to S3. 
+# Multiple files would be written to the output destination, +# and each task would write one or more files separately. +# +# ds.write_parquet("s3://") diff --git a/examples/offline_inference_neuron.py b/examples/offline_inference_neuron.py new file mode 100755 index 0000000..5ecbbf0 --- /dev/null +++ b/examples/offline_inference_neuron.py @@ -0,0 +1,36 @@ +from vllm import LLM, SamplingParams + +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + +# Create an LLM. +llm = LLM( + model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", + max_num_seqs=8, + # The max_model_len and block_size arguments are required to be same as + # max sequence length when targeting neuron device. + # Currently, this is a known limitation in continuous batching support + # in transformers-neuronx. + # TODO(liangfu): Support paged-attention in transformers-neuronx. + max_model_len=128, + block_size=128, + # The device can be automatically detected when AWS Neuron SDK is installed. + # The device argument can be either unspecified for automated detection, + # or explicitly assigned. + device="neuron", + tensor_parallel_size=2) +# Generate texts from the prompts. The output is a list of RequestOutput objects +# that contain the prompt, generated text, and other information. +outputs = llm.generate(prompts, sampling_params) +# Print the outputs. 
+for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/examples/offline_inference_with_prefix.py b/examples/offline_inference_with_prefix.py new file mode 100644 index 0000000..7ed0563 --- /dev/null +++ b/examples/offline_inference_with_prefix.py @@ -0,0 +1,53 @@ +from vllm import LLM, SamplingParams + +prefix = ( + "You are an expert school principal, skilled in effectively managing " + "faculty and staff. Draft 10-15 questions for a potential first grade " + "Head Teacher for my K-12, all-girls', independent school that emphasizes " + "community, joyful discovery, and life-long learning. The candidate is " + "coming in for a first-round panel interview for a 8th grade Math " + "teaching role. They have 5 years of previous teaching experience " + "as an assistant teacher at a co-ed, public school with experience " + "in middle school math teaching. Based on these information, fulfill " + "the following paragraph: ") + +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.0) + +# Create an LLM. +llm = LLM(model="facebook/opt-125m", enable_prefix_caching=True) + +generating_prompts = [prefix + prompt for prompt in prompts] + +# Generate texts from the prompts. The output is a list of RequestOutput objects +# that contain the prompt, generated text, and other information. +outputs = llm.generate(generating_prompts, sampling_params) +# Print the outputs. +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +print("-" * 80) + +# The llm.generate call will batch all prompts and send the batch at once +# if resources allow. 
The prefix will only be cached after the first batch +# is processed, so we need to call generate once to calculate the prefix +# and cache it. +outputs = llm.generate(generating_prompts[0], sampling_params) + +# Subsequent batches can leverage the cached prefix +outputs = llm.generate(generating_prompts, sampling_params) + +# Print the outputs. You should see the same outputs as before +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/examples/openai_chat_completion_client.py b/examples/openai_chat_completion_client.py new file mode 100644 index 0000000..bbada38 --- /dev/null +++ b/examples/openai_chat_completion_client.py @@ -0,0 +1,36 @@ +from openai import OpenAI + +# Modify OpenAI's API key and API base to use vLLM's API server. +openai_api_key = "EMPTY" +openai_api_base = "http://localhost:8000/v1" + +client = OpenAI( + # defaults to os.environ.get("OPENAI_API_KEY") + api_key=openai_api_key, + base_url=openai_api_base, +) + +models = client.models.list() +model = models.data[0].id + +chat_completion = client.chat.completions.create( + messages=[{ + "role": "system", + "content": "You are a helpful assistant." + }, { + "role": "user", + "content": "Who won the world series in 2020?" + }, { + "role": + "assistant", + "content": + "The Los Angeles Dodgers won the World Series in 2020." + }, { + "role": "user", + "content": "Where was it played?" + }], + model=model, +) + +print("Chat completion results:") +print(chat_completion) diff --git a/examples/openai_completion_client.py b/examples/openai_completion_client.py new file mode 100644 index 0000000..58519f9 --- /dev/null +++ b/examples/openai_completion_client.py @@ -0,0 +1,31 @@ +from openai import OpenAI + +# Modify OpenAI's API key and API base to use vLLM's API server. 
+openai_api_key = "EMPTY" +openai_api_base = "http://localhost:8000/v1" + +client = OpenAI( + # defaults to os.environ.get("OPENAI_API_KEY") + api_key=openai_api_key, + base_url=openai_api_base, +) + +models = client.models.list() +model = models.data[0].id + +# Completion API +stream = False +completion = client.completions.create( + model=model, + prompt="A robot may not injure a human being", + echo=False, + n=2, + stream=stream, + logprobs=3) + +print("Completion results:") +if stream: + for c in completion: + print(c) +else: + print(completion) diff --git a/examples/production_monitoring/README.md b/examples/production_monitoring/README.md new file mode 100644 index 0000000..29b611c --- /dev/null +++ b/examples/production_monitoring/README.md @@ -0,0 +1,54 @@ +# vLLM + Prometheus/Grafana + +This is a simple example that shows you how to connect vLLM metric logging to the Prometheus/Grafana stack. For this example, we launch Prometheus and Grafana via Docker. You can check out other methods through the [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/) websites. + +Install: +- [`docker`](https://docs.docker.com/engine/install/) +- [`docker compose`](https://docs.docker.com/compose/install/linux/#install-using-the-repository) + +### Launch + +Prometheus metric logging is enabled by default in the OpenAI-compatible server. 
Launch via the entrypoint: +```bash +python3 -m vllm.entrypoints.openai.api_server \ + --model mistralai/Mistral-7B-v0.1 \ + --max-model-len 2048 \ + --disable-log-requests +``` + +Launch Prometheus and Grafana servers with `docker compose`: +```bash +docker compose up +``` + +Submit some sample requests to the server: +```bash +wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + +python3 ../../benchmarks/benchmark_serving.py \ + --model mistralai/Mistral-7B-v0.1 \ + --tokenizer mistralai/Mistral-7B-v0.1 \ + --endpoint /v1/completions \ + --dataset ShareGPT_V3_unfiltered_cleaned_split.json \ + --request-rate 3.0 +``` + +Navigating to [`http://localhost:8000/metrics`](http://localhost:8000/metrics) will show the raw Prometheus metrics being exposed by vLLM. + +### Grafana Dashboard + +Navigate to [`http://localhost:3000`](http://localhost:3000). Log in with the default username (`admin`) and password (`admin`). + +#### Add Prometheus Data Source + +Navigate to [`http://localhost:3000/connections/datasources/new`](http://localhost:3000/connections/datasources/new) and select Prometheus. + +On the Prometheus configuration page, we need to add the `Prometheus Server URL` in `Connection`. For this setup, Grafana and Prometheus are running in separate containers, but Docker creates a DNS name for each container. You can just use `http://prometheus:9090`. + +Click `Save & Test`. You should get a green check saying "Successfully queried the Prometheus API.". + +#### Import Dashboard + +Navigate to [`http://localhost:3000/dashboard/import`](http://localhost:3000/dashboard/import), upload `grafana.json`, and select the `prometheus` datasource. 
You should see a screen that looks like the following: + +![Grafana Dashboard Image](https://i.imgur.com/R2vH9VW.png) diff --git a/examples/production_monitoring/docker-compose.yaml b/examples/production_monitoring/docker-compose.yaml new file mode 100644 index 0000000..13b987c --- /dev/null +++ b/examples/production_monitoring/docker-compose.yaml @@ -0,0 +1,19 @@ +# docker-compose.yaml +version: "3" + +services: + prometheus: + image: prom/prometheus:latest + extra_hosts: + - "host.docker.internal:host-gateway" # allow a direct connection from container to the local machine + ports: + - "9090:9090" # the default port used by Prometheus + volumes: + - ${PWD}/prometheus.yaml:/etc/prometheus/prometheus.yml # mount Prometheus config file + + grafana: + image: grafana/grafana:latest + depends_on: + - prometheus + ports: + - "3000:3000" # the default port used by Grafana diff --git a/examples/production_monitoring/grafana.json b/examples/production_monitoring/grafana.json new file mode 100644 index 0000000..5e9bd5b --- /dev/null +++ b/examples/production_monitoring/grafana.json @@ -0,0 +1,1206 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Monitoring vLLM Inference Server", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 29, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "End to end request latency measured in seconds.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(vllm:e2e_request_latency_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.95, sum by(le) (rate(vllm:e2e_request_latency_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P95", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.9, sum by(le) 
(rate(vllm:e2e_request_latency_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.5, sum by(le) (rate(vllm:e2e_request_latency_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P50", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "rate(vllm:e2e_request_latency_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])\n/\nrate(vllm:e2e_request_latency_seconds_count{model_name=\"$model_name\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Average", + "range": true, + "refId": "E" + } + ], + "title": "E2E Request Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "Number of tokens processed per second", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(vllm:prompt_tokens_total{model_name=\"$model_name\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Prompt Tokens/Sec", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(vllm:generation_tokens_total{model_name=\"$model_name\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Generation Tokens/Sec", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Token Throughput", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "Inter token latency in seconds.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, 
+ "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(vllm:time_per_output_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.95, sum by(le) (rate(vllm:time_per_output_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P95", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.9, sum by(le) (rate(vllm:time_per_output_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + 
"expr": "histogram_quantile(0.5, sum by(le) (rate(vllm:time_per_output_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P50", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "rate(vllm:time_per_output_token_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])\n/\nrate(vllm:time_per_output_token_seconds_count{model_name=\"$model_name\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Mean", + "range": true, + "refId": "E" + } + ], + "title": "Time Per Output Token Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "Number of requests in RUNNING, WAITING, and SWAPPED state", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + 
"mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "vllm:num_requests_running{model_name=\"$model_name\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Num Running", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "vllm:num_requests_swapped{model_name=\"$model_name\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Num Swapped", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "vllm:num_requests_waiting{model_name=\"$model_name\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Num Waiting", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Scheduler State", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "P50, P90, P95, and P99 TTFT latency in seconds.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + 
"mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(vllm:time_to_first_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.95, sum by(le) (rate(vllm:time_to_first_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P95", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.9, sum by(le) (rate(vllm:time_to_first_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"histogram_quantile(0.5, sum by(le) (rate(vllm:time_to_first_token_seconds_bucket{model_name=\"$model_name\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "P50", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "rate(vllm:time_to_first_token_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])\n/\nrate(vllm:time_to_first_token_seconds_count{model_name=\"$model_name\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Average", + "range": true, + "refId": "E" + } + ], + "title": "Time To First Token Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "Percentage of used cache blocks by vLLM.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + 
"sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "vllm:gpu_cache_usage_perc{model_name=\"$model_name\"}", + "instant": false, + "legendFormat": "GPU Cache Usage", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "vllm:cpu_cache_usage_perc{model_name=\"$model_name\"}", + "hide": false, + "instant": false, + "legendFormat": "CPU Cache Usage", + "range": true, + "refId": "B" + } + ], + "title": "Cache Utilization", + "type": "timeseries" + }, + { + "type": "heatmap", + "title": "Request Prompt Length", + "description": "Heatmap of request prompt length", + "gridPos": { + "x": 0, + "y": 24, + "w": 12, + "h": 8 + }, + "datasource": { + "uid": "prometheus", + "type": "prometheus" + }, + "id": 12, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A", + "expr": "sum by(le) (increase(vllm:request_prompt_tokens_bucket{model_name=\"$model_name\"}[$__rate_interval]))", + "range": true, + "instant": false, + "editorMode": "builder", + "legendFormat": "{{le}}", + "useBackend": false, + "disableTextWrap": false, + "fullMetaSearch": false, + "includeNullMetadata": true, + "format": "heatmap" + } + ], + "options": { + "calculate": false, + "yAxis": { + "axisPlacement": "left", + "reverse": false, + "unit": "none", + "axisLabel": "Prompt Length" + }, + "rowsFrame": { + "layout": "auto", + "value": "Request count" + }, + "color": { + "mode": "scheme", + "fill": "dark-orange", + "scale": "exponential", + "exponent": 0.5, + "scheme": "Spectral", + "steps": 64, + "reverse": false, + "min": 0 + }, + "cellGap": 1, + "filterValues": { + "le": 1e-9 + }, + "tooltip": { + "show": true, + "yHistogram": true + }, + "legend": { + "show": true + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "cellValues": { + "unit": "none" + } + }, + 
"fieldConfig": { + "defaults": { + "custom": { + "scaleDistribution": { + "type": "linear" + }, + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + } + } + }, + "overrides": [] + }, + "pluginVersion": "10.2.0" + }, + { + "datasource": { + "uid": "prometheus", + "type": "prometheus" + }, + "type": "heatmap", + "title": "Request Generation Length", + "description": "Heatmap of request generation length", + "gridPos": { + "x": 12, + "y": 24, + "w": 12, + "h": 8 + }, + "id": 13, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A", + "expr": "sum by(le) (increase(vllm:request_generation_tokens_bucket{model_name=\"$model_name\"}[$__rate_interval]))", + "range": true, + "instant": false, + "editorMode": "builder", + "legendFormat": "{{le}}", + "useBackend": false, + "disableTextWrap": false, + "fullMetaSearch": false, + "includeNullMetadata": true, + "format": "heatmap" + } + ], + "options": { + "calculate": false, + "yAxis": { + "axisPlacement": "left", + "reverse": false, + "unit": "none", + "axisLabel": "Generation Length" + }, + "rowsFrame": { + "layout": "auto", + "value": "Request count" + }, + "color": { + "mode": "scheme", + "fill": "dark-orange", + "scale": "exponential", + "exponent": 0.5, + "scheme": "Spectral", + "steps": 64, + "reverse": false, + "min": 0 + }, + "cellGap": 1, + "filterValues": { + "le": 1e-9 + }, + "tooltip": { + "show": true, + "yHistogram": true + }, + "legend": { + "show": true + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "cellValues": { + "unit": "none" + } + }, + "fieldConfig": { + "defaults": { + "custom": { + "scaleDistribution": { + "type": "linear" + }, + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + } + } + }, + "overrides": [] + }, + "pluginVersion": "10.2.0" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + 
"lineInterpolation": "linear", + "barAlignment": 0, + "lineWidth": 1, + "fillOpacity": 0, + "gradientMode": "none", + "spanNulls": false, + "insertNulls": false, + "showPoints": "auto", + "pointSize": 5, + "stacking": { + "mode": "none", + "group": "A" + }, + "axisPlacement": "auto", + "axisLabel": "", + "axisColorMode": "text", + "axisBorderShow": false, + "scaleDistribution": { + "type": "linear" + }, + "axisCenteredZero": false, + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 11, + "options": { + "tooltip": { + "mode": "single", + "sort": "none" + }, + "legend": { + "showLegend": true, + "displayMode": "list", + "placement": "bottom", + "calcs": [] + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(finished_reason) (increase(vllm:request_success_total{model_name=\"$model_name\"}[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Finish Reason", + "description": "Number of finished requests by their finish reason: either an EOS token was generated or the max sequence length was reached.", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "vllm", + "value": "vllm" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": 
"label_values(model_name)", + "hide": 0, + "includeAll": false, + "label": "model_name", + "multi": false, + "name": "model_name", + "options": [], + "query": { + "query": "label_values(model_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "vLLM", + "uid": "b281712d-8bff-41ef-9f3f-71ad43c05e9b", + "version": 2, + "weekStart": "" +} diff --git a/examples/production_monitoring/prometheus.yaml b/examples/production_monitoring/prometheus.yaml new file mode 100644 index 0000000..754533b --- /dev/null +++ b/examples/production_monitoring/prometheus.yaml @@ -0,0 +1,10 @@ +# prometheus.yaml +global: + scrape_interval: 5s + evaluation_interval: 30s + +scrape_configs: + - job_name: vllm + static_configs: + - targets: + - 'host.docker.internal:8000' diff --git a/examples/template_alpaca.jinja b/examples/template_alpaca.jinja new file mode 100644 index 0000000..60667ac --- /dev/null +++ b/examples/template_alpaca.jinja @@ -0,0 +1,29 @@ +{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }} + +{% for message in messages %} +{% if message['role'] == 'user' %} +### Instruction: +{{ message['content']|trim -}} +{% if not loop.last %} + + +{% endif %} +{% elif message['role'] == 'assistant' %} +### Response: +{{ message['content']|trim -}} +{% if not loop.last %} + + +{% endif %} +{% elif message['role'] == 'user_context' %} +### Input: +{{ message['content']|trim -}} +{% if not loop.last %} + + +{% endif %} +{% endif %} +{% endfor %} +{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %} +### Response: +{% endif %} \ No newline at end of file diff --git a/examples/template_baichuan.jinja b/examples/template_baichuan.jinja new file mode 100644 index 0000000..42a8d92 --- 
/dev/null +++ b/examples/template_baichuan.jinja @@ -0,0 +1,13 @@ +{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }} + +{%- for message in messages -%} + {%- if message['role'] == 'user' -%} + {{- '' + message['content'] -}} + {%- elif message['role'] == 'assistant' -%} + {{- '' + message['content'] -}} + {%- endif -%} +{%- endfor -%} + +{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- '' -}} +{% endif %} \ No newline at end of file diff --git a/examples/template_chatglm.jinja b/examples/template_chatglm.jinja new file mode 100644 index 0000000..bf26f27 --- /dev/null +++ b/examples/template_chatglm.jinja @@ -0,0 +1,18 @@ +{%- set counter = namespace(index=0) -%} +{%- for message in messages -%} + {%- if message['role'] == 'user' -%} + {{- '[Round ' + counter.index|string + ']\n问:' + message['content'] -}} + {%- set counter.index = counter.index + 1 -%} + {%- endif -%} + {%- if message['role'] == 'assistant' -%} + {{- '\n答:' + message['content'] -}} + {%- if (loop.last and add_generation_prompt) or not loop.last -%} + {{- '\n' -}} + {%- endif -%} + {%- endif -%} +{%- endfor -%} + + +{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- '\n答:' -}} +{%- endif -%} \ No newline at end of file diff --git a/examples/template_chatglm2.jinja b/examples/template_chatglm2.jinja new file mode 100644 index 0000000..c155b7c --- /dev/null +++ b/examples/template_chatglm2.jinja @@ -0,0 +1,18 @@ +{%- set counter = namespace(index=1) -%} +{%- for message in messages -%} + {%- if message['role'] == 'user' -%} + {{- '[Round ' + counter.index|string + ']\n\n问:' + message['content'] -}} + {%- set counter.index = counter.index + 1 -%} + {%- endif -%} + {%- if message['role'] == 'assistant' -%} + {{- '\n\n答:' + message['content'] -}} + {%- if (loop.last and add_generation_prompt) or not loop.last -%} + {{- '\n\n' -}} + {%- endif -%} + {%- 
endif -%} +{%- endfor -%} + + +{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- '\n\n答:' -}} +{%- endif -%} \ No newline at end of file diff --git a/examples/template_chatml.jinja b/examples/template_chatml.jinja new file mode 100644 index 0000000..4844e68 --- /dev/null +++ b/examples/template_chatml.jinja @@ -0,0 +1,2 @@ +{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %} +{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %} \ No newline at end of file diff --git a/examples/template_falcon.jinja b/examples/template_falcon.jinja new file mode 100644 index 0000000..01cf0e2 --- /dev/null +++ b/examples/template_falcon.jinja @@ -0,0 +1,15 @@ +{%- for message in messages -%} + {%- if message['role'] == 'user' -%} + {{- 'User: ' + message['content'] -}} + {%- elif message['role'] == 'assistant' -%} + {{- 'Assistant: ' + message['content'] -}} + {%- endif -%} + {%- if (loop.last and add_generation_prompt) or not loop.last -%} + {{- '\n' -}} + {%- endif -%} +{%- endfor -%} + + +{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- 'Assistant:' -}} +{% endif %} \ No newline at end of file diff --git a/examples/template_falcon_180b.jinja b/examples/template_falcon_180b.jinja new file mode 100644 index 0000000..f08f739 --- /dev/null +++ b/examples/template_falcon_180b.jinja @@ -0,0 +1,17 @@ +{%- for message in messages -%} + {%- if message['role'] == 'system' -%} + {{- 'System: ' + message['content'] -}} + {%- elif message['role'] == 'user' -%} + {{- 'User: ' + message['content'] -}} + {%- elif message['role'] == 'assistant' -%} + {{- 'Falcon: ' + message['content'] -}} + {%- endif -%} + {%- if (loop.last and add_generation_prompt) or not loop.last -%} + {{- '\n' -}} + {%- endif -%} +{%- endfor -%} + + +{%- if 
add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- 'Falcon:' -}} +{% endif %} \ No newline at end of file diff --git a/examples/template_inkbot.jinja b/examples/template_inkbot.jinja new file mode 100644 index 0000000..33a8174 --- /dev/null +++ b/examples/template_inkbot.jinja @@ -0,0 +1,30 @@ +<#meta#> +- Date: {{ (messages|selectattr('role', 'equalto', 'meta-current_date')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'meta-current_date')|list) else '' }} +- Task: {{ (messages|selectattr('role', 'equalto', 'meta-task_name')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'meta-task_name')|list) else '' }} +<#system#> +{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }} +<#chat#> +{% for message in messages %} +{% if message['role'] == 'user' %} +<#user#> +{{ message['content']|trim -}} +{% if not loop.last %} + +{% endif %} +{% elif message['role'] == 'assistant' %} +<#bot#> +{{ message['content']|trim -}} +{% if not loop.last %} + +{% endif %} +{% elif message['role'] == 'user_context' %} +<#user_context#> +{{ message['content']|trim -}} +{% if not loop.last %} + +{% endif %} +{% endif %} +{% endfor %} +{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %} +<#bot#> +{% endif %} \ No newline at end of file diff --git a/examples/tensorize_vllm_model.py b/examples/tensorize_vllm_model.py new file mode 100644 index 0000000..e245616 --- /dev/null +++ b/examples/tensorize_vllm_model.py @@ -0,0 +1,282 @@ +import argparse +import dataclasses +import os +import time +import uuid +from functools import partial +from typing import Type + +import torch +import torch.nn as nn +from tensorizer import (DecryptionParams, EncryptionParams, TensorDeserializer, + TensorSerializer, stream_io) +from tensorizer.utils import convert_bytes, get_mem_usage, no_init_or_tensor +from transformers import AutoConfig, 
PretrainedConfig + +from vllm.distributed import initialize_model_parallel +from vllm.engine.arg_utils import EngineArgs +from vllm.engine.llm_engine import LLMEngine +from vllm.model_executor.model_loader.tensorizer import TensorizerArgs +from vllm.model_executor.models import ModelRegistry + +# yapf conflicts with isort for this docstring +# yapf: disable +""" +tensorize_vllm_model.py is a script that can be used to serialize and +deserialize vLLM models. These models can be loaded using tensorizer +to the GPU extremely quickly over an HTTP/HTTPS endpoint, an S3 endpoint, +or locally. Tensor encryption and decryption is also supported, although +libsodium must be installed to use it. Install vllm with tensorizer support +using `pip install vllm[tensorizer]`. + +To serialize a model, install vLLM from source, then run something +like this from the root level of this repository: + +python -m examples.tensorize_vllm_model \ + --model EleutherAI/gpt-j-6B \ + --dtype float16 \ + serialize \ + --serialized-directory s3://my-bucket/ \ + --suffix vllm + +Which downloads the model from HuggingFace, loads it into vLLM, serializes it, +and saves it to your S3 bucket. A local directory can also be used. This +assumes your S3 credentials are specified as environment variables +in the form of `S3_ACCESS_KEY_ID`, `S3_SECRET_ACCESS_KEY`, and `S3_ENDPOINT`. +To provide S3 credentials directly, you can provide `--s3-access-key-id` and +`--s3-secret-access-key`, as well as `--s3-endpoint` as CLI args to this +script. + +You can also encrypt the model weights with a randomly-generated key by +providing a `--keyfile` argument. 
+ +To deserialize a model, you can run something like this from the root +level of this repository: + +python -m examples.tensorize_vllm_model \ + --model EleutherAI/gpt-j-6B \ + --dtype float16 \ + deserialize \ + --path-to-tensors s3://my-bucket/vllm/EleutherAI/gpt-j-6B/vllm/model.tensors + +Which downloads the model tensors from your S3 bucket and deserializes them. + +You can also provide a `--keyfile` argument to decrypt the model weights if +they were serialized with encryption. + +For more information on the available arguments for serializing, run +`python -m examples.tensorize_vllm_model serialize --help`. + +Or for deserializing: + +`python -m examples.tensorize_vllm_model deserialize --help`. + +Once a model is serialized, it can be used to load the model when running the +OpenAI inference client at `vllm/entrypoints/openai/api_server.py` by providing +the `--tensorizer-uri` CLI argument that is functionally the same as the +`--path-to-tensors` argument in this script, along with `--vllm-tensorized`, to +signify that the model to be deserialized is a vLLM model, rather than a +HuggingFace `PreTrainedModel`, which can also be deserialized using tensorizer +in the same inference server, albeit without the speed optimizations. To +deserialize an encrypted file, the `--encryption-keyfile` argument can be used +to provide the path to the keyfile used to encrypt the model weights. For +information on all the arguments that can be used to configure tensorizer's +deserialization, check out the tensorizer options argument group in the +`vllm/entrypoints/openai/api_server.py` script with `--help`. 
+ +Tensorizer can also be invoked with the `LLM` class directly to load models: + + llm = LLM(model="facebook/opt-125m", + load_format="tensorizer", + tensorizer_uri=path_to_opt_tensors, + num_readers=3, + vllm_tensorized=True) +""" + + +def parse_args(): + parser = argparse.ArgumentParser( + description="An example script that can be used to serialize and " + "deserialize vLLM models. These models " + "can be loaded using tensorizer directly to the GPU " + "extremely quickly. Tensor encryption and decryption is " + "also supported, although libsodium must be installed to " + "use it.") + parser = EngineArgs.add_cli_args(parser) + subparsers = parser.add_subparsers(dest='command') + + serialize_parser = subparsers.add_parser( + 'serialize', help="Serialize a model to `--serialized-directory`") + + serialize_parser.add_argument( + "--suffix", + type=str, + required=False, + help=( + "The suffix to append to the serialized model directory, which is " + "used to construct the location of the serialized model tensors, " + "e.g. if `--serialized-directory` is `s3://my-bucket/` and " + "`--suffix` is `v1`, the serialized model tensors will be " + "saved to " + "`s3://my-bucket/vllm/EleutherAI/gpt-j-6B/v1/model.tensors`. " + "If none is provided, a random UUID will be used.")) + serialize_parser.add_argument( + "--serialized-directory", + type=str, + required=True, + help="The directory to serialize the model to. " + "This can be a local directory or S3 URI. The path to where the " + "tensors are saved is a combination of the supplied `dir` and model " + "reference ID. 
For instance, if `dir` is the serialized directory, " + "and the model HuggingFace ID is `EleutherAI/gpt-j-6B`, tensors will " + "be saved to `dir/vllm/EleutherAI/gpt-j-6B/suffix/model.tensors`, " + "where `suffix` is given by `--suffix` or a random UUID if not " + "provided.") + + serialize_parser.add_argument( + "--keyfile", + type=str, + required=False, + help=("Encrypt the model weights with a randomly-generated binary key," + " and save the key at this path")) + + deserialize_parser = subparsers.add_parser( + 'deserialize', + help=("Deserialize a model from `--path-to-tensors`" + " to verify it can be loaded and used.")) + + deserialize_parser.add_argument( + "--path-to-tensors", + type=str, + required=True, + help="The local path or S3 URI to the model tensors to deserialize. ") + + deserialize_parser.add_argument( + "--keyfile", + type=str, + required=False, + help=("Path to a binary key to use to decrypt the model weights," + " if the model was serialized with encryption")) + + return parser.parse_args() + + +def make_model_contiguous(model): + # Ensure tensors are saved in memory contiguously + for param in model.parameters(): + param.data = param.data.contiguous() + + +def _get_vllm_model_architecture(config: PretrainedConfig) -> Type[nn.Module]: + architectures = getattr(config, "architectures", []) + for arch in architectures: + model_cls = ModelRegistry.load_model_cls(arch) + if model_cls is not None: + return model_cls + raise ValueError( + f"Model architectures {architectures} are not supported for now. " + f"Supported architectures: {ModelRegistry.get_supported_archs()}") + + +def serialize(): + + eng_args_dict = {f.name: getattr(args, f.name) for f in + dataclasses.fields(EngineArgs)} + engine_args = EngineArgs.from_cli_args(argparse.Namespace(**eng_args_dict)) + engine = LLMEngine.from_engine_args(engine_args) + + model = (engine.model_executor.driver_worker. 
+ model_runner.model) + + encryption_params = EncryptionParams.random() if keyfile else None + if keyfile: + with _write_stream(keyfile) as stream: + stream.write(encryption_params.key) + + with _write_stream(model_path) as stream: + serializer = TensorSerializer(stream, encryption=encryption_params) + serializer.write_module(model) + serializer.close() + + print("Serialization complete. Model tensors saved to", model_path) + if keyfile: + print("Key saved to", keyfile) + + +def deserialize(): + config = AutoConfig.from_pretrained(model_ref) + + with no_init_or_tensor(): + model_class = _get_vllm_model_architecture(config) + model = model_class(config) + + before_mem = get_mem_usage() + start = time.time() + + if keyfile: + with _read_stream(keyfile) as stream: + key = stream.read() + decryption_params = DecryptionParams.from_key(key) + tensorizer_args.deserializer_params['encryption'] = \ + decryption_params + + with (_read_stream(model_path)) as stream, TensorDeserializer( + stream, **tensorizer_args.deserializer_params) as deserializer: + deserializer.load_into_module(model) + end = time.time() + + # Brag about how fast we are. 
+ total_bytes_str = convert_bytes(deserializer.total_tensor_bytes) + duration = end - start + per_second = convert_bytes(deserializer.total_tensor_bytes / duration) + after_mem = get_mem_usage() + print( + f"Deserialized {total_bytes_str} in {end - start:0.2f}s, {per_second}/s" + ) + print(f"Memory usage before: {before_mem}") + print(f"Memory usage after: {after_mem}") + + return model + + +args = parse_args() + +s3_access_key_id = (args.s3_access_key_id or os.environ.get("S3_ACCESS_KEY_ID") + or None) +s3_secret_access_key = (args.s3_secret_access_key + or os.environ.get("S3_SECRET_ACCESS_KEY") or None) + +s3_endpoint = (args.s3_endpoint or os.environ.get("S3_ENDPOINT_URL") or None) + +_read_stream, _write_stream = (partial( + stream_io.open_stream, + mode=mode, + s3_access_key_id=s3_access_key_id, + s3_secret_access_key=s3_secret_access_key, + s3_endpoint=s3_endpoint, +) for mode in ("rb", "wb+")) + +model_ref = args.model + +model_name = model_ref.split("/")[1] + +os.environ["MASTER_ADDR"] = "127.0.0.1" +os.environ["MASTER_PORT"] = "8080" + +torch.distributed.init_process_group(world_size=1, rank=0) +initialize_model_parallel() + +keyfile = args.keyfile if args.keyfile else None + +if args.command == "serialize": + input_dir = args.serialized_directory.rstrip('/') + suffix = args.suffix if args.suffix else uuid.uuid4().hex + base_path = f"{input_dir}/vllm/{model_ref}/{suffix}" + model_path = f"{base_path}/model.tensors" + serialize() +elif args.command == "deserialize": + tensorizer_args = TensorizerArgs.from_cli_args(args) + model_path = args.path_to_tensors + deserialize() +else: + raise ValueError("Either serialize or deserialize must be specified.") diff --git a/format.sh b/format.sh new file mode 100755 index 0000000..233e6af --- /dev/null +++ b/format.sh @@ -0,0 +1,244 @@ +#!/usr/bin/env bash +# YAPF formatter, adapted from ray and skypilot. +# +# Usage: +# # Do work and commit your work. + +# # Format files that differ from origin/main. 
+# bash format.sh + +# # Commit changed files with message 'Run yapf and ruff' +# +# +# YAPF + Clang formatter (if installed). This script formats all changed files from the last mergebase. +# You are encouraged to run this locally before pushing changes for review. + +# Cause the script to exit if a single command fails +set -eo pipefail + +# this stops git rev-parse from failing if we run this from the .git directory +builtin cd "$(dirname "${BASH_SOURCE:-$0}")" +ROOT="$(git rev-parse --show-toplevel)" +builtin cd "$ROOT" || exit 1 + +YAPF_VERSION=$(yapf --version | awk '{print $2}') +RUFF_VERSION=$(ruff --version | awk '{print $2}') +MYPY_VERSION=$(mypy --version | awk '{print $2}') +CODESPELL_VERSION=$(codespell --version) +ISORT_VERSION=$(isort --vn) + +# # params: tool name, tool version, required version +tool_version_check() { + if [[ $2 != $3 ]]; then + echo "Wrong $1 version installed: $3 is required, not $2." + exit 1 + fi +} + +tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)" +tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)" +tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-dev.txt | cut -d'=' -f3)" +tool_version_check "isort" "$ISORT_VERSION" "$(grep isort requirements-dev.txt | cut -d'=' -f3)" +tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)" + +YAPF_FLAGS=( + '--recursive' + '--parallel' +) + +YAPF_EXCLUDES=( + '--exclude' 'build/**' +) + +# Format specified files +format() { + yapf --in-place "${YAPF_FLAGS[@]}" "$@" +} + +# Format files that differ from main branch. Ignores dirs that are not slated +# for autoformat yet. +format_changed() { + # The `if` guard ensures that the list of filenames is not empty, which + # could cause yapf to receive 0 positional arguments, making it hang + # waiting for STDIN. 
+ # + # `diff-filter=ACM` and $MERGEBASE is to ensure we only format files that + # exist on both branches. + MERGEBASE="$(git merge-base origin/main HEAD)" + + if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then + git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs -P 5 \ + yapf --in-place "${YAPF_EXCLUDES[@]}" "${YAPF_FLAGS[@]}" + fi + +} + +# Format all files +format_all() { + yapf --in-place "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" . +} + +## This flag formats individual files. --files *must* be the first command line +## arg to use this option. +if [[ "$1" == '--files' ]]; then + format "${@:2}" + # If `--all` is passed, then any further arguments are ignored and the + # entire python directory is formatted. +elif [[ "$1" == '--all' ]]; then + format_all +else + # Format only the files that changed in last commit. + format_changed +fi +echo 'vLLM yapf: Done' + +# Run mypy +echo 'vLLM mypy:' +mypy vllm/attention --config-file pyproject.toml +mypy vllm/core --config-file pyproject.toml +mypy vllm/distributed --config-file pyproject.toml +mypy vllm/entrypoints --config-file pyproject.toml +mypy vllm/executor --config-file pyproject.toml +mypy vllm/usage --config-file pyproject.toml +mypy vllm/*.py --config-file pyproject.toml +mypy vllm/transformers_utils --config-file pyproject.toml +mypy vllm/engine --config-file pyproject.toml +mypy vllm/worker --config-file pyproject.toml +mypy vllm/spec_decode --config-file pyproject.toml +mypy vllm/model_executor --config-file pyproject.toml +mypy vllm/lora --config-file pyproject.toml +mypy vllm/logging --config-file pyproject.toml +mypy vllm/model_executor --config-file pyproject.toml + + +CODESPELL_EXCLUDES=( + '--skip' '*docs/source/_build/**' +) + +# check spelling of specified files +spell_check() { + codespell "$@" +} + +spell_check_all(){ + codespell --toml pyproject.toml "${CODESPELL_EXCLUDES[@]}" +} + +# Spelling check of files that differ 
from main branch. +spell_check_changed() { + # The `if` guard ensures that the list of filenames is not empty, which + # could cause codespell to receive 0 positional arguments, making it hang + # waiting for STDIN. + # + # `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that + # exist on both branches. + MERGEBASE="$(git merge-base origin/main HEAD)" + + if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then + git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \ + codespell "${CODESPELL_EXCLUDES[@]}" + fi +} + +# Run Codespell +## This flag runs spell check of individual files. --files *must* be the first command line +## arg to use this option. +if [[ "$1" == '--files' ]]; then + spell_check "${@:2}" + # If `--all` is passed, then any further arguments are ignored and the + # entire python directory is linted. +elif [[ "$1" == '--all' ]]; then + spell_check_all +else + # Check spelling only of the files that changed in last commit. + spell_check_changed +fi +echo 'vLLM codespell: Done' + + +# Lint specified files +lint() { + ruff "$@" +} + +# Lint files that differ from main branch. Ignores dirs that are not slated +# for autolint yet. +lint_changed() { + # The `if` guard ensures that the list of filenames is not empty, which + # could cause ruff to receive 0 positional arguments, making it hang + # waiting for STDIN. + # + # `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that + # exist on both branches. + MERGEBASE="$(git merge-base origin/main HEAD)" + + if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then + git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \ + ruff + fi + +} + +# Run Ruff +echo 'vLLM ruff:' +### This flag lints individual files. --files *must* be the first command line +### arg to use this option.
+if [[ "$1" == '--files' ]]; then + lint "${@:2}" + # If `--all` is passed, then any further arguments are ignored and the + # entire python directory is linted. +elif [[ "$1" == '--all' ]]; then + lint vllm tests +else + # Lint only the files that changed in last commit. + lint_changed +fi + +# Run isort on specified files +isort_check() { + isort "$@" +} + +isort_check_all(){ + isort . +} + +# Run isort on files that differ from main branch. +isort_check_changed() { + # The `if` guard ensures that the list of filenames is not empty, which + # could cause isort to receive 0 positional arguments, making it hang + # waiting for STDIN. + # + # `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that + # exist on both branches. + MERGEBASE="$(git merge-base origin/main HEAD)" + + if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then + git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \ + isort + fi +} + +# Run Isort +# This flag runs isort on individual files. --files *must* be the first command line +# arg to use this option. +if [[ "$1" == '--files' ]]; then + isort_check "${@:2}" + # If `--all` is passed, then any further arguments are ignored and the + # entire python directory is linted. +elif [[ "$1" == '--all' ]]; then + isort_check_all +else + # Run isort only on the files that changed in last commit. + isort_check_changed +fi +echo 'vLLM isort: Done' + +if ! git diff --quiet &>/dev/null; then + echo 'Reformatted files. Please review and stage the changes.'
+ echo 'Changes not staged for commit:' + echo + git --no-pager diff --name-only + + exit 1 +fi diff --git a/musa_porting.py b/musa_porting.py new file mode 100644 index 0000000..493d5a8 --- /dev/null +++ b/musa_porting.py @@ -0,0 +1,36 @@ +import os +from setuptools import setup, find_packages +from torch_musa.utils.simple_porting import SimplePorting +from torch_musa.utils.musa_extension import MUSAExtension + +SimplePorting(cuda_dir_path="./csrc", mapping_rule={ + "x.device().is_cuda()": "true", + "#include ": "#include \"torch_musa/csrc/aten/musa/MUSAContext.h\"", + "#include ": "#include \"torch_musa/csrc/core/MUSAGuard.h\"", + "#include ": "#include \"torch_musa/csrc/core/MUSAException.h\"", + "#include ": "#include \"torch_musa/csrc/core/MUSAStream.h\"", + "at::kCUDA": "at::musa::kMUSA", + "at::cuda::getCurrentCUDAStream()": "at::musa::getCurrentMUSAStream()", + "__nv_bfloat16": "__mt_bfloat16", + "at::cuda::OptionalCUDAGuard": "at::musa::OptionalMUSAGuard", + "at::cuda::getCurrentCUDABlasHandle()": "at::musa::getCurrentMUSABlasHandle()", + "ATen/cuda/CUDATensorMethods.cuh": "ATen/musa/MUSA_PORT_TensorMethods.muh", + "#include \"attention_generic.cuh\"": "#include \"attention_generic.muh\"", + "#include \"reduction_utils.cuh\"": "#include \"reduction_utils.muh\"", + "#include ": "#include ", + "#include \"dtype_float16.cuh\"": "#include \"dtype_float16.muh\"", + "#include \"dtype_float32.cuh\"": "#include \"dtype_float32.muh\"", + "#include \"custom_all_reduce.cuh\"": "#include \"custom_all_reduce.muh\"", + "#include \"dtype_bfloat16.cuh\"": "#include \"dtype_bfloat16.muh\"", + "#include \"dtype_fp8.cuh\"": "#include \"dtype_fp8.muh\"", + "#include \"attention_utils.cuh\"": "#include \"attention_utils.muh\"", + "cuPointerGetAttribute": "muPointerGetAttribute", + "CUdeviceptr": "MUdeviceptr", + "CUDA_SUCCESS": "MUSA_SUCCESS", + "CU_POINTER_ATTRIBUTE_RANGE_START_ADDR": "MU_POINTER_ATTRIBUTE_RANGE_START_ADDR", + "c10::cuda": "c10::musa", + 
"cudaStreamIsCapturing": "at::musa::musaStreamIsCapturing", + "AT_CUDA_CHECK": "C10_MUSA_CHECK", + "nv_bfloat16": "mt_bfloat16", + "struct __align__(16) RankData { const void *__restrict__ ptrs[8]; };":"struct __align__(16) RankData { const void *__restrict__ ptrs[8]; RankData& operator=(const RankData& ){return *this;} };" + }).run() \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..9f1699e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,67 @@ +[build-system] +# Should be mirrored in requirements-build.txt +requires = [ + "cmake>=3.21", + "ninja", + "packaging", + "setuptools >= 49.4.0", + "torch == 2.2.0", + "wheel", +] +build-backend = "setuptools.build_meta" + +[tool.ruff] +# Allow lines to be as long as 80. +line-length = 80 +exclude = [ + # External file, leaving license intact + "examples/fp8/quantizer/quantize.py" +] + +[tool.ruff.lint] +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + # "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + # "I", + "G", +] +ignore = [ + # star imports + "F405", "F403", + # lambda expression assignment + "E731", + # Loop control variable not used within loop body + "B007", +] + +[tool.mypy] +python_version = "3.9" + +ignore_missing_imports = true +check_untyped_defs = true +follow_imports = "skip" + +files = "vllm" +# TODO(woosuk): Include the code from Megatron and HuggingFace. +exclude = [ + "vllm/model_executor/parallel_utils/|vllm/model_executor/models/", + # Ignore triton kernels in ops. 
+ 'vllm/attention/ops/.*\.py$' +] + +[tool.codespell] +ignore-words-list = "dout, te, indicies" +skip = "./tests/prompts,./benchmarks/sonnet.txt" + +[tool.isort] +use_parentheses = true +skip_gitignore = true diff --git a/requirements-build.txt b/requirements-build.txt new file mode 100644 index 0000000..6a9950a --- /dev/null +++ b/requirements-build.txt @@ -0,0 +1,8 @@ +# Should be mirrored in pyproject.toml +cmake>=3.21 +ninja +packaging +setuptools>=49.4.0 +torch==2.2.0 +wheel +triton >= 2.2.0 # FIXME(woosuk): This is a hack to avoid import error. \ No newline at end of file diff --git a/requirements-common.txt b/requirements-common.txt new file mode 100644 index 0000000..3abb828 --- /dev/null +++ b/requirements-common.txt @@ -0,0 +1,20 @@ +cmake >= 3.21 +ninja # For faster builds. +psutil +sentencepiece # Required for LLaMA tokenizer. +numpy +requests +py-cpuinfo +transformers >= 4.40.0 # Required for StarCoder2 & Llava, Llama 3. +tokenizers >= 0.19.1 # Required for Llama 3. +fastapi +openai +uvicorn[standard] +pydantic >= 2.0 # Required for OpenAI server. +prometheus_client >= 0.18.0 +prometheus-fastapi-instrumentator >= 7.0.0 +tiktoken == 0.6.0 # Required for DBRX tokenizer +lm-format-enforcer == 0.9.8 +outlines == 0.0.34 # Requires torch >= 2.1.0 +typing_extensions +filelock >= 3.10.4 # filelock starts to support `mode` argument from 3.10.4 diff --git a/requirements-cpu.txt b/requirements-cpu.txt new file mode 100644 index 0000000..b739642 --- /dev/null +++ b/requirements-cpu.txt @@ -0,0 +1,6 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for x86_64 CPUs +torch == 2.3.0+cpu +triton >= 2.2.0 # FIXME(woosuk): This is a hack to avoid import error. 
\ No newline at end of file diff --git a/requirements-cuda.txt b/requirements-cuda.txt new file mode 100644 index 0000000..6548d7a --- /dev/null +++ b/requirements-cuda.txt @@ -0,0 +1,9 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for NVIDIA GPUs +ray >= 2.9 +nvidia-ml-py # for pynvml package +vllm-nccl-cu12>=2.18,<2.19 # for downloading nccl library +torch == 2.3.0 +xformers == 0.0.26.post1 # Requires PyTorch 2.3.0 diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..e6d375c --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,33 @@ +# formatting +yapf==0.32.0 +toml==0.10.2 +tomli==2.0.1 +ruff==0.1.5 +codespell==2.2.6 +isort==5.13.2 + +# type checking +mypy==1.9.0 +types-PyYAML +types-requests +types-setuptools + +# testing +pytest +tensorizer==2.9.0 +pytest-forked +pytest-asyncio +pytest-rerunfailures +pytest-shard +httpx +einops # required for MPT +requests +ray +peft +awscli + +# Benchmarking +aiohttp + +# Multimodal +pillow diff --git a/requirements-musa.txt b/requirements-musa.txt new file mode 100644 index 0000000..41bc056 --- /dev/null +++ b/requirements-musa.txt @@ -0,0 +1,7 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for MTHREADS GPUs +ray >= 2.9 +torch == 2.2.0 +triton >= 2.2.0 # FIXME(woosuk): This is a hack to avoid import error. 
diff --git a/requirements-neuron.txt b/requirements-neuron.txt new file mode 100644 index 0000000..92b705b --- /dev/null +++ b/requirements-neuron.txt @@ -0,0 +1,7 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for Neuron devices +transformers-neuronx >= 0.9.0 +torch-neuronx >= 2.1.0 +neuronx-cc diff --git a/requirements-rocm.txt b/requirements-rocm.txt new file mode 100644 index 0000000..903845b --- /dev/null +++ b/requirements-rocm.txt @@ -0,0 +1,5 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for AMD GPUs +ray == 2.9.3 diff --git a/rocm_patch/rocm_bf16.patch b/rocm_patch/rocm_bf16.patch new file mode 100644 index 0000000..a0f07da --- /dev/null +++ b/rocm_patch/rocm_bf16.patch @@ -0,0 +1,15 @@ +--- amd_hip_bf16.h 2024-02-06 18:28:58.268699142 +0000 ++++ amd_hip_bf16.h.new 2024-02-06 18:28:31.988647133 +0000 +@@ -90,10 +90,10 @@ + #include "math_fwd.h" // ocml device functions + + #if defined(__HIPCC_RTC__) +-#define __HOST_DEVICE__ __device__ ++#define __HOST_DEVICE__ __device__ static + #else + #include +-#define __HOST_DEVICE__ __host__ __device__ ++#define __HOST_DEVICE__ __host__ __device__ static inline + #endif + + // Since we are using unsigned short to represent data in bfloat16, it can be of different sizes on diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..f21bcfd --- /dev/null +++ b/setup.py @@ -0,0 +1,447 @@ +import importlib.util +import io +import logging +import os +import re +import subprocess +import sys +from shutil import which +from typing import Dict, List + +import torch +import torch_musa +from packaging.version import Version, parse +from setuptools import Extension, find_packages, setup +from setuptools.command.build_ext import build_ext +from torch.utils.cpp_extension import CUDA_HOME + +from torch_musa.utils.simple_porting import SimplePorting +from torch_musa.utils.musa_extension import BuildExtension, MUSAExtension + + +def load_module_from_path(module_name, 
path): + spec = importlib.util.spec_from_file_location(module_name, path) + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + spec.loader.exec_module(module) + return module + + +ROOT_DIR = os.path.dirname(__file__) +logger = logging.getLogger(__name__) + + +# cannot import envs directly because it depends on vllm, +# which is not installed yet +envs = load_module_from_path('envs', os.path.join(ROOT_DIR, 'vllm', 'envs.py')) + +VLLM_TARGET_DEVICE = envs.VLLM_TARGET_DEVICE + +# vLLM only supports Linux platform +assert sys.platform.startswith( + "linux"), "vLLM only supports Linux platform (including WSL)." + +MAIN_CUDA_VERSION = "12.1" + + +def is_sccache_available() -> bool: + return which("sccache") is not None + + +def is_ccache_available() -> bool: + return which("ccache") is not None + + +def is_ninja_available() -> bool: + return which("ninja") is not None + + +def remove_prefix(text, prefix): + if text.startswith(prefix): + return text[len(prefix):] + return text + + +class CMakeExtension(Extension): + + def __init__(self, name: str, cmake_lists_dir: str = '.', **kwa) -> None: + super().__init__(name, sources=[], **kwa) + self.cmake_lists_dir = os.path.abspath(cmake_lists_dir) + +ext_modules = [] +ext_modules.append( + MUSAExtension( + name="vllm_C", + sources=[ + "csrc_musa/cache_kernels.mu", + "csrc_musa/attention/attention_kernels.mu", + "csrc_musa/pos_encoding_kernels.mu", + "csrc_musa/activation_kernels.mu", + "csrc_musa/layernorm_kernels.mu", + "csrc_musa/musa_utils_kernels.mu", + "csrc_musa/moe_align_block_size_kernels.mu", + "csrc_musa/pybind.cpp", + "csrc_musa/custom_all_reduce.mu", + ], + extra_compile_args= {"cxx": ['-O3', '-std=c++17'],} + ) + ) + +class cmake_build_ext(build_ext): + # A dict of extension directories that have been configured. + did_config: Dict[str, bool] = {} + + # + # Determine number of compilation jobs and optionally nvcc compile threads. 
+ # + def compute_num_jobs(self): + # `num_jobs` is either the value of the MAX_JOBS environment variable + # (if defined) or the number of CPUs available. + num_jobs = envs.MAX_JOBS + if num_jobs is not None: + num_jobs = int(num_jobs) + logger.info("Using MAX_JOBS=%d as the number of jobs.", num_jobs) + else: + try: + # os.sched_getaffinity() isn't universally available, so fall + # back to os.cpu_count() if we get an error here. + num_jobs = len(os.sched_getaffinity(0)) + except AttributeError: + num_jobs = os.cpu_count() + + nvcc_threads = None + + return num_jobs, nvcc_threads + + # + # Perform cmake configuration for a single extension. + # + def configure(self, ext: CMakeExtension) -> None: + # If we've already configured using the CMakeLists.txt for + # this extension, exit early. + if ext.cmake_lists_dir in cmake_build_ext.did_config: + return + + cmake_build_ext.did_config[ext.cmake_lists_dir] = True + + # Select the build type. + # Note: optimization level + debug info are set by the build type + default_cfg = "Debug" if self.debug else "RelWithDebInfo" + cfg = envs.CMAKE_BUILD_TYPE or default_cfg + + # where .so files will be written, should be the same for all extensions + # that use the same CMakeLists.txt. 
+ outdir = os.path.abspath( + os.path.dirname(self.get_ext_fullpath(ext.name))) + + cmake_args = [ + '-DCMAKE_BUILD_TYPE={}'.format(cfg), + '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(outdir), + '-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY={}'.format(self.build_temp), + '-DVLLM_TARGET_DEVICE={}'.format(VLLM_TARGET_DEVICE), + ] + + verbose = envs.VERBOSE + # verbose = False + if verbose: + cmake_args += ['-DCMAKE_VERBOSE_MAKEFILE=ON'] + + if is_sccache_available(): + cmake_args += [ + '-DCMAKE_CXX_COMPILER_LAUNCHER=sccache', + '-DCMAKE_CUDA_COMPILER_LAUNCHER=sccache', + ] + elif is_ccache_available(): + cmake_args += [ + '-DCMAKE_CXX_COMPILER_LAUNCHER=ccache', + '-DCMAKE_CUDA_COMPILER_LAUNCHER=ccache', + ] + + # Pass the python executable to cmake so it can find an exact + # match. + cmake_args += ['-DVLLM_PYTHON_EXECUTABLE={}'.format(sys.executable)] + + if _install_punica(): + cmake_args += ['-DVLLM_INSTALL_PUNICA_KERNELS=ON'] + + # + # Setup parallelism and build tool + # + num_jobs, nvcc_threads = self.compute_num_jobs() + + if nvcc_threads: + cmake_args += ['-DNVCC_THREADS={}'.format(nvcc_threads)] + + if is_ninja_available(): + build_tool = ['-G', 'Ninja'] + cmake_args += [ + '-DCMAKE_JOB_POOL_COMPILE:STRING=compile', + '-DCMAKE_JOB_POOLS:STRING=compile={}'.format(num_jobs), + ] + else: + # Default build tool to whatever cmake picks. + build_tool = [] + + subprocess.check_call( + ['cmake', ext.cmake_lists_dir, *build_tool, *cmake_args], + cwd=self.build_temp) + + def build_extensions(self) -> None: + # Ensure that CMake is present and working + try: + subprocess.check_output(['cmake', '--version']) + except OSError as e: + raise RuntimeError('Cannot find CMake executable') from e + + # Create build directory if it does not exist. 
+ if not os.path.exists(self.build_temp): + os.makedirs(self.build_temp) + + # Build all the extensions + for ext in self.extensions: + self.configure(ext) + + ext_target_name = remove_prefix(ext.name, "vllm.") + num_jobs, _ = self.compute_num_jobs() + + build_args = [ + '--build', '.', '--target', ext_target_name, '-j', + str(num_jobs) + ] + + subprocess.check_call(['cmake', *build_args], cwd=self.build_temp) + + +def _is_cuda() -> bool: + return VLLM_TARGET_DEVICE == "cuda" \ + and torch.version.cuda is not None \ + and not _is_neuron() + +def _is_musa() -> bool: + return VLLM_TARGET_DEVICE == "musa" \ + and torch.version.musa is not None + + +def _is_hip() -> bool: + return (VLLM_TARGET_DEVICE == "cuda" + or VLLM_TARGET_DEVICE == "rocm") and torch.version.hip is not None + + +def _is_neuron() -> bool: + torch_neuronx_installed = True + try: + subprocess.run(["neuron-ls"], capture_output=True, check=True) + except (FileNotFoundError, PermissionError, subprocess.CalledProcessError): + torch_neuronx_installed = False + return torch_neuronx_installed or envs.VLLM_BUILD_WITH_NEURON + + +def _is_cpu() -> bool: + return VLLM_TARGET_DEVICE == "cpu" + + +def _install_punica() -> bool: + return envs.VLLM_INSTALL_PUNICA_KERNELS + + +def get_hipcc_rocm_version(): + # Run the hipcc --version command + result = subprocess.run(['hipcc', '--version'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True) + + # Check if the command was executed successfully + if result.returncode != 0: + print("Error running 'hipcc --version'") + return None + + # Extract the version using a regular expression + match = re.search(r'HIP version: (\S+)', result.stdout) + if match: + # Return the version string + return match.group(1) + else: + print("Could not find HIP version in the output") + return None + + +def get_neuronxcc_version(): + import sysconfig + site_dir = sysconfig.get_paths()["purelib"] + version_file = os.path.join(site_dir, "neuronxcc", "version", + "__init__.py") + 
+ # Check if the command was executed successfully + with open(version_file, "rt") as fp: + content = fp.read() + + # Extract the version using a regular expression + match = re.search(r"__version__ = '(\S+)'", content) + if match: + # Return the version string + return match.group(1) + else: + raise RuntimeError("Could not find HIP version in the output") + + +def get_mcc_musa_version() -> Version: + """Get the CUDA version from nvcc. + + Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py + """ + assert CUDA_HOME is not None, "CUDA_HOME is not set" + nvcc_output = subprocess.check_output([CUDA_HOME + "/bin/nvcc", "-V"], + universal_newlines=True) + output = nvcc_output.split() + release_idx = output.index("release") + 1 + nvcc_cuda_version = parse(output[release_idx].split(",")[0]) + return nvcc_cuda_version + + +def get_path(*filepath) -> str: + return os.path.join(ROOT_DIR, *filepath) + + +def find_version(filepath: str) -> str: + """Extract version information from the given filepath. 
+ + Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py + """ + with open(filepath) as fp: + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", + fp.read(), re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") + + +def get_vllm_version() -> str: + version = find_version(get_path("vllm", "__init__.py")) + + if _is_cuda(): + cuda_version = str(get_mcc_musa_version()) + if cuda_version != MAIN_CUDA_VERSION: + cuda_version_str = cuda_version.replace(".", "")[:3] + version += f"+cu{cuda_version_str}" + elif _is_musa(): + version += "+musa" + elif _is_hip(): + # Get the HIP version + hipcc_version = get_hipcc_rocm_version() + if hipcc_version != MAIN_CUDA_VERSION: + rocm_version_str = hipcc_version.replace(".", "")[:3] + version += f"+rocm{rocm_version_str}" + elif _is_neuron(): + # Get the Neuron version + neuron_version = str(get_neuronxcc_version()) + if neuron_version != MAIN_CUDA_VERSION: + neuron_version_str = neuron_version.replace(".", "")[:3] + version += f"+neuron{neuron_version_str}" + elif _is_cpu(): + version += "+cpu" + else: + raise RuntimeError("Unknown runtime environment") + + return version + + +def read_readme() -> str: + """Read the README file if present.""" + p = get_path("README.md") + if os.path.isfile(p): + return io.open(get_path("README.md"), "r", encoding="utf-8").read() + else: + return "" + + +def get_requirements() -> List[str]: + """Get Python package dependencies from requirements.txt.""" + + def _read_requirements(filename: str) -> List[str]: + with open(get_path(filename)) as f: + requirements = f.read().strip().split("\n") + resolved_requirements = [] + for line in requirements: + if line.startswith("-r "): + resolved_requirements += _read_requirements(line.split()[1]) + else: + resolved_requirements.append(line) + return resolved_requirements + + if _is_cuda(): + requirements = 
_read_requirements("requirements-cuda.txt") + cuda_major = torch.version.cuda.split(".")[0] + modified_requirements = [] + for req in requirements: + if "vllm-nccl-cu12" in req: + modified_requirements.append( + req.replace("vllm-nccl-cu12", f"vllm-nccl-cu{cuda_major}")) + else: + modified_requirements.append(req) + requirements = modified_requirements + elif _is_musa(): + requirements = _read_requirements("requirements-musa.txt") + elif _is_hip(): + requirements = _read_requirements("requirements-rocm.txt") + elif _is_neuron(): + requirements = _read_requirements("requirements-neuron.txt") + elif _is_cpu(): + requirements = _read_requirements("requirements-cpu.txt") + else: + raise ValueError( + "Unsupported platform, please use CUDA, ROCm, Neuron, or CPU.") + return requirements + + +# ext_modules = [] + +# if _is_cuda() or _is_musa(): +# ext_modules.append(CMakeExtension(name="vllm._moe_C")) + +# if _install_punica(): +# ext_modules.append(CMakeExtension(name="vllm._punica_C")) + +# if not _is_neuron(): +# ext_modules.append(CMakeExtension(name="vllm._C")) + +package_data = { + "vllm": ["py.typed", "model_executor/layers/fused_moe/configs/*.json"] +} +# if envs.VLLM_USE_PRECOMPILED: +# ext_modules = [] +# package_data["vllm"].append("*.so") + +setup( + name="vllm", + version=get_vllm_version(), + author="vLLM Team", + license="Apache 2.0", + description=("A high-throughput and memory-efficient inference and " + "serving engine for LLMs"), + long_description=read_readme(), + long_description_content_type="text/markdown", + url="https://github.com/vllm-project/vllm", + project_urls={ + "Homepage": "https://github.com/vllm-project/vllm", + "Documentation": "https://vllm.readthedocs.io/en/latest/", + }, + classifiers=[ + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "License :: OSI Approved :: Apache Software License", + "Topic :: 
Scientific/Engineering :: Artificial Intelligence", + ], + packages=find_packages(exclude=("benchmarks", "csrc", "docs", "examples", + "tests*")), + python_requires=">=3.8", + install_requires=get_requirements(), + ext_modules=ext_modules, + extras_require={ + "tensorizer": ["tensorizer==2.9.0"], + }, + cmdclass={"build_ext": BuildExtension} if ext_modules else {}, + package_data=package_data, +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/async_engine/api_server_async_engine.py b/tests/async_engine/api_server_async_engine.py new file mode 100644 index 0000000..1be76fd --- /dev/null +++ b/tests/async_engine/api_server_async_engine.py @@ -0,0 +1,50 @@ +"""vllm.entrypoints.api_server with some extra logging for testing.""" +import argparse +from typing import Any, Dict + +import uvicorn +from fastapi.responses import JSONResponse, Response + +import vllm.entrypoints.api_server +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine + +app = vllm.entrypoints.api_server.app + + +class AsyncLLMEngineWithStats(AsyncLLMEngine): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._num_aborts = 0 + + async def abort(self, request_id: str) -> None: + await super().abort(request_id) + self._num_aborts += 1 + + def testing_stats(self) -> Dict[str, Any]: + return {"num_aborted_requests": self._num_aborts} + + +@app.get("/stats") +def stats() -> Response: + """Get the statistics of the engine.""" + return JSONResponse(engine.testing_stats()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser = AsyncEngineArgs.add_cli_args(parser) + args = parser.parse_args() + + engine_args = AsyncEngineArgs.from_cli_args(args) + engine = AsyncLLMEngineWithStats.from_engine_args(engine_args) + 
vllm.entrypoints.api_server.engine = engine + uvicorn.run( + app, + host=args.host, + port=args.port, + log_level="debug", + timeout_keep_alive=vllm.entrypoints.api_server.TIMEOUT_KEEP_ALIVE) diff --git a/tests/async_engine/test_api_server.py b/tests/async_engine/test_api_server.py new file mode 100644 index 0000000..7f57d5c --- /dev/null +++ b/tests/async_engine/test_api_server.py @@ -0,0 +1,108 @@ +import subprocess +import sys +import time +from multiprocessing import Pool +from pathlib import Path + +import pytest +import requests + + +def _query_server(prompt: str, max_tokens: int = 5) -> dict: + response = requests.post("http://localhost:8000/generate", + json={ + "prompt": prompt, + "max_tokens": max_tokens, + "temperature": 0, + "ignore_eos": True + }) + response.raise_for_status() + return response.json() + + +def _query_server_long(prompt: str) -> dict: + return _query_server(prompt, max_tokens=500) + + +@pytest.fixture +def api_server(tokenizer_pool_size: int, engine_use_ray: bool, + worker_use_ray: bool): + script_path = Path(__file__).parent.joinpath( + "api_server_async_engine.py").absolute() + commands = [ + sys.executable, "-u", + str(script_path), "--model", "facebook/opt-125m", "--host", + "127.0.0.1", "--tokenizer-pool-size", + str(tokenizer_pool_size) + ] + if engine_use_ray: + commands.append("--engine-use-ray") + if worker_use_ray: + commands.append("--worker-use-ray") + uvicorn_process = subprocess.Popen(commands) + yield + uvicorn_process.terminate() + + +@pytest.mark.parametrize("tokenizer_pool_size", [0, 2]) +@pytest.mark.parametrize("worker_use_ray", [False, True]) +@pytest.mark.parametrize("engine_use_ray", [False, True]) +def test_api_server(api_server, tokenizer_pool_size: int, worker_use_ray: bool, + engine_use_ray: bool): + """ + Run the API server and test it. + + We run both the server and requests in separate processes. 
+ + We test that the server can handle incoming requests, including + multiple requests at the same time, and that it can handle requests + being cancelled without crashing. + """ + with Pool(32) as pool: + # Wait until the server is ready + prompts = ["warm up"] * 1 + result = None + while not result: + try: + for r in pool.map(_query_server, prompts): + result = r + break + except requests.exceptions.ConnectionError: + time.sleep(1) + + # Actual tests start here + # Try with 1 prompt + for result in pool.map(_query_server, prompts): + assert result + + num_aborted_requests = requests.get( + "http://localhost:8000/stats").json()["num_aborted_requests"] + assert num_aborted_requests == 0 + + # Try with 100 prompts + prompts = ["test prompt"] * 100 + for result in pool.map(_query_server, prompts): + assert result + + with Pool(32) as pool: + # Cancel requests + prompts = ["canceled requests"] * 100 + pool.map_async(_query_server_long, prompts) + time.sleep(0.01) + pool.terminate() + pool.join() + + # check cancellation stats + # give it some times to update the stats + time.sleep(1) + + num_aborted_requests = requests.get( + "http://localhost:8000/stats").json()["num_aborted_requests"] + assert num_aborted_requests > 0 + + # check that server still runs after cancellations + with Pool(32) as pool: + # Try with 100 prompts + prompts = ["test prompt after canceled"] * 100 + for result in pool.map(_query_server, prompts): + assert result diff --git a/tests/async_engine/test_async_llm_engine.py b/tests/async_engine/test_async_llm_engine.py new file mode 100644 index 0000000..b69cdc0 --- /dev/null +++ b/tests/async_engine/test_async_llm_engine.py @@ -0,0 +1,96 @@ +import asyncio +from dataclasses import dataclass + +import pytest + +from vllm.engine.async_llm_engine import AsyncLLMEngine + + +@dataclass +class RequestOutput: + request_id: int + finished: bool = False + + +class MockEngine: + + def __init__(self): + self.step_calls = 0 + self.add_request_calls = 0 + 
self.abort_request_calls = 0 + self.request_id = None + + async def step_async(self): + self.step_calls += 1 + return [RequestOutput( + request_id=self.request_id)] if self.request_id else [] + + async def encode_request_async(self, *args, **kwargs): + pass + + def generate(self, request_id): + self.request_id = request_id + + def stop_generating(self): + self.request_id = None + + def add_request(self, **kwargs): + del kwargs # Unused + self.add_request_calls += 1 + + async def add_request_async(self, **kwargs): + self.add_request_calls += 1 + return + + def abort_request(self, request_id): + del request_id # Unused + self.abort_request_calls += 1 + + def has_unfinished_requests(self): + return self.request_id is not None + + +class MockAsyncLLMEngine(AsyncLLMEngine): + + def _init_engine(self, *args, **kwargs): + return MockEngine() + + +@pytest.mark.asyncio +async def test_new_requests_event(): + engine = MockAsyncLLMEngine(worker_use_ray=False, engine_use_ray=False) + engine.start_background_loop() + await asyncio.sleep(0.01) + assert engine.engine.step_calls == 0 + + await engine.add_request("1", "", None) + await asyncio.sleep(0.01) + assert engine.engine.add_request_calls == 1 + assert engine.engine.step_calls == 1 + + await engine.add_request("2", "", None) + engine.engine.generate("2") + await asyncio.sleep(0) + await asyncio.sleep(0) + assert engine.engine.add_request_calls == 2 + assert engine.engine.step_calls >= 2 + await asyncio.sleep(0.001) + assert engine.engine.step_calls >= 3 + engine.engine.stop_generating() + await asyncio.sleep(0.001) + old_step_calls = engine.engine.step_calls + await asyncio.sleep(0.001) + assert engine.engine.step_calls == old_step_calls + + await engine.add_request("3", "", None) + await asyncio.sleep(0.01) + assert engine.engine.add_request_calls == 3 + assert engine.engine.step_calls == old_step_calls + 1 + await asyncio.sleep(0.01) + assert engine.engine.add_request_calls == 3 + assert engine.engine.step_calls == 
old_step_calls + 1 + + engine = MockAsyncLLMEngine(worker_use_ray=True, engine_use_ray=True) + assert engine.get_model_config() is not None + assert engine.get_tokenizer() is not None + assert engine.get_decoding_config() is not None diff --git a/tests/async_engine/test_chat_template.py b/tests/async_engine/test_chat_template.py new file mode 100644 index 0000000..64bcba6 --- /dev/null +++ b/tests/async_engine/test_chat_template.py @@ -0,0 +1,134 @@ +import os +import pathlib +from dataclasses import dataclass + +import pytest + +from vllm.entrypoints.openai.protocol import ChatCompletionRequest +from vllm.entrypoints.openai.serving_chat import OpenAIServingChat +from vllm.transformers_utils.tokenizer import get_tokenizer + +chatml_jinja_path = pathlib.Path(os.path.dirname(os.path.abspath( + __file__))).parent.parent / "examples/template_chatml.jinja" +assert chatml_jinja_path.exists() + +# Define models, templates, and their corresponding expected outputs +MODEL_TEMPLATE_GENERATON_OUTPUT = [ + ("facebook/opt-125m", None, True, + "HelloHi there!What is the capital of"), + ("facebook/opt-125m", None, False, + "HelloHi there!What is the capital of"), + ("facebook/opt-125m", chatml_jinja_path, True, """<|im_start|>user +Hello<|im_end|> +<|im_start|>assistant +Hi there!<|im_end|> +<|im_start|>user +What is the capital of<|im_end|> +<|im_start|>assistant +"""), + ("facebook/opt-125m", chatml_jinja_path, False, """<|im_start|>user +Hello<|im_end|> +<|im_start|>assistant +Hi there!<|im_end|> +<|im_start|>user +What is the capital of""") +] + +TEST_MESSAGES = [ + { + 'role': 'user', + 'content': 'Hello' + }, + { + 'role': 'assistant', + 'content': 'Hi there!' 
+ }, + { + 'role': 'user', + 'content': 'What is the capital of' + }, +] + + +@dataclass +class MockTokenizer: + chat_template = None + + +@dataclass +class MockServingChat: + tokenizer: MockTokenizer + + +@pytest.mark.asyncio +async def test_load_chat_template(): + # Testing chatml template + tokenizer = MockTokenizer() + mock_serving_chat = MockServingChat(tokenizer) + await OpenAIServingChat._load_chat_template( + mock_serving_chat, chat_template=chatml_jinja_path) + + template_content = tokenizer.chat_template + + # Test assertions + assert template_content is not None + # Hard coded value for template_chatml.jinja + assert template_content == """{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %} +{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}""" # noqa: E501 + + +@pytest.mark.asyncio +async def test_no_load_chat_template_filelike(): + # Testing chatml template + template = "../../examples/does_not_exist" + tokenizer = MockTokenizer() + + mock_serving_chat = MockServingChat(tokenizer) + + with pytest.raises(ValueError, match="looks like a file path"): + await OpenAIServingChat._load_chat_template(mock_serving_chat, + chat_template=template) + + +@pytest.mark.asyncio +async def test_no_load_chat_template_literallike(): + # Testing chatml template + template = "{{ messages }}" + tokenizer = MockTokenizer() + + mock_serving_chat = MockServingChat(tokenizer) + await OpenAIServingChat._load_chat_template(mock_serving_chat, + chat_template=template) + template_content = tokenizer.chat_template + + assert template_content == template + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "model,template,add_generation_prompt,expected_output", + MODEL_TEMPLATE_GENERATON_OUTPUT) +async def test_get_gen_prompt(model, template, add_generation_prompt, + 
expected_output): + # Initialize the tokenizer + tokenizer = get_tokenizer(tokenizer_name=model) + mock_serving_chat = MockServingChat(tokenizer) + await OpenAIServingChat._load_chat_template(mock_serving_chat, + chat_template=template) + + # Create a mock request object using keyword arguments + mock_request = ChatCompletionRequest( + model=model, + messages=TEST_MESSAGES, + add_generation_prompt=add_generation_prompt) + + # Call the function and get the result + result = tokenizer.apply_chat_template( + conversation=mock_request.messages, + tokenize=False, + add_generation_prompt=mock_request.add_generation_prompt) + + # Test assertion + assert result == expected_output, ( + f"The generated prompt does not match the expected output for " + f"model {model} and template {template}") diff --git a/tests/async_engine/test_merge_async_iterators.py b/tests/async_engine/test_merge_async_iterators.py new file mode 100644 index 0000000..ea45352 --- /dev/null +++ b/tests/async_engine/test_merge_async_iterators.py @@ -0,0 +1,41 @@ +import asyncio +from typing import AsyncIterator, Tuple + +import pytest + +from vllm.utils import merge_async_iterators + + +@pytest.mark.asyncio +async def test_merge_async_iterators(): + + async def mock_async_iterator(idx: int) -> AsyncIterator[str]: + try: + while True: + yield f"item from iterator {idx}" + await asyncio.sleep(0.1) + except asyncio.CancelledError: + pass + + iterators = [mock_async_iterator(i) for i in range(3)] + merged_iterator: AsyncIterator[Tuple[int, str]] = merge_async_iterators( + *iterators) + + async def stream_output(generator: AsyncIterator[Tuple[int, str]]): + async for idx, output in generator: + print(f"idx: {idx}, output: {output}") + + task = asyncio.create_task(stream_output(merged_iterator)) + await asyncio.sleep(0.5) + task.cancel() + with pytest.raises(asyncio.CancelledError): + await task + + for iterator in iterators: + try: + await asyncio.wait_for(anext(iterator), 1) + except StopAsyncIteration: + # 
All iterators should be cancelled and print this message. + print("Iterator was cancelled normally") + except (Exception, asyncio.CancelledError) as e: + raise AssertionError() from e diff --git a/tests/async_engine/test_openapi_server_ray.py b/tests/async_engine/test_openapi_server_ray.py new file mode 100644 index 0000000..4b97af8 --- /dev/null +++ b/tests/async_engine/test_openapi_server_ray.py @@ -0,0 +1,157 @@ +# imports for guided decoding tests +import os +import subprocess +import sys +import time + +import openai # use the official client for correctness check +import pytest +# using Ray for overall ease of process management, parallel requests, +# and debugging. +import ray +import requests + +MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds +# any model with a chat template should work here +MODEL_NAME = "facebook/opt-125m" + + +@ray.remote(num_gpus=1) +class ServerRunner: + + def __init__(self, args): + env = os.environ.copy() + env["PYTHONUNBUFFERED"] = "1" + self.proc = subprocess.Popen( + ["python3", "-m", "vllm.entrypoints.openai.api_server"] + args, + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + self._wait_for_server() + + def ready(self): + return True + + def _wait_for_server(self): + # run health check + start = time.time() + while True: + try: + if requests.get( + "http://localhost:8000/health").status_code == 200: + break + except Exception as err: + if self.proc.poll() is not None: + raise RuntimeError("Server exited unexpectedly.") from err + + time.sleep(0.5) + if time.time() - start > MAX_SERVER_START_WAIT_S: + raise RuntimeError( + "Server failed to start in time.") from err + + def __del__(self): + if hasattr(self, "proc"): + self.proc.terminate() + + +@pytest.fixture(scope="session") +def server(): + ray.init() + server_runner = ServerRunner.remote([ + "--model", + MODEL_NAME, + # use half precision for speed and memory savings in CI environment + "--dtype", + "float16", + "--max-model-len", + "2048", 
+ "--enforce-eager", + "--engine-use-ray" + ]) + ray.get(server_runner.ready.remote()) + yield server_runner + ray.shutdown() + + +@pytest.fixture(scope="session") +def client(): + client = openai.AsyncOpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", + ) + yield client + + +@pytest.mark.asyncio +async def test_check_models(server, client: openai.AsyncOpenAI): + models = await client.models.list() + models = models.data + served_model = models[0] + assert served_model.id == MODEL_NAME + assert all(model.root == MODEL_NAME for model in models) + + +@pytest.mark.asyncio +async def test_single_completion(server, client: openai.AsyncOpenAI): + completion = await client.completions.create(model=MODEL_NAME, + prompt="Hello, my name is", + max_tokens=5, + temperature=0.0) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 1 + assert completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + assert completion.choices[0].finish_reason == "length" + assert completion.usage == openai.types.CompletionUsage( + completion_tokens=5, prompt_tokens=6, total_tokens=11) + + # test using token IDs + completion = await client.completions.create( + model=MODEL_NAME, + prompt=[0, 0, 0, 0, 0], + max_tokens=5, + temperature=0.0, + ) + assert completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + + +@pytest.mark.asyncio +async def test_single_chat_session(server, client: openai.AsyncOpenAI): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": "user", + "content": "what is 1+1?" 
+ }] + + # test single completion + chat_completion = await client.chat.completions.create(model=MODEL_NAME, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=5) + assert chat_completion.id is not None + assert chat_completion.choices is not None and len( + chat_completion.choices) == 1 + assert chat_completion.choices[0].message is not None + assert chat_completion.choices[0].logprobs is not None + assert chat_completion.choices[0].logprobs.top_logprobs is not None + assert len(chat_completion.choices[0].logprobs.top_logprobs[0]) == 5 + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 10 + assert message.role == "assistant" + messages.append({"role": "assistant", "content": message.content}) + + # test multi-turn dialogue + messages.append({"role": "user", "content": "express your result in json"}) + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=10, + ) + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 0 diff --git a/tests/async_engine/test_request_tracker.py b/tests/async_engine/test_request_tracker.py new file mode 100644 index 0000000..7b1f4a9 --- /dev/null +++ b/tests/async_engine/test_request_tracker.py @@ -0,0 +1,67 @@ +import pytest + +from vllm.engine.async_llm_engine import RequestTracker +from vllm.outputs import RequestOutput + + +@pytest.mark.asyncio +async def test_request_tracker(): + tracker = RequestTracker() + stream_1 = tracker.add_request("1") + assert tracker.new_requests_event.is_set() + await tracker.wait_for_new_requests() + new, finished = tracker.get_new_and_finished_requests() + assert not tracker.new_requests_event.is_set() + assert len(new) == 1 + assert new[0]["request_id"] == "1" + assert not finished + assert not stream_1.finished + + stream_2 = tracker.add_request("2") + stream_3 = tracker.add_request("3") + assert 
tracker.new_requests_event.is_set() + await tracker.wait_for_new_requests() + new, finished = tracker.get_new_and_finished_requests() + assert not tracker.new_requests_event.is_set() + assert len(new) == 2 + assert new[0]["request_id"] == "2" + assert new[1]["request_id"] == "3" + assert not finished + assert not stream_2.finished + assert not stream_3.finished + + # request_ids must be unique + with pytest.raises(KeyError): + tracker.add_request("1") + assert not tracker.new_requests_event.is_set() + + tracker.abort_request("1") + new, finished = tracker.get_new_and_finished_requests() + assert len(finished) == 1 + assert "1" in finished + assert not new + assert stream_1.finished + + stream_4 = tracker.add_request("4") + tracker.abort_request("4") + assert tracker.new_requests_event.is_set() + await tracker.wait_for_new_requests() + new, finished = tracker.get_new_and_finished_requests() + assert len(finished) == 1 + assert "4" in finished + assert not new + assert stream_4.finished + + stream_5 = tracker.add_request("5") + assert tracker.new_requests_event.is_set() + tracker.process_request_output( + RequestOutput("2", "output", [], [], [], finished=True)) + await tracker.wait_for_new_requests() + new, finished = tracker.get_new_and_finished_requests() + assert not tracker.new_requests_event.is_set() + assert len(finished) == 1 + assert "2" in finished + assert len(new) == 1 + assert new[0]["request_id"] == "5" + assert stream_2.finished + assert not stream_5.finished diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py new file mode 100644 index 0000000..d75279d --- /dev/null +++ b/tests/basic_correctness/test_basic_correctness.py @@ -0,0 +1,50 @@ +"""Compare the short outputs of HF and vLLM when using greedy sampling. + +Run `pytest tests/basic_correctness/test_basic_correctness.py`. 
+""" +import os + +import pytest + +MODELS = [ + "facebook/opt-125m", + "meta-llama/Llama-2-7b-hf", +] +VLLM_ATTENTION_BACKEND = "VLLM_ATTENTION_BACKEND" + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [5]) +@pytest.mark.parametrize("enforce_eager", [False, True]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + enforce_eager: bool, +) -> None: + backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND) + if backend_by_env_var == "FLASHINFER" and enforce_eager is False: + pytest.skip("Skipping non-eager test for FlashInferBackend.") + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner(model, + dtype=dtype, + enforce_eager=enforce_eager, + gpu_memory_utilization=0.7) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/basic_correctness/test_chunked_prefill.py b/tests/basic_correctness/test_chunked_prefill.py new file mode 100644 index 0000000..47d582c --- /dev/null +++ b/tests/basic_correctness/test_chunked_prefill.py @@ -0,0 +1,65 @@ +"""Compare the outputs of HF and vLLM when using greedy sampling. + +It tests chunked prefill. Chunked prefill can be enabled by +enable_chunked_prefill=True. If prefill size exceeds max_num_batched_tokens, +prefill requests are chunked. + +Run `pytest tests/models/test_chunked_prefill.py`. 
+""" +import pytest + +MODELS = [ + "facebook/opt-125m", + "meta-llama/Llama-2-7b-hf", +] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [32]) +@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16]) +@pytest.mark.parametrize("enforce_eager", [False, True]) +# NOTE: Increasing this in this suite will fail CI because we currently cannot +# reset distributed env properly. Use a value > 1 just when you test. +@pytest.mark.parametrize("tensor_parallel_size", [1]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + chunked_prefill_token_size: int, + enforce_eager: bool, + tensor_parallel_size: int, +) -> None: + max_num_seqs = min(chunked_prefill_token_size, 256) + enable_chunked_prefill = False + max_num_batched_tokens = None + if chunked_prefill_token_size != -1: + enable_chunked_prefill = True + max_num_batched_tokens = chunked_prefill_token_size + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner( + model, + dtype=dtype, + max_num_batched_tokens=max_num_batched_tokens, + enable_chunked_prefill=enable_chunked_prefill, + tensor_parallel_size=tensor_parallel_size, + enforce_eager=enforce_eager, + max_num_seqs=max_num_seqs, + ) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/basic_correctness/test_preemption.py b/tests/basic_correctness/test_preemption.py new file mode 100644 index 0000000..ffb0717 
--- /dev/null +++ b/tests/basic_correctness/test_preemption.py @@ -0,0 +1,223 @@ +"""Compare the short outputs of HF and vLLM when using greedy sampling. + +VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 has to be set before running this test. + +Run `VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 +pytest tests/basic_correctness/test_preemption.py`. +""" +import pytest + +from vllm import SamplingParams +from vllm.core.scheduler import (ARTIFICIAL_PREEMPTION_MAX_CNT, + ENABLE_ARTIFICIAL_PREEMPT) + +MODELS = [ + "facebook/opt-125m", +] + +assert ENABLE_ARTIFICIAL_PREEMPT is True, ( + "Use an env var VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1. " + "`VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest " + "tests/basic_correctness/test_preemption.py`") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [96]) +@pytest.mark.parametrize("chunked_prefill_token_size", [16]) +def test_chunked_prefill_recompute( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + chunked_prefill_token_size: int, +) -> None: + """Ensure that chunked prefill works with preemption.""" + max_num_seqs = min(chunked_prefill_token_size, 256) + enable_chunked_prefill = False + max_num_batched_tokens = None + if chunked_prefill_token_size != -1: + enable_chunked_prefill = True + max_num_batched_tokens = chunked_prefill_token_size + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner( + model, + dtype=dtype, + max_num_batched_tokens=max_num_batched_tokens, + enable_chunked_prefill=enable_chunked_prefill, + max_num_seqs=max_num_seqs, + ) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + assert (vllm_model.model.llm_engine.scheduler.artificial_preempt_cnt < + ARTIFICIAL_PREEMPTION_MAX_CNT) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = 
hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [96]) +def test_preemption( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + """By default, recompute preemption is enabled""" + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner( + model, + dtype=dtype, + ) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + assert (vllm_model.model.llm_engine.scheduler.artificial_preempt_cnt < + ARTIFICIAL_PREEMPTION_MAX_CNT) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [96]) +@pytest.mark.parametrize("beam_width", [4]) +def test_swap( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + beam_width: int, +) -> None: + """Use beam search enables swapping.""" + example_prompts = example_prompts[:1] + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_beam_search(example_prompts, beam_width, + max_tokens) + del hf_model + + vllm_model = vllm_runner(model, dtype=dtype, swap_space=10) + vllm_outputs = 
vllm_model.generate_beam_search(example_prompts, beam_width, + max_tokens) + assert (vllm_model.model.llm_engine.scheduler.artificial_preempt_cnt < + ARTIFICIAL_PREEMPTION_MAX_CNT) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, _ = hf_outputs[i] + vllm_output_ids, _ = vllm_outputs[i] + assert len(hf_output_ids) == len(vllm_output_ids) + for j in range(len(hf_output_ids)): + assert hf_output_ids[j] == vllm_output_ids[j], ( + f"Test{i} output{j}:\nHF: {hf_output_ids}\n" + f"vLLM: {vllm_output_ids}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [96]) +@pytest.mark.parametrize("beam_width", [4]) +def test_swap_infeasible( + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + beam_width: int, +) -> None: + """Verify infeasible swap request will be ignored.""" + BLOCK_SIZE = 16 + prefill_blocks = 2 + decode_blocks = max_tokens // BLOCK_SIZE + example_prompts = example_prompts[:1] + + vllm_model = vllm_runner( + model, + dtype=dtype, + swap_space=10, + block_size=BLOCK_SIZE, + # Since beam search have more than 1 sequence, prefill + decode blocks + # are not enough to finish. + num_gpu_blocks_override=prefill_blocks + decode_blocks, + max_model_len=(prefill_blocks + decode_blocks) * BLOCK_SIZE, + ) + sampling_params = SamplingParams(n=beam_width, + use_beam_search=True, + temperature=0.0, + max_tokens=max_tokens, + ignore_eos=True) + req_outputs = vllm_model.model.generate( + example_prompts, + sampling_params=sampling_params, + ) + assert (vllm_model.model.llm_engine.scheduler.artificial_preempt_cnt < + ARTIFICIAL_PREEMPTION_MAX_CNT) + del vllm_model + # Verify the request is ignored and not hang. 
+ assert req_outputs[0].outputs[0].finish_reason == "length" + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [96]) +def test_preemption_infeasible( + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + """Verify infeasible preemption request will be ignored.""" + BLOCK_SIZE = 16 + prefill_blocks = 2 + decode_blocks = max_tokens // BLOCK_SIZE + vllm_model = vllm_runner( + model, + dtype=dtype, + block_size=BLOCK_SIZE, + # Not enough gpu blocks to complete a single sequence. + # preemption should happen, and the sequence should be + # ignored instead of hanging forever. + num_gpu_blocks_override=prefill_blocks + decode_blocks // 2, + max_model_len=((prefill_blocks + decode_blocks // 2) * BLOCK_SIZE), + ) + sampling_params = SamplingParams(max_tokens=max_tokens, ignore_eos=True) + req_outputs = vllm_model.model.generate( + example_prompts, + sampling_params=sampling_params, + ) + + assert (vllm_model.model.llm_engine.scheduler.artificial_preempt_cnt < + ARTIFICIAL_PREEMPTION_MAX_CNT) + del vllm_model + # Verify the request is ignored and not hang. 
+ for req_output in req_outputs: + outputs = req_output.outputs + assert len(outputs) == 1 + assert outputs[0].finish_reason == "length" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..6713269 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,417 @@ +import contextlib +import gc +import os +from typing import List, Optional, Tuple + +import pytest +import torch +from PIL import Image +from transformers import (AutoModelForCausalLM, AutoProcessor, + LlavaForConditionalGeneration) + +from vllm import LLM, SamplingParams +from vllm.config import TokenizerPoolConfig, VisionLanguageConfig +from vllm.distributed import destroy_model_parallel +from vllm.sequence import MultiModalData +from vllm.transformers_utils.tokenizer import get_tokenizer + +_TEST_DIR = os.path.dirname(__file__) +_TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")] +_LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")] + +# Multi modal related +_PIXEL_VALUES_FILES = [ + os.path.join(_TEST_DIR, "images", filename) for filename in + ["stop_sign_pixel_values.pt", "cherry_blossom_pixel_values.pt"] +] +_IMAGE_FEATURES_FILES = [ + os.path.join(_TEST_DIR, "images", filename) for filename in + ["stop_sign_image_features.pt", "cherry_blossom_image_features.pt"] +] +_IMAGE_FILES = [ + os.path.join(_TEST_DIR, "images", filename) + for filename in ["stop_sign.jpg", "cherry_blossom.jpg"] +] +_IMAGE_PROMPTS = [ + "\nUSER: What's the content of the image?\nASSISTANT:", + "\nUSER: What is the season?\nASSISTANT:" +] +assert len(_PIXEL_VALUES_FILES) == len(_IMAGE_FEATURES_FILES) == len( + _IMAGE_FILES) == len(_IMAGE_PROMPTS) + + +def _read_prompts(filename: str) -> List[str]: + with open(filename, "r") as f: + prompts = f.readlines() + return prompts + + +def cleanup(): + destroy_model_parallel() + with contextlib.suppress(AssertionError): + torch.distributed.destroy_process_group() + gc.collect() + torch.cuda.empty_cache() + + +@pytest.fixture() +def 
should_do_global_cleanup_after_test(request) -> bool: + """Allow subdirectories to skip global cleanup by overriding this fixture. + This can provide a ~10x speedup for non-GPU unit tests since they don't need + to initialize torch. + """ + + if request.node.get_closest_marker("skip_global_cleanup"): + return False + + return True + + +@pytest.fixture(autouse=True) +def cleanup_fixture(should_do_global_cleanup_after_test: bool): + yield + if should_do_global_cleanup_after_test: + cleanup() + + +@pytest.fixture(scope="session") +def hf_image_prompts() -> List[str]: + return _IMAGE_PROMPTS + + +@pytest.fixture(scope="session") +def hf_images() -> List[Image.Image]: + return [Image.open(filename) for filename in _IMAGE_FILES] + + +@pytest.fixture() +def vllm_images(request) -> "torch.Tensor": + vision_language_config = request.getfixturevalue("model_and_config")[1] + all_images = [] + if vision_language_config.image_input_type == ( + VisionLanguageConfig.ImageInputType.IMAGE_FEATURES): + filenames = _IMAGE_FEATURES_FILES + else: + filenames = _PIXEL_VALUES_FILES + for filename in filenames: + all_images.append(torch.load(filename)) + return torch.concat(all_images, dim=0) + + +@pytest.fixture() +def vllm_image_prompts(request) -> List[str]: + vision_language_config = request.getfixturevalue("model_and_config")[1] + return [ + "" * (vision_language_config.image_feature_size - 1) + p + for p in _IMAGE_PROMPTS + ] + + +@pytest.fixture +def example_prompts() -> List[str]: + prompts = [] + for filename in _TEST_PROMPTS: + prompts += _read_prompts(filename) + return prompts + + +@pytest.fixture +def example_long_prompts() -> List[str]: + prompts = [] + for filename in _LONG_PROMPTS: + prompts += _read_prompts(filename) + return prompts + + +_STR_DTYPE_TO_TORCH_DTYPE = { + "half": torch.half, + "bfloat16": torch.bfloat16, + "float": torch.float, +} + +_VISION_LANGUAGE_MODELS = { + "llava-hf/llava-1.5-7b-hf": LlavaForConditionalGeneration, +} + + +class HfRunner: + + def 
__init__( + self, + model_name: str, + tokenizer_name: Optional[str] = None, + dtype: str = "half", + ) -> None: + assert dtype in _STR_DTYPE_TO_TORCH_DTYPE + torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype] + self.model_name = model_name + if model_name not in _VISION_LANGUAGE_MODELS: + self.model = AutoModelForCausalLM.from_pretrained( + model_name, + torch_dtype=torch_dtype, + trust_remote_code=True, + ).cuda() + self.processor = None + else: + self.model = _VISION_LANGUAGE_MODELS[model_name].from_pretrained( + model_name, + torch_dtype=torch_dtype, + trust_remote_code=True, + ).cuda() + self.processor = AutoProcessor.from_pretrained( + model_name, + torch_dtype=torch_dtype, + ) + if tokenizer_name is None: + tokenizer_name = model_name + self.tokenizer = get_tokenizer(tokenizer_name, trust_remote_code=True) + + def generate( + self, + prompts: List[str], + images: Optional[List[Image.Image]] = None, + **kwargs, + ) -> List[Tuple[List[int], str]]: + outputs: List[Tuple[List[int], str]] = [] + if images: + assert len(prompts) == len(images) + for i, prompt in enumerate(prompts): + if self.model_name not in _VISION_LANGUAGE_MODELS: + input_ids = self.tokenizer(prompt, + return_tensors="pt").input_ids + inputs = {"input_ids": input_ids.cuda()} + else: + image = images[i] if images else None + inputs = self.processor(text=prompt, + images=image, + return_tensors="pt") + inputs = { + key: value.cuda() if value is not None else None + for key, value in inputs.items() + } + output_ids = self.model.generate( + **inputs, + use_cache=True, + **kwargs, + ) + output_str = self.tokenizer.batch_decode( + output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, + ) + output_ids = output_ids.cpu().tolist() + outputs.append((output_ids, output_str)) + return outputs + + def generate_greedy( + self, + prompts: List[str], + max_tokens: int, + images: Optional["torch.Tensor"] = None, + ) -> List[Tuple[List[int], str]]: + outputs = self.generate(prompts, + 
do_sample=False, + max_new_tokens=max_tokens, + images=images) + for i in range(len(outputs)): + output_ids, output_str = outputs[i] + outputs[i] = (output_ids[0], output_str[0]) + return outputs + + def generate_beam_search( + self, + prompts: List[str], + beam_width: int, + max_tokens: int, + ) -> List[Tuple[List[int], str]]: + outputs = self.generate(prompts, + do_sample=False, + max_new_tokens=max_tokens, + num_beams=beam_width, + num_return_sequences=beam_width) + for i in range(len(outputs)): + output_ids, output_str = outputs[i] + for j in range(len(output_ids)): + output_ids[j] = [ + x for x in output_ids[j] + if x != self.tokenizer.pad_token_id + ] + outputs[i] = (output_ids, output_str) + return outputs + + def generate_greedy_logprobs( + self, + prompts: List[str], + max_tokens: int, + ) -> List[List[torch.Tensor]]: + all_logprobs = [] + for prompt in prompts: + input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids + output = self.model.generate( + input_ids.cuda(), + use_cache=True, + do_sample=False, + max_new_tokens=max_tokens, + output_hidden_states=True, + return_dict_in_generate=True, + ) + seq_logprobs = [] + for hidden_states in output.hidden_states: + last_hidden_states = hidden_states[-1][0] + logits = torch.matmul( + last_hidden_states, + self.model.get_output_embeddings().weight.t(), + ) + if self.model.get_output_embeddings().bias is not None: + logits += self.model.get_output_embeddings( + ).bias.unsqueeze(0) + logprobs = torch.nn.functional.log_softmax(logits, + dim=-1, + dtype=torch.float32) + seq_logprobs.append(logprobs) + all_logprobs.append(seq_logprobs) + return all_logprobs + + def __del__(self): + del self.model + cleanup() + + +@pytest.fixture +def hf_runner(): + return HfRunner + + +class VllmRunner: + + def __init__( + self, + model_name: str, + tokenizer_name: Optional[str] = None, + # Use smaller max model length, otherwise bigger model cannot run due + # to kv cache size limit. 
+ max_model_len=1024, + dtype: str = "half", + disable_log_stats: bool = True, + tensor_parallel_size: int = 1, + block_size: int = 16, + enable_chunked_prefill: bool = False, + swap_space=4, + **kwargs, + ) -> None: + self.model = LLM( + model=model_name, + tokenizer=tokenizer_name, + trust_remote_code=True, + dtype=dtype, + swap_space=swap_space, + disable_log_stats=disable_log_stats, + tensor_parallel_size=tensor_parallel_size, + max_model_len=max_model_len, + block_size=block_size, + enable_chunked_prefill=enable_chunked_prefill, + **kwargs, + ) + + def generate( + self, + prompts: List[str], + sampling_params: SamplingParams, + images: Optional["torch.Tensor"] = None, + ) -> List[Tuple[List[int], str]]: + if images is not None: + assert len(prompts) == images.shape[0] + req_outputs = self.model.generate( + prompts, + sampling_params=sampling_params, + multi_modal_data=MultiModalData(type=MultiModalData.Type.IMAGE, + data=images) + if images is not None else None) + outputs = [] + for req_output in req_outputs: + prompt_str = req_output.prompt + prompt_ids = req_output.prompt_token_ids + req_sample_output_ids = [] + req_sample_output_strs = [] + for sample in req_output.outputs: + output_str = sample.text + output_ids = sample.token_ids + req_sample_output_ids.append(prompt_ids + output_ids) + req_sample_output_strs.append(prompt_str + output_str) + outputs.append((req_sample_output_ids, req_sample_output_strs)) + return outputs + + def generate_w_logprobs( + self, + prompts: List[str], + sampling_params: SamplingParams, + ) -> List[Tuple[List[int], str]]: + assert sampling_params.logprobs is not None + + req_outputs = self.model.generate(prompts, + sampling_params=sampling_params) + outputs = [] + for req_output in req_outputs: + for sample in req_output.outputs: + output_str = sample.text + output_ids = sample.token_ids + output_logprobs = sample.logprobs + outputs.append((output_ids, output_str, output_logprobs)) + return outputs + + def generate_greedy( + 
self, + prompts: List[str], + max_tokens: int, + images: Optional[torch.Tensor] = None, + ) -> List[Tuple[List[int], str]]: + greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens) + outputs = self.generate(prompts, greedy_params, images=images) + return [(output_ids[0], output_str[0]) + for output_ids, output_str in outputs] + + def generate_greedy_logprobs( + self, + prompts: List[str], + max_tokens: int, + num_logprobs: int, + ) -> List[Tuple[List[int], str]]: + greedy_logprobs_params = SamplingParams(temperature=0.0, + max_tokens=max_tokens, + logprobs=num_logprobs) + outputs = self.generate_w_logprobs(prompts, greedy_logprobs_params) + + return [(output_ids, output_str, output_logprobs) + for output_ids, output_str, output_logprobs in outputs] + + def generate_beam_search( + self, + prompts: List[str], + beam_width: int, + max_tokens: int, + ) -> List[Tuple[List[int], str]]: + beam_search_params = SamplingParams(n=beam_width, + use_beam_search=True, + temperature=0.0, + max_tokens=max_tokens) + outputs = self.generate(prompts, beam_search_params) + return outputs + + def __del__(self): + del self.model + cleanup() + + +@pytest.fixture(scope="session") +def vllm_runner(): + return VllmRunner + + +def get_tokenizer_pool_config(tokenizer_group_type): + if tokenizer_group_type is None: + return None + if tokenizer_group_type == "ray": + return TokenizerPoolConfig(pool_size=1, + pool_type="ray", + extra_config={}) + raise ValueError(f"Unknown tokenizer_group_type: {tokenizer_group_type}") diff --git a/tests/core/__init__.py b/tests/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/core/block/__init__.py b/tests/core/block/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/core/block/conftest.py b/tests/core/block/conftest.py new file mode 100644 index 0000000..0464d6a --- /dev/null +++ b/tests/core/block/conftest.py @@ -0,0 +1,12 @@ +import pytest + + +@pytest.fixture() +def 
should_do_global_cleanup_after_test() -> bool: + """Disable the global cleanup fixture for tests in this directory. This + provides a ~10x speedup for unit tests that don't load a model to GPU. + + This requires that tests in this directory clean up after themselves if they + use the GPU. + """ + return False diff --git a/tests/core/block/e2e/conftest.py b/tests/core/block/e2e/conftest.py new file mode 100644 index 0000000..1d99cb5 --- /dev/null +++ b/tests/core/block/e2e/conftest.py @@ -0,0 +1,41 @@ +import pytest + +from tests.conftest import cleanup +from vllm import LLM +from vllm.model_executor.utils import set_random_seed + + +@pytest.fixture +def baseline_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs, + baseline_llm_kwargs, seed): + return create_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs, + baseline_llm_kwargs, seed) + + +@pytest.fixture +def test_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs, + test_llm_kwargs, seed): + return create_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs, + test_llm_kwargs, seed) + + +def create_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs, + distinct_llm_kwargs, seed): + kwargs = { + **common_llm_kwargs, + **per_test_common_llm_kwargs, + **distinct_llm_kwargs, + } + + def generator_inner(): + llm = LLM(**kwargs) + + set_random_seed(seed) + + yield llm + del llm + cleanup() + + for llm in generator_inner(): + yield llm + del llm diff --git a/tests/core/block/e2e/test_correctness.py b/tests/core/block/e2e/test_correctness.py new file mode 100644 index 0000000..c3666da --- /dev/null +++ b/tests/core/block/e2e/test_correctness.py @@ -0,0 +1,455 @@ +from itertools import cycle + +import pytest + +from vllm import SamplingParams + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + "model": "facebook/opt-125m", + + # skip cuda graph creation for fast test. 
+ "enforce_eager": True, + + # Allow only 5 sequences of ~1024 tokens in worst case. + "block_size": 16, + "num_gpu_blocks_override": 5 * (64 + 1), + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{ + "use_v2_block_manager": False +}]) +@pytest.mark.parametrize("test_llm_kwargs", [{"use_v2_block_manager": True}]) +@pytest.mark.parametrize("batch_size", [10]) +@pytest.mark.parametrize("seed", [1]) +def test_v1_v2_greedy_equality_with_preemption(baseline_llm_generator, + test_llm_generator, batch_size): + """Verify block manager v2 produces same outputs as block manager v1, even + when there is preemption. + + This constructs two LLM, each with limited number of GPU blocks. The limit + is decided such that as the sequences in the batch grow, sequences must be + preempted and removed from cache. + + If the output token ids are equivalent, then we have confidence that the KV + cache is not corrupted in the v2 block manager. + + NOTE: We want a significant number of generated tokens so that any incorrect + KV mapping has time to build up error. + """ + output_len = 1024 + temperature = 0.0 + + # We want to ensure equality even with preemption. + # We force the total block size to be 1 + cdiv(output_len, block_size) + # so that only one sequence can fit at a time (once the sequences grow). 
+ + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + print('Getting token ids from block manager v1') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids from block manager v2') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + "model": "facebook/opt-125m", + + # skip cuda graph creation for fast test. + "enforce_eager": True, + + # Use a large block size to trigger more copy-on-writes. + "block_size": 32, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{ + "use_v2_block_manager": False +}]) +@pytest.mark.parametrize("test_llm_kwargs", [{"use_v2_block_manager": True}]) +@pytest.mark.parametrize("batch_size", [10]) +@pytest.mark.parametrize("seed", [1]) +def test_v1_v2_greedy_equality_with_cow(baseline_llm_generator, + test_llm_generator, batch_size): + """Verify beam search equality with block manager v1 and v2. + + This requires copy-on-writes; if the v1 and v2 output is the same, then + we have some confidence cow is working. 
+ """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + use_beam_search=True, + best_of=2, + ) + + print('Getting token ids from block manager v1') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids from block manager v2') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + "model": "facebook/opt-125m", + + # Our prompts will generate 128 tokens; since the prompts themselves are + # small, we don't need much KV space beyond 128. + "max_model_len": 160, + + # skip cuda graph creation for fast test. + "enforce_eager": True, + + # Lookahead scheduling only supported in v2 block manager. + "use_v2_block_manager": True, + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + { + "block_size": 16, + + # Allow only 2 sequences of ~128 tokens in worst case. + # Note 8 = 128/block_size + "num_gpu_blocks_override": 2 * (8 + 1), + }, + { + "block_size": 8, + + # Allow only 2 sequences of ~128 tokens in worst case. 
+ # Note 16 = 128/block_size + "num_gpu_blocks_override": 2 * (16 + 1), + } + ]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{ + "num_lookahead_slots": 0, +}]) +@pytest.mark.parametrize( + "test_llm_kwargs", + [{ + # We run one test with block_size < lookahead_slots, one test with + # block_size > lookahead_slots + "num_lookahead_slots": 10, + }]) +@pytest.mark.parametrize("batch_size", [4]) +@pytest.mark.parametrize("seed", [1]) +def test_lookahead_greedy_equality_with_preemption(baseline_llm_generator, + test_llm_generator, + batch_size): + """Verify vLLM produces the same output with greedy sampling, when lookahead + scheduling is used vs. not. + + Lookahead scheduling is not expected to modify the output, as it simply + allocates empty slots ahead of the known token ids in a sliding fashion. + + This test constrains the total number of blocks to force preemption. It also + varies the block size so that the lookahead size is less than and greater + than the block size. + """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + print('Getting token ids without lookahead scheduling') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids with lookahead scheduling') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [ + { + # Use a small model for a fast test. 
+ "model": "facebook/opt-125m", + + # skip cuda graph creation for fast test. + "enforce_eager": True, + "enable_chunked_prefill": True, + "max_num_batched_tokens": 2, + "max_num_seqs": 2, + }, + ]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [ + { + "use_v2_block_manager": False, + }, +]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "use_v2_block_manager": True, + "num_lookahead_slots": 0, + }, + { + "use_v2_block_manager": True, + "num_lookahead_slots": 5, + }, +]) +@pytest.mark.parametrize("batch_size", [4]) +@pytest.mark.parametrize("seed", [1]) +def test_chunked_prefill_block_manager_v2(baseline_llm_generator, + test_llm_generator, batch_size): + """Verify that chunked prefill works with BlockManagerV2, with and without + lookahead scheduling. + """ + output_len = 32 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + print('Getting token ids with BlockManagerV1') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids with BlockManagerV2') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + "model": "facebook/opt-125m", + + # skip cuda graph creation for fast test. + "enforce_eager": True, + + # Allow only 5 sequences of ~1024 tokens in worst case. 
+ "block_size": 16, + "num_gpu_blocks_override": 5 * (64 + 1), + + # Enable prefill cache + "enable_prefix_caching": True, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{ + "use_v2_block_manager": False +}]) +@pytest.mark.parametrize("test_llm_kwargs", [{"use_v2_block_manager": True}]) +@pytest.mark.parametrize("batch_size", [10]) +@pytest.mark.parametrize("seed", [1]) +def test_v1_v2_greedy_equality_prefix_caching_enabled_with_preemption( + baseline_llm_generator, test_llm_generator, batch_size): + """Verify block manager v2 produces same outputs as block manager v1, even + when there is preemption. + + This constructs two LLM, each with limited number of GPU blocks. The limit + is decided such that as the sequences in the batch grow, sequences must be + preempted and removed from cache. + + If the output token ids are equivalent, then we have confidence that the KV + cache is not corrupted in the v2 block manager. + + NOTE: We want a significant number of generated tokens so that any incorrect + KV mapping has time to build up error. + """ + output_len = 1024 + temperature = 0.0 + + # We want to ensure equality even with preemption. + # We force the total block size to be 1 + cdiv(output_len, block_size) + # so that only one sequence can fit at a time (once the sequences grow). 
+ + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + print('Getting token ids from block manager v1') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids from block manager v2') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + "model": "facebook/opt-125m", + + # skip cuda graph creation for fast test. + "enforce_eager": True, + + # Allow only 5 sequences of ~1024 tokens in worst case. + "block_size": 16, + "num_gpu_blocks_override": 5 * (64 + 1), + + # Test APC in v2 block + "use_v2_block_manager": True, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{ + "enable_prefix_caching": False +}]) +@pytest.mark.parametrize("test_llm_kwargs", [{"enable_prefix_caching": True}]) +@pytest.mark.parametrize("batch_size", [10]) +@pytest.mark.parametrize("seed", [1]) +def test_auto_prefix_caching_with_preemption(baseline_llm_generator, + test_llm_generator, batch_size): + """Verify block manager v2 with auto prefix caching enabled produces same + outputs as auto prefix caching disabled, even when there is preemption. + + This constructs two LLM, each with limited number of GPU blocks. The limit + is decided such that as the sequences in the batch grow, sequences must be + preempted and removed from cache. 
+ + If the output token ids are equivalent, then we have confidence that auto + prefix caching itself at least doesn't cause result errors. + """ + output_len = 1024 + temperature = 0.0 + + # We want to ensure equality even with preemption. + # We force the total block size to be 1 + cdiv(output_len, block_size) + # so that only one sequence can fit at a time (once the sequences grow). + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + print('Getting token ids with APC disabled') + baseline_token_ids = get_token_ids_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + print('Getting token ids with APC enabled') + test_token_ids = get_token_ids_from_llm_generator(test_llm_generator, + prompts, sampling_params) + + for expected_token_ids, actual_token_ids in zip(baseline_token_ids, + test_token_ids): + assert expected_token_ids == actual_token_ids + + assert baseline_token_ids == test_token_ids + + +def get_token_ids_from_llm_generator(llm_generator, prompts, sampling_params): + for llm in llm_generator: + outputs = llm.generate(prompts, sampling_params, use_tqdm=True) + token_ids = [output.outputs[0].token_ids for output in outputs] + del llm + + return token_ids diff --git a/tests/core/block/test_block_manager_v2.py b/tests/core/block/test_block_manager_v2.py new file mode 100644 index 0000000..1e8e4cc --- /dev/null +++ b/tests/core/block/test_block_manager_v2.py @@ -0,0 +1,103 @@ +import pytest + +from vllm.core.block_manager_v2 import BlockSpaceManagerV2 +from vllm.core.interfaces import AllocStatus +from vllm.sequence import Logprob, SequenceStatus +from vllm.utils import chunk_list + +from ..utils import create_seq_group + + 
+@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("num_gpu_blocks", [8, 40, 80]) +@pytest.mark.parametrize("num_seqs_per_group", [1, 4]) +@pytest.mark.parametrize("watermark", [0.0, 0.5]) +def test_can_allocate_seq_group(block_size: int, num_seqs_per_group: int, + num_gpu_blocks: int, watermark: float): + block_manager = BlockSpaceManagerV2( + block_size=block_size, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + watermark=watermark, + ) + num_watermark_blocks = int(watermark * num_gpu_blocks) + + num_output_blocks_per_seq = 1 + + # NOTE: This should be num_output_blocks_per_seq * num_seqs_per_group, but + # the current implementation assumes all seqs are new prompts / don't have + # different output lens. + num_output_blocks = num_output_blocks_per_seq + + for num_prompt_blocks in range(1, num_gpu_blocks - num_output_blocks): + seq_group = create_seq_group( + seq_prompt_len=block_size * num_prompt_blocks, + seq_output_lens=[ + block_size * num_output_blocks_per_seq + for _ in range(num_seqs_per_group) + ], + ) + + assert num_prompt_blocks + num_output_blocks <= num_gpu_blocks + + can_allocate_result = block_manager.can_allocate(seq_group) + + num_required_blocks = num_prompt_blocks + num_output_blocks + + if num_gpu_blocks - num_required_blocks < num_watermark_blocks: + assert can_allocate_result == AllocStatus.NEVER + elif num_gpu_blocks >= num_required_blocks: + assert can_allocate_result == AllocStatus.OK + else: + assert can_allocate_result == AllocStatus.LATER + + +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("prompt_len", [1, 7, 8]) +@pytest.mark.parametrize("num_slots_to_append", [1, 8, 129]) +@pytest.mark.parametrize("num_lookahead_slots", [0, 10]) +def test_append_slots(block_size, prompt_len, num_slots_to_append, + num_lookahead_slots): + """Verify append_slots consumes the correct number of blocks from the block + table. 
+ """ + + num_gpu_blocks = 1024 + watermark = 0.1 + block_manager = BlockSpaceManagerV2( + block_size=block_size, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=0, + watermark=watermark, + ) + + seq_group = create_seq_group( + seq_prompt_len=prompt_len, + seq_output_lens=[0], + ) + + # Allocate seq + assert block_manager.can_allocate(seq_group) + block_manager.allocate(seq_group) + + # Seq seq to RUNNING + seq = seq_group.get_seqs()[0] + seq.status = SequenceStatus.RUNNING + + # Append tokens to the sequeqnce + for token_id in range(num_slots_to_append): + seq.append_token_id(token_id, {token_id: Logprob(0.0)}) + + # Append slots for new tokens and lookahead slots. + free_blocks_before_append = block_manager.get_num_free_gpu_blocks() + block_manager.append_slots(seq, num_lookahead_slots) + num_consumed_blocks = (free_blocks_before_append - + block_manager.get_num_free_gpu_blocks()) + + # Expect consumed blocks to be new blocks required to support the new slots. + expected_consumed_blocks = len( + chunk_list( + list( + range(prompt_len + num_slots_to_append + num_lookahead_slots)), + block_size)) - len(chunk_list(list(range(prompt_len)), block_size)) + assert num_consumed_blocks == expected_consumed_blocks diff --git a/tests/core/block/test_block_table.py b/tests/core/block/test_block_table.py new file mode 100644 index 0000000..3481d6b --- /dev/null +++ b/tests/core/block/test_block_table.py @@ -0,0 +1,575 @@ +import pytest + +from vllm.core.block.block_table import BlockTable +from vllm.core.block.cpu_gpu_block_allocator import CpuGpuBlockAllocator +from vllm.utils import Device, cdiv, chunk_list + + +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +def test_allocate_naive(block_size: int, sequence_len: int): + """Test the allocation of blocks using the naive allocator. + + This test creates a CpuGpuBlockAllocator with the specified block size and + number of blocks. 
It then allocates multiple BlockTables with varying + sequence lengths and verifies that the number of free blocks decreases as + expected after each allocation. + """ + assert block_size > 1 + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type="naive", + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + num_blocks_per_alloc = len(list(chunk_list(token_ids, block_size))) + + block_tables = [] + for i in range(5): + assert allocator.get_num_free_blocks( + device=Device.GPU) == num_gpu_blocks - i * num_blocks_per_alloc + + block_tables.append( + BlockTable( + block_size=block_size, + block_allocator=allocator, + )) + block_tables[-1].allocate(token_ids=token_ids, device=Device.GPU) + + +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +def test_allocate_prefix_caching(block_size: int, sequence_len: int): + """Test the allocation of blocks using the prefix caching allocator. + + This test creates a CpuGpuBlockAllocator with the specified block size and + number of blocks, using the prefix caching allocator. It then allocates + multiple BlockTables with varying sequence lengths and verifies that the + number of free blocks decreases as expected after each allocation. + + The test expects all sequences to share allocations, except for their last + block, which may be mutable. It calculates the expected number of immutable + and mutable blocks per allocation based on the sequence length and block + size. 
+ """ + assert block_size > 1 + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type="prefix_caching", + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + chunked_tokens = list(chunk_list(token_ids, block_size)) + num_mutable_blocks_per_alloc = 0 if len( + chunked_tokens[-1]) == block_size else 1 + num_immutable_blocks_per_alloc = len( + chunked_tokens) - num_mutable_blocks_per_alloc + + block_tables = [] + for alloc_i in range(1, 6): + + block_tables.append( + BlockTable( + block_size=block_size, + block_allocator=allocator, + )) + block_tables[-1].allocate(token_ids=token_ids, device=Device.GPU) + + # Expect all sequences to share allocations, except for their last block + # (which may be mutable). + assert allocator.get_num_free_blocks( + device=Device.GPU) == num_gpu_blocks - ( + num_immutable_blocks_per_alloc + num_mutable_blocks_per_alloc * + (alloc_i)) + + +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +@pytest.mark.parametrize("device", ["cpu", "gpu"]) +def test_allocate_free(block_size: int, sequence_len: int, allocator_type: str, + device: str): + """Test the allocation and freeing of blocks using different allocators and + devices. + + This test creates a CpuGpuBlockAllocator with the specified block size, + number of blocks, allocator type, and device. It then allocates a BlockTable + multiple times with the same sequence and verifies that the number of free + blocks remains consistent after each allocation and freeing. 
+ """ + device = Device[device.upper()] + + num_device_blocks = 1024 + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_device_blocks, + num_cpu_blocks=num_device_blocks, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + num_blocks_per_alloc = len(list(chunk_list(token_ids, block_size))) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + for i in range(5): + block_table.allocate(token_ids=token_ids, device=device) + assert allocator.get_num_free_blocks( + device) == num_device_blocks - num_blocks_per_alloc + assert all(block_id is not None + for block_id in block_table.physical_block_ids) + + block_table.free() + assert allocator.get_num_free_blocks(device) == num_device_blocks + + +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("append_len", [1, 16, 129]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_append_token_ids_allocation(block_size: int, sequence_len: int, + append_len: int, allocator_type: str): + """Test the allocation behavior when appending token IDs to a BlockTable. + + This test creates a CpuGpuBlockAllocator with the specified block size, + number of blocks, and allocator type. It then allocates a BlockTable with an + initial sequence and appends additional token IDs to it. The test verifies + that the number of allocated blocks before and after appending matches the + expected values. 
+ """ + + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + token_ids_to_append = list(range(append_len)) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + num_expected_blocks_before_append = len( + list(chunk_list(token_ids, block_size))) + num_expected_appended_blocks = len( + list(chunk_list(token_ids + token_ids_to_append, + block_size))) - num_expected_blocks_before_append + + block_table.allocate(token_ids=token_ids, device=Device.GPU) + + assert len( + block_table.physical_block_ids) == num_expected_blocks_before_append + block_table.append_token_ids(token_ids_to_append) + assert len( + block_table.physical_block_ids + ) == num_expected_blocks_before_append + num_expected_appended_blocks + + +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("num_empty_slots", [1, 16, 129]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_ensure_num_empty_slots_allocation(block_size: int, sequence_len: int, + num_empty_slots: int, + allocator_type: str): + """Test the allocation behavior when ensuring a certain number of empty + slots in a BlockTable. + + This test creates a CpuGpuBlockAllocator with the specified block size, + number of blocks, and allocator type. It then allocates a BlockTable with an + initial sequence and ensures a certain number of empty slots. The test + verifies that the number of allocated blocks before and after ensuring empty + slots matches the expected values. It also checks that filling up the empty + slots does not consume additional blocks. 
+ """ + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + num_expected_blocks_before_append = len( + list(chunk_list(token_ids, block_size))) + num_expected_appended_blocks = len( + list(chunk_list(token_ids + [-1] * num_empty_slots, + block_size))) - num_expected_blocks_before_append + + block_table.allocate(token_ids=token_ids, device=Device.GPU) + + # Assert that the empty slots consume the expected number of additional + # blocks. + assert len( + block_table.physical_block_ids) == num_expected_blocks_before_append + block_table.ensure_num_empty_slots(num_empty_slots) + assert len( + block_table.physical_block_ids + ) == num_expected_blocks_before_append + num_expected_appended_blocks + + # Now, ensure no additional blocks consumed as we fill up the empty slots. + num_free_blocks = allocator.get_num_free_blocks(device=Device.GPU) + block_table.append_token_ids(token_ids=list(range(num_empty_slots))) + assert num_free_blocks == allocator.get_num_free_blocks(device=Device.GPU) + + +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("sequence_len", [1, 9]) +@pytest.mark.parametrize("append_len", [1, 16, 129]) +@pytest.mark.parametrize("append_size", [1, 4, 129]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_append_token_ids_correct_content(block_size: int, sequence_len: int, + append_len: int, allocator_type: str, + append_size: int): + """Verify token ids are correctly appended. Appends various amounts of + token ids in various append sizes, and verifies the final sequence is + correct. 
+ """ + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=1024, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + token_ids_to_append = list(range(append_len)) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + block_table.allocate(token_ids=token_ids, device=Device.GPU) + + appended_so_far = [] + for append in chunk_list(token_ids_to_append, append_size): + block_table.append_token_ids(append) + appended_so_far.extend(append) + + assert block_table._get_all_token_ids() == token_ids + appended_so_far + + assert block_table._get_all_token_ids() == token_ids + token_ids_to_append + + +@pytest.mark.parametrize("seq_len", [1, 9, 129]) +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_fork(seq_len: int, block_size: int, allocator_type: str): + """Create a sequence using the specified allocator. + 1. Assert that after forking the sequence, the free block count is the + same. + 2. Assert that the forked sequence has the same physical mappings. + 3. Then free the original sequence; verify that the free block count is + the same. + 4. Finally, free the forked sequence and verify that the free block + count drops to zero. + """ + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=0, + block_size=block_size, + ) + + token_ids = list(range(seq_len)) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + block_table.allocate(token_ids) + + num_free_blocks_before_fork = allocator.get_num_free_blocks( + device=Device.GPU) + + forked_block_table = block_table.fork() + + # Expect physical_block_ids and token_ids to match. 
+ assert (block_table.physical_block_ids == + forked_block_table.physical_block_ids) + assert block_table._get_all_token_ids( + ) == forked_block_table._get_all_token_ids() + + # Do not expect any additional allocations. + assert allocator.get_num_free_blocks( + device=Device.GPU) == num_free_blocks_before_fork + + # Free the original blocks. Assert num free blocks does not change, since + # refcount is nonzero. + block_table.free() + assert allocator.get_num_free_blocks( + device=Device.GPU) == num_free_blocks_before_fork + + # Expect the forked block table to be unaffected by the free. + assert all(block_id is not None + for block_id in forked_block_table.physical_block_ids) + + # Free the forked blocks. Assert num free blocks does change, since + # refcount is now zero. + forked_block_table.free() + assert allocator.get_num_free_blocks(device=Device.GPU) == num_gpu_blocks + + +@pytest.mark.parametrize("block_size", [8]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("append_len", [1, 16, 129]) +@pytest.mark.parametrize("appender", ["forked", "original"]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_cow(block_size: int, sequence_len: int, append_len: int, + allocator_type: str, appender: str): + """Fork a sequence; append to the forked sequence; verify there's a CoW. 
+ """ + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=0, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + token_ids_to_append = list(range(append_len)) + + original_block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + num_expected_non_cow_blocks = cdiv(sequence_len, block_size) + num_expected_cow_blocks = cdiv(sequence_len + append_len, + block_size) - (sequence_len // block_size) + + original_block_table.allocate(token_ids=token_ids, device=Device.GPU) + original_block_ids = original_block_table.physical_block_ids + + forked_block_table = original_block_table.fork() + + # Expect no additional allocation (copy on _write_). + assert allocator.get_num_free_blocks( + Device.GPU) == (num_gpu_blocks - num_expected_non_cow_blocks) + + if appender == "forked": + appender_block_table = forked_block_table + static_block_table = original_block_table + elif appender == "original": + appender_block_table = original_block_table + static_block_table = forked_block_table + else: + raise ValueError(f"unknown test config {appender=}") + + # Write tokens. + appender_block_table.append_token_ids(token_ids_to_append) + + # Expect the non-appending block table to have no change. + assert static_block_table.physical_block_ids == original_block_ids + assert appender_block_table.physical_block_ids != original_block_ids + + # Expect the blocks changed during append to have a CoW. + assert allocator.get_num_free_blocks( + Device.GPU) == num_gpu_blocks - (num_expected_non_cow_blocks + + num_expected_cow_blocks) + + cows = allocator.clear_copy_on_writes() + if sequence_len % block_size > 0: + # If the last block in the sequence is not full, then when appending we + # expect a CoW. 
+ assert cows + + cow_block_id = sequence_len // block_size + expected_src = static_block_table.physical_block_ids[cow_block_id] + expected_dst = appender_block_table.physical_block_ids[cow_block_id] + + assert expected_src in cows + assert expected_dst in cows[expected_src] + else: + # Otherwise, there should be no copy-on-write. + assert not cows + + static_block_table.free() + appender_block_table.free() + + # After free, expect all blocks to be freed. + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + +@pytest.mark.parametrize("block_size", [8]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("append_len", [1, 16, 129]) +@pytest.mark.parametrize("lookahead_slots", [1, 16, 129]) +@pytest.mark.parametrize("appender", ["forked", "original"]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_cow_lookahead_simple(block_size: int, sequence_len: int, + append_len: int, lookahead_slots: int, + allocator_type: str, appender: str): + """Similar to test_cow, except with lookahead allocation. The assertions are + less rigorous due to the complexity of the property under test. + """ + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=0, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + token_ids_to_append = list(range(append_len)) + + original_block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + original_block_table.allocate(token_ids=token_ids, device=Device.GPU) + + # Allocate lookahead slots. 
+ original_block_table.ensure_num_empty_slots(lookahead_slots) + original_block_ids = original_block_table.physical_block_ids + + forked_block_table = original_block_table.fork() + + if appender == "forked": + appender_block_table = forked_block_table + static_block_table = original_block_table + elif appender == "original": + appender_block_table = original_block_table + static_block_table = forked_block_table + else: + raise ValueError(f"unknown test config {appender=}") + + # Write tokens. + appender_block_table.append_token_ids(token_ids_to_append) + + # Expect the non-appending block table to have no change. + assert static_block_table.physical_block_ids == original_block_ids + assert appender_block_table.physical_block_ids != original_block_ids + + cows = allocator.clear_copy_on_writes() + + # Always expect copy-on-write + assert cows + + if sequence_len % block_size > 0: + # If the last block in the sequence is not full, then when appending we + # expect a CoW. + assert cows + + cow_block_id = sequence_len // block_size + expected_src = static_block_table.physical_block_ids[cow_block_id] + expected_dst = appender_block_table.physical_block_ids[cow_block_id] + + assert expected_src in cows + assert expected_dst in cows[expected_src] + + static_block_table.free() + appender_block_table.free() + + # After free, expect all blocks to be freed. + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + +@pytest.mark.parametrize("block_size", [1, 8]) +@pytest.mark.parametrize("sequence_len", [1, 16, 129]) +@pytest.mark.parametrize("num_new_tokens", [1, 16, 129]) +@pytest.mark.parametrize("num_lookahead_slots", [1, 7, 8]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_num_blocks_touched_by_append_slots(block_size: int, sequence_len: int, + num_new_tokens: int, + num_lookahead_slots: int, + allocator_type: str): + """Verify correct calculation of get_num_blocks_touched_by_append_slots. 
+ + This is done by using copy-on-write, which requires any modified block to + be copied before write if the refcount > 1. We set the refcount>1 by forking + a sequence, then measure the free blocks before and after an append. If the + number of consumed blocks equals what `get_num_blocks_touched_by_append_ + slots` returns, then the calculation is correct. + """ + + num_gpu_blocks = 1024 + + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=0, + block_size=block_size, + ) + + token_ids = list(range(sequence_len)) + token_ids_to_append = list(range(num_new_tokens)) + + block_table = BlockTable( + block_size=block_size, + block_allocator=allocator, + ) + + block_table.allocate(token_ids=token_ids, device=Device.GPU) + + # Add lookahead before fork so both sequences have the same lookahead + # blocks. + block_table.ensure_num_empty_slots(num_empty_slots=num_lookahead_slots) + + # Fork sequence so that every block has refcount > 1. + _ = block_table.fork() + + # Determine how many blocks should be touched. + expected_num_touched_blocks = ( + block_table.get_num_blocks_touched_by_append_slots( + token_ids=token_ids_to_append, + num_lookahead_slots=num_lookahead_slots)) + + # Measure how many blocks are touched by measuring num_free_blocks before + # and after the append. + # + # We expect append_token_ids to CoW all mutated blocks that have refcount>1. + num_free_blocks_before_append = allocator.get_num_free_blocks(Device.GPU) + block_table.append_token_ids(token_ids_to_append, num_lookahead_slots) + num_consumed_blocks = (num_free_blocks_before_append - + allocator.get_num_free_blocks(Device.GPU)) + + # TODO(cade) ensure equality when num_lookahead_slots > 0. + # The reason we have < is because lookahead blocks are not copied eagerly; + # they are copied on first write. This will cause issues for beam search + + # speculative decoding. 
This is acceptable for now as it is a large effort + # to combine the two. To fix this, we can ensure single sequence ownership + # of lookahead blocks by appending empty slots to each block, which will + # trigger the CoW. + # + # Until then, we can accept that the consumed tokens are <= the expected + # tokens when appending with lookahead. + if num_lookahead_slots > 0: + assert num_consumed_blocks <= expected_num_touched_blocks + else: + assert num_consumed_blocks == expected_num_touched_blocks diff --git a/tests/core/block/test_common.py b/tests/core/block/test_common.py new file mode 100644 index 0000000..cfdd358 --- /dev/null +++ b/tests/core/block/test_common.py @@ -0,0 +1,42 @@ +import random + +import pytest + +from vllm.core.block.common import RefCounter + + +@pytest.mark.parametrize("seed", list(range(20))) +@pytest.mark.parametrize("num_incrs", [1, 100]) +@pytest.mark.parametrize("num_blocks", [1024]) +def test_incr(seed: int, num_incrs: int, num_blocks: int): + random.seed(seed) + + all_block_indices = list(range(num_blocks)) + counter = RefCounter(all_block_indices=all_block_indices) + + block_id = random.randint(0, num_blocks - 1) + for i in range(num_incrs): + value = counter.incr(block_id) + assert value == i + 1 + + +@pytest.mark.parametrize("seed", list(range(20))) +@pytest.mark.parametrize("num_incrs", [1, 100]) +@pytest.mark.parametrize("num_blocks", [1024]) +def test_incr_decr(seed: int, num_incrs: int, num_blocks: int): + random.seed(seed) + + all_block_indices = list(range(num_blocks)) + counter = RefCounter(all_block_indices=all_block_indices) + + block_id = random.randint(0, num_blocks - 1) + for i in range(num_incrs): + value = counter.incr(block_id) + assert value == i + 1 + + for i in range(num_incrs): + value = counter.decr(block_id) + assert value == num_incrs - (i + 1) + + with pytest.raises(AssertionError): + counter.decr(block_id) diff --git a/tests/core/block/test_cpu_gpu_block_allocator.py 
b/tests/core/block/test_cpu_gpu_block_allocator.py new file mode 100644 index 0000000..44a5be6 --- /dev/null +++ b/tests/core/block/test_cpu_gpu_block_allocator.py @@ -0,0 +1,93 @@ +import pytest + +from vllm.core.block.cpu_gpu_block_allocator import CpuGpuBlockAllocator +from vllm.utils import Device, chunk_list + + +@pytest.mark.parametrize("num_cpu_blocks", [0, 512]) +@pytest.mark.parametrize("num_gpu_blocks", [1024]) +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_allocate_mutable(num_cpu_blocks: int, num_gpu_blocks: int, + block_size: int, allocator_type: str): + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks, + block_size=block_size, + ) + + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + cpu_blocks = [ + allocator.allocate_mutable(prev_block=None, device=Device.CPU) + for _ in range(num_cpu_blocks) + ] + assert allocator.get_num_free_blocks(Device.CPU) == 0 + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + gpu_blocks = [ + allocator.allocate_mutable(prev_block=None, device=Device.GPU) + for _ in range(num_gpu_blocks) + ] + assert allocator.get_num_free_blocks(Device.CPU) == 0 + assert allocator.get_num_free_blocks(Device.GPU) == 0 + + _ = [allocator.free(block) for block in cpu_blocks] + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == 0 + + _ = [allocator.free(block) for block in gpu_blocks] + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + +@pytest.mark.parametrize("num_cpu_blocks", [0, 512]) +@pytest.mark.parametrize("num_gpu_blocks", [1024]) +@pytest.mark.parametrize("block_size", [2]) 
+@pytest.mark.parametrize("allocator_type", ["naive", "prefix_caching"]) +def test_allocate_immutable(num_cpu_blocks: int, num_gpu_blocks: int, + block_size: int, allocator_type: str): + allocator = CpuGpuBlockAllocator.create( + allocator_type=allocator_type, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks, + block_size=block_size, + ) + + unique_token_ids = list( + range((num_cpu_blocks + num_gpu_blocks) * block_size)) + gpu_token_ids = chunk_list(unique_token_ids[:num_gpu_blocks * block_size], + block_size) + cpu_token_ids = chunk_list(unique_token_ids[num_gpu_blocks * block_size:], + block_size) + + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + cpu_blocks = [ + allocator.allocate_immutable(prev_block=None, + token_ids=token_ids, + device=Device.CPU) + for token_ids in cpu_token_ids + ] + assert allocator.get_num_free_blocks(Device.CPU) == 0 + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks + + gpu_blocks = [ + allocator.allocate_immutable(prev_block=None, + token_ids=token_ids, + device=Device.GPU) + for token_ids in gpu_token_ids + ] + assert allocator.get_num_free_blocks(Device.CPU) == 0 + assert allocator.get_num_free_blocks(Device.GPU) == 0 + + _ = [allocator.free(block) for block in cpu_blocks] + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == 0 + + _ = [allocator.free(block) for block in gpu_blocks] + assert allocator.get_num_free_blocks(Device.CPU) == num_cpu_blocks + assert allocator.get_num_free_blocks(Device.GPU) == num_gpu_blocks diff --git a/tests/core/block/test_naive_block.py b/tests/core/block/test_naive_block.py new file mode 100644 index 0000000..edcdc0c --- /dev/null +++ b/tests/core/block/test_naive_block.py @@ -0,0 +1,102 @@ +from typing import List, Optional + +import pytest + +from vllm.core.block.interfaces import Block, BlockAllocator 
+from vllm.core.block.naive_block import NaiveBlock, NaiveBlockAllocator + + +class TestNaiveBlockAllocator: + + @staticmethod + def create_allocate_lambda(allocate_type: str, + allocator: NaiveBlockAllocator, + prev_block: Optional[Block], + token_ids: List[int]): + if allocate_type == "immutable": + allocate_block = lambda: allocator.allocate_immutable( + prev_block=prev_block, token_ids=token_ids) + elif allocate_type == "mutable": + allocate_block = lambda: allocator.allocate_mutable(prev_block= + prev_block) + else: + raise ValueError() + + return allocate_block + + @staticmethod + @pytest.mark.parametrize("allocate_type", ["immutable", "mutable"]) + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_allocate_ooms(allocate_type: str, num_blocks: int, + block_size: int): + allocator = NaiveBlockAllocator(create_block=NaiveBlock, + num_blocks=num_blocks, + block_size=block_size) + allocate_block = TestNaiveBlockAllocator.create_allocate_lambda( + allocate_type, + allocator, + prev_block=None, + token_ids=list(range(block_size))) + + [allocate_block() for _ in range(num_blocks)] + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocate_block() + + @staticmethod + @pytest.mark.parametrize("allocate_type", ["immutable", "mutable"]) + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_free_prevents_oom(allocate_type: str, num_blocks: int, + block_size: int): + allocator = NaiveBlockAllocator(create_block=NaiveBlock, + num_blocks=num_blocks, + block_size=block_size) + allocate_block = TestNaiveBlockAllocator.create_allocate_lambda( + allocate_type, + allocator, + prev_block=None, + token_ids=list(range(block_size))) + + blocks = [allocate_block() for _ in range(num_blocks)] + + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocate_block() + + block_to_free = blocks.pop() + + for _ in range(100): + block_id = 
block_to_free.block_id + allocator.free(block_to_free) + assert block_to_free.block_id is None + + new_block = allocate_block() + assert new_block.block_id == block_id + + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocate_block() + + block_to_free = new_block + + @staticmethod + @pytest.mark.parametrize("allocate_type", ["immutable", "mutable"]) + @pytest.mark.parametrize("num_blocks", [1024]) + @pytest.mark.parametrize("block_size", [16]) + def test_get_num_free_blocks(allocate_type: str, num_blocks: int, + block_size: int): + allocator = NaiveBlockAllocator(create_block=NaiveBlock, + num_blocks=num_blocks, + block_size=block_size) + allocate_block = TestNaiveBlockAllocator.create_allocate_lambda( + allocate_type, + allocator, + prev_block=None, + token_ids=list(range(block_size))) + + assert allocator.get_num_free_blocks() == num_blocks + + blocks = [allocate_block() for _ in range(num_blocks)] + + for i, block in enumerate(blocks): + assert allocator.get_num_free_blocks() == i + allocator.free(block) diff --git a/tests/core/block/test_prefix_caching_block.py b/tests/core/block/test_prefix_caching_block.py new file mode 100644 index 0000000..c4c680e --- /dev/null +++ b/tests/core/block/test_prefix_caching_block.py @@ -0,0 +1,509 @@ +import math +import random +from typing import List, Optional +from unittest.mock import MagicMock + +import pytest + +from vllm.core.block.interfaces import Block, BlockAllocator +from vllm.core.block.prefix_caching_block import (PrefixCachingBlock, + PrefixCachingBlockAllocator) + + +class TestPrefixCachingBlock: + + @staticmethod + @pytest.mark.parametrize("seed", list(range(10))) + @pytest.mark.parametrize("block_size", [1, 16]) + @pytest.mark.parametrize("is_curr_block_full", [True, False]) + def test_first_block_has_correct_content_hash(seed: int, block_size: int, + is_curr_block_full: bool): + """Verify a block which is first in the sequence has the correct hash. 
+ """ + random.seed(seed) + num_to_fill = block_size if is_curr_block_full else random.randint( + 0, block_size - 1) + token_ids = list(range(num_to_fill)) + mock_allocator = MagicMock(spec=PrefixCachingBlockAllocator) + + block_with_prev = PrefixCachingBlock( + prev_block=None, + token_ids=token_ids, + block_size=block_size, + prefix_caching_allocator=mock_allocator) + + if is_curr_block_full: + # Expect hash since block is full. + assert block_with_prev.content_hash == ( + PrefixCachingBlock.hash_block_tokens( + is_first_block=True, + prev_block_hash=None, + cur_block_token_ids=token_ids)) + else: + # Do not expect hash since block is not full. + assert block_with_prev.content_hash is None + + @staticmethod + @pytest.mark.parametrize("seed", list(range(10))) + @pytest.mark.parametrize("block_size", [1, 16]) + @pytest.mark.parametrize("is_curr_block_full", [True, False]) + @pytest.mark.parametrize("prev_block_has_hash", [True, False]) + def test_nth_block_has_correct_content_hash(seed: int, block_size: int, + is_curr_block_full: bool, + prev_block_has_hash: bool): + """Verify a block which is not first in the sequence has the correct + hash. + """ + + random.seed(seed) + + previous_block = MagicMock(spec=PrefixCachingBlock) + prev_block_hash = random.randint(0, 1000) + previous_block.content_hash = (prev_block_hash + if prev_block_has_hash else None) + + num_to_fill = block_size if is_curr_block_full else random.randint( + 0, block_size - 1) + token_ids = list(range(num_to_fill)) + mock_allocator = MagicMock(spec=PrefixCachingBlockAllocator) + + block_with_prev = PrefixCachingBlock( + prev_block=previous_block, + token_ids=token_ids, + block_size=block_size, + prefix_caching_allocator=mock_allocator, + ) + + if is_curr_block_full and prev_block_has_hash: + # Expect hash since block is full and previous block has hash. 
+ assert (block_with_prev.content_hash == + PrefixCachingBlock.hash_block_tokens( + is_first_block=False, + prev_block_hash=prev_block_hash, + cur_block_token_ids=token_ids)) + else: + # Do not expect hash since block is not full or the previous block + # does not have a hash. + assert block_with_prev.content_hash is None + + @staticmethod + @pytest.mark.parametrize("block_size", [1, 2, 16]) + @pytest.mark.parametrize("num_tokens", list(range(3))) + @pytest.mark.parametrize("num_empty_trailing_blocks", [0, 1, 10]) + def test_blocks_have_correct_hash_in_chain(block_size: int, + num_tokens: int, + num_empty_trailing_blocks: int): + """Create two chains of logical blocks with the same contents. + Assert the hashes are equal. + """ + random.seed(0) + + token_ids = [random.randint(0, 50_000) for _ in range(num_tokens)] + + first_chain, second_chain = [ + TestPrefixCachingBlock.create_chain( + block_size=block_size, + token_ids=token_ids, + num_empty_trailing_blocks=num_empty_trailing_blocks) + for _ in range(2) + ] + + for first_chain_block, second_chain_block in zip( + first_chain, second_chain): + assert (first_chain_block.content_hash == + second_chain_block.content_hash) + + if not first_chain or not second_chain: + assert first_chain == second_chain + assert num_tokens == 0 + + @staticmethod + def create_chain(block_size: int, + token_ids: List[int], + num_empty_trailing_blocks=0) -> List[PrefixCachingBlock]: + """Helper method which creates a chain of blocks. 
+ """ + blocks = [] + num_blocks = math.ceil( + len(token_ids) / block_size) + num_empty_trailing_blocks + + if num_blocks == 0: + return [] + + allocator = MagicMock(spec=PrefixCachingBlockAllocator) + + prev_block = None + for block_number in range(0, num_blocks): + prev_block = PrefixCachingBlock( + prev_block=prev_block, + token_ids=[], + block_size=block_size, + prefix_caching_allocator=allocator, + ) + + tokens_to_append = token_ids[block_number * + block_size:(block_number + 1) * + block_size] + if tokens_to_append: + prev_block.append_token_ids(tokens_to_append) + + blocks.append(prev_block) + + return blocks + + +class TestPrefixCachingBlockAllocator: + + @staticmethod + def create_allocate_lambda(allocate_type: str, allocator: BlockAllocator, + prev_block: Optional[Block], + token_ids: List[int]): + if allocate_type == "immutable": + allocate_block = lambda: allocator.allocate_immutable( + prev_block=prev_block, token_ids=token_ids) + elif allocate_type == "mutable": + allocate_block = lambda: allocator.allocate_mutable(prev_block= + prev_block) + else: + raise ValueError() + + return allocate_block + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_allocate_mutable_ooms(num_blocks: int, block_size: int): + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + allocate_block = TestPrefixCachingBlockAllocator.create_allocate_lambda( + allocate_type="mutable", + allocator=allocator, + prev_block=None, + token_ids=list(range(block_size)), + ) + + [allocate_block() for _ in range(num_blocks)] + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocate_block() + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_allocate_immutable_does_not_oom_single_hash( + num_blocks: int, block_size: int): + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + 
block_size=block_size) + allocate_block = TestPrefixCachingBlockAllocator.create_allocate_lambda( + allocate_type="immutable", + allocator=allocator, + prev_block=None, + token_ids=list(range(block_size)), + ) + + blocks = [allocate_block() for _ in range(num_blocks)] + + # Expect no OOM. If these were mutable blocks, this would OOM. + non_oom_block = allocate_block() + + # Expect all blocks to have same physical block index. + for block in blocks: + assert (block.block_id == non_oom_block.block_id) + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_allocate_immutable_ooms_many_hash(num_blocks: int, + block_size: int): + """Consume all blocks using many different hashes/block content. + + Do this by creating a sequence that is very long. + Expect next block to OOM. + """ + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + + # Create token ids that will exhaust all blocks. + token_ids = list(range(num_blocks * block_size)) + + chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # Expect allocation with unseen hash to fail. + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocator.allocate_immutable(prev_block=chain[-1], + token_ids=list(range(block_size))) + + # Expect mutable allocation to fail. + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocator.allocate_mutable(prev_block=chain[-1]) + + # Expect allocation of exact same chain to pass. + second_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # Expect physical block indices to be the same in both chains. 
+ assert chain and second_chain + for first_chain_block, second_chain_block in zip(chain, second_chain): + assert (first_chain_block.block_id == second_chain_block.block_id) + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1, 1024]) + @pytest.mark.parametrize("block_size", [1, 16]) + def test_free_prevents_oom(num_blocks: int, block_size: int): + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + + # Create token ids that will exhaust all blocks. + token_ids = list(range(num_blocks * block_size)) + + chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # Expect mutable allocation to fail. + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocator.allocate_mutable(prev_block=None) + + block_to_free = chain[-1] + + # Expect free/allocate loop to succeed many times. + for i in range(100): + block_id = block_to_free.block_id + allocator.free(block_to_free) + assert block_to_free.block_id is None, i + + new_block = allocator.allocate_mutable(prev_block=None) + assert new_block.block_id == block_id, i + + with pytest.raises(BlockAllocator.NoFreeBlocksError): + allocator.allocate_mutable(prev_block=None) + + block_to_free = new_block + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1024]) + @pytest.mark.parametrize("block_size", [16]) + @pytest.mark.parametrize("seed", list(range(20))) + def test_get_num_free_blocks(num_blocks: int, block_size: int, seed: int): + random.seed(seed) + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + num_blocks_to_consume = random.randint(1, num_blocks - 1) + + # Create token ids that will exhaust all blocks. 
+ token_ids = list(range(num_blocks_to_consume * block_size)) + + chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # Free each block in chain, assert num free blocks includes new free + # block. + for i, block in enumerate(chain): + assert allocator.get_num_free_blocks() == (num_blocks - + num_blocks_to_consume + + i) + allocator.free(block) + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1024]) + @pytest.mark.parametrize("block_size", [16]) + @pytest.mark.parametrize("seed", list(range(20))) + def test_get_num_free_blocks_shared(num_blocks: int, block_size: int, + seed: int): + """Verify sharing occurs by allocating two sequences that share prefixes + and incrementally freeing blocks. + """ + random.seed(seed) + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + num_blocks_to_consume = random.randint(1, num_blocks - 1) + + # Create token ids that will exhaust all blocks. + token_ids = list(range(num_blocks_to_consume * block_size)) + + first_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + second_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # Free each block in the first chain. Since all blocks are shared, the + # free count should stay constant. + for i, block in enumerate(first_chain): + assert allocator.get_num_free_blocks() == (num_blocks - + num_blocks_to_consume) + allocator.free(block) + + # Free each block in the second chain. Since the refcount is now zero, + # the free count should increment with each free. 
+ for i, block in enumerate(second_chain): + assert allocator.get_num_free_blocks() == (num_blocks - + num_blocks_to_consume + + i) + allocator.free(block) + + @staticmethod + @pytest.mark.parametrize("num_blocks", [1024]) + @pytest.mark.parametrize("block_size", [16]) + @pytest.mark.parametrize("seed", list(range(20))) + def test_get_common_computed_block_ids(num_blocks: int, block_size: int, + seed: int): + """Verify get_common_computed_block_ids returns the correct result + by creating two immutable chains sharing a prefix up to a specified + position, and checking that the returned common computed block ids + cover exactly that shared prefix. + """ + random.seed(seed) + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks * 2, + block_size=block_size) + num_blocks_to_consume = random.randint(1, num_blocks - 1) + + # Create token ids that will exhaust all blocks. + token_ids = list(range(num_blocks_to_consume * block_size)) + blocks = list(range(num_blocks_to_consume)) + + first_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + # mark all blocks in first chain as computed + allocator.mark_blocks_as_computed(blocks) + + # After zero_point, second_chain's token_ids are set to -1, which + # makes them differ from first_chain's token_ids from there on + zero_point = random.randint(1, len(token_ids) - 1) + zero_point_blocks = zero_point // block_size + token_ids[zero_point:] = [-1] * (len(token_ids) - zero_point) + + second_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids, + allocator=allocator, + ) + + first_computed_ids = [ + first_chain[i].block_id for i in range(num_blocks_to_consume) + ] + second_computed_ids = [ + second_chain[i].block_id for i in range(num_blocks_to_consume) + ] + res = allocator.get_common_computed_block_ids( + [first_computed_ids, second_computed_ids]) + + assert (len(res) == zero_point_blocks) + + # Test
case where two last accessed times are equal + @staticmethod + @pytest.mark.parametrize("num_blocks", [1024]) + @pytest.mark.parametrize("block_size", [16]) + @pytest.mark.parametrize("seed", list(range(20))) + def test_eviction_order(num_blocks: int, block_size: int, seed: int): + """This test case simulates two chains being created and freed in order, + and together they would exhaust all of the initially free blocks. + + So the next block created after those two chains shall reuse a block + from the first chain, as that chain has the older access time. + While the first chain has two blocks, it shall pick up the last one, as + it has the larger number of tokens. + """ + + random.seed(seed) + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + num_blocks_to_consume = num_blocks + 1 + + token_ids = list(range(num_blocks_to_consume * block_size)) + + num_blocks_in_first_chain = 2 + num_tokens_in_first_chain = block_size * num_blocks_in_first_chain + # First chain takes the first two blocks + first_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids[:num_tokens_in_first_chain], + allocator=allocator, + ) + # Only the first chain's blocks should be allocated at this point + assert allocator.get_num_free_blocks() == (num_blocks - + num_blocks_in_first_chain) + + # Set the last accessed time of the first chain's blocks to 1 + blocks_ids = [block.block_id for block in first_chain] + allocator.mark_blocks_as_accessed(blocks_ids, 1) + + # Second chain takes the rest of the blocks + second_chain = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids[num_tokens_in_first_chain:-block_size], + allocator=allocator, + ) + + # There shouldn't be any blocks left at this point + assert allocator.get_num_free_blocks() == (0) + + assert len(first_chain) == num_blocks_in_first_chain + last_block_id = first_chain[-1].block_id + # Free each block in the first chain.
+ for i, block in enumerate(first_chain): + allocator.free(block) + + # Set the last accessed time on all of the blocks in the second chain + # to 2 + blocks_ids = [block.block_id for block in second_chain] + allocator.mark_blocks_as_accessed(blocks_ids, 2) + + # Free each block in the second chain. + for i, block in enumerate(second_chain): + allocator.free(block) + + # Allocate a new block and check that it's the least recently used block + # from the first chain. + new_block = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=token_ids[-block_size:], + allocator=allocator, + ) + + assert new_block[0].block_id == last_block_id + + @staticmethod + def create_immutable_chain( + block_size: int, + token_ids: List[int], + allocator: PrefixCachingBlockAllocator, + ) -> List[PrefixCachingBlock]: + """Helper method which creates a chain of blocks. + """ + blocks = [] + num_blocks = math.ceil(len(token_ids) / block_size) + + if num_blocks == 0: + return [] + + prev_block = None + for block_number in range(0, num_blocks): + block_token_ids = token_ids[block_number * + block_size:(block_number + 1) * + block_size] + prev_block = allocator.allocate_immutable( + prev_block=prev_block, token_ids=block_token_ids) + blocks.append(prev_block) + + return blocks diff --git a/tests/core/test_block_manager.py b/tests/core/test_block_manager.py new file mode 100644 index 0000000..9f9a618 --- /dev/null +++ b/tests/core/test_block_manager.py @@ -0,0 +1,367 @@ +import time +from typing import List + +import pytest + +from vllm import SamplingParams +from vllm.block import PhysicalTokenBlock +from vllm.core.block_manager_v1 import (BlockSpaceManagerV1, + UncachedBlockAllocator) +from vllm.core.interfaces import AllocStatus +from vllm.sequence import Logprob, Sequence, SequenceGroup, SequenceStatus +from vllm.utils import Device + +from .utils import create_dummy_prompt + + +def test_block_allocator_allocate(): + block_size = 4 + num_cpu_blocks = 
4 + cpu_allocator = UncachedBlockAllocator(Device.CPU, block_size, + num_cpu_blocks) + + # Allocate all available cpu blocks. + num_free = num_cpu_blocks + assert cpu_allocator.get_num_free_blocks() == num_free + for _ in range(num_cpu_blocks): + block = cpu_allocator.allocate() + num_free -= 1 + + assert block not in cpu_allocator.free_blocks + assert cpu_allocator.get_num_free_blocks() == num_free + + with pytest.raises(ValueError): + cpu_allocator.allocate() + + +def test_block_allocator_free(): + block_size = 4 + num_cpu_blocks = 4 + cpu_allocator = UncachedBlockAllocator(Device.CPU, block_size, + num_cpu_blocks) + + # Allocate all available cpu blocks. + blocks: List[PhysicalTokenBlock] = [] + for _ in range(num_cpu_blocks): + block = cpu_allocator.allocate() + blocks.append(block) + assert block not in cpu_allocator.free_blocks + + # Free all allocated cpu blocks. + num_free = 0 + assert cpu_allocator.get_num_free_blocks() == num_free + for block in blocks: + cpu_allocator.free(block) + num_free += 1 + assert block in cpu_allocator.free_blocks + assert cpu_allocator.get_num_free_blocks() == num_free + + with pytest.raises(ValueError): + cpu_allocator.free(block) + + +def test_allocate(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + # Allocate same sequence group to all available gpu blocks. + for i in range(num_gpu_blocks): + _, seq_group = create_dummy_prompt(str(i), block_size) + assert block_manager.can_allocate(seq_group) + block_manager.allocate(seq_group) + assert block_manager.can_allocate(seq_group) != AllocStatus.OK + + # Allocate same sequence group to all available gpu blocks. + # Use watermark to reserve one gpu block. 
+ block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=1 / num_gpu_blocks) + for i in range(num_gpu_blocks - 1): + _, seq_group = create_dummy_prompt(str(i), block_size) + assert block_manager.can_allocate(seq_group) + block_manager.allocate(seq_group) + assert block_manager.can_allocate(seq_group) != AllocStatus.OK + + +def test_append_slot_single_seq(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + # Allocate single seq to gpu block. + prompt, seq_group = create_dummy_prompt("1", block_size) + block_manager.allocate(seq_group) + + # Nothing to append. Sequence has no new logical blocks. + assert block_manager.can_append_slots(seq_group) + before_blocks = block_manager.get_num_free_gpu_blocks() + assert not block_manager.append_slots(prompt) + after_blocks = block_manager.get_num_free_gpu_blocks() + assert before_blocks == after_blocks + + # Add block_size number of new tokens and append slot. + for i in range(block_size): + token_id = i + 5 + prompt.append_token_id(token_id, {token_id: Logprob(0.0)}) + + assert block_manager.can_append_slots(seq_group) + before_blocks = block_manager.get_num_free_gpu_blocks() + assert not block_manager.append_slots(prompt) + after_blocks = block_manager.get_num_free_gpu_blocks() + assert before_blocks - after_blocks == 1 + + +def test_append_slot_cow(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size=block_size, + num_cpu_blocks=num_cpu_blocks, + num_gpu_blocks=num_gpu_blocks, + watermark=0) + + # Allocate prompt to gpu block. There is one slot left in the block. + prompt = Sequence(seq_id=1, + prompt="one two three", + prompt_token_ids=[1, 2, 3], + block_size=block_size) + + # Fork the sequence, such that a COW will be required when we append a new + # token id. 
+ child = prompt.fork(new_seq_id=2) + + # Allocate space for the sequence group. + seq_group = SequenceGroup("1", [prompt, child], SamplingParams(), + time.time(), time.perf_counter) + block_manager.allocate(seq_group) + + # Fork and append a new token id. We expect a COW to be scheduled. + token_id = 4 + child.append_token_id(token_id, {token_id: Logprob(0.0)}) + block_manager.fork(prompt, child) + + assert block_manager.can_append_slots(seq_group) + before_blocks = block_manager.get_num_free_gpu_blocks() + + cows = block_manager.append_slots(child) + assert cows + for src_block, dst_blocks in cows.items(): + assert src_block not in dst_blocks + + after_blocks = block_manager.get_num_free_gpu_blocks() + assert before_blocks - after_blocks == 1 + + +def test_fork(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + prompt, seq_group = create_dummy_prompt("1", + block_size - 1, + block_size=block_size) + block_manager.allocate(seq_group) + + # Fork prompt and copy block tables. + child = prompt.fork(2) + block_manager.fork(prompt, child) + assert block_manager.get_block_table( + prompt) == block_manager.get_block_table(child) + token_id = 4 + # Append token to child. Block is shared so copy on write occurs. + child.append_token_id(token_id, {token_id: Logprob(0.0)}) + block_manager.append_slots(child) + assert block_manager.get_block_table( + prompt) != block_manager.get_block_table(child) + + +def test_swap(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + prompt, seq_group = create_dummy_prompt("1", prompt_length=block_size - 1) + prompt.status = SequenceStatus.WAITING + block_manager.allocate(seq_group) + + # Emulate a forward pass by appending a single token. 
+ # The block manager then knows how many unprocessed + # tokens will be written in the next forward pass. + token_id = 0 + prompt.status = SequenceStatus.RUNNING + prompt.append_token_id(token_id, {token_id: Logprob(0.0)}) + + # Swap seq group from GPU -> CPU. + gpu_blocks = block_manager.get_block_table(prompt) + assert block_manager.can_swap_out(seq_group) + before_cpu_blocks = block_manager.get_num_free_cpu_blocks() + before_gpu_blocks = block_manager.get_num_free_gpu_blocks() + mapping = block_manager.swap_out(seq_group) + assert list(mapping.keys()) == gpu_blocks + after_cpu_blocks = block_manager.get_num_free_cpu_blocks() + after_gpu_blocks = block_manager.get_num_free_gpu_blocks() + assert before_cpu_blocks == after_cpu_blocks + len(gpu_blocks) + assert before_gpu_blocks + len(gpu_blocks) == after_gpu_blocks + prompt.status = SequenceStatus.SWAPPED + + # Swap seq group from CPU -> GPU. + cpu_blocks = block_manager.get_block_table(prompt) + assert block_manager.can_swap_in(seq_group) == AllocStatus.OK + before_cpu_blocks = block_manager.get_num_free_cpu_blocks() + before_gpu_blocks = block_manager.get_num_free_gpu_blocks() + mapping = block_manager.swap_in(seq_group) + assert list(mapping.keys()) == cpu_blocks + after_cpu_blocks = block_manager.get_num_free_cpu_blocks() + after_gpu_blocks = block_manager.get_num_free_gpu_blocks() + assert before_cpu_blocks + len(cpu_blocks) == after_cpu_blocks + assert before_gpu_blocks == after_gpu_blocks + len(cpu_blocks) + + +def test_free(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + prompt, seq_group = create_dummy_prompt("1", block_size) + block_manager.allocate(seq_group) + + # Free allocated seq. 
+ prompt_blocks = len(block_manager.get_block_table(prompt)) + before_blocks = block_manager.get_num_free_gpu_blocks() + block_manager.free(prompt) + after_blocks = block_manager.get_num_free_gpu_blocks() + assert after_blocks == before_blocks + prompt_blocks + + # Block table for freed seq is deleted. + with pytest.raises(KeyError): + block_manager.get_block_table(prompt) + + +def test_reset(): + block_size = 4 + num_cpu_blocks = 4 + num_gpu_blocks = 4 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + watermark=0) + + # Allocate same seq group on all available gpu blocks. + original_blocks = block_manager.get_num_free_gpu_blocks() + for i in range(num_gpu_blocks): + _, seq_group = create_dummy_prompt(str(i), block_size) + block_manager.allocate(seq_group) + assert block_manager.get_num_free_gpu_blocks() == 0 + + # Resetting block manager frees all allocated blocks. + block_manager.reset() + assert block_manager.get_num_free_gpu_blocks() == original_blocks + + +def test_sliding_window_multi_seq(): + """ + Tests that memory allocation and deallocation is handled + correctly with multiple sequences that exceed the sliding + window's capacity. + """ + block_size = 1 + num_cpu_blocks = 8 + num_gpu_blocks = 8 + sliding_window = 2 + block_manager = BlockSpaceManagerV1(block_size, + num_cpu_blocks, + num_gpu_blocks, + sliding_window=sliding_window, + watermark=0) + + assert block_manager.get_num_free_gpu_blocks() == num_gpu_blocks + + parent = Sequence(1, "one two three", [0, 1, 2], block_size) + seq_group = SequenceGroup("1", [parent], SamplingParams(), time.time(), + None) + block_manager.allocate(seq_group) + + # assert the number of blocks allocated is correct + # the parent seq has len 3, but since sliding_window is 2, + # we will use at most 2 blocks + assert block_manager.get_num_free_gpu_blocks( + ) == num_gpu_blocks - sliding_window + + # Fork prompt and copy block tables. 
+ child = parent.fork(2) + block_manager.fork(parent, child) + + # assert the number of blocks allocated is correct + # forking does not increase memory consumption + assert block_manager.get_num_free_gpu_blocks( + ) == num_gpu_blocks - sliding_window + + # assert both parent and child share all blocks + assert block_manager.get_block_table( + parent) == block_manager.get_block_table(child) + + token_id = 4 + # Append token to child. Block is shared so copy on write occurs. + child.append_token_id(token_id, {token_id: Logprob(0.0)}) + block_manager.append_slots(child) + + # assert the number of blocks allocated is correct + # we will use now one block more. Each seq will use 2 blocks, + # but only one can be shared + assert block_manager.get_num_free_gpu_blocks( + ) == num_gpu_blocks - sliding_window - 1 + + token_id = 5 + parent.append_token_id(token_id, {token_id: Logprob(0.0)}) + block_manager.append_slots(parent) + + # assert the number of blocks allocated is correct + # no change, because both sequences are still just sharing one block + assert block_manager.get_num_free_gpu_blocks( + ) == num_gpu_blocks - sliding_window - 1 + + block_table_parent = block_manager.get_block_table(parent) + block_table_child = block_manager.get_block_table(child) + + assert block_table_parent != block_table_child + + # assert both blocks are sharing the second-last block + assert block_table_parent[-2] == block_table_child[-2] + + # now let's clean up... + block_manager.free(parent) + + # assert the number of blocks allocated is correct + # We have freed one seq, reducing the ref count of two blocks by one. + # One of the two was only used by the parent seq, so this is now free. 
+ # The child seq still consumes sliding_window blocks + assert block_manager.get_num_free_gpu_blocks( + ) == num_gpu_blocks - sliding_window + + # free all blocks + block_manager.free(child) + + # assert all blocks are free now + assert block_manager.get_num_free_gpu_blocks() == num_gpu_blocks diff --git a/tests/core/test_chunked_prefill_scheduler.py b/tests/core/test_chunked_prefill_scheduler.py new file mode 100644 index 0000000..92498c0 --- /dev/null +++ b/tests/core/test_chunked_prefill_scheduler.py @@ -0,0 +1,564 @@ +from typing import List +from unittest.mock import MagicMock + +import pytest # noqa + +from vllm.config import CacheConfig, SchedulerConfig +from vllm.core.interfaces import AllocStatus +from vllm.core.scheduler import Scheduler +from vllm.sequence import Logprob, SequenceGroup + +from .utils import create_dummy_prompt + + +def get_sequence_groups(scheduler_output): + return [s.seq_group for s in scheduler_output.scheduled_seq_groups] + + +def append_new_token(seq_group, token_id: int): + for seq in seq_group.get_seqs(): + seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + +def schedule_and_update_computed_tokens(scheduler): + metas, out = scheduler.schedule() + for s, meta in zip(out.scheduled_seq_groups, metas): + s.seq_group.update_num_computed_tokens(meta.token_chunk_size) + return metas, out + + +def test_simple(): + """Verify basic scheduling works.""" + block_size = 4 + num_seq_group = 4 + max_model_len = 16 + max_num_batched_tokens = 64 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + num_seq_group, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running: List[SequenceGroup] = [] + + # Add seq groups to scheduler. 
+ for i in range(num_seq_group): + _, seq_group = create_dummy_prompt(str(i), prompt_length=block_size) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + + # Schedule seq groups prompts. + num_tokens = block_size * num_seq_group + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set(running) + assert out.num_batched_tokens == num_tokens + assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == num_seq_group + for s in running: + append_new_token(s, 1) + + # Schedule seq groups generation. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set(running) + assert out.num_batched_tokens == num_seq_group + assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == num_seq_group + + +def test_chunk(): + """Verify prefills are chunked properly.""" + block_size = 4 + max_seqs = 60 + max_model_len = 80 + max_num_batched_tokens = 64 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running: List[SequenceGroup] = [] + + # Add seq groups to scheduler. + for i in range(2): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + + # Verify the second request is chunked. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set(running) + assert seq_group_meta[0].token_chunk_size == 60 + # Verify it is chunked. 
+    assert seq_group_meta[1].token_chunk_size == 4
+    assert out.num_prefill_groups == 2
+    assert out.num_batched_tokens == 64
+    # Only the first seq group has a new token appended.
+    append_new_token(running[0], 1)
+
+    # One chunked prefill, and one decoding.
+    seq_group_meta, out = schedule_and_update_computed_tokens(scheduler)
+    assert set(get_sequence_groups(out)) == set(running)
+    # The first one is prefill. Scheduler guarantees ordering.
+    assert seq_group_meta[0].token_chunk_size == 56
+    # The second one is decoding.
+    assert seq_group_meta[1].token_chunk_size == 1
+    assert out.num_prefill_groups == 1
+    assert out.num_batched_tokens == 57
+
+
+def test_complex():
+    block_size = 4
+    max_seqs = 60
+    max_model_len = 80
+    max_num_batched_tokens = 64
+    scheduler_config = SchedulerConfig(max_num_batched_tokens,
+                                       max_seqs,
+                                       max_model_len,
+                                       enable_chunked_prefill=True)
+    cache_config = CacheConfig(block_size, 1.0, 1, "auto")
+    cache_config.num_cpu_blocks = 8
+    cache_config.num_gpu_blocks = 8
+    scheduler = Scheduler(scheduler_config, cache_config, None)
+    running: List[SequenceGroup] = []
+
+    # Add seq groups to scheduler.
+    for i in range(2):
+        _, seq_group = create_dummy_prompt(str(i), prompt_length=60)
+        scheduler.add_seq_group(seq_group)
+        running.append(seq_group)
+        assert seq_group.is_prefill()
+
+    # Verify the second request is chunked.
+    seq_group_meta, out = schedule_and_update_computed_tokens(scheduler)
+
+    assert set(get_sequence_groups(out)) == set(running)
+    assert seq_group_meta[0].token_chunk_size == 60
+    # Verify it is chunked.
+    assert seq_group_meta[1].token_chunk_size == 4
+    assert not running[0].is_prefill()
+    assert running[1].is_prefill()
+    assert out.num_prefill_groups == 2
+    assert out.num_batched_tokens == 64
+    # Only the first seq group has a new token appended.
+    append_new_token(running[0], 1)
+
+    # Add 2 more requests.
+    for i in range(2, 4):
+        _, seq_group = create_dummy_prompt(str(i), prompt_length=60)
+        scheduler.add_seq_group(seq_group)
+        running.append(seq_group)
+
+    # Decoding & chunked prefill & first chunk of 3rd request is scheduled.
+    seq_group_meta, out = schedule_and_update_computed_tokens(scheduler)
+    assert len(get_sequence_groups(out)) == 3
+    # The first one is the first chunked prefill.
+    assert seq_group_meta[0].token_chunk_size == 7
+    # The second one is the second new chunked prefill.
+    assert seq_group_meta[1].token_chunk_size == 56
+    # The last one is decode.
+    assert seq_group_meta[2].token_chunk_size == 1
+    # Two of them are in chunked prefill.
+    assert out.num_prefill_groups == 2
+    assert out.num_batched_tokens == 64
+    # The first 2 requests are now in decoding phase.
+    append_new_token(running[0], 1)
+    assert not running[0].is_prefill()
+    append_new_token(running[1], 1)
+    assert not running[1].is_prefill()
+    # The third request is still in prefill stage.
+    assert running[2].is_prefill()
+
+
+def test_maximal_decoding():
+    """Verify decoding requests are prioritized."""
+    block_size = 4
+    max_seqs = 2
+    max_model_len = 2
+    max_num_batched_tokens = 2
+    scheduler_config = SchedulerConfig(max_num_batched_tokens,
+                                       max_seqs,
+                                       max_model_len,
+                                       enable_chunked_prefill=True)
+    cache_config = CacheConfig(block_size, 1.0, 1, "auto")
+    cache_config.num_cpu_blocks = 8
+    cache_config.num_gpu_blocks = 8
+    scheduler = Scheduler(scheduler_config, cache_config, None)
+    running: List[SequenceGroup] = []
+
+    # Add seq groups to scheduler.
+    for i in range(2):
+        _, seq_group = create_dummy_prompt(str(i), prompt_length=2)
+        scheduler.add_seq_group(seq_group)
+        running.append(seq_group)
+        assert seq_group.is_prefill()
+
+    # The first prefill is scheduled.
+ seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 1 + assert seq_group_meta[0].token_chunk_size == 2 + assert not running[0].is_prefill() + assert running[1].is_prefill() + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == 2 + # Only the first seq group has a new token appended. + append_new_token(running[0], 1) + + # Create one more seq_group. + _, seq_group = create_dummy_prompt("3", prompt_length=2) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + assert seq_group.is_prefill() + # The first decoding + second chunk is scheduled. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 2 + assert seq_group_meta[0].token_chunk_size == 1 + assert seq_group_meta[1].token_chunk_size == 1 + assert not running[0].is_prefill() + assert running[1].is_prefill() + assert running[2].is_prefill() + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == 2 + append_new_token(running[0], 1) + + # Decoding + running prefill is prioritized. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 2 + assert seq_group_meta[0].token_chunk_size == 1 + assert seq_group_meta[1].token_chunk_size == 1 + assert not running[0].is_prefill() + assert not running[1].is_prefill() + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == 2 + append_new_token(running[0], 1) + append_new_token(running[1], 1) + + # Only decoding is prioritized. 
+ seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 2 + assert seq_group_meta[0].token_chunk_size == 1 + assert seq_group_meta[1].token_chunk_size == 1 + assert not running[0].is_prefill() + assert not running[1].is_prefill() + assert out.num_prefill_groups == 0 + assert out.num_batched_tokens == 2 + append_new_token(running[0], 1) + append_new_token(running[1], 1) + + # After aborting the decoding request, the fcfs new prefill is prioritized. + scheduler.abort_seq_group(running[0].request_id) + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 2 + assert seq_group_meta[0].token_chunk_size == 1 + assert seq_group_meta[1].token_chunk_size == 1 + assert not running[1].is_prefill() + assert running[2].is_prefill() + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == 2 + + +def test_prompt_limit(): + """Verify max_num_batched_tokens < max_model_len is possible.""" + block_size = 4 + max_seqs = 32 + max_model_len = 64 + max_num_batched_tokens = 32 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running: List[SequenceGroup] = [] + + _, seq_group = create_dummy_prompt("1", prompt_length=48) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + assert seq_group.is_prefill() + + # The prompt length > max_num_batched_tokens should be still scheduled. 
+ seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(get_sequence_groups(out)) == 1 + assert seq_group_meta[0].token_chunk_size == 32 + assert running[0].is_prefill() + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == 32 + + +def test_prompt_limit_exceed(): + block_size = 4 + max_seqs = 64 + max_model_len = 32 + max_num_batched_tokens = 64 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running: List[SequenceGroup] = [] + + _, seq_group = create_dummy_prompt("2", prompt_length=48) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + assert seq_group.is_prefill() + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.ignored_seq_groups) == 1 + assert out.ignored_seq_groups[0] == seq_group + + +def test_swap(): + """Verify swapping works with chunked prefill requests""" + block_size = 4 + max_seqs = 30 + max_model_len = 200 + max_num_batched_tokens = 30 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler.add_seq_group(seq_group) + _, out = schedule_and_update_computed_tokens(scheduler) + # The request is chunked. + # prefill scheduled now. + assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert seq_group.is_prefill() + assert out.num_batched_tokens == max_num_batched_tokens + + # The last request should be swapped out. 
+ scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "1" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + + # The running prefill is now swapped. + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 0 + assert out.num_batched_tokens == 0 + assert out.blocks_to_swap_out != {} + assert out.blocks_to_swap_in == {} + + # Add 1 more task. Swap should be prioritized over new prefill. + _, seq_group = create_dummy_prompt("2", prompt_length=60) + scheduler.add_seq_group(seq_group) + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + # 3 decodes. It is swapped in. + assert out.num_batched_tokens == 30 + assert out.blocks_to_swap_in != {} + assert out.blocks_to_swap_out == {} + + +def test_running_prefill_prioritized_over_swap(): + block_size = 4 + max_seqs = 30 + max_model_len = 200 + max_num_batched_tokens = 30 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler.add_seq_group(seq_group) + _, out = schedule_and_update_computed_tokens(scheduler) + # The request is chunked. + # prefill scheduled now. + assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert seq_group.is_prefill() + assert out.num_batched_tokens == max_num_batched_tokens + + # The request should be swapped out. 
+ scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "1" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + + # The running prefill is now swapped. + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 0 + assert out.num_batched_tokens == 0 + assert out.blocks_to_swap_out != {} + assert out.blocks_to_swap_in == {} + + # Add 1 more task. Swap is not possible, so prefill is running. + scheduler.block_manager.can_swap_in = MagicMock() + scheduler.block_manager.can_swap_in.return_value = AllocStatus.LATER + + _, seq_group2 = create_dummy_prompt("2", prompt_length=60) + scheduler.add_seq_group(seq_group2) + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + # 3 decodes. It is swapped in. + assert out.num_batched_tokens == 30 + assert out.blocks_to_swap_in == {} + assert out.blocks_to_swap_out == {} + assert out.scheduled_seq_groups[0].seq_group == seq_group2 + + # Now although swap is possible, running prefill is prioritized. + scheduler.block_manager.can_swap_in.return_value = AllocStatus.OK + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + # 3 decodes. It is swapped in. + assert out.num_batched_tokens == 30 + assert out.blocks_to_swap_in == {} + assert out.blocks_to_swap_out == {} + assert not seq_group2.is_prefill() + assert out.scheduled_seq_groups[0].seq_group == seq_group2 + append_new_token(seq_group2, 1) + + # Decoding is prioritized. + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + # 3 decodes. It is swapped in. 
+ assert out.num_batched_tokens == 1 + assert out.blocks_to_swap_in == {} + assert out.blocks_to_swap_out == {} + assert not seq_group2.is_prefill() + assert out.scheduled_seq_groups[0].seq_group == seq_group2 + append_new_token(seq_group2, 1) + + # Since we abort the sequence group, we can finally swap. + scheduler.abort_seq_group(seq_group2.request_id) + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.num_batched_tokens == 30 + assert out.blocks_to_swap_in != {} + assert out.blocks_to_swap_out == {} + + +def test_chunked_prefill_preempt(): + """Verify preempt works with chunked prefill requests""" + block_size = 4 + max_seqs = 30 + max_model_len = 200 + max_num_batched_tokens = 30 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + + _, seq_group = create_dummy_prompt("1", prompt_length=60) + scheduler.add_seq_group(seq_group) + _, out = schedule_and_update_computed_tokens(scheduler) + # The request is chunked. + # prefill scheduled now. + assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert seq_group.is_prefill() + assert out.num_batched_tokens == max_num_batched_tokens + + # The request should be preempted. + scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "1" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + + # The running prefill is now preempted. 
+ _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 0 + assert out.num_batched_tokens == 0 + assert out.blocks_to_swap_out == {} + assert out.blocks_to_swap_in == {} + + # Make sure we can reschedule preempted request. + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert seq_group.is_prefill() + assert out.num_batched_tokens == max_num_batched_tokens + assert seq_group.get_num_uncomputed_tokens() == 30 + + # We should be able to run prefill twice as it is chunked. + def cannot_append_second_group(seq_group, num_lookahead_slots): + return True + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + _, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert not seq_group.is_prefill() + assert out.num_batched_tokens == max_num_batched_tokens + + +def test_chunked_prefill_max_seqs(): + block_size = 4 + max_seqs = 2 + max_model_len = 80 + max_num_batched_tokens = 64 + scheduler_config = SchedulerConfig(max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running = [] + + _, seq_group = create_dummy_prompt("1", prompt_length=65) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + # The first prefill is chunked. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert seq_group_meta[0].token_chunk_size == max_num_batched_tokens + assert len(get_sequence_groups(out)) == 1 + + # Add new requests. 
+ for i in range(4): + _, seq_group = create_dummy_prompt(str(i), prompt_length=65) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + + # Make sure only 2 requests are scheduled. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert out.num_batched_tokens == max_num_batched_tokens + assert len(get_sequence_groups(out)) == 2 + assert not running[0].is_prefill() + assert running[1].is_prefill() + append_new_token(running[0], 1) + + # Although we have enough token budget, we can only schedule max_seqs. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert seq_group_meta[0].token_chunk_size == 2 + assert seq_group_meta[1].token_chunk_size == 1 + assert out.num_batched_tokens == 3 + assert len(get_sequence_groups(out)) == max_seqs + assert not running[0].is_prefill() + assert not running[1].is_prefill() diff --git a/tests/core/test_scheduler.py b/tests/core/test_scheduler.py new file mode 100644 index 0000000..1358dff --- /dev/null +++ b/tests/core/test_scheduler.py @@ -0,0 +1,900 @@ +import time +from collections import deque +from typing import List +from unittest.mock import MagicMock + +import pytest # noqa + +from vllm.config import CacheConfig, LoRAConfig, SchedulerConfig +from vllm.core.interfaces import AllocStatus +from vllm.core.policy import PolicyFactory +from vllm.core.scheduler import Scheduler, SchedulingBudget +from vllm.lora.request import LoRARequest +from vllm.sequence import Logprob, SequenceGroup, SequenceStatus + +from .utils import create_dummy_prompt + + +def get_sequence_groups(scheduler_output): + return [s.seq_group for s in scheduler_output.scheduled_seq_groups] + + +def append_new_token(out, token_id: int): + seq_groups = get_sequence_groups(out) + for seq_group in seq_groups: + for seq in seq_group.get_seqs(): + seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + +def schedule_and_update_computed_tokens(scheduler): + metas, out = scheduler.schedule() + for s, 
meta in zip(out.scheduled_seq_groups, metas): + s.seq_group.update_num_computed_tokens(meta.token_chunk_size) + return metas, out + + +def append_new_token_seq_group(token_chunk_size, seq_group, token_id: int): + seq_group.update_num_computed_tokens(token_chunk_size) + for seq in seq_group.get_seqs(): + seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + +def test_scheduler_add_seq_group(): + block_size = 4 + scheduler_config = SchedulerConfig(100, 64, 1) + cache_config = CacheConfig(block_size, 1.0, 1, cache_dtype="auto") + cache_config.num_cpu_blocks = 4 + cache_config.num_gpu_blocks = 4 + scheduler = Scheduler(scheduler_config, cache_config, None) + + # Add seq group to scheduler. + num_seq_group = 4 + for i in range(num_seq_group): + _, seq_group = create_dummy_prompt(str(i), block_size) + scheduler.add_seq_group(seq_group) + assert scheduler.get_num_unfinished_seq_groups() == i + 1 + + +def test_scheduler_abort_seq_group(): + block_size = 4 + scheduler_config = SchedulerConfig(100, 64, 1) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 4 + cache_config.num_gpu_blocks = 4 + scheduler = Scheduler(scheduler_config, cache_config, None) + + # Add multiple seq groups to scheduler. + num_seq_group = 4 + request_ids = set() + for i in range(num_seq_group): + _, seq_group = create_dummy_prompt(str(i), block_size) + scheduler.add_seq_group(seq_group) + request_ids.add(str(i)) + + # Abort all added seq groups. 
+ assert scheduler.get_num_unfinished_seq_groups() == num_seq_group + scheduler.abort_seq_group(request_ids) + assert scheduler.get_num_unfinished_seq_groups() == 0 + + +def test_scheduler_schedule_simple(): + block_size = 4 + num_seq_group = 4 + max_model_len = 16 + scheduler_config = SchedulerConfig(64, num_seq_group, max_model_len) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + running: List[SequenceGroup] = [] + + # Add seq groups to scheduler. + for i in range(num_seq_group): + _, seq_group = create_dummy_prompt(str(i), prompt_length=block_size) + scheduler.add_seq_group(seq_group) + running.append(seq_group) + + # Schedule seq groups prompts. + num_tokens = block_size * num_seq_group + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set(running) + assert out.num_batched_tokens == num_tokens + assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == num_seq_group + append_new_token(out, 1) + + # Schedule seq groups generation. 
+    seq_group_meta, out = schedule_and_update_computed_tokens(scheduler)
+    assert set(get_sequence_groups(out)) == set(running)
+    assert out.num_batched_tokens == num_seq_group
+    assert (not out.blocks_to_copy and not out.blocks_to_swap_in
+            and not out.blocks_to_swap_out)
+    assert len(seq_group_meta) == num_seq_group
+    append_new_token(out, 1)
+
+
+def test_scheduler_prefill_prioritized():
+    """Verify running batched tokens are not applied to prefill requests."""
+    block_size = 4
+    max_model_len = 30
+    max_batched_num_tokens = 30
+    scheduler_config = SchedulerConfig(max_batched_num_tokens, 2,
+                                       max_model_len)
+    cache_config = CacheConfig(block_size, 1.0, 1, "auto")
+    cache_config.num_cpu_blocks = 2
+    cache_config.num_gpu_blocks = 2
+    scheduler = Scheduler(scheduler_config, cache_config, None)
+
+    # Add seq groups to scheduler.
+    _, seq_group_a = create_dummy_prompt("1", 1)
+    scheduler.add_seq_group(seq_group_a)
+
+    # Schedule seq groups prompts.
+    _, out = schedule_and_update_computed_tokens(scheduler)
+    assert get_sequence_groups(out) == [seq_group_a]
+
+    # Add a new prefill request B.
+    _, seq_group_b = create_dummy_prompt("2", 30)
+    scheduler.add_seq_group(seq_group_b)
+
+    # Verify prefill requests are prioritized. Since max_batched_num_tokens
+    # is 30, the new prefill request has to be scheduled first.
+    _, out = schedule_and_update_computed_tokens(scheduler)
+    assert get_sequence_groups(out) == [seq_group_b]
+
+
+def test_scheduler_schedule_preempt_abort():
+    block_size = 4
+    max_model_len = 16
+    scheduler_config = SchedulerConfig(64, 2, max_model_len)
+    cache_config = CacheConfig(block_size, 1.0, 1, "auto")
+    cache_config.num_cpu_blocks = 2
+    cache_config.num_gpu_blocks = 2
+    scheduler = Scheduler(scheduler_config, cache_config, None)
+
+    # Add seq groups to scheduler.
+ seq_a, seq_group_a = create_dummy_prompt("1", block_size) + seq_b, seq_group_b = create_dummy_prompt("2", block_size) + scheduler.add_seq_group(seq_group_a) + scheduler.add_seq_group(seq_group_b) + + # Schedule seq groups prompts. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert get_sequence_groups(out) == [seq_group_a, seq_group_b] + assert out.num_batched_tokens == block_size * 2 # seq_a and seq_b + assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == 2 + assert scheduler.get_num_unfinished_seq_groups() == 2 + + # Append "generated" tokens, allowing the sequence to mark prompt tokens as + # processed. + append_new_token(out, 1) + + # Schedule seq groups generation and preempt seq group b. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert get_sequence_groups(out) == [seq_group_a] + assert out.num_batched_tokens == 1 + assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == 1 + assert scheduler.get_num_unfinished_seq_groups() == 2 + + # Abort seq group a. Re-schedule seq group b prompt with recomputation. + scheduler.abort_seq_group("1") + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert get_sequence_groups(out) == [seq_group_b] + assert out.num_batched_tokens == 5 # 4 prompt + 1 generation. 
+ assert (not out.blocks_to_copy and not out.blocks_to_swap_in + and not out.blocks_to_swap_out) + assert len(seq_group_meta) == 1 + assert scheduler.get_num_unfinished_seq_groups() == 1 + + +def test_scheduler_max_seqs(): + block_size = 4 + num_seq_group = 4 + max_seq_group = 2 + max_model_len = 16 + scheduler_config = SchedulerConfig(64, max_seq_group, max_model_len) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + + all_seq_groups: List[SequenceGroup] = [] + # Add seq groups to scheduler. + for i in range(num_seq_group): + _, seq_group = create_dummy_prompt(str(i), prompt_length=block_size) + all_seq_groups.append(seq_group) + + # Append 1 seq group + scheduler.add_seq_group(all_seq_groups[0]) + + # Schedule seq groups prompts. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set([all_seq_groups[0]]) + append_new_token(out, 1) + + # Schedule seq groups generation. + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set([all_seq_groups[0]]) + append_new_token(out, 1) + + # Append 2 more seq group + scheduler.add_seq_group(all_seq_groups[1]) + scheduler.add_seq_group(all_seq_groups[2]) + + # Schedule seq groups prompts. + # Only 1 seq group should be scheduled since max_seq_group is 2 + # and one is prompting. 
+ _, out = schedule_and_update_computed_tokens(scheduler) + assert set(get_sequence_groups(out)) == set([all_seq_groups[1]]) + + +def test_scheduler_delay_factor(): + block_size = 4 + scheduler_config = SchedulerConfig(100, 64, 16, delay_factor=0.5) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, None) + + # schedule first prompt + seq_group_meta, seq_group = create_dummy_prompt("0", + prompt_length=block_size) + scheduler.add_seq_group(seq_group) + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert out.num_prefill_groups > 0 + assert seq_group_meta[0].request_id == '0' + append_new_token(out, 1) + + # wait for a second before scheduling next prompt + time.sleep(1) + seq_group_meta, seq_group = create_dummy_prompt("1", + prompt_length=block_size) + scheduler.add_seq_group(seq_group) + + # second prompt should *not* be scheduled + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert out.num_prefill_groups == 0 + assert seq_group_meta[0].request_id == '0' + append_new_token(out, 1) + + # wait for more than 0.5 second and try again + time.sleep(0.6) + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert out.num_prefill_groups > 0 + assert seq_group_meta[0].request_id == '1' + append_new_token(out, 1) + + +def test_swapped_out_prioritized(): + scheduler = initialize_scheduler(max_num_seqs=6) + # best_of=2 * 3 == 6 sequences. + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60, best_of=2) + scheduler.add_seq_group(seq_group) + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + # prefill scheduled now. + assert len(out.scheduled_seq_groups) == 3 + append_new_token(out, 1) + + # The last request should be swapped out. 
+ scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "2" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 2 + assert out.num_batched_tokens == 2 + assert out.blocks_to_swap_out != {} + assert out.blocks_to_swap_in == {} + append_new_token(out, 1) + + # Add 1 more task. Swap should be prioritized over prefill. + _, seq_group = create_dummy_prompt(str(i), prompt_length=60, best_of=2) + scheduler.add_seq_group(seq_group) + seq_group_meta, out = schedule_and_update_computed_tokens(scheduler) + append_new_token(out, 1) + assert len(out.scheduled_seq_groups) == 3 + # 3 decodes. It is swapped in. + assert out.num_batched_tokens == 3 + assert out.blocks_to_swap_in != {} + assert out.blocks_to_swap_out == {} + + +def initialize_scheduler(*, + max_num_seqs=1000, + max_token_budget=1000, + max_model_len=1000, + lora_config=None): + block_size = 4 + scheduler_config = SchedulerConfig(max_token_budget, max_num_seqs, + max_model_len) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 8 + cache_config.num_gpu_blocks = 8 + scheduler = Scheduler(scheduler_config, cache_config, lora_config) + return scheduler + + +def create_token_budget(token_budget: int = 10000, + max_num_seqs: int = 10000) -> SchedulingBudget: + return SchedulingBudget( + token_budget=token_budget, + max_num_seqs=max_num_seqs, + ) + + +def add_token_budget(budget: SchedulingBudget, + num_batched_tokens: int = 0, + num_curr_seqs: int = 0): + mock_seq_group = create_dummy_prompt('10', prompt_length=60)[1] + budget.add_num_batched_tokens(mock_seq_group.request_id, + num_batched_tokens) + budget.add_num_seqs(mock_seq_group.request_id, num_curr_seqs) + + +def test_prefill_schedule_max_prompt_len(): + """ + Test prompt 
longer than max_prompt_len is aborted. + """ + scheduler = initialize_scheduler(max_model_len=30) + _, seq_group = create_dummy_prompt(0, prompt_length=60) + waiting = deque([seq_group]) + budget = create_token_budget() + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 1 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(remaining_waiting) == 0 + + +def test_prefill_schedule_token_budget(): + """ + Test token budget respected. + """ + scheduler = initialize_scheduler() + waiting = deque() + budget = create_token_budget(token_budget=0) + for i in range(2): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + + # 0 token budget == nothing is scheduled. + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(remaining_waiting) == 2 + + # 60 token budget == 1 request scheduled. + budget = create_token_budget(token_budget=60) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 1 + assert budget.num_batched_tokens == 60 + assert budget.num_curr_seqs == 1 + assert len(remaining_waiting) == 1 + + # Test when current_batched_tokens respected. + scheduler = initialize_scheduler() + waiting = deque() + budget = create_token_budget(token_budget=60) + add_token_budget(budget, 30, 0) + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + # Cannot schedule a prompt that doesn't fit the budget. 
+ waiting.append(seq_group) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 30 + assert budget.num_curr_seqs == 0 + assert len(remaining_waiting) == 1 + budget = create_token_budget(token_budget=90) + add_token_budget(budget, 30, 0) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.seq_groups) == 1 + assert budget.num_batched_tokens == 90 + assert budget.num_curr_seqs == 1 + assert len(remaining_waiting) == 0 + + +def test_prefill_schedule_max_seqs(): + """ + Test max seq respected. + """ + scheduler = initialize_scheduler() + waiting = deque() + budget = create_token_budget(max_num_seqs=2) + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 2 + assert budget.num_batched_tokens == 120 + assert budget.num_curr_seqs == 2 + assert len(remaining_waiting) == 1 + + # Verify curr_num_seqs respected. + waiting = deque() + budget = create_token_budget(max_num_seqs=2) + add_token_budget(budget, 0, 2) + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 2 + assert len(remaining_waiting) == 1 + + +def test_prefill_schedule_max_lora(): + """ + Test max lora is respected and prioritized. 
+ """ + lora_config = LoRAConfig(max_lora_rank=8, max_loras=1) + scheduler = initialize_scheduler(lora_config=lora_config) + waiting = deque() + budget = create_token_budget(token_budget=120) + curr_loras = set() + for i in range(2): + _, seq_group = create_dummy_prompt(str(i), + prompt_length=60, + lora_request=LoRARequest( + lora_name=str(i), + lora_int_id=i + 1, + lora_local_path="abc")) + waiting.append(seq_group) + # Add two more requests to verify lora is prioritized. + # 0: Lora, 1: Lora, 2: regular, 3: regular + # In the first iteration, index 0, 2 is scheduled. + # If a request is not scheduled because it hits max lora, it is + # prioritized. Verify that. + for i in range(2, 4): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + # Schedule 2 requests (0 and 2) + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, curr_loras) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 2 + assert budget.num_batched_tokens == 120 + assert budget.num_curr_seqs == 2 + assert len(remaining_waiting) == 2 + assert len(curr_loras) == 1 + # The second lora request is scheduled next as FCFS policy. + # Reset curr_loras so that it can be scheduled. + curr_loras = set() + budget = create_token_budget(token_budget=60) + remaining_waiting, output = scheduler._schedule_prefills( + remaining_waiting, budget, curr_loras) + assert len(output.seq_groups) == 1 + assert output.seq_groups[0].seq_group.request_id == "1" + assert len(remaining_waiting) == 1 + assert len(curr_loras) == 1 + assert budget.num_batched_tokens == 60 + + +def test_prefill_schedule_no_block_manager_capacity(): + """ + Test sequence cannot be scheduled due to block manager has no capacity. 
+ """ + scheduler = initialize_scheduler() + waiting = deque() + budget = create_token_budget() + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + scheduler.block_manager.can_allocate = MagicMock() + scheduler.block_manager.can_allocate.return_value = AllocStatus.LATER + remainig_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 0 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(remainig_waiting) == 3 + + scheduler = initialize_scheduler() + waiting = deque() + budget = create_token_budget() + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + waiting.append(seq_group) + scheduler.block_manager.can_allocate = MagicMock() + scheduler.block_manager.can_allocate.return_value = AllocStatus.NEVER + remaining_waiting, output = scheduler._schedule_prefills( + waiting, budget, None) + assert len(output.ignored_seq_groups) == 3 + assert len(output.seq_groups) == 0 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(remaining_waiting) == 0 + + +def test_decode_schedule_preempted(): + """ + Test decodes cannot be scheduled and preempted. 
+ """ + scheduler = initialize_scheduler() + running = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + running.append(seq_group) + scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "1" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + + # 1 cannot be scheduled, and the lowest priority (request 2) + # should be preempted. 1 will also be preempted. + budget = create_token_budget() + remainig_running, output = scheduler._schedule_running( + running, budget, curr_loras, policy) + assert len(remainig_running) == 0 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + assert output.decode_seq_groups[0].seq_group.request_id == "0" + assert len(output.preempted) == 2 + # Verify budgets are updated. + assert budget.num_batched_tokens == 1 + # NOTE: When enable_chunk is False, num_seqs budget is not updated. + # assert budget.num_curr_seqs == 1 + # Both should be preempted, not swapped. + assert output.blocks_to_swap_out == {} + # Nothing is copied. 
+ assert output.blocks_to_copy == {} + + +def test_decode_swap_beam_search(): + """ + Test best_of > 1 swap out blocks + """ + scheduler = initialize_scheduler() + running = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + budget = create_token_budget() + for i in range(3): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + running.append(seq_group) + append_new_token_seq_group(60, seq_group, 1) + budget.add_num_seqs(seq_group.request_id, + seq_group.get_max_num_running_seqs()) + budget.add_num_batched_tokens( + seq_group.request_id, seq_group.num_seqs(SequenceStatus.RUNNING)) + + # The last request should be swapped out. + scheduler.block_manager.can_append_slots = MagicMock() + + def cannot_append_second_group(seq_group, num_lookahead_slots): + return seq_group.request_id != "2" + + scheduler.block_manager.can_append_slots.side_effect = ( + cannot_append_second_group) + scheduler.block_manager.swap_out = MagicMock() + expected_swap_mapping = {"5": "7"} + scheduler.block_manager.swap_out.return_value = expected_swap_mapping + + remaining_running, output = scheduler._schedule_running( + running, budget, curr_loras, policy) + assert len(remaining_running) == 0 + assert len(output.decode_seq_groups) == 2 + assert len(output.prefill_seq_groups) == 0 + assert output.decode_seq_groups[0].seq_group.request_id == "0" + assert output.decode_seq_groups[1].seq_group.request_id == "1" + assert len(output.preempted) == 0 + assert len(output.swapped_out) == 1 + # Budget should reflect the swapped-out requests. + assert budget.num_batched_tokens == 2 + # since there are 2 sequences, 2 should be subtracted. + assert budget.num_curr_seqs == 4 + # The last request is swapped out, so its blocks appear in the mapping. + assert output.blocks_to_swap_out == expected_swap_mapping + # Nothing is copied. 
+ assert output.blocks_to_copy == {} + + +def test_schedule_decode_blocks_to_copy_update(): + """ + Verify blocks_to_copy is updated. + """ + scheduler = initialize_scheduler() + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + running = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + running.append(seq_group) + + # The last request should be swapped out. + scheduler.block_manager.append_slots = MagicMock() + scheduler.block_manager.append_slots.return_value = {2: [3]} + + budget = create_token_budget() + remaining_running, output = scheduler._schedule_running( + running, budget, curr_loras, policy) + assert len(remaining_running) == 0 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + assert len(output.preempted) == 0 + assert len(output.swapped_out) == 0 + # Nothing is preempted. + assert output.blocks_to_swap_out == {} + # Since append_slot returns the source -> dist mapping, it should + # applied. 
+ assert output.blocks_to_copy == {2: [3]} + + +def test_schedule_swapped_simple(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + blocks_to_swap_out = {} + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + budget = create_token_budget() + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 0 + assert budget.num_batched_tokens == 1 + assert budget.num_curr_seqs == 2 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + # swap in is the reverse of swap out + blocks_to_swap_in_reverse = {} + for swapin, swapout in output.blocks_to_swap_in.items(): + blocks_to_swap_in_reverse[swapout] = swapin + assert blocks_to_swap_out == blocks_to_swap_in_reverse + + +def test_schedule_swapped_max_token_budget(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + blocks_to_swap_out = {} + for _ in range(2): + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + budget = create_token_budget(token_budget=1) + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 1 + assert budget.num_batched_tokens == 1 + assert budget.num_curr_seqs == 2 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + + # Verify num_batched_tokens are respected. 
+ budget = create_token_budget(token_budget=1) + add_token_budget(budget, 1, 0) + remaining_swapped, output = scheduler._schedule_swapped( + remaining_swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 1 + assert budget.num_batched_tokens == 1 + assert budget.num_curr_seqs == 0 + assert len(output.decode_seq_groups) == 0 + assert len(output.prefill_seq_groups) == 0 + + +def test_schedule_swapped_max_seqs(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + blocks_to_swap_out = {} + for i in range(4): + _, seq_group = create_dummy_prompt(str(i), prompt_length=60) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + budget = create_token_budget(max_num_seqs=2) + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 2 + assert budget.num_batched_tokens == 2 + assert budget.num_curr_seqs == 2 + assert len(output.decode_seq_groups) == 2 + assert len(output.prefill_seq_groups) == 0 + + # Verify num_curr_seqs are respected. 
+ remaining_swapped, output = scheduler._schedule_swapped( + remaining_swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 2 + assert budget.num_batched_tokens == 2 + assert budget.num_curr_seqs == 2 + assert len(output.decode_seq_groups) == 0 + assert len(output.prefill_seq_groups) == 0 + + +def test_schedule_swapped_max_loras(): + lora_config = LoRAConfig(max_lora_rank=8, max_loras=1) + scheduler = initialize_scheduler(lora_config=lora_config) + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = set() + blocks_to_swap_out = {} + for i in range(2): + _, seq_group = create_dummy_prompt(str(i), + prompt_length=60, + lora_request=LoRARequest( + lora_name=str(i), + lora_int_id=i + 1, + lora_local_path="abc")) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + budget = create_token_budget() + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 1 + assert budget.num_batched_tokens == 1 + assert budget.num_curr_seqs == 1 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + assert len(curr_loras) == 1 + + +def test_schedule_swapped_cannot_swap_in(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + blocks_to_swap_out = {} + for _ in range(2): + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + # The last request should be swapped out. 
+ scheduler.block_manager.can_swap_in = MagicMock() + scheduler.block_manager.can_swap_in.return_value = AllocStatus.LATER + # Since we cannot swap in, none of the requests are swapped in. + budget = create_token_budget() + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 2 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(output.decode_seq_groups) == 0 + assert len(output.prefill_seq_groups) == 0 + + +def test_infeasible_swap(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + blocks_to_swap_out = {} + for _ in range(2): + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + # The last request should be swapped out. + scheduler.block_manager.can_swap_in = MagicMock() + scheduler.block_manager.can_swap_in.return_value = AllocStatus.NEVER + # Since we cannot swap in, none of the requests are swapped in. 
+ budget = create_token_budget() + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 0 + assert len(output.infeasible_seq_groups) == 2 + assert budget.num_batched_tokens == 0 + assert budget.num_curr_seqs == 0 + assert len(output.decode_seq_groups) == 0 + assert len(output.prefill_seq_groups) == 0 + + +def test_schedule_swapped_blocks_to_copy(): + scheduler = initialize_scheduler() + swapped = deque() + policy = PolicyFactory.get_policy(policy_name="fcfs") + curr_loras = None + _, seq_group = create_dummy_prompt("1", prompt_length=60, best_of=2) + scheduler._allocate_and_set_running(seq_group) + append_new_token_seq_group(60, seq_group, 1) + blocks_to_swap_out = {} + scheduler._swap_out(seq_group, blocks_to_swap_out) + swapped.append(seq_group) + + # The last request should be swapped out. + scheduler.block_manager.append_slots = MagicMock() + scheduler.block_manager.append_slots.return_value = {2: [3]} + + budget = create_token_budget() + remaining_swapped, output = scheduler._schedule_swapped( + swapped, budget, curr_loras, policy) + assert len(remaining_swapped) == 0 + assert len(output.decode_seq_groups) == 1 + assert len(output.prefill_seq_groups) == 0 + assert output.blocks_to_copy == {2: [3]} + + +def test_scheduling_budget(): + TOKEN_BUDGET = 4 + MAX_SEQS = 4 + budget = SchedulingBudget(token_budget=TOKEN_BUDGET, max_num_seqs=MAX_SEQS) + assert budget.can_schedule(num_new_tokens=1, num_new_seqs=1) + assert budget.can_schedule(num_new_tokens=4, num_new_seqs=4) + assert not budget.can_schedule(num_new_tokens=1, num_new_seqs=5) + assert not budget.can_schedule(num_new_tokens=5, num_new_seqs=1) + assert not budget.can_schedule(num_new_tokens=5, num_new_seqs=5) + assert budget.remaining_token_budget() == TOKEN_BUDGET + + # Verify add/subtract num batched tokens. 
+ _, seq_group = create_dummy_prompt("1", 3) + budget.add_num_batched_tokens(seq_group.request_id, 2) + assert budget.remaining_token_budget() == 2 + assert budget.num_batched_tokens == 2 + assert budget.can_schedule(num_new_tokens=2, num_new_seqs=1) + assert not budget.can_schedule(num_new_tokens=3, num_new_seqs=1) + # Verify adding another seq group is no-op. + budget.add_num_batched_tokens(seq_group.request_id, 2) + assert budget.remaining_token_budget() == 2 + assert budget.num_batched_tokens == 2 + budget.subtract_num_batched_tokens(seq_group.request_id, 2) + assert budget.remaining_token_budget() == 4 + assert budget.num_batched_tokens == 0 + budget.subtract_num_batched_tokens(seq_group.request_id, 2) + assert budget.remaining_token_budget() == 4 + assert budget.num_batched_tokens == 0 + + # Verify add/subtract max seqs. + _, seq_group = create_dummy_prompt("1", 3) + budget.add_num_seqs(seq_group.request_id, 2) + assert budget.can_schedule(num_new_tokens=1, num_new_seqs=2) + assert not budget.can_schedule(num_new_tokens=1, num_new_seqs=3) + assert budget.num_curr_seqs == 2 + # Verify adding another seq group is no-op. 
+ budget.add_num_seqs(seq_group.request_id, 2) + assert budget.num_curr_seqs == 2 + budget.subtract_num_seqs(seq_group.request_id, 2) + assert budget.num_curr_seqs == 0 + budget.subtract_num_seqs(seq_group.request_id, 2) + assert budget.num_curr_seqs == 0 diff --git a/tests/core/utils.py b/tests/core/utils.py new file mode 100644 index 0000000..22c1d38 --- /dev/null +++ b/tests/core/utils.py @@ -0,0 +1,74 @@ +import time +from typing import Iterable, Optional, Tuple + +from vllm import SamplingParams +from vllm.lora.request import LoRARequest +from vllm.sequence import Logprob, Sequence, SequenceGroup + + +def create_dummy_prompt( + request_id: str, + prompt_length: int, + block_size: Optional[int] = None, + lora_request: Optional[LoRARequest] = None, + use_beam_search: bool = False, + best_of: int = 1, +) -> Tuple[Sequence, SequenceGroup]: + if not block_size: + block_size = prompt_length + + # Create dummy prompt sequence with tokens 0...block_size-1 + # and prompt "0 ... block_size". 
+ prompt_tokens = list(range(prompt_length)) + prompt_str = " ".join([str(t) for t in prompt_tokens]) + prompt = Sequence(int(request_id), prompt_str, prompt_tokens, block_size) + seq_group = SequenceGroup( + request_id, [prompt], + SamplingParams(use_beam_search=use_beam_search, best_of=best_of), + time.time(), lora_request) + + return prompt, seq_group + + +def create_seq_group( + seq_prompt_len: int = 1024, + seq_output_lens: Iterable[int] = (128, ), + request_id: str = '0', + seq_id_start: int = 0, + sampling_params: Optional[SamplingParams] = None) -> SequenceGroup: + + assert len(seq_output_lens) > 0 + + if sampling_params is None: + sampling_params = SamplingParams() + + prompt_token_ids = [0] * seq_prompt_len + + seqs = [] + for seq_id_offset, output_len in enumerate(seq_output_lens): + seq = Sequence( + seq_id=seq_id_start + seq_id_offset, + prompt="", + prompt_token_ids=prompt_token_ids, + block_size=16, + ) + + for i in range(output_len): + seq.append_token_id( + token_id=i, + logprobs={i: Logprob(0.0)}, + ) + seqs.append(seq) + + seq_group = SequenceGroup( + request_id=request_id, + seqs=seqs, + sampling_params=sampling_params, + arrival_time=time.time(), + ) + + return seq_group + + +def round_up_to_next_block(seq_len: int, block_size: int) -> int: + return (seq_len + block_size - 1) // block_size diff --git a/tests/distributed/test_basic_distributed_correctness.py b/tests/distributed/test_basic_distributed_correctness.py new file mode 100644 index 0000000..5274526 --- /dev/null +++ b/tests/distributed/test_basic_distributed_correctness.py @@ -0,0 +1,59 @@ +"""Compare the outputs of HF and distributed vLLM when using greedy sampling. +vLLM will allocate all the available memory, so we need to run the tests one +by one. The solution is to pass arguments (model name) by environment +variables. 
+Run: +```sh +TEST_DIST_MODEL=facebook/opt-125m pytest \ + test_basic_distributed_correctness.py +TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf pytest \ + test_basic_distributed_correctness.py +``` +""" +import os + +import pytest +import torch + +MODELS = [ + os.environ["TEST_DIST_MODEL"], +] +VLLM_ATTENTION_BACKEND = "VLLM_ATTENTION_BACKEND" + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [5]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + enforce_eager = False + backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND) + if backend_by_env_var == "FLASHINFER": + enforce_eager = True + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner(model, + dtype=dtype, + tensor_parallel_size=2, + enforce_eager=enforce_eager) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/distributed/test_chunked_prefill_distributed.py b/tests/distributed/test_chunked_prefill_distributed.py new file mode 100644 index 0000000..737b1f3 --- /dev/null +++ b/tests/distributed/test_chunked_prefill_distributed.py @@ -0,0 +1,66 @@ +"""Compare the outputs of HF and distributed vLLM when using greedy sampling. +vLLM will allocate all the available memory, so we need to run the tests one +by one. 
The solution is to pass arguments (model name) by environment +variables. + +Run: +```sh +TEST_DIST_MODEL=facebook/opt-125m pytest \ + test_chunked_prefill_distributed.py +TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf pytest \ + test_chunked_prefill_distributed.py +``` +""" +import os + +import pytest +import torch + +MODELS = [ + os.environ["TEST_DIST_MODEL"], +] + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [5]) +@pytest.mark.parametrize("chunked_prefill_token_size", [16]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + chunked_prefill_token_size: int, +) -> None: + # Add a chunked prefill config. + max_num_seqs = min(chunked_prefill_token_size, 256) + assert chunked_prefill_token_size != -1 + enable_chunked_prefill = True + max_num_batched_tokens = chunked_prefill_token_size + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner( + model, + dtype=dtype, + tensor_parallel_size=2, + max_num_seqs=max_num_seqs, + enable_chunked_prefill=enable_chunked_prefill, + max_num_batched_tokens=max_num_batched_tokens, + ) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/distributed/test_comm_ops.py b/tests/distributed/test_comm_ops.py new file mode 100644 index 0000000..aa9e053 --- /dev/null +++ 
b/tests/distributed/test_comm_ops.py @@ -0,0 +1,110 @@ +"""Test the communication operators. + +Run `pytest tests/distributed/test_comm_ops.py`. +""" +import os + +import pytest +import ray +import torch + +from vllm.distributed import (broadcast_tensor_dict, + tensor_model_parallel_all_gather, + tensor_model_parallel_all_reduce) +from vllm.test_utils import (init_test_distributed_environment, + multi_process_tensor_parallel) + + +@ray.remote(num_gpus=1, max_calls=1) +def all_reduce_test_worker(tensor_parallel_size: int, rank: int, + distributed_init_port: str): + # it is important to delete the CUDA_VISIBLE_DEVICES environment variable + # so that each worker can see all the GPUs + # they will be able to set the device to the correct GPU + del os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(1, tensor_parallel_size, rank, + distributed_init_port) + num_elements = 8 + all_tensors = [ + torch.arange(num_elements, dtype=torch.float32, device="cuda") * + (r + 1) for r in range(tensor_parallel_size) + ] + expected = torch.sum(torch.stack(all_tensors, dim=0), dim=0) + t = all_tensors[rank] + t = tensor_model_parallel_all_reduce(t) + assert torch.allclose(t, expected) + + +@ray.remote(num_gpus=1, max_calls=1) +def all_gather_test_worker(tensor_parallel_size: int, rank: int, + distributed_init_port: str): + # it is important to delete the CUDA_VISIBLE_DEVICES environment variable + # so that each worker can see all the GPUs + # they will be able to set the device to the correct GPU + del os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(1, tensor_parallel_size, rank, + distributed_init_port) + num_dimensions = 3 + tensor_size = list(range(2, num_dimensions + 2)) + total_size = 1 + for s in tensor_size: + total_size *= s + for all_gather_dimension in range(num_dimensions): + all_tensors = 
[ + torch.arange(total_size, dtype=torch.float32, + device="cuda").reshape(tensor_size) * (r + 1) + for r in range(tensor_parallel_size) + ] + expected = torch.cat(all_tensors, dim=all_gather_dimension) + t = all_tensors[rank] + t = tensor_model_parallel_all_gather(t, all_gather_dimension) + assert torch.allclose(t, expected) + + +@ray.remote(num_gpus=1, max_calls=1) +def broadcast_tensor_dict_test_worker(tensor_parallel_size: int, rank: int, + distributed_init_port: str): + # it is important to delete the CUDA_VISIBLE_DEVICES environment variable + # so that each worker can see all the GPUs + # they will be able to set the device to the correct GPU + del os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(1, tensor_parallel_size, rank, + distributed_init_port) + test_dict = { + "a": torch.arange(8, dtype=torch.float32, device="cuda"), + "b": torch.arange(16, dtype=torch.int8, device="cuda"), + "c": "test", + "d": [1, 2, 3], + "e": { + "a": 1, + "b": 2 + }, + } + + if rank == 0: + broadcast_tensor_dict(test_dict, src=0) + else: + recv_dict = broadcast_tensor_dict(src=0) + assert len(recv_dict) == len(test_dict) + assert torch.allclose(recv_dict["a"], test_dict["a"]) + assert torch.allclose(recv_dict["b"], test_dict["b"]) + assert recv_dict["c"] == test_dict["c"] + assert recv_dict["d"] == test_dict["d"] + assert recv_dict["e"] == test_dict["e"] + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +@pytest.mark.parametrize("tensor_parallel_size", [2]) +@pytest.mark.parametrize("test_target", [ + all_reduce_test_worker, all_gather_test_worker, + broadcast_tensor_dict_test_worker +]) +def test_multi_process_tensor_parallel(tensor_parallel_size, test_target): + multi_process_tensor_parallel(tensor_parallel_size, test_target) diff --git a/tests/distributed/test_custom_all_reduce.py b/tests/distributed/test_custom_all_reduce.py 
new file mode 100644 index 0000000..3b1cd17 --- /dev/null +++ b/tests/distributed/test_custom_all_reduce.py @@ -0,0 +1,84 @@ +import os +import random + +import pytest +import ray +import torch +import torch.distributed as dist + +from vllm.distributed import tensor_model_parallel_all_reduce +from vllm.distributed.device_communicators import custom_all_reduce +from vllm.test_utils import (init_test_distributed_environment, + multi_process_tensor_parallel) + +random.seed(42) +test_sizes = [random.randint(1024, 2048 * 1024) for _ in range(8)] +for i, v in enumerate(test_sizes): + test_sizes[i] -= v % 8 + + +@ray.remote(num_gpus=1, max_calls=1) +def graph_allreduce(world_size, rank, distributed_init_port): + del os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(1, world_size, rank, + distributed_init_port) + + custom_all_reduce.init_custom_all_reduce() + for sz in test_sizes: + for dtype in [torch.float32, torch.float16, torch.bfloat16]: + with custom_all_reduce.capture(): + # use integers so result matches NCCL exactly + inp1 = torch.randint(1, + 16, (sz, ), + dtype=dtype, + device=torch.cuda.current_device()) + inp2 = torch.randint(1, + 16, (sz, ), + dtype=dtype, + device=torch.cuda.current_device()) + torch.cuda.synchronize() + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph): + out1 = tensor_model_parallel_all_reduce(inp1) + # the input buffer is immediately modified to test + # synchronization + dist.all_reduce(inp1) + out2 = tensor_model_parallel_all_reduce(inp2) + dist.all_reduce(inp2) + graph.replay() + assert torch.allclose(out1, inp1) + assert torch.allclose(out2, inp2) + + +@ray.remote(num_gpus=1, max_calls=1) +def eager_allreduce(world_size, rank, distributed_init_port): + del os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(1, world_size, rank, + 
distributed_init_port) + + sz = 1024 + custom_all_reduce.init_custom_all_reduce() + fa = custom_all_reduce.get_handle() + inp = torch.ones(sz, dtype=torch.float32, device=device) + out = fa.all_reduce_unreg(inp) + assert torch.allclose(out, inp * world_size) + + inp = torch.ones(sz * 4, dtype=torch.bfloat16, device=device) + out = fa.all_reduce_unreg(inp) + assert torch.allclose(out, inp * world_size) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +@pytest.mark.parametrize("tensor_parallel_size", [2]) +@pytest.mark.parametrize("test_target", [eager_allreduce, graph_allreduce]) +def test_multi_process_tensor_parallel(tensor_parallel_size, test_target): + multi_process_tensor_parallel(tensor_parallel_size, test_target) + + +if __name__ == "__main__": + multi_process_tensor_parallel(2, graph_allreduce) diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py new file mode 100644 index 0000000..5873f0d --- /dev/null +++ b/tests/distributed/test_pynccl.py @@ -0,0 +1,159 @@ +import multiprocessing + +import pytest +import torch + +import vllm.distributed.device_communicators.pymccl_utils as pymccl_utils +from vllm.distributed.communication_op import tensor_model_parallel_all_reduce +from vllm.distributed.device_communicators.pynccl import (NCCLCommunicator, + ncclGetUniqueId) +from vllm.distributed.parallel_state import ( + ensure_model_parallel_initialized, get_tensor_model_parallel_cpu_group, + init_distributed_environment, with_pynccl_for_all_reduce) +from vllm.utils import update_environment_variables + + +def distributed_run(fn, world_size): + number_of_processes = world_size + processes = [] + for i in range(number_of_processes): + env = {} + env['RANK'] = str(i) + env['LOCAL_RANK'] = str(i) + env['WORLD_SIZE'] = str(number_of_processes) + env['LOCAL_WORLD_SIZE'] = str(number_of_processes) + env['MASTER_ADDR'] = 'localhost' + env['MASTER_PORT'] = '12345' + p = 
multiprocessing.Process(target=fn, args=(env, )) + processes.append(p) + p.start() + + for p in processes: + p.join() + + for p in processes: + assert p.exitcode == 0 + + +def worker_fn_wrapper(fn): + # `multiprocessing.Process` cannot accept environment variables directly + # so we need to pass the environment variables as arguments + # and update the environment variables in the function + def wrapped_fn(env): + update_environment_variables(env) + init_distributed_environment() + fn() + + return wrapped_fn + + +@worker_fn_wrapper +def worker_fn(): + comm = NCCLCommunicator() + tensor = torch.ones(16, 1024, 1024, dtype=torch.float32).cuda(comm.rank) + comm.all_reduce(tensor) + result = tensor.mean().cpu().item() + assert result == comm.world_size + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl(): + distributed_run(worker_fn, 2) + + +@worker_fn_wrapper +def multiple_tp_worker_fn(): + device = torch.device(f"cuda:{torch.distributed.get_rank()}") + groups = [ + torch.distributed.new_group(ranks=[0, 1], backend="gloo"), + torch.distributed.new_group(ranks=[2, 3], backend="gloo") + ] + group = groups[0] if torch.distributed.get_rank() in [0, 1] else groups[1] + comm = NCCLCommunicator(group=group, device=device) + tensor = torch.ones(16, 1024, 1024, dtype=torch.float32, device=device) + # two groups can communicate independently + if torch.distributed.get_rank() in [0, 1]: + comm.all_reduce(tensor) + comm.all_reduce(tensor) + result = tensor.mean().cpu().item() + assert result == 4 + else: + comm.all_reduce(tensor) + result = tensor.mean().cpu().item() + assert result == 2 + + +@pytest.mark.skipif(torch.cuda.device_count() < 4, + reason="Need at least 4 GPUs to run the test.") +def test_pynccl_multiple_tp(): + # this tests pynccl for multiple tp groups, in a standalone way + # i.e. 
call `comm.all_reduce` directly + distributed_run(multiple_tp_worker_fn, 4) + + +@worker_fn_wrapper +def multiple_tp_with_vllm_worker_fn(): + device = torch.device(f"cuda:{torch.distributed.get_rank()}") + torch.cuda.set_device(torch.distributed.get_rank()) + ensure_model_parallel_initialized(2, 2) + pymccl_utils.init_process_group( + group=get_tensor_model_parallel_cpu_group()) + tensor = torch.ones(16, 1024, 1024, dtype=torch.float32, device=device) + with with_pynccl_for_all_reduce(): + # two tp groups can communicate independently + if torch.distributed.get_rank() in [0, 1]: + tensor = tensor_model_parallel_all_reduce(tensor) + tensor = tensor_model_parallel_all_reduce(tensor) + result = tensor.mean().cpu().item() + assert result == 4 + else: + tensor = tensor_model_parallel_all_reduce(tensor) + result = tensor.mean().cpu().item() + assert result == 2 + + +@pytest.mark.skipif(torch.cuda.device_count() < 4, + reason="Need at least 4 GPUs to run the test.") +def test_pynccl_multiple_tp_with_vllm(): + # this tests pynccl for multiple tp groups, together with vllm + # i.e. 
call `tensor_model_parallel_all_reduce` + distributed_run(multiple_tp_with_vllm_worker_fn, 4) + + +@worker_fn_wrapper +def worker_fn_with_cudagraph(): + with torch.no_grad(): + graph = torch.cuda.CUDAGraph() + comm = NCCLCommunicator() + # run something in the default stream to initialize torch engine + a = torch.ones((4, 4), device=f'cuda:{comm.rank}') + torch.cuda.synchronize() + with torch.cuda.graph(graph, stream=comm.stream): + # operation during the graph capture is recorded but not executed + # see https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#creating-a-graph-using-stream-capture # noqa + comm.all_reduce(a) + comm.stream.synchronize() + assert a.mean().cpu().item() == comm.world_size**0 + graph.replay() + comm.stream.synchronize() + assert a.mean().cpu().item() == comm.world_size**1 + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl_with_cudagraph(): + distributed_run(worker_fn_with_cudagraph, 2) + + +def test_ncclGetUniqueId(): + unique_id = ncclGetUniqueId() + # `list(unique_id.internal)` is something like this: + # [34, -16, 23, 83, 109, -19, 59, 95, 2, 0, -86, 55, 10, -128, 0, 29, 0, + # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + # as long as the function doesn't raise an exception, we're good + assert unique_id is not None diff --git a/tests/distributed/test_pynccl_library.py b/tests/distributed/test_pynccl_library.py new file mode 100644 index 0000000..ec60a5e --- /dev/null +++ b/tests/distributed/test_pynccl_library.py @@ -0,0 +1,43 @@ +import multiprocessing +import tempfile + + +def target_fn(env, filepath): + from vllm.utils import 
update_environment_variables + update_environment_variables(env) + from vllm.utils import nccl_integrity_check + nccl_integrity_check(filepath) + + +def test_library_file(): + # note: don't import vllm.distributed.device_communicators.pynccl + # before running this test, otherwise the library file will be loaded + # and it might interfere with the test + from vllm.utils import find_nccl_library + so_file = find_nccl_library() + with open(so_file, 'rb') as f: + content = f.read() + try: + # corrupt the library file, should raise an exception + with open(so_file, 'wb') as f: + f.write(content[:len(content) // 2]) + p = multiprocessing.Process(target=target_fn, args=({}, so_file)) + p.start() + p.join() + assert p.exitcode != 0 + + # move the library file to a tmp path + # test VLLM_NCCL_SO_PATH + fd, path = tempfile.mkstemp() + with open(path, 'wb') as f: + f.write(content) + p = multiprocessing.Process(target=target_fn, + args=({ + "VLLM_NCCL_SO_PATH": path + }, path)) + p.start() + p.join() + assert p.exitcode == 0 + finally: + with open(so_file, 'wb') as f: + f.write(content) diff --git a/tests/engine/output_processor/test_multi_step.py b/tests/engine/output_processor/test_multi_step.py new file mode 100644 index 0000000..6da3da0 --- /dev/null +++ b/tests/engine/output_processor/test_multi_step.py @@ -0,0 +1,270 @@ +import random +from unittest.mock import MagicMock + +import pytest +from transformers import PreTrainedTokenizer + +from tests.core.utils import create_seq_group +from vllm.core.scheduler import Scheduler +from vllm.engine.output_processor.multi_step import MultiStepOutputProcessor +from vllm.engine.output_processor.stop_checker import StopChecker +from vllm.sampling_params import SamplingParams +from vllm.sequence import (Logprob, SequenceGroupOutput, SequenceOutput, + SequenceStatus) +from vllm.transformers_utils.detokenizer import Detokenizer +from vllm.utils import Counter + + +@pytest.mark.parametrize("seq_output_len", [128]) 
+@pytest.mark.parametrize("num_new_tokens", [1, 12]) +@pytest.mark.skip_global_cleanup +def test_appends_token_ids(num_new_tokens: int, seq_output_len: int): + """Verify multi-step decoding appends token ids correctly. + + We append token ids and verify all the token ids were appended correctly. + Note that ignore_eos=True. + """ + detokenizer = MagicMock(spec=Detokenizer) + scheduler = MagicMock(spec=Scheduler) + stop_checker = MagicMock(spec=StopChecker) + seq_counter = Counter() + + output_processor = MultiStepOutputProcessor( + detokenizer=detokenizer, + scheduler=scheduler, + seq_counter=seq_counter, + get_tokenizer_for_seq=lambda _: mock_tokenizer(), + stop_checker=stop_checker, + ) + + seq_group = create_seq_group( + seq_prompt_len=1024, + seq_output_lens=[seq_output_len], + sampling_params=SamplingParams(max_tokens=seq_output_len + + num_new_tokens, + ignore_eos=True), + ) + + seq = seq_group.get_seqs()[0] + seq.status = SequenceStatus.RUNNING + + new_token_ids = list(range(num_new_tokens)) + + outputs = [ + SequenceGroupOutput( + samples=[ + SequenceOutput( + parent_seq_id=seq.seq_id, + output_token=output_token, + logprobs={output_token: Logprob(0.0)}, + ) + ], + prompt_logprobs=None, + ) for output_token in new_token_ids + ] + + assert seq.get_token_ids()[-len(new_token_ids):] != new_token_ids + output_processor.process_outputs(seq_group, outputs) + assert seq.get_token_ids()[-len(new_token_ids):] == new_token_ids + + +@pytest.mark.parametrize("seq_prompt_len", [1024]) +@pytest.mark.parametrize("seq_output_len", [128]) +@pytest.mark.parametrize("num_new_tokens", [5, 6, 7, 8]) +@pytest.mark.parametrize("max_tokens", [128 + 3]) +@pytest.mark.skip_global_cleanup +def test_respects_max_tokens(num_new_tokens: int, seq_prompt_len: int, + seq_output_len: int, max_tokens: int): + """Verify tokens after max_tokens are dropped and not appended to the + sequence. 
+ """ + detokenizer = MagicMock(spec=Detokenizer) + scheduler = MagicMock(spec=Scheduler) + stop_checker = MagicMock(spec=StopChecker) + seq_counter = Counter() + + output_processor = MultiStepOutputProcessor( + detokenizer=detokenizer, + scheduler=scheduler, + seq_counter=seq_counter, + get_tokenizer_for_seq=lambda _: mock_tokenizer(), + stop_checker=stop_checker, + ) + + seq_group = create_seq_group( + seq_prompt_len=seq_prompt_len, + seq_output_lens=[seq_output_len], + sampling_params=SamplingParams(max_tokens=max_tokens, ), + ) + + seq = seq_group.get_seqs()[0] + seq.status = SequenceStatus.RUNNING + + new_token_ids = list(range(num_new_tokens)) + + outputs = [ + SequenceGroupOutput( + samples=[ + SequenceOutput( + parent_seq_id=seq.seq_id, + output_token=output_token, + logprobs={output_token: Logprob(0.0)}, + ) + ], + prompt_logprobs=None, + ) for output_token in new_token_ids + ] + + assert seq.get_len() == seq_prompt_len + seq_output_len + output_processor.process_outputs(seq_group, outputs) + + # Expect the processed sequence to not go over max tokens in len. + assert seq.get_len() == seq_prompt_len + max_tokens + + # Expect the correct tokens were appended. + expected_appended_tokens = new_token_ids[:max_tokens - seq_output_len] + assert seq.get_token_ids( + )[-len(expected_appended_tokens):] == expected_appended_tokens + + +@pytest.mark.parametrize("seq_prompt_len", [1024]) +@pytest.mark.parametrize("seq_output_len", [128]) +@pytest.mark.parametrize("num_new_tokens", [12]) +@pytest.mark.parametrize("seed", list(range(6))) +@pytest.mark.skip_global_cleanup +def test_respects_eos_token_id(num_new_tokens: int, seq_prompt_len: int, + seq_output_len: int, seed: int): + """Verify the eos token id is included in the sequence, but subsequent + tokens are dropped (not appended to sequence). 
+ """ + random.seed(seed) + detokenizer = MagicMock(spec=Detokenizer) + scheduler = MagicMock(spec=Scheduler) + stop_checker = MagicMock(spec=StopChecker) + seq_counter = Counter() + + eos_token_id = 100 + + output_processor = MultiStepOutputProcessor( + detokenizer=detokenizer, + scheduler=scheduler, + seq_counter=seq_counter, + get_tokenizer_for_seq=lambda _: mock_tokenizer(eos_token_id), + stop_checker=stop_checker, + ) + + seq_group = create_seq_group( + seq_prompt_len=seq_prompt_len, + seq_output_lens=[seq_output_len], + sampling_params=SamplingParams( + # Ensure enough space. + max_tokens=seq_output_len + num_new_tokens, ), + ) + + seq = seq_group.get_seqs()[0] + seq.status = SequenceStatus.RUNNING + + new_token_ids = list(range(num_new_tokens)) + assert eos_token_id not in new_token_ids + eos_index = random.randint(0, len(new_token_ids) - 1) + new_token_ids[eos_index] = eos_token_id + + outputs = [ + SequenceGroupOutput( + samples=[ + SequenceOutput( + parent_seq_id=seq.seq_id, + output_token=output_token, + logprobs={output_token: Logprob(0.0)}, + ) + ], + prompt_logprobs=None, + ) for output_token in new_token_ids + ] + + assert seq.get_len() == seq_prompt_len + seq_output_len + output_processor.process_outputs(seq_group, outputs) + + # Expect the processed sequence to not go beyond provided eos. + assert seq.get_len() == seq_prompt_len + seq_output_len + (eos_index + 1) + + # Expect the correct tokens were appended. 
+ expected_appended_tokens = new_token_ids[:eos_index + 1] + assert seq.get_token_ids( + )[-len(expected_appended_tokens):] == expected_appended_tokens + + +@pytest.mark.parametrize("seq_prompt_len", [1024]) +@pytest.mark.parametrize("seq_output_len", [128]) +@pytest.mark.parametrize("num_new_tokens", [12]) +@pytest.mark.parametrize("seed", list(range(6))) +@pytest.mark.skip_global_cleanup +def test_ignores_eos_token_id(num_new_tokens: int, seq_prompt_len: int, + seq_output_len: int, seed: int): + """When sampling parameters dictate that we should ignore the eos token id, + ensure all token ids are appended even if the eos token id is emitted. + """ + random.seed(seed) + detokenizer = MagicMock(spec=Detokenizer) + scheduler = MagicMock(spec=Scheduler) + stop_checker = MagicMock(spec=StopChecker) + seq_counter = Counter() + + eos_token_id = 100 + + output_processor = MultiStepOutputProcessor( + detokenizer=detokenizer, + scheduler=scheduler, + seq_counter=seq_counter, + get_tokenizer_for_seq=lambda _: mock_tokenizer(eos_token_id), + stop_checker=stop_checker, + ) + + seq_group = create_seq_group( + seq_prompt_len=seq_prompt_len, + seq_output_lens=[seq_output_len], + sampling_params=SamplingParams( + # Ensure enough space. + max_tokens=seq_output_len + num_new_tokens, + ignore_eos=True, + ), + ) + + seq = seq_group.get_seqs()[0] + seq.status = SequenceStatus.RUNNING + + new_token_ids = list(range(num_new_tokens)) + assert eos_token_id not in new_token_ids + eos_index = random.randint(0, len(new_token_ids) - 1) + new_token_ids[eos_index] = eos_token_id + + outputs = [ + SequenceGroupOutput( + samples=[ + SequenceOutput( + parent_seq_id=seq.seq_id, + output_token=output_token, + logprobs={output_token: Logprob(0.0)}, + ) + ], + prompt_logprobs=None, + ) for output_token in new_token_ids + ] + + assert seq.get_len() == seq_prompt_len + seq_output_len + output_processor.process_outputs(seq_group, outputs) + + # Expect the processed sequence to go beyond eos. 
+ assert seq.get_len() == seq_prompt_len + seq_output_len + num_new_tokens + + # Expect the correct tokens were appended. + expected_appended_tokens = new_token_ids[:seq_output_len + num_new_tokens - + seq_output_len] + assert seq.get_token_ids( + )[-len(expected_appended_tokens):] == expected_appended_tokens + + +def mock_tokenizer(eos_token_id=1000): + tokenizer = MagicMock(spec=PreTrainedTokenizer) + tokenizer.eos_token_id = eos_token_id + return tokenizer diff --git a/tests/engine/test_computed_prefix_blocks.py b/tests/engine/test_computed_prefix_blocks.py new file mode 100644 index 0000000..ed35212 --- /dev/null +++ b/tests/engine/test_computed_prefix_blocks.py @@ -0,0 +1,34 @@ +import pytest + +from vllm.engine.arg_utils import EngineArgs +from vllm.engine.llm_engine import LLMEngine +from vllm.sampling_params import SamplingParams + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +@pytest.mark.parametrize("block_size", [16]) +def test_computed_prefix_blocks(model: str, block_size: int): + # This test checks if we are able to run the engine to completion + # without triggering asserts. + # We are in a scenario where all blocks from the second request's prompt + # are full and already computed when the second request arrives. + prompt = ( + "You are a helpful assistant. How do I build a car from cardboard and " + "paper clips? 
Is there an easy to follow video tutorial available " + "online for free?") + prompt2 = ( + " Please recommend to me some resources where I can learn not only to " + "handle technical difficulties of building a car, but also " + "decoration.") + + engine_args = EngineArgs(model=model, + block_size=block_size, + enable_prefix_caching=True) + + engine = LLMEngine.from_engine_args(engine_args) + sampling_params = SamplingParams() + + engine.add_request("0", prompt + prompt2, sampling_params) + engine.step() + engine.add_request("1", prompt, sampling_params) + engine.step() diff --git a/tests/engine/test_detokenization.py b/tests/engine/test_detokenization.py new file mode 100644 index 0000000..f77f6d0 --- /dev/null +++ b/tests/engine/test_detokenization.py @@ -0,0 +1,32 @@ +import pytest + +from vllm.entrypoints.llm import LLM +from vllm.sampling_params import SamplingParams + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +def test_computed_prefix_blocks(model: str): + # This test checks if the engine generates completions both with and + # without optional detokenization, that detokenization includes text + # and no-detokenization doesn't, and that both completions have the same + # token_ids. + prompt = ( + "You are a helpful assistant. How do I build a car from cardboard and " + "paper clips? 
Is there an easy to follow video tutorial available " + "online for free?") + + llm = LLM(model=model) + sampling_params = SamplingParams(max_tokens=10, + temperature=0.0, + detokenize=False) + + outputs_no_detokenization = llm.generate(prompt, + sampling_params)[0].outputs[0] + sampling_params.detokenize = True + outputs_with_detokenization = llm.generate(prompt, + sampling_params)[0].outputs[0] + + assert outputs_no_detokenization.text == '' + assert outputs_with_detokenization.text != '' + assert outputs_no_detokenization.token_ids == \ + outputs_with_detokenization.token_ids diff --git a/tests/engine/test_multiproc_workers.py b/tests/engine/test_multiproc_workers.py new file mode 100644 index 0000000..610ad97 --- /dev/null +++ b/tests/engine/test_multiproc_workers.py @@ -0,0 +1,176 @@ +import asyncio +from concurrent.futures import ThreadPoolExecutor +from functools import partial +from time import sleep +from typing import Any, List, Tuple + +import pytest + +from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, + ResultHandler, WorkerMonitor) + + +class DummyWorker: + """Dummy version of vllm.worker.worker.Worker""" + + def __init__(self, rank: int): + self.rank = rank + + def worker_method(self, worker_input: Any) -> Tuple[int, Any]: + sleep(0.05) + + if isinstance(worker_input, Exception): + # simulate error case + raise worker_input + + return self.rank, input + + +def _start_workers() -> Tuple[List[ProcessWorkerWrapper], WorkerMonitor]: + result_handler = ResultHandler() + workers = [ + ProcessWorkerWrapper(result_handler, partial(DummyWorker, rank=rank)) + for rank in range(8) + ] + + worker_monitor = WorkerMonitor(workers, result_handler) + assert not worker_monitor.is_alive() + + result_handler.start() + worker_monitor.start() + assert worker_monitor.is_alive() + + return workers, worker_monitor + + +def test_local_workers() -> None: + """Test workers with sync task submission""" + + workers, worker_monitor = _start_workers() + + def 
execute_workers(worker_input: str) -> None: + worker_outputs = [ + worker.execute_method("worker_method", worker_input) + for worker in workers + ] + + for rank, output in enumerate(worker_outputs): + assert output.get() == (rank, input) + + executor = ThreadPoolExecutor(max_workers=4) + + # Test concurrent submission from different threads + futures = [ + executor.submit(partial(execute_workers, f"thread {thread_num}")) + for thread_num in range(4) + ] + + for future in futures: + future.result() + + # Test error case + exception = ValueError("fake error") + result = workers[0].execute_method("worker_method", exception) + try: + result.get() + pytest.fail("task should have failed") + except Exception as e: + assert isinstance(e, ValueError) + assert str(e) == "fake error" + + # Test cleanup when a worker fails + assert worker_monitor.is_alive() + workers[3].process.kill() + + # Other workers should get shut down here + worker_monitor.join(2) + + # Ensure everything is stopped + assert not worker_monitor.is_alive() + assert all(not worker.process.is_alive() for worker in workers) + + # Further attempts to submit tasks should fail + try: + _result = workers[0].execute_method("worker_method", "test") + pytest.fail("task should fail once workers have been shut down") + except Exception as e: + assert isinstance(e, ChildProcessError) + + +def test_local_workers_clean_shutdown() -> None: + """Test clean shutdown""" + + workers, worker_monitor = _start_workers() + + assert worker_monitor.is_alive() + assert all(worker.process.is_alive() for worker in workers) + + # Clean shutdown + worker_monitor.close() + + worker_monitor.join(5) + + # Ensure everything is stopped + assert not worker_monitor.is_alive() + assert all(not worker.process.is_alive() for worker in workers) + + # Further attempts to submit tasks should fail + try: + _result = workers[0].execute_method("worker_method", "test") + pytest.fail("task should fail once workers have been shut down") + except Exception 
as e: + assert isinstance(e, ChildProcessError) + + +@pytest.mark.asyncio +async def test_local_workers_async() -> None: + """Test local workers with async task submission""" + + workers, worker_monitor = _start_workers() + + async def execute_workers(worker_input: str) -> None: + worker_coros = [ + worker.execute_method_async("worker_method", worker_input) + for worker in workers + ] + + results = await asyncio.gather(*worker_coros) + for rank, result in enumerate(results): + assert result == (rank, input) + + tasks = [ + asyncio.create_task(execute_workers(f"task {task_num}")) + for task_num in range(4) + ] + + for task in tasks: + await task + + # Test error case + exception = ValueError("fake error") + try: + _result = await workers[0].execute_method_async( + "worker_method", exception) + pytest.fail("task should have failed") + except Exception as e: + assert isinstance(e, ValueError) + assert str(e) == "fake error" + + # Test cleanup when a worker fails + assert worker_monitor.is_alive() + workers[3].process.kill() + + # Other workers should get shut down here + worker_monitor.join(2) + + # Ensure everything is stopped + assert not worker_monitor.is_alive() + assert all(not worker.process.is_alive() for worker in workers) + + # Further attempts to submit tasks should fail + try: + _result = await workers[0].execute_method_async( + "worker_method", "test") + pytest.fail("task should fail once workers have been shut down") + except Exception as e: + assert isinstance(e, ChildProcessError) diff --git a/tests/engine/test_skip_tokenizer_init.py b/tests/engine/test_skip_tokenizer_init.py new file mode 100644 index 0000000..baa463a --- /dev/null +++ b/tests/engine/test_skip_tokenizer_init.py @@ -0,0 +1,23 @@ +import pytest + +from vllm.entrypoints.llm import LLM +from vllm.sampling_params import SamplingParams + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +def test_skip_tokenizer_initialization(model: str): + # This test checks if the flag 
skip_tokenizer_init skips the initialization + # of tokenizer and detokenizer. The generated output is expected to contain + # token ids. + llm = LLM(model=model, skip_tokenizer_init=True) + sampling_params = SamplingParams(prompt_logprobs=True, detokenize=True) + with pytest.raises(ValueError) as err: + llm.generate("abc", sampling_params) + assert "prompts must be None if" in str(err.value) + outputs = llm.generate(prompt_token_ids=[[1, 2, 3]], + sampling_params=sampling_params) + assert len(outputs) > 0 + completions = outputs[0].outputs + assert len(completions) > 0 + assert completions[0].text == "" + assert completions[0].token_ids diff --git a/tests/engine/test_stop_reason.py b/tests/engine/test_stop_reason.py new file mode 100644 index 0000000..b2f521a --- /dev/null +++ b/tests/engine/test_stop_reason.py @@ -0,0 +1,59 @@ +"""Test the different finish_reason="stop" situations during generation: + 1. One of the provided stop strings + 2. One of the provided stop tokens + 3. The EOS token + +Run `pytest tests/engine/test_stop_reason.py`. +""" + +import pytest +import transformers + +from vllm import SamplingParams + +MODEL = "facebook/opt-350m" +STOP_STR = "." 
+SEED = 42 +MAX_TOKENS = 1024 + + +@pytest.fixture +def vllm_model(vllm_runner): + vllm_model = vllm_runner(MODEL) + yield vllm_model + del vllm_model + + +def test_stop_reason(vllm_model, example_prompts): + tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL) + stop_token_id = tokenizer.convert_tokens_to_ids(STOP_STR) + llm = vllm_model.model + + # test stop token + outputs = llm.generate(example_prompts, + sampling_params=SamplingParams( + seed=SEED, + max_tokens=MAX_TOKENS, + stop_token_ids=[stop_token_id])) + for output in outputs: + output = output.outputs[0] + assert output.finish_reason == "stop" + assert output.stop_reason == stop_token_id + + # test stop string + outputs = llm.generate(example_prompts, + sampling_params=SamplingParams( + seed=SEED, max_tokens=MAX_TOKENS, stop=".")) + for output in outputs: + output = output.outputs[0] + assert output.finish_reason == "stop" + assert output.stop_reason == STOP_STR + + # test EOS token + outputs = llm.generate(example_prompts, + sampling_params=SamplingParams( + seed=SEED, max_tokens=MAX_TOKENS)) + for output in outputs: + output = output.outputs[0] + assert output.finish_reason == "length" or ( + output.finish_reason == "stop" and output.stop_reason is None) diff --git a/tests/engine/test_stop_strings.py b/tests/engine/test_stop_strings.py new file mode 100644 index 0000000..6b747be --- /dev/null +++ b/tests/engine/test_stop_strings.py @@ -0,0 +1,111 @@ +from typing import Any, List, Optional + +import pytest + +from vllm import CompletionOutput, LLMEngine, SamplingParams + +MODEL = "meta-llama/llama-2-7b-hf" +MAX_TOKENS = 200 + + +@pytest.fixture(scope="session") +def vllm_model(vllm_runner): + return vllm_runner(MODEL) + + +@pytest.mark.skip_global_cleanup +def test_stop_basic(vllm_model): + _test_stopping(vllm_model.model.llm_engine, + stop=["."], + include_in_output=False, + expected_output="VLLM is a 100% volunteer organization", + expected_reason=".") + + 
_test_stopping(vllm_model.model.llm_engine, + stop=["."], + include_in_output=True, + expected_output="VLLM is a 100% volunteer organization.", + expected_reason=".") + + +@pytest.mark.skip_global_cleanup +def test_stop_multi_tokens(vllm_model): + _test_stopping( + vllm_model.model.llm_engine, + stop=["group of peo", "short"], + include_in_output=False, + expected_output="VLLM is a 100% volunteer organization. We are a ", + expected_reason="group of peo") + + _test_stopping( + vllm_model.model.llm_engine, + stop=["group of peo", "short"], + include_in_output=True, + expected_output= + "VLLM is a 100% volunteer organization. We are a group of peo", + expected_reason="group of peo") + + +@pytest.mark.skip_global_cleanup +def test_stop_partial_token(vllm_model): + _test_stopping(vllm_model.model.llm_engine, + stop=["gani"], + include_in_output=False, + expected_output="VLLM is a 100% volunteer or", + expected_reason="gani") + + _test_stopping(vllm_model.model.llm_engine, + stop=["gani"], + include_in_output=True, + expected_output="VLLM is a 100% volunteer organi", + expected_reason="gani") + + +@pytest.mark.skip_global_cleanup +def test_stop_token_id(vllm_model): + # token id 13013 => " organization" + + _test_stopping(vllm_model.model.llm_engine, + stop_token_ids=[13013], + include_in_output=False, + expected_output="VLLM is a 100% volunteer", + expected_reason=13013) + + _test_stopping(vllm_model.model.llm_engine, + stop_token_ids=[13013], + include_in_output=True, + expected_output="VLLM is a 100% volunteer organization", + expected_reason=13013) + + +def _test_stopping(llm_engine: LLMEngine, + expected_output: str, + expected_reason: Any, + stop: Optional[List[str]] = None, + stop_token_ids: Optional[List[int]] = None, + include_in_output: bool = False) -> None: + llm_engine.add_request( + "id", "A story about vLLM:\n", + SamplingParams( + temperature=0.0, + max_tokens=MAX_TOKENS, + stop=stop, + stop_token_ids=stop_token_ids, + 
include_stop_str_in_output=include_in_output, + ), None) + + output: Optional[CompletionOutput] = None + output_text = "" + stop_reason = None + while llm_engine.has_unfinished_requests(): + (request_output, ) = llm_engine.step() + (output, ) = request_output.outputs + + # Ensure we don't backtrack + assert output.text.startswith(output_text) + output_text = output.text + stop_reason = output.stop_reason + + assert output is not None + assert output_text == expected_output + assert stop_reason == expected_reason diff --git a/tests/entrypoints/openai/test_serving_chat.py b/tests/entrypoints/openai/test_serving_chat.py new file mode 100644 index 0000000..269b082 --- /dev/null +++ b/tests/entrypoints/openai/test_serving_chat.py @@ -0,0 +1,37 @@ +import asyncio +from dataclasses import dataclass + +from vllm.entrypoints.openai.serving_chat import OpenAIServingChat + +MODEL_NAME = "openai-community/gpt2" +CHAT_TEMPLATE = "Dummy chat template for testing {}" + + +@dataclass +class MockModelConfig: + tokenizer = MODEL_NAME + trust_remote_code = False + tokenizer_mode = "auto" + max_model_len = 100 + tokenizer_revision = None + + +@dataclass +class MockEngine: + + async def get_model_config(self): + return MockModelConfig + + +async def _async_serving_chat_init(): + serving_completion = OpenAIServingChat(MockEngine(), + served_model_names=[MODEL_NAME], + response_role="assistant", + chat_template=CHAT_TEMPLATE) + return serving_completion + + +def test_async_serving_chat_init(): + serving_completion = asyncio.run(_async_serving_chat_init()) + assert serving_completion.tokenizer is not None + assert serving_completion.tokenizer.chat_template == CHAT_TEMPLATE diff --git a/tests/entrypoints/test_guided_processors.py b/tests/entrypoints/test_guided_processors.py new file mode 100644 index 0000000..41c871c --- /dev/null +++ b/tests/entrypoints/test_guided_processors.py @@ -0,0 +1,113 @@ +# This unit test should be moved to a new +# tests/test_guided_decoding directory. 
+import pytest +import torch +from transformers import AutoTokenizer + +from vllm.entrypoints.openai.protocol import CompletionRequest +from vllm.model_executor.guided_decoding import ( + get_guided_decoding_logits_processor) +from vllm.model_executor.guided_decoding.outlines_logits_processors import ( + JSONLogitsProcessor, RegexLogitsProcessor) + +TEST_SCHEMA = { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "age": { + "type": "integer" + }, + "skills": { + "type": "array", + "items": { + "type": "string", + "maxLength": 10 + }, + "minItems": 3 + }, + "work history": { + "type": "array", + "items": { + "type": "object", + "properties": { + "company": { + "type": "string" + }, + "duration": { + "type": "string" + }, + "position": { + "type": "string" + } + }, + "required": ["company", "position"] + } + } + }, + "required": ["name", "age", "skills", "work history"] +} + +TEST_REGEX = (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}" + r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)") + + +def test_guided_logits_processors(): + """Basic unit test for RegexLogitsProcessor and JSONLogitsProcessor.""" + tokenizer = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta') + regex_LP = RegexLogitsProcessor(TEST_REGEX, tokenizer) + json_LP = JSONLogitsProcessor(TEST_SCHEMA, + tokenizer, + whitespace_pattern=None) + + regex_LP.init_state() + token_ids = tokenizer.encode( + f"Give an example IPv4 address with this regex: {TEST_REGEX}") + tensor = torch.rand(32000) + original_tensor = torch.clone(tensor) + regex_LP(token_ids, tensor) + assert tensor.shape == original_tensor.shape + assert not torch.allclose(tensor, original_tensor) + + json_LP.init_state() + token_ids = tokenizer.encode( + f"Give an employee profile that fits this schema: {TEST_SCHEMA}") + tensor = torch.rand(32000) + original_tensor = torch.clone(tensor) + json_LP(token_ids, tensor) + assert tensor.shape == original_tensor.shape + assert not torch.allclose(tensor, original_tensor) + + 
+@pytest.mark.asyncio +@pytest.mark.parametrize("backend", ["outlines", "lm-format-enforcer"]) +async def test_guided_logits_processor_black_box(backend: str): + tokenizer = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta') + token_ids = tokenizer.encode( + f"Give an example IPv4 address with this regex: {TEST_REGEX}") + regex_request = CompletionRequest(model='test', + prompt=token_ids, + guided_regex=TEST_REGEX) + regex_lp = await get_guided_decoding_logits_processor( + backend, regex_request, tokenizer) + assert regex_lp is not None + tensor = torch.rand(32000) + original_tensor = torch.clone(tensor) + tensor = regex_lp(token_ids, tensor) + assert tensor.shape == original_tensor.shape + assert not torch.allclose(tensor, original_tensor) + + token_ids = tokenizer.encode( + f"Give an employee profile that fits this schema: {TEST_SCHEMA}") + json_request = CompletionRequest(model='test', + prompt=token_ids, + guided_json=TEST_SCHEMA) + json_lp = await get_guided_decoding_logits_processor( + backend, json_request, tokenizer) + assert json_lp is not None + tensor = torch.rand(32000) + original_tensor = torch.clone(tensor) + tensor = json_lp(token_ids, tensor) + assert tensor.shape == original_tensor.shape + assert not torch.allclose(tensor, original_tensor) diff --git a/tests/entrypoints/test_llm_generate.py b/tests/entrypoints/test_llm_generate.py new file mode 100644 index 0000000..5e8b7ca --- /dev/null +++ b/tests/entrypoints/test_llm_generate.py @@ -0,0 +1,41 @@ +import pytest + +from vllm import LLM, SamplingParams + + +def test_multiple_sampling_params(): + + llm = LLM(model="facebook/opt-125m", + max_num_batched_tokens=4096, + tensor_parallel_size=1) + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + sampling_params = [ + SamplingParams(temperature=0.01, top_p=0.95), + SamplingParams(temperature=0.3, top_p=0.95), + SamplingParams(temperature=0.7, 
top_p=0.95), + SamplingParams(temperature=0.99, top_p=0.95), + ] + + # Multiple SamplingParams should be matched with each prompt + outputs = llm.generate(prompts, sampling_params=sampling_params) + assert len(prompts) == len(outputs) + + # Exception raised, if the size of params does not match the size of prompts + with pytest.raises(ValueError): + outputs = llm.generate(prompts, sampling_params=sampling_params[:3]) + + # Single SamplingParams should be applied to every prompt + single_sampling_params = SamplingParams(temperature=0.3, top_p=0.95) + outputs = llm.generate(prompts, sampling_params=single_sampling_params) + assert len(prompts) == len(outputs) + + # sampling_params is None, default params should be applied + outputs = llm.generate(prompts, sampling_params=None) + assert len(prompts) == len(outputs) \ No newline at end of file diff --git a/tests/entrypoints/test_openai_server.py b/tests/entrypoints/test_openai_server.py new file mode 100644 index 0000000..e53e64a --- /dev/null +++ b/tests/entrypoints/test_openai_server.py @@ -0,0 +1,894 @@ +# imports for guided decoding tests +import json +import os +import re +import subprocess +import sys +import time + +import jsonschema +import openai # use the official client for correctness check +import pytest +# using Ray for overall ease of process management, parallel requests, +# and debugging. 
+import ray +import requests +import torch +# downloading lora to test lora requests +from huggingface_hub import snapshot_download +from openai import BadRequestError + +from vllm.transformers_utils.tokenizer import get_tokenizer + +MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds +# any model with a chat template should work here +MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta" +# technically this needs Mistral-7B-v0.1 as base, but we're not testing +# generation quality here +LORA_NAME = "typeof/zephyr-7b-beta-lora" + +TEST_SCHEMA = { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "age": { + "type": "integer" + }, + "skills": { + "type": "array", + "items": { + "type": "string", + "maxLength": 10 + }, + "minItems": 3 + }, + "work history": { + "type": "array", + "items": { + "type": "object", + "properties": { + "company": { + "type": "string" + }, + "duration": { + "type": "string" + }, + "position": { + "type": "string" + } + }, + "required": ["company", "position"] + } + } + }, + "required": ["name", "age", "skills", "work history"] +} + +TEST_REGEX = (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}" + r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)") + +TEST_CHOICE = [ + "Python", "Java", "JavaScript", "C++", "C#", "PHP", "TypeScript", "Ruby", + "Swift", "Kotlin" +] + +pytestmark = pytest.mark.asyncio + + +@ray.remote(num_gpus=1) +class ServerRunner: + + def __init__(self, args): + env = os.environ.copy() + env["PYTHONUNBUFFERED"] = "1" + self.proc = subprocess.Popen( + ["python3", "-m", "vllm.entrypoints.openai.api_server"] + args, + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + self._wait_for_server() + + def ready(self): + return True + + def _wait_for_server(self): + # run health check + start = time.time() + while True: + try: + if requests.get( + "http://localhost:8000/health").status_code == 200: + break + except Exception as err: + if self.proc.poll() is not None: + raise RuntimeError("Server exited 
unexpectedly.") from err + + time.sleep(0.5) + if time.time() - start > MAX_SERVER_START_WAIT_S: + raise RuntimeError( + "Server failed to start in time.") from err + + def __del__(self): + if hasattr(self, "proc"): + self.proc.terminate() + + +@pytest.fixture(scope="session") +def zephyr_lora_files(): + return snapshot_download(repo_id=LORA_NAME) + + +@pytest.fixture(scope="session") +def server(zephyr_lora_files): + ray.init() + server_runner = ServerRunner.remote([ + "--model", + MODEL_NAME, + # use half precision for speed and memory savings in CI environment + "--dtype", + "bfloat16", + "--max-model-len", + "8192", + "--enforce-eager", + # lora config below + "--enable-lora", + "--lora-modules", + f"zephyr-lora={zephyr_lora_files}", + f"zephyr-lora2={zephyr_lora_files}", + "--max-lora-rank", + "64", + "--max-cpu-loras", + "2", + "--max-num-seqs", + "128", + ]) + ray.get(server_runner.ready.remote()) + yield server_runner + ray.shutdown() + + +@pytest.fixture(scope="module") +def client(): + client = openai.AsyncOpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", + ) + yield client + + +async def test_check_models(server, client: openai.AsyncOpenAI): + models = await client.models.list() + models = models.data + served_model = models[0] + lora_models = models[1:] + assert served_model.id == MODEL_NAME + assert all(model.root == MODEL_NAME for model in models) + assert lora_models[0].id == "zephyr-lora" + assert lora_models[1].id == "zephyr-lora2" + + +@pytest.mark.parametrize( + # first test base model, then test loras + "model_name", + [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], +) +async def test_single_completion(server, client: openai.AsyncOpenAI, + model_name: str): + completion = await client.completions.create(model=model_name, + prompt="Hello, my name is", + max_tokens=5, + temperature=0.0) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 1 + assert 
completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + assert completion.choices[0].finish_reason == "length" + assert completion.usage == openai.types.CompletionUsage( + completion_tokens=5, prompt_tokens=6, total_tokens=11) + + # test using token IDs + completion = await client.completions.create( + model=MODEL_NAME, + prompt=[0, 0, 0, 0, 0], + max_tokens=5, + temperature=0.0, + ) + assert completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + + +@pytest.mark.parametrize( + # first test base model, then test loras + "model_name", + [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], +) +async def test_zero_logprobs(server, client: openai.AsyncOpenAI, + model_name: str): + # test using token IDs + completion = await client.completions.create( + model=MODEL_NAME, + prompt=[0, 0, 0, 0, 0], + max_tokens=5, + temperature=0.0, + logprobs=0, + ) + choice = completion.choices[0] + assert choice.logprobs is not None + assert choice.logprobs.token_logprobs is not None + assert choice.logprobs.top_logprobs is None + + +@pytest.mark.parametrize( + # just test 1 lora hereafter + "model_name", + [MODEL_NAME, "zephyr-lora"], +) +async def test_single_chat_session(server, client: openai.AsyncOpenAI, + model_name: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": "user", + "content": "what is 1+1?" 
+ }] + + # test single completion + chat_completion = await client.chat.completions.create(model=model_name, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=5) + assert chat_completion.id is not None + assert chat_completion.choices is not None and len( + chat_completion.choices) == 1 + assert chat_completion.choices[0].message is not None + assert chat_completion.choices[0].logprobs is not None + assert chat_completion.choices[0].logprobs.top_logprobs is not None + assert len(chat_completion.choices[0].logprobs.top_logprobs[0]) == 5 + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 10 + assert message.role == "assistant" + messages.append({"role": "assistant", "content": message.content}) + + # test multi-turn dialogue + messages.append({"role": "user", "content": "express your result in json"}) + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + ) + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 0 + + +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_too_many_logprobs(server, client: openai.AsyncOpenAI, + model_name: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": "user", + "content": "what is 1+1?" + }] + + # Default max_logprobs is 5, so this should raise an error + with pytest.raises((openai.BadRequestError, openai.APIError)): + stream = await client.chat.completions.create(model=model_name, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=10, + stream=True) + async for chunk in stream: + ... 
+ + with pytest.raises(openai.BadRequestError): + await client.chat.completions.create(model=model_name, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=10, + stream=False) + + with pytest.raises((openai.BadRequestError, openai.APIError)): + stream = await client.completions.create(model=model_name, + prompt="Test", + max_tokens=10, + logprobs=10, + stream=True) + async for chunk in stream: + ... + + with pytest.raises(openai.BadRequestError): + await client.completions.create(model=model_name, + prompt="Test", + max_tokens=10, + logprobs=10, + stream=False) + + # the server should still work afterwards + chat_completion = await client.chat.completions.create(model=model_name, + messages=messages, + max_tokens=10, + stream=False) + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 0 + + +@pytest.mark.parametrize( + # just test 1 lora hereafter + "model_name", + [MODEL_NAME, "zephyr-lora"], +) +async def test_completion_streaming(server, client: openai.AsyncOpenAI, + model_name: str): + prompt = "What is an LLM?" 
+ + single_completion = await client.completions.create( + model=model_name, + prompt=prompt, + max_tokens=5, + temperature=0.0, + ) + single_output = single_completion.choices[0].text + single_usage = single_completion.usage + + stream = await client.completions.create(model=model_name, + prompt=prompt, + max_tokens=5, + temperature=0.0, + stream=True) + chunks = [] + finish_reason_count = 0 + async for chunk in stream: + chunks.append(chunk.choices[0].text) + if chunk.choices[0].finish_reason is not None: + finish_reason_count += 1 + # finish reason should only return in last block + assert finish_reason_count == 1 + assert chunk.choices[0].finish_reason == "length" + assert chunk.choices[0].text + assert chunk.usage == single_usage + assert "".join(chunks) == single_output + + +@pytest.mark.parametrize( + # just test 1 lora hereafter + "model_name", + [MODEL_NAME, "zephyr-lora"], +) +async def test_chat_streaming(server, client: openai.AsyncOpenAI, + model_name: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": "user", + "content": "what is 1+1?" 
+ }] + + # test single completion + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + temperature=0.0, + ) + output = chat_completion.choices[0].message.content + stop_reason = chat_completion.choices[0].finish_reason + + # test streaming + stream = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + temperature=0.0, + stream=True, + ) + chunks = [] + finish_reason_count = 0 + async for chunk in stream: + delta = chunk.choices[0].delta + if delta.role: + assert delta.role == "assistant" + if delta.content: + chunks.append(delta.content) + if chunk.choices[0].finish_reason is not None: + finish_reason_count += 1 + # finish reason should only return in last block + assert finish_reason_count == 1 + assert chunk.choices[0].finish_reason == stop_reason + assert delta.content + assert "".join(chunks) == output + + +@pytest.mark.parametrize( + # just test 1 lora hereafter + "model_name", + [MODEL_NAME, "zephyr-lora"], +) +async def test_batch_completions(server, client: openai.AsyncOpenAI, + model_name: str): + # test simple list + batch = await client.completions.create( + model=model_name, + prompt=["Hello, my name is", "Hello, my name is"], + max_tokens=5, + temperature=0.0, + ) + assert len(batch.choices) == 2 + assert batch.choices[0].text == batch.choices[1].text + + # test n = 2 + batch = await client.completions.create( + model=model_name, + prompt=["Hello, my name is", "Hello, my name is"], + n=2, + max_tokens=5, + temperature=0.0, + extra_body=dict( + # NOTE: this has to be true for n > 1 in vLLM, but not necessary + # for official client. 
+ use_beam_search=True), + ) + assert len(batch.choices) == 4 + assert batch.choices[0].text != batch.choices[ + 1].text, "beam search should be different" + assert batch.choices[0].text == batch.choices[ + 2].text, "two copies of the same prompt should be the same" + assert batch.choices[1].text == batch.choices[ + 3].text, "two copies of the same prompt should be the same" + + # test streaming + batch = await client.completions.create( + model=model_name, + prompt=["Hello, my name is", "Hello, my name is"], + max_tokens=5, + temperature=0.0, + stream=True, + ) + texts = [""] * 2 + async for chunk in batch: + assert len(chunk.choices) == 1 + choice = chunk.choices[0] + texts[choice.index] += choice.text + assert texts[0] == texts[1] + + +async def test_logits_bias(server, client: openai.AsyncOpenAI): + prompt = "Hello, my name is" + max_tokens = 5 + tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME) + + # Test exclusive selection + token_id = 1000 + completion = await client.completions.create( + model=MODEL_NAME, + prompt=prompt, + max_tokens=max_tokens, + temperature=0.0, + logit_bias={str(token_id): 100}, + seed=42, + ) + assert completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + response_tokens = tokenizer(completion.choices[0].text, + add_special_tokens=False)["input_ids"] + expected_tokens = tokenizer(tokenizer.decode([token_id] * 5), + add_special_tokens=False)["input_ids"] + assert all([ + response == expected + for response, expected in zip(response_tokens, expected_tokens) + ]) + + # Test ban + completion = await client.completions.create( + model=MODEL_NAME, + prompt=prompt, + max_tokens=max_tokens, + temperature=0.0, + ) + response_tokens = tokenizer(completion.choices[0].text, + add_special_tokens=False)["input_ids"] + first_response = completion.choices[0].text + completion = await client.completions.create( + model=MODEL_NAME, + prompt=prompt, + max_tokens=max_tokens, + temperature=0.0, + logit_bias={str(token): 
-100 + for token in response_tokens}, + ) + assert first_response != completion.choices[0].text + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_json_completion(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + completion = await client.completions.create( + model=MODEL_NAME, + prompt=f"Give an example JSON for an employee profile " + f"that fits this schema: {TEST_SCHEMA}", + n=3, + temperature=1.0, + max_tokens=500, + extra_body=dict(guided_json=TEST_SCHEMA, + guided_decoding_backend=guided_decoding_backend)) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 3 + for i in range(3): + assert completion.choices[i].text is not None + output_json = json.loads(completion.choices[i].text) + jsonschema.validate(instance=output_json, schema=TEST_SCHEMA) + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_json_chat(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": + "user", + "content": + f"Give an example JSON for an employee profile that " + f"fits this schema: {TEST_SCHEMA}" + }] + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=1000, + extra_body=dict(guided_json=TEST_SCHEMA, + guided_decoding_backend=guided_decoding_backend)) + message = chat_completion.choices[0].message + assert message.content is not None + json1 = json.loads(message.content) + jsonschema.validate(instance=json1, schema=TEST_SCHEMA) + + messages.append({"role": "assistant", "content": message.content}) + messages.append({ + "role": + "user", + "content": + "Give me another one with a different name and age" + }) + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + 
max_tokens=1000, + extra_body=dict(guided_json=TEST_SCHEMA, + guided_decoding_backend=guided_decoding_backend)) + message = chat_completion.choices[0].message + assert message.content is not None + json2 = json.loads(message.content) + jsonschema.validate(instance=json2, schema=TEST_SCHEMA) + assert json1["name"] != json2["name"] + assert json1["age"] != json2["age"] + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_regex_completion(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + completion = await client.completions.create( + model=MODEL_NAME, + prompt=f"Give an example IPv4 address with this regex: {TEST_REGEX}", + n=3, + temperature=1.0, + max_tokens=20, + extra_body=dict(guided_regex=TEST_REGEX, + guided_decoding_backend=guided_decoding_backend)) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 3 + for i in range(3): + assert completion.choices[i].text is not None + assert re.fullmatch(TEST_REGEX, completion.choices[i].text) is not None + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_regex_chat(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": + "user", + "content": + f"Give an example IP address with this regex: {TEST_REGEX}" + }] + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=20, + extra_body=dict(guided_regex=TEST_REGEX, + guided_decoding_backend=guided_decoding_backend)) + ip1 = chat_completion.choices[0].message.content + assert ip1 is not None + assert re.fullmatch(TEST_REGEX, ip1) is not None + + messages.append({"role": "assistant", "content": ip1}) + messages.append({"role": "user", "content": "Give me a different one"}) + chat_completion = await 
client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=20, + extra_body=dict(guided_regex=TEST_REGEX, + guided_decoding_backend=guided_decoding_backend)) + ip2 = chat_completion.choices[0].message.content + assert ip2 is not None + assert re.fullmatch(TEST_REGEX, ip2) is not None + assert ip1 != ip2 + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_choice_completion(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + completion = await client.completions.create( + model=MODEL_NAME, + prompt="The best language for type-safe systems programming is ", + n=2, + temperature=1.0, + max_tokens=10, + extra_body=dict(guided_choice=TEST_CHOICE, + guided_decoding_backend=guided_decoding_backend)) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 2 + for i in range(2): + assert completion.choices[i].text in TEST_CHOICE + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_choice_chat(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": + "user", + "content": + "The best language for type-safe systems programming is " + }] + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=10, + extra_body=dict(guided_choice=TEST_CHOICE, + guided_decoding_backend=guided_decoding_backend)) + choice1 = chat_completion.choices[0].message.content + assert choice1 in TEST_CHOICE + + messages.append({"role": "assistant", "content": choice1}) + messages.append({ + "role": "user", + "content": "I disagree, pick another one" + }) + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=10, + extra_body=dict(guided_choice=TEST_CHOICE, + 
guided_decoding_backend=guided_decoding_backend)) + choice2 = chat_completion.choices[0].message.content + assert choice2 in TEST_CHOICE + assert choice1 != choice2 + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_decoding_type_error(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + with pytest.raises(openai.BadRequestError): + _ = await client.completions.create( + model=MODEL_NAME, + prompt="Give an example JSON that fits this schema: 42", + extra_body=dict(guided_json=42, + guided_decoding_backend=guided_decoding_backend)) + + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": + "user", + "content": + "The best language for type-safe systems programming is " + }] + with pytest.raises(openai.BadRequestError): + _ = await client.chat.completions.create(model=MODEL_NAME, + messages=messages, + extra_body=dict(guided_regex={ + 1: "Python", + 2: "C++" + })) + + with pytest.raises(openai.BadRequestError): + _ = await client.completions.create( + model=MODEL_NAME, + prompt="Give an example string that fits this regex", + extra_body=dict(guided_regex=TEST_REGEX, guided_json=TEST_SCHEMA)) + + +@pytest.mark.parametrize("guided_decoding_backend", + ["outlines", "lm-format-enforcer"]) +async def test_guided_choice_chat_logprobs(server, client: openai.AsyncOpenAI, + guided_decoding_backend: str): + messages = [{ + "role": "system", + "content": "you are a helpful assistant" + }, { + "role": + "user", + "content": + "The best language for type-safe systems programming is " + }] + chat_completion = await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=5, + extra_body=dict(guided_choice=TEST_CHOICE, + guided_decoding_backend=guided_decoding_backend)) + top_logprobs = chat_completion.choices[0].logprobs.top_logprobs + + # -9999.0 is the minimum logprob returned by OpenAI + 
assert all( + isinstance(logprob, float) and logprob >= -9999.0 + for token_dict in top_logprobs + for token, logprob in token_dict.items()) + + +async def test_response_format_json_object(server, client: openai.AsyncOpenAI): + for _ in range(2): + resp = await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": + "user", + "content": ('what is 1+1? please respond with a JSON object, ' + 'the format is {"result": 2}') + }], + response_format={"type": "json_object"}) + + content = resp.choices[0].message.content + loaded = json.loads(content) + assert loaded == {"result": 2}, loaded + + +async def test_extra_fields(server, client: openai.AsyncOpenAI): + with pytest.raises(BadRequestError) as exc_info: + await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": "system", + "content": "You are a helpful assistant.", + "extra_field": "0", + }], # type: ignore + temperature=0, + seed=0) + + assert "extra_forbidden" in exc_info.value.message + + +async def test_complex_message_content(server, client: openai.AsyncOpenAI): + resp = await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": + "user", + "content": [{ + "type": + "text", + "text": + "what is 1+1? please provide the result without any other text." 
+ }] + }], + temperature=0, + seed=0) + content = resp.choices[0].message.content + assert content == "2" + + +async def test_guided_grammar(server, client: openai.AsyncOpenAI): + simple_sql_grammar = """ +start: select_statement + +select_statement: "SELECT" column "from" table "where" condition + +column: "col_1" | "col_2" +table: "table_1" | "table_2" +condition: column "=" number + +number: "1" | "2" +""" + + completion = await client.completions.create( + model=MODEL_NAME, + prompt=("Generate a sql state that select col_1 from " + "table_1 where it is equals to 1"), + temperature=1.0, + max_tokens=500, + extra_body=dict(guided_grammar=simple_sql_grammar)) + + content = completion.choices[0].text + + # use Lark to parse the output, and make sure it's a valid parse tree + from lark import Lark + parser = Lark(simple_sql_grammar) + parser.parse(content) + + # remove spaces for comparison b/c we removed them in the grammar + ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(" ", "") + + assert content.strip() == ground_truth + + +@pytest.mark.parametrize( + # first test base model, then test loras + "model_name", + [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], +) +async def test_echo_logprob_completion(server, client: openai.AsyncOpenAI, + model_name: str): + tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME) + # test using text and token IDs + for prompt in ("Hello, my name is", [0, 0, 0, 0, 0]): + completion = await client.completions.create(model=model_name, + prompt=prompt, + max_tokens=5, + temperature=0.0, + echo=True, + logprobs=1) + + prompt_text = tokenizer.decode(prompt) if isinstance(prompt, + list) else prompt + assert (completion.choices[0].text is not None + and re.search(r"^" + prompt_text, completion.choices[0].text)) + logprobs = completion.choices[0].logprobs + assert logprobs is not None + assert len(logprobs.text_offset) > 5 + assert (len(logprobs.token_logprobs) > 5 + and logprobs.token_logprobs[0] is None) + assert 
(len(logprobs.top_logprobs) > 5 + and logprobs.top_logprobs[0] is None) + assert len(logprobs.tokens) > 5 + + +async def test_long_seed(server, client: openai.AsyncOpenAI): + for seed in [ + torch.iinfo(torch.long).min - 1, + torch.iinfo(torch.long).max + 1 + ]: + with pytest.raises(BadRequestError) as exc_info: + await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": "system", + "content": "You are a helpful assistant.", + }], + temperature=0, + seed=seed) + + assert ("greater_than_equal" in exc_info.value.message + or "less_than_equal" in exc_info.value.message) + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/tests/entrypoints/test_server_oot_registration.py b/tests/entrypoints/test_server_oot_registration.py new file mode 100644 index 0000000..22e65bf --- /dev/null +++ b/tests/entrypoints/test_server_oot_registration.py @@ -0,0 +1,66 @@ +import multiprocessing +import sys +import time + +import torch +from openai import OpenAI, OpenAIError + +from vllm import ModelRegistry +from vllm.model_executor.models.opt import OPTForCausalLM +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.utils import get_open_port + + +class MyOPTForCausalLM(OPTForCausalLM): + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + # this dummy model always predicts the first token + logits = super().compute_logits(hidden_states, sampling_metadata) + logits.zero_() + logits[:, 0] += 1.0 + return logits + + +def server_function(port): + # register our dummy model + ModelRegistry.register_model("OPTForCausalLM", MyOPTForCausalLM) + sys.argv = ["placeholder.py"] + \ + ("--model facebook/opt-125m --dtype" + f" float32 --api-key token-abc123 --port {port}").split() + import runpy + runpy.run_module('vllm.entrypoints.openai.api_server', run_name='__main__') + + +def test_oot_registration_for_api_server(): + port = get_open_port() + server = 
multiprocessing.Process(target=server_function, args=(port, )) + server.start() + client = OpenAI( + base_url=f"http://localhost:{port}/v1", + api_key="token-abc123", + ) + while True: + try: + completion = client.chat.completions.create( + model="facebook/opt-125m", + messages=[{ + "role": "system", + "content": "You are a helpful assistant." + }, { + "role": "user", + "content": "Hello!" + }], + temperature=0, + ) + break + except OpenAIError as e: + if "Connection error" in str(e): + time.sleep(3) + else: + raise e + server.kill() + generated_text = completion.choices[0].message.content + # make sure only the first token is generated + rest = generated_text.replace("", "") + assert rest == "" diff --git a/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json b/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json new file mode 100644 index 0000000..a548f0a --- /dev/null +++ b/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json @@ -0,0 +1,90 @@ +{ + "model_type": "llama", + "kv_cache": { + "dtype": "float8_e4m3fn", + "scaling_factor": { + "0": { + "0": 0.0230364128947258, + "1": 0.01979283057153225, + "2": 0.0241350457072258, + "3": 0.0308314748108387, + "4": 0.0430733822286129, + "5": 0.0370396226644516, + "6": 0.0306222103536129, + "7": 0.0357491634786129, + "8": 0.0358189195394516, + "9": 0.0443289652466774, + "10": 0.0433175228536129, + "11": 0.0416782945394516, + "12": 0.0366908498108387, + "13": 0.0432477705180645, + "14": 0.0410505048930645, + "15": 0.0457589291036129, + "16": 0.0418526791036129, + "17": 0.0432477705180645, + "18": 0.0469447560608387, + "19": 0.0514787957072258, + "20": 0.0541294664144516, + "21": 0.0587681382894516, + "22": 0.0625, + "23": 0.0585588738322258, + "24": 0.0600237175822258, + "25": 0.0588030144572258, + "26": 0.0531180277466774, + "27": 0.06396484375, + "28": 0.0603027381002903, + "29": 0.0582101047039032, + "30": 0.0625348836183548, + "31": 0.0585588738322258, + "32": 0.0582798570394516, + "33": 0.0575125589966774, + "34": 
0.0590820349752903, + "35": 0.0614188089966774, + "36": 0.0631975457072258, + "37": 0.0615931935608387, + "38": 0.0601283498108387, + "39": 0.0571986623108387, + "40": 0.0670340433716774, + "41": 0.0523507259786129, + "42": 0.0547223798930645, + "43": 0.0631975457072258, + "44": 0.0663713738322258, + "45": 0.0603376142680645, + "46": 0.0652204304933548, + "47": 0.0734514519572258, + "48": 0.0693708211183548, + "49": 0.0725446492433548, + "50": 0.0627790242433548, + "51": 0.0691266804933548, + "52": 0.0688825398683548, + "53": 0.068429134786129, + "54": 0.0605119988322258, + "55": 0.0799386203289032, + "56": 0.0853097140789032, + "57": 0.0661969929933548, + "58": 0.0689871683716774, + "59": 0.0724051371216774, + "60": 0.0541643425822258, + "61": 0.0626743882894516, + "62": 0.0628487765789032, + "63": 0.0607212632894516, + "64": 0.0589076466858387, + "65": 0.0451660193502903, + "66": 0.0453055277466774, + "67": 0.0414341539144516, + "68": 0.0385044664144516, + "69": 0.0414341539144516, + "70": 0.0466308631002903, + "71": 0.0399693101644516, + "72": 0.0437011756002903, + "73": 0.0434221550822258, + "74": 0.0428989976644516, + "75": 0.0401785746216774, + "76": 0.0431082621216774, + "77": 0.0484444759786129, + "78": 0.0417829267680645, + "79": 0.0418178029358387 + } + } + } +} \ No newline at end of file diff --git a/tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json b/tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json new file mode 100644 index 0000000..bb73403 --- /dev/null +++ b/tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json @@ -0,0 +1,42 @@ +{ + "model_type": "llama", + "kv_cache": { + "dtype": "float8_e4m3fn", + "scaling_factor": { + "0": { + "0": 0.0152239128947258, + "1": 0.0188860222697258, + "2": 0.0354178324341774, + "3": 0.0376674123108387, + "4": 0.0418526791036129, + "5": 0.0433175228536129, + "6": 0.0397600457072258, + "7": 0.0424455925822258, + "8": 0.0415387861430645, + "9": 0.0408412404358387, + "10": 0.0395856611430645, + "11": 0.0377371683716774, 
        "12": 0.0400739423930645,
        "13": 0.040771484375,
        "14": 0.0393415205180645,
        "15": 0.0369001142680645,
        "16": 0.03857421875,
        "17": 0.0387486070394516,
        "18": 0.0403180830180645,
        "19": 0.0396205373108387,
        "20": 0.0375627800822258,
        "21": 0.0407366082072258,
        "22": 0.0432477705180645,
        "23": 0.0377022884786129,
        "24": 0.0399693101644516,
        "25": 0.0374581478536129,
        "26": 0.0413295216858387,
        "27": 0.0442243330180645,
        "28": 0.0424804724752903,
        "29": 0.0456891767680645,
        "30": 0.0409109964966774,
        "31": 0.0482352152466774
      }
    }
  }
}
# ===== tests/kernels/allclose_default.py =====
import torch

# Reference default values of atol and rtol are from
# https://github.com/pytorch/pytorch/blob/6d96beb6bec24d73ee3f080bac54d2104068f675/test/test_transformers.py#L67
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float: 1e-5}
default_rtol = {
    torch.float16: 1e-3,
    torch.bfloat16: 1.6e-2,
    torch.float: 1.3e-6
}


def get_default_atol(output) -> float:
    # Dtype-appropriate absolute tolerance for torch.allclose comparisons.
    return default_atol[output.dtype]


def get_default_rtol(output) -> float:
    # Dtype-appropriate relative tolerance for torch.allclose comparisons.
    return default_rtol[output.dtype]


# ===== tests/kernels/conftest.py =====
import pytest

from vllm.utils import (create_kv_caches_with_random,
                        create_kv_caches_with_random_flash)


@pytest.fixture()
def kv_cache_factory():
    # Factory fixture: hands tests the helper that builds randomized KV caches.
    return create_kv_caches_with_random


@pytest.fixture()
def kv_cache_factory_flashinfer():
    # Factory fixture for the flash-layout randomized KV-cache helper.
    return create_kv_caches_with_random_flash


# ===== tests/kernels/test_activation.py =====
from typing import Type

import pytest
import
torch +from allclose_default import get_default_atol, get_default_rtol + +from vllm.model_executor.layers.activation import (FastGELU, GeluAndMul, + NewGELU, SiluAndMul) + +DTYPES = [torch.half, torch.bfloat16, torch.float] +NUM_TOKENS = [7, 83, 2048] # Arbitrary values for testing +D = [512, 4096, 5120, 13824] # Arbitrary values for testing +SEEDS = [0] +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +@pytest.mark.parametrize("activation", ["silu", "gelu", "gelu_tanh"]) +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("d", D) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_act_and_mul( + activation: str, + num_tokens: int, + d: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + x = torch.randn(num_tokens, 2 * d, dtype=dtype) + if activation == "silu": + layer = SiluAndMul() + elif activation == "gelu": + layer = GeluAndMul(approximate="none") + elif activation == "gelu_tanh": + layer = GeluAndMul(approximate="tanh") + out = layer(x) + ref_out = layer._forward(x) + # The SiLU and GELU implementations are equivalent to the native PyTorch + # implementations, so we can do exact comparison. 
+ assert torch.allclose(out, ref_out, atol=0.0, rtol=0.0) + + +@pytest.mark.parametrize("activation", [FastGELU, NewGELU]) +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("d", D) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_activation( + activation: Type[torch.nn.Module], + num_tokens: int, + d: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + x = torch.randn(num_tokens, d, dtype=dtype) + layer = activation() + out = layer(x) + ref_out = layer._forward(x) + assert torch.allclose(out, + ref_out, + atol=get_default_atol(out), + rtol=get_default_rtol(out)) diff --git a/tests/kernels/test_attention.py b/tests/kernels/test_attention.py new file mode 100644 index 0000000..8453920 --- /dev/null +++ b/tests/kernels/test_attention.py @@ -0,0 +1,376 @@ +import random +from typing import List, Optional, Tuple + +import pytest +import torch +from allclose_default import get_default_atol, get_default_rtol +from xformers import ops as xops +from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask + +from vllm import _custom_ops as ops +from vllm.utils import get_max_shared_memory_bytes, is_hip + +FLOAT32_BYTES = torch.finfo(torch.float).bits // 8 +# This will change depending on the compute capability. +# - 512 as a buffer +MAX_SEQ_LEN = get_max_shared_memory_bytes() // FLOAT32_BYTES - 512 +# There may not be enough gpu memory due to large NUM_BLOCKS. +# Reduce NUM_BLOCKS when it happens. 
+NUM_BLOCKS = 4321 # Arbitrary values for testing +PARTITION_SIZE = 512 +# flshattF and tritonflashattF supported: {torch.float16, torch.bfloat16} +DTYPES = [torch.half, torch.bfloat16, torch.float + ] if not is_hip() else [torch.half, torch.bfloat16] +NUM_GEN_SEQS = [7] # Arbitrary values for testing +NUM_PREFILL_SEQS = [3] # Arbitrary values for testing +NUM_HEADS = [(40, 40), (64, 8)] # Arbitrary values for testing + +# FlashAttention forward only supports head dimension at most 128 +# https://github.com/ROCmSoftwarePlatform/flash-attention/blob/3d2b6f5d037782cc2c906909a46fb7e2e1b48b25/csrc/flash_attn_rocm/flash_api.cpp#L62 +HEAD_SIZES = [64, 80, 96, 112, 128, 256 + ] if not is_hip() else [64, 80, 96, 112, 128] + +BLOCK_SIZES = [16, 32] +USE_ALIBI = [False, True] +KV_CACHE_DTYPE = ["auto", "fp8"] +SEEDS = [0] +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +def ref_masked_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + scale: float, + attn_mask: Optional[torch.Tensor] = None, +) -> torch.Tensor: + attn_weights = scale * torch.einsum("qhd,khd->hqk", query, key).float() + if attn_mask is not None: + attn_weights = attn_weights + attn_mask.float() + attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype) + out = torch.einsum("hqk,khd->qhd", attn_weights, value) + return out + + +def ref_single_query_cached_kv_attention( + output: torch.Tensor, + query: torch.Tensor, + num_queries_per_kv: int, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + seq_lens: torch.Tensor, + scale: float, + alibi_slopes: Optional[torch.Tensor], +) -> None: + num_query_heads = query.shape[1] + num_kv_heads = value_cache.shape[1] + head_size = value_cache.shape[2] + block_size = value_cache.shape[3] + num_seqs = query.shape[0] + + block_tables = block_tables.cpu().tolist() + seq_lens = seq_lens.cpu().tolist() + for i in range(num_seqs): + q = 
query[i].unsqueeze(0) + block_table = block_tables[i] + seq_len = int(seq_lens[i]) + + keys = [] + values = [] + for j in range(seq_len): + block_number = int(block_table[j // block_size]) + block_offset = j % block_size + + k = key_cache[block_number, :, :, block_offset, :] + k = k.reshape(num_kv_heads, head_size) + keys.append(k) + + v = value_cache[block_number, :, :, block_offset] + values.append(v) + keys = torch.stack(keys, dim=0) + values = torch.stack(values, dim=0) + if num_queries_per_kv > 1: + # Handle MQA and GQA + keys = torch.repeat_interleave(keys, num_queries_per_kv, dim=1) + values = torch.repeat_interleave(values, num_queries_per_kv, dim=1) + + alibi_bias = None + if alibi_slopes is not None: + # Create the ALiBi bias used in the paged attention kernel. + position_ids = torch.arange(seq_len).int() + alibi_bias = (position_ids - seq_len + 1).float() + alibi_bias = alibi_slopes.view(-1, 1, 1) * alibi_bias.view( + 1, 1, -1) + + out = ref_masked_attention(q, keys, values, scale, alibi_bias) + out = out.view(num_query_heads, head_size) + output[i].copy_(out, non_blocking=True) + + +@pytest.mark.parametrize("version", ["v1", "v2"]) +@pytest.mark.parametrize("num_seqs", NUM_GEN_SEQS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("use_alibi", USE_ALIBI) +@pytest.mark.parametrize("block_size", BLOCK_SIZES) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_paged_attention( + kv_cache_factory, + version: str, + num_seqs: int, + num_heads: Tuple[int, int], + head_size: int, + use_alibi: bool, + block_size: int, + dtype: torch.dtype, + kv_cache_dtype: str, + seed: int, + device: str, +) -> None: + random.seed(seed) + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + 
torch.set_default_device(device) + scale = float(1.0 / (head_size**0.5)) + num_query_heads, num_kv_heads = num_heads + query = torch.empty(num_seqs, num_query_heads, head_size, dtype=dtype) + query.uniform_(-scale, scale) + + assert num_query_heads % num_kv_heads == 0 + num_queries_per_kv = num_query_heads // num_kv_heads + alibi_slopes = None + if use_alibi: + alibi_slopes = torch.randn(num_query_heads, dtype=torch.float) + + seq_lens = [random.randint(1, MAX_SEQ_LEN) for _ in range(num_seqs)] + seq_lens[-1] = MAX_SEQ_LEN + max_seq_len = max(seq_lens) + seq_lens = torch.tensor(seq_lens, dtype=torch.int) + + # Create the block tables. + max_num_blocks_per_seq = (max_seq_len + block_size - 1) // block_size + block_tables = [] + for _ in range(num_seqs): + block_table = [ + random.randint(0, NUM_BLOCKS - 1) + for _ in range(max_num_blocks_per_seq) + ] + block_tables.append(block_table) + block_tables = torch.tensor(block_tables, dtype=torch.int) + + # Create the KV caches. + key_caches, value_caches = kv_cache_factory(NUM_BLOCKS, block_size, 1, + num_kv_heads, head_size, + kv_cache_dtype, dtype, seed, + device) + key_cache, value_cache = key_caches[0], value_caches[0] + + # Using default kv_scale + kv_scale = 1.0 + + # Call the paged attention kernel. 
+ output = torch.empty_like(query) + if version == "v1": + ops.paged_attention_v1( + output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + seq_lens, + block_size, + max_seq_len, + alibi_slopes, + kv_cache_dtype, + kv_scale, + ) + elif version == "v2": + num_partitions = ((max_seq_len + PARTITION_SIZE - 1) // PARTITION_SIZE) + assert PARTITION_SIZE % block_size == 0 + num_seqs, num_heads, head_size = output.shape + tmp_output = torch.empty( + size=(num_seqs, num_heads, num_partitions, head_size), + dtype=output.dtype, + ) + exp_sums = torch.empty( + size=(num_seqs, num_heads, num_partitions), + dtype=torch.float32, + ) + max_logits = torch.empty_like(exp_sums) + ops.paged_attention_v2( + output, + exp_sums, + max_logits, + tmp_output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + seq_lens, + block_size, + max_seq_len, + alibi_slopes, + kv_cache_dtype, + kv_scale, + ) + else: + raise AssertionError(f"Unknown version: {version}") + + # Run the reference implementation. + if kv_cache_dtype == "fp8": + # Convert cache data back to dtype. 
+ x = 16 // torch.tensor([], dtype=dtype).element_size() + key_cache_shape = (NUM_BLOCKS, num_kv_heads, head_size // x, + block_size, x) + dequantized_key_cache = torch.empty(size=key_cache_shape, + dtype=dtype, + device=device) + ops.convert_fp8(key_cache, dequantized_key_cache) + key_cache = dequantized_key_cache + + value_cache_shape = value_cache.shape + dequantized_value_cache = torch.empty(size=value_cache_shape, + dtype=dtype, + device=device) + ops.convert_fp8(value_cache, dequantized_value_cache) + value_cache = dequantized_value_cache + + ref_output = torch.empty_like(query) + ref_single_query_cached_kv_attention( + ref_output, + query, + num_queries_per_kv, + key_cache, + value_cache, + block_tables, + seq_lens, + scale, + alibi_slopes, + ) + + # NOTE(woosuk): Due to the kernel-level differences in the two + # implementations, there is a small numerical difference in the two + # outputs. Thus, we use a relaxed tolerance for the test. + atol = get_default_atol(output) if is_hip() else 1e-3 + rtol = get_default_rtol(output) if is_hip() else 1e-5 + + # NOTE(zhaoyang): FP8 KV Cache will introduce quantization error, + # so we use a relaxed tolerance for the test. + atol, rtol = 1e-3, 1e-5 + if kv_cache_dtype == "fp8": + atol, rtol = 1e-2, 1e-5 + assert torch.allclose(output, ref_output, atol=atol, rtol=rtol) + + +def ref_multi_query_kv_attention( + cu_seq_lens: List[int], + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + scale: float, + dtype: torch.dtype, +) -> torch.Tensor: + num_seqs = len(cu_seq_lens) - 1 + ref_outputs = [] + for i in range(num_seqs): + start_idx = cu_seq_lens[i] + end_idx = cu_seq_lens[i + 1] + seq_len = end_idx - start_idx + + # Create attention mask. 
+ attn_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=dtype), + diagonal=1) + attn_mask = attn_mask * torch.finfo(dtype).min + attn_mask = attn_mask.to(dtype=dtype) + + ref_output = ref_masked_attention( + query[start_idx:end_idx], + key[start_idx:end_idx], + value[start_idx:end_idx], + scale, + attn_mask=attn_mask, + ) + ref_outputs.append(ref_output) + ref_output = torch.cat(ref_outputs, dim=0) + return ref_output + + +# TODO(woosuk): Add tests for USE_ALIBI=True. +@pytest.mark.parametrize("num_seqs", NUM_PREFILL_SEQS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_multi_query_kv_attention( + num_seqs: int, + num_heads: Tuple[int, int], + head_size: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + random.seed(seed) + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + # MAX_SEQ_LEN sometimes causes OOM in the reference implementation. + # As the xformers library is already tested with its own tests, we can use + # a smaller MAX_SEQ_LEN here. 
+ max_len = min(MAX_SEQ_LEN, 4096) + seq_lens = random.sample(range(1, max_len), num_seqs) + num_tokens = sum(seq_lens) + + scale = float(1.0 / (head_size**0.5)) + num_query_heads, num_kv_heads = num_heads + qkv = torch.empty(num_tokens, + num_query_heads + 2 * num_kv_heads, + head_size, + dtype=dtype) + qkv.uniform_(-scale, scale) + query, key, value = qkv.split( + [num_query_heads, num_kv_heads, num_kv_heads], dim=1) + + num_queries_per_kv = num_query_heads // num_kv_heads + if num_queries_per_kv > 1: + # Handle MQA and GQA + key = torch.repeat_interleave(key, num_queries_per_kv, dim=1) + value = torch.repeat_interleave(value, num_queries_per_kv, dim=1) + attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens) + output = xops.memory_efficient_attention_forward( + query.unsqueeze(0), + key.unsqueeze(0), + value.unsqueeze(0), + attn_bias=attn_bias, + p=0.0, + scale=scale, + ) + output = output.squeeze(0) + + cu_seq_lens = [0] + for seq_len in seq_lens: + cu_seq_lens.append(cu_seq_lens[-1] + seq_len) + ref_output = ref_multi_query_kv_attention( + cu_seq_lens, + query, + key, + value, + scale, + dtype, + ) + atol = get_default_atol(output) if is_hip() else 1e-3 + rtol = get_default_rtol(output) if is_hip() else 1e-5 + assert torch.allclose(output, ref_output, atol=atol, rtol=rtol) diff --git a/tests/kernels/test_cache.py b/tests/kernels/test_cache.py new file mode 100644 index 0000000..c4eeb35 --- /dev/null +++ b/tests/kernels/test_cache.py @@ -0,0 +1,375 @@ +import random +from typing import Tuple + +import pytest +import torch + +from vllm import _custom_ops as ops +from vllm_C import cache_ops +from vllm.utils import is_hip + +COPYING_DIRECTION = [('cuda', 'cpu'), ('cuda', 'cuda'), ('cpu', 'cuda')] +DTYPES = [torch.half, torch.bfloat16, torch.float] +NUM_TOKENS = [42] # Arbitrary values for testing +NUM_LAYERS = [1] # Arbitrary values for testing +NUM_HEADS = [8] # Arbitrary values for testing +HEAD_SIZES = [64, 80, 96, 112, 128, 256] +BLOCK_SIZES = [8, 16, 32] 

# Arbitrary values for testing
# don't make it too large. e.g. [1024, 36000] will OOM
NUM_BLOCKS = [1024, 10000]

NUM_MAPPINGS = [256]  # Arbitrary values for testing
SEEDS = [0]
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]
KV_CACHE_DTYPE = ["auto", "fp8"]


@pytest.mark.parametrize("num_mappings", NUM_MAPPINGS)
@pytest.mark.parametrize("num_layers", NUM_LAYERS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@torch.inference_mode()
def test_copy_blocks(
    kv_cache_factory,
    num_mappings: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    kv_cache_dtype: str,
    device: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # Generate random block mappings where each source block is mapped to two
    # destination blocks.
    assert 2 * num_mappings <= num_blocks
    src_blocks = random.sample(range(num_blocks), num_mappings)
    # Typo fix: was `remainig_blocks`; renamed to match the correctly
    # spelled `remaining_blocks` in test_swap_blocks.
    remaining_blocks = list(set(range(num_blocks)) - set(src_blocks))
    dst_blocks = random.sample(remaining_blocks, 2 * num_mappings)
    block_mapping = {}
    for i in range(num_mappings):
        src = src_blocks[i]
        dst1 = dst_blocks[2 * i]
        dst2 = dst_blocks[2 * i + 1]
        block_mapping[src] = [dst1, dst2]

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory(num_blocks, block_size,
                                                num_layers, num_heads,
                                                head_size, kv_cache_dtype,
                                                dtype, seed, device)

    # Clone the KV caches.
+ cloned_key_caches = [key_cache.clone() for key_cache in key_caches] + cloned_value_caches = [value_cache.clone() for value_cache in value_caches] + + # Call the copy blocks kernel. + ops.copy_blocks(key_caches, value_caches, block_mapping) + + # Run the reference implementation. + for src, dsts in block_mapping.items(): + for dst in dsts: + for cloned_key_cache in cloned_key_caches: + cloned_key_cache[dst].copy_(cloned_key_cache[src]) + for cloned_value_cache in cloned_value_caches: + cloned_value_cache[dst].copy_(cloned_value_cache[src]) + + # Compare the results. + for key_cache, cloned_key_cache in zip(key_caches, cloned_key_caches): + assert torch.allclose(key_cache, cloned_key_cache) + for value_cache, cloned_value_cache in zip(value_caches, + cloned_value_caches): + assert torch.allclose(value_cache, cloned_value_cache) + + +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("block_size", BLOCK_SIZES) +@pytest.mark.parametrize("num_blocks", NUM_BLOCKS) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE) +@torch.inference_mode() +def test_reshape_and_cache( + kv_cache_factory, + num_tokens: int, + num_heads: int, + head_size: int, + block_size: int, + num_blocks: int, + dtype: torch.dtype, + seed: int, + device: str, + kv_cache_dtype: str, +) -> None: + if not is_hip() and kv_cache_dtype == "fp8": + pytest.skip() # This test is not tuned for e5m2 cuda precision + random.seed(seed) + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + # Create a random slot mapping. 
+ num_slots = block_size * num_blocks + slot_mapping = random.sample(range(num_slots), num_tokens) + slot_mapping = torch.tensor(slot_mapping, dtype=torch.long) + + qkv = torch.randn(num_tokens, 3, num_heads, head_size, dtype=dtype) + _, key, value = qkv.unbind(dim=1) + + # Create the KV caches. + key_caches, value_caches = kv_cache_factory(num_blocks, block_size, 1, + num_heads, head_size, + kv_cache_dtype, dtype, seed, + device) + key_cache, value_cache = key_caches[0], value_caches[0] + + # Clone the KV caches. + if kv_cache_dtype == "fp8": + cloned_key_cache = torch.empty_like(key_cache, dtype=torch.float16) + ops.convert_fp8(key_cache, cloned_key_cache) + cloned_value_cache = torch.empty_like(value_cache, dtype=torch.float16) + ops.convert_fp8(value_cache, cloned_value_cache) + else: + cloned_key_cache = key_cache.clone() + cloned_value_cache = value_cache.clone() + + # Using default kv_scale + kv_scale = 1.0 + + # Call the reshape_and_cache kernel. + ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping, + kv_cache_dtype, kv_scale) + + if kv_cache_dtype == "fp8": + result_key_cache = torch.empty_like(key_cache, dtype=torch.float16) + ops.convert_fp8(key_cache, result_key_cache) + result_value_cache = torch.empty_like(value_cache, dtype=torch.float16) + ops.convert_fp8(value_cache, result_value_cache) + + # Run the reference implementation. 
+ reshaped_key = key.reshape(num_tokens, *key_cache[0, :, :, 0, :].shape) + block_indicies = torch.div(slot_mapping, block_size, rounding_mode="floor") + block_indicies = block_indicies.cpu().tolist() + block_offsets = slot_mapping % block_size + block_offsets = block_offsets.cpu().tolist() + for i in range(num_tokens): + block_idx = block_indicies[i] + block_offset = block_offsets[i] + cloned_key_cache[block_idx, :, :, block_offset, :] = reshaped_key[i] + cloned_value_cache[block_idx, :, :, block_offset] = value[i] + + if kv_cache_dtype == "fp8": + assert torch.allclose(result_key_cache, + cloned_key_cache, + atol=0.001, + rtol=0.1) + assert torch.allclose(result_value_cache, + cloned_value_cache, + atol=0.001, + rtol=0.1) + else: + assert torch.allclose(key_cache, cloned_key_cache) + assert torch.allclose(value_cache, cloned_value_cache) + + +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("block_size", BLOCK_SIZES) +@pytest.mark.parametrize("num_blocks", NUM_BLOCKS) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE) +@torch.inference_mode() +def test_reshape_and_cache_flash( + kv_cache_factory_flashinfer, + num_tokens: int, + num_heads: int, + head_size: int, + block_size: int, + num_blocks: int, + dtype: torch.dtype, + seed: int, + device: str, + kv_cache_dtype: str, +) -> None: + if kv_cache_dtype == "fp8": + pytest.skip() + random.seed(seed) + torch.random.manual_seed(seed) + torch.cuda.manual_seed(seed) + + # Create a random slot mapping. 
+ num_slots = block_size * num_blocks + slot_mapping = random.sample(range(num_slots), num_tokens) + slot_mapping = torch.tensor(slot_mapping, dtype=torch.long, device='cuda') + + qkv = torch.randn(num_tokens, + 3, + num_heads, + head_size, + dtype=dtype, + device=device) + _, key, value = qkv.unbind(dim=1) + + # Create the KV caches. + key_caches, value_caches = kv_cache_factory_flashinfer( + num_blocks, + block_size, + 1, + num_heads, + head_size, + kv_cache_dtype, + dtype, + ) + key_cache, value_cache = key_caches[0], value_caches[0] + + # Clone the KV caches. + cloned_key_cache = key_cache.clone() + cloned_value_cache = value_cache.clone() + + # Call the reshape_and_cache kernel. + cache_ops.reshape_and_cache_flash(key, value, key_cache, value_cache, + slot_mapping, kv_cache_dtype) + + # Run the reference implementation. + block_indicies = torch.div(slot_mapping, block_size, rounding_mode='floor') + block_indicies = block_indicies.cpu().tolist() + block_offsets = slot_mapping % block_size + block_offsets = block_offsets.cpu().tolist() + for i in range(num_tokens): + block_idx = block_indicies[i] + block_offset = block_offsets[i] + cloned_key_cache[block_idx, block_offset, :, :] = key[i] + cloned_value_cache[block_idx, block_offset, :, :] = value[i] + + assert torch.allclose(key_cache, cloned_key_cache) + assert torch.allclose(value_cache, cloned_value_cache) + + +@pytest.mark.parametrize("direction", COPYING_DIRECTION) +@pytest.mark.parametrize("num_mappings", NUM_MAPPINGS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("block_size", BLOCK_SIZES) +@pytest.mark.parametrize("num_blocks", NUM_BLOCKS) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE) +@torch.inference_mode() +def test_swap_blocks( + kv_cache_factory, + direction: Tuple[str, 
str], + num_mappings: int, + num_heads: int, + head_size: int, + block_size: int, + num_blocks: int, + dtype: torch.dtype, + seed: int, + device: str, + kv_cache_dtype: str, +) -> None: + if kv_cache_dtype == "fp8" and "cpu" in direction: + pytest.skip() + if not is_hip() and kv_cache_dtype == "fp8": + pytest.skip() # This test is not tuned for e5m2 cuda precision + random.seed(seed) + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + + src_device = device if direction[0] == "cuda" else 'cpu' + dst_device = device if direction[1] == "cuda" else 'cpu' + + src_blocks = random.sample(range(num_blocks), num_mappings) + # For the same device, mapping must not overlap + if src_device == dst_device: + remaining_blocks = list(set(range(num_blocks)) - set(src_blocks)) + dst_blocks = random.sample(remaining_blocks, num_mappings) + else: + dst_blocks = random.sample(range(num_blocks), num_mappings) + + block_mapping = dict(zip(src_blocks, dst_blocks)) + + # Create the KV caches on the first device. + src_key_caches, src_value_caches = kv_cache_factory( + num_blocks, block_size, 1, num_heads, head_size, kv_cache_dtype, dtype, + seed, src_device) + + # Create the KV caches on the second device. + dist_key_caches, dist_value_caches = kv_cache_factory( + num_blocks, block_size, 1, num_heads, head_size, kv_cache_dtype, dtype, + seed, dst_device) + + src_key_caches_clone = src_key_caches[0].clone() + src_value_caches_clone = src_value_caches[0].clone() + + # Call the swap_blocks kernel. 
+ ops.swap_blocks(src_key_caches[0], dist_key_caches[0], block_mapping) + ops.swap_blocks(src_value_caches[0], dist_value_caches[0], block_mapping) + + for src, dst in block_mapping.items(): + assert torch.allclose(src_key_caches_clone[src].cpu(), + dist_key_caches[0][dst].cpu()) + assert torch.allclose(src_value_caches_clone[src].cpu(), + dist_value_caches[0][dst].cpu()) + + +@pytest.mark.skipif(not is_hip(), reason="FP8 conversion test requires e4m3") +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("block_size", BLOCK_SIZES) +@pytest.mark.parametrize("num_blocks", NUM_BLOCKS) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_fp8_conversion( + num_heads: int, + head_size: int, + block_size: int, + num_blocks: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + random.seed(seed) + torch.random.manual_seed(seed) + torch.cuda.manual_seed(seed) + + low = -224.0 + high = 224.0 + shape = (num_blocks, num_heads, head_size, block_size) + cache = torch.empty(shape, dtype=dtype, device=device) + cache.uniform_(low, high) + + cache_fp8 = torch.empty_like(cache, dtype=torch.uint8) + ops.convert_fp8(cache, cache_fp8) + + converted_cache = torch.empty_like(cache) + ops.convert_fp8(cache_fp8, converted_cache) + + assert torch.allclose(cache, converted_cache, atol=0.001, rtol=0.1) diff --git a/tests/kernels/test_layernorm.py b/tests/kernels/test_layernorm.py new file mode 100644 index 0000000..210d59e --- /dev/null +++ b/tests/kernels/test_layernorm.py @@ -0,0 +1,54 @@ +import pytest +import torch + +from vllm.model_executor.layers.layernorm import RMSNorm + +DTYPES = [torch.half, torch.bfloat16, torch.float] +NUM_TOKENS = [7, 83, 4096] # Arbitrary values for testing +HIDDEN_SIZES = [768, 769, 770, 771, 5120, 5124, 5125, 5126, 8192, + 8199] # Arbitrary values for 
testing +ADD_RESIDUAL = [False, True] +SEEDS = [0] +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) +@pytest.mark.parametrize("add_residual", ADD_RESIDUAL) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_rms_norm( + num_tokens: int, + hidden_size: int, + add_residual: bool, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + layer = RMSNorm(hidden_size).to(dtype=dtype) + layer.weight.data.normal_(mean=1.0, std=0.1) + scale = 1 / (2 * hidden_size) + x = torch.randn(num_tokens, hidden_size, dtype=dtype) + x *= scale + residual = torch.randn_like(x) * scale if add_residual else None + + # NOTE(woosuk): The reference implementation should be executed first + # because the custom kernel is in-place. + ref_out = layer._forward(x, residual) + out = layer(x, residual) + # NOTE(woosuk): LayerNorm operators (including RMS) typically have larger + # numerical errors than other operators because they involve reductions. + # Therefore, we use a larger tolerance. + if add_residual: + assert torch.allclose(out[0], ref_out[0], atol=1e-2, rtol=1e-2) + assert torch.allclose(out[1], ref_out[1], atol=1e-2, rtol=1e-2) + else: + assert torch.allclose(out, ref_out, atol=1e-2, rtol=1e-2) diff --git a/tests/kernels/test_moe.py b/tests/kernels/test_moe.py new file mode 100644 index 0000000..2356b9e --- /dev/null +++ b/tests/kernels/test_moe.py @@ -0,0 +1,101 @@ +"""Tests for the MOE layers. + +Run `pytest tests/kernels/test_moe.py`. 
+""" +import pytest +import torch +from transformers import MixtralConfig +from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock + +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.fused_moe import fused_moe +from vllm.model_executor.models.mixtral import MixtralMoE + + +def torch_moe(a, w1, w2, score, topk): + B, D = a.shape + a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) + out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) + score = torch.softmax(score, dim=-1, dtype=torch.float32) + topk_weight, topk_ids = torch.topk(score, topk) + topk_weight = topk_weight.view(-1) + topk_ids = topk_ids.view(-1) + for i in range(w1.shape[0]): + mask = topk_ids == i + if mask.sum(): + out[mask] = SiluAndMul()( + a[mask] @ w1[i].transpose(0, 1)) @ w2[i].transpose(0, 1) + return (out.view(B, -1, w2.shape[1]) * + topk_weight.view(B, -1, 1).to(out.dtype)).sum(dim=1) + + +@pytest.mark.parametrize("m", [512, 222, 33, 1]) +@pytest.mark.parametrize("n", [2048, 256, 1024]) +@pytest.mark.parametrize("k", [128, 511, 1024]) +@pytest.mark.parametrize("e", [8, 64]) +@pytest.mark.parametrize("topk", [2, 6]) +@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) +def test_fused_moe( + m: int, + n: int, + k: int, + e: int, + topk: int, + dtype: torch.dtype, +): + a = torch.randn((m, k), device='cuda', dtype=dtype) / 10 + w1 = torch.randn((e, 2 * n, k), device='cuda', dtype=dtype) / 10 + w2 = torch.randn((e, k, n), device='cuda', dtype=dtype) / 10 + + score = torch.randn((m, e), device='cuda', dtype=dtype) + triton_output = fused_moe(a, w1, w2, score, topk, renormalize=False) + torch_output = torch_moe(a, w1, w2, score, topk) + assert torch.allclose(triton_output, torch_output, atol=1e-2, rtol=0) + + +@pytest.mark.parametrize("dtype", + [torch.float32, torch.float16, torch.bfloat16]) +@torch.inference_mode() +def test_mixtral_moe(dtype: torch.dtype): + """Make sure our Mixtral MoE 
implementation agrees with the one from + huggingface.""" + + # Instantiate our and huggingface's MoE blocks + config = MixtralConfig() + hf_moe = MixtralSparseMoeBlock(config).to(dtype).to("cuda") + vllm_moe = MixtralMoE( + num_experts=config.num_local_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + params_dtype=dtype, + tp_size=1, + ).cuda() + + # Load the weights + vllm_moe.gate.weight.data[:] = hf_moe.gate.weight.data + for i in range(config.num_local_experts): + weights = (hf_moe.experts[i].w1.weight.data, + hf_moe.experts[i].w3.weight.data) + vllm_moe.w13_weight[i][:] = torch.cat(weights, dim=0) + vllm_moe.w2_weight[i][:] = hf_moe.experts[i].w2.weight.data + + # Generate input batch of dimensions [batch_size, seq_len, hidden_dim] + hf_inputs = torch.randn((1, 64, config.hidden_size)).to(dtype).to("cuda") + # vLLM uses 1D query [num_tokens, hidden_dim] + vllm_inputs = hf_inputs.flatten(0, 1) + + # Run forward passes for both MoE blocks + hf_states, _ = hf_moe.forward(hf_inputs) + vllm_states = vllm_moe.forward(vllm_inputs) + + mixtral_moe_tol = { + torch.float32: 1e-3, + torch.float16: 1e-3, + torch.bfloat16: 1e-2, + } + + assert torch.allclose(hf_states.flatten(0, 1), + vllm_states, + rtol=mixtral_moe_tol[dtype], + atol=mixtral_moe_tol[dtype]) diff --git a/tests/kernels/test_pos_encoding.py b/tests/kernels/test_pos_encoding.py new file mode 100644 index 0000000..bf18569 --- /dev/null +++ b/tests/kernels/test_pos_encoding.py @@ -0,0 +1,208 @@ +from itertools import accumulate +from typing import List, Optional + +import pytest +import torch +from allclose_default import get_default_atol, get_default_rtol + +from vllm.model_executor.layers.rotary_embedding import get_rope + +IS_NEOX_STYLE = [True, False] +DTYPES = [torch.half, torch.bfloat16, torch.float] +HEAD_SIZES = [64, 80, 96, 112, 128, 256] +ROTARY_DIMS = [None, 32] # None means rotary dim == head size +NUM_HEADS = [7, 17] # 
# Arbitrary values for testing
BATCH_SIZES = [1, 5]  # Arbitrary values for testing
SEQ_LENS = [11, 8192]  # Arbitrary values for testing
SEEDS = [0]
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]


@pytest.mark.parametrize("is_neox_style", IS_NEOX_STYLE)
@pytest.mark.parametrize("batch_size", BATCH_SIZES)
@pytest.mark.parametrize("seq_len", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("rotary_dim", ROTARY_DIMS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_rotary_embedding(
    is_neox_style: bool,
    batch_size: int,
    seq_len: int,
    num_heads: int,
    head_size: int,
    rotary_dim: Optional[int],
    dtype: torch.dtype,
    seed: int,
    device: str,
    max_position: int = 8192,
    base: int = 10000,
) -> None:
    """Compare the custom rotary-embedding kernel (`rope.forward`) against
    the pure-PyTorch reference (`rope._forward`) for one configuration.

    A ``rotary_dim`` of None means the full head size is rotated.
    """
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # Resolve the "full head" sentinel exactly once.  The original code
    # performed this check twice (before seeding and again here); the first
    # copy was dead code and has been removed.
    if rotary_dim is None:
        rotary_dim = head_size
    rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style)
    rope = rope.to(dtype=dtype)

    positions = torch.randint(0, max_position, (batch_size, seq_len))
    query = torch.randn(batch_size,
                        seq_len,
                        num_heads * head_size,
                        dtype=dtype)
    key = torch.randn_like(query)

    # NOTE(woosuk): The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_query, ref_key = rope._forward(positions, query, key)
    out_query, out_key = rope.forward(positions, query, key)
    # Compare the results.
    assert torch.allclose(out_query,
                          ref_query,
                          atol=get_default_atol(out_query),
                          rtol=get_default_rtol(out_query))
    assert torch.allclose(out_key,
                          ref_key,
                          atol=get_default_atol(out_key),
                          rtol=get_default_rtol(out_key))


@pytest.mark.parametrize("is_neox_style", IS_NEOX_STYLE)
@pytest.mark.parametrize("batch_size", BATCH_SIZES)
@pytest.mark.parametrize("seq_len", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("rotary_dim", ROTARY_DIMS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_batched_rotary_embedding(
    is_neox_style: bool,
    batch_size: int,
    seq_len: int,
    num_heads: int,
    head_size: int,
    rotary_dim: Optional[int],
    dtype: torch.dtype,
    seed: int,
    device: str,
    max_position: int = 8192,
    base: int = 10000,
) -> None:
    """Same comparison as test_rotary_embedding, but exercising the batched
    kernel path with a (trivial, all-zero) per-token offsets tensor."""
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    if rotary_dim is None:
        rotary_dim = head_size
    rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style, {
        "type": "linear",
        "factor": (1, )
    })
    rope = rope.to(dtype=dtype)

    positions = torch.randint(0, max_position, (batch_size, seq_len))
    query = torch.randn(batch_size,
                        seq_len,
                        num_heads * head_size,
                        dtype=dtype)
    key = torch.randn_like(query)

    # NOTE(woosuk): The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_query, ref_key = rope._forward(positions, query, key)
    out_query, out_key = rope.forward(positions,
                                      query,
                                      key,
                                      offsets=torch.zeros(batch_size * seq_len,
                                                          dtype=int,
                                                          device=device))
    # Compare the results.
+ assert torch.allclose(out_query, + ref_query, + atol=get_default_atol(out_query), + rtol=get_default_rtol(out_query)) + assert torch.allclose(out_key, + ref_key, + atol=get_default_atol(out_key), + rtol=get_default_rtol(out_key)) + + +@pytest.mark.parametrize("is_neox_style", IS_NEOX_STYLE) +@pytest.mark.parametrize("batch_size", BATCH_SIZES) +@pytest.mark.parametrize("seq_len", SEQ_LENS) +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("rotary_dim", ROTARY_DIMS) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_batched_rotary_embedding_multi_lora( + is_neox_style: bool, + batch_size: int, + seq_len: int, + num_heads: int, + head_size: int, + rotary_dim: Optional[int], + dtype: torch.dtype, + seed: int, + device: str, + max_position: int = 8192, + base: int = 10000, +) -> None: + torch.random.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.set_default_device(device) + if rotary_dim is None: + rotary_dim = head_size + scaling_factors: List[int] = [1, 2, 4] + rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style, { + "type": "linear", + "factor": tuple(scaling_factors) + }) + rope = rope.to(dtype=dtype) + + positions = torch.randint(0, max_position, (batch_size, seq_len)) + query = torch.randn(batch_size, + seq_len, + num_heads * head_size, + dtype=dtype) + key = torch.randn_like(query) + + offset_map = torch.tensor( + list( + accumulate([0] + [ + max_position * scaling_factor * 2 + for scaling_factor in scaling_factors[:-1] + ]))) + query_types = torch.randint(0, + len(scaling_factors), (batch_size, seq_len), + device=device) + query_offsets = offset_map[query_types] + + # NOTE(woosuk): The reference implementation should be executed first + # because the custom kernel is in-place. 
+ ref_query, ref_key = rope._forward(positions, query, key, query_offsets) + out_query, out_key = rope.forward(positions, query, key, + query_offsets.flatten()) + # Compare the results. + assert torch.allclose(out_query, + ref_query, + atol=get_default_atol(out_query), + rtol=get_default_rtol(out_query)) + assert torch.allclose(out_key, + ref_key, + atol=get_default_atol(out_key), + rtol=get_default_rtol(out_key)) diff --git a/tests/kernels/test_prefix_prefill.py b/tests/kernels/test_prefix_prefill.py new file mode 100644 index 0000000..5a5987e --- /dev/null +++ b/tests/kernels/test_prefix_prefill.py @@ -0,0 +1,209 @@ +import random +import time + +import pytest +import torch +from xformers import ops as xops +from xformers.ops.fmha.attn_bias import BlockDiagonalCausalFromBottomRightMask + +from vllm.attention.ops.prefix_prefill import context_attention_fwd + +NUM_HEADS = [64] +NUM_QUERIES_PER_KV = [1, 8, 64] +HEAD_SIZES = [128, 96] +DTYPES = [torch.float16] +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] +SLIDING_WINDOW = [0, 16, 64, 128, 256, 512, 2048] + + +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("sliding_window", SLIDING_WINDOW) +@torch.inference_mode() +def test_contexted_kv_attention( + num_heads: int, + num_queries_per_kv: int, + head_size: int, + sliding_window: int, + dtype: torch.dtype, + device: str, +) -> None: + random.seed(0) + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed(0) + torch.set_default_device(device) + + # Need this, otherwise when we capture the graph the process + # for GPU 1 would run on both GPU0 and GPU1 and things would hang + # + # see also similar issue: https://github.com/Dao-AILab/flash-attention/issues/523 + 
torch.cuda.set_device(device) + + MAX_SEQ_LEN = 1024 + MAX_CTX_LEN = 1024 + BS = 10 + cache_size = 640 + block_size = 32 + max_block_per_request = 64 + query_lens = [random.randint(16, MAX_SEQ_LEN) for _ in range(BS)] + ctx_lens = [random.randint(16, MAX_CTX_LEN) for _ in range(BS)] + seq_lens = [a + b for a, b in zip(query_lens, ctx_lens)] + num_kv_heads = num_heads // num_queries_per_kv + + num_tokens = sum(query_lens) + query = torch.empty(num_tokens, num_heads, head_size, dtype=dtype) + query.uniform_(-1e-3, 1e-3) + output = torch.empty(num_tokens, num_heads, head_size, dtype=dtype) + + kv = torch.empty(sum(seq_lens), 2, num_kv_heads, head_size, dtype=dtype) + kv.uniform_(-1e-3, 1e-3) + key, value = kv.unbind(dim=1) + + k_cache = torch.zeros(cache_size, + block_size, + num_kv_heads, + head_size, + dtype=dtype) + v_cache = torch.zeros(cache_size, + block_size, + num_kv_heads, + head_size, + dtype=dtype) + k = torch.zeros(sum(query_lens), num_kv_heads, head_size, dtype=dtype) + v = torch.zeros(sum(query_lens), num_kv_heads, head_size, dtype=dtype) + values = torch.arange(0, cache_size, dtype=torch.long) + values = values[torch.randperm(cache_size)] + block_table = values[:BS * max_block_per_request].view( + BS, max_block_per_request) + b_seq_len = torch.tensor(seq_lens, dtype=torch.long) + b_ctx_len = torch.tensor(ctx_lens, dtype=torch.long) + b_start_loc = torch.cumsum(torch.tensor([0] + query_lens[:-1], + dtype=torch.long), + dim=0) + max_input_len = MAX_SEQ_LEN + # copy kv to cache + b_seq_start_loc = torch.cumsum(torch.tensor([0] + seq_lens[:-1], + dtype=torch.long), + dim=0) + for i in range(BS): + for j in range(query_lens[i]): + k[b_start_loc[i] + j].copy_(key[b_seq_start_loc[i] + b_ctx_len[i] + + j]) + v[b_start_loc[i] + j].copy_(value[b_seq_start_loc[i] + + b_ctx_len[i] + j]) + cur_ctx = 0 + block_id = 0 + while cur_ctx < b_ctx_len[i]: + start_loc = b_seq_start_loc[i] + cur_ctx + if cur_ctx + block_size > b_ctx_len[i]: + end_loc = b_seq_start_loc[i] + 
b_ctx_len[i] + else: + end_loc = start_loc + block_size + start_slot = block_table[i, block_id] * block_size + end_slot = start_slot + end_loc - start_loc + k_cache.view(-1, num_kv_heads, + head_size)[start_slot:end_slot].copy_( + key[start_loc:end_loc]) + v_cache.view(-1, num_kv_heads, + head_size)[start_slot:end_slot].copy_( + value[start_loc:end_loc]) + cur_ctx += block_size + block_id += 1 + # transpose K_cache[num_blocks, block_size, num_kv_heads, head_size] + # to K_cache[num_blocks, num_kv_heads, head_size/8, block_size, 8] + k_cache = k_cache.view(-1, block_size, num_kv_heads, head_size // 8, + 8).permute(0, 2, 3, 1, 4).contiguous() + # transpose V_cache[num_blocks, block_size, num_kv_heads, head_size] + # to V_cache[num_blocks, num_kv_heads, head_size, block_size] + v_cache = v_cache.view(-1, block_size, num_kv_heads, + head_size).permute(0, 2, 3, 1).contiguous() + + # Warm up the Triton kernel by calling it once before actually measuring + # generation time + context_attention_fwd(query, + k, + v, + output, + k_cache, + v_cache, + block_table, + b_start_loc, + b_seq_len, + b_ctx_len, + max_input_len, + sliding_window=sliding_window) + torch.cuda.synchronize() + start_time = time.time() + context_attention_fwd(query, + k, + v, + output, + k_cache, + v_cache, + block_table, + b_start_loc, + b_seq_len, + b_ctx_len, + max_input_len, + sliding_window=sliding_window) + torch.cuda.synchronize() + end_time = time.time() + print(f"triton Time: {(end_time - start_time)*1000:.2f} ms") + + scale = float(1.0 / (head_size**0.5)) + + attn_op = xops.fmha.cutlass.FwOp() + + if num_kv_heads != num_heads: + # As of Nov 2023, xformers only supports MHA. For MQA/GQA, + # project the key and value tensors to the desired number of + # heads. 
+ # + # see also: vllm/model_executor/layers/attention.py + query = query.view(query.shape[0], num_kv_heads, num_queries_per_kv, + query.shape[-1]) + key = key[:, :, None, :].expand(key.shape[0], num_kv_heads, + num_queries_per_kv, key.shape[-1]) + value = value[:, :, + None, :].expand(value.shape[0], num_kv_heads, + num_queries_per_kv, value.shape[-1]) + query = query.unsqueeze(0) + key = key.unsqueeze(0) + value = value.unsqueeze(0) + + attn_bias = BlockDiagonalCausalFromBottomRightMask.from_seqlens( + query_lens, seq_lens) + if sliding_window > 0: + attn_bias = attn_bias.make_local_attention_from_bottomright( + sliding_window) + output_ref = xops.memory_efficient_attention_forward( + query, + key, + value, + attn_bias=attn_bias, + p=0.0, + scale=scale, + op=attn_op, + ) + torch.cuda.synchronize() + start_time = time.time() + output_ref = xops.memory_efficient_attention_forward( + query, + key, + value, + attn_bias=attn_bias, + p=0.0, + scale=scale, + op=attn_op, + ) + torch.cuda.synchronize() + end_time = time.time() + print(f"xformers Time: {(end_time - start_time)*1000:.2f} ms") + output_ref = output_ref.reshape(output.shape) + assert torch.allclose(output_ref, output, atol=1e-6, rtol=0) diff --git a/tests/kernels/test_rand.py b/tests/kernels/test_rand.py new file mode 100644 index 0000000..a4242d2 --- /dev/null +++ b/tests/kernels/test_rand.py @@ -0,0 +1,52 @@ +import random + +import pytest +import torch + +from vllm.model_executor.layers.ops.rand import seeded_uniform +from vllm.model_executor.utils import set_random_seed + + +@pytest.mark.parametrize("dtype", + [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize("use_3d", [True, False]) +def test_seeded_uniform(dtype: torch.dtype, use_3d: bool): + device = "cuda" + for seed in range(512): + set_random_seed(seed) + rows = random.randint(1, 512) + cols = random.randint(1, 64000) + if use_3d: + third_dim = random.randint(2, 10) + dims = [rows, third_dim, cols] + else: + dims = [rows, 
cols] + seeds = torch.randint(torch.iinfo(torch.long).min, + torch.iinfo(torch.long).max, (rows, ), + device=device) + + # Test that the same seed produces the same output + out = seeded_uniform(*dims, seeds=seeds, dtype=dtype, device=device) + out2 = seeded_uniform(*dims, seeds=seeds, dtype=dtype, device=device) + torch.testing.assert_close(out, out2) + # del to save memory + del out2 + + out3 = seeded_uniform(*dims, seeds=seeds, dtype=dtype, device=device) + torch.testing.assert_close(out, out3) + # del to save memory + del out3 + + # Initialize out tensor with garbage to ensure that it is overwritten + out_with_tensor = seeded_uniform( + *dims, + out=torch.full( + (*dims, ), + -1, + dtype=dtype, + device=device, + ), + seeds=seeds, + dtype=dtype, + ) + torch.testing.assert_close(out, out_with_tensor) diff --git a/tests/kernels/test_sampler.py b/tests/kernels/test_sampler.py new file mode 100644 index 0000000..e28f809 --- /dev/null +++ b/tests/kernels/test_sampler.py @@ -0,0 +1,196 @@ +import gc + +import pytest +import torch +import triton +import triton.language as tl + +from vllm.model_executor.layers.ops.sample import ( + MAX_TRITON_N_COLS, _uniform_to_exponential, get_num_triton_sampler_splits, + sample) +from vllm.model_executor.sampling_metadata import SamplingTensors +from vllm.model_executor.utils import set_random_seed + +SINGLE_SPLIT_VOCAB_SIZE = 32000 # llama/mistral/mixtral vocab size +MULTI_SPLIT_VOCAB_SIZE = MAX_TRITON_N_COLS + 100 + + +@pytest.fixture(autouse=True) +def _cleanup(): + yield + gc.collect() + torch.cuda.empty_cache() + + +@triton.jit +def _uniform_to_exponential_kernel(input, output, n: tl.constexpr): + idx = tl.arange(0, n) + x = tl.load(input + idx) + y = _uniform_to_exponential(x) + tl.store(output + idx, y) + + +def test_uniform_to_exponential(): + """Test that we can convert uniform to exponential without div by 0.""" + input = torch.tensor([0.0, 1.0 - torch.finfo(torch.float32).eps], + dtype=torch.float32, + device="cuda") + 
output = torch.zeros(input.shape, dtype=torch.float32, device="cuda") + _uniform_to_exponential_kernel[(1, )](input, output, 2) + assert torch.all(torch.isfinite(output)) + assert torch.all(output > 0) + assert torch.all(torch.isfinite(torch.full_like(output, 1.0) / output)) + + +@pytest.mark.parametrize("random_sampling", [True, False, "mixed"]) +@pytest.mark.parametrize("max_best_of", [1, 2, 3, 4, 5]) +@pytest.mark.parametrize("modify_greedy_probs", [True, False]) +@pytest.mark.parametrize("seed", [1337]) +@pytest.mark.parametrize("vocab_size", + [SINGLE_SPLIT_VOCAB_SIZE, MULTI_SPLIT_VOCAB_SIZE]) +@pytest.mark.parametrize("save_logprobs", [True, False]) +def test_sample_decoding_only(random_sampling, max_best_of, + modify_greedy_probs, seed, vocab_size, + save_logprobs): + set_random_seed(seed) + bs = 8 + probs = torch.zeros((bs, vocab_size), dtype=torch.float32, device="cuda") + for i in range(bs): + probs[i, i * (vocab_size // bs)] = 1.0 + logprobs = torch.rand_like(probs) + sample_indices = torch.arange(bs, dtype=torch.long, device="cuda") + n_splits = get_num_triton_sampler_splits(probs.shape[1]) + if random_sampling == "mixed": + random_sampling_mask = (torch.rand( + (1, bs), device="cuda") < 0.5).expand(n_splits, bs) + elif random_sampling: + random_sampling_mask = torch.ones((n_splits, bs), + dtype=torch.bool, + device="cuda") + else: + random_sampling_mask = torch.zeros((n_splits, bs), + dtype=torch.bool, + device="cuda") + + seeds = torch.randint(1, + torch.iinfo(torch.long).max, (n_splits, bs), + device="cuda").mul_(random_sampling_mask) + sampled_tokens, sampled_logprobs, sampled_modified_probs = sample( + probs=probs, + logprobs=logprobs, + sample_indices=sample_indices, + seeds=seeds, + max_best_of=max_best_of, + modify_greedy_probs=modify_greedy_probs, + save_logprobs=save_logprobs, + _save_modified_probs=True) + assert sampled_tokens.shape == (bs, max_best_of) + for i in range(bs): + assert torch.all(sampled_tokens[i] == i * (vocab_size // bs)) + 
request_uses_random_sampling = random_sampling_mask[0, i] + if modify_greedy_probs and not request_uses_random_sampling: + # If we are modifying greedy probs and the request is greedy, + # we want to make sure the probs tensor is modified in place + assert torch.allclose( + probs[i][sampled_tokens[i]], + torch.full_like(probs[i][sampled_tokens[i]], 1.0)) + assert torch.sum(probs[i]) == 1.0 + assert torch.allclose( + sampled_modified_probs[i][0], + torch.full_like(sampled_modified_probs[i][0], 1.0)) + elif request_uses_random_sampling: + # If the request is random, we want to make sure + # sampled_modified_probs tensor has noise added + # (and thus is different from probs tensor) + assert not torch.allclose(sampled_modified_probs[i][0], + probs[i][sampled_tokens[i]]) + elif not request_uses_random_sampling: + # If the request is greedy and we are not modifying greedy probs, + # we want to make sure sampled_modified_probs tensor is the same as + # the probs tensor. + assert torch.allclose(sampled_modified_probs[i][0], + probs[i][sampled_tokens[i]]) + + if save_logprobs: + assert sampled_logprobs.shape == (bs, max_best_of) + for i in range(bs): + for best_of in range(max_best_of): + assert torch.all(sampled_logprobs[i] == logprobs[i][ + sampled_tokens[i, best_of]]) + else: + assert sampled_logprobs is None + + +@pytest.mark.parametrize("random_sampling", [True, False, "mixed"]) +@pytest.mark.parametrize("max_best_of", [1, 2, 3, 4, 5]) +@pytest.mark.parametrize("modify_greedy_probs", [True, False]) +@pytest.mark.parametrize("seed", [1337]) +@pytest.mark.parametrize("vocab_size", + [SINGLE_SPLIT_VOCAB_SIZE, MULTI_SPLIT_VOCAB_SIZE]) +def test_sample_prompt_logprobs(random_sampling, max_best_of, + modify_greedy_probs, seed, vocab_size): + set_random_seed(seed) + prompt_sizes = [16, 32, 64, 128] * 2 + samples = 8 + bs = samples + sum(prompt_sizes) + probs = torch.zeros((bs, vocab_size), dtype=torch.float32, device="cuda") + for i in range(bs): + probs[i, i * (vocab_size // 
bs)] = 1.0 + logprobs = torch.rand_like(probs) + sample_indices = torch.tensor(prompt_sizes, + dtype=torch.long, + device="cuda").cumsum_(0) + n_splits = get_num_triton_sampler_splits(probs.shape[1]) + if random_sampling == "mixed": + random_sampling_mask = torch.rand( + (n_splits, samples), device="cuda") < 0.5 + elif random_sampling: + random_sampling_mask = torch.ones((n_splits, samples), + dtype=torch.bool, + device="cuda") + else: + random_sampling_mask = torch.zeros((n_splits, samples), + dtype=torch.bool, + device="cuda") + + seeds = torch.randint(1, + torch.iinfo(torch.long).max, (n_splits, samples), + device="cuda").mul_(random_sampling_mask) + sampled_tokens, sampled_logprobs, _ = sample( + probs=probs, + logprobs=logprobs, + sample_indices=sample_indices, + seeds=seeds, + max_best_of=max_best_of, + modify_greedy_probs=modify_greedy_probs, + save_logprobs=True) + assert sampled_tokens.shape == (samples, max_best_of) + assert sampled_logprobs.shape == (samples, max_best_of) + for i, t in enumerate(sample_indices): + assert torch.all(sampled_tokens[i] == t * (vocab_size // bs)) + for best_of in range(max_best_of): + assert torch.all(sampled_logprobs[i] == logprobs[sample_indices[i]] + [sampled_tokens[i, best_of]]) + + +@pytest.mark.parametrize("seed", list(range(16))) +def test_get_sequence_seeds(seed): + """Ensure that we get a different child seed from base + seed + extra entropy""" + starting_seed = seed + seq_seed = None + extra_entropy = 1 + for i in range(512): + new_seq_seed = SamplingTensors._get_sequence_seeds(starting_seed, + i, + seeds_to_generate=1, + is_greedy=False)[0] + new_seq_seed_extra_entropy = SamplingTensors._get_sequence_seeds( + starting_seed, + i, + extra_entropy, + seeds_to_generate=1, + is_greedy=False)[0] + assert new_seq_seed_extra_entropy != new_seq_seed + assert seq_seed != new_seq_seed + seq_seed = new_seq_seed diff --git a/tests/lora/__init__.py b/tests/lora/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/tests/lora/conftest.py b/tests/lora/conftest.py new file mode 100644 index 0000000..a3ffc53 --- /dev/null +++ b/tests/lora/conftest.py @@ -0,0 +1,179 @@ +import contextlib +import gc +import tempfile +from collections import OrderedDict +from unittest.mock import MagicMock, patch + +import pytest +import ray +import torch +import torch.nn as nn +from huggingface_hub import snapshot_download + +import vllm +from vllm.config import LoRAConfig +from vllm.distributed import destroy_model_parallel, initialize_model_parallel +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + MergedColumnParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead +from vllm.model_executor.model_loader import get_model + + +def cleanup(): + destroy_model_parallel() + with contextlib.suppress(AssertionError): + torch.distributed.destroy_process_group() + gc.collect() + torch.cuda.empty_cache() + ray.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_fixture(): + yield + cleanup() + + +@pytest.fixture +def dist_init(): + if not torch.distributed.is_initialized(): + temp_file = tempfile.mkstemp()[1] + torch.distributed.init_process_group( + backend="nccl", + world_size=1, + rank=0, + init_method=f"file://{temp_file}", + ) + torch.distributed.all_reduce(torch.zeros(1).cuda()) + initialize_model_parallel(1, 1) + yield + cleanup() + + +@pytest.fixture +def dist_init_torch_only(): + if torch.distributed.is_initialized(): + return + temp_file = tempfile.mkstemp()[1] + torch.distributed.init_process_group( + backend="nccl", + world_size=1, + rank=0, + init_method=f"file://{temp_file}", + ) + + +@pytest.fixture +def dummy_model() -> nn.Module: + model = nn.Sequential( + OrderedDict([ + ("dense1", ColumnParallelLinear(764, 100)), + ("dense2", RowParallelLinear(100, 50)), + ( + "layer1", 
+ nn.Sequential( + OrderedDict([ + ("dense1", ColumnParallelLinear(100, 10)), + ("dense2", RowParallelLinear(10, 50)), + ])), + ), + ("act2", nn.ReLU()), + ("output", ColumnParallelLinear(50, 10)), + ("outact", nn.Sigmoid()), + # Special handling for lm_head & sampler + ("lm_head", ParallelLMHead(512, 10)), + ("logits_processor", LogitsProcessor(512)), + ("sampler", Sampler()) + ])) + model.config = MagicMock() + return model + + +@pytest.fixture +def dummy_model_gate_up() -> nn.Module: + model = nn.Sequential( + OrderedDict([ + ("dense1", ColumnParallelLinear(764, 100)), + ("dense2", RowParallelLinear(100, 50)), + ( + "layer1", + nn.Sequential( + OrderedDict([ + ("dense1", ColumnParallelLinear(100, 10)), + ("dense2", RowParallelLinear(10, 50)), + ])), + ), + ("act2", nn.ReLU()), + ("gate_up_proj", MergedColumnParallelLinear(50, [5, 5])), + ("outact", nn.Sigmoid()), + # Special handling for lm_head & sampler + ("lm_head", ParallelLMHead(512, 10)), + ("logits_processor", LogitsProcessor(512)), + ("sampler", Sampler()) + ])) + model.config = MagicMock() + return model + + +@pytest.fixture(scope="session") +def sql_lora_files(): + return snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test") + + +@pytest.fixture(scope="session") +def mixtral_lora_files(): + return snapshot_download(repo_id="terrysun/mixtral-lora-adapter") + + +@pytest.fixture(scope="session") +def gemma_lora_files(): + return snapshot_download(repo_id="wskwon/gemma-7b-test-lora") + + +@pytest.fixture(scope="session") +def chatglm3_lora_files(): + return snapshot_download(repo_id="jeeejeee/chatglm3-text2sql-spider") + + +@pytest.fixture(scope="session") +def baichuan_lora_files(): + return snapshot_download(repo_id="jeeejeee/baichuan7b-text2sql-spider") + + +@pytest.fixture(scope="session") +def baichuan_zero_lora_files(): + # all the lora_B weights are initialized to zero. 
+ return snapshot_download(repo_id="jeeejeee/baichuan7b-zero-init") + + +@pytest.fixture(scope="session") +def tinyllama_lora_files(): + return snapshot_download(repo_id="jashing/tinyllama-colorist-lora") + + +@pytest.fixture +def llama_2_7b_engine_extra_embeddings() -> nn.Module: + cleanup() + get_model_old = get_model + + def get_model_patched(*, model_config, device_config, **kwargs): + kwargs["lora_config"] = LoRAConfig(max_loras=4, max_lora_rank=8) + return get_model_old(model_config=model_config, + device_config=device_config, + **kwargs) + + with patch("vllm.worker.model_runner.get_model", get_model_patched): + engine = vllm.LLM("meta-llama/Llama-2-7b-hf", enable_lora=False) + yield engine.llm_engine + del engine + cleanup() + + +@pytest.fixture +def llama_2_7b_model_extra_embeddings( + llama_2_7b_engine_extra_embeddings) -> nn.Module: + yield (llama_2_7b_engine_extra_embeddings.model_executor.driver_worker. + model_runner.model) diff --git a/tests/lora/test_baichuan.py b/tests/lora/test_baichuan.py new file mode 100644 index 0000000..5ab863e --- /dev/null +++ b/tests/lora/test_baichuan.py @@ -0,0 +1,108 @@ +import pytest + +import vllm +from vllm.lora.request import LoRARequest + +from .conftest import cleanup + +MODEL_PATH = "baichuan-inc/Baichuan-7B" + +PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. 
concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501 + + +def do_sample(llm, lora_path: str, lora_id: int) -> str: + prompts = [ + PROMPT_TEMPLATE.format(query="How many singers do we have?"), + PROMPT_TEMPLATE.format( + query= + "What is the average, minimum, and maximum age of all singers from France?" # noqa: E501 + ), + PROMPT_TEMPLATE.format( + query= + "Show name, country, age for all singers ordered by age from the oldest to the youngest." # noqa: E501 + ), + ] + print(prompts) + sampling_params = vllm.SamplingParams(temperature=0, max_tokens=256) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. 
+ generated_texts = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text.strip() + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +def test_baichuan_lora(baichuan_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + trust_remote_code=True) + + expected_lora_output = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE Country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age ASC", + ] + + output1 = do_sample(llm, baichuan_lora_files, lora_id=1) + for i in range(len(expected_lora_output)): + assert output1[i] == expected_lora_output[i] + output2 = do_sample(llm, baichuan_lora_files, lora_id=2) + for i in range(len(expected_lora_output)): + assert output2[i] == expected_lora_output[i] + + +@pytest.mark.skip("Requires multiple GPUs") +def test_baichuan_tensor_parallel_equality(baichuan_lora_files): + # Cannot use as it will initialize torch.cuda too early... 
+ # if torch.cuda.device_count() < 4: + # pytest.skip(f"Not enough GPUs for tensor parallelism {4}") + + llm_tp1 = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=1, + trust_remote_code=True) + output_tp1 = do_sample(llm_tp1, baichuan_lora_files, lora_id=1) + + del llm_tp1 + cleanup() + + llm_tp2 = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=2, + trust_remote_code=True) + output_tp2 = do_sample(llm_tp2, baichuan_lora_files, lora_id=2) + + del llm_tp2 + cleanup() + + assert output_tp1 == output_tp2 + + llm_tp4 = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True) + output_tp4 = do_sample(llm_tp4, baichuan_lora_files, lora_id=2) + + del llm_tp4 + cleanup() + + assert output_tp1 == output_tp4 \ No newline at end of file diff --git a/tests/lora/test_chatglm3.py b/tests/lora/test_chatglm3.py new file mode 100644 index 0000000..bd8cc98 --- /dev/null +++ b/tests/lora/test_chatglm3.py @@ -0,0 +1,57 @@ +import vllm +from vllm.lora.request import LoRARequest + +MODEL_PATH = "THUDM/chatglm3-6b" + +PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. 
def do_sample(llm, lora_path: str, lora_id: int) -> "List[str]":
    """Generate greedy completions for three text-to-SQL prompts.

    Args:
        llm: a vllm.LLM instance with LoRA enabled.
        lora_path: local path of the LoRA adapter to attach.
        lora_id: LoRA id; 0/None disables the adapter for this call.

    Returns:
        The stripped generated text for each prompt, in prompt order.
        (Annotation is a forward-ref string because this module does not
        import ``typing.List``; the original ``-> str`` was wrong — a list
        is returned.)
    """
    prompts = [
        PROMPT_TEMPLATE.format(query="How many singers do we have?"),
        PROMPT_TEMPLATE.format(
            query=
            "What is the average, minimum, and maximum age of all singers from France?"  # noqa: E501
        ),
        PROMPT_TEMPLATE.format(
            query=
            "Show name, country, age for all singers ordered by age from the oldest to the youngest."  # noqa: E501
        ),
    ]
    print(prompts)
    # temperature=0 -> greedy decoding, so outputs are deterministic and
    # comparable against the expected strings below.
    sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32)
    outputs = llm.generate(
        prompts,
        sampling_params,
        lora_request=LoRARequest(str(lora_id), lora_id, lora_path)
        if lora_id else None)
    # Print the outputs.
    generated_texts = []
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text.strip()
        generated_texts.append(generated_text)
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
    return generated_texts


def test_chatglm3_lora(chatglm3_lora_files):
    """End-to-end check: two different LoRA ids on ChatGLM3 both produce
    the expected SQL for the fixed prompts."""
    llm = vllm.LLM(MODEL_PATH,
                   max_model_len=1024,
                   enable_lora=True,
                   max_loras=4,
                   max_lora_rank=64,
                   trust_remote_code=True)

    expected_lora_output = [
        "SELECT count(*) FROM singer",
        "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'",  # noqa: E501
        "SELECT name , country , age FROM singer ORDER BY age",
    ]

    output1 = do_sample(llm, chatglm3_lora_files, lora_id=1)
    for i in range(len(expected_lora_output)):
        assert output1[i] == expected_lora_output[i]
    output2 = do_sample(llm, chatglm3_lora_files, lora_id=2)
    for i in range(len(expected_lora_output)):
        assert output2[i] == expected_lora_output[i]
+ generated_texts = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text.strip() + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +def test_gemma_lora(gemma_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4) + + expected_lora_output = [ + "more important than knowledge.\nAuthor: Albert Einstein\n", + "everyone else is already taken.\nAuthor: Oscar Wilde\n", + "so little time\nAuthor: Frank Zappa\n", + ] + + output1 = do_sample(llm, gemma_lora_files, lora_id=1) + for i in range(len(expected_lora_output)): + assert output1[i].startswith(expected_lora_output[i]) + output2 = do_sample(llm, gemma_lora_files, lora_id=2) + for i in range(len(expected_lora_output)): + assert output2[i].startswith(expected_lora_output[i]) diff --git a/tests/lora/test_layer_variation.py b/tests/lora/test_layer_variation.py new file mode 100644 index 0000000..7d37aa6 --- /dev/null +++ b/tests/lora/test_layer_variation.py @@ -0,0 +1,106 @@ +import tempfile +from random import sample +from typing import List, Optional + +import peft +import pytest +from transformers import AutoModelForCausalLM + +import vllm +from vllm.lora.request import LoRARequest + +from .conftest import cleanup + +MODEL_PATH = "Felladrin/Llama-68M-Chat-v1" +PROMPTS = [ + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. 
This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nSpellForce 3 is a pretty bad game. The developer Grimlore Games is clearly a bunch of no-talent hacks, and 2017 was a terrible year for games anyway. [/user] [assistant]", # noqa: E501 + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nI wanted to like Grimlore Games' 2017 entry, but in SpellForce 3 they just didn't get anything right. [/user] [assistant]", # noqa: E501 + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. 
def get_lora_model(model_id: str, target_modules: List[str], rank: int):
    """Wrap the base model in a (random-initialized) PEFT LoRA adapter.

    Args:
        model_id: HF hub id or local path of the base model.
        target_modules: module names the LoRA matrices are attached to.
        rank: LoRA rank.  NOTE(review): passed positionally to LoraConfig —
        assumes the second positional parameter is the rank; confirm against
        the installed peft version.
    """
    model = AutoModelForCausalLM.from_pretrained(model_id)
    lora_config = peft.tuners.lora.LoraConfig(target_modules, rank)
    lora_model = peft.PeftModel(model, lora_config)
    return lora_model


def do_sample(llm,
              lora_path: Optional[str] = None,
              lora_id: Optional[int] = None,
              logprobs: int = 0,
              n_tokens: int = 256):
    """Greedy-generate for the module-level PROMPTS.

    Returns the generated texts when ``logprobs`` is 0 (falsy); otherwise
    returns, per prompt, the list of top-``logprobs`` token-id lists for
    every generated position.
    """
    prompts = PROMPTS
    sampling_params = vllm.SamplingParams(temperature=0,
                                          max_tokens=n_tokens,
                                          logprobs=logprobs,
                                          stop=["[/assistant]"])
    outputs = llm.generate(
        prompts,
        sampling_params,
        lora_request=LoRARequest(str(lora_id), lora_id, lora_path)
        if lora_id else None)
    # Print the outputs.
    generated_texts = []
    generated_logprobs = []
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        generated_texts.append(generated_text)
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
        # One entry per generated token: the candidate token ids that
        # received a logprob at that position.
        generated_logprobs.append([
            list(logprob.keys()) for out in output.outputs
            for logprob in out.logprobs
        ])
    return generated_logprobs if logprobs else generated_texts


# Module names LoRA may target; random subsets of these are tested below.
SUPPORTED_MODULES = [
    "qkv_proj", "o_proj", "gate_up_proj", "down_proj", "embed_tokens",
    "lm_head"
]
# For each subset size 2..5, draw 3 random module combinations (module
# import time randomness — combinations vary between test runs).
TARGET_MODULES_LIST = []
for length in range(2, 6):
    TARGET_MODULES_LIST.extend(
        [sample(SUPPORTED_MODULES, length) for _ in range(3)])
cleanup() + # verify the top-5 tokens are identical for each token + id_sets = [set(prob[0]) for prob in probs] + assert id_sets == reference_id_sets diff --git a/tests/lora/test_layers.py b/tests/lora/test_layers.py new file mode 100644 index 0000000..0eb04f4 --- /dev/null +++ b/tests/lora/test_layers.py @@ -0,0 +1,773 @@ +import random +from copy import deepcopy +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import pytest +import torch +import torch.nn.functional as F + +from vllm.config import LoRAConfig +from vllm.lora.fully_sharded_layers import ( + ColumnParallelLinearWithShardedLoRA, + MergedColumnParallelLinearWithShardedLoRA, + MergedQKVParallelLinearWithShardedLora, RowParallelLinearWithShardedLoRA) +# yapf conflicts with isort for this block +# yapf: disable +from vllm.lora.layers import (BaseLayerWithLoRA, ColumnParallelLinearWithLoRA, + LogitsProcessorWithLoRA, LoRAMapping, + MergedColumnParallelLinearWithLoRA, + MergedQKVParallelLinearWithLora, + QKVParallelLinearWithLora, + RowParallelLinearWithLoRA, + VocabParallelEmbeddingWithLoRA) +# yapf: enable +from vllm.lora.models import (LoRALayerWeights, PackedLoRALayerWeights, + convert_mapping) +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.utils import set_random_seed + +from .utils import DummyLoRAManager + +TOLERANCES = { + torch.float16: (5e-3, 5e-3), + torch.float32: (5e-3, 5e-3), + torch.bfloat16: (3e-2, 2e-2), +} +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +def get_random_id_to_index(num_loras: int, + num_slots: int, + log: bool = True) -> List[Optional[int]]: + """Creates a random lora_id_to_index mapping. 
+ + Args: + num_loras: The number of active loras in the mapping. + num_slots: The number of slots in the mapping. Must be larger + than num_loras. + log: Whether to log the output. + """ + + if num_loras > num_slots: + raise ValueError( + f"num_loras is higher than num_slots: {num_loras} > {num_slots}. " + "num_loras must be less than or equal to num_slots.") + + slots: List[Optional[int]] = [None] * num_slots + random_slot_selections = (torch.randperm(num_slots)[:num_loras]).tolist() + for lora_id, slot_idx in enumerate(random_slot_selections, start=1): + slots[slot_idx] = lora_id + + if log: + print(f"Created lora_id_to_index mapping: {slots}.") + + return slots + + +def populate_loras( + id_to_index: List[Optional[int]], + layer: BaseLayerWithLoRA, + layer_weights: torch.Tensor, + generate_embeddings_tensor: int = 0, + repeats: int = 1, +) -> Tuple[Dict[int, LoRALayerWeights], Dict[int, List[LoRALayerWeights]]]: + """This method populates the lora layers with lora weights. + + Args: + id_to_index: a list of lora ids. The index of the lora id + represents which memory slot the lora matrices are + stored in. A None value indicates a free slot. + layer: the LoRAlayer to populate. + layer_weights: the PyTorch tensor containing the layer's + weights. + generate_embeddings_tensor: whether to generate an + embeddings tensor for each LoRA. + repeats: must only be set for column parallel packed + layers. Indicates the number of loras to compose + together to create a single lora layer. + """ + + # Dictionary that maps the lora ID to the + # corresponding lora weights. + lora_dict: Dict[int, LoRALayerWeights] = dict() + + # Dictionary that maps the lora ID to the + # corresponding subloras. 
+ sublora_dict: Dict[int, List[LoRALayerWeights]] = dict() + + for slot_idx, lora_id in enumerate(id_to_index): + if lora_id is not None: + subloras = [] + sublora_len = layer_weights.shape[0] // repeats + for i in range(repeats): + sublora = DummyLoRAManager().init_random_lora( + module_name=f"fake_{i}", + weight=layer_weights, + generate_embeddings_tensor=generate_embeddings_tensor, + ) + sublora.lora_b = sublora.lora_b[:, (sublora_len * + i):(sublora_len * (i + 1))] + sublora.optimize() + subloras.append(sublora) + + lora = PackedLoRALayerWeights.pack( + subloras) if repeats > 1 else subloras[0] + + layer.set_lora( + slot_idx, + lora_a=lora.lora_a, + lora_b=lora.lora_b, + embeddings_tensor=lora.embeddings_tensor, + ) + + lora_dict[lora_id] = lora + sublora_dict[lora_id] = subloras + + return lora_dict, sublora_dict + + +def create_random_inputs( + active_lora_ids: List[int], + num_inputs: int, + input_size: Tuple[int, ...], + input_range: Tuple[float, float], + input_type: torch.dtype = torch.int, +) -> Tuple[List[torch.Tensor], List[int], List[int]]: + """Creates random inputs. + + Args: + active_lora_ids: lora IDs of active lora weights. + num_inputs: the number of inputs to create. + input_size: the size of each individual input. + input_range: the range of values to include in the input. + input_range[0] <= possible input values < input_range[1] + input_type: the type of values in the input. 
+ """ + + low, high = input_range + + inputs, index_mapping, prompt_mapping = [], [], [] + for _ in range(num_inputs): + if input_type == torch.int: + inputs.append( + torch.randint(low=int(low), high=int(high), size=input_size)) + else: + inputs.append( + torch.rand(size=input_size, dtype=input_type) * high + low) + + lora_id = random.choice(active_lora_ids) + index_mapping += [lora_id] * input_size[0] + prompt_mapping += [lora_id] + + return inputs, index_mapping, prompt_mapping + + +@torch.inference_mode() +@pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) +def test_embeddings(dist_init, num_loras, device, vocab_size) -> None: + + torch.set_default_device(device) + max_loras = 8 + lora_config = LoRAConfig(max_loras=max_loras, + max_lora_rank=8, + lora_dtype=torch.float16) + + def create_random_embedding_layer(): + embedding = VocabParallelEmbedding(vocab_size, 256) + embedding.weight.data = torch.rand_like(embedding.weight.data) + embedding.weight.data[vocab_size:, :] = 0 + lora_embedding = VocabParallelEmbeddingWithLoRA(embedding) + lora_embedding.create_lora_weights(max_loras, lora_config) + + return embedding, lora_embedding + + for i in range(10): + set_random_seed(i) + + id_to_index = get_random_id_to_index(num_loras, max_loras) + embedding, lora_embedding = create_random_embedding_layer() + + lora_dict, _ = populate_loras( + id_to_index, + layer=lora_embedding, + layer_weights=embedding.weight.T, + ) + + inputs, index_mapping, prompt_mapping = create_random_inputs( + active_lora_ids=list(lora_dict.keys()), + num_inputs=num_loras * 3, + input_size=(200, ), + input_range=(1, vocab_size), + ) + lora_mapping = LoRAMapping(index_mapping, prompt_mapping) + + mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras, + vocab_size, + lora_config.lora_extra_vocab_size) + lora_embedding.set_mapping(*mapping_info) + + lora_result = 
lora_embedding(torch.cat(inputs)) + + expected_results = [] + for input_, lora_id in zip(inputs, prompt_mapping): + lora = lora_dict[lora_id] + result = embedding(input_) + after_a = F.embedding( + input_, + lora.lora_a, + ) + result += (after_a @ lora.lora_b) + expected_results.append(result) + expected_result = torch.cat(expected_results) + + rtol, atol = TOLERANCES[lora_result.dtype] + assert torch.allclose(lora_result, + expected_result, + rtol=rtol, + atol=atol) + + # Check that resetting the lora weights succeeds + + for slot_idx in range(max_loras): + lora_embedding.reset_lora(slot_idx) + + inputs, index_mapping, prompt_mapping = create_random_inputs( + active_lora_ids=[0], + num_inputs=num_loras * 3, + input_size=(200, ), + input_range=(1, vocab_size), + ) + lora_mapping = LoRAMapping(index_mapping, prompt_mapping) + + mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras, + vocab_size, + lora_config.lora_extra_vocab_size) + lora_embedding.set_mapping(*mapping_info, ) + + lora_result = lora_embedding(torch.cat(inputs)) + expected_result = embedding(torch.cat(inputs)) + + rtol, atol = TOLERANCES[lora_result.dtype] + assert torch.allclose(lora_result, + expected_result, + rtol=rtol, + atol=atol) + + +@torch.inference_mode() +# @pytest.mark.skip( +# reason="Fails when loras are in any slot other than the first.") +@pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) +def test_embeddings_with_new_embeddings(dist_init, num_loras, device, + vocab_size) -> None: + + torch.set_default_device(device) + max_loras = 8 + lora_config = LoRAConfig(max_loras=max_loras, + max_lora_rank=8, + lora_dtype=torch.float16) + + def create_random_embedding_layer(): + embedding = VocabParallelEmbedding(vocab_size, 256) + embedding_data = torch.rand_like(embedding.weight.data) + embedding.weight.data = embedding_data + embedding.weight.data[vocab_size:, 
@torch.inference_mode()
# @pytest.mark.skip(
#     reason="Fails when loras are in any slot other than the first.")
@pytest.mark.parametrize("num_loras", [1, 2, 4, 8])
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000])
def test_embeddings_with_new_embeddings(dist_init, num_loras, device,
                                        vocab_size) -> None:
    """Like test_embeddings, but each lora also carries extra embedding
    rows appended past the base vocabulary; inputs are forced to hit that
    extended range so the per-lora extra-vocab lookup is exercised."""

    torch.set_default_device(device)
    max_loras = 8
    lora_config = LoRAConfig(max_loras=max_loras,
                             max_lora_rank=8,
                             lora_dtype=torch.float16)

    def create_random_embedding_layer():
        embedding = VocabParallelEmbedding(vocab_size, 256)
        embedding_data = torch.rand_like(embedding.weight.data)
        embedding.weight.data = embedding_data
        embedding.weight.data[vocab_size:, :] = 0
        # The expanded layer reserves lora_extra_vocab_size rows per lora
        # after the base vocab; base rows are copied over.
        expanded_embedding = VocabParallelEmbedding(
            vocab_size + lora_config.lora_extra_vocab_size * max_loras,
            256,
            org_num_embeddings=vocab_size)
        expanded_embedding.weight.data[:vocab_size, :] = embedding_data
        # We need to deepcopy the embedding as it will be modified
        # in place
        lora_embedding = VocabParallelEmbeddingWithLoRA(
            deepcopy(expanded_embedding))
        lora_embedding.create_lora_weights(max_loras, lora_config)

        return expanded_embedding, lora_embedding

    for i in range(10):
        set_random_seed(i)

        id_to_index = get_random_id_to_index(num_loras, max_loras)
        expanded_embedding, lora_embedding = create_random_embedding_layer()
        lora_dict, _ = populate_loras(
            id_to_index,
            layer=lora_embedding,
            layer_weights=torch.zeros(
                (256, vocab_size + lora_config.lora_extra_vocab_size)),
            generate_embeddings_tensor=256,
        )

        # All embeddings tensors have the same shape.
        embeddings_tensors = [
            lora_dict[id].embeddings_tensor for id in sorted(lora_dict.keys())
        ]
        embeddings_tensor_len = embeddings_tensors[0].shape[0]

        # Add empty embeddings_tensors for unoccupied lora slots.
        for _ in range(max_loras - len(embeddings_tensors)):
            embeddings_tensors.append(torch.zeros(embeddings_tensors[0].shape))

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=list(lora_dict.keys()),
            num_inputs=num_loras * 3,
            input_size=(200, ),
            input_range=(1, vocab_size),
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        original_inputs = deepcopy(inputs)

        # Force some of the inputs to be in the extended embeddings range
        # to guarantee that their behavior is tested.
        # `inputs` gets ids in the *flattened* per-lora extra-vocab region
        # (for the reference lookup); `original_inputs` gets the shared
        # extra-vocab ids the lora layer itself translates.
        for input_, original_input_, lora_id in zip(inputs, original_inputs,
                                                    prompt_mapping):
            embedding_id = lora_id - 1
            input_[-1] = vocab_size + (embedding_id * embeddings_tensor_len)
            original_input_[-1] = vocab_size
            input_[-2] = vocab_size + (
                (embedding_id + 1) * embeddings_tensor_len - 1)
            original_input_[-2] = vocab_size + embeddings_tensor_len - 1

        mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras,
                                       vocab_size,
                                       lora_config.lora_extra_vocab_size)
        lora_embedding.set_mapping(*mapping_info, )

        # Materialize every lora's extra embeddings into the reference
        # expanded table so plain lookups can serve as ground truth.
        expanded_embedding.weight[vocab_size:vocab_size +
                                  (embeddings_tensor_len *
                                   max_loras)] = torch.cat(embeddings_tensors)

        lora_result = lora_embedding(torch.cat(original_inputs))

        expected_results = []
        for input_, original_input_, lora_id in zip(inputs, original_inputs,
                                                    prompt_mapping):
            lora = lora_dict[lora_id]
            result = expanded_embedding(input_)
            after_a = F.embedding(
                original_input_,
                lora.lora_a,
            )
            result += (after_a @ lora.lora_b)
            expected_results.append(result)
        expected_result = torch.cat(expected_results)

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)

        # Check that resetting the lora weights succeeds

        for slot_idx in range(max_loras):
            lora_embedding.reset_lora(slot_idx)

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=[0],
            num_inputs=num_loras * 3,
            input_size=(200, ),
            input_range=(1, vocab_size),
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        original_inputs = deepcopy(inputs)

        mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras,
                                       vocab_size,
                                       lora_config.lora_extra_vocab_size)
        lora_embedding.set_mapping(*mapping_info, )

        lora_result = lora_embedding(torch.cat(original_inputs))
        expected_result = expanded_embedding(torch.cat(inputs))

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)
@torch.inference_mode()
@pytest.mark.parametrize("num_loras", [1, 2, 4, 8])
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000])
def test_lm_head_logits_processor(dist_init, num_loras, device,
                                  vocab_size) -> None:
    """LogitsProcessorWithLoRA must equal the plain LogitsProcessor plus
    the per-lora low-rank delta (and the lora's extra-vocab logits), and
    resetting all slots must restore the base behavior.

    Fixes vs. previous revision: the phase-1 comparison of ``lora_result``
    against ``expected_result`` was missing (the reference was computed
    but never asserted), and an unused ``input_ = torch.rand(20, 1024)``
    local was removed.
    """

    torch.set_default_device(device)
    max_loras = 8
    lora_config = LoRAConfig(max_loras=max_loras,
                             max_lora_rank=8,
                             lora_dtype=torch.float16)

    def _pretest():
        # LM head with the extra-vocab columns zeroed so the baseline
        # ignores them.
        linear = ParallelLMHead(vocab_size + lora_config.lora_extra_vocab_size,
                                1024,
                                vocab_size,
                                params_dtype=torch.float16)
        linear.weight.data = torch.rand_like(linear.weight.data)
        linear.weight.data[:, vocab_size:] = 0
        logits_processor = LogitsProcessor(
            vocab_size + lora_config.lora_extra_vocab_size, vocab_size)
        lora_logits_processor = LogitsProcessorWithLoRA(
            logits_processor, 1024, linear.weight.dtype, linear.weight.device)
        lora_logits_processor.create_lora_weights(max_loras, lora_config)

        return linear, logits_processor, lora_logits_processor

    for i in range(10):
        set_random_seed(i)

        id_to_index = get_random_id_to_index(num_loras, max_loras)
        linear, logits_processor, lora_logits_processor = _pretest()

        # NOTE: all the generated loras share the same embeddings tensor.
        lora_dict, _ = populate_loras(
            id_to_index,
            layer=lora_logits_processor,
            layer_weights=linear.weight,
            generate_embeddings_tensor=1024,
        )
        embeddings_tensor = list(lora_dict.values())[0].embeddings_tensor
        embeddings_tensor_len = embeddings_tensor.shape[0]

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=list(lora_dict.keys()),
            num_inputs=8 * num_loras,  # * 3,
            input_size=(1, 1024),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        mapping_info = convert_mapping(
            lora_mapping,
            id_to_index,
            max_loras,
            vocab_size,
            lora_config.lora_extra_vocab_size,
        )
        lora_logits_processor.set_mapping(*mapping_info, )

        lora_result = lora_logits_processor._get_logits(
            hidden_states=torch.cat(inputs),
            embedding=linear.weight,
            embedding_bias=None)

        original_weight = linear.weight.clone()

        # Splice the shared lora embeddings into the extra-vocab rows so
        # the plain processor can produce the reference logits.
        linear.weight[logits_processor.
                      org_vocab_size:logits_processor.org_vocab_size +
                      embeddings_tensor_len] = embeddings_tensor

        logits_processor.org_vocab_size = (vocab_size +
                                           lora_config.lora_extra_vocab_size)
        expected_results = []
        for input_, lora_id in zip(inputs, prompt_mapping):
            lora = lora_dict[lora_id]
            result = logits_processor._get_logits(hidden_states=input_,
                                                  embedding=linear.weight,
                                                  embedding_bias=None)
            # Positions past this lora's extra vocab are masked out.
            result[:, vocab_size + embeddings_tensor_len:] = float("-inf")
            result += input_ @ lora.lora_a @ lora.lora_b * lora.scaling
            expected_results.append(result)
        expected_result = torch.cat(expected_results)
        logits_processor.org_vocab_size = vocab_size

        # Previously missing: actually compare against the reference.
        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)

        # Check that resetting the lora weights succeeds

        for slot_idx in range(max_loras):
            lora_logits_processor.reset_lora(slot_idx)

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=[0],
            num_inputs=8 * num_loras * 3,
            input_size=(1, 1024),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras,
                                       vocab_size,
                                       lora_config.lora_extra_vocab_size)
        lora_logits_processor.set_mapping(*mapping_info, )

        lora_result = lora_logits_processor._get_logits(
            hidden_states=torch.cat(inputs),
            embedding=original_weight,
            embedding_bias=None)[:, :vocab_size]
        expected_result = logits_processor._get_logits(
            hidden_states=torch.cat(inputs),
            embedding=original_weight,
            embedding_bias=None)

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)
@torch.inference_mode()
@pytest.mark.parametrize("num_loras", [1, 2, 4, 8])
@pytest.mark.parametrize("orientation", ["row", "column"])
@pytest.mark.parametrize("fully_shard", [True, False])
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_linear_parallel(dist_init, num_loras, orientation, fully_shard,
                         device) -> None:
    """Row/Column parallel linear layers with (optionally fully-sharded)
    LoRA must equal base linear output + x @ A @ B * scaling, and reset
    must restore the base layer."""

    torch.set_default_device(device)
    max_loras = 8
    lora_config = LoRAConfig(max_loras=max_loras,
                             max_lora_rank=8,
                             fully_sharded_loras=fully_shard,
                             lora_dtype=torch.float16)

    def create_random_linear_parallel_layer():
        if orientation == "row":
            linear = RowParallelLinear(4096,
                                       4096,
                                       bias=False,
                                       params_dtype=torch.float16)
            linear.weight.data = torch.rand_like(linear.weight.data)
            lora_linear = (RowParallelLinearWithLoRA(linear) if not fully_shard
                           else RowParallelLinearWithShardedLoRA(linear))
        else:
            linear = ColumnParallelLinear(4096,
                                          4096,
                                          bias=False,
                                          params_dtype=torch.float16)
            linear.weight.data = torch.rand_like(linear.weight.data)
            lora_linear = (ColumnParallelLinearWithLoRA(linear)
                           if not fully_shard else
                           ColumnParallelLinearWithShardedLoRA(linear))
        lora_linear.create_lora_weights(max_loras, lora_config)

        return linear, lora_linear

    for i in range(10):
        set_random_seed(i)

        id_to_index = get_random_id_to_index(num_loras, max_loras)
        linear, lora_linear = create_random_linear_parallel_layer()

        lora_dict, _ = populate_loras(
            id_to_index,
            layer=lora_linear,
            layer_weights=linear.weight,
        )

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=list(lora_dict.keys()),
            num_inputs=32 * num_loras,
            input_size=(1, 4096),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        # vocab args are irrelevant for linear layers; 512 is a dummy.
        mapping_info = convert_mapping(
            lora_mapping,
            id_to_index,
            max_loras,
            512,
            lora_config.lora_extra_vocab_size,
        )
        lora_linear.set_mapping(*mapping_info, )

        lora_result = lora_linear(torch.cat(inputs))[0]

        expected_results = []
        for input_, lora_id in zip(inputs, prompt_mapping):
            lora = lora_dict[lora_id]
            result = linear(input_)[0]
            result += input_ @ lora.lora_a @ lora.lora_b * lora.scaling
            expected_results.append(result)
        expected_result = torch.cat(expected_results)

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)

        # Check that resetting the lora weights succeeds

        for slot_idx in range(max_loras):
            lora_linear.reset_lora(slot_idx)

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=[0],
            num_inputs=32 * num_loras,
            input_size=(1, 4096),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        mapping_info = convert_mapping(lora_mapping, id_to_index, max_loras,
                                       512, lora_config.lora_extra_vocab_size)
        lora_linear.set_mapping(*mapping_info, )

        lora_result = lora_linear(torch.cat(inputs))[0]
        expected_result = linear(torch.cat(inputs))[0]

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)
@torch.inference_mode()
@pytest.mark.parametrize("num_loras", [1, 2, 4, 8])
@pytest.mark.parametrize("repeats", [1, 2, 3])
@pytest.mark.parametrize("fully_shard", [True, False])
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard,
                                device) -> None:
    """Packed column-parallel layers (merged gate/up with repeats=2, merged
    QKV with repeats=3, plain QKV with repeats=1) must apply each sublora
    to its own output slice; reset must restore the base layer."""

    torch.set_default_device(device)
    max_loras = 8
    lora_config = LoRAConfig(max_loras=max_loras,
                             max_lora_rank=8,
                             fully_sharded_loras=fully_shard,
                             lora_dtype=torch.float16)

    def create_column_parallel_packed_layer():
        if repeats == 2:
            linear = MergedColumnParallelLinear(4096, [4096] * repeats,
                                                bias=False,
                                                params_dtype=torch.float16)
            linear.weight.data = torch.rand_like(linear.weight.data)
            lora_linear = (MergedColumnParallelLinearWithLoRA(linear)
                           if not fully_shard else
                           MergedColumnParallelLinearWithShardedLoRA(linear))
        elif repeats == 3:
            linear = QKVParallelLinear(4096,
                                       64,
                                       32,
                                       bias=False,
                                       params_dtype=torch.float16)
            linear.weight.data = torch.rand_like(linear.weight.data)
            lora_linear = (MergedQKVParallelLinearWithLora(linear)
                           if not fully_shard else
                           MergedQKVParallelLinearWithShardedLora(linear))
        else:
            linear = QKVParallelLinear(4096,
                                       64,
                                       32,
                                       bias=False,
                                       params_dtype=torch.float16)
            linear.weight.data = torch.rand_like(linear.weight.data)
            lora_linear = QKVParallelLinearWithLora(linear)

        # Minimal stand-in for the HF model config that
        # create_lora_weights consults.
        @dataclass
        class FakeConfig:
            hidden_size = 4096
            num_key_value_heads = 32
            num_attention_heads = 32

        lora_linear.create_lora_weights(max_loras,
                                        lora_config,
                                        model_config=FakeConfig())

        return linear, lora_linear

    for i in range(10):
        set_random_seed(i)

        id_to_index = get_random_id_to_index(num_loras, max_loras)

        linear, lora_linear = create_column_parallel_packed_layer()

        lora_dict, sublora_dict = populate_loras(
            id_to_index,
            layer=lora_linear,
            layer_weights=linear.weight,
            repeats=repeats,
        )

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=list(lora_dict.keys()),
            num_inputs=32 * num_loras,
            input_size=(1, 4096),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        # vocab args are irrelevant for linear layers; 512 is a dummy.
        mapping_info = convert_mapping(
            lora_mapping,
            id_to_index,
            max_loras,
            512,
            lora_config.lora_extra_vocab_size,
        )
        lora_linear.set_mapping(*mapping_info)

        lora_result = lora_linear(torch.cat(inputs))[0]

        expected_results = []
        for input_, lora_id in zip(inputs, prompt_mapping):
            result = linear(input_)[0]
            subloras = sublora_dict[lora_id]
            # Each sublora only affects its own contiguous output slice.
            for i, sublora in enumerate(subloras):
                result[:, sublora.lora_b.shape[1] * i:sublora.lora_b.shape[1] *
                       (i + 1)] += (input_ @ sublora.lora_a @ sublora.lora_b *
                                    sublora.scaling)
            expected_results.append(result)
        expected_result = torch.cat(expected_results)

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)

        for slot_idx in range(max_loras):
            lora_linear.reset_lora(slot_idx)

        inputs, index_mapping, prompt_mapping = create_random_inputs(
            active_lora_ids=[0],
            num_inputs=32 * num_loras,
            input_size=(1, 4096),
            input_range=(0, 1),
            input_type=torch.float16,
        )
        lora_mapping = LoRAMapping(index_mapping, prompt_mapping)

        mapping_info = convert_mapping(
            lora_mapping,
            id_to_index,
            max_loras,
            512,
            lora_config.lora_extra_vocab_size,
        )
        lora_linear.set_mapping(*mapping_info)

        lora_result = lora_linear(torch.cat(inputs))[0]
        expected_result = linear(torch.cat(inputs))[0]

        rtol, atol = TOLERANCES[lora_result.dtype]
        assert torch.allclose(lora_result,
                              expected_result,
                              rtol=rtol,
                              atol=atol)
SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 + ] + sampling_params = vllm.SamplingParams(temperature=0, + max_tokens=256, + stop=["[/assistant]"]) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. 
+ generated_texts = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +@pytest.mark.parametrize("tp_size", [1]) +def test_llama_lora(sql_lora_files, tp_size): + # Cannot use as it will initialize torch.cuda too early... + # if torch.cuda.device_count() < tp_size: + # pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=tp_size) + + expected_no_lora_output = [ + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? 
", # noqa: E501 + "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 + ] + expected_lora_output = [ + " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 + " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 + " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 + " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 + " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 + " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 + ] + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == expected_lora_output + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output + + print("lora 2") + assert do_sample(llm, 
@pytest.mark.skip("Requires multiple GPUs")
def test_llama_tensor_parallel_equality(sql_lora_files):
    """Generations with a LoRA adapter must be identical across
    tensor-parallel sizes 1, 2 and 4."""
    # Cannot use as it will initialize torch.cuda too early...
    # if torch.cuda.device_count() < 4:
    #     pytest.skip(f"Not enough GPUs for tensor parallelism {4}")

    def sample_with_tp(tp_size):
        # Build an engine at the requested TP degree, sample once with
        # LoRA id 1, then tear the engine down before the next run.
        llm = vllm.LLM(MODEL_PATH,
                       enable_lora=True,
                       max_num_seqs=16,
                       max_loras=4,
                       tensor_parallel_size=tp_size)
        generated = do_sample(llm, sql_lora_files, lora_id=1)
        del llm
        cleanup()
        return generated

    baseline = sample_with_tp(1)
    for tp_size in (2, 4):
        assert baseline == sample_with_tp(tp_size)
@pytest.mark.parametrize("m", TENSOR_SIZES)
@pytest.mark.parametrize("n", TENSOR_SIZES)
@pytest.mark.parametrize("k", BATCH_SIZES)
@pytest.mark.parametrize("rank", RANKS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_apply_lora(m, n, k, rank, dtype) -> None:
    """``_apply_lora`` with a stacked adapter must match the dense
    ``x @ A @ B * scaling`` reference, and rows whose index is -1 must be
    left untouched (all-zero here)."""
    manager = DummyLoRAManager()

    module_name = "module"
    weight = torch.rand([m, n], device="cuda", dtype=dtype)
    manager.init_random_lora(module_name, weight, rank=rank)
    lora = manager.get_module_lora(module_name)

    x = torch.rand(k, n, device="cuda", dtype=dtype)
    expected = x @ lora.lora_a @ lora.lora_b * lora.scaling

    # Replicate the same adapter into all 8 slots; the scaling factor is
    # folded into the stacked B matrix.
    a_stack = torch.zeros(8,
                          1,
                          lora.lora_a.shape[1],
                          lora.lora_a.shape[0],
                          device="cuda",
                          dtype=dtype)
    b_stack = torch.zeros(8,
                          1,
                          lora.lora_b.shape[1],
                          lora.lora_b.shape[0],
                          device="cuda",
                          dtype=dtype)
    for slot in range(a_stack.shape[0]):
        a_stack[slot][0] = lora.lora_a.T
        b_stack[slot][0] = (lora.lora_b * lora.scaling).T

    out = torch.zeros(k, m, device="cuda", dtype=dtype)
    _apply_lora(
        x, a_stack, b_stack,
        torch.randint(0, a_stack.shape[0], (len(x), ), device="cuda"), out)

    rtol, atol = TOLERANCES[dtype]
    assert torch.allclose(expected, out, rtol=rtol, atol=atol)

    # Index -1 means "no adapter": the output must stay zero.
    out[:] = 0
    _apply_lora(x, a_stack, b_stack,
                torch.full((len(x), ), -1, device="cuda"), out)
    assert torch.allclose(torch.zeros_like(out), out)

    manager.reset_lora()
@pytest.mark.parametrize("qkv", QKV_TENSOR_SIZES)
@pytest.mark.parametrize("n", TENSOR_SIZES)
@pytest.mark.parametrize("k", BATCH_SIZES)
@pytest.mark.parametrize("rank", RANKS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_apply_lora_packed_3slice(qkv, n, k, rank, dtype) -> None:
    """``_apply_lora_packed_nslice`` over three (Q, K, V) slices must match
    the concatenated per-slice dense references, and index -1 must leave
    the output untouched."""
    manager = DummyLoRAManager()

    module_name = "module"
    weight_q = torch.empty(qkv[0], n, device="cuda", dtype=dtype)
    weight_kv = torch.empty(qkv[1], n, device="cuda", dtype=dtype)

    # Q gets its own shape; K and V share the KV shape.
    adapters = []
    for suffix, ref_weight in (("q", weight_q), ("k", weight_kv),
                               ("v", weight_kv)):
        manager.init_random_lora(module_name + suffix, ref_weight, rank=rank)
        adapters.append(manager.get_module_lora(module_name + suffix))
    lora_q, lora_k, lora_v = adapters

    x = torch.rand(k, n, device="cuda", dtype=dtype)
    expected = torch.cat([
        x @ adapter.lora_a @ adapter.lora_b * adapter.scaling
        for adapter in adapters
    ],
                         dim=1)

    def empty_stack(shape):
        # Stack layout: (slots, 1, transposed rows, transposed cols).
        return torch.zeros(8, 1, shape[1], shape[0], device="cuda",
                           dtype=dtype)

    # Slice 0 is sized for Q; slices 1 and 2 share the K/V sizing.
    lora_a_stacks = [empty_stack(lora_q.lora_a.shape)
                     ] + [empty_stack(lora_k.lora_a.shape) for _ in range(2)]
    lora_b_stacks = [empty_stack(lora_q.lora_b.shape)
                     ] + [empty_stack(lora_k.lora_b.shape) for _ in range(2)]
    for slot in range(lora_a_stacks[0].shape[0]):
        for idx, adapter in enumerate(adapters):
            lora_a_stacks[idx][slot][0] = adapter.lora_a.T
            lora_b_stacks[idx][slot][0] = (adapter.lora_b *
                                           adapter.scaling).T

    out = torch.zeros(k, sum(qkv), device="cuda", dtype=dtype)
    _apply_lora_packed_nslice(
        x, lora_a_stacks, lora_b_stacks,
        torch.randint(0,
                      lora_a_stacks[0].shape[0], (len(x), ),
                      device="cuda"), out, (qkv[0], qkv[1], qkv[2]))

    rtol, atol = TOLERANCES[dtype]
    assert torch.allclose(expected, out, rtol=rtol, atol=atol)

    # Index -1 means "no adapter": the output must stay zero.
    out[:] = 0
    _apply_lora_packed_nslice(x, lora_a_stacks, lora_b_stacks,
                              torch.full((len(x), ), -1, device="cuda"),
                              out, (qkv[0], qkv[1], qkv[2]))
    assert torch.allclose(torch.zeros_like(out), out)

    manager.reset_lora()
expected_lora_modules.extend(packed_modules_mapping[module]) + else: + expected_lora_modules.append(module) + if lora_name == "baichuan7B": + # For the baichuan7B model, load it's LoRA, + # and the test should pass. + LoRAModel.from_local_checkpoint( + baichuan_lora_files, + expected_lora_modules, + lora_model_id=1, + device="cpu", + embedding_modules=embedding_modules, + embedding_padding_modules=embed_padding_modules) + elif lora_name == "baichuan7B-zero": + #Test that the target_modules contain prefix + # such as "model.layers.0.self_atten.W_pack", and + # the test should pass. + LoRAModel.from_local_checkpoint( + baichuan_zero_lora_files, + expected_lora_modules, + lora_model_id=1, + device="cpu", + embedding_modules=embedding_modules, + embedding_padding_modules=embed_padding_modules) + else: + # For the baichuan7B model, load chatglm3-6b's LoRA, + # and the test should raise the following error. + expected_error = "Please verify that the loaded LoRA module is correct" # noqa: E501 + with pytest.raises(ValueError, match=expected_error): + LoRAModel.from_local_checkpoint( + chatglm3_lora_files, + expected_lora_modules, + lora_model_id=1, + device="cpu", + embedding_modules=embedding_modules, + embedding_padding_modules=embed_padding_modules) diff --git a/tests/lora/test_lora_manager.py b/tests/lora/test_lora_manager.py new file mode 100644 index 0000000..c08eee9 --- /dev/null +++ b/tests/lora/test_lora_manager.py @@ -0,0 +1,487 @@ +import os +from typing import List + +import pytest +import torch +from safetensors.torch import load_file +from torch import nn + +from vllm.config import LoRAConfig +from vllm.lora.layers import (ColumnParallelLinearWithLoRA, + MergedColumnParallelLinearWithLoRA, + RowParallelLinearWithLoRA) +from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights +from vllm.lora.models import (LoRAMapping, LoRAModel, LoRAModelManager, + LRUCacheLoRAModelManager) +from vllm.lora.request import LoRARequest +from vllm.lora.worker_manager 
def test_from_lora_tensors(sql_lora_files):
    """``LoRAModel.from_lora_tensors`` must populate every module's A/B
    matrices with consistent rank-8 shapes and attach the extra-embedding
    tensors only to embedding modules."""
    tensors = load_file(
        os.path.join(sql_lora_files, "adapter_model.safetensors"))
    new_embeddings = load_file(
        os.path.join(sql_lora_files, "new_embeddings.safetensors"))
    lora_model = LoRAModel.from_lora_tensors(
        1,
        8,
        16,
        tensors,
        "cuda",
        embeddings=new_embeddings,
        embedding_modules=EMBEDDING_MODULES,
        embedding_padding_modules=EMBEDDING_PADDING_MODULES)

    for module_name, lora in lora_model.loras.items():
        assert lora.module_name == module_name
        assert lora.rank == 8
        assert lora.lora_alpha == 16
        assert lora.lora_a is not None
        assert lora.lora_b is not None
        # A's inner dim must line up with B's outer dim (the rank).
        assert (lora.lora_a.shape[1] == lora.lora_b.shape[0]
                ), f"{lora.lora_a.shape=}, {lora.lora_b.shape=}"
        assert lora.lora_a.shape[1] == 8

        embeddings_module = next(
            (k for k in EMBEDDING_MODULES if k in module_name), None)
        if embeddings_module is None:
            # Non-embedding modules carry no extra embedding tensor.
            assert lora.embeddings_tensor is None
        else:
            assert torch.equal(
                lora.embeddings_tensor,
                new_embeddings[EMBEDDING_MODULES[embeddings_module]].to(
                    device=lora.embeddings_tensor.device))


def create_lora(lora_id: int, model: nn.Module,
                sub_modules: List[str]) -> LoRAModel:
    """Build a rank-8 LoRAModel with random A/B weights for each of
    *sub_modules*, shaped to match that submodule's weight matrix."""
    loras = {}
    for name in sub_modules:
        weight = model.get_submodule(name).weight
        loras[name] = LoRALayerWeights(
            name,
            8,
            16,
            torch.rand([weight.shape[1], 8], device="cuda"),
            torch.rand([8, weight.shape[0]], device="cuda"),
        )
    return LoRAModel(lora_id, 8, loras)
replaced_module_name == empty_replaced_module_name: + continue + loras[replaced_module_name] = LoRALayerWeights( + replaced_module_name, + 8, + 16, + torch.rand([w.shape[1], 8], device="cuda"), + torch.rand([8, w.shape[0] // len(replaced_module_names)], + device="cuda"), + ) + return LoRAModel(lora_id, 8, loras) + + +def test_replace_submodules(dist_init, dummy_model): + model = dummy_model + model.supported_lora_modules = ["dense1", "layer1.dense2"] + model.packed_modules_mapping = {} + manager = LoRAModelManager( + model, 1, 1, 1, + LoRAConfig(max_lora_rank=8, max_cpu_loras=8, max_loras=8)) + model = manager.model + + assert isinstance(model.get_submodule("dense1"), + ColumnParallelLinearWithLoRA) + assert isinstance(model.get_submodule("layer1.dense1"), + ColumnParallelLinearWithLoRA) + assert isinstance(model.get_submodule("dense2"), RowParallelLinear) + assert isinstance(model.get_submodule("layer1.dense2"), + RowParallelLinearWithLoRA) + + +def test_lora_model_manager(dist_init, dummy_model): + model = dummy_model + model.supported_lora_modules = ["dense1", "dense2", "lm_head"] + model.packed_modules_mapping = {} + model_lora1 = create_lora(1, model, ["layer1.dense1", "dense2", "lm_head"]) + model_lora2 = create_lora(2, model, ["dense1", "dense2", "lm_head"]) + model_lora3 = create_lora(3, model, ["dense1", "dense2", "lm_head"]) + manager = LoRAModelManager( + model, 2, 2, 2, + LoRAConfig(max_lora_rank=8, max_cpu_loras=3, max_loras=2)) + assert all(x is None for x in manager.lora_index_to_id) + assert manager.add_lora(model_lora1) + assert manager.activate_lora(1) + assert manager.lora_index_to_id[0] == 1 + assert not manager.add_lora(model_lora1) + assert not manager.activate_lora(1) + assert manager.add_lora(model_lora2) + assert manager.activate_lora(2) + assert manager.lora_index_to_id[0] == 1 + assert manager.lora_index_to_id[1] == 2 + assert not manager.add_lora(model_lora2) + assert not manager.activate_lora(2) + assert manager.add_lora(model_lora3) + 
def test_lora_lru_cache_model_manager(dist_init, dummy_model):
    """LRUCacheLoRAModelManager: activating beyond GPU-slot capacity must
    evict the least-recently-used adapter instead of raising."""
    model = dummy_model
    model.supported_lora_modules = ["dense1", "dense2", "lm_head"]
    model.packed_modules_mapping = {}
    lora1 = create_lora(1, model, ["layer1.dense1", "dense2", "lm_head"])
    lora2 = create_lora(2, model, ["dense1", "dense2", "lm_head"])
    lora3 = create_lora(3, model, ["dense1", "dense2", "lm_head"])
    manager = LRUCacheLoRAModelManager(
        model, 2, 2, 2,
        LoRAConfig(max_lora_rank=8, max_cpu_loras=3, max_loras=2))

    # Both GPU slots start empty.
    assert all(x is None for x in manager.lora_index_to_id)

    # First adapter lands in slot 0; re-adding/re-activating is a no-op.
    assert manager.add_lora(lora1)
    assert manager.activate_lora(1)
    assert manager.lora_index_to_id[0] == 1
    assert not manager.add_lora(lora1)
    assert not manager.activate_lora(1)

    # Second adapter fills slot 1.
    assert manager.add_lora(lora2)
    assert manager.activate_lora(2)
    assert manager.lora_index_to_id[0] == 1
    assert manager.lora_index_to_id[1] == 2
    assert not manager.add_lora(lora2)
    assert not manager.activate_lora(2)

    # Adding a third does not touch the slots until it is activated;
    # activation then evicts the LRU entry (id 1 in slot 0).
    assert manager.add_lora(lora3)
    assert manager.lora_index_to_id[0] == 1
    assert manager.lora_index_to_id[1] == 2
    assert manager.activate_lora(3)
    assert manager.lora_index_to_id[0] == 3
    assert manager.lora_index_to_id[1] == 2

    # Manual removal frees the slot; removing twice fails.
    assert manager.remove_lora(lora2.id)
    assert manager.lora_index_to_id[1] is None
    assert not manager.remove_lora(lora2.id)
    assert manager.remove_lora(lora1.id)
    assert not manager.remove_lora(lora1.id)

    # Re-add id 1 into the freed slot.
    assert manager.add_lora(lora1)
    assert manager.activate_lora(1)
    assert manager.lora_index_to_id[0] == 3
    assert manager.lora_index_to_id[1] == 1

    # Deactivation clears a slot without removing the adapter.
    assert manager.add_lora(lora2)
    assert manager.deactivate_lora(3)
    assert manager.lora_index_to_id[0] is None
    assert manager.lora_index_to_id[1] == 1
    assert manager.activate_lora(2)
    assert manager.lora_index_to_id[0] == 2
    assert manager.lora_index_to_id[1] == 1

    # Activating id 3 again evicts the LRU entry (id 1 in slot 1).
    assert manager.activate_lora(3)
    assert manager.lora_index_to_id[0] == 2
    assert manager.lora_index_to_id[1] == 3
def test_lru_cache_worker_lora_manager(llama_2_7b_model_extra_embeddings,
                                       sql_lora_files):
    """LRUCacheWorkerLoRAManager keeps up to ``max_cpu_loras`` adapters
    resident across requests, evicting least-recently-used ones, and
    raises RuntimeError when a single batch exceeds capacity."""
    lora_config = LoRAConfig(max_lora_rank=8, max_cpu_loras=4, max_loras=4)
    worker_lora_manager = LRUCacheWorkerLoRAManager(
        4, 2, llama_2_7b_model_extra_embeddings.unpadded_vocab_size -
        lora_config.lora_extra_vocab_size, lora_config, torch.device("cuda"),
        EMBEDDING_MODULES, EMBEDDING_PADDING_MODULES)
    worker_lora_manager.create_lora_manager(llama_2_7b_model_extra_embeddings)

    mapping = LoRAMapping([], [])

    def activate(ids):
        # Build one request per id (duplicates allowed) and activate them.
        worker_lora_manager.set_active_loras(
            [LoRARequest(str(i), i, sql_lora_files) for i in ids], mapping)

    def check_slots(expected_by_slot):
        for slot, expected_id in enumerate(expected_by_slot):
            assert (worker_lora_manager._lora_manager.lora_index_to_id[slot]
                    == expected_id)

    activate([1, 2])
    assert worker_lora_manager.list_loras() == {1, 2}
    check_slots([1, 2])

    # Still under capacity: 3 and 4 fill the remaining slots.
    activate([1, 3, 4])
    assert worker_lora_manager.list_loras() == {1, 2, 3, 4}
    check_slots([1, 2, 3, 4])

    # Adding 5 evicts the LRU adapter (3); 4 survives.
    activate([1, 2, 5])
    assert worker_lora_manager.list_loras() == {1, 2, 4, 5}
    check_slots([1, 2, 5, 4])

    # Repeating one id changes nothing.
    activate([1, 1, 1])
    assert worker_lora_manager.list_loras() == {1, 2, 4, 5}
    check_slots([1, 2, 5, 4])

    # Three fresh ids evict everything but the most recently used (1).
    activate([6, 7, 8])
    assert worker_lora_manager.list_loras() == {1, 6, 7, 8}
    check_slots([1, 7, 8, 6])

    # Over capacity in a single batch must raise.
    with pytest.raises(RuntimeError):
        activate([10, 11, 12, 13, 14])
+ lora_config = LoRAConfig(max_lora_rank=8, max_cpu_loras=4, max_loras=4) + worker_lora_manager = WorkerLoRAManager( + 4, 2, llama_2_7b_model_extra_embeddings.unpadded_vocab_size - + lora_config.lora_extra_vocab_size, lora_config, torch.device("cuda"), + EMBEDDING_MODULES, EMBEDDING_PADDING_MODULES) + worker_lora_manager.create_lora_manager(llama_2_7b_model_extra_embeddings) + + mapping = LoRAMapping([], []) + worker_lora_manager.set_active_loras([ + LoRARequest("1", 1, sql_lora_files), + LoRARequest("2", 2, sql_lora_files) + ], mapping) + assert worker_lora_manager.list_loras() == {1, 2} + assert worker_lora_manager._lora_manager.lora_index_to_id[0] == 1 + assert worker_lora_manager._lora_manager.lora_index_to_id[1] == 2 + + worker_lora_manager.set_active_loras([ + LoRARequest("1", 1, sql_lora_files), + LoRARequest("3", 3, sql_lora_files), + LoRARequest("4", 4, sql_lora_files) + ], mapping) + assert worker_lora_manager.list_loras() == {1, 3, 4} + assert worker_lora_manager._lora_manager.lora_index_to_id[0] == 1 + assert worker_lora_manager._lora_manager.lora_index_to_id[1] == 3 + assert worker_lora_manager._lora_manager.lora_index_to_id[2] == 4 + + worker_lora_manager.set_active_loras([ + LoRARequest("1", 1, sql_lora_files), + LoRARequest("2", 2, sql_lora_files), + LoRARequest("5", 5, sql_lora_files) + ], mapping) + assert worker_lora_manager.list_loras() == {1, 2, 5} + assert worker_lora_manager._lora_manager.lora_index_to_id[0] == 1 + assert worker_lora_manager._lora_manager.lora_index_to_id[1] == 2 + assert worker_lora_manager._lora_manager.lora_index_to_id[2] == 5 + + worker_lora_manager.set_active_loras([ + LoRARequest("1", 1, sql_lora_files), + LoRARequest("1", 1, sql_lora_files), + LoRARequest("1", 1, sql_lora_files) + ], mapping) + assert worker_lora_manager.list_loras() == {1} + assert worker_lora_manager._lora_manager.lora_index_to_id[0] == 1 + assert worker_lora_manager._lora_manager.lora_index_to_id[1] is None + assert 
worker_lora_manager._lora_manager.lora_index_to_id[2] is None + + worker_lora_manager.set_active_loras([ + LoRARequest("6", 6, sql_lora_files), + LoRARequest("7", 7, sql_lora_files), + LoRARequest("8", 8, sql_lora_files) + ], mapping) + assert worker_lora_manager.list_loras() == {6, 7, 8} + assert worker_lora_manager._lora_manager.lora_index_to_id[0] == 8 + assert worker_lora_manager._lora_manager.lora_index_to_id[1] == 6 + assert worker_lora_manager._lora_manager.lora_index_to_id[2] == 7 + + # Over capacity + with pytest.raises(RuntimeError): + worker_lora_manager.set_active_loras([ + LoRARequest("10", 10, sql_lora_files), + LoRARequest("11", 11, sql_lora_files), + LoRARequest("12", 12, sql_lora_files), + LoRARequest("13", 13, sql_lora_files), + LoRARequest("14", 14, sql_lora_files) + ], mapping) + + +def test_packed_loras(dist_init, dummy_model_gate_up): + model = dummy_model_gate_up + model.supported_lora_modules = ["gate_up_proj"] + model.packed_modules_mapping = { + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + model_lora = create_packed_lora( + 1, + model, + module_name="gate_up_proj", + replaced_module_names=["gate_proj", "up_proj"]) + model_lora1 = create_packed_lora( + 2, + model, + module_name="gate_up_proj", + replaced_module_names=["gate_proj", "up_proj"], + empty_replaced_module_name="gate_proj", + ) + + manager = LoRAModelManager( + model, 2, 2, 2, + LoRAConfig(max_lora_rank=8, max_cpu_loras=2, max_loras=2)) + model = manager.model + + assert isinstance(model.get_submodule("gate_up_proj"), + MergedColumnParallelLinearWithLoRA) + assert manager.add_lora(model_lora) + assert manager.add_lora(model_lora1) + + packed_lora = model_lora.get_lora("gate_up_proj") + assert packed_lora and isinstance(packed_lora, PackedLoRALayerWeights) + + assert torch.allclose(packed_lora.lora_a[0], + model_lora.get_lora("gate_proj").lora_a) + assert torch.allclose(packed_lora.lora_b[0], + model_lora.get_lora("gate_proj").lora_b) + assert 
torch.allclose(packed_lora.lora_a[1], + model_lora.get_lora("up_proj").lora_a) + assert torch.allclose(packed_lora.lora_b[1], + model_lora.get_lora("up_proj").lora_b) + + packed_lora1 = model_lora1.get_lora("gate_up_proj") + assert packed_lora1 and isinstance(packed_lora1, PackedLoRALayerWeights) + + assert packed_lora1.lora_a[0] is None + assert packed_lora1.lora_b[0] is None + assert torch.allclose(packed_lora1.lora_a[1], + model_lora1.get_lora("up_proj").lora_a) + assert torch.allclose(packed_lora1.lora_b[1], + model_lora1.get_lora("up_proj").lora_b) diff --git a/tests/lora/test_mixtral.py b/tests/lora/test_mixtral.py new file mode 100644 index 0000000..4d74722 --- /dev/null +++ b/tests/lora/test_mixtral.py @@ -0,0 +1,53 @@ +import pytest +import torch + +import vllm +from vllm.lora.request import LoRARequest + +MODEL_PATH = "mistralai/Mixtral-8x7B-Instruct-v0.1" + + +def do_sample(llm, lora_path: str, lora_id: int): + prompts = [ + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nSpellForce 3 is a pretty bad game. The developer Grimlore Games is clearly a bunch of no-talent hacks, and 2017 was a terrible year for games anyway. 
[/user] [assistant]", # noqa: E501 + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nI wanted to like Grimlore Games' 2017 entry, but in SpellForce 3 they just didn't get anything right. [/user] [assistant]", # noqa: E501 + "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nBioShock is a good role-playing, action-adventure, shooter that released for PlayStation, Xbox, and PC in 2007. It is available on Steam, and it has a Mac release but not a Linux release. 
[/user] [assistant]", # noqa: E501 + ] + sampling_params = vllm.SamplingParams(temperature=0, max_tokens=256) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. + generated_texts = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text.strip() + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +@pytest.mark.parametrize("tp_size", [4]) +def test_mixtral_lora(mixtral_lora_files, tp_size): + if torch.cuda.device_count() < tp_size: + pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=tp_size, + worker_use_ray=True) + + expected_lora_output = [ + "give_opinion(name[SpellForce 3], release_year[2017], developer[Grimlore Games], rating[poor])", # noqa: E501 + "give_opinion(name[SpellForce 3], release_year[2017], developer[Grimlore Games], rating[poor])", # noqa: E501 + "inform(name[BioShock], release_year[2007], rating[good], genres[action-adventure, role-playing, shooter], platforms[PlayStation, Xbox, PC], available_on_steam[yes], has_linux_release[no], has_mac_release[yes])", # noqa: E501 + ] + + assert do_sample(llm, mixtral_lora_files, + lora_id=1) == expected_lora_output + assert do_sample(llm, mixtral_lora_files, + lora_id=2) == expected_lora_output diff --git a/tests/lora/test_punica.py b/tests/lora/test_punica.py new file mode 100644 index 0000000..fd2a1b7 --- /dev/null +++ b/tests/lora/test_punica.py @@ -0,0 +1,231 @@ +# Based on code from https://github.com/punica-ai/punica + +import pytest +import torch + +import vllm.lora.punica as punica + + +def assert_close(a, b): + rtol, atol = { + torch.float16: (5e-3, 5e-3), + torch.bfloat16: (3e-2, 2e-2), + torch.float32: (None, None), + }[a.dtype] + 
torch.testing.assert_close(a, b, rtol=rtol, atol=atol) + + +def _lora_ref_impl( + y_final: torch.Tensor, + x: torch.Tensor, + wa_T_all: torch.Tensor, + wb_T_all: torch.Tensor, + indicies: torch.LongTensor, + layer_idx: int, + scale: float, +): + y_stage_1 = torch.empty( + (x.size(0), wa_T_all.size(-2)), + dtype=torch.float32, + device=x.device, + ) + bs = x.shape[0] + s = torch.tensor(scale, dtype=torch.float32, device=x.device) + for i, lora_idx in zip(range(bs), indicies.cpu().tolist()): + xi = x[i].unsqueeze(0).to(torch.float32) + wa = wa_T_all[lora_idx, layer_idx].transpose(-1, -2).to(torch.float32) + if wb_T_all is not None: + wb = wb_T_all[lora_idx, layer_idx].transpose(-1, + -2).to(torch.float32) + + tmp = xi @ wa + y_stage_1[i] = tmp.squeeze(0) + y_final[i] += ((tmp @ wb).squeeze(0) * + s if wb_T_all is not None else y_stage_1[i]) + return y_final, y_stage_1 + + +H1 = H2 = [ + 128, + 256, + 512, + 1024, + 1152, + 1280, + 1536, + 2048, + 2304, + 2560, + 2752, + 3072, + 3456, + 3584, + 4096, + 4608, + 5120, + 5504, + 5632, + 6144, + 6848, + 6912, + 7168, + 8192, + 9216, + 10240, + 11008, + 13824, + 14336, + 15360, + 22016, + 24576, + 27392, + 32000, + 32256, + 32512, + 32768, + 33024, + 36864, + 43264, + 49152, + 64000, + 64256, + 102400, + 102656, + 128000, + 128256, +] +H2 = [64] + H2 +R = [1, 2, 4] +SEED = [0xabcdabcd987] +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +@pytest.mark.parametrize("dtype_str", ["float16", "bfloat16"]) +@pytest.mark.parametrize("h1", H1) +@pytest.mark.parametrize("r", R) +@pytest.mark.parametrize("seed", SEED) +@torch.inference_mode() +def test_lora_a_extra_shapes(dtype_str, h1, r, seed): + torch.manual_seed(seed) + num_loras = 4 + num_layers = 1 + bs = 32 + dtype = getattr(torch, dtype_str) + device = torch.device("cuda") + + wa_T_all = torch.randn(num_loras, + num_layers, + r, + h1, + dtype=dtype, + device=device) + indices = torch.randint(num_loras, (bs, ), dtype=torch.long, 
device=device) + + for layer_idx in range(num_layers): + x = torch.randn(bs, h1, dtype=dtype, device=device) + y = torch.randn(bs, r, dtype=dtype, device=device) + + y_ref = y.clone() + _lora_ref_impl( + y_ref, + x, + wa_T_all, + None, + indices, + layer_idx, + 1.0, + ) + + y_our = y.clone() + punica.bgmv(y_our, x, wa_T_all, indices, layer_idx, 1.0) + + assert_close(y_ref, y_our) + + +@pytest.mark.parametrize("dtype_str", ["float16", "bfloat16"]) +@pytest.mark.parametrize("h1", H1) +@pytest.mark.parametrize("h2", H2) +@pytest.mark.parametrize("seed", SEED) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_lora_correctness(dtype_str, h1, h2, seed, device): + torch.manual_seed(seed) + num_loras = 4 + num_layers = 1 + r = 8 + bs = 32 + scale = 0.123 + dtype = getattr(torch, dtype_str) + torch.set_default_device(device) + + wa_T_all = torch.randn(num_loras, num_layers, r, h1, dtype=dtype) + wb_T_all = torch.randn(num_loras, num_layers, h2, r, dtype=dtype) + indices = torch.randint(num_loras, (bs, ), dtype=torch.long) + + for layer_idx in range(num_layers): + x = torch.randn(bs, h1, dtype=dtype) + y = torch.randn(bs, h2, dtype=dtype) + + y_ref = y.clone() + _lora_ref_impl(y_ref, x, wa_T_all, wb_T_all, indices, layer_idx, scale) + + y_our = y.clone() + punica.add_lora(y_our, x, wa_T_all, wb_T_all, indices, layer_idx, + scale) + + assert_close(y_ref, y_our) + + +@pytest.mark.parametrize("dtype_str", ["float16", "bfloat16"]) +@pytest.mark.parametrize("h1", H1) +@pytest.mark.parametrize("h2", H2) +@pytest.mark.parametrize("seed", SEED) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_lora_correctness_slice(dtype_str, h1, h2, seed, device): + if h2 % 3 != 0 or h2 // 3 not in H1: + pytest.skip("h2 must be divisible by 3 and in supported shapes") + torch.manual_seed(seed) + num_loras = 4 + num_layers = 1 + r = 8 + bs = 32 + scale = 0.123 + dtype = getattr(torch, dtype_str) + torch.set_default_device(device) 
+ + wa_T_all_0 = torch.randn(num_loras, num_layers, r, h1, dtype=dtype) + wa_T_all_1 = torch.randn(num_loras, num_layers, r, h1, dtype=dtype) + wa_T_all_2 = torch.randn(num_loras, num_layers, r, h1, dtype=dtype) + wb_T_all_0 = torch.randn(num_loras, num_layers, h2 // 3, r, dtype=dtype) + wb_T_all_1 = torch.randn(num_loras, num_layers, h2 // 3, r, dtype=dtype) + wb_T_all_2 = torch.randn(num_loras, num_layers, h2 // 3, r, dtype=dtype) + + indices = torch.randint(num_loras, (bs, ), dtype=torch.long) + + for layer_idx in range(num_layers): + x = torch.randn(bs, h1, dtype=dtype) + y = torch.randn(bs, h2, dtype=dtype) + s = h2 // 3 + + y_ref = y.clone() + _lora_ref_impl(y_ref[:, :s], x, wa_T_all_0, wb_T_all_0, indices, + layer_idx, scale) + _lora_ref_impl(y_ref[:, s:s * 2], x, wa_T_all_1, wb_T_all_1, indices, + layer_idx, scale) + _lora_ref_impl(y_ref[:, s * 2:], x, wa_T_all_2, wb_T_all_2, indices, + layer_idx, scale) + + y_our = y.clone() + punica.add_lora_slice(y_our, x, wa_T_all_0, wb_T_all_0, indices, + layer_idx, scale, 0, s) + punica.add_lora_slice(y_our, x, wa_T_all_1, wb_T_all_1, indices, + layer_idx, scale, s, s) + punica.add_lora_slice(y_our, x, wa_T_all_2, wb_T_all_2, indices, + layer_idx, scale, s * 2, s) + + assert_close(y_ref[:, :s], y_our[:, :s]) + assert_close(y_ref[:, s:s * 2], y_our[:, s:s * 2]) + assert_close(y_ref[:, s * 2:], y_our[:, s * 2:]) diff --git a/tests/lora/test_quant_model.py b/tests/lora/test_quant_model.py new file mode 100644 index 0000000..3d86a43 --- /dev/null +++ b/tests/lora/test_quant_model.py @@ -0,0 +1,179 @@ +# Adapted from +# https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/tests/lora/test_llama.py +from dataclasses import dataclass +from typing import List + +import pytest + +import vllm +from vllm.lora.request import LoRARequest + +from .conftest import cleanup + + +@dataclass +class ModelWithQuantization: + model_path: str + quantization: str + + +MODELS: List[ModelWithQuantization] = [ + 
ModelWithQuantization(model_path="TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ", + quantization="AWQ"), + ModelWithQuantization(model_path="TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ", + quantization="GPTQ"), +] + + +def do_sample(llm, lora_path: str, lora_id: int, max_tokens=256): + raw_prompts = [ + "Give me an orange-ish brown color", + "Give me a neon pink color", + ] + + def format_prompt_tuples(prompt): + return f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n" + + prompts = [format_prompt_tuples(p) for p in raw_prompts] + + sampling_params = vllm.SamplingParams(temperature=0, + max_tokens=max_tokens, + stop=["<|im_end|>"]) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. + generated_texts = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("tp_size", [1]) +def test_quant_model_lora(tinyllama_lora_files, model, tp_size): + # Cannot use as it will initialize torch.cuda too early... 
+ # if torch.cuda.device_count() < tp_size: + # pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") + + llm = vllm.LLM(model=model.model_path, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + max_model_len=400, + tensor_parallel_size=tp_size, + quantization=model.quantization, + trust_remote_code=True) + + if model.quantization is None: + expected_no_lora_output = [ + "Here are some examples of orange-brown colors", + "I'm sorry, I don't have" + ] + expected_lora_output = [ + "#ff8050", + "#ff8080", + ] + elif model.quantization == "AWQ": + expected_no_lora_output = [ + "I'm sorry, I don't understand", + "I'm sorry, I don't understand", + ] + expected_lora_output = [ + "#f07700: A v", + "#f00000: A v", + ] + elif model.quantization == "GPTQ": + expected_no_lora_output = [ + "I'm sorry, I don't have", + "I'm sorry, I don't have", + ] + expected_lora_output = [ + "#f08800: This is", + "#f07788 \n#", + ] + + def expect_match(output, expected_output): + # HACK: GPTQ lora outputs are just incredibly unstable. + # Assert that the outputs changed. 
+ if (model.quantization == "GPTQ" + and expected_output is expected_lora_output): + assert output != expected_no_lora_output + for i, o in enumerate(output): + assert o.startswith( + '#'), f"Expected example {i} to start with # but got {o}" + return + assert output == expected_output + + max_tokens = 10 + + print("lora adapter created") + output = do_sample(llm, + tinyllama_lora_files, + lora_id=0, + max_tokens=max_tokens) + expect_match(output, expected_no_lora_output) + + print("lora 1") + output = do_sample(llm, + tinyllama_lora_files, + lora_id=1, + max_tokens=max_tokens) + expect_match(output, expected_lora_output) + + print("no lora") + output = do_sample(llm, + tinyllama_lora_files, + lora_id=0, + max_tokens=max_tokens) + expect_match(output, expected_no_lora_output) + + print("lora 2") + output = do_sample(llm, + tinyllama_lora_files, + lora_id=2, + max_tokens=max_tokens) + expect_match(output, expected_lora_output) + + print("removing lora") + + del llm + cleanup() + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.skip("Requires multiple GPUs") +def test_quant_model_tp_equality(tinyllama_lora_files, model): + # Cannot use as it will initialize torch.cuda too early... 
+ # if torch.cuda.device_count() < 2: + # pytest.skip(f"Not enough GPUs for tensor parallelism {2}") + + llm_tp1 = vllm.LLM(model=model.model_path, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=1, + quantization=model.quantization, + trust_remote_code=True) + output_tp1 = do_sample(llm_tp1, tinyllama_lora_files, lora_id=1) + + del llm_tp1 + cleanup() + + llm_tp2 = vllm.LLM(model=model.model_path, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=2, + quantization=model.quantization) + output_tp2 = do_sample(llm_tp2, tinyllama_lora_files, lora_id=1) + + del llm_tp2 + cleanup() + + assert output_tp1 == output_tp2 diff --git a/tests/lora/test_tokenizer_group.py b/tests/lora/test_tokenizer_group.py new file mode 100644 index 0000000..2dcad23 --- /dev/null +++ b/tests/lora/test_tokenizer_group.py @@ -0,0 +1,55 @@ +import pytest +from transformers import AutoTokenizer, PreTrainedTokenizerBase + +from vllm.lora.request import LoRARequest +from vllm.transformers_utils.tokenizer import get_lora_tokenizer +from vllm.transformers_utils.tokenizer_group import get_tokenizer_group + +from ..conftest import get_tokenizer_pool_config + + +@pytest.mark.asyncio +@pytest.mark.parametrize("tokenizer_group_type", [None, "ray"]) +async def test_tokenizer_group_lora(sql_lora_files, tokenizer_group_type): + reference_tokenizer = AutoTokenizer.from_pretrained(sql_lora_files) + tokenizer_group = get_tokenizer_group( + get_tokenizer_pool_config(tokenizer_group_type), + tokenizer_id="gpt2", + enable_lora=True, + max_num_seqs=1, + max_input_length=None, + ) + lora_request = LoRARequest("1", 1, sql_lora_files) + assert reference_tokenizer.encode("prompt") == tokenizer_group.encode( + request_id="request_id", prompt="prompt", lora_request=lora_request) + assert reference_tokenizer.encode( + "prompt") == await tokenizer_group.encode_async( + request_id="request_id", + prompt="prompt", + lora_request=lora_request) + assert 
isinstance(tokenizer_group.get_lora_tokenizer(None), + PreTrainedTokenizerBase) + assert tokenizer_group.get_lora_tokenizer( + None) == await tokenizer_group.get_lora_tokenizer_async(None) + + assert isinstance(tokenizer_group.get_lora_tokenizer(lora_request), + PreTrainedTokenizerBase) + assert tokenizer_group.get_lora_tokenizer( + lora_request) != tokenizer_group.get_lora_tokenizer(None) + assert tokenizer_group.get_lora_tokenizer( + lora_request) == await tokenizer_group.get_lora_tokenizer_async( + lora_request) + + +def test_get_lora_tokenizer(sql_lora_files, tmpdir): + lora_request = None + tokenizer = get_lora_tokenizer(lora_request) + assert not tokenizer + + lora_request = LoRARequest("1", 1, sql_lora_files) + tokenizer = get_lora_tokenizer(lora_request) + assert tokenizer.get_added_vocab() + + lora_request = LoRARequest("1", 1, str(tmpdir)) + tokenizer = get_lora_tokenizer(lora_request) + assert not tokenizer diff --git a/tests/lora/test_utils.py b/tests/lora/test_utils.py new file mode 100644 index 0000000..892f608 --- /dev/null +++ b/tests/lora/test_utils.py @@ -0,0 +1,172 @@ +from collections import OrderedDict + +from torch import nn + +from vllm.lora.utils import parse_fine_tuned_lora_name, replace_submodule +from vllm.utils import LRUCache + + +def test_parse_fine_tuned_lora_name(): + fixture = { + ("base_model.model.lm_head.lora_A.weight", "lm_head", True), + ("base_model.model.lm_head.lora_B.weight", "lm_head", False), + ( + "base_model.model.model.embed_tokens.lora_embedding_A", + "model.embed_tokens", + True, + ), + ( + "base_model.model.model.embed_tokens.lora_embedding_B", + "model.embed_tokens", + False, + ), + ( + "base_model.model.model.layers.9.mlp.down_proj.lora_A.weight", + "model.layers.9.mlp.down_proj", + True, + ), + ( + "base_model.model.model.layers.9.mlp.down_proj.lora_B.weight", + "model.layers.9.mlp.down_proj", + False, + ), + } + for name, module_name, is_lora_a in fixture: + assert (module_name, is_lora_a) == 
parse_fine_tuned_lora_name(name) + + +def test_replace_submodule(): + model = nn.Sequential( + OrderedDict([ + ("dense1", nn.Linear(764, 100)), + ("act1", nn.ReLU()), + ("dense2", nn.Linear(100, 50)), + ( + "seq1", + nn.Sequential( + OrderedDict([ + ("dense1", nn.Linear(100, 10)), + ("dense2", nn.Linear(10, 50)), + ])), + ), + ("act2", nn.ReLU()), + ("output", nn.Linear(50, 10)), + ("outact", nn.Sigmoid()), + ])) + + sigmoid = nn.Sigmoid() + + replace_submodule(model, "act1", sigmoid) + assert dict(model.named_modules())["act1"] == sigmoid + + dense2 = nn.Linear(1, 5) + replace_submodule(model, "seq1.dense2", dense2) + assert dict(model.named_modules())["seq1.dense2"] == dense2 + + +class TestLRUCache(LRUCache): + + def _on_remove(self, key, value): + if not hasattr(self, "_remove_counter"): + self._remove_counter = 0 + self._remove_counter += 1 + + +def test_lru_cache(): + cache = TestLRUCache(3) + + cache.put(1, 1) + assert len(cache) == 1 + + cache.put(1, 1) + assert len(cache) == 1 + + cache.put(2, 2) + assert len(cache) == 2 + + cache.put(3, 3) + assert len(cache) == 3 + assert set(cache.cache) == {1, 2, 3} + + cache.put(4, 4) + assert len(cache) == 3 + assert set(cache.cache) == {2, 3, 4} + assert cache._remove_counter == 1 + assert cache.get(2) == 2 + + cache.put(5, 5) + assert set(cache.cache) == {2, 4, 5} + assert cache._remove_counter == 2 + + assert cache.pop(5) == 5 + assert len(cache) == 2 + assert set(cache.cache) == {2, 4} + assert cache._remove_counter == 3 + + cache.pop(10) + assert len(cache) == 2 + assert set(cache.cache) == {2, 4} + assert cache._remove_counter == 3 + + cache.get(10) + assert len(cache) == 2 + assert set(cache.cache) == {2, 4} + assert cache._remove_counter == 3 + + cache.put(6, 6) + assert len(cache) == 3 + assert set(cache.cache) == {2, 4, 6} + assert 2 in cache + assert 4 in cache + assert 6 in cache + + cache.remove_oldest() + assert len(cache) == 2 + assert set(cache.cache) == {2, 6} + assert cache._remove_counter == 4 + + 
cache.clear() + assert len(cache) == 0 + assert cache._remove_counter == 6 + + cache._remove_counter = 0 + + cache[1] = 1 + assert len(cache) == 1 + + cache[1] = 1 + assert len(cache) == 1 + + cache[2] = 2 + assert len(cache) == 2 + + cache[3] = 3 + assert len(cache) == 3 + assert set(cache.cache) == {1, 2, 3} + + cache[4] = 4 + assert len(cache) == 3 + assert set(cache.cache) == {2, 3, 4} + assert cache._remove_counter == 1 + assert cache[2] == 2 + + cache[5] = 5 + assert set(cache.cache) == {2, 4, 5} + assert cache._remove_counter == 2 + + del cache[5] + assert len(cache) == 2 + assert set(cache.cache) == {2, 4} + assert cache._remove_counter == 3 + + cache.pop(10) + assert len(cache) == 2 + assert set(cache.cache) == {2, 4} + assert cache._remove_counter == 3 + + cache[6] = 6 + assert len(cache) == 3 + assert set(cache.cache) == {2, 4, 6} + assert 2 in cache + assert 4 in cache + assert 6 in cache diff --git a/tests/lora/test_worker.py b/tests/lora/test_worker.py new file mode 100644 index 0000000..732e91a --- /dev/null +++ b/tests/lora/test_worker.py @@ -0,0 +1,69 @@ +import os +import random +import tempfile +from unittest.mock import patch + +from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig, + ModelConfig, ParallelConfig, SchedulerConfig) +from vllm.lora.models import LoRAMapping +from vllm.lora.request import LoRARequest +from vllm.worker.worker import Worker + + +@patch.dict(os.environ, {"RANK": "0"}) +def test_worker_apply_lora(sql_lora_files): + worker = Worker( + model_config=ModelConfig( + "meta-llama/Llama-2-7b-hf", + "meta-llama/Llama-2-7b-hf", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + ), + load_config=LoadConfig( + download_dir=None, + load_format="dummy", + ), + parallel_config=ParallelConfig(1, 1, False), + scheduler_config=SchedulerConfig(32, 32, 32), + device_config=DeviceConfig("cuda"), + cache_config=CacheConfig(block_size=16, + gpu_memory_utilization=1., + 
swap_space=0, + cache_dtype="auto"), + local_rank=0, + rank=0, + lora_config=LoRAConfig(max_lora_rank=8, max_cpu_loras=32, + max_loras=32), + distributed_init_method=f"file://{tempfile.mkstemp()[1]}", + ) + worker.init_device() + worker.load_model() + + worker.model_runner.set_active_loras([], LoRAMapping([], [])) + assert worker.list_loras() == set() + + n_loras = 32 + lora_requests = [ + LoRARequest(str(i + 1), i + 1, sql_lora_files) for i in range(n_loras) + ] + + worker.model_runner.set_active_loras(lora_requests, LoRAMapping([], [])) + assert worker.list_loras() == { + lora_request.lora_int_id + for lora_request in lora_requests + } + + for i in range(32): + random.seed(i) + iter_lora_requests = random.choices(lora_requests, + k=random.randint(1, n_loras)) + random.shuffle(iter_lora_requests) + iter_lora_requests = iter_lora_requests[:-random.randint(0, n_loras)] + worker.model_runner.set_active_loras(iter_lora_requests, + LoRAMapping([], [])) + assert worker.list_loras().issuperset( + {lora_request.lora_int_id + for lora_request in iter_lora_requests}) diff --git a/tests/lora/utils.py b/tests/lora/utils.py new file mode 100644 index 0000000..280e0f2 --- /dev/null +++ b/tests/lora/utils.py @@ -0,0 +1,88 @@ +from typing import List, Optional + +import torch + +from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights + + +class DummyLoRAManager: + + def __init__(self): + super().__init__() + self._loras = {} + + def set_module_lora(self, module_name: str, lora: LoRALayerWeights): + self._loras[module_name] = lora + + def get_module_lora(self, module_name: str) -> Optional[LoRALayerWeights]: + return self._loras.get(module_name, None) + + def init_random_lora(self, + module_name: str, + weight: torch.Tensor, + rank: int = 8, + generate_embeddings_tensor: int = 0): + lora = LoRALayerWeights( + module_name, + rank=rank, + lora_alpha=1, + lora_a=torch.rand([weight.shape[1], rank], + dtype=weight.dtype, + device="cuda"), + lora_b=torch.rand([rank, 
weight.shape[0]], + dtype=weight.dtype, + device="cuda"), + ) + if generate_embeddings_tensor: + lora.embeddings_tensor = torch.rand(5, + generate_embeddings_tensor, + dtype=weight.dtype, + device="cuda") + self.set_module_lora(module_name, lora) + + return lora + + def init_lora(self, + module_name: str, + input_dim: int, + output_dim: int, + rank=8, + noop=False, + embeddings_tensor=None): + lora = LoRALayerWeights( + module_name, + rank=rank, + lora_alpha=1, + lora_a=torch.rand([input_dim, rank], device="cuda"), + lora_b=torch.rand([rank, output_dim], device="cuda"), + embeddings_tensor=embeddings_tensor, + ) + self.set_module_lora(module_name, lora) + return lora + + def reset_lora(self): + self._loras = {} + + def init_packed_lora( + self, + module_name: str, + input_dim: int, + output_dims: List[int], + noop_lora_index: List[int] = None, + rank=8, + ): + base_loras = [] + noop_lora_index = set(noop_lora_index or []) + + for i, out_dim in enumerate(output_dims): + base_lora = self.init_lora( + module_name + "_000_" + str(i), + input_dim, + out_dim, + rank=rank, + noop=i in noop_lora_index, + ) + base_loras.append(base_lora) + packed_lora = PackedLoRALayerWeights.pack(base_loras) + self.set_module_lora(module_name, packed_lora) + return packed_lora diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py new file mode 100644 index 0000000..e0aa14f --- /dev/null +++ b/tests/metrics/test_metrics.py @@ -0,0 +1,194 @@ +from typing import List + +import pytest +from prometheus_client import REGISTRY + +from vllm import EngineArgs, LLMEngine +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.sampling_params import SamplingParams + +MODELS = [ + "facebook/opt-125m", +] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [128]) +def test_metric_counter_prompt_tokens( + vllm_runner, + example_prompts, + 
model: str, + dtype: str, + max_tokens: int, +) -> None: + vllm_model = vllm_runner(model, + dtype=dtype, + disable_log_stats=False, + gpu_memory_utilization=0.4) + tokenizer = vllm_model.model.get_tokenizer() + prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts] + # This test needs at least 2 prompts in a batch of different lengths to + # verify their token count is correct despite padding. + assert len(example_prompts) > 1, "at least 2 prompts are required" + assert prompt_token_counts[0] != prompt_token_counts[1], ( + "prompts of different lengths are required") + vllm_prompt_token_count = sum(prompt_token_counts) + + _ = vllm_model.generate_greedy(example_prompts, max_tokens) + stat_logger = vllm_model.model.llm_engine.stat_logger + metric_count = stat_logger.metrics.counter_prompt_tokens.labels( + **stat_logger.labels)._value.get() + + assert vllm_prompt_token_count == metric_count, ( + f"prompt token count: {vllm_prompt_token_count!r}\n" + f"metric: {metric_count!r}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [128]) +def test_metric_counter_generation_tokens( + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + vllm_model = vllm_runner(model, + dtype=dtype, + disable_log_stats=False, + gpu_memory_utilization=0.4) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + tokenizer = vllm_model.model.get_tokenizer() + stat_logger = vllm_model.model.llm_engine.stat_logger + metric_count = stat_logger.metrics.counter_generation_tokens.labels( + **stat_logger.labels)._value.get() + vllm_generation_count = 0 + for i in range(len(example_prompts)): + vllm_output_ids, vllm_output_str = vllm_outputs[i] + prompt_ids = tokenizer.encode(example_prompts[i]) + # vllm_output_ids contains both prompt tokens and generation tokens. + # We're interested only in the count of the generation tokens. 
+ vllm_generation_count += len(vllm_output_ids) - len(prompt_ids) + + assert vllm_generation_count == metric_count, ( + f"generation token count: {vllm_generation_count!r}\n" + f"metric: {metric_count!r}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize( + "served_model_name", + [None, [], ["ModelName0"], ["ModelName0", "ModelName1", "ModelName2"]]) +def test_metric_set_tag_model_name(vllm_runner, model: str, dtype: str, + served_model_name: List[str]) -> None: + vllm_model = vllm_runner(model, + dtype=dtype, + disable_log_stats=False, + gpu_memory_utilization=0.3, + served_model_name=served_model_name) + stat_logger = vllm_model.model.llm_engine.stat_logger + metrics_tag_content = stat_logger.labels["model_name"] + + del vllm_model + + if served_model_name is None or served_model_name == []: + assert metrics_tag_content == model, ( + f"Metrics tag model_name is wrong! expect: {model!r}\n" + f"actual: {metrics_tag_content!r}") + else: + assert metrics_tag_content == served_model_name[0], ( + f"Metrics tag model_name is wrong! 
expect: " + f"{served_model_name[0]!r}\n" + f"actual: {metrics_tag_content!r}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [4]) +@pytest.mark.parametrize("disable_log_stats", [True, False]) +@pytest.mark.asyncio +async def test_async_engine_log_metrics_regression( + example_prompts, + model: str, + dtype: str, + max_tokens: int, + disable_log_stats: bool, +) -> None: + """ + Regression test ensuring async engine generates metrics + when disable_log_stats=False + (see: https://github.com/vllm-project/vllm/pull/4150#pullrequestreview-2008176678) + """ + engine_args = AsyncEngineArgs(model=model, + dtype=dtype, + disable_log_stats=disable_log_stats) + async_engine = AsyncLLMEngine.from_engine_args(engine_args) + for i, prompt in enumerate(example_prompts): + results = async_engine.generate( + prompt, + SamplingParams(max_tokens=max_tokens), + f"request-id-{i}", + ) + # Exhaust the async iterator to make the async engine work + async for _ in results: + pass + + assert_metrics(async_engine.engine, disable_log_stats, + len(example_prompts)) + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [4]) +@pytest.mark.parametrize("disable_log_stats", [True, False]) +def test_engine_log_metrics_regression( + example_prompts, + model: str, + dtype: str, + max_tokens: int, + disable_log_stats: bool, +) -> None: + engine_args = EngineArgs(model=model, + dtype=dtype, + disable_log_stats=disable_log_stats) + engine = LLMEngine.from_engine_args(engine_args) + for i, prompt in enumerate(example_prompts): + engine.add_request( + f"request-id-{i}", + prompt, + SamplingParams(max_tokens=max_tokens), + ) + while engine.has_unfinished_requests(): + engine.step() + + assert_metrics(engine, disable_log_stats, len(example_prompts)) + + +def assert_metrics(engine: LLMEngine, disable_log_stats: bool, + num_requests: int) -> None: 
+ if disable_log_stats: + with pytest.raises(AttributeError): + _ = engine.stat_logger + else: + assert (engine.stat_logger + is not None), "engine.stat_logger should be set" + # Ensure the count bucket of request-level histogram metrics matches + # the number of requests as a simple sanity check to ensure metrics are + # generated + labels = {'model_name': engine.model_config.model} + request_histogram_metrics = [ + "vllm:e2e_request_latency_seconds", + "vllm:request_prompt_tokens", + "vllm:request_generation_tokens", + "vllm:request_params_best_of", + "vllm:request_params_n", + ] + for metric_name in request_histogram_metrics: + metric_value = REGISTRY.get_sample_value(f"{metric_name}_count", + labels) + assert ( + metric_value == num_requests), "Metrics should be collected" diff --git a/tests/model_executor/weight_utils.py b/tests/model_executor/weight_utils.py new file mode 100644 index 0000000..c8b9bed --- /dev/null +++ b/tests/model_executor/weight_utils.py @@ -0,0 +1,54 @@ +import os +import tempfile + +import huggingface_hub.constants +import pytest +from huggingface_hub.utils import LocalEntryNotFoundError + +from vllm.model_executor.model_loader.weight_utils import ( + download_weights_from_hf, enable_hf_transfer) + + +def test_hf_transfer_auto_activation(): + if "HF_HUB_ENABLE_HF_TRANSFER" in os.environ: + # in case it is already set, we can't test the auto activation + pytest.skip( + "HF_HUB_ENABLE_HF_TRANSFER is set, can't test auto activation") + enable_hf_transfer() + try: + # enable hf hub transfer if available + import hf_transfer # type: ignore # noqa + HF_TRANFER_ACTIVE = True + except ImportError: + HF_TRANFER_ACTIVE = False + assert (huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER == + HF_TRANFER_ACTIVE) + + +def test_download_weights_from_hf(): + with tempfile.TemporaryDirectory() as tmpdir: + # assert LocalEntryNotFoundError error is thrown + # if offline is set and model is not cached + huggingface_hub.constants.HF_HUB_OFFLINE = True + 
with pytest.raises(LocalEntryNotFoundError): + download_weights_from_hf("facebook/opt-125m", + allow_patterns=["*.safetensors", "*.bin"], + cache_dir=tmpdir) + + # download the model + huggingface_hub.constants.HF_HUB_OFFLINE = False + download_weights_from_hf("facebook/opt-125m", + allow_patterns=["*.safetensors", "*.bin"], + cache_dir=tmpdir) + + # now it should work offline + huggingface_hub.constants.HF_HUB_OFFLINE = True + assert download_weights_from_hf( + "facebook/opt-125m", + allow_patterns=["*.safetensors", "*.bin"], + cache_dir=tmpdir) is not None + + +if __name__ == "__main__": + test_hf_transfer_auto_activation() + test_download_weights_from_hf() diff --git a/tests/models/test_aqlm.py b/tests/models/test_aqlm.py new file mode 100644 index 0000000..a7abc01 --- /dev/null +++ b/tests/models/test_aqlm.py @@ -0,0 +1,95 @@ +"""Compare the outputs of a AQLM model between vLLM and HF Transformers + +Run `pytest tests/models/test_aqlm.py`. +""" + +import pytest +import torch + +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] +aqlm_not_supported = (capability < + QUANTIZATION_METHODS["aqlm"].get_min_capability()) + +# In this test we hardcode prompts and generations for the model so we don't +# need to require the AQLM package as a dependency +example_prompts = [ + 'vLLM is a high-throughput and memory-efficient inference and serving ' + 'engine for LLMs.\n', + 'Briefly describe the major milestones in the development of artificial ' + 'intelligence from 1950 to 2020.\n', + 'Compare and contrast artificial intelligence with human intelligence in ' + 'terms of processing information.\n', + 'Describe the basic components of a neural network and how it can be ' + 'trained.\n', + 'Write a short story about a robot that dreams for the first time.\n', + 'Analyze the impact of the COVID-19 pandemic on global economic structures ' + 'and future 
business models.\n', + 'Explain the cultural significance of the Mona Lisa painting, and how its ' + 'perception might vary in Western versus Eastern societies.\n', + "Translate the following English sentence into Japanese, French, and " + "Swahili: 'The early bird catches the worm.'\n" +] + +# These ground truth generations were generated using `transformers==4.38.1 +# aqlm==1.1.0 torch==2.2.0` +# and the below code: +# ```python +# from transformers import AutoTokenizer, AutoModelForCausalLM +# model_id = "ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf" +# quantized_model = AutoModelForCausalLM.from_pretrained(model_id, +# torch_dtype="auto", device_map="cuda").cuda() +# tokenizer = AutoTokenizer.from_pretrained(model_id) +# outputs = [] +# for prompt in example_prompts: +# input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda") +# hf_outputs = quantized_model.generate(input_ids, max_new_tokens=32) +# outputs.append(tokenizer.decode(hf_outputs[0][input_ids.shape[1]:])) +# print(outputs) +# ``` +ground_truth_generations = [ + '\n### Features\n\n- **High-throughput**: v', + 'The major milestones in the development of artificial intelligence from ' + '195', + 'Compare and contrast artificial intelligence with human intelligence in ' + 'terms of processing information. The', + 'Explain the difference between supervised and unsupervised learning.' + '\nExplain', + 'Write a short story about a robot that dreams for the first time. 
The', + 'Analyze the impact of the COVID-19 pandemic on global economic', + 'The Mona Lisa is a painting by Leonardo da Vinci, and it', + 'The early bird catches the worm.\nThe early bird catches the' +] + + +@pytest.mark.skipif(aqlm_not_supported, + reason="AQLM is not supported on this GPU type.") +@pytest.mark.parametrize("model", ["ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"]) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [16]) +@pytest.mark.parametrize("num_logprobs", [1]) +def test_models( + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + num_logprobs: int, +) -> None: + + vllm_model = vllm_runner(model, dtype=dtype) + vllm_outputs = vllm_model.generate_greedy_logprobs(example_prompts, + max_tokens, + num_logprobs) + + # loop through the prompts to compare against the ground truth generations + for prompt_idx in range(len(example_prompts)): + vllm_output_ids, vllm_output_str, vllm_logprobs = vllm_outputs[ + prompt_idx] + + print("Prompt: ", repr(example_prompts[prompt_idx])) + print("Reference output:", repr(ground_truth_generations[prompt_idx])) + print("Output output: ", repr(vllm_output_str)) + assert vllm_output_str == ground_truth_generations[prompt_idx] diff --git a/tests/models/test_big_models.py b/tests/models/test_big_models.py new file mode 100644 index 0000000..3dde498 --- /dev/null +++ b/tests/models/test_big_models.py @@ -0,0 +1,60 @@ +"""Compare the outputs of HF and vLLM when using greedy sampling. + +This tests bigger models and use half precision. + +Run `pytest tests/models/test_big_models.py`. 
+""" +import pytest + +MODELS = [ + "meta-llama/Llama-2-7b-hf", + # "mistralai/Mistral-7B-v0.1", # Broken + # "Deci/DeciLM-7b", # Broken + # "tiiuae/falcon-7b", # Broken + "EleutherAI/gpt-j-6b", + "mosaicml/mpt-7b", + # "Qwen/Qwen1.5-0.5B" # Broken, +] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [32]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner(model, dtype=dtype) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +def test_model_print( + vllm_runner, + model: str, + dtype: str, +) -> None: + vllm_model = vllm_runner(model, dtype=dtype) + # This test is for verifying whether the model's extra_repr + # can be printed correctly. + print(vllm_model.model.llm_engine.model_executor.driver_worker. + model_runner.model) + del vllm_model diff --git a/tests/models/test_fp8.py b/tests/models/test_fp8.py new file mode 100644 index 0000000..e87a178 --- /dev/null +++ b/tests/models/test_fp8.py @@ -0,0 +1,90 @@ +# flake8: noqa +"""Tests fp8 models against ground truth generation +Note: these tests will only pass on L4 GPU. 
+""" +import os + +import pytest +import torch +from transformers import AutoTokenizer + +from vllm import LLM, SamplingParams +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +os.environ["TOKENIZERS_PARALLELISM"] = "true" + +MAX_MODEL_LEN = 1024 + +MODELS = [ + "nm-testing/Meta-Llama-3-8B-Instruct-FP8", + "meta-llama/Meta-Llama-3-8B-Instruct", +] + +EXPECTED_STRS_MAP = { + "nm-testing/Meta-Llama-3-8B-Instruct-FP8": [ + 'LLaMA is a high-throughput and memory-efficient inference and serving engine for Large Language Models (', + 'Here are the major milestones in the development of artificial intelligence (AI) from 1950 to ', + 'Artificial intelligence (AI) and human intelligence (HI) differ significantly in how they process information.', + 'A neural network is a complex system modeled after the human brain, composed of interconnected nodes or "ne', + 'Zeta-5, a highly advanced robot designed for menial labor, whirred and beep', + 'The COVID-19 pandemic has had a profound impact on global economic structures and future business models. Here', + 'The Mona Lisa, painted by Leonardo da Vinci in the early 16th century, is one of', + 'Here are the translations:\n\n**Japanese:** (Haya tori, nemuri nemuri)\n\n**' + ], + "meta-llama/Meta-Llama-3-8B-Instruct": [ + 'LLM (Large Language Model) is a type of artificial intelligence (AI) model that is trained', + 'Here are the major milestones in the development of artificial intelligence (AI) from 1950 to ', + 'Artificial intelligence (AI) and human intelligence (HI) differ significantly in how they process information.', + 'A neural network is a complex system modeled after the human brain, composed of interconnected nodes or "ne', + 'In the year 2154, the robotics lab at NeuroSpark Industries was on the cusp of', + 'The COVID-19 pandemic has had a profound impact on global economic structures and future business models. 
The', + 'The Mona Lisa, painted by Leonardo da Vinci in the early 16th century, is one of', + 'Here are the translations:\n\n**Japanese:** (Haya aki wa mushi o tsukamu' + ], +} + +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] +fp8_not_supported = (capability < + QUANTIZATION_METHODS["fp8"].get_min_capability()) + + +@pytest.mark.skipif(fp8_not_supported, + reason="fp8 is not supported on this GPU type.") +@pytest.mark.parametrize("model_name", MODELS) +def test_models( + example_prompts, + model_name, +) -> None: + model = LLM(model=model_name, + max_model_len=MAX_MODEL_LEN, + enforce_eager=True, + quantization="fp8") + + tokenizer = AutoTokenizer.from_pretrained(model_name) + formatted_prompts = [ + tokenizer.apply_chat_template([{ + "role": "user", + "content": prompt + }], + tokenize=False, + add_generation_prompt=True) + for prompt in example_prompts + ] + + params = SamplingParams(max_tokens=20, temperature=0) + generations = [] + # Note: these need to be run 1 at a time due to numerical precision, + # since the expected strs were generated this way. + for prompt in formatted_prompts: + outputs = model.generate(prompt, params) + generations.append(outputs[0].outputs[0].text) + del model + + print(generations) + expected_strs = EXPECTED_STRS_MAP[model_name] + for i in range(len(example_prompts)): + generated_str = generations[i] + expected_str = expected_strs[i] + assert expected_str == generated_str, ( + f"Test{i}:\nExpected: {expected_str!r}\nvLLM: {generated_str!r}") diff --git a/tests/models/test_gptq_marlin.py b/tests/models/test_gptq_marlin.py new file mode 100644 index 0000000..4d73843 --- /dev/null +++ b/tests/models/test_gptq_marlin.py @@ -0,0 +1,98 @@ +"""Compares the outputs of gptq vs gptq_marlin +Note: GPTQ and Marlin do not have bitwise correctness. +As a result, in this test, we just confirm that the top selected tokens of the +Marlin/GPTQ models are in the top 3 selections of each other. 
+Note: Marlin internally uses locks to synchronize the threads. This can +result in very slight nondeterminism for Marlin. As a result, we re-run the test +up to 3 times to see if we pass. +Note: This test currently fails running with --forked with the following: + RuntimeError: Cannot re-initialize CUDA in forked subprocess. + To use CUDA with multiprocessing, you must use the 'spawn' start method +Run `pytest tests/models/test_gptq_marlin.py`. +""" +import os + +import pytest +import torch + +from tests.models.utils import check_logprobs_close +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +os.environ["TOKENIZERS_PARALLELISM"] = "true" + +MAX_MODEL_LEN = 1024 + +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] +gptq_marlin_not_supported = ( + capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability()) + +MODELS = [ + # act_order==False, group_size=channelwise + ("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"), + # act_order==False, group_size=128 + ("TheBloke/Llama-2-7B-GPTQ", "main"), + + # act_order==True, group_size=128 + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "main"), + # act_order==True, group_size=64 + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-4bit-64g-actorder_True"), + # act_order==True, group_size=32 + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-4bit-32g-actorder_True"), + + # 8-bit, act_order==True, group_size=channelwise + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit--1g-actorder_True"), + # 8-bit, act_order==True, group_size=128 + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-128g-actorder_True"), + # 8-bit, act_order==True, group_size=32 + ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-32g-actorder_True"), +] + + +@pytest.mark.flaky(reruns=2) +@pytest.mark.skipif(gptq_marlin_not_supported, + reason="gptq_marlin is not supported on this GPU type.") +@pytest.mark.parametrize("model", MODELS) 
+@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [32]) +@pytest.mark.parametrize("num_logprobs", [5]) +def test_models( + vllm_runner, + example_prompts, + model, + dtype: str, + max_tokens: int, + num_logprobs: int, +) -> None: + model_name, revision = model + + # Run marlin. + gptq_marlin_model = vllm_runner(model_name=model_name, + revision=revision, + dtype=dtype, + quantization="marlin", + max_model_len=MAX_MODEL_LEN, + tensor_parallel_size=1) + + gptq_marlin_outputs = gptq_marlin_model.generate_greedy_logprobs( + example_prompts, max_tokens, num_logprobs) + del gptq_marlin_model + + # Run gptq. + gptq_model = vllm_runner(model_name=model_name, + revision=revision, + dtype=dtype, + quantization="gptq", + max_model_len=MAX_MODEL_LEN, + tensor_parallel_size=1) + gptq_outputs = gptq_model.generate_greedy_logprobs(example_prompts, + max_tokens, + num_logprobs) + del gptq_model + + check_logprobs_close( + outputs_0_lst=gptq_outputs, + outputs_1_lst=gptq_marlin_outputs, + name_0="gptq", + name_1="gptq_marlin", + ) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py new file mode 100644 index 0000000..f86cd3f --- /dev/null +++ b/tests/models/test_llava.py @@ -0,0 +1,107 @@ +import gc +from dataclasses import fields +from enum import Enum +from typing import Dict, List, Tuple + +import pytest +import torch +from transformers import AutoTokenizer + +from vllm.config import VisionLanguageConfig + +model_and_vl_config = [ + ("llava-hf/llava-1.5-7b-hf", + VisionLanguageConfig( + image_input_type=VisionLanguageConfig.ImageInputType.PIXEL_VALUES, + image_feature_size=576, + image_token_id=32000, + image_input_shape=(1, 3, 336, 336))), + ("llava-hf/llava-1.5-7b-hf", + VisionLanguageConfig( + image_input_type=VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, + image_feature_size=576, + image_token_id=32000, + image_input_shape=(1, 576, 1024))) +] + + +def as_dict(vision_language_config: VisionLanguageConfig) -> Dict: + 
"""Flatten vision language config to pure args. + + Compatible with what llm entrypoint expects. + """ + result = {} + for field in fields(vision_language_config): + value = getattr(vision_language_config, field.name) + if isinstance(value, Enum): + result[field.name] = value.name.lower() + elif isinstance(value, tuple): + result[field.name] = ",".join([str(item) for item in value]) + else: + result[field.name] = value + return result + + +def sanitize_vllm_output(vllm_output: Tuple[List[int], str], + vision_language_config: VisionLanguageConfig, + model_id: str): + """Sanitize vllm output to be comparable with hf output. + The function reduces `input_ids` from 1, 32000, 32000, ..., 32000, + x1, x2, x3 ... to 1, 32000, x1, x2, x3 ... + It also reduces `output_str` from "bla" to "bla". + """ + tokenizer = AutoTokenizer.from_pretrained(model_id) + image_token_str = tokenizer.decode(vision_language_config.image_token_id) + image_token_str_len = len(image_token_str) + input_ids, output_str = vllm_output + sanitized_input_ids = input_ids[0:2] + input_ids[2 + vision_language_config + .image_feature_size - 1:] + sanitzied_output_str = output_str[vision_language_config. + image_feature_size * + image_token_str_len:] + return sanitized_input_ids, sanitzied_output_str + + +@pytest.mark.parametrize("worker_use_ray", [False]) +@pytest.mark.parametrize("model_and_config", model_and_vl_config) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [128]) +def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, + vllm_image_prompts, vllm_images, model_and_config: tuple, + dtype: str, max_tokens: int, worker_use_ray: bool) -> None: + """Inference result should be the same between hf and vllm. + + All the image fixtures for the test is under tests/images. + For huggingface runner, we provide the raw images as input. + For vllm runner, we provide image tensors and corresponding + vision language config as input. 
+ Note, the text input is also adjusted to abide by vllm contract. + The text output is sanitized to be able to compare with hf. + """ + model_id, vision_language_config = model_and_config + hf_model = hf_runner(model_id, dtype=dtype) + hf_outputs = hf_model.generate_greedy(hf_image_prompts, + max_tokens, + images=hf_images) + del hf_model + + vllm_model = vllm_runner(model_id, + dtype=dtype, + worker_use_ray=worker_use_ray, + **as_dict(vision_language_config)) + vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts, + max_tokens, + images=vllm_images) + del vllm_model + + gc.collect() + torch.cuda.empty_cache() + + for i in range(len(hf_image_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = sanitize_vllm_output( + vllm_outputs[i], vision_language_config, model_id) + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/models/test_marlin.py b/tests/models/test_marlin.py new file mode 100644 index 0000000..fa846d4 --- /dev/null +++ b/tests/models/test_marlin.py @@ -0,0 +1,78 @@ +"""Compare the outputs of a GPTQ model to a Marlin model. + +Note: GPTQ and Marlin do not have bitwise correctness. +As a result, in this test, we just confirm that the top selected tokens of the +Marlin/GPTQ models are in the top 3 selections of each other. + +Note: Marlin internally uses locks to synchronize the threads. This can +result in very slight nondeterminism for Marlin. As a result, we re-run the test +up to 3 times to see if we pass. + +Run `pytest tests/models/test_marlin.py`. 
+""" +from dataclasses import dataclass + +import pytest +import torch + +from tests.models.utils import check_logprobs_close +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] +marlin_not_supported = (capability < + QUANTIZATION_METHODS["marlin"].get_min_capability()) + + +@dataclass +class ModelPair: + model_marlin: str + model_gptq: str + + +model_pairs = [ + ModelPair(model_marlin="nm-testing/zephyr-beta-7b-marlin-g128", + model_gptq="nm-testing/zephyr-beta-7b-gptq-g128"), + ModelPair(model_marlin="robertgshaw2/zephyr-7b-beta-channelwise-marlin", + model_gptq="robertgshaw2/zephyr-7b-beta-channelwise-gptq"), + ModelPair(model_marlin="robertgshaw2/TinyLlama-1.1B-Chat-v1.0-g128-marlin", + model_gptq="robertgshaw2/TinyLlama-1.1B-Chat-v1.0-g128-gptq") +] + + +@pytest.mark.flaky(reruns=2) +@pytest.mark.skipif(marlin_not_supported, + reason="Marlin is not supported on this GPU type.") +@pytest.mark.parametrize("model_pair", model_pairs) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [32]) +@pytest.mark.parametrize("num_logprobs", [5]) +def test_models( + vllm_runner, + example_prompts, + model_pair: ModelPair, + dtype: str, + max_tokens: int, + num_logprobs: int, +) -> None: + marlin_model = vllm_runner(model_pair.model_marlin, + dtype=dtype, + quantization="marlin") + marlin_outputs = marlin_model.generate_greedy_logprobs( + example_prompts, max_tokens, num_logprobs) + del marlin_model + + gptq_model = vllm_runner(model_pair.model_gptq, + dtype=dtype, + quantization="gptq") + gptq_outputs = gptq_model.generate_greedy_logprobs(example_prompts, + max_tokens, + num_logprobs) + del gptq_model + + check_logprobs_close( + outputs_0_lst=gptq_outputs, + outputs_1_lst=marlin_outputs, + name_0="gptq", + name_1="marlin", + ) diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py new file mode 100644 
index 0000000..7aeff3a --- /dev/null +++ b/tests/models/test_mistral.py @@ -0,0 +1,40 @@ +"""Compare the outputs of HF and vLLM for Mistral models using greedy sampling. + +Run `pytest tests/models/test_mistral.py`. +""" +import pytest + +MODELS = [ + "mistralai/Mistral-7B-Instruct-v0.1", +] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["bfloat16"]) +@pytest.mark.parametrize("max_tokens", [128]) +@pytest.mark.skip( + "Two problems: 1. Failing correctness tests. 2. RuntimeError: expected " + "scalar type BFloat16 but found Half (only in CI).") +def test_models( + hf_runner, + vllm_runner, + example_long_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_long_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner(model, dtype=dtype) + vllm_outputs = vllm_model.generate_greedy(example_long_prompts, max_tokens) + del vllm_model + + for i in range(len(example_long_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") diff --git a/tests/models/test_models.py b/tests/models/test_models.py new file mode 100644 index 0000000..e460962 --- /dev/null +++ b/tests/models/test_models.py @@ -0,0 +1,66 @@ +"""Compare the outputs of HF and vLLM when using greedy sampling. + +This test only tests small models. Big models such as 7B should be tested from +test_big_models.py because it could use a larger instance to run tests. + +Run `pytest tests/models/test_models.py`. +""" +import pytest + +MODELS = [ + "facebook/opt-125m", + "gpt2", + "bigcode/tiny_starcoder_py", + "EleutherAI/pythia-70m", + "bigscience/bloom-560m", # Testing alibi slopes. 
+ "microsoft/phi-2", + "stabilityai/stablelm-3b-4e1t", + # "allenai/OLMo-1B", # Broken + "bigcode/starcoder2-3b", +] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [96]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + # To pass the small model tests, we need full precision. + assert dtype == "float" + + hf_model = hf_runner(model, dtype=dtype) + hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) + del hf_model + + vllm_model = vllm_runner(model, dtype=dtype) + vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + for i in range(len(example_prompts)): + hf_output_ids, hf_output_str = hf_outputs[i] + vllm_output_ids, vllm_output_str = vllm_outputs[i] + assert hf_output_str == vllm_output_str, ( + f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") + assert hf_output_ids == vllm_output_ids, ( + f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +def test_model_print( + vllm_runner, + model: str, + dtype: str, +) -> None: + vllm_model = vllm_runner(model, dtype=dtype) + # This test is for verifying whether the model's extra_repr + # can be printed correctly. + print(vllm_model.model.llm_engine.model_executor.driver_worker. 
+ model_runner.model) + del vllm_model diff --git a/tests/models/test_oot_registration.py b/tests/models/test_oot_registration.py new file mode 100644 index 0000000..50ab066 --- /dev/null +++ b/tests/models/test_oot_registration.py @@ -0,0 +1,32 @@ +import torch + +from vllm import LLM, ModelRegistry, SamplingParams +from vllm.model_executor.models.opt import OPTForCausalLM +from vllm.model_executor.sampling_metadata import SamplingMetadata + + +class MyOPTForCausalLM(OPTForCausalLM): + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + # this dummy model always predicts the first token + logits = super().compute_logits(hidden_states, sampling_metadata) + logits.zero_() + logits[:, 0] += 1.0 + return logits + + +def test_oot_registration(): + # register our dummy model + ModelRegistry.register_model("OPTForCausalLM", MyOPTForCausalLM) + prompts = ["Hello, my name is", "The text does not matter"] + sampling_params = SamplingParams(temperature=0) + llm = LLM(model="facebook/opt-125m") + first_token = llm.get_tokenizer().decode(0) + outputs = llm.generate(prompts, sampling_params) + + for output in outputs: + generated_text = output.outputs[0].text + # make sure only the first token is generated + rest = generated_text.replace(first_token, "") + assert rest == "" diff --git a/tests/models/utils.py b/tests/models/utils.py new file mode 100644 index 0000000..3e49dfb --- /dev/null +++ b/tests/models/utils.py @@ -0,0 +1,29 @@ +def check_logprobs_close(outputs_0_lst, outputs_1_lst, name_0, name_1): + """Compare the logprobs of two sequences generated by different models, + which should be similar but not necessarily equal. + """ + # Loop through responses to each prompt. + for prompt_idx, (outputs_0, + outputs_1) in enumerate(zip(outputs_0_lst, + outputs_1_lst)): + output_ids_0, output_str_0, logprobs_0 = outputs_0 + output_ids_1, output_str_1, logprobs_1 = outputs_1 + + # Loop through generated tokens. 
+ for idx, (output_id_0, + output_id_1) in enumerate(zip(output_ids_0, output_ids_1)): + + # If generated tokens don't match, then + if output_id_0 != output_id_1: + # Each predicted token must be in top N logprobs of the other + assert output_id_0 in logprobs_1[idx], ( + f"Test{prompt_idx}:" + f"\n{name_0}:\t{output_str_0!r}" + f"\n{name_1}:\t{output_str_1!r}") + assert output_id_1 in logprobs_0[idx], ( + f"Test{prompt_idx}:" + f"\n{name_0}:\t{output_str_0!r}" + f"\n{name_1}:\t{output_str_1!r}") + + # Break out since sequences will now diverge. + break diff --git a/tests/prefix_caching/test_prefix_caching.py b/tests/prefix_caching/test_prefix_caching.py new file mode 100644 index 0000000..305596e --- /dev/null +++ b/tests/prefix_caching/test_prefix_caching.py @@ -0,0 +1,75 @@ +"""Compare the with and without prefix caching. + +Run `pytest tests/prefix_caching/test_prefix_caching.py`. +""" +import pytest + +from vllm.core.block_manager_v1 import CachedBlockAllocator +from vllm.utils import Device + + +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("num_blocks", [16]) +def test_block_allocator( + block_size: int, + num_blocks: int, +): + block_hash = 1 + block_allocator = CachedBlockAllocator(Device.CPU, block_size, num_blocks) + + # Allocate two PysicalTokenBlocks with the same hash and check + # that they are the same PhysicalTokenBlock + first_block = block_allocator.allocate(block_hash, 0) + second_block = block_allocator.allocate(block_hash, 0) + assert (first_block == second_block) + assert (second_block.ref_count == 2) + + # Free the first_block and confirm that the ref_count is correctly + # decremented on the second block + block_allocator.free(first_block) + assert (second_block.ref_count == 1) + + # Free the second block + block_allocator.free(second_block) + + # Reallocate the first block and confirm that, even after the block + # had its ref_count go to 0, we still get the same block back + first_block = 
@pytest.mark.parametrize("num_blocks", [16])
def test_eviction(num_blocks: int, ):
    """Freed blocks must be reused in LRU order, and re-allocating a cached
    block must pull it back off the free list."""
    block_size = 16
    allocator = CachedBlockAllocator(Device.CPU, block_size, num_blocks)

    # Fill the allocator completely, using the index as each block's hash.
    blocks = [allocator.allocate(i, 0) for i in range(num_blocks)]

    # Return every block to the free list, oldest first.
    for blk in blocks:
        allocator.free(blk)

    # A fresh allocation must evict the least recently used block: blocks[0].
    evicting_hash = block_size
    evicted = allocator.allocate(evicting_hash, 0)
    assert evicted == blocks[0]
    assert evicted.block_hash == evicting_hash

    # Re-allocate hash 1 so blocks[1] leaves the free list again.
    realloc_hash = 1
    realloc_block = allocator.allocate(realloc_hash, 0)
    assert realloc_block == blocks[realloc_hash]
    assert realloc_block.block_hash == realloc_hash

    # The next allocation must skip the (in-use) re-allocated block and take
    # the following LRU candidate, which sits at block_number 2.
    next_hash = block_size + 1
    next_block = allocator.allocate(next_hash, 0)
    assert realloc_block != next_block
    assert next_block.block_hash == next_hash
    assert next_block.block_number == 2
+Write a short story about a robot that dreams for the first time. +Analyze the impact of the COVID-19 pandemic on global economic structures and future business models. +Explain the cultural significance of the Mona Lisa painting, and how its perception might vary in Western versus Eastern societies. +Translate the following English sentence into Japanese, French, and Swahili: 'The early bird catches the worm.' diff --git a/tests/prompts/summary.txt b/tests/prompts/summary.txt new file mode 100644 index 0000000..2f947a2 --- /dev/null +++ b/tests/prompts/summary.txt @@ -0,0 +1 @@ +Subtitles: for our annual races at Knockhill Circuit.Today\'s racing comes from the Porsche Carrera Cup Great Britainand the Legends Cars Elite Cup with JLM.It\'s the latter who get us underway with their first race of the day,and joining me in the commentary box is Paul O\'Neill.First race of the day for the Legends.Jonty Norman has drawn pole position,with Matt Knight alongside.Marcus Pett on Row 2 with Daniel Pooley.Declan Burke is next up, and then Tyler Read, on Row 3.He\'s leading the rookie championship at the moment.Chris Needham on Row 4 with Luke Simmons.Andrew Rogerson and Gareth Sheridan on Row 5.Sixth row, Peter Barrable, with Charlie Budd.Row 7, Jack Parker, fourth in the championship right now.Nick Price is next to him.Will Gibson, who looks like he\'s out of the championship contention now,with Oli Schlup alongside.Then Ben McNeice and Flight Lieutenant Matt Isherwood.Robert Barrable, championship leader, he\'s on Row 10.Then Brent Bowie from Kieran Beattie and Nick Bridgeman.Mike Schlup on Row 12, followed by Ryan McLeish,who won the day overall yesterday.Mark Beaty, Row 13, with Andy Bird.Then it\'s Ben Higgins and Nathan Anthony.Connor Mills and Paul Musselle complete Row 15.And completing the grid is James Newbery.Here we go, with Race number 1 of the day,the final day of the first ever Legends Cars Elite Cup with JLM.And on the front row, it\'s Jonty Norman in 
grey,Matt Knight in black and gold.Coming from third place on the grid is Marcus Pett,who goes left of shot in the gunmetal carto challenge for the lead.Marcus Pett, the man from Boston in Lincolnshire,goes through into lead position.Very definitely a fancied championship runnerbut hasn\'t quite had the rub of the green this weekend.And they all pile into McIntyre\'s for the first time.And this is where we look for driving standards.James Newbery brakes at the back.He\'s got Paul Musselle immediately in front of him.Those two had an interesting battle yesterdayinvolving a little bit of contact, I think,but they\'re both all right at the moment, as they clear the chicane for the first time.Marcus Pett is away.The difference you\'ll see in Legends Cars racing todayis that for this meeting,the bump drafting that we\'ve seen in the pasthas been ruled out for this round,and it\'s under review for the future.But look at the battle for second position, three wide,as Marcus Pett comes in front of the crowds here.Matt Knight on the inside, Dan Pooley on the outside in 32.Dan Pooley challenging for third. He had a strong day yesterday -he was up in the top ten, which was great to see.The man from March.That third car there, eclipsed at the moment,comes out of the slipstream.Dan repaired his own car after Croft,and that of Kieran Beaty,so I know Kieran wanted to thank him for that. He\'s been working hard.And Pooley side by side with Matt Knight.We\'ve got the 13, Chris Needham car, up there in the mix as well.The three top guys in the...Ryan McLeish getting very sideways there,the Scot in the 71 car.The first time we\'ve seen him on our ITV coverage.He\'s not a guest driver this week.I suppose you could technically call him a guest,but he\'s fully championship registeredand took a splendid win yesterday - overall win and race win.Overall on points.Sorry, Paul, gets a chance to get you in.That\'s Jack Parker!Oh, what\'s happened there?So, this was the start. 
They\'re all still warming the tyres up,ready for the lights to go green,which they do... around about now.And they get going.And then there was a car, wasn\'t there?Oh, I tell you what, that could\'ve ended up really nastyas it snaked up the grass.Yeah, I\'ll tell you what, the moment when the lights went outwas when Marcus Pett broke ranks.That was a very, very meticulous start from Marcus Pett.The blue car here is Tyler Read, top rookie,who looks like he\'s going down the inside of Daniel Pooley,so he\'s gonna make a space here.So, Dan Pooley has lost second position.It\'s Marcus Pett still out front. Matt Knight...I was saying to the drivers,"Don\'t go away if you\'re in the lead because you won\'t get any coverage." Pett\'s down the road, isn\'t he? Look at the gap he\'s got. Yeah.He\'s got three seconds. It\'s gonna be more than that.What I was quite concerned about was the damp part of the circuitdown at the hairpin, where you need to be down the inside of peopleto get the braking done,but these guys seem to be all respecting...Not track limits, but they\'re respecting each other around usbecause I was quite concerned about coming here,but this is quite synonymous with Legends racing at Knockhill.And look at this now. Knight has got...Look at that. I remember Marcus getting his first race win,which was at Snetterton years ago.It\'s always fantastic to see a first-time winner.And Tyler Read is giving him a great workout.Matt Knight back in third.It\'s between the top two at the moment. Oh! Tyler goes wide.He\'s throwing the car around.Marcus Pett, looking a little bit smoother in the 79,was very frustrated yesterday, but Read\'s all over him.Yeah, but look at this now.You\'ve got third, fourth, fifth and sixth.This is gonna be absolutely spectacular!Tyler Read\'s gone! What\'s gone on?!Oh, has the Treherne engine gone pop? He\'s lost a lot of ground.Is he gonna come back into it?Now it\'s Knight having a go on the outside line again.Matt Knight can\'t do it. 
He runs out wide.Oli Schlup\'s coming through.Schlup hasn\'t had a win yet in Legends cars, so he\'s queueing up.They\'re coming onto the last lap.This could be a key moment for Oli Schlup,who\'s back in third in the K-Seal car.Across the line.Marcus Pett soaking up the pressure brilliantly so far.But does he need to be in front as they come onto the last lap?I don\'t know, but I think Read must have missed a gear,as someone\'s exited stage left.Look at that, back in the mix!It\'s now six for the lead. Can Pett hold on?Championship leader Robert Barrablehas come through from about three rows from the back,and he\'s at the back of the train.Barrable here is gonna extend his championship leadand start towards the front of the grid for Race 2.Barrable, the Irishman, he\'s there.The white car with the green and orange stripeson the nose cone of the car.But it\'s Marcus Pett out front at the moment... Oh!Matt Isherwood\'s rejoined at the back in the black and green.Isherwood\'s got back at them. Matt Knight\'s having a go.Along Railway Straight.Schlup would normally bump draft him. He can\'t do that on the rules.But look at Marcus Pett.Fairly wide-ish line in. Good defensive stuff from Pett.It\'s all about the run up to the hill now.And Marcus Pett is gonna take the win, I think.Here they come, up towards the line. Pett from Matt Knight.It\'s gonna be Matt\'s best resultin the Legends Cars National Championship.Third position goes to Oli Schlup, who is delighted with that.Then it was Tyler Read. Great race from him.Robert Barrable, though...Barrable, from 19th on the grid, without bump drafting,comes through into fifth placeahead of the excellent recovery from Flight Lieutenant Matt Isherwood.Dan Pooley seventh. 
Another great result for Dan Pooley.So much to take away from those last racing laps.Oh, and those last four lapsis exactly why we have these Legends on the TOCA package.That was exceptional.Marcus Pett looked like a dead cert not to finish first,but congratulations to you. That was brilliant.But Barrable, after exiting stage leftwhen he caught the back of everybody and got right up there...There\'s too much to talk about. Let\'s just talk about this guy.Pett, you are a legend, mate. Well done.Cracking. It is a lad and dad.Literally, Marcus and his dad, Robert, they look after the car.It is lad and dad. We hear that mentioned in other formulas,but genuinely, that is all it is.It is very difficult for drivers like that and teams like thatto come and race on this stage.It is a big thing. And he\'s such a smashing guy.And his dad as well. Really delighted with the win.Super stuff by Matt Knight. brilliant from Oli Schlup.Fantastic as well from Tyler Read.And on the front row,it\'s Jonty Norman in grey, Matt Knight in black and gold.Coming from third place on the grid is Marcus Pett.Bit of a shemozzle at the back.Two cars hooked up, which is not good to see.Oh, has the Treherne engine gone pop? He\'s lost a lot of ground.Now it\'s Knight having a go on the outside line again.Matt Knight can\'t do it. He runs out wide.Oli Schlup\'s coming through.And Marcus Pett is gonna take the win, I think. Pett from Matt Knight. 
It\'s gonna be Matt\'s best resultin the Legends Cars National Championship.Here\'s how they finished.Marcus Pett takes another win in the Legends Cars Elite Cup with JLM.READS INFOREADS INFOREADS INFOREADS INFOREADS INFOREADS INFOProblems in that race for Ryan McLeish, yesterday\'s winner.Charlie Budd in 30th.And the other driver having problems, obviously,from that first stoppage, Brent Bowie.Marcus, that was a tough racebecause there was a red flag in the middle of it.Actually, the first bit, you got away,but it was a full reset,and pressure throughout to the chequered flag.Yeah, definitely.We had an ideal start and managed to build up a lead early on,which was great, but when you\'re in that position,the last thing you want to see is a red flag. iming line at the end of lap one.So, Gus Burton leads the way.Big, big dive by Foster on the inside,to go back ahead of Wylie.He goes off the road and back on again.He\'s all sideways.And diving up on the outside line comes Ryan Ratcliffe.Wylie here battling with one of the Pro category cars,but behind him, all the Pro-Am opposition crawling all over him.Well, that was dramatic stuff, wasn\'t it?Round the outside of Turn 1, put Harry Foster in the wrong place.That was Max Bird going wide, number 44, the pink and blue car.So that\'s just haemorrhaged places in Pro-Am.And he\'s the... Oh, a puncture.There\'s somebody with a puncture. Is that Angus Whiteside? Possibly.Let\'s see.I think it is. 
And you\'ve got this damp patch on the inside,on the braking there, just at the final into the hairpin.This has been a dramatic start to this race for Porsches.Absolutely right.Coming up over the timing line, Gus Burton leads the way.Nine tenths of a second to the good.Big effort being made by Jason Lockwoodin the yellow and orange car in the background, look,to try to get up the inside line, then diving down towards Turn 1.Goes ahead of Oliver White, the very experienced Formula 4 champion.In the silver car, Oliver White, back into Carrera Cup.Remember, he did a full season last year.Good to have him back on the grid.As the cars clamber their way up over the kerb,through the chicane.But Gus Burton saying to everybody, "I\'m back." He leads.Yeah, a dramatic way for Gus Burton to come back to this championship.Remember, he started this year with Century Motorsport but then ducked out of the championship prior to Thruxton.He\'s still competing in the Supercup series with Fach Auto.As there in the pits, getting a new rear left tyre, is Angus Whiteside.But Gus Burton absolutely on it.Very quick in testing here during the week.They tested on Wednesday and on Friday.Gus Burton very quick in...And he\'s really enjoying life now.Back in the championship with the NAPA Racing UK supportand with a different team, Nick Tandy\'s JTR outfit.And he\'s done the fastest lap of the race, as he leads.He is not in the championship fight, but he wants to win races.Car off. It\'s Max Bird again.So, Max Bird, the Pro-Am championship leader,three times a winner in class this year,off the road and back on again.But that\'s gonna throw him way, way down the order.This race is going from bad to worse for him.It\'s just completely unfolded for poor Max Bird.That\'s the curse of having our camera on board, I think,but it\'s just unravelled after a great qualifying.Now, you were talking about Gus Burton\'s start,and it is going to be investigated after the race.OK. 
Well, it\'ll take a lot of camera action analysisto look at it. This is on board with Bird.Round Turn 1.All OK there. Very close... Goes to the outside.That\'s dangerous cos you can get knocked wide,and that\'s exactly what happens.The man he was trying to get past, Josh Stanton,who spent last night trackside at Cowdenbeath watching stock cars.I\'m not suggesting for a moment he\'s learnt how to defend,but he was enjoying himself, watching a different form of racing.I think all the best people were at Cowdenbeath, weren\'t they?Nick Tandy was, and others. Oh!As there, absolutely on the giddy limit, is Harry Foster,making his way in sixth place.Down towards the hairpin.He\'s dropped back from that leading quintet,but he\'s keeping Ross Wylie at bay.Ross Wylie, there, creeping into shot, leads now Pro-Amahead of Ryan Ratcliffe.And Josh Stanton is third in Pro-Am, last year\'s Am champion.Yeah, and Ross Wylie the only Scottish driver in the race. A lot of support for him,from local sponsors as well as the public.Buoyed by his recent run at the British Grand Prix at Supercup,and thoroughly loving racing at his home circuit, Ross Wylie.Track is nicely dry.There was some threats of possible rain.We had rain yesterday during qualifying.They actually only got one runon their slick tyres yesterday in qualifyingbefore the rain arrived, and that set the grid.So, Gus Burton\'s lead growing all the time.1.3 seconds now, that margin over Adam Smalley.As Max Bird tries to fight back in Pro-Am.Gets up the inside line there.So, that puts him ahead of David Stirling.So, he\'s split the second and third Am fightas he tries to recover.Yeah, but he\'s lost a lot of ground with that momenton the outside of McIntyre\'s.It\'s getting a lot darker overhead at Knockhill,even though there is a break in the cloud.A big effort there from the lapped car of Angus Whiteside.He\'s not fighting for position, he\'s trying to unlap himself.But just wonder whether we might get so f the right of 
McIntyre\'s,up towards Butcher\'s, then the chicane.And looking to try and maintain this 100% recordin the Team Parker Racing-run car in Am.Yeah. David Fairbrother in second place,but some 11 seconds behind in the Am category.But he will take another podium.His second in the championship, too, Justin Sherwood.The race leader 2.5 seconds to the good, Gus Burton.Other battles still to be resolved.What\'s going on in Pro-Am? Ross Wylie leads.He\'s fallen back behind Josh Malin overall. That was the move.Josh Malin through on the inside at the hairpin.Ross Wylie, in a sense, content to let that happen - gave him room -because that\'s not his battle, but what it does meanis that Ryan Ratcliffe, his class rival,is directly behind him.This is William Aspin versus Max Bird for sixth in Pro-Am.And a very determined Max Bird goes one side, get his nose chopped off.Will Aspin, the man from Florence, defends on the other side.They\'re absolutely together, almost touching.Here comes Max Bird.Oh, but he can\'t find a way through there.Angus Whiteside is now getting in on the act.Round the outside goes Max Bird, but they both take it wide,and through goes Angus Whiteside on the inside.Doesn\'t affect the race order.Whiteside unlaps himself from those two cars. Will Aspin stays ahead. Max Bird tries to fight back.Down towards Duffus Dip.Ignore the car in the lead of this battle packbecause it\'s not on the lead lap.But then Aspin under attack.Max Bird tries to get up alongside himfor the inside line coming into McIntyre\'s.He is on the inside, and he is ahead now.Yeah. And behind him, there was a car completely off on the grassafter Turn 1.So I do think that section of the track is a little slippery,for whatever reason. 
Maybe it just hasn\'t quite dried out.But this was a great battle between Max Bird and Will Aspin.So, drivers, in one or two cases,setting personal best lap times last time around,suggesting that the road is drying still.The cars are getting lighter on fuel anyway.Down at the hairpin comes the recovering Max Bird,as over the line goes Harry Foster, being chased by Josh Malin.Josh up into seventh overall.A top six could be on - he\'s only half a second back.Yeah, it\'s not far away, is it?And still plenty of laps left in this race.You probably noticed through that Turn 1the drivers are not riding the big kerb on the inside.That\'s because it\'s a new kerb that\'s been put in, actually,to raise the level of the kerbback to the level it was before the track got resurfaced twice.But with the resurfacing twice,it had raised the track surface by 80mm,and the drivers found they were, in previous years,able to use that kerb.Now? Not so much.So, there going through is Oliver Wight in the silver car,down towards the hairpin.Jason Lockwood ahead of him.Jason for EXCELR8, and he is running in 12 at the moment,which is potentially going to be his best finish of the year.It\'s been a tough season for Jason,but he could be on for his best results thus far.However, Gus Burton has rather dominated this,and look at the gap that he\'s pulled.Adam Smalley, as we suggested earlier,might be thinking about banking points,but it doesn\'t look as though he\'s been able to do anything at allabout that JTR car ahead.No. In terms of pure speed,he hasn\'t been able to threaten Gus Burton at all, has he? 
@dataclass
class ModelPair:
    """Pairs a Marlin-serialized checkpoint with its GPTQ counterpart."""

    # Hugging Face model id of the Marlin-format serialization.
    model_marlin: str
    # Hugging Face model id of the GPTQ/exllama-format serialization.
    model_gptq: str
+ ("neuralmagic/TinyLlama-1.1B-Chat-v1.0-marlin", None, "marlin"), + ("neuralmagic/TinyLlama-1.1B-Chat-v1.0-marlin", "marlin", "marlin"), + ("neuralmagic/TinyLlama-1.1B-Chat-v1.0-marlin", "gptq", "marlin"), + ("neuralmagic/TinyLlama-1.1B-Chat-v1.0-marlin", "awq", "ERROR"), + # Model Serialized in Exllama Format. + ("TheBloke/Llama-2-7B-Chat-GPTQ", None, "gptq_marlin"), + ("TheBloke/Llama-2-7B-Chat-GPTQ", "marlin", "gptq_marlin"), + ("TheBloke/Llama-2-7B-Chat-GPTQ", "gptq", "gptq"), + ("TheBloke/Llama-2-7B-Chat-GPTQ", "awq", "ERROR"), + # compat: autogptq >=0.8.0 use checkpoint_format: str + # Model Serialized in Marlin Format should always use Marlin kernel. + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-Marlin-4bit", None, "marlin"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-Marlin-4bit", "marlin", "marlin"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-Marlin-4bit", "gptq", "marlin"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-Marlin-4bit", "awq", "ERROR"), + # Model Serialized in Exllama Format. + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-4bit", None, "gptq_marlin"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-4bit", "marlin", "gptq_marlin"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-4bit", "gptq", "gptq"), + ("LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-4bit", "awq", "ERROR"), + + # AUTOAWQ + ("TheBloke/OpenHermes-2.5-Mistral-7B-AWQ", None, "awq"), + ("TheBloke/OpenHermes-2.5-Mistral-7B-AWQ", "awq", "awq"), + ("TheBloke/OpenHermes-2.5-Mistral-7B-AWQ", "marlin", "ERROR"), + ("TheBloke/OpenHermes-2.5-Mistral-7B-AWQ", "gptq", "ERROR"), +] + + +@pytest.mark.parametrize("model_arg_exptype", MODEL_ARG_EXPTYPES) +def test_auto_gptq(model_arg_exptype: str) -> None: + model_path, quantization_arg, expected_type = model_arg_exptype + + try: + model_config = ModelConfig(model_path, + model_path, + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + quantization=quantization_arg) + found_quantization_type = model_config.quantization + except ValueError: 
# Encode the GPU's compute capability as major*10 + minor for comparison
# against each quantization method's minimum requirement.
major, minor = torch.cuda.get_device_capability()
capability = major * 10 + minor


@pytest.mark.skipif(
    capability < QUANTIZATION_METHODS["fp8"].get_min_capability(),
    reason="FP8 is not supported on this GPU type.")
def test_load_fp16_model(vllm_runner) -> None:
    """Loading an fp16 checkpoint with quantization="fp8" must install the
    FP8 linear method and store the weights as float8_e4m3fn."""
    llm = vllm_runner("facebook/opt-125m", quantization="fp8")

    engine_model = (
        llm.model.llm_engine.model_executor.driver_worker.model_runner.model)
    first_fc1 = engine_model.model.decoder.layers[0].fc1
    assert isinstance(first_fc1.quant_method, Fp8LinearMethod)
    assert first_fc1.weight.dtype == torch.float8_e4m3fn
MAX_TOKENS = [128]
BEAM_WIDTHS = [4]
MODELS = ["facebook/opt-125m"]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", MAX_TOKENS)
@pytest.mark.parametrize("beam_width", BEAM_WIDTHS)
def test_beam_search_single_input(
    hf_runner,
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    beam_width: int,
) -> None:
    """HF and vLLM beam search must produce identical token-id sequences."""
    example_prompts = example_prompts[:1]

    # Generate the HF reference first and release the model before vLLM loads.
    hf_model = hf_runner(model, dtype=dtype)
    hf_outputs = hf_model.generate_beam_search(example_prompts, beam_width,
                                               max_tokens)
    del hf_model

    vllm_model = vllm_runner(model, dtype=dtype)
    vllm_outputs = vllm_model.generate_beam_search(example_prompts, beam_width,
                                                   max_tokens)
    del vllm_model
    # NOTE(woosuk): For some reason, the following GC is required to avoid
    # GPU OOM errors in the following tests using `vllm_runner`.
    gc.collect()
    torch.cuda.empty_cache()

    for prompt_idx, (hf_out, vllm_out) in enumerate(
            zip(hf_outputs, vllm_outputs)):
        hf_ids, _ = hf_out
        vllm_ids, _ = vllm_out
        assert len(hf_ids) == len(vllm_ids)
        for pos, (hf_id, vllm_id) in enumerate(zip(hf_ids, vllm_ids)):
            assert hf_id == vllm_id, (
                f"Test{prompt_idx} output{pos}:\nHF: {hf_ids}\n"
                f"vLLM: {vllm_ids}")
+""" + +import pytest + +from vllm import SamplingParams + +MODELS = ["facebook/opt-125m"] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +@pytest.mark.parametrize("max_tokens", [1024]) +def test_beam_search_single_input( + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, +) -> None: + example_prompts = "1 + 1 is" + + vllm_model = vllm_runner(model, dtype=dtype) + sampling_params = SamplingParams(max_tokens=max_tokens, ignore_eos=True) + ignore_eos_output = vllm_model.model.generate( + example_prompts, sampling_params=sampling_params) + print(len(ignore_eos_output[0].outputs[0].token_ids)) + assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) < 10 + assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) >= 0 diff --git a/tests/samplers/test_logits_processor.py b/tests/samplers/test_logits_processor.py new file mode 100644 index 0000000..3788e9e --- /dev/null +++ b/tests/samplers/test_logits_processor.py @@ -0,0 +1,62 @@ +import pytest +import torch + +from vllm import SamplingParams + +MODELS = ["facebook/opt-125m"] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +def test_logits_processor_force_generate( + vllm_runner, + example_prompts, + model: str, + dtype: str, +) -> None: + vllm_model = vllm_runner(model, dtype=dtype) + tokenizer = vllm_model.model.get_tokenizer() + repeat_times = 2 + enforced_answers = " vLLM" + vllm_token_ids = tokenizer.encode(enforced_answers, + add_special_tokens=False) + max_tokens = len(vllm_token_ids) * repeat_times + + def pick_vllm(token_ids, logits): + token_id = vllm_token_ids[len(token_ids) % len(vllm_token_ids)] + logits[token_id] = torch.finfo(logits.dtype).max + return logits + + params_with_logprobs = SamplingParams( + logits_processors=[pick_vllm], + prompt_logprobs=3, + max_tokens=max_tokens, + ) + + # test logits_processors when prompt_logprobs is not None + 
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
def test_logits_processor_force_generate(
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
) -> None:
    """A logits processor must be able to force an exact token sequence,
    including when combined with prompt_logprobs and when batched with
    other requests."""
    vllm_model = vllm_runner(model, dtype=dtype)
    tokenizer = vllm_model.model.get_tokenizer()
    repeat_times = 2
    enforced_answers = " vLLM"
    vllm_token_ids = tokenizer.encode(enforced_answers,
                                      add_special_tokens=False)
    max_tokens = len(vllm_token_ids) * repeat_times

    def pick_vllm(token_ids, logits):
        # Cycle through the enforced answer's token ids, pinning each step's
        # argmax by setting that token's logit to the dtype maximum.
        token_id = vllm_token_ids[len(token_ids) % len(vllm_token_ids)]
        logits[token_id] = torch.finfo(logits.dtype).max
        return logits

    params_with_logprobs = SamplingParams(
        logits_processors=[pick_vllm],
        prompt_logprobs=3,
        max_tokens=max_tokens,
    )

    requests = [
        # logits processor combined with prompt_logprobs
        (example_prompts[0], params_with_logprobs),
        # prompt_logprobs alone
        (example_prompts[1], SamplingParams(prompt_logprobs=3,
                                            max_tokens=max_tokens)),
        # plain request, exercising grouped scheduling
        (example_prompts[2], SamplingParams(max_tokens=max_tokens)),
    ]
    for prompt, params in requests:
        vllm_model.model._add_request(
            prompt=prompt,
            sampling_params=params,
            prompt_token_ids=None,
        )

    outputs = vllm_model.model._run_engine(False)

    # The forced request must emit exactly the enforced answer, repeated.
    assert outputs[0].outputs[0].text == enforced_answers * repeat_times
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16, -1])
@pytest.mark.parametrize("num_top_logprobs", [6])  # 32000 == vocab_size
def test_get_prompt_logprobs(
    hf_runner,
    vllm_runner,
    model,
    dtype,
    chunked_prefill_token_size: int,
    num_top_logprobs: int,
    example_prompts,
):
    """Greedy-decode with prompt/sample logprobs enabled; check the returned
    logprobs are complete, internally consistent, and numerically close to
    HuggingFace's reference values."""
    # chunked_prefill_token_size == -1 disables chunked prefill entirely.
    max_num_seqs = 256
    enable_chunked_prefill = False
    max_num_batched_tokens = None
    if chunked_prefill_token_size != -1:
        enable_chunked_prefill = True
        max_num_seqs = min(chunked_prefill_token_size, max_num_seqs)
        max_num_batched_tokens = chunked_prefill_token_size

    max_tokens = 5

    # Reference logprobs from HF; release the model before vLLM loads.
    hf_model = hf_runner(model, dtype=dtype)
    hf_logprobs = hf_model.generate_greedy_logprobs(
        example_prompts,
        max_tokens=max_tokens,
    )
    del hf_model

    vllm_model = vllm_runner(
        model,
        dtype=dtype,
        max_logprobs=num_top_logprobs,
        enable_chunked_prefill=enable_chunked_prefill,
        max_num_batched_tokens=max_num_batched_tokens,
        max_num_seqs=max_num_seqs,
    )
    greedy_params = SamplingParams(max_tokens=max_tokens,
                                   logprobs=num_top_logprobs,
                                   prompt_logprobs=num_top_logprobs,
                                   temperature=0.0)
    vllm_results = vllm_model.model.generate(
        example_prompts, sampling_params=greedy_params)

    # --- Structural checks: logprobs must be present and complete. ---
    for result in vllm_results:
        assert result.prompt_logprobs is not None
        sample_logprobs = result.outputs[0].logprobs
        assert sample_logprobs is not None
        assert len(sample_logprobs) == max_tokens
        for step_logprobs in sample_logprobs:
            assert len(step_logprobs) == num_top_logprobs

        # The first entry of each step's dict is the top-ranked token, so
        # joining their decoded texts must reproduce the greedy output.
        top_token_text = "".join(
            next(iter(step_logprobs.values())).decoded_token
            for step_logprobs in sample_logprobs)
        assert result.outputs[0].text == top_token_text, (
            "The output text from the top logprob for each token position "
            "should be the same as the output text in the result.")

        # The first prompt logprob is always None
        assert result.prompt_logprobs[0] is None
        for prompt_logprobs in result.prompt_logprobs[1:]:
            # If the prompt token is not included in the top X
            # logprob, it can return 1 more data
            assert len(prompt_logprobs) in (num_top_logprobs,
                                            num_top_logprobs + 1)

    # --- Numerical checks against the HF reference. ---
    for vllm_result, hf_logprob in zip(vllm_results, hf_logprobs):
        # Prompt logprobs (position 0 has no logprob, so compare from 1:).
        for pos, prompt_logprob_dict in enumerate(
                vllm_result.prompt_logprobs[1:]):
            for token_id, logprob in prompt_logprob_dict.items():
                torch.testing.assert_close(logprob.logprob,
                                           hf_logprob[0][pos][token_id].item(),
                                           atol=1e-2,
                                           rtol=1e-2)
        # Sample logprobs.
        for pos, step_logprobs in enumerate(vllm_result.outputs[0].logprobs):
            for token_id, sample_logprob in step_logprobs.items():
                torch.testing.assert_close(sample_logprob.logprob,
                                           hf_logprob[pos][-1][token_id].item(),
                                           atol=1e-2,
                                           rtol=1e-2)
                assert isinstance(sample_logprob.decoded_token, str), (
                    "The token should be decoded by the time it is returned "
                    " to the user.")

    # --- Prompt logprobs must cover every actual prompt token. ---
    for vllm_result in vllm_results:
        token_ids = vllm_result.prompt_token_ids
        prompt_logprobs = vllm_result.prompt_logprobs

        # The first token doesn't have logprob.
        assert prompt_logprobs[0] is None

        for token_id, logprob_dict in zip(token_ids[1:], prompt_logprobs[1:]):
            assert token_id in logprob_dict
+ vllm_prompt_logprobs = vllm_result.prompt_logprobs[1:] + for i, vllm_prompt_logprob_dict in enumerate(vllm_prompt_logprobs): + for token_id, logprob in vllm_prompt_logprob_dict.items(): + torch.testing.assert_close(logprob.logprob, + hf_logprob[0][i][token_id].item(), + atol=1e-2, + rtol=1e-2) + vllm_sample_logprobs = vllm_result.outputs[0].logprobs + for i, top_logprobs in enumerate(vllm_sample_logprobs): + for token_id, sample_logprob in top_logprobs.items(): + logprob = sample_logprob.logprob + torch.testing.assert_close(logprob, + hf_logprob[i][-1][token_id].item(), + atol=1e-2, + rtol=1e-2) + assert isinstance(sample_logprob.decoded_token, str), ( + "The token should be decoded by the time it is returned " + " to the user.") + + # Test if prompt logprobs are correctly set. + for vllm_result in vllm_results: + token_ids = vllm_result.prompt_token_ids + prompt_logprobs = vllm_result.prompt_logprobs + + # The first token doesn't have logprob. + assert prompt_logprobs[0] is None + + for token_id, logprob_dict in zip(token_ids[1:], prompt_logprobs[1:]): + assert token_id in logprob_dict + + +def test_max_logprobs(): + runner = VllmRunner("facebook/opt-125m", max_logprobs=1) + vllm_sampling_params = SamplingParams(logprobs=1) + # should pass + runner.generate(["Hello world"], sampling_params=vllm_sampling_params) + + bad_sampling_params = SamplingParams(logprobs=2) + with pytest.raises(ValueError): + runner.generate(["Hello world"], sampling_params=bad_sampling_params) diff --git a/tests/samplers/test_ranks.py b/tests/samplers/test_ranks.py new file mode 100644 index 0000000..5e93238 --- /dev/null +++ b/tests/samplers/test_ranks.py @@ -0,0 +1,50 @@ +import pytest + +from vllm import SamplingParams + +MODELS = ["facebook/opt-125m"] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["half"]) +def test_ranks( + vllm_runner, + model, + dtype, + example_prompts, +): + max_tokens = 5 + num_top_logprobs = 5 + num_prompt_logprobs = 5 + + 
vllm_model = vllm_runner(model, dtype=dtype, max_logprobs=num_top_logprobs) + + ## Test greedy logprobs ranks + vllm_sampling_params = SamplingParams(temperature=0.0, + top_p=1.0, + max_tokens=max_tokens, + logprobs=num_top_logprobs, + prompt_logprobs=num_prompt_logprobs) + vllm_results = vllm_model.generate_w_logprobs(example_prompts, + vllm_sampling_params) + for result in vllm_results: + assert result[2] is not None + assert len(result[2]) == len(result[0]) + # check whether all chosen tokens have ranks = 1 + for token, logprobs in zip(result[0], result[2]): + assert token in logprobs + assert logprobs[token].rank == 1 + + ## Test non-greedy logprobs ranks + sampling_params = SamplingParams(temperature=1.0, + top_p=1.0, + max_tokens=max_tokens, + logprobs=num_top_logprobs, + prompt_logprobs=num_prompt_logprobs) + res = vllm_model.generate_w_logprobs(example_prompts, sampling_params) + for result in res: + assert result[2] is not None + assert len(result[2]) == len(result[0]) + # check whether all chosen tokens have ranks + for token, logprobs in zip(result[0], result[2]): + assert logprobs[token].rank >= 1 diff --git a/tests/samplers/test_rejection_sampler.py b/tests/samplers/test_rejection_sampler.py new file mode 100644 index 0000000..13b5b80 --- /dev/null +++ b/tests/samplers/test_rejection_sampler.py @@ -0,0 +1,385 @@ +"""Tests for rejection sampling.""" +from typing import List, Tuple + +import pytest +import torch +import torch.nn.functional as F + +from vllm.model_executor.layers.rejection_sampler import RejectionSampler +from vllm.model_executor.utils import set_random_seed + +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +def mock_causal_accepted_tensor( + k: int, last_accepted_indices: torch.Tensor) -> torch.Tensor: + """Generate an "accepted" tensor which should yield causally-accepted tokens + up to last accepted indices. 
+ + Tokens after last_accepted_indices+1 may also be accepted, although they + will not be causally accepted. + """ + batch_size = last_accepted_indices.shape[0] + + accepted = (torch.arange(k).expand(batch_size, k) <= + last_accepted_indices.unsqueeze(-1).broadcast_to( + batch_size, k)).to(device="cuda") + + # Sprinkle accepted values after the contiguous initial accepted values. + # This replicates the behavior of rejection sampling, which may "accept" + # a token that cannot be accepted because of causality. + sprinkle_candidates = ( + torch.arange(k).expand(batch_size, k) > + last_accepted_indices.unsqueeze(-1).broadcast_to(batch_size, k) + 1) + sprinkle = torch.rand(batch_size, k, device="cuda") > 0.5 + accepted[sprinkle_candidates] = sprinkle[sprinkle_candidates] + return accepted + + +@pytest.mark.parametrize("seed", list(range(10))) +@pytest.mark.parametrize( + "which_tokens_accepted", + ["all_tokens_accepted", "no_tokens_accepted", "some_tokens_accepted"]) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_correct_output_format(which_tokens_accepted: str, seed: int, + device: str): + """Verify the output has correct format given predetermined accepted matrix. 
+ """ + set_random_seed(seed) + torch.set_default_device(device) + + batch_size = 10 + k = 5 + vocab_size = 3000 + + if which_tokens_accepted == "all_tokens_accepted": + accepted = mock_causal_accepted_tensor( + k, -1 + k * torch.ones((batch_size, ), dtype=torch.long)) + elif which_tokens_accepted == "no_tokens_accepted": + accepted = mock_causal_accepted_tensor( + k, -torch.ones((batch_size, ), dtype=torch.long)) + elif which_tokens_accepted == "some_tokens_accepted": + last_accepted_indices = torch.randint(low=-1, + high=k, + size=(batch_size, )) + accepted = mock_causal_accepted_tensor(k, last_accepted_indices) + else: + raise AssertionError() + + recovered_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, k), + dtype=torch.int64) + draft_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, k), + dtype=torch.int64) + bonus_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, 1), + dtype=torch.int64) + + rejection_sampler = RejectionSampler() + rejection_sampler.init_gpu_tensors(rank=0) + output_token_ids = rejection_sampler._create_output( # pylint: disable=protected-access + accepted, + recovered_token_ids, + draft_token_ids, + bonus_token_ids, + ) + + # Bonus tokens are currently disabled. Verify they're set to -1. + # See https://github.com/vllm-project/vllm/issues/4212 + expected_bonus_token_ids = bonus_token_ids.clone() * 0 - 1 + + if which_tokens_accepted == "all_tokens_accepted": + # Expect all tokens to be equal to draft tokens. + assert torch.equal(output_token_ids[:, :-1], draft_token_ids) + + # Expect all bonus tokens to be included. + assert torch.equal(output_token_ids[:, -1:], expected_bonus_token_ids) + elif which_tokens_accepted == "no_tokens_accepted": + # Expect first token to be equal to recovered tokens. + assert torch.equal(output_token_ids[:, 0], recovered_token_ids[:, 0]) + + # Expect everything else to be -1. 
+ assert torch.equal(output_token_ids[:, 1:], + torch.ones_like(output_token_ids[:, 1:]) * -1) + elif which_tokens_accepted == "some_tokens_accepted": + recovered_plus_bonus = torch.cat( + (recovered_token_ids, expected_bonus_token_ids), dim=-1) + # Assert first rejected token is a recovered token or bonus token. + assert torch.equal( + recovered_plus_bonus[torch.arange(0, batch_size), + last_accepted_indices + 1], + output_token_ids[torch.arange(0, batch_size), + last_accepted_indices + 1]) + + # Assert every subsequent token is -1. + subsequent_mask = torch.arange(0, k + 1).expand( + batch_size, k + 1) >= (last_accepted_indices + 2).unsqueeze(-1) + assert torch.all(output_token_ids[subsequent_mask] == -1) + + +@pytest.mark.parametrize("k", list(range(1, 6))) +@pytest.mark.parametrize("vocab_size", [30_000, 50_000]) +@pytest.mark.parametrize("batch_size", list(range(1, 32))) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_no_crash_with_varying_dims(k: int, vocab_size: int, batch_size: int, + device: str): + torch.set_default_device(device) + rejection_sampler = RejectionSampler() + rejection_sampler.init_gpu_tensors(rank=0) + + draft_probs = torch.rand(batch_size, k, vocab_size, dtype=torch.float32) + target_probs = torch.rand(batch_size, k, vocab_size, dtype=torch.float32) + bonus_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, 1), + dtype=torch.int64) + draft_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, k), + dtype=torch.int64) + + rejection_sampler(target_probs, bonus_token_ids, draft_probs, + draft_token_ids) + + +@pytest.mark.parametrize("above_or_below_vocab_range", ["above", "below"]) +@pytest.mark.parametrize("which_token_ids", + ["bonus_token_ids", "draft_token_ids"]) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_raises_when_vocab_oob(above_or_below_vocab_range: str, + which_token_ids: str, device: str): + k = 3 + batch_size = 5 + 
vocab_size = 30_000 + torch.set_default_device(device) + + rejection_sampler = RejectionSampler(strict_mode=True) + rejection_sampler.init_gpu_tensors(rank=0) + + draft_probs = torch.rand(batch_size, k, vocab_size, dtype=torch.float32) + target_probs = torch.rand(batch_size, k, vocab_size, dtype=torch.float32) + bonus_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, 1), + dtype=torch.int64) + draft_token_ids = torch.randint(low=0, + high=vocab_size, + size=(batch_size, k), + dtype=torch.int64) + + oob_token_ids = None + if which_token_ids == "bonus_token_ids": + oob_token_ids = bonus_token_ids + elif which_token_ids == "draft_token_ids": + oob_token_ids = draft_token_ids + else: + raise AssertionError() + + if above_or_below_vocab_range == "above": + rogue_token_id = vocab_size + 1 + elif above_or_below_vocab_range == "below": + rogue_token_id = -1 + else: + raise AssertionError() + + oob_token_ids[0][0] = rogue_token_id + + with pytest.raises(AssertionError): + rejection_sampler(target_probs, bonus_token_ids, draft_probs, + draft_token_ids) + + +@pytest.mark.parametrize("draft_and_target_probs_equal", [True, False]) +@pytest.mark.parametrize("seed", list(range(5))) +@torch.inference_mode() +def test_rejection_sampling_approximates_target_distribution( + seed: int, draft_and_target_probs_equal: bool): + """Verify rejection sampling approximates target distribution, + despite sampling from a potentially distinct draft distribution. + + This is done by first creating a random target probability + distribution and a random draft probability distribution. We then + sample token ids from the rejection sampler using these draft + and target distributions. The samples are used to estimate + the output probability distribution, which we expect to approximate + the target distribution. + + A basic distance metric is used to determine similarity between + distributions. 
+ + We expect that as we increase the number of samples, + the distance between the observed distribution and the target + distribution decreases. To measure this, we compare the distance + of the observed distribution against both the target distribution + and a uniform random distribution. We expect the distance between + the observed distribution and the target distribution to improve + much more than the distance improvement between the observed + distribution and the random distribution. + + When draft_and_target_probs_equal=True, the draft and target + probabilities are exactly equal. Rejection sampling should + still work without any NaNs or exceptions. + """ + torch.set_default_device("cpu") + set_random_seed(seed) + + helper = _CorrectnessTestHelper( + vocab_size=10, + rejection_sampler=RejectionSampler(), + ) + + draft_probs, target_probs, reference_probs = helper.generate_probs_for_test( + draft_and_target_probs_equal) + + sample_sizes = [10, 100, 1_000, 10_000, 100_000] + distance_wrt_reference = [] + distance_wrt_target = [] + + for num_samples in sample_sizes: + (reference_vs_rejsample_dist, + target_vs_rejsample_dist) = helper.run_and_compare_distributions( + draft_probs, + target_probs, + reference_probs, + num_samples, + ) + + distance_wrt_reference.append(reference_vs_rejsample_dist) + distance_wrt_target.append(target_vs_rejsample_dist) + + relative_change_in_distance_wrt_target = get_ratio_first_to_last( + distance_wrt_target) + relative_change_in_distance_wrt_reference = get_ratio_first_to_last( + distance_wrt_reference) + + print(f"{num_samples=} {target_vs_rejsample_dist=:.05f} " + f"{reference_vs_rejsample_dist=:.05f}") + print(f"{num_samples=} {relative_change_in_distance_wrt_target=:.02f} " + f"{relative_change_in_distance_wrt_reference=:.02f}") + + relative_change_in_distance_wrt_target = get_ratio_first_to_last( + distance_wrt_target) + relative_change_in_distance_wrt_reference = get_ratio_first_to_last( + distance_wrt_reference) + + 
expected_improvement_multiplier = 20 + assert (relative_change_in_distance_wrt_target > + relative_change_in_distance_wrt_reference * + expected_improvement_multiplier) + + +def get_ratio_first_to_last(elements: List[float]) -> float: + return elements[0] / elements[-1] + + +class _CorrectnessTestHelper: + """Class that packages together logic required for the unit-level + rejection sampling correctness test. + """ + + def __init__(self, vocab_size: int, rejection_sampler: RejectionSampler): + self.rejection_sampler = rejection_sampler + self.vocab_size = vocab_size + self.vocab_range = (0, vocab_size) + + self.rejection_sampler.init_gpu_tensors(rank=0) + + # Keep test simple, use k=1 + self.k = 1 + + # Bonus tokens not used, but rejection sampler requires + # correct shape. + self.num_bonus_tokens = 1 + + def generate_probs_for_test( + self, draft_and_target_probs_equal: bool + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + draft_probs, target_probs = [ + F.softmax( + torch.rand(self.vocab_size, dtype=torch.float32), + dim=-1, + ) for _ in range(2) + ] + + num_reference_probs = 100 + reference_probs = F.softmax( + torch.rand(num_reference_probs, + self.vocab_size, + dtype=torch.float32), + dim=-1, + ) + + if draft_and_target_probs_equal: + target_probs = draft_probs.clone() + + return draft_probs, target_probs, reference_probs + + def run_and_compare_distributions(self, draft_probs: torch.Tensor, + target_probs: torch.Tensor, + reference_probs: torch.Tensor, + num_samples: int) -> Tuple[float, float]: + # Sample using rejection sampling. + rej_sample_probs = self._estimate_rejection_sampling_pdf( + draft_probs, target_probs, num_samples) + + # Average distance from reference probs. 
+ reference_vs_rejsample_dist = torch.dist( + reference_probs, + rej_sample_probs).item() / reference_probs.shape[0] + target_vs_rejsample_dist = torch.dist(target_probs, + rej_sample_probs).item() + + return reference_vs_rejsample_dist, target_vs_rejsample_dist + + def _estimate_rejection_sampling_pdf( + self, + draft_probs: torch.Tensor, + target_probs: torch.Tensor, + num_samples: int, + ) -> torch.Tensor: + # Repeat draft probs num_samples times. + draft_probs = draft_probs.reshape(1, self.k, self.vocab_size).repeat( + num_samples, 1, 1) + + # Repeat target probs num_samples * k times. + # Rejection sampler requires bonus token probs, but they aren't used. + target_probs = target_probs.reshape(1, 1, self.vocab_size).repeat( + num_samples, self.k, 1) + + # Randomly sample draft token ids from draft probs. + draft_token_ids = torch.multinomial(draft_probs[:, 0, :], + num_samples=1, + replacement=True).reshape( + num_samples, self.k) + + # Bonus tokens not used but required. + bonus_token_ids = torch.zeros((1, self.num_bonus_tokens), + dtype=torch.int64, + device="cuda").repeat(num_samples, 1) + + # Get output tokens via rejection sampling. 
+ output_token_ids = self.rejection_sampler(target_probs.to("cuda"), + bonus_token_ids.to("cuda"), + draft_probs.to("cuda"), + draft_token_ids.to("cuda")) + + # Remove bonus tokens + output_token_ids = output_token_ids[:, :-1].flatten() + + # Estimate probability density function + hist = torch.histogram(output_token_ids.to(dtype=torch.float, + device="cpu"), + bins=self.vocab_size, + range=self.vocab_range, + density=True) + + return hist.hist diff --git a/tests/samplers/test_sampler.py b/tests/samplers/test_sampler.py new file mode 100644 index 0000000..e4fea16 --- /dev/null +++ b/tests/samplers/test_sampler.py @@ -0,0 +1,661 @@ +import itertools +import random +from typing import List, Optional, Tuple +from unittest.mock import patch + +import pytest +import torch +from transformers import GenerationConfig, GenerationMixin + +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.utils import set_random_seed +from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata +from vllm.utils import Counter +from vllm.worker.model_runner import ModelRunner + + +class MockLogitsSampler(Sampler): + + def __init__(self, fake_logits: torch.Tensor): + super().__init__() + self.fake_logits = fake_logits + + def forward(self, *args, **kwargs): + return super().forward(*args, **kwargs) + + +def _prepare_test( + batch_size: int +) -> Tuple[torch.Tensor, torch.Tensor, MockLogitsSampler, ModelRunner]: + input_tensor = torch.rand((batch_size, 1024), dtype=torch.float16) + fake_logits = torch.full((batch_size, VOCAB_SIZE), + 1e-2, + dtype=input_tensor.dtype) + sampler = MockLogitsSampler(fake_logits) + model_runner = ModelRunner(model_config=None, + parallel_config=None, + scheduler_config=None, + device_config=None, + load_config=None, + lora_config=None) + return input_tensor, fake_logits, sampler, model_runner + + +VOCAB_SIZE = 32000 +RANDOM_SEEDS = list(range(128)) 
+CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +def _do_sample( + batch_size: int, + input_tensor: torch.Tensor, + sampler: MockLogitsSampler, + model_runner: ModelRunner, + sampling_params: SamplingParams, + device: str, +): + seq_group_metadata_list = [] + seq_lens = [] + for i in range(batch_size): + seq_group_metadata_list.append( + SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: SequenceData([1, 2, 3])}, + sampling_params=sampling_params, + block_tables={0: [1]}, + )) + seq_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) + + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=device, + pin_memory=model_runner.pin_memory) + return sampler(logits=input_tensor, sampling_metadata=sampling_metadata) + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_all_greedy(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + input_tensor, fake_logits, sampler, model_runner = _prepare_test( + batch_size) + + sampling_params = SamplingParams(temperature=0) + sampler_output = _do_sample(batch_size, fake_logits, sampler, model_runner, + sampling_params, device) + expected = torch.argmax(fake_logits, dim=-1) + for i, sequence_output in enumerate(sampler_output): + for nth_output in sequence_output.samples: + assert nth_output.output_token == expected[i].item() + + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_all_random(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + input_tensor, fake_logits, sampler, model_runner = _prepare_test( + batch_size) + + for i in range(batch_size): + fake_logits[i, i] = 1e2 + + 
sampling_params = SamplingParams( + temperature=1.0, + n=random.randint(1, 10), + ) + sampler_output = _do_sample(batch_size, fake_logits, sampler, model_runner, + sampling_params, device) + + for i, sequence_output in enumerate(sampler_output): + for nth_output in sequence_output.samples: + assert nth_output.output_token == i + + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_all_random_seed(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + _, fake_logits, sampler, model_runner = _prepare_test(batch_size) + + for i in range(batch_size): + fake_logits[i, i] = 1e2 + + sampling_params = SamplingParams( + temperature=1.0, + n=random.randint(1, 10), + seed=random.randint(0, 10000), + ) + sampler_output = _do_sample(batch_size, fake_logits, sampler, model_runner, + sampling_params, device) + + for i, sequence_output in enumerate(sampler_output): + for nth_output in sequence_output.samples: + assert nth_output.output_token == i + + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_all_random_seed_deterministic(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + _, fake_logits, sampler, model_runner = _prepare_test(batch_size) + + sampling_params = SamplingParams( + temperature=1.0, + n=random.randint(1, 10), + seed=random.randint(0, 10000), + ) + first_sampler_output = _do_sample(batch_size, fake_logits, sampler, + model_runner, sampling_params, device) + + second_sampler_output = _do_sample(batch_size, fake_logits, sampler, + model_runner, sampling_params, device) + + assert first_sampler_output == second_sampler_output + + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def 
test_sampler_all_beam(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + _, fake_logits, sampler, model_runner = _prepare_test(batch_size) + + sampling_params = SamplingParams( + temperature=0, + best_of=2, + use_beam_search=True, + ) + _do_sample(batch_size, fake_logits, sampler, model_runner, sampling_params, + device) + # no assertion here as I am not sure how to determine whether + # the outputs are expected - in other words, this just tests + # whether there are no exceptions in the sampler + # when handling an all-beam search case. + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_min_tokens_penalty(seed: int, device: str): + seq_id_counter = Counter(start=random.randint(0, 100)) + set_random_seed(seed) + torch.set_default_device(device) + + def create_sampling_params(min_tokens, + eos_token_id=0, + *, + stop_token_ids: Optional[List[int]] = None, + prompt_logprobs: Optional[int] = None): + sampling_params = SamplingParams( + min_tokens=min_tokens, + max_tokens=9999, # keep higher than max of min_tokens + stop_token_ids=stop_token_ids, + # requesting prompt_logprobs changes the structure of `logits` + prompt_logprobs=prompt_logprobs, + ) + sampling_params.all_stop_token_ids.add(eos_token_id) + return sampling_params + + def create_sequence_data(num_input=3, num_generated=0): + seq_data = SequenceData( + random.choices(range(0, VOCAB_SIZE), k=num_input)) + if num_generated > 0: + seq_data.output_token_ids = random.choices(range(0, VOCAB_SIZE), + k=num_generated) + return seq_data + + def generate_test_case(): + # generate multiple seq groups but limit total batch size + batch_size = random.randint(1, 128) + + expected_penalization = [] + sequence_metadata_list = [] + # 20% chance to generate seq group metadata list with all prompts + is_prompt = random.random() < 0.2 + while batch_size > 0: + num_seqs 
= 1 if is_prompt else random.randint(1, batch_size) + + eos_token_id = random.randint(0, VOCAB_SIZE - 1) + min_tokens = random.randint(0, 50) + num_stop_tokens = random.randint(0, 8) + if num_stop_tokens > 0: + stop_token_ids = random.choices(range(0, VOCAB_SIZE - 1), + k=num_stop_tokens) + else: + stop_token_ids = None + + sampling_params = create_sampling_params( + min_tokens=min_tokens, + eos_token_id=eos_token_id, + stop_token_ids=stop_token_ids) + + seq_data = {} + seq_group_penalization = [] + for _ in range(num_seqs): + num_input = random.randint(1, 100) + num_generated = 0 if is_prompt else random.randint(1, 100) + seq_data[next(seq_id_counter)] = create_sequence_data( + num_input=num_input, num_generated=num_generated) + seq_group_penalization.append(num_generated < min_tokens) + + expected_penalization.extend(seq_group_penalization) + sequence_metadata_list.append( + SequenceGroupMetadata( + request_id=f"test_{batch_size}", + is_prompt=is_prompt, + seq_data=seq_data, + sampling_params=sampling_params, + block_tables={}, + )) + batch_size -= num_seqs + + return { + "expected_penalization": expected_penalization, + "seq_group_metadata_list": sequence_metadata_list, + } + + # define some explicit test cases for edge case behavior + prompt_without_penalization = { + "expected_penalization": [False], + "seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_1", + is_prompt=True, + seq_data={ + next(seq_id_counter): create_sequence_data(), + }, + sampling_params=create_sampling_params(0), + block_tables={}, + ), + ] + } + + prompt_with_penalization = { + "expected_penalization": [True], + "seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_1", + is_prompt=True, + seq_data={ + next(seq_id_counter): create_sequence_data(), + }, + sampling_params=create_sampling_params(1), + block_tables={}, + ), + ] + } + + prompt_with_penalization_and_prompt_logprobs = { + "expected_penalization": [False, False, True], + 
"seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_1", + is_prompt=True, + seq_data={ + next(seq_id_counter): create_sequence_data(num_input=3), + }, + sampling_params=create_sampling_params(1, prompt_logprobs=3), + block_tables={}, + ), + ] + } + + stop_penalizing_after_min_tokens = { + "expected_penalization": [False], + "seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_1", + is_prompt=False, + seq_data={ + next(seq_id_counter): + create_sequence_data(num_generated=1), + }, + sampling_params=create_sampling_params(1), + block_tables={}, + ) + ] + } + + stop_token_ids = [42, 99, 42, 0] # intentional duplication + prompt_combination = { + "expected_penalization": [False, True, False], + "seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_2", + is_prompt=True, + seq_data={ + next(seq_id_counter): create_sequence_data(num_input=2), + }, + sampling_params=create_sampling_params(1, prompt_logprobs=3), + block_tables={}, + ), + SequenceGroupMetadata( + request_id="test_3", + is_prompt=True, + seq_data={ + next(seq_id_counter): create_sequence_data(), + }, + sampling_params=create_sampling_params( + 0, stop_token_ids=stop_token_ids), + block_tables={}, + ) + ] + } + + stop_token_ids = [1, 999, 37, 37] # intentional duplication + decode_combination = { + "expected_penalization": [True, False, False, True, False], + "seq_group_metadata_list": [ + SequenceGroupMetadata( + request_id="test_1", + is_prompt=False, + seq_data={ + next(seq_id_counter): + create_sequence_data(num_generated=1), + next(seq_id_counter): + create_sequence_data(num_generated=100), + }, + sampling_params=create_sampling_params( + 2, stop_token_ids=stop_token_ids), + block_tables={}, + ), + SequenceGroupMetadata( + request_id="test_2", + is_prompt=False, + seq_data={ + next(seq_id_counter): + create_sequence_data(num_generated=20), + next(seq_id_counter): + create_sequence_data(num_generated=1), + next(seq_id_counter): + 
create_sequence_data(num_generated=10), + }, + sampling_params=create_sampling_params( + 10, prompt_logprobs=5, stop_token_ids=stop_token_ids), + block_tables={}, + ), + ] + } + + if seed == 0: + test_cases = [ + prompt_without_penalization, + prompt_with_penalization, + prompt_with_penalization_and_prompt_logprobs, + stop_penalizing_after_min_tokens, + prompt_combination, + decode_combination, + ] + else: + test_cases = [generate_test_case()] + + def run_test_case(*, + expected_penalization=None, + seq_group_metadata_list=None): + assert expected_penalization, \ + "Invalid test case, need expected_penalization" + assert seq_group_metadata_list, \ + "Invalid test case, need seq_group_metadata_list" + + batch_size = 0 + seq_lens = [] + sampling_params_per_row = [] + for sgm in seq_group_metadata_list: + sampling_params = sgm.sampling_params + + num_rows = len(sgm.seq_data) + if sgm.is_prompt: + # a prompt seq_group has only one sequence + seq_data = next(iter(sgm.seq_data.values())) + prompt_len = seq_data.get_prompt_len() + seq_lens.append(prompt_len) + + if sgm.sampling_params.prompt_logprobs: + # with prompt_logprobs each token in the prompt has a row in + # logits + num_rows = prompt_len + + batch_size += num_rows + sampling_params_per_row.extend( + itertools.repeat(sampling_params, num_rows)) + + assert len( + expected_penalization + ) == batch_size, \ + ("Invalid test case, expected_penalization does not match computed" + "batch size") + + _, fake_logits, sampler, model_runner = _prepare_test(batch_size) + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens=seq_lens if seq_lens else None, + query_lens=seq_lens if seq_lens else None, + device=device, + pin_memory=model_runner.pin_memory) + # the logits tensor is modified in-place by the sampler + _ = sampler(logits=fake_logits, sampling_metadata=sampling_metadata) + + for logits_idx, (should_penalize, sampling_params) in enumerate( + zip(expected_penalization, 
sampling_params_per_row)): + + tokens_to_check = sampling_params.all_stop_token_ids + + if should_penalize: + for token_id in tokens_to_check: + assert fake_logits[logits_idx, token_id] == -float( + 'inf' + ), f"Expected token {token_id} for logits row {logits_idx}" + " to be penalized" + # no other tokens should be set to -inf + assert torch.count_nonzero( + fake_logits[logits_idx, :] == -float('inf')) == len( + tokens_to_check + ), f"Expected only {len(tokens_to_check)} to be penalized" + else: + # no tokens should be set to -inf + assert torch.count_nonzero( + fake_logits[logits_idx, :] == + -float('inf')) == 0, "No tokens should have been penalized" + + del model_runner + + for test_case in test_cases: + run_test_case(**test_case) + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_mixed(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + input_tensor, fake_logits, sampler, model_runner = _prepare_test( + batch_size) + + seq_group_metadata_list = [] + expected_tokens: List[Optional[List[int]]] = [] + seq_lens = [] + for i in range(batch_size): + expected: Optional[List[int]] = None + sampling_type = random.randint(0, 3) + if sampling_type == 0: + sampling_params = SamplingParams(temperature=0) + expected = [torch.argmax(fake_logits[i], dim=-1).item()] + elif sampling_type in (1, 2): + n = random.randint(1, 10) + sampling_params = SamplingParams( + temperature=random.random() + 0.1, + top_p=min(random.random() + 0.1, 1), + top_k=random.randint(0, 10) or -1, + n=n, + presence_penalty=random.randint(0, 1), + ) + if sampling_type == 2: + sampling_params.seed = random.randint(0, 10000) + else: + for idx in range(n): + fake_logits[i, i + idx] = 1e2 + expected = list(range(i, i + n)) + else: + sampling_params = SamplingParams(temperature=0, + use_beam_search=True, + best_of=2) + expected_tokens.append(expected) + 
seq_group_metadata_list.append( + SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: SequenceData([1, 2, 3])}, + sampling_params=sampling_params, + block_tables={0: [1]}, + )) + seq_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) + + def test_sampling(model_runner: ModelRunner): + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=device, + pin_memory=model_runner.pin_memory) + sampler_output = sampler(logits=fake_logits, + sampling_metadata=sampling_metadata) + + for i, (sequence_output, metadata) in enumerate( + zip(sampler_output, seq_group_metadata_list)): + if metadata.sampling_params.use_beam_search: + continue + + if (metadata.sampling_params.seed is not None + and expected_tokens[i] is None): + # Record seeded random result to compare with results of + # second invocation + expected_tokens[i] = [ + nth_output.output_token + for nth_output in sequence_output.samples + ] + continue + + for n, nth_output in enumerate(sequence_output.samples): + if (metadata.sampling_params.temperature == 0 + or metadata.sampling_params.seed is not None): + # Ensure exact matches for greedy or random with seed + assert nth_output.output_token == expected_tokens[i][n] + else: + # For non-seeded random check that one of the high-logit + # tokens were chosen + assert nth_output.output_token in expected_tokens[i] + + # Test batch + test_sampling(model_runner) + + # Shuffle the batch and resample + target_index = list(range(batch_size)) + for list_to_shuffle in (target_index, seq_group_metadata_list, + expected_tokens, seq_lens): + random.Random(seed).shuffle(list_to_shuffle) + target_index = torch.tensor(target_index) + input_tensor.data = input_tensor.index_select(0, target_index) + fake_logits.data = fake_logits.index_select(0, target_index) + + # This time, results of seeded random samples will be compared with + # the corresponding sample in the pre-shuffled batch + 
test_sampling(model_runner) + + del model_runner + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_sampler_top_k_top_p(seed: int, device: str): + set_random_seed(seed) + batch_size = random.randint(1, 256) + top_k = random.randint(100, 500) + top_p = random.random() * 0.1 + vocab_size = 32000 + input_tensor = torch.rand((batch_size, 1024), + device=device, + dtype=torch.float16) + fake_logits = torch.normal(0, + 5, + size=(batch_size, vocab_size), + device=input_tensor.device, + dtype=input_tensor.dtype) + sampler = MockLogitsSampler(fake_logits) + model_runner = ModelRunner(model_config=None, + parallel_config=None, + scheduler_config=None, + device_config=None, + load_config=None, + lora_config=None) + + generation_model = GenerationMixin() + generation_config = GenerationConfig(top_k=top_k, + top_p=top_p, + do_sample=True) + warpers = generation_model._get_logits_warper(generation_config) + assert len(warpers) == 2 # top_p and top_k + + seq_group_metadata_list = [] + seq_lens = [] + for i in range(batch_size): + seq_group_metadata_list.append( + SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: SequenceData([1, 2, 3])}, + sampling_params=SamplingParams( + temperature=1, + top_k=top_k, + top_p=top_p, + ), + block_tables={0: [1]}, + )) + seq_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) + + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=device, + pin_memory=model_runner.pin_memory) + + sample_probs = None + + def mock_sample(probs, *args, **kwargs): + nonlocal sample_probs + sample_probs = probs + return ([[prob.topk(1, dim=-1).indices.tolist(), [0]] + for prob in probs], None) + + with patch("vllm.model_executor.layers.sampler._sample", mock_sample): + sampler(logits=fake_logits, sampling_metadata=sampling_metadata) + hf_probs = warpers(torch.zeros_like(fake_logits), 
fake_logits.clone()) + hf_probs = torch.softmax(hf_probs, dim=-1, dtype=torch.float) + assert torch.allclose(hf_probs, sample_probs, atol=1e-5) + assert torch.equal(hf_probs.eq(0), sample_probs.eq(0)) + + del model_runner diff --git a/tests/samplers/test_seeded_generate.py b/tests/samplers/test_seeded_generate.py new file mode 100644 index 0000000..3cd659c --- /dev/null +++ b/tests/samplers/test_seeded_generate.py @@ -0,0 +1,82 @@ +"""Verify that seeded random sampling is deterministic. + +Run `pytest tests/samplers/test_seeded_generate.py`. +""" +import copy +import random +from itertools import combinations + +import pytest + +from vllm import SamplingParams +from vllm.model_executor.utils import set_random_seed + +MODEL = "facebook/opt-125m" +RANDOM_SEEDS = list(range(5)) + + +@pytest.fixture +def vllm_model(vllm_runner): + vllm_model = vllm_runner(MODEL, dtype="half") + yield vllm_model + del vllm_model + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +def test_random_sample_with_seed( + vllm_model, + example_prompts, + seed: int, +) -> None: + set_random_seed(seed) + + sampling_params = SamplingParams( + # Parameters to ensure sufficient randomness + temperature=2.0, + top_p=min(random.random() + 0.3, 1), + top_k=random.randint(5, 20), + n=random.randint(1, 10), + presence_penalty=random.randint(0, 1), + max_tokens=8, + ignore_eos=True, + ) + + sampling_params_seed_1 = copy.deepcopy(sampling_params) + sampling_params_seed_1.seed = 100 + sampling_params_seed_2 = copy.deepcopy(sampling_params) + sampling_params_seed_2.seed = 200 + + llm = vllm_model.model + + for prompt in example_prompts: + for params in ( + sampling_params, + sampling_params_seed_1, + sampling_params_seed_2, + sampling_params, + sampling_params_seed_1, + sampling_params_seed_2, + ): + llm._add_request( + prompt=prompt, + prompt_token_ids=None, + sampling_params=params, + ) + + results = llm._run_engine(use_tqdm=False) + all_outputs = [[out.token_ids for out in output.outputs] + for output 
in results] + + for i in range(0, len(example_prompts), 6): + outputs = all_outputs[i:i + 6] + + # verify all non-seeded requests differ + for output_a, output_b in combinations( + (outputs[0], outputs[1], outputs[2], outputs[3]), + 2, + ): + assert output_a != output_b + + # verify requests with the same seed match + assert outputs[1] == outputs[4] + assert outputs[2] == outputs[5] diff --git a/tests/spec_decode/__init__.py b/tests/spec_decode/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/spec_decode/e2e/__init__.py b/tests/spec_decode/e2e/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/spec_decode/e2e/conftest.py b/tests/spec_decode/e2e/conftest.py new file mode 100644 index 0000000..b1ab8a0 --- /dev/null +++ b/tests/spec_decode/e2e/conftest.py @@ -0,0 +1,305 @@ +import asyncio +import time +from itertools import cycle +from typing import Dict, List, Optional, Tuple, Union + +import pytest +import ray +import torch +from pynvml import (nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, + nvmlInit) + +from tests.conftest import cleanup +from vllm import LLM +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.lora.request import LoRARequest +from vllm.model_executor.utils import set_random_seed +from vllm.outputs import RequestOutput +from vllm.sampling_params import SamplingParams +from vllm.sequence import Logprob, MultiModalData +from vllm.usage.usage_lib import UsageContext +from vllm.utils import Counter, random_uuid + + +class AsyncLLM: + """AsyncLLM + + Note: Current LLM class in vllm don't support async mode, for test purpose, + we implement async one in here. Maybe we could move to + vllm/entrypoints/llm.py in future. + + Below AsyncLLM is directly borrow from vllm/entrypoints/llm.py with changes + to make to work in async mode. 
+ """ + + def __init__( + self, + model: str, + tokenizer: Optional[str] = None, + tokenizer_mode: str = "auto", + skip_tokenizer_init: bool = False, + trust_remote_code: bool = False, + tensor_parallel_size: int = 1, + dtype: str = "auto", + quantization: Optional[str] = None, + revision: Optional[str] = None, + tokenizer_revision: Optional[str] = None, + seed: int = 0, + gpu_memory_utilization: float = 0.9, + swap_space: int = 4, + enforce_eager: bool = False, + max_seq_len_to_capture: int = 8192, + disable_custom_all_reduce: bool = False, + **kwargs, + ) -> None: + if "disable_log_stats" not in kwargs: + kwargs["disable_log_stats"] = True + self.engine_args = AsyncEngineArgs( + model=model, + tokenizer=tokenizer, + tokenizer_mode=tokenizer_mode, + skip_tokenizer_init=skip_tokenizer_init, + trust_remote_code=trust_remote_code, + tensor_parallel_size=tensor_parallel_size, + dtype=dtype, + quantization=quantization, + revision=revision, + tokenizer_revision=tokenizer_revision, + seed=seed, + gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + enforce_eager=enforce_eager, + max_seq_len_to_capture=max_seq_len_to_capture, + engine_use_ray=True, + disable_custom_all_reduce=disable_custom_all_reduce, + **kwargs, + ) + self.request_counter = Counter() + + def generate( + self, + prompts: Optional[Union[str, List[str]]] = None, + sampling_params: Optional[Union[SamplingParams, + List[SamplingParams]]] = None, + prompt_token_ids: Optional[List[List[int]]] = None, + use_tqdm: bool = True, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> List[RequestOutput]: + + llm_engine = AsyncLLMEngine.from_engine_args( + self.engine_args, usage_context=UsageContext.LLM_CLASS) + + if prompts is None: + raise ValueError("prompts must be provided.") + if isinstance(prompts, str): + # Convert a single prompt to a list. 
+ prompts = [prompts] + + if prompts is not None: + num_requests = len(prompts) + + if sampling_params is None: + # Use default sampling params. + sampling_params = SamplingParams() + + elif isinstance(sampling_params, + list) and len(sampling_params) != num_requests: + raise ValueError("The lengths of prompts and " + "sampling_params must be the same.") + + async def get_output(prompt, sampling_param) -> str: + request_id = random_uuid() + results_generator = llm_engine.generate(prompt, sampling_param, + request_id) + final_output = None + async for request_output in results_generator: + final_output = request_output + return final_output + + outputs = [] + try: + for i in range(num_requests): + prompt = prompts[i] if prompts is not None else None + res = asyncio.run(get_output(prompt, sampling_params)) + outputs.append(res) + finally: + ray.shutdown() + return outputs + + +@pytest.fixture +def baseline_llm_generator(request, common_llm_kwargs, + per_test_common_llm_kwargs, baseline_llm_kwargs, + seed): + return create_llm_generator("baseline", request, common_llm_kwargs, + per_test_common_llm_kwargs, + baseline_llm_kwargs, seed) + + +@pytest.fixture +def test_llm_generator(request, common_llm_kwargs, per_test_common_llm_kwargs, + test_llm_kwargs, seed): + return create_llm_generator("test", request, common_llm_kwargs, + per_test_common_llm_kwargs, test_llm_kwargs, + seed) + + +def create_llm_generator(baseline_or_test, request, common_llm_kwargs, + per_test_common_llm_kwargs, distinct_llm_kwargs, + seed): + kwargs = { + **common_llm_kwargs, + **per_test_common_llm_kwargs, + **distinct_llm_kwargs, + } + test_name = request.node.name + + def generator_inner(): + + wait_for_gpu_memory_to_clear( + devices=list(range(torch.cuda.device_count())), + threshold_bytes=2 * 2**30, + timeout_s=60, + ) + + use_async = False + if "use_async" in kwargs: + use_async = kwargs.pop("use_async") + print(f'{use_async=}') + + print(f'Creating {baseline_or_test=} LLM for {test_name=}. 
{kwargs=}') + llm = AsyncLLM(**kwargs) if use_async else LLM(**kwargs) + set_random_seed(seed) + + yield llm + del llm + cleanup() + + def generator_outer(): + for llm in generator_inner(): + yield llm + del llm + + return generator_outer + + +def get_output_from_llm_generator( + llm_generator, prompts, + sampling_params) -> Tuple[List[str], List[List[int]]]: + tokens = [] + token_ids = [] + for llm in llm_generator(): + outputs = llm.generate(prompts, sampling_params, use_tqdm=True) + token_ids = [output.outputs[0].token_ids for output in outputs] + tokens = [output.outputs[0].text for output in outputs] + del llm + + return tokens, token_ids + + +def get_logprobs_from_llm_generator( + llm_generator, prompts, + sampling_params) -> List[List[Dict[int, Logprob]]]: + """Returns a dict of (token_id: Logprob) for each generated position, for + each sequence in the batch. + """ + for llm in llm_generator(): + outputs = llm.generate(prompts, sampling_params, use_tqdm=True) + logprobs = [output.outputs[0].logprobs[:] for output in outputs] + del llm + + return logprobs + + +def run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len, + force_output_len: bool, + print_tokens: bool = False): + """Helper method that compares the outputs of both the baseline LLM and + the test LLM. It asserts greedy equality, e.g. that the outputs are exactly + the same when temperature is zero. + """ + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + "San Francisco is know for its", + "Facebook was created in 2004 by", + "Curious George is a", + "Python 3.11 brings improvements to its", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + # If the test requires that we generated max_output_len tokens, then set the + # sampling params to ignore eos token. 
+ ignore_eos = force_output_len + + sampling_params = SamplingParams( + max_tokens=max_output_len, + ignore_eos=ignore_eos, + temperature=temperature, + ) + + spec_batch_tokens, spec_batch_token_ids = get_output_from_llm_generator( + test_llm_generator, prompts, sampling_params) + + (baseline_batch_tokens, + baseline_batch_token_ids) = get_output_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + assert len(baseline_batch_token_ids) == len(prompts) + assert len(spec_batch_token_ids) == len(prompts) + + for i, (baseline_token_ids, baseline_tokens, spec_token_ids, + spec_tokens) in enumerate( + zip(baseline_batch_token_ids, baseline_batch_tokens, + spec_batch_token_ids, spec_batch_tokens)): + if print_tokens: + print(f'{i=} {baseline_tokens=}') + print(f'{i=} {spec_tokens=}') + print(f'{i=} {baseline_token_ids=}') + print(f'{i=} {spec_token_ids=}') + assert baseline_token_ids == spec_token_ids + + +def wait_for_gpu_memory_to_clear(devices: List[int], + threshold_bytes: int, + timeout_s: float = 120) -> None: + # Use nvml instead of pytorch to reduce measurement error from torch cuda + # context. 
+ nvmlInit() + start_time = time.time() + while True: + output = {} + output_raw = {} + for device in devices: + dev_handle = nvmlDeviceGetHandleByIndex(device) + mem_info = nvmlDeviceGetMemoryInfo(dev_handle) + gb_used = mem_info.used / 2**30 + output_raw[device] = gb_used + output[device] = f'{gb_used:.02f}' + + print('gpu memory used (GB): ', end='') + for k, v in output.items(): + print(f'{k}={v}; ', end='') + print('') + + dur_s = time.time() - start_time + if all(v <= (threshold_bytes / 2**30) for v in output_raw.values()): + print(f'Done waiting for free GPU memory on devices {devices=} ' + f'({threshold_bytes/2**30=}) {dur_s=:.02f}') + break + + if dur_s >= timeout_s: + raise ValueError(f'Memory of devices {devices=} not free after ' + f'{dur_s=:.02f} ({threshold_bytes/2**30=})') + + time.sleep(5) diff --git a/tests/spec_decode/e2e/test_compatibility.py b/tests/spec_decode/e2e/test_compatibility.py new file mode 100644 index 0000000..60c20ed --- /dev/null +++ b/tests/spec_decode/e2e/test_compatibility.py @@ -0,0 +1,176 @@ +import pytest + +from vllm import SamplingParams + +from .conftest import get_output_from_llm_generator + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + { + # Expect failure as spec decode not supported by + # Ray backend. + "worker_use_ray": True, + }, + ]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_xfail_ray(test_llm_generator): + """Verify that speculative decoding with Ray fails. 
+ """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + ] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + try: + with pytest.raises( + AssertionError, + match="Speculative decoding not yet supported for "): + get_output_from_llm_generator(test_llm_generator, prompts, + sampling_params) + finally: + # we need to free up ray resource, + # so that latter test could use the gpu we allocated here + import ray + ray.shutdown() + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [ + { + "enable_chunked_prefill": True, + }, +]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_xfail_chunked_prefill(test_llm_generator): + """Verify that speculative decoding with chunked prefill fails. + """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + ] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + with pytest.raises(ValueError, + match="Speculative decoding and chunked prefill"): + get_output_from_llm_generator(test_llm_generator, prompts, + sampling_params) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "meta-llama/Llama-2-7b-chat-hf", + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + { + # Speculative max model len > overridden max model len should raise. + "max_model_len": 128, + "speculative_max_model_len": 129, + }, + { + # Speculative max model len > draft max model len should raise. 
+ # https://huggingface.co/JackFram/llama-68m/blob/3b606af5198a0b26762d589a3ee3d26ee6fa6c85/config.json#L12 + "speculative_max_model_len": 2048 + 1, + }, + { + # Speculative max model len > target max model len should raise. + # https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/blob/f5db02db724555f92da89c216ac04704f23d4590/config.json#L12 + "speculative_max_model_len": 4096 + 1, + }, + ]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_xfail_spec_max_model_len(test_llm_generator): + """Verify that speculative decoding validates speculative_max_model_len. + """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + ] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + with pytest.raises(ValueError, match="cannot be larger than"): + get_output_from_llm_generator(test_llm_generator, prompts, + sampling_params) + + +@pytest.mark.parametrize("common_llm_kwargs", [{ + "model": "JackFram/llama-68m", + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, +}]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_xfail_block_manager_v1(test_llm_generator): + """Verify that speculative decoding with block manager v1 fails. 
+ """ + output_len = 128 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + ] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + with pytest.raises(ValueError, + match="Speculative decoding requires usage of the V2"): + get_output_from_llm_generator(test_llm_generator, prompts, + sampling_params) diff --git a/tests/spec_decode/e2e/test_logprobs.py b/tests/spec_decode/e2e/test_logprobs.py new file mode 100644 index 0000000..9572aac --- /dev/null +++ b/tests/spec_decode/e2e/test_logprobs.py @@ -0,0 +1,335 @@ +import math +from itertools import cycle + +import pytest + +from vllm import SamplingParams + +from .conftest import get_logprobs_from_llm_generator + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + "max_logprobs": 6, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [{ + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 3, +}]) +@pytest.mark.parametrize("batch_size", [8]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 7, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_logprobs_equality(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int): + """Verify output logprobs are equal with and without speculative decoding. + """ + run_greedy_logprobs_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. 
+ "use_v2_block_manager": True, + "max_logprobs": 6, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [{ + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 3, +}]) +@pytest.mark.parametrize("batch_size", [1]) +@pytest.mark.parametrize("num_logprobs", [6]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 7, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_diff_num_logprobs(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int, + num_logprobs: int): + """Verify output logprobs are equal with and without spec decode. + This specifies a number of logprobs >1. + """ + run_greedy_logprobs_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True, + logprob_rank=num_logprobs) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [{ + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 3, +}, { + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 6, +}]) +@pytest.mark.parametrize("batch_size", [8]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_logprobs_different_k(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int): + """Veriy logprob greedy equality with different speculation lens. 
+ """ + run_greedy_logprobs_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize( + "test_llm_kwargs", + [{ + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 3, + + # Artificially limit the draft model max model len; this forces vLLM + # to skip speculation once the sequences grow beyond 32-k tokens. + "speculative_max_model_len": 32, + }]) +@pytest.mark.parametrize("batch_size", [8]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_logprobs_when_skip_speculation(baseline_llm_generator, + test_llm_generator, batch_size: int, + output_len: int): + """Verify logprobs greedy equality when some sequences skip speculation. + """ + run_greedy_logprobs_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [{ + "speculative_model": "JackFram/llama-160m", + "num_speculative_tokens": 3, +}]) +@pytest.mark.parametrize("batch_size", [1]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. 
+ 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_logprobs_temp_1(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int): + """Verify at least one logprob result has num_logprobs+1, which tests the + case where the sampled token is not in top-k logprobs. + + Ideally, this test should validate equality with non-spec by getting + logprobs. This is left as future improvement. + """ + batch_size = 8 + max_output_len = output_len + force_output_len = True + logprob_rank = 5 + + temperature = 1.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + "San Francisco is know for its", + "Facebook was created in 2004 by", + "Curious George is a", + "Python 3.11 brings improvements to its", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + # If the test requires that we generated max_output_len tokens, then set the + # sampling params to ignore eos token. + ignore_eos = force_output_len + + sampling_params = SamplingParams( + max_tokens=max_output_len, + ignore_eos=ignore_eos, + temperature=temperature, + logprobs=logprob_rank, + ) + + spec_batch_logprobs = get_logprobs_from_llm_generator( + test_llm_generator, prompts, sampling_params) + + num_returned_logprobs = [ + len(logprob_dict) for seq_logprobs in spec_batch_logprobs + for logprob_dict in seq_logprobs + ] + + # Assert one of the returned logprobs has > num_logprobs (indicating the + # sampled token is not in top-k). + assert any([ + num_returned > logprob_rank for num_returned in num_returned_logprobs + ]) + + +def run_greedy_logprobs_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len, + force_output_len: bool, + logprob_rank: int = 1): + """Helper method that compares the logprobs outputs of both the baseline LLM + and the test LLM. It asserts greedy equality of the logprobs when the + temperature is zero. 
+ """ + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + "San Francisco is know for its", + "Facebook was created in 2004 by", + "Curious George is a", + "Python 3.11 brings improvements to its", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + # If the test requires that we generated max_output_len tokens, then set the + # sampling params to ignore eos token. + ignore_eos = force_output_len + + sampling_params = SamplingParams( + max_tokens=max_output_len, + ignore_eos=ignore_eos, + temperature=temperature, + logprobs=logprob_rank, + ) + + spec_batch_logprobs = get_logprobs_from_llm_generator( + test_llm_generator, prompts, sampling_params) + baseline_batch_logprobs = get_logprobs_from_llm_generator( + baseline_llm_generator, prompts, sampling_params) + + assert len(baseline_batch_logprobs) == len(prompts) + assert len(spec_batch_logprobs) == len(prompts) + + # For each sequence in the batch. + for i, (baseline_logprobs, spec_logprobs) in enumerate( + zip(baseline_batch_logprobs, spec_batch_logprobs)): + assert len(spec_logprobs) == len(baseline_logprobs) + + # For each generated position of the sequence. + for pos, (spec_pos_logprobs, baseline_pos_logprobs) in enumerate( + zip(spec_logprobs, baseline_logprobs)): + + # Map rank to token/logprob in spec output. + spec_rank_to_token_id = { + value.rank: key + for key, value in spec_pos_logprobs.items() + } + spec_rank_to_logprob = { + value.rank: value.logprob + for key, value in spec_pos_logprobs.items() + } + + # Map rank to token/logprob in baseline output. + baseline_rank_to_token_id = { + value.rank: key + for key, value in baseline_pos_logprobs.items() + } + baseline_rank_to_logprob = { + value.rank: value.logprob + for key, value in baseline_pos_logprobs.items() + } + + # Assert set of ranks returned is equal. 
+ assert set(spec_rank_to_token_id.keys()) == set( + baseline_rank_to_token_id.keys()) + + # Assert each logprob/token id is correct, keyed by rank. + for rank in sorted(set(spec_rank_to_token_id.keys())): + assert spec_rank_to_token_id[ + rank] == baseline_rank_to_token_id[rank], f"{rank}" + assert math.isclose( + a=spec_rank_to_logprob[rank], + b=baseline_rank_to_logprob[rank], + abs_tol=1e-1, + ) diff --git a/tests/spec_decode/e2e/test_multistep_correctness.py b/tests/spec_decode/e2e/test_multistep_correctness.py new file mode 100644 index 0000000..f15fcc4 --- /dev/null +++ b/tests/spec_decode/e2e/test_multistep_correctness.py @@ -0,0 +1,579 @@ +"""The tests in this file verify end-to-end speculative decoding correctness. + +This docstring details important information on the testing methodology. + +Most of the tests rely on "greedy equality", where we expect the output of +speculative decoding on a sequence to exactly match the output of normal non- +speculative decoding. + +Since speculative decoding with rejection sampling guarantees that the output +distribution matches the target model's output distribution (up to hardware +numerics, see https://arxiv.org/pdf/2302.01318.pdf), we can expect greedy +equality. This gives us good coverage of temp=0. + +For temp>0, we rely on unit tests on the rejection sampler to verify that the +output distribution is the same with spec decode vs. no spec decode (this would +be prohibitively expensive to run with a real model). + +NOTE: Speculative decoding's distribution equality requires that the measured +distributions of the target model and proposal model be deterministic given the +same input. vLLM largely guarantees this. + +@cadedaniel has seen cases where the output probabilities of a draft/target +model change slightly with certain batch sizes or prompts, even with Torch +determinism flags set. 
It is unclear if this is a bug in vLLM, due to non- +determinism in on-device batched operations, a bug in vLLM's spec decode +implementation, or the "hardware numerics" limitations. Either way, rejection +sampling ensures the output distribution matches the target model, but it breaks +greedy-equality tests for those batch sizes/prompts. +""" + +from itertools import cycle + +import pytest +from transformers import AutoTokenizer + +from vllm import SamplingParams + +from .conftest import (get_output_from_llm_generator, + run_greedy_equality_correctness_test) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + # Note this is repeated in the test body; to initialize a tokenizer. + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, + { + # Verify the detokenizer assertions in the test work when spec + # decode is disabled. + }, + ]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("batch_size", [1, 32]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_with_detokenization(test_llm_generator, + batch_size: int): + """Run generation with speculative decoding on a batch. Verify the engine + generates the correct number of tokens (via ignore_eos=True), and that the + detokenization matches HF transformers. 
+ """ + output_len = 32 + temperature = 0.0 + + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + prompts = [prompt for prompt, _ in zip(cycle(prompts), range(batch_size))] + + sampling_params = SamplingParams( + max_tokens=output_len, + ignore_eos=True, + temperature=temperature, + ) + + batch_tokens, batch_token_ids = get_output_from_llm_generator( + test_llm_generator, prompts, sampling_params) + + # Expect a generation for each prompt in the batch. + assert len(batch_token_ids) == len(prompts) + + # Expect each generation to have expected number of tokens (note ignore_eos + # is True). + assert [len(token_ids) + for token_ids in batch_token_ids] == ([output_len] * batch_size) + + # Expect detokenized string to match. + tok = AutoTokenizer.from_pretrained("JackFram/llama-68m") + for actual_tokens, actual_token_ids in zip(batch_tokens, batch_token_ids): + expected_tokens = tok.decode(actual_token_ids) + print(f"{actual_token_ids=}") + assert actual_tokens.strip() == expected_tokens.strip() + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Use a small model for a fast test. + # Note this is repeated in the test body; to initialize a tokenizer. + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Use AsyncLLM engine + "use_async": True, + }]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize("test_llm_kwargs", [{}]) +@pytest.mark.parametrize("batch_size", [2]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_with_async_engine(test_llm_generator, + baseline_llm_generator, + batch_size: int): + """Verify spec decode works well with async LLM engine. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=32, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Print spec metrics. + "disable_log_stats": False, + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + # Try two different tiny base models. + # Note that one is equal to the draft model, another isn't. + { + "model": "JackFram/llama-68m", + }, + { + "model": "JackFram/llama-160m", + }, + ]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize( + "output_len", + [ + # Use long output len for the small model test. + 1536, + ]) +@pytest.mark.parametrize("batch_size", [1]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_tiny_model_bs1( + baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality on a tiny model with batch size of one. + + Since this test is cheaper than other e2e correctness tests, we generate + with a higher output_len. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Print spec metrics. + "disable_log_stats": False, + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + # Try two different tiny base models. + # Note that one is equal to the draft model, another isn't. 
+ { + "model": "JackFram/llama-68m", + }, + { + "model": "JackFram/llama-160m", + }, + ]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize( + "output_len", + [ + # Use small output len for fast test. + 256, + ]) +@pytest.mark.parametrize("batch_size", [64]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_tiny_model_large_bs( + baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality on a tiny model and large batch size. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + # Try two different tiny base models. + # Note that one is equal to the draft model, another isn't. + { + "model": "JackFram/llama-68m", + }, + { + "model": "JackFram/llama-160m", + }, + ]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize("max_output_len", [ + 256, +]) +@pytest.mark.parametrize("batch_size", [32]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_tiny_model_large_bs_diff_output_len( + baseline_llm_generator, test_llm_generator, batch_size: int, + max_output_len: int): + """Verify greedy equality on a tiny model, with a large batch size, and when + sampling respects the EOS token. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len, + force_output_len=False) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # A "real" model (not tiny). + "model": "meta-llama/Llama-2-7b-chat-hf", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Print spec metrics. + "disable_log_stats": False, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize("batch_size", [1]) +@pytest.mark.parametrize( + "output_len", + [ + # Use decently long output len for a high quality test. + 256, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_real_model_bs1( + baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality on a "real" model and batch size of 1. This is + separate from large BS tests to make identifying the source of bugs easier. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # A "real" model (not tiny). + "model": "meta-llama/Llama-2-7b-chat-hf", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Print spec metrics. 
+ "disable_log_stats": False, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize("batch_size", [32]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 64, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_real_model_large_bs( + baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality with a "real" model on a nontrivial batch size. + This is the closest test to a real production workload. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "block_size": 8, + # 2 for small prompt, 256//8 for generated. + "num_gpu_blocks_override": 2 + 256 // 8, + "max_model_len": (2 + 256 // 8) * 8, + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [ + { + "model": "JackFram/llama-160m", + }, +]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize( + "output_len", + [ + # Use small output len for fast test. + 256, + ]) +@pytest.mark.parametrize("batch_size", [4]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_e2e_greedy_correctness_with_preemption( + baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality, even when some sequences are preempted mid- + generation. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-160m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [ + # As of this writing, vLLM only compiles with these 3 block sizes by + # default. + { + "block_size": 8, + }, + { + "block_size": 16, + }, + { + "block_size": 32, + }, + ]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + }, +]) +@pytest.mark.parametrize("batch_size", [2]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_different_block_size(baseline_llm_generator, + test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality over different block sizes. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-160m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize( + "test_llm_kwargs", + [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": 5, + + # Artificially limit the draft model max model len; this forces vLLM + # to skip speculation once the sequences grow beyond 32-k tokens. 
+ "speculative_max_model_len": 32, + }, + ]) +@pytest.mark.parametrize("batch_size", [8]) +@pytest.mark.parametrize( + "output_len", + [ + # This must be a good bit larger than speculative_max_model_len so that + # we can test the case where all seqs are skipped, but still small to + # ensure fast test. + 64, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_skip_speculation(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int): + """Verify greedy equality when some (or all) sequences skip speculation. + We do this by setting the max model len of the draft model to an + artificially low value, such that when the sequences grow beyond it, they + are skipped in speculative decoding. + """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize( + "test_llm_kwargs", + [ + { + "speculative_model": "JackFram/llama-68m", + "num_speculative_tokens": k, + } + # Try a range of common k, as well as large speculation. + for k in [1, 2, 3, 4, 5, 6, 7, 8, 9, 63] + ]) +@pytest.mark.parametrize("batch_size", [2]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_many_k(baseline_llm_generator, test_llm_generator, batch_size: int, + output_len: int): + """Verify that speculative decoding produces exact equality to without spec + decode with many different values of k. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) diff --git a/tests/spec_decode/e2e/test_ngram_correctness.py b/tests/spec_decode/e2e/test_ngram_correctness.py new file mode 100644 index 0000000..44ef400 --- /dev/null +++ b/tests/spec_decode/e2e/test_ngram_correctness.py @@ -0,0 +1,172 @@ +"""This docstring details important information on the testing methodology. + +Most of the tests rely on "greedy equality", where we expect the output of +speculative decoding on a sequence to exactly match the output of normal non- +speculative decoding. + +Since speculative decoding with rejection sampling guarantees that the output +distribution matches the target model's output distribution (up to hardware +numerics, see https://arxiv.org/pdf/2302.01318.pdf), we can expect greedy +equality. + +For ngram lookup, its idea comes from https://github.com/apoorvumang/prompt-lookup-decoding, +and is merged into transform code base: https://github.com/huggingface/transformers/pull/27775. +Since there is no model is needed for generate the proposal, we could make +the testcase much simpler than drafter multi-step one. + +However, we still need to verify below scenario could be passed: + * Batch size 1 greedy equality + * Batch size >1 greedy equality + * Test greedy equality under preemption + * Test greedy equality under various ngram sizes / speculative sizes + +With those tests, we can say at least, ngram spec would not break the correctess +for the target model outputs. +""" + +import pytest + +from .conftest import run_greedy_equality_correctness_test + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True, + + # Print spec metrics. 
+ "disable_log_stats": False, + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [ + { + "model": "JackFram/llama-68m", + }, +]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "[ngram]", + "num_speculative_tokens": 5, + "ngram_prompt_lookup_max": 3, + }, +]) +@pytest.mark.parametrize("output_len", [ + 256, +]) +@pytest.mark.parametrize("batch_size", [1, 64]) +@pytest.mark.parametrize("seed", [1]) +def test_ngram_e2e_greedy_correctness(baseline_llm_generator, + test_llm_generator, batch_size: int, + output_len: int): + """Verify greedy equality on a tiny model with different batch size.""" + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "block_size": 8, + # 2 for small prompt, 256//8 for generated. + "num_gpu_blocks_override": 2 + 256 // 8, + "max_model_len": (2 + 256 // 8) * 8, + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [ + { + "model": "JackFram/llama-160m", + }, +]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize("test_llm_kwargs", [ + { + "speculative_model": "[ngram]", + "num_speculative_tokens": 5, + "ngram_prompt_lookup_max": 3, + }, +]) +@pytest.mark.parametrize( + "output_len", + [ + # Use small output len for fast test. + 256, + ]) +@pytest.mark.parametrize("batch_size", [4]) +@pytest.mark.parametrize("seed", [1]) +def test_ngram_e2e_greedy_correctness_with_preemption(baseline_llm_generator, + test_llm_generator, + batch_size: int, + output_len: int): + """Verify greedy equality, even when some sequences are preempted mid- + generation. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) + + +@pytest.mark.parametrize( + "common_llm_kwargs", + [{ + "model": "JackFram/llama-68m", + + # Skip cuda graph recording for fast test. + "enforce_eager": True, + + # Required for spec decode. + "use_v2_block_manager": True + }]) +@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}]) +@pytest.mark.parametrize("baseline_llm_kwargs", [{}]) +@pytest.mark.parametrize( + "test_llm_kwargs", + [ + { + "speculative_model": "[ngram]", + "num_speculative_tokens": k, + "ngram_prompt_lookup_max": 3, + } + # Try a range of common k, as well as large speculation. + for k in [1, 3, 5] + ] + [ + { + "speculative_model": "[ngram]", + "num_speculative_tokens": k, + "ngram_prompt_lookup_max": 1, + } + # Try a range of common k, as well as large speculation. + for k in [1, 3, 5] + ]) +@pytest.mark.parametrize("batch_size", [2]) +@pytest.mark.parametrize( + "output_len", + [ + # Use smaller output len for fast test. + 32, + ]) +@pytest.mark.parametrize("seed", [1]) +def test_ngram_different_k(baseline_llm_generator, test_llm_generator, + batch_size: int, output_len: int): + """Verify that ngram speculative decoding produces exact equality + to without spec decode with many different values of k and + different ngram_prompt_lookup_max. 
+ """ + run_greedy_equality_correctness_test(baseline_llm_generator, + test_llm_generator, + batch_size, + max_output_len=output_len, + force_output_len=True) diff --git a/tests/spec_decode/test_batch_expansion.py b/tests/spec_decode/test_batch_expansion.py new file mode 100644 index 0000000..43cfd78 --- /dev/null +++ b/tests/spec_decode/test_batch_expansion.py @@ -0,0 +1,98 @@ +import pytest +import torch + +from vllm.spec_decode.batch_expansion import BatchExpansionTop1Scorer + +from .utils import create_seq_group_metadata_from_prompts, mock_worker + + +@pytest.mark.parametrize('num_target_seq_ids', [100]) +@pytest.mark.skip_global_cleanup +def test_create_target_seq_id_iterator(num_target_seq_ids: int): + """Verify all new sequence ids are greater than all input + seq ids. + """ + scorer = BatchExpansionTop1Scorer(mock_worker(), 'cuda:0', 32_000) + + all_seq_ids = [ + [1, 3, 5, 7], + list(range(100)) + [0], + [100], + ] + + for seq_ids in all_seq_ids: + max_seq_id = max(seq_ids) + iterator = scorer._create_target_seq_id_iterator(seq_ids) # pylint: disable=protected-access + for _ in range(num_target_seq_ids): + assert next(iterator) > max_seq_id + + +@pytest.mark.parametrize('k', [1, 2, 6]) +@pytest.mark.skip_global_cleanup +def test_get_token_ids_to_score(k: int): + """Verify correct tokens are selected for scoring. 
+ """ + proposal_token_ids = torch.tensor( + list(range(k)), + dtype=torch.int64, + device='cuda', + ) + + expected_output = [ + [], + ] + for i in range(proposal_token_ids.shape[0]): + expected_output.append(proposal_token_ids[:i + 1].tolist()) + + scorer = BatchExpansionTop1Scorer(mock_worker(), 'cuda:0', 32_000) + actual_output = scorer._get_token_ids_to_score(proposal_token_ids) # pylint: disable=protected-access + + actual_output = [ + x.tolist() if isinstance(x, torch.Tensor) else x for x in actual_output + ] + + assert actual_output == expected_output + + +@pytest.mark.parametrize('k', [1, 2, 6]) +@pytest.mark.skip_global_cleanup +def test_create_single_target_seq_group_metadata(k: int): + """Verify correct creation of a batch-expanded seq group metadata. + """ + + prompt_tokens = [1, 2, 3] + prev_output_tokens = [4, 5, 6] + + token_ids = list(range(k)) + + num_tokens_processed = len(prompt_tokens) + len(prev_output_tokens) - 1 + + final_seq_len = len(prompt_tokens) + len(prev_output_tokens) + len( + token_ids) + + block_size = 32 + input_seq_group_metadata = create_seq_group_metadata_from_prompts( + [prompt_tokens], 2048 // block_size, block_size, [final_seq_len], + [prev_output_tokens], [num_tokens_processed])[0] + + input_seq_id = list(input_seq_group_metadata.seq_data.keys())[0] + target_seq_id = 100 + + scorer = BatchExpansionTop1Scorer(mock_worker(), 'cuda:0', 32_000) + output = scorer._create_single_target_seq_group_metadata( # pylint: disable=protected-access + input_seq_group_metadata, + input_seq_id, + target_seq_id, + token_ids, + ) + + assert output.request_id == input_seq_group_metadata.request_id + assert len(output.seq_data) == 1 + assert output.seq_data[target_seq_id].get_prompt_token_ids( + ) == prompt_tokens + assert output.seq_data[target_seq_id].get_output_token_ids( + ) == prev_output_tokens + token_ids + + assert len(output.block_tables) == 1 + assert output.block_tables[ + target_seq_id] == 
input_seq_group_metadata.block_tables[input_seq_id] diff --git a/tests/spec_decode/test_metrics.py b/tests/spec_decode/test_metrics.py new file mode 100644 index 0000000..3128788 --- /dev/null +++ b/tests/spec_decode/test_metrics.py @@ -0,0 +1,159 @@ +import math +from unittest.mock import MagicMock + +import pytest +import torch + +from vllm.spec_decode.metrics import AsyncMetricsCollector + + +def test_initial_call_returns_none(): + """Expect first call to get metrics to return None. + """ + rej_sampler = MagicMock() + rej_sampler.num_accepted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_emitted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_draft_tokens = 0 + + collector = AsyncMetricsCollector(rej_sampler) + collector.init_gpu_tensors(rank=0) + maybe_metrics = collector.maybe_collect_rejsample_metrics(k=5) + assert maybe_metrics is None + + +def test_second_call_returns_metrics(): + """Expect second call to not return None. + """ + rej_sampler = MagicMock() + rej_sampler.num_accepted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_emitted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_draft_tokens = 0 + + collect_interval_s = 5.0 + timer = MagicMock() + timer.side_effect = [ + 0.0, collect_interval_s + 0.1, collect_interval_s + 0.2 + ] + + collector = AsyncMetricsCollector(rejection_sampler=rej_sampler, + timer=timer, + collect_interval_s=collect_interval_s) + collector.init_gpu_tensors(rank=0) + _ = collector.maybe_collect_rejsample_metrics(k=5) + metrics = collector.maybe_collect_rejsample_metrics(k=5) + assert metrics is not None + + +@pytest.mark.parametrize("rank", [1, 2, 3, 4]) +def test_nonzero_rank_noop(rank): + """Verify nonzero ranks don't collect metrics. 
+ """ + rej_sampler = MagicMock() + rej_sampler.num_accepted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_emitted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_draft_tokens = 0 + + collector = AsyncMetricsCollector(rej_sampler) + collector.init_gpu_tensors(rank=rank) + _ = collector.maybe_collect_rejsample_metrics(k=5) + metrics = collector.maybe_collect_rejsample_metrics(k=5) + assert metrics is None + + +def test_noop_until_time(): + """Verify metrics aren't collected until enough time passes. + """ + rej_sampler = MagicMock() + rej_sampler.num_accepted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_emitted_tokens = torch.tensor(0, + dtype=torch.long, + device='cuda') + rej_sampler.num_draft_tokens = 0 + + collect_interval_s = 5.0 + timer = MagicMock() + timer.side_effect = [ + 0.0, collect_interval_s - 0.1, collect_interval_s - 0.1, + collect_interval_s + 0.1, collect_interval_s + 0.1 + ] + + collector = AsyncMetricsCollector(rejection_sampler=rej_sampler, + timer=timer, + collect_interval_s=collect_interval_s) + collector.init_gpu_tensors(rank=0) + + _ = collector.maybe_collect_rejsample_metrics(k=5) + metrics = collector.maybe_collect_rejsample_metrics(k=5) + assert metrics is None + + _ = collector.maybe_collect_rejsample_metrics(k=5) + metrics = collector.maybe_collect_rejsample_metrics(k=5) + assert metrics is not None + + +@pytest.mark.parametrize("has_data", [True, False]) +def test_initial_metrics_has_correct_values(has_data: bool): + """Test correctness of metrics data. 
+ """ + if has_data: + num_accepted_tokens = 103 + num_emitted_tokens = 104 + num_draft_tokens = 105 + else: + num_accepted_tokens = 0 + num_emitted_tokens = 0 + num_draft_tokens = 0 + k = 5 + + max_num_emitted_tokens = AsyncMetricsCollector.get_max_num_emitted_tokens( + num_draft_tokens, k) + + rej_sampler = MagicMock() + rej_sampler.num_accepted_tokens = torch.tensor(num_accepted_tokens, + dtype=torch.long, + device='cuda') + rej_sampler.num_emitted_tokens = torch.tensor(num_emitted_tokens, + dtype=torch.long, + device='cuda') + rej_sampler.num_draft_tokens = num_draft_tokens + + collect_interval_s = 5.0 + timer = MagicMock() + timer.side_effect = [ + 0.0, collect_interval_s + 0.1, collect_interval_s + 0.2 + ] + + collector = AsyncMetricsCollector(rejection_sampler=rej_sampler, + timer=timer, + collect_interval_s=collect_interval_s) + collector.init_gpu_tensors(rank=0) + _ = collector.maybe_collect_rejsample_metrics(k) + metrics = collector.maybe_collect_rejsample_metrics(k) + + assert metrics.num_spec_tokens == k + assert metrics.accepted_tokens == num_accepted_tokens + assert metrics.draft_tokens == num_draft_tokens + assert metrics.emitted_tokens == num_emitted_tokens + + if has_data: + assert (metrics.draft_acceptance_rate == num_accepted_tokens / + num_draft_tokens) + assert (metrics.system_efficiency == num_emitted_tokens / + max_num_emitted_tokens) + else: + assert math.isnan(metrics.draft_acceptance_rate) + assert math.isnan(metrics.system_efficiency) diff --git a/tests/spec_decode/test_multi_step_worker.py b/tests/spec_decode/test_multi_step_worker.py new file mode 100644 index 0000000..cb2de97 --- /dev/null +++ b/tests/spec_decode/test_multi_step_worker.py @@ -0,0 +1,431 @@ +import random +from unittest.mock import MagicMock + +import pytest +import torch + +from vllm.model_executor.utils import set_random_seed +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.spec_decode.multi_step_worker import MultiStepWorker +from 
vllm.spec_decode.top1_proposer import Top1Proposer +from vllm.worker.worker import Worker + +from .utils import (assert_logprobs_dict_allclose, create_batch, + create_seq_group_metadata_from_prompts, create_worker, + patch_execute_model_with_seeds, zero_kv_cache) + + +@pytest.mark.parametrize('num_steps', list(range(1, 17))) +def test_assert_enough_kv_space(num_steps: int): + """Test that the multi step worker checks for sufficient space in the KV + cache. It should throw if it cannot run all the steps. + """ + block_size = 16 + num_gpu_blocks = 2048 // block_size + + prompts = [ + list(range(block_size * 3)), + list(range(block_size * 2)), + ] + + prev_output_tokens = [ + list(range(block_size * 1)), + list(range(block_size * 2)), + ] + + final_prompt_lens = [ + len(prompt + output) + num_steps + for prompt, output in zip(prompts, prev_output_tokens) + ] + + inputs = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + final_prompt_lens, + continuations=prev_output_tokens) + + assert_enough_kv_space = MultiStepWorker._assert_enough_kv_space # pylint: disable=protected-access + worker = MagicMock() + worker.model_runner.block_size = block_size + + for seq_group_metadata in inputs: + original_block_tables = seq_group_metadata.block_tables + + # No exception. + assert_enough_kv_space(worker, inputs, num_steps) + + seq_group_metadata.block_tables = { + seq_id: [] + for seq_id, physical_blocks in original_block_tables.items() + } + + # Expect exception. + with pytest.raises(ValueError, + match='times but found insufficient KV space for'): + assert_enough_kv_space(worker, inputs, num_steps) + + seq_group_metadata.block_tables = original_block_tables + + +@torch.inference_mode() +def test_same_output_for_single_step(): + """Verify the multi step worker produces the same output as the normal + worker for num_steps=1. 
+ """ + seed = 100 + model_name = 'JackFram/llama-68m' + + block_size = 32 + num_gpu_blocks = 2048 // block_size + multi_step_worker = create_worker( + MultiStepWorker, + model_name, + block_size, + num_gpu_blocks, + seed, + ) + worker = create_worker( + Worker, + model_name, + block_size, + num_gpu_blocks, + seed, + ) + # multi_step_worker.model_runner = worker.model_runner + # multi_step_worker.cache_engine = worker.cache_engine + + num_steps = 1 + + prompts = [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + ] + + final_prompt_lens = [len(prompt) + num_steps for prompt in prompts] + + multi_step_seq_group = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + final_prompt_lens=final_prompt_lens) + + zero_kv_cache(multi_step_worker.cache_engine) + set_random_seed(seed) + actual_output, _ = multi_step_worker.sampler_output( + execute_model_req=ExecuteModelRequest( + seq_group_metadata_list=multi_step_seq_group), + sample_len=num_steps) + assert len(actual_output) == num_steps + actual_output = actual_output[0] + + single_step_seq_group = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + final_prompt_lens=final_prompt_lens) + + zero_kv_cache(worker.cache_engine) + set_random_seed(seed) + expected_output = worker.execute_model( + execute_model_req=ExecuteModelRequest( + seq_group_metadata_list=single_step_seq_group))[0] + + actual_token_ids = [ + output.samples[0].output_token for output in actual_output + ] + actual_logprobs = [output.samples[0].logprobs for output in actual_output] + + expected_token_ids = [ + output.samples[0].output_token for output in expected_output + ] + expected_logprobs = [ + output.samples[0].logprobs for output in expected_output + ] + + assert actual_token_ids == expected_token_ids + + print(f'{actual_logprobs=}') + print(f'{expected_logprobs=}') + assert_logprobs_dict_allclose(actual_logprobs, expected_logprobs) + + +@torch.inference_mode() +def 
@torch.inference_mode()
def test_same_output_for_multi_step():
    """Verify the multi-step worker produces the same output as the normal
    worker when num_steps > 1. The multi-step worker is run once; the normal
    worker is run num_steps times, and the outputs are compared.
    """
    seed = 100
    model_name = 'JackFram/llama-68m'

    block_size = 16
    num_gpu_blocks = 2048 // block_size
    multi_step_worker = create_worker(
        MultiStepWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    worker = create_worker(
        Worker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    # Make sure we go over the block boundary.
    num_steps = block_size + 1

    random.seed(seed)
    prompts = [[
        random.randint(0, 1000) for _ in range(random.randint(10, 20))
    ] for _ in range(10)]

    final_prompt_lens = [len(prompt) + num_steps for prompt in prompts]

    # Force both workers through the same per-step seed sequence so sampling
    # is comparable.
    rand_seeds = [random.randint(0, 100) for _ in range(num_steps)]
    multi_step_worker.execute_model = patch_execute_model_with_seeds(
        multi_step_worker, rand_seeds)
    worker.execute_model = patch_execute_model_with_seeds(worker, rand_seeds)

    continuations = [[1] for _ in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        continuations=continuations,
        final_prompt_lens=final_prompt_lens)

    # Run multi-step.
    zero_kv_cache(multi_step_worker.cache_engine)
    set_random_seed(seed)
    multi_step_output, _ = multi_step_worker.sampler_output(
        execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list),
        sample_len=num_steps)

    # Run single-step repeatedly.
    zero_kv_cache(worker.cache_engine)
    single_step_output = []
    continuations = [[1] for _ in prompts]
    set_random_seed(seed)

    for _ in multi_step_output:
        seq_group_metadata_list = create_seq_group_metadata_from_prompts(
            prompts,
            num_gpu_blocks,
            block_size,
            continuations=continuations,
            final_prompt_lens=final_prompt_lens)

        single_step_output.extend(
            worker.execute_model(execute_model_req=ExecuteModelRequest(
                seq_group_metadata_list=seq_group_metadata_list)))

        # Append output tokens to new sequence data.
        for i, seq_group_output in enumerate(single_step_output[-1]):
            continuations[i].append(seq_group_output.samples[0].output_token)

    # Collect token ids and logprobs per sequence for comparison.
    multi_step_output_logprobs = [[] for _ in prompts]
    single_step_output_logprobs = [[] for _ in prompts]

    multi_step_output_token_ids = [[] for _ in prompts]
    single_step_output_token_ids = [[] for _ in prompts]
    for i, _ in enumerate(prompts):
        for multi_step, single_step in zip(multi_step_output,
                                           single_step_output):
            multi_step_output_token_ids[i].append(
                multi_step[i].samples[0].output_token)
            single_step_output_token_ids[i].append(
                single_step[i].samples[0].output_token)

            multi_step_output_logprobs[i].append(
                multi_step[i].samples[0].logprobs)
            single_step_output_logprobs[i].append(
                single_step[i].samples[0].logprobs)

    # Print per-sequence token ids to ease debugging on mismatch.
    for i, (multi_step_tokens, single_step_tokens) in enumerate(
            zip(multi_step_output_token_ids, single_step_output_token_ids)):
        print(f'{i=} {multi_step_tokens=}')
        print(f'{i=} {single_step_tokens=}')
        print(f'{i=} equal {multi_step_tokens == single_step_tokens}')

    # Assert token ids are equal.
    for multi_step_tokens, single_step_tokens in zip(
            multi_step_output_token_ids, single_step_output_token_ids):
        assert multi_step_tokens == single_step_tokens

    # Assert logprobs are equal.
    for multi_step_logprobs, single_step_logprobs in zip(
            multi_step_output_logprobs, single_step_output_logprobs):
        assert_logprobs_dict_allclose(multi_step_logprobs,
                                      single_step_logprobs)


@torch.inference_mode()
def test_draft_proposals_full_speculation_len():
    """Verify Top1Proposer handles the case where every sequence in the
    batch may speculate (max_proposal_len large enough for all).
    """
    k = 10
    batch_size = 32
    vocab_size = 32_000
    device = 'cuda:0'

    draft_worker = MagicMock()
    proposer = Top1Proposer(
        worker=draft_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=2048,
    )
    # sampler_output returns (list of SamplerOutput per step, transposed flag).
    draft_worker.sampler_output.return_value = [
        SamplerOutput(
            outputs=[],
            sampled_token_probs=torch.rand(batch_size,
                                           vocab_size,
                                           device=device,
                                           dtype=torch.float32),
            logprobs=torch.rand(batch_size,
                                vocab_size,
                                device=device,
                                dtype=torch.float32),
            sampled_token_ids=torch.randint(low=0,
                                            high=vocab_size,
                                            size=(batch_size, ),
                                            device=device,
                                            dtype=torch.long),
        ) for _ in range(k)
    ], True

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])

    # Every sequence gets a full-length proposal.
    assert proposals.proposal_lens.shape == torch.Size([batch_size])
    assert proposals.proposal_lens.tolist() == [k for _ in range(batch_size)]


@torch.inference_mode()
def test_draft_proposals_no_speculations():
    """Verify Top1Proposer handles the case where no sequence may
    speculate (max_proposal_len too small for any).
    """
    k = 10
    batch_size = 32
    vocab_size = 32_000
    device = 'cuda:0'
    prompt_len = 10

    draft_worker = MagicMock()
    proposer = Top1Proposer(
        worker=draft_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=prompt_len + k - 1,
    )

    seq_group_metadata_list, _, _ = create_batch(batch_size,
                                                 k,
                                                 prompt_len=prompt_len)

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])

    # No sequence gets any proposal.
    assert proposals.proposal_lens.shape == torch.Size([batch_size])
    assert proposals.proposal_lens.tolist() == [0 for _ in range(batch_size)]


@torch.inference_mode()
def test_draft_proposals_mixed_k():
    """Verify Top1Proposer handles a batch where some sequences may
    speculate and some may not.
    """
    k = 10
    batch_size = 32
    vocab_size = 32_000
    device = 'cuda:0'

    small_prompt_len = 5
    long_prompt_len = 10
    prev_output_token_len = 20

    expected_num_proposal_seqs = 6
    expected_num_no_proposal_seqs = batch_size - expected_num_proposal_seqs

    # Short prompts can speculate; long prompts exceed max_proposal_len.
    # The final short prompt verifies ordering is preserved through filtering.
    prompt_len = [
        small_prompt_len for _ in range(expected_num_proposal_seqs - 1)
    ] + [long_prompt_len
         for _ in range(expected_num_no_proposal_seqs)] + [small_prompt_len]

    draft_worker = MagicMock()
    proposer = Top1Proposer(
        worker=draft_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=long_prompt_len + prev_output_token_len + k - 1,
    )

    draft_worker.sampler_output.return_value = [
        SamplerOutput(
            outputs=[],
            sampled_token_probs=torch.rand(expected_num_proposal_seqs,
                                           vocab_size,
                                           device=device,
                                           dtype=torch.float32),
            logprobs=torch.rand(expected_num_proposal_seqs,
                                vocab_size,
                                device=device,
                                dtype=torch.float32),
            sampled_token_ids=torch.randint(
                low=0,
                high=vocab_size,
                size=(expected_num_proposal_seqs, ),
                device=device,
                dtype=torch.long),
        ) for _ in range(k)
    ], True

    seq_group_metadata_list, _, _ = create_batch(
        batch_size,
        k,
        prompt_len=prompt_len,
        prev_output_token_len=prev_output_token_len,
    )

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])

    assert proposals.proposal_lens.shape == torch.Size([batch_size])
    assert proposals.proposal_lens.tolist() == [
        k for _ in range(expected_num_proposal_seqs - 1)
    ] + [0 for _ in range(expected_num_no_proposal_seqs)] + [k]
# ==== tests/spec_decode/test_ngram_worker.py ====
import torch

from vllm.sequence import ExecuteModelRequest
from vllm.spec_decode.ngram_worker import NGramWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

from .utils import create_seq_group_metadata_from_prompts, create_worker


def test_ngram_algo_correctness_for_single_no_match():
    """Verify the ngram algorithm finds the right candidate in the prompt.

    Scenario: a single-sequence batch where no candidate can be found.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'cuda:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window (0, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(0, 3)

    prompts = [
        # shall find no candidate
        [1, 2, 3, 4, 5, 6, 7],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=proposal_len), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([1, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([1, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([1])
    # No match -> zero proposal length.
    assert proposals.proposal_lens.tolist() == [0]


def test_ngram_algo_correctness_for_batches_not_match_all():
    """Verify the ngram algorithm finds the right candidate in the prompt.

    Scenario: a batch where only some sequences yield candidates.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'cuda:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window (0, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(0, 3)

    prompts = [
        # shall find no candidate
        [1, 2, 3, 4, 5, 6, 7],
        # shall find candidate 12,13,14,15,16
        [11, 12, 13, 14, 15, 16, 11],
        # shall find candidate 23,24,25,26,21
        [21, 21, 22, 23, 24, 25, 26, 21, 22],
        # shall find candidate 34,35,36,37,38
        [31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 31, 32, 33],
        # shall find no candidate as exceed max_proposal_len
        [
            31, 32, 31, 32, 31, 32, 31, 32, 31, 32, 31, 32, 33, 34, 35, 36, 37,
            38, 31, 32, 33
        ],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=proposal_len), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([5, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([5, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([5])

    # Sequences 1..3 match; sequence 0 has no match and the last one is over
    # max_proposal_len.
    assert proposals.proposal_lens.tolist(
    ) == [proposal_len for _ in range(4)] + [0]

    for i in range(proposal_len):
        assert proposals.proposal_token_ids[0][i] == 0
        assert proposals.proposal_token_ids[1][i] == prompts[1][i + 1]
        assert proposals.proposal_token_ids[2][i] == prompts[2][i + 3]
        assert proposals.proposal_token_ids[3][i] == prompts[3][i + 5]
        assert proposals.proposal_token_ids[4][i] == -1


def test_ngram_algo_correctness_for_batches_match_all():
    """Verify the ngram algorithm finds the right candidate in the prompt.

    Scenario: a batch where every sequence yields a candidate.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'cuda:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window (0, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(0, 3)

    prompts = [
        # shall find candidate 12,13,14,15,16
        [11, 12, 13, 14, 15, 16, 11],
        # shall find candidate 23,24,25,26,21
        [21, 21, 22, 23, 24, 25, 26, 21, 22],
        # shall find candidate 34,35,36,37,38
        [31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 31, 32, 33],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    proposals = proposer.get_proposals(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=proposal_len), )

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([3, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([3, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([3])

    assert proposals.proposal_lens.tolist() == [
        proposal_len for _ in range(3)
    ]

    for i in range(proposal_len):
        assert proposals.proposal_token_ids[0][i] == prompts[0][i + 1]
        assert proposals.proposal_token_ids[1][i] == prompts[1][i + 3]
        assert proposals.proposal_token_ids[2][i] == prompts[2][i + 5]
# ==== tests/spec_decode/test_spec_decode_worker.py ====
import random
from types import SimpleNamespace
from unittest.mock import MagicMock

import pytest
import torch

from vllm.model_executor.layers.rejection_sampler import RejectionSampler
from vllm.model_executor.utils import set_random_seed
from vllm.sequence import ExecuteModelRequest, SamplerOutput
from vllm.spec_decode.interfaces import SpeculativeProposals
from vllm.spec_decode.metrics import (AsyncMetricsCollector,
                                      SpecDecodeWorkerMetrics)
from vllm.spec_decode.multi_step_worker import MultiStepWorker
from vllm.spec_decode.spec_decode_worker import (SpecDecodeWorker,
                                                 split_num_cache_blocks_evenly)

from .utils import create_batch, create_sampler_output_list, mock_worker


@pytest.mark.parametrize('k', [1, 2, 6])
@pytest.mark.parametrize('batch_size', [1, 2, 32])
@torch.inference_mode()
def test_correctly_calls_draft_model(k: int, batch_size: int):
    """Verify SpecDecodeWorker calls the draft worker with correct
    inputs. Everything else is mocked out.
    """
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    rejection_sampler = MagicMock(spec=RejectionSampler)
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)
    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    # Abort execution right after the draft call so only that call happens.
    exception_secret = 'artificial stop'
    draft_worker.get_spec_proposals.side_effect = ValueError(exception_secret)

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)
    execute_model_req = ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list, num_lookahead_slots=k)

    with pytest.raises(ValueError, match=exception_secret):
        worker.execute_model(execute_model_req=execute_model_req)

    call_args_list = draft_worker.get_spec_proposals.call_args_list
    assert len(call_args_list) == 1

    for args, _ in call_args_list:
        actual_execute_model_data = args[0]
        assert actual_execute_model_data == execute_model_req


@pytest.mark.parametrize('k', [1, 2, 6])
@pytest.mark.parametrize('batch_size', [1, 2, 32])
@torch.inference_mode()
def test_correctly_calls_target_model(k: int, batch_size: int):
    """Verify SpecDecodeWorker calls the target model with correct
    inputs. Everything else is mocked out.
    """
    draft_worker = mock_worker(cls=MultiStepWorker, use_spec=False)
    target_worker = mock_worker(use_spec=False)
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)
    worker.init_device()

    vocab_size = 32_000

    proposal_token_ids = torch.randint(low=0,
                                       high=vocab_size,
                                       size=(batch_size, k),
                                       dtype=torch.int64,
                                       device='cuda')
    proposal_probs = torch.rand(batch_size,
                                k,
                                vocab_size,
                                dtype=torch.float32,
                                device='cuda')
    proposal_lens = torch.ones(batch_size, dtype=torch.int64,
                               device='cuda') * k

    seq_group_metadata_list, prompts, prev_output_tokens = create_batch(
        batch_size, k)

    draft_worker.get_spec_proposals.return_value = SpeculativeProposals(
        proposal_token_ids=proposal_token_ids,
        proposal_probs=proposal_probs,
        proposal_lens=proposal_lens)

    # Abort right after the target call so only that call happens.
    exception_secret = 'artificial stop'
    target_worker.execute_model.side_effect = ValueError(exception_secret)

    with pytest.raises(ValueError, match=exception_secret):
        worker.execute_model(execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            num_lookahead_slots=k))

    seen_contexts = []

    call_args_list = target_worker.execute_model.call_args_list
    assert len(call_args_list) == 1
    for _, kwargs in call_args_list:
        seq_group_metadata_list = kwargs[
            "execute_model_req"].seq_group_metadata_list

        # One expanded sequence per (original sequence, draft prefix length).
        assert len(seq_group_metadata_list) == (k + 1) * batch_size
        for seq_group_metadata in seq_group_metadata_list:
            for seq_data in seq_group_metadata.seq_data.values():
                seen_contexts.append(seq_data.get_token_ids())

    expected_seen_contexts = []

    for prompt, prev_generated, draft_tokens in zip(
            prompts, prev_output_tokens, proposal_token_ids.tolist()):

        for i in range(len(draft_tokens) + 1):
            expected_seen_contexts.append(prompt + prev_generated +
                                          draft_tokens[:i])

    seen_contexts.sort()
    expected_seen_contexts.sort()
    assert expected_seen_contexts == seen_contexts


@pytest.mark.parametrize('k', [1, 2, 6])
@pytest.mark.parametrize('batch_size', [1, 2, 32])
@torch.inference_mode()
def test_correctly_calls_rejection_sampler(k: int, batch_size: int):
    """Verify SpecDecodeWorker calls the rejection sampler with
    correct inputs. Everything else is mocked out.
    """
    vocab_size = 32_000

    draft_worker = mock_worker(cls=MultiStepWorker,
                               vocab_size=vocab_size,
                               use_spec=False)
    target_worker = mock_worker(vocab_size=vocab_size, use_spec=False)
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)
    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)
    worker.init_device()

    proposal_token_ids = torch.randint(low=0,
                                       high=vocab_size,
                                       size=(batch_size, k),
                                       dtype=torch.int64,
                                       device='cuda')
    proposal_probs = torch.rand(batch_size,
                                k,
                                vocab_size,
                                dtype=torch.float32,
                                device='cuda')

    proposal_lens = torch.ones(batch_size, dtype=torch.int64,
                               device='cuda') * k

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)

    draft_worker.get_spec_proposals.return_value = SpeculativeProposals(
        proposal_token_ids=proposal_token_ids,
        proposal_probs=proposal_probs,
        proposal_lens=proposal_lens)

    target_token_ids = torch.randint(low=0,
                                     high=vocab_size,
                                     size=(1, batch_size * (k + 1)),
                                     dtype=torch.int64,
                                     device='cuda')
    target_token_probs = torch.rand(1,
                                    batch_size * (k + 1),
                                    vocab_size,
                                    dtype=torch.float32,
                                    device='cuda')
    target_token_logprobs = torch.rand(1,
                                       batch_size * (k + 1),
                                       vocab_size,
                                       dtype=torch.float32,
                                       device='cuda')
    target_output = create_sampler_output_list(target_token_ids,
                                               target_token_probs,
                                               target_token_logprobs)

    target_worker.execute_model.return_value = [target_output[0]]

    # Abort right after the rejection-sampler call.
    exception_secret = 'artificial stop'
    rejection_sampler.side_effect = ValueError(exception_secret)

    with pytest.raises(ValueError, match=exception_secret):
        worker.execute_model(execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            num_lookahead_slots=k))

    assert len(rejection_sampler.call_args_list) == 1
    _, kwargs = rejection_sampler.call_args_list[0]
    actual = SimpleNamespace(**kwargs)

    assert torch.equal(actual.bonus_token_ids,
                       target_token_ids.reshape(batch_size, k + 1)[:, -1:])
    assert torch.equal(
        actual.target_probs,
        target_token_probs.reshape(batch_size, k + 1, -1)[:, :-1])
    assert torch.equal(actual.draft_token_ids, proposal_token_ids)
    assert torch.equal(actual.draft_probs, proposal_probs)
@pytest.mark.parametrize('k', [1, 2, 6])
@pytest.mark.parametrize('batch_size', [1, 2, 32])
@torch.inference_mode()
def test_correctly_formats_output(k: int, batch_size: int):
    """Verify SpecDecodeWorker formats sampler output correctly.
    Everything else is mocked out.
    """
    vocab_size = 32_000

    draft_worker = mock_worker(cls=MultiStepWorker,
                               vocab_size=vocab_size,
                               use_spec=False)
    target_worker = mock_worker(vocab_size=vocab_size, use_spec=False)
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)
    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)
    worker.init_device()

    proposal_token_ids = torch.randint(low=0,
                                       high=vocab_size,
                                       size=(batch_size, k),
                                       dtype=torch.int64,
                                       device='cuda')
    proposal_probs = torch.rand(batch_size,
                                k,
                                vocab_size,
                                dtype=torch.float32,
                                device='cuda')

    proposal_lens = torch.ones(batch_size, dtype=torch.int64,
                               device='cuda') * k

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)

    draft_worker.get_spec_proposals.return_value = SpeculativeProposals(
        proposal_token_ids=proposal_token_ids,
        proposal_probs=proposal_probs,
        proposal_lens=proposal_lens)

    target_token_ids = torch.randint(low=0,
                                     high=vocab_size,
                                     size=(1, batch_size * (k + 1)),
                                     dtype=torch.int64,
                                     device='cuda')
    target_token_probs = torch.rand(1,
                                    batch_size * (k + 1),
                                    vocab_size,
                                    dtype=torch.float32,
                                    device='cuda')
    target_token_logprobs = torch.rand(1,
                                       batch_size * (k + 1),
                                       vocab_size,
                                       dtype=torch.float32,
                                       device='cuda')
    target_output = create_sampler_output_list(target_token_ids,
                                               target_token_probs,
                                               target_token_logprobs)

    target_worker.execute_model.return_value = [target_output[0]]

    # Simulate per-sequence rejection: -1 marks rejected positions at the
    # tail of each row.
    rejection_sampler_output = torch.randint(low=0,
                                             high=vocab_size,
                                             size=(batch_size, k + 1),
                                             dtype=torch.int64,
                                             device='cuda')
    for i in range(batch_size):
        minimum_accepted_tokens = 1
        rejection_sampler_output[i][
            -random.randint(minimum_accepted_tokens, k + 1):] = -1

    rejection_sampler.return_value = rejection_sampler_output

    output = worker.execute_model(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k))

    expected_output = create_sampler_output_list(
        token_ids=rejection_sampler_output.transpose(0, 1),
        probs=[None for _ in range(k + 1)],
        logprobs=[None for _ in range(k + 1)])

    seq_ids = [
        next(iter(seq_group_metadata.seq_data.keys()))
        for seq_group_metadata in seq_group_metadata_list
    ]
    actual_output_by_seq = {seq_id: [] for seq_id in seq_ids}
    expected_output_by_seq = {seq_id: [] for seq_id in seq_ids}

    for step in output:
        for seq_group in step:
            for sample in seq_group.samples:
                seq_id = sample.parent_seq_id
                actual_output_by_seq[seq_id].append(sample)

    for step in expected_output:
        for seq_group in step:
            for sample in seq_group.samples:
                seq_id = sample.parent_seq_id
                expected_output_by_seq[seq_id].append(sample)

    all_seen_seq_ids = set(
        list(actual_output_by_seq.keys()) +
        list(expected_output_by_seq.keys()))
    for seq_id in all_seen_seq_ids:
        actual_by_step = actual_output_by_seq[seq_id]
        expected_by_step = expected_output_by_seq[seq_id]

        for i in range(k + 1):
            if i >= len(actual_by_step):
                # Actual output is truncated at the first rejected token.
                assert expected_by_step[i].output_token == -1
                continue
            assert actual_by_step[i].output_token == expected_by_step[
                i].output_token


@pytest.mark.parametrize('k', [1, 2])
@pytest.mark.parametrize('batch_size', [1])
@pytest.mark.parametrize('returns_metrics', [True, False])
@torch.inference_mode()
def test_collects_metrics(k: int, batch_size: int, returns_metrics: bool):
    """Verify SpecDecodeWorker collects metrics.
    """
    vocab_size = 32_000

    draft_worker = mock_worker(cls=MultiStepWorker,
                               vocab_size=vocab_size,
                               use_spec=False)
    target_worker = mock_worker(vocab_size=vocab_size, use_spec=False)
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)
    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)
    worker.init_device()

    proposal_token_ids = torch.randint(low=0,
                                       high=vocab_size,
                                       size=(batch_size, k),
                                       dtype=torch.int64,
                                       device='cuda')
    proposal_probs = torch.rand(batch_size,
                                k,
                                vocab_size,
                                dtype=torch.float32,
                                device='cuda')

    proposal_lens = torch.ones(batch_size, dtype=torch.int64,
                               device='cuda') * k

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)

    draft_worker.get_spec_proposals.return_value = SpeculativeProposals(
        proposal_token_ids=proposal_token_ids,
        proposal_probs=proposal_probs,
        proposal_lens=proposal_lens)

    target_token_ids = torch.randint(low=0,
                                     high=vocab_size,
                                     size=(1, batch_size * (k + 1)),
                                     dtype=torch.int64,
                                     device='cuda')
    target_token_probs = torch.rand(1,
                                    batch_size * (k + 1),
                                    vocab_size,
                                    dtype=torch.float32,
                                    device='cuda')
    target_token_logprobs = torch.rand(1,
                                       batch_size * (k + 1),
                                       vocab_size,
                                       dtype=torch.float32,
                                       device='cuda')
    target_output = create_sampler_output_list(target_token_ids,
                                               target_token_probs,
                                               target_token_logprobs)

    target_worker.execute_model.return_value = [target_output[0]]

    rejection_sampler_output = torch.randint(low=0,
                                             high=vocab_size,
                                             size=(batch_size, k + 1),
                                             dtype=torch.int64,
                                             device='cuda')
    for i in range(batch_size):
        minimum_accepted_tokens = 1
        rejection_sampler_output[i][
            -random.randint(minimum_accepted_tokens, k + 1):] = -1

    rejection_sampler.return_value = rejection_sampler_output

    mock_rejsample_metrics = MagicMock(
        spec=SpecDecodeWorkerMetrics) if returns_metrics else None
    metrics_collector.maybe_collect_rejsample_metrics.return_value = (
        mock_rejsample_metrics)

    output = worker.execute_model(execute_model_req=ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k))
    assert output[0].spec_decode_worker_metrics == mock_rejsample_metrics

    call_args_list = (
        metrics_collector.maybe_collect_rejsample_metrics.call_args_list)
    assert len(call_args_list) == 1
    args, kwargs = call_args_list[0]
    # k may be passed positionally or by keyword.
    assert args[0] == k or kwargs.get('k', -1) == k
@pytest.mark.parametrize('k', [0])
@pytest.mark.parametrize('batch_size', [1, 2, 32])
@torch.inference_mode()
def test_k_equals_zero(k: int, batch_size: int):
    """Verify that the SpecDecodeWorker calls the draft and target workers
    when k is zero. This happens during prefill.
    """
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    target_worker.execute_model.return_value = [MagicMock(spec=SamplerOutput)]

    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    seq_group_metadata_list, _, _ = create_batch(batch_size,
                                                 k,
                                                 prev_output_token_len=0)
    execute_model_req = ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list, num_lookahead_slots=k)

    out = worker.execute_model(execute_model_req=execute_model_req)

    assert len(out) == 1, f"expected only one token output when {k=}"
    assert out[0].probs is None, "expect gpu tensor references to be None"
    assert out[
        0].sampled_tokens is None, "expect gpu tensor references to be None"

    draft_worker.execute_model.assert_called_once_with(execute_model_req)
    target_worker.execute_model.assert_called_once_with(execute_model_req)


@pytest.mark.parametrize('k', [0, 5])
@pytest.mark.parametrize('batch_size', [0])
@torch.inference_mode()
def test_empty_input_batch(k: int, batch_size: int):
    """Verify that the SpecDecodeWorker calls the draft and target workers
    when the input batch is empty. This can happen if the engine communicates
    to the workers information without scheduling a batch.
    """
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    target_worker.execute_model.return_value = [MagicMock(spec=SamplerOutput)]

    draft_worker.device = 'cuda'
    target_worker.device = 'cuda'

    set_random_seed(1)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    seq_group_metadata_list, _, _ = create_batch(batch_size,
                                                 k,
                                                 prev_output_token_len=0)
    execute_model_req = ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list, num_lookahead_slots=k)

    out = worker.execute_model(execute_model_req=execute_model_req)

    assert len(out) == 1, f"expected only one token output when {k=}"
    assert out[0].probs is None, "expect gpu tensor references to be None"
    assert out[
        0].sampled_tokens is None, "expect gpu tensor references to be None"

    draft_worker.execute_model.assert_called_once_with(execute_model_req)
    target_worker.execute_model.assert_called_once_with(execute_model_req)


@pytest.mark.skip_global_cleanup
def test_init_device():
    """Verify SpecDecodeWorker invokes proposer/scorer worker init_device, as
    well as other GPU initialization.
    """
    draft_worker = mock_worker(cls=MultiStepWorker, use_spec=False)
    target_worker = mock_worker(use_spec=False)
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    worker.init_device()

    draft_worker.init_device.assert_called_once()

    target_worker.init_device.assert_called_once()

    metrics_collector.init_gpu_tensors.assert_called_once()
    rejection_sampler.init_gpu_tensors.assert_called_once()


@torch.inference_mode()
def test_initialize_cache():
    """Verify SpecDecodeWorker invokes initialize_cache on proposer/scorer
    workers.
    """
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    kwargs = {"num_gpu_blocks": 1024, "num_cpu_blocks": 1023}
    worker.initialize_cache(**kwargs)

    draft_worker.initialize_cache.assert_called_once_with(**kwargs)
    target_worker.initialize_cache.assert_called_once_with(**kwargs)


@pytest.mark.parametrize('available_gpu_blocks', [1, 1024])
@pytest.mark.parametrize('available_cpu_blocks', [500])
@pytest.mark.parametrize('target_cache_block_size_bytes', [2 * 2 * 4096])
@pytest.mark.parametrize('draft_kv_size_bytes', [0, 2 * 2 * 768, 2 * 2 * 4096])
@pytest.mark.skip_global_cleanup
def test_determine_num_available_blocks(available_gpu_blocks: int,
                                        available_cpu_blocks: int,
                                        target_cache_block_size_bytes: int,
                                        draft_kv_size_bytes: int):
    """Verify SpecDecodeWorker correctly profiles num available GPU blocks.
    Specifically, it should run profiling in the scorer worker, and then evenly
    split the blocks between proposer and scorer worker.
    """
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    rejection_sampler = MagicMock(spec=RejectionSampler)
    rejection_sampler.token_id_dtype = torch.int64
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)

    target_worker.determine_num_available_blocks.return_value = (
        available_gpu_blocks, available_cpu_blocks)
    target_worker.get_cache_block_size_bytes.return_value = (
        target_cache_block_size_bytes)
    draft_worker.get_cache_block_size_bytes.return_value = draft_kv_size_bytes

    worker = SpecDecodeWorker(draft_worker, target_worker, rejection_sampler,
                              metrics_collector)

    num_gpu_blocks, num_cpu_blocks = worker.determine_num_available_blocks()

    target_worker.determine_num_available_blocks.assert_called_once()
    assert num_cpu_blocks == available_cpu_blocks

    assert num_gpu_blocks == split_num_cache_blocks_evenly(
        target_cache_block_size_bytes, draft_kv_size_bytes,
        available_gpu_blocks)


@pytest.mark.parametrize('available_gpu_blocks',
                         list(range(20)) + [1024, 1024**2])
@pytest.mark.parametrize('target_cache_block_size_bytes',
                         [2 * 2 * 4096, 2 * 2 * 8192])
@pytest.mark.parametrize('draft_kv_size_bytes', [0, 2 * 2 * 768, 2 * 2 * 4096])
@pytest.mark.skip_global_cleanup
def test_split_num_cache_blocks_evenly(available_gpu_blocks: int,
                                       target_cache_block_size_bytes: int,
                                       draft_kv_size_bytes: int):
    """Verify split_num_cache_blocks_evenly does not exceed original memory
    allocation in bytes.
    """
    num_blocks = split_num_cache_blocks_evenly(target_cache_block_size_bytes,
                                               draft_kv_size_bytes,
                                               available_gpu_blocks)
    assert (num_blocks * target_cache_block_size_bytes) + (
        num_blocks * draft_kv_size_bytes) <= (available_gpu_blocks *
                                              target_cache_block_size_bytes)
# ==== tests/spec_decode/test_utils.py ====
from unittest.mock import MagicMock

import pytest

from vllm.sequence import SequenceGroupMetadata
from vllm.spec_decode.util import get_all_seq_ids, split_batch_by_proposal_len


def test_get_all_seq_ids():
    """Verify get_all_seq_ids extracts all seq ids."""
    expected_seq_ids = list(range(10)) + list(range(100, 110))

    seq_group_metadata_list = [
        SequenceGroupMetadata(
            request_id=str(seq_id),
            is_prompt=True,
            seq_data={
                seq_id: MagicMock(),
            },
            sampling_params=MagicMock(),
            block_tables={
                seq_id: MagicMock(),
            },
            lora_request=None,
        ) for seq_id in expected_seq_ids
    ]

    actual_seq_ids = get_all_seq_ids(seq_group_metadata_list)
    assert actual_seq_ids == expected_seq_ids


@pytest.fixture
def fake_sequence_group_metadata():
    # Three minimal sequence groups with seq ids 0, 1, 2.
    seq_ids = list(range(3))
    return [
        SequenceGroupMetadata(
            request_id=str(i),
            is_prompt=True,
            seq_data={
                i: MagicMock(),
            },
            sampling_params=MagicMock(),
            block_tables={
                i: MagicMock(),
            },
            lora_request=None,
        ) for i in seq_ids
    ]


def test_filter_zero_length_proposals(fake_sequence_group_metadata):
    """Selecting zero-length proposals keeps only the zero-len entries."""
    proposal_lens = [0, 1, 0]
    filtered_groups, indices = split_batch_by_proposal_len(
        fake_sequence_group_metadata,
        proposal_lens,
        select_proposal_len_zero=True)

    expected_groups = [
        fake_sequence_group_metadata[0], fake_sequence_group_metadata[2]
    ]
    expected_indices = [0, 2]

    assert filtered_groups == expected_groups
    assert indices == expected_indices


def test_filter_non_zero_length_proposals(fake_sequence_group_metadata):
    """Selecting non-zero proposals keeps only the non-zero-len entries."""
    proposal_lens = [0, 1, 2]
    filtered_groups, indices = split_batch_by_proposal_len(
        fake_sequence_group_metadata,
        proposal_lens,
        select_proposal_len_zero=False)

    expected_groups = [
        fake_sequence_group_metadata[1], fake_sequence_group_metadata[2]
    ]
    expected_indices = [1, 2]

    assert filtered_groups == expected_groups
    assert indices == expected_indices


def test_empty_inputs():
    """An empty batch splits into empty outputs."""
    filtered_groups, indices = split_batch_by_proposal_len(
        [], [], select_proposal_len_zero=True)

    assert filtered_groups == []
    assert indices == []


def test_all_zero_with_non_zero_filter(fake_sequence_group_metadata):
    """All-zero proposal lens with the non-zero filter yields nothing."""
    proposal_lens = [0, 0, 0]
    filtered_groups, indices = split_batch_by_proposal_len(
        fake_sequence_group_metadata,
        proposal_lens,
        select_proposal_len_zero=False)

    assert filtered_groups == []
    assert indices == []


def test_all_non_zero_with_zero_filter(fake_sequence_group_metadata):
    """All-non-zero proposal lens with the zero filter yields nothing."""
    proposal_lens = [1, 1, 1]
    filtered_groups, indices = split_batch_by_proposal_len(
        fake_sequence_group_metadata,
        proposal_lens,
        select_proposal_len_zero=True)

    assert filtered_groups == []
    assert indices == []


# ==== tests/spec_decode/utils.py (imports) ====
from itertools import count
from typing import Dict, Iterable, List, Optional, Union
from unittest.mock import MagicMock

import torch

from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.utils import set_random_seed
from vllm.sampling_params import SamplingParams
from vllm.sequence import (Logprob, SamplerOutput, SequenceData,
                           SequenceGroupMetadata, SequenceGroupOutput,
                           SequenceOutput)
from vllm.utils import get_distributed_init_method, get_ip, get_open_port
from vllm.worker.cache_engine import CacheEngine
from vllm.worker.worker import Worker
+def round_up_to_next_block(seq_len: int, block_size: int) -> int: + return (seq_len + block_size - 1) // block_size + + +def mock_worker(cls=None, + vocab_size: int = 30_000, + max_model_len: int = 2048, + rank: int = 0, + use_spec: bool = True) -> MagicMock: + if cls is None: + cls = Worker + + spec = cls if use_spec else None + + worker = MagicMock(spec=spec) + worker.vocab_size = vocab_size + worker.max_model_len = max_model_len + worker.rank = rank + worker.device = 'cuda:0' + return worker + + +def patch_execute_model_with_seeds(worker: Worker, rand_seeds: List[int]): + seed_iter = iter(rand_seeds) + original_execute_model = worker.execute_model + + def new_execute_model(*args, **kwargs): + result = original_execute_model(*args, **kwargs) + set_random_seed(next(seed_iter)) + return result + + return new_execute_model + + +def zero_kv_cache(cache_engine: CacheEngine): + assert cache_engine.gpu_cache + for key_blocks, value_blocks in cache_engine.gpu_cache: + key_blocks.zero_() + value_blocks.zero_() + + +def create_worker(cls: type, + model_name: str, + block_size: int, + num_gpu_blocks: int, + seed: int, + is_driver_worker: bool = True, + enforce_eager: bool = True): + engine_args = EngineArgs( + model=model_name, + seed=seed, + block_size=block_size, + enforce_eager=enforce_eager, + ) + engine_config = engine_args.create_engine_config() + + distributed_init_method = get_distributed_init_method( + get_ip(), get_open_port()) + + worker = cls( + model_config=engine_config.model_config, + parallel_config=engine_config.parallel_config, + scheduler_config=engine_config.scheduler_config, + device_config=engine_config.device_config, + cache_config=engine_config.cache_config, + load_config=engine_config.load_config, + local_rank=0, + rank=0, + distributed_init_method=distributed_init_method, + is_driver_worker=is_driver_worker, + ) + + worker.init_device() + worker.load_model() + + engine_config.cache_config.num_gpu_blocks = num_gpu_blocks + 
engine_config.cache_config.num_cpu_blocks = 0 + worker.initialize_cache( + num_gpu_blocks=engine_config.cache_config.num_gpu_blocks, + num_cpu_blocks=engine_config.cache_config.num_cpu_blocks) + + return worker + + +def create_seq_group_metadata_from_prompts( + prompts: List[List[int]], + num_gpu_blocks: int, + block_size: int, + final_prompt_lens: List[int], + continuations: Optional[List[List[int]]] = None, + seq_ids: Optional[List[int]] = None, +) -> List[SequenceGroupMetadata]: + + if continuations is None: + continuations = [[] for _ in prompts] + + if seq_ids is None: + seq_ids = list(i for i, _ in enumerate(prompts)) + + free_gpu_blocks = list(range(num_gpu_blocks)) + + block_allocations = { + i: [ + free_gpu_blocks.pop() + for _ in range(round_up_to_next_block(final_len, block_size)) + ] + for i, final_len in enumerate(final_prompt_lens) + } + + return [ + SequenceGroupMetadata( + request_id=str(i), + is_prompt=len(cont_token_ids) == 0, + seq_data={ + i: + SequenceData( + prompt_token_ids=prompt_token_ids[:], + output_token_ids=cont_token_ids[:], + ), + }, + sampling_params=SamplingParams(temperature=0.0, ), + block_tables={i: block_allocations[i][:]}, + ) for i, (prompt_token_ids, + cont_token_ids) in enumerate(zip(prompts, continuations)) + ] + + +def assert_logprobs_dict_allclose( + actual_logprobs: List[Dict[int, Logprob]], + expected_logprobs: List[Dict[int, Logprob]]) -> None: + for single_step_actual_logprobs, single_step_expected_logprobs in zip( + actual_logprobs, expected_logprobs): + assert set(single_step_actual_logprobs.keys()) == set( + single_step_expected_logprobs.keys()) + for token_id in single_step_actual_logprobs: + actual = torch.tensor( + single_step_actual_logprobs[token_id].logprob) + expected = torch.tensor( + single_step_expected_logprobs[token_id].logprob) + assert torch.allclose(actual, expected) + + +def create_sampler_output_list( + token_ids: torch.Tensor, + probs: Iterable[Optional[torch.Tensor]], + logprobs: 
Iterable[Optional[torch.Tensor]], + seq_ids: Optional[List[int]] = None) -> List[SamplerOutput]: + num_steps, batch_size = token_ids.shape + token_ids_by_step = token_ids.tolist() + + if seq_ids is None: + seq_ids = list(range(batch_size)) + + return [ + SamplerOutput(outputs=[ + SequenceGroupOutput( + samples=[ + SequenceOutput( + output_token=token_id, + parent_seq_id=seq_ids[seq_index], + logprobs={token_id: Logprob(0)}, + ) + ], + prompt_logprobs=None, + ) for seq_index, token_id in enumerate(token_ids_by_step[step]) + ], + sampled_token_probs=probs[step], + logprobs=logprobs[step], + sampled_token_ids=token_ids[step]) + for step in range(num_steps) + ] + + +def create_batch(batch_size, + k, + prompt_len: Union[int, List[int]] = 10, + prev_output_token_len: int = 10, + seq_ids: Optional[List[int]] = None, + num_gpu_blocks: Optional[int] = None, + block_size: Optional[int] = None): + if block_size is None: + block_size = 8 + + if num_gpu_blocks is None: + num_gpu_blocks = 2048 // block_size + + iterator = count() + + if isinstance(prompt_len, int): + prompt_lens = [prompt_len for _ in range(batch_size)] + else: + prompt_lens = prompt_len + + prompts = [[next(iterator) for _ in range(p_len)] for p_len in prompt_lens] + prev_output_tokens = [[ + next(iterator) for _ in range(prev_output_token_len) + ] for _ in range(batch_size)] + final_prompt_lens = [ + len(prompt) + len(prev_output_token) + k + 1 + for prompt, prev_output_token in zip(prompts, prev_output_tokens) + ] + + seq_group_metadata_list = create_seq_group_metadata_from_prompts( + prompts, num_gpu_blocks, block_size, final_prompt_lens, + prev_output_tokens, seq_ids) + return seq_group_metadata_list, prompts, prev_output_tokens diff --git a/tests/tensorizer_loader/__init__.py b/tests/tensorizer_loader/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/tensorizer_loader/tensorize_vllm_model_for_testing.py b/tests/tensorizer_loader/tensorize_vllm_model_for_testing.py new file mode 
100644 index 0000000..0e113ab --- /dev/null +++ b/tests/tensorizer_loader/tensorize_vllm_model_for_testing.py @@ -0,0 +1,245 @@ +import argparse +import dataclasses +import os +import time +import uuid +from functools import partial +from typing import Type + +import torch.nn as nn +from tensorizer import (DecryptionParams, EncryptionParams, TensorDeserializer, + TensorSerializer, stream_io) +from tensorizer.utils import convert_bytes, get_mem_usage, no_init_or_tensor +from transformers import AutoConfig, PretrainedConfig + +from vllm.distributed import (init_distributed_environment, + initialize_model_parallel) +from vllm.engine.arg_utils import EngineArgs +from vllm.engine.llm_engine import LLMEngine +from vllm.model_executor.model_loader.tensorizer import TensorizerArgs +from vllm.model_executor.models import ModelRegistry + +# yapf conflicts with isort for this docstring +# yapf: disable +""" +tensorize_vllm_model.py is a script that can be used to serialize and +deserialize vLLM models. These models can be loaded using tensorizer directly +to the GPU extremely quickly. Tensor encryption and decryption is also +supported, although libsodium must be installed to use it. Install +vllm with tensorizer support using `pip install vllm[tensorizer]`. + +To serialize a model, you can run something like this: + +python tensorize_vllm_model.py \ + --model EleutherAI/gpt-j-6B \ + --dtype float16 \ + serialize \ + --serialized-directory s3://my-bucket/ \ + --suffix vllm + +Which downloads the model from HuggingFace, loads it into vLLM, serializes it, +and saves it to your S3 bucket. A local directory can also be used. + +You can also encrypt the model weights with a randomly-generated key by +providing a `--keyfile` argument. 
+ +To deserialize a model, you can run something like this: + +python tensorize_vllm_model.py \ + --model EleutherAI/gpt-j-6B \ + --dtype float16 \ + deserialize \ + --path-to-tensors s3://my-bucket/vllm/EleutherAI/gpt-j-6B/vllm/model.tensors + +Which downloads the model tensors from your S3 bucket and deserializes them. +To provide S3 credentials, you can provide `--s3-access-key-id` and +`--s3-secret-access-key`, as well as `--s3-endpoint` as CLI args to this script, +the OpenAI entrypoint, as arguments for LLM(), or as environment variables +in the form of `S3_ACCESS_KEY_ID`, `S3_SECRET_ACCESS_KEY`, and `S3_ENDPOINT`. + + +You can also provide a `--keyfile` argument to decrypt the model weights if +they were serialized with encryption. + +For more information on the available arguments, run +`python tensorize_vllm_model.py --help`. +""" + + +def parse_args(): + parser = argparse.ArgumentParser( + description="An example script that can be used to serialize and " + "deserialize vLLM models. These models " + "can be loaded using tensorizer directly to the GPU " + "extremely quickly. Tensor encryption and decryption is " + "also supported, although libsodium must be installed to " + "use it.") + parser = TensorizerArgs.add_cli_args(EngineArgs.add_cli_args(parser)) + subparsers = parser.add_subparsers(dest='command') + + serialize_parser = subparsers.add_parser( + 'serialize', help="Serialize a model to `--serialized-directory`") + + serialize_parser.add_argument( + "--suffix", + type=str, + required=False, + help=( + "The suffix to append to the serialized model directory, which is " + "used to construct the location of the serialized model tensors, " + "e.g. if `--serialized-directory` is `s3://my-bucket/` and " + "`--suffix` is `v1`, the serialized model tensors will be " + "saved to " + "`s3://my-bucket/vllm/EleutherAI/gpt-j-6B/v1/model.tensors`. 
" + "If none is provided, a random UUID will be used.")) + serialize_parser.add_argument( + "--serialized-directory", + type=str, + required=True) + + serialize_parser.add_argument( + "--keyfile", + type=str, + required=False, + help=("Encrypt the model weights with a randomly-generated binary key," + " and save the key at this path")) + + deserialize_parser = subparsers.add_parser( + 'deserialize', + help=("Deserialize a model from `--path-to-tensors`" + " to verify it can be loaded and used.")) + + deserialize_parser.add_argument( + "--path-to-tensors", + type=str, + required=True, + help="The local path or S3 URI to the model tensors to deserialize. ") + + deserialize_parser.add_argument( + "--keyfile", + type=str, + required=False, + help=("Path to a binary key to use to decrypt the model weights," + " if the model was serialized with encryption")) + + return parser.parse_args() + + +def make_model_contiguous(model): + # Ensure tensors are saved in memory contiguously + for param in model.parameters(): + param.data = param.data.contiguous() + + +def _get_vllm_model_architecture(config: PretrainedConfig) -> Type[nn.Module]: + architectures = getattr(config, "architectures", []) + for arch in architectures: + model_cls = ModelRegistry.load_model_cls(arch) + if model_cls is not None: + return model_cls + raise ValueError( + f"Model architectures {architectures} are not supported for now. " + f"Supported architectures: {ModelRegistry.get_supported_archs()}") + + +def serialize(): + eng_args_dict = {f.name: getattr(args, f.name) for f in + dataclasses.fields(EngineArgs)} + engine_args = EngineArgs.from_cli_args(argparse.Namespace(**eng_args_dict)) + engine = LLMEngine.from_engine_args(engine_args) + + model = (engine.model_executor.driver_worker. 
+ model_runner.model) + + encryption_params = EncryptionParams.random() if keyfile else None + if keyfile: + with _write_stream(keyfile) as stream: + stream.write(encryption_params.key) + + with _write_stream(model_path) as stream: + serializer = TensorSerializer(stream, encryption=encryption_params) + serializer.write_module(model) + serializer.close() + + print("Serialization complete. Model tensors saved to", model_path) + if keyfile: + print("Key saved to", keyfile) + + +def deserialize(): + config = AutoConfig.from_pretrained(model_ref) + + with no_init_or_tensor(): + model_class = _get_vllm_model_architecture(config) + model = model_class(config) + + before_mem = get_mem_usage() + start = time.time() + + if keyfile: + with _read_stream(keyfile) as stream: + key = stream.read() + decryption_params = DecryptionParams.from_key(key) + tensorizer_args.deserializer_params['encryption'] = \ + decryption_params + + with (_read_stream(model_path)) as stream, TensorDeserializer( + stream, **tensorizer_args.deserializer_params) as deserializer: + deserializer.load_into_module(model) + end = time.time() + + # Brag about how fast we are. 
+ total_bytes_str = convert_bytes(deserializer.total_tensor_bytes) + duration = end - start + per_second = convert_bytes(deserializer.total_tensor_bytes / duration) + after_mem = get_mem_usage() + print( + f"Deserialized {total_bytes_str} in {end - start:0.2f}s, {per_second}/s" + ) + print(f"Memory usage before: {before_mem}") + print(f"Memory usage after: {after_mem}") + + return model + + +args = parse_args() + +s3_access_key_id = (args.s3_access_key_id or os.environ.get("S3_ACCESS_KEY_ID") + or None) +s3_secret_access_key = (args.s3_secret_access_key + or os.environ.get("S3_SECRET_ACCESS_KEY") or None) + +s3_endpoint = (args.s3_endpoint or os.environ.get("S3_ENDPOINT_URL") or None) + +_read_stream, _write_stream = (partial( + stream_io.open_stream, + mode=mode, + s3_access_key_id=s3_access_key_id, + s3_secret_access_key=s3_secret_access_key, + s3_endpoint=s3_endpoint, +) for mode in ("rb", "wb+")) + +model_ref = args.model + +model_name = model_ref.split("/")[1] + +os.environ["MASTER_ADDR"] = "127.0.0.1" +os.environ["MASTER_PORT"] = "8080" + +init_distributed_environment(world_size=1, rank=0, local_rank=0) +initialize_model_parallel() + +keyfile = args.keyfile if args.keyfile else None + +if args.command == "serialize": + input_dir = args.serialized_directory.rstrip('/') + suffix = args.suffix if args.suffix else uuid.uuid4().hex + base_path = f"{input_dir}/vllm/{model_ref}/{suffix}" + model_path = f"{base_path}/model.tensors" + serialize() +elif args.command == "deserialize": + tensorizer_args = TensorizerArgs.from_cli_args(args) + model_path = args.path_to_tensors + deserialize() +else: + raise ValueError("Either serialize or deserialize must be specified.") diff --git a/tests/tensorizer_loader/test_tensorizer.py b/tests/tensorizer_loader/test_tensorizer.py new file mode 100644 index 0000000..df1db4e --- /dev/null +++ b/tests/tensorizer_loader/test_tensorizer.py @@ -0,0 +1,327 @@ +import gc +import json +import os +import subprocess +from unittest.mock import 
MagicMock, patch + +import openai +import pytest +import ray +import torch + +from tests.entrypoints.test_openai_server import ServerRunner +from vllm import SamplingParams +from vllm.model_executor.model_loader.tensorizer import ( + EncryptionParams, TensorizerConfig, TensorSerializer, + is_vllm_serialized_tensorizer, load_with_tensorizer, open_stream) + +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95, seed=0) + +model_ref = "facebook/opt-125m" +tensorize_model_for_testing_script = os.path.join( + os.path.dirname(__file__), "tensorize_vllm_model_for_testing.py") + + +def is_curl_installed(): + try: + subprocess.check_call(['curl', '--version']) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +@pytest.fixture(autouse=True) +def tensorizer_config(): + config = TensorizerConfig(tensorizer_uri="vllm", vllm_tensorized=True) + return config + + +@patch('vllm.model_executor.model_loader.tensorizer.TensorizerAgent') +def test_load_with_tensorizer(mock_agent, tensorizer_config): + mock_linear_method = MagicMock() + mock_agent_instance = mock_agent.return_value + mock_agent_instance.deserialize.return_value = MagicMock() + + result = load_with_tensorizer(tensorizer_config, + quant_method=mock_linear_method) + + mock_agent.assert_called_once_with(tensorizer_config, + quant_method=mock_linear_method) + mock_agent_instance.deserialize.assert_called_once() + assert result == mock_agent_instance.deserialize.return_value + + +def test_is_vllm_model_with_vllm_in_uri(tensorizer_config): + tensorizer_config.vllm_tensorized = True + + result = is_vllm_serialized_tensorizer(tensorizer_config) + + assert result is True + + +def test_is_vllm_model_without_vllm_in_uri(tensorizer_config): + tensorizer_config.vllm_tensorized = False + + result = 
is_vllm_serialized_tensorizer(tensorizer_config) + + assert result is False + + +def test_deserialized_vllm_model_has_same_outputs(vllm_runner, tmp_path): + vllm_model = vllm_runner(model_ref) + model_path = tmp_path / (model_ref + ".tensors") + outputs = vllm_model.generate(prompts, sampling_params) + model = (vllm_model.model.llm_engine.model_executor.driver_worker. + model_runner.model) + with open_stream(model_path, "wb+") as stream: + serializer = TensorSerializer(stream) + serializer.write_module(model) + del vllm_model, model + gc.collect() + torch.cuda.empty_cache() + loaded_vllm_model = vllm_runner( + model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig(tensorizer_uri=model_path, + num_readers=1, + vllm_tensorized=True), + ) + deserialized_outputs = loaded_vllm_model.generate(prompts, sampling_params) + + # Assumes SamplingParams being seeded ensures the outputs are deterministic + assert outputs == deserialized_outputs + + +@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed") +def test_can_deserialize_s3(vllm_runner): + model_ref = "EleutherAI/pythia-1.4b" + tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors" + + loaded_hf_model = vllm_runner(model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig( + tensorizer_uri=tensorized_path, + num_readers=1, + vllm_tensorized=False, + s3_endpoint="object.ord1.coreweave.com", + )) + + deserialized_outputs = loaded_hf_model.generate(prompts, sampling_params) + + assert deserialized_outputs + + +@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed") +def test_deserialized_encrypted_vllm_model_has_same_outputs( + vllm_runner, tmp_path): + vllm_model = vllm_runner(model_ref) + model_path = tmp_path / (model_ref + ".tensors") + key_path = tmp_path / (model_ref + ".key") + outputs = vllm_model.generate(prompts, sampling_params) + model = (vllm_model.model.llm_engine.model_executor.driver_worker. 
+ model_runner.model) + + encryption_params = EncryptionParams.random() + with open_stream(model_path, "wb+") as stream: + serializer = TensorSerializer(stream, encryption=encryption_params) + serializer.write_module(model) + with open_stream(key_path, "wb+") as stream: + stream.write(encryption_params.key) + del vllm_model, model + gc.collect() + torch.cuda.empty_cache() + loaded_vllm_model = vllm_runner(model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig( + tensorizer_uri=model_path, + encryption_keyfile=key_path, + num_readers=1, + vllm_tensorized=True)) + + deserialized_outputs = loaded_vllm_model.generate(prompts, sampling_params) + + # Assumes SamplingParams being seeded ensures the outputs are deterministic + assert outputs == deserialized_outputs + + +def test_deserialized_hf_model_has_same_outputs(hf_runner, vllm_runner, + tmp_path): + hf_model = hf_runner(model_ref) + model_path = tmp_path / (model_ref + ".tensors") + max_tokens = 50 + outputs = hf_model.generate_greedy(prompts, max_tokens=max_tokens) + with open_stream(model_path, "wb+") as stream: + serializer = TensorSerializer(stream) + serializer.write_module(hf_model.model) + del hf_model + gc.collect() + torch.cuda.empty_cache() + loaded_hf_model = vllm_runner(model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig( + tensorizer_uri=model_path, + num_readers=1, + vllm_tensorized=False)) + + deserialized_outputs = loaded_hf_model.generate_greedy( + prompts, max_tokens=max_tokens) + + assert outputs == deserialized_outputs + + +def test_vllm_model_can_load_with_lora(vllm_runner, tmp_path): + from huggingface_hub import snapshot_download + + from examples.multilora_inference import (create_test_prompts, + process_requests) + + model_ref = "meta-llama/Llama-2-7b-hf" + lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test") + test_prompts = create_test_prompts(lora_path) + + # Serialize model before deserializing and binding 
LoRA adapters + vllm_model = vllm_runner(model_ref, ) + model_path = tmp_path / (model_ref + ".tensors") + model = (vllm_model.model.llm_engine.model_executor.driver_worker. + model_runner.model) + with open_stream(model_path, "wb+") as stream: + serializer = TensorSerializer(stream) + serializer.write_module(model) + del vllm_model, model + gc.collect() + torch.cuda.empty_cache() + loaded_vllm_model = vllm_runner( + model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig( + tensorizer_uri=model_path, + num_readers=1, + vllm_tensorized=True, + ), + enable_lora=True, + max_loras=1, + max_lora_rank=8, + max_cpu_loras=2, + max_num_seqs=50, + max_model_len=1000, + ) + process_requests(loaded_vllm_model.model.llm_engine, test_prompts) + + assert loaded_vllm_model + + +def test_load_without_tensorizer_load_format(vllm_runner): + with pytest.raises(ValueError): + vllm_runner(model_ref, + model_loader_extra_config=TensorizerConfig( + tensorizer_uri="test", vllm_tensorized=False)) + + +@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed") +def test_tensorize_vllm_model(tmp_path): + # Test serialize command + serialize_args = [ + "python3", tensorize_model_for_testing_script, "--model", model_ref, + "--dtype", "float16", "serialize", "--serialized-directory", tmp_path, + "--suffix", "tests" + ] + result = subprocess.run(serialize_args, capture_output=True, text=True) + print(result.stdout) # Print the output of the serialize command + + assert result.returncode == 0, (f"Serialize command failed with output:" + f"\n{result.stdout}\n{result.stderr}") + + path_to_tensors = f"{tmp_path}/vllm/{model_ref}/tests/model.tensors" + + # Test deserialize command + deserialize_args = [ + "python3", tensorize_model_for_testing_script, "--model", model_ref, + "--dtype", "float16", "deserialize", "--path-to-tensors", + path_to_tensors + ] + result = subprocess.run(deserialize_args, capture_output=True, text=True) + assert result.returncode 
== 0, (f"Deserialize command failed with output:" + f"\n{result.stdout}\n{result.stderr}") + + +@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed") +def test_openai_apiserver_with_tensorizer(tmp_path): + ## Serialize model + serialize_args = [ + "python3", tensorize_model_for_testing_script, "--model", model_ref, + "--dtype", "float16", "serialize", "--serialized-directory", tmp_path, + "--suffix", "tests" + ] + result = subprocess.run(serialize_args, capture_output=True, text=True) + print(result.stdout) # Print the output of the serialize command + + assert result.returncode == 0, (f"Serialize command failed with output:" + f"\n{result.stdout}\n{result.stderr}") + + path_to_tensors = f"{tmp_path}/vllm/{model_ref}/tests/model.tensors" + model_loader_extra_config = { + "tensorizer_uri": path_to_tensors, + "vllm_tensorized": True + } + + ## Start OpenAI API server + openai_args = [ + "--model", model_ref, "--dtype", "float16", "--load-format", + "tensorizer", "--model-loader-extra-config", + json.dumps(model_loader_extra_config), "--port", "8000" + ] + + server = ServerRunner.remote(openai_args) + + assert ray.get(server.ready.remote()) + print("Server ready.") + + client = openai.OpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", + ) + completion = client.completions.create(model=model_ref, + prompt="Hello, my name is", + max_tokens=5, + temperature=0.0) + + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 1 + assert completion.choices[0].text is not None and len( + completion.choices[0].text) >= 5 + assert completion.choices[0].finish_reason == "length" + assert completion.usage == openai.types.CompletionUsage( + completion_tokens=5, prompt_tokens=6, total_tokens=11) + + +def test_raise_value_error_on_invalid_load_format(vllm_runner): + with pytest.raises(ValueError): + vllm_runner(model_ref, + load_format="safetensors", + 
model_loader_extra_config=TensorizerConfig( + tensorizer_uri="test", vllm_tensorized=False)) + + +def test_tensorizer_with_tp(vllm_runner): + with pytest.raises(ValueError): + model_ref = "EleutherAI/pythia-1.4b" + tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors" + + vllm_runner( + model_ref, + load_format="tensorizer", + model_loader_extra_config=TensorizerConfig( + tensorizer_uri=tensorized_path, + num_readers=1, + vllm_tensorized=False, + s3_endpoint="object.ord1.coreweave.com", + ), + tensor_parallel_size=2, + ) diff --git a/tests/test_cache_block_hashing.py b/tests/test_cache_block_hashing.py new file mode 100644 index 0000000..3b257ac --- /dev/null +++ b/tests/test_cache_block_hashing.py @@ -0,0 +1,93 @@ +"""Test hashing of cache blocks. + +Run `pytest tests/test_cache_block_hashing.py`. +""" +from typing import List, Optional + +import pytest + +from vllm.lora.request import LoRARequest +from vllm.sequence import Sequence +from vllm.transformers_utils.tokenizer_group import TokenizerGroup + +# Make two prefixes with different first blocks. +prefix_start = [("You are an expert"), ("You are a")] +prefix_common = ( + " school principal, skilled in effectively managing " + "faculty and staff. Draft 10-15 questions for a potential first grade " + "Head Teacher for my K-12, all-girls', independent school that emphasizes " + "community, joyful discovery, and life-long learning. The candidate is " + "coming in for a first-round panel interview for a 8th grade Math " + "teaching role. They have 5 years of previous teaching experience " + "as an assistant teacher at a co-ed, public school with experience " + "in middle school math teaching. Based on this, fulfill " + "the following: ") +prefixes = [start + prefix_common for start in prefix_start] + +# Sample prompts. +sample_prompts = [ + "Hello, my name is", "The president of the United States is", + "The capital of France is", "The future of AI is" +] + + +# Helper function. 
+def flatten_2d(li): + return [lss for ls in li for lss in ls] + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +@pytest.mark.parametrize("block_size", [16]) +@pytest.mark.parametrize("max_num_seqs", [256]) +@pytest.mark.parametrize("concurrent_lora_int_ids", + [[None], [1], [None, 1], [None, 1, 2], [1, 2]]) +def test_auto_prefix_caching(model: str, block_size: int, max_num_seqs: int, + concurrent_lora_int_ids: List[Optional[int]]): + + tokenizer = TokenizerGroup( + tokenizer_id="facebook/opt-125m", + enable_lora=False, + max_num_seqs=max_num_seqs, + max_input_length=None, + ) + + hashes = [] + + for prefix in prefixes: + for lora_int_id in concurrent_lora_int_ids: + lora_request = None + + if lora_int_id is not None: + lora_request = LoRARequest( + f"example_lora_{lora_int_id}", + lora_int_id, + f"example/path/to/lora_{lora_int_id}", + ) + + hashes.append([]) + prompts = [prefix + prompt for prompt in sample_prompts] + seq_id = 0 + for prompt in prompts: + hashes[-1].append([]) + prompt_token_ids = tokenizer.encode(prompt) + seq = Sequence(seq_id, prompt, prompt_token_ids, block_size, + tokenizer.tokenizer.eos_token_id, lora_request) + + num_blocks = len(prompt_token_ids) // block_size + for idx in range(num_blocks): + hashes[-1][-1].append(seq.hash_of_block(idx)) + + seq_id += 1 + + # Check that hashes made with two prefixes with different first blocks are + # different everywhere. + for hash0, hash1 in zip(flatten_2d(hashes[0]), flatten_2d(hashes[1])): + assert (hash0 != hash1) + + # Check that hashes of different prompts made with the same prefix are the + # same until the hashes that contain the prompt. 
+ for hash_pref in hashes: + same_hashes = [tuple(h[:-1]) for h in hash_pref] + different_hashes = [h[-1] for h in hash_pref] + assert (len(set(same_hashes)) == 1) + assert (len(set(different_hashes)) == len(different_hashes)) diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000..19db106 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,39 @@ +from vllm.config import ModelConfig + + +def test_get_sliding_window(): + TEST_SLIDING_WINDOW = 4096 + # Test that the sliding window is correctly computed. + # For Qwen1.5/Qwen2, get_sliding_window() should be None + # when use_sliding_window is False. + qwen2_model_config = ModelConfig( + "Qwen/Qwen1.5-7B", + "Qwen/Qwen1.5-7B", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + ) + + qwen2_model_config.hf_config.use_sliding_window = False + qwen2_model_config.hf_config.sliding_window = TEST_SLIDING_WINDOW + assert qwen2_model_config.get_sliding_window() is None + + qwen2_model_config.hf_config.use_sliding_window = True + assert qwen2_model_config.get_sliding_window() == TEST_SLIDING_WINDOW + + mistral_model_config = ModelConfig( + "mistralai/Mistral-7B-v0.1", + "mistralai/Mistral-7B-v0.1", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + ) + mistral_model_config.hf_config.sliding_window = None + assert mistral_model_config.get_sliding_window() is None + + mistral_model_config.hf_config.sliding_window = TEST_SLIDING_WINDOW + assert mistral_model_config.get_sliding_window() == TEST_SLIDING_WINDOW \ No newline at end of file diff --git a/tests/test_logger.py b/tests/test_logger.py new file mode 100644 index 0000000..74f1125 --- /dev/null +++ b/tests/test_logger.py @@ -0,0 +1,214 @@ +import json +import logging +import os +import sys +import tempfile +from json.decoder import JSONDecodeError +from tempfile import NamedTemporaryFile +from typing import Any +from unittest.mock import patch 
+from uuid import uuid4 + +import pytest + +from vllm.logger import (_DATE_FORMAT, _FORMAT, _configure_vllm_root_logger, + enable_trace_function_call, init_logger) +from vllm.logging import NewLineFormatter + + +def f1(x): + return f2(x) + + +def f2(x): + return x + + +def test_trace_function_call(): + fd, path = tempfile.mkstemp() + cur_dir = os.path.dirname(__file__) + enable_trace_function_call(path, cur_dir) + f1(1) + with open(path, 'r') as f: + content = f.read() + + assert "f1" in content + assert "f2" in content + sys.settrace(None) + os.remove(path) + + +def test_default_vllm_root_logger_configuration(): + """This test presumes that VLLM_CONFIGURE_LOGGING (default: True) and + VLLM_LOGGING_CONFIG_PATH (default: None) are not configured and default + behavior is activated.""" + logger = logging.getLogger("vllm") + assert logger.level == logging.DEBUG + assert not logger.propagate + + handler = logger.handlers[0] + assert handler.stream == sys.stdout + assert handler.level == logging.INFO + + formatter = handler.formatter + assert formatter is not None + assert isinstance(formatter, NewLineFormatter) + assert formatter._fmt == _FORMAT + assert formatter.datefmt == _DATE_FORMAT + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 1) +@patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", None) +def test_descendent_loggers_depend_on_and_propagate_logs_to_root_logger(): + """This test presumes that VLLM_CONFIGURE_LOGGING (default: True) and + VLLM_LOGGING_CONFIG_PATH (default: None) are not configured and default + behavior is activated.""" + root_logger = logging.getLogger("vllm") + root_handler = root_logger.handlers[0] + + unique_name = f"vllm.{uuid4()}" + logger = init_logger(unique_name) + assert logger.name == unique_name + assert logger.level == logging.NOTSET + assert not logger.handlers + assert logger.propagate + + message = "Hello, world!" 
+ with patch.object(root_handler, "emit") as root_handle_mock: + logger.info(message) + + root_handle_mock.assert_called_once() + _, call_args, _ = root_handle_mock.mock_calls[0] + log_record = call_args[0] + assert unique_name == log_record.name + assert message == log_record.msg + assert message == log_record.msg + assert log_record.levelno == logging.INFO + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 0) +@patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", None) +def test_logger_configuring_can_be_disabled(): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however mocks are used to ensure no changes in behavior or + configuration occur.""" + + with patch("logging.config.dictConfig") as dict_config_mock: + _configure_vllm_root_logger() + dict_config_mock.assert_not_called() + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 1) +@patch( + "vllm.logger.VLLM_LOGGING_CONFIG_PATH", + "/if/there/is/a/file/here/then/you/did/this/to/yourself.json", +) +def test_an_error_is_raised_when_custom_logging_config_file_does_not_exist(): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however it fails before any change in behavior or + configuration occurs.""" + with pytest.raises(RuntimeError) as ex_info: + _configure_vllm_root_logger() + assert ex_info.type == RuntimeError + assert "File does not exist" in str(ex_info) + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 1) +def test_an_error_is_raised_when_custom_logging_config_is_invalid_json(): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however it fails before any change in behavior or + configuration occurs.""" + with NamedTemporaryFile(encoding="utf-8", mode="w") as logging_config_file: + logging_config_file.write("---\nloggers: []\nversion: 1") + logging_config_file.flush() + with patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", + logging_config_file.name): + with 
pytest.raises(JSONDecodeError) as ex_info: + _configure_vllm_root_logger() + assert ex_info.type == JSONDecodeError + assert "Expecting value" in str(ex_info) + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 1) +@pytest.mark.parametrize("unexpected_config", ( + "Invalid string", + [{ + "version": 1, + "loggers": [] + }], + 0, +)) +def test_an_error_is_raised_when_custom_logging_config_is_unexpected_json( + unexpected_config: Any): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however it fails before any change in behavior or + configuration occurs.""" + with NamedTemporaryFile(encoding="utf-8", mode="w") as logging_config_file: + logging_config_file.write(json.dumps(unexpected_config)) + logging_config_file.flush() + with patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", + logging_config_file.name): + with pytest.raises(ValueError) as ex_info: + _configure_vllm_root_logger() + assert ex_info.type == ValueError + assert "Invalid logging config. 
Expected Dict, got" in str(ex_info) + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 1) +def test_custom_logging_config_is_parsed_and_used_when_provided(): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however mocks are used to ensure no changes in behavior or + configuration occur.""" + valid_logging_config = { + "loggers": { + "vllm.test_logger.logger": { + "handlers": [], + "propagate": False, + } + }, + "version": 1 + } + with NamedTemporaryFile(encoding="utf-8", mode="w") as logging_config_file: + logging_config_file.write(json.dumps(valid_logging_config)) + logging_config_file.flush() + with patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", + logging_config_file.name), patch( + "logging.config.dictConfig") as dict_config_mock: + _configure_vllm_root_logger() + assert dict_config_mock.called_with(valid_logging_config) + + +@patch("vllm.logger.VLLM_CONFIGURE_LOGGING", 0) +def test_custom_logging_config_causes_an_error_if_configure_logging_is_off(): + """This test calls _configure_vllm_root_logger again to test custom logging + config behavior, however mocks are used to ensure no changes in behavior or + configuration occur.""" + valid_logging_config = { + "loggers": { + "vllm.test_logger.logger": { + "handlers": [], + } + }, + "version": 1 + } + with NamedTemporaryFile(encoding="utf-8", mode="w") as logging_config_file: + logging_config_file.write(json.dumps(valid_logging_config)) + logging_config_file.flush() + with patch("vllm.logger.VLLM_LOGGING_CONFIG_PATH", + logging_config_file.name): + with pytest.raises(RuntimeError) as ex_info: + _configure_vllm_root_logger() + assert ex_info.type is RuntimeError + expected_message_snippet = ( + "VLLM_CONFIGURE_LOGGING evaluated to false, but " + "VLLM_LOGGING_CONFIG_PATH was given.") + assert expected_message_snippet in str(ex_info) + + # Remember! The root logger is assumed to have been configured as + # though VLLM_CONFIGURE_LOGGING=1 and VLLM_LOGGING_CONFIG_PATH=None. 
+ root_logger = logging.getLogger("vllm") + other_logger_name = f"vllm.test_logger.{uuid4()}" + other_logger = init_logger(other_logger_name) + assert other_logger.handlers != root_logger.handlers + assert other_logger.level != root_logger.level + assert other_logger.propagate diff --git a/tests/test_logits_processor.py b/tests/test_logits_processor.py new file mode 100644 index 0000000..179e8d2 --- /dev/null +++ b/tests/test_logits_processor.py @@ -0,0 +1,103 @@ +import random +from typing import Tuple +from unittest.mock import patch + +import pytest +import torch + +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.utils import set_random_seed +from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata +from vllm.worker.model_runner import ModelRunner + + +class MockLogitsProcessor(LogitsProcessor): + + def __init__(self, vocab_size: int, scale: float, + fake_logits: torch.Tensor): + super().__init__(vocab_size=vocab_size, scale=scale) + self.fake_logits = fake_logits.clone() + + def forward(self, *args, **kwargs): + with patch( + "vllm.model_executor.layers.logits_processor._prune_hidden_states", + lambda x, y: x + ), patch( + "vllm.model_executor.layers.logits_processor.LogitsProcessor._get_logits", + lambda *args, **kwargs: self.fake_logits): + return super().forward(*args, **kwargs) + + +def _prepare_test( + batch_size: int +) -> Tuple[torch.Tensor, torch.Tensor, MockLogitsProcessor, ModelRunner]: + vocab_size = 32000 + input_tensor = torch.rand((batch_size, 1024), dtype=torch.float16) + fake_logits = torch.full((batch_size, vocab_size), + 1e-2, + dtype=input_tensor.dtype) + logits_processor = MockLogitsProcessor(32000, 0.5, fake_logits) + model_runner = ModelRunner(model_config=None, + parallel_config=None, + scheduler_config=None, + device_config=None, + load_config=None, + lora_config=None) + return input_tensor, 
fake_logits, logits_processor, model_runner + + +RANDOM_SEEDS = list(range(128)) +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + + +@pytest.mark.parametrize("seed", RANDOM_SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_logits_processors(seed: int, device: str): + set_random_seed(seed) + torch.set_default_device(device) + batch_size = random.randint(1, 256) + input_tensor, fake_logits, logits_processor, model_runner = _prepare_test( + batch_size) + + # This sample logits processor gives infinite score to the i-th token, + # where i is the length of the input sequence. + # We therefore expect the output token sequence to be [0, 1, 2, ...] + def pick_ith(token_ids, logits): + logits[len(token_ids)] = float("inf") + return logits + + seq_group_metadata_list = [] + seq_lens = [] + for i in range(batch_size): + seq_group_metadata_list.append( + SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: SequenceData([1, 2, 3])}, + sampling_params=SamplingParams(temperature=0, + logits_processors=[pick_ith]), + block_tables={0: [1]}, + )) + seq_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) + + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=model_runner.device, + pin_memory=model_runner.pin_memory) + logits_processor_output = logits_processor( + embedding=None, + hidden_states=input_tensor, + sampling_metadata=sampling_metadata) + + assert torch.isinf(logits_processor_output[:, 0]).all() + + fake_logits *= logits_processor.scale + assert torch.allclose(logits_processor_output[:, 1], fake_logits[:, 1], + 1e-4) + + del model_runner diff --git a/tests/test_regression.py b/tests/test_regression.py new file mode 100644 index 0000000..cb68e9e --- /dev/null +++ b/tests/test_regression.py @@ -0,0 +1,58 @@ +"""Containing tests that check for regressions in vLLM's behavior. 
+ +It should include tests that are reported by users and making sure they +will never happen again. + +""" +import gc + +import torch + +from vllm import LLM, SamplingParams + + +def test_duplicated_ignored_sequence_group(): + """https://github.com/vllm-project/vllm/issues/1655""" + + sampling_params = SamplingParams(temperature=0.01, + top_p=0.1, + max_tokens=256) + llm = LLM(model="facebook/opt-125m", + max_num_batched_tokens=4096, + tensor_parallel_size=1) + prompts = ["This is a short prompt", "This is a very long prompt " * 1000] + outputs = llm.generate(prompts, sampling_params=sampling_params) + + assert len(prompts) == len(outputs) + + +def test_max_tokens_none(): + sampling_params = SamplingParams(temperature=0.01, + top_p=0.1, + max_tokens=None) + llm = LLM(model="facebook/opt-125m", + max_num_batched_tokens=4096, + tensor_parallel_size=1) + prompts = ["Just say hello!"] + outputs = llm.generate(prompts, sampling_params=sampling_params) + + assert len(prompts) == len(outputs) + + +def test_gc(): + llm = LLM("facebook/opt-125m", enforce_eager=True) + del llm + + gc.collect() + torch.cuda.empty_cache() + + # The memory allocated for model and KV cache should be released. + # The memory allocated for PyTorch and others should be less than 50MB. + # Usually, it's around 10MB. + allocated = torch.cuda.memory_allocated() + assert allocated < 50 * 1024 * 1024 + + +if __name__ == "__main__": + import pytest + pytest.main([__file__]) diff --git a/tests/test_sampling_params.py b/tests/test_sampling_params.py new file mode 100644 index 0000000..01cbe0c --- /dev/null +++ b/tests/test_sampling_params.py @@ -0,0 +1,13 @@ +"""Tests for the SamplingParams class. 
+""" +from vllm import SamplingParams + + +def test_max_tokens_none(): + """max_tokens=None should be allowed""" + SamplingParams(temperature=0.01, top_p=0.1, max_tokens=None) + + +if __name__ == "__main__": + import pytest + pytest.main([__file__]) diff --git a/tests/test_sequence.py b/tests/test_sequence.py new file mode 100644 index 0000000..b16bdc1 --- /dev/null +++ b/tests/test_sequence.py @@ -0,0 +1,124 @@ +import time +from typing import Optional + +import pytest + +from vllm import SamplingParams +from vllm.lora.request import LoRARequest +from vllm.sequence import (SamplerOutput, Sequence, SequenceData, + SequenceGroup, SequenceGroupOutput, SequenceOutput) + + +def create_dummy_prompt( + request_id: str, + prompt_length: int, + block_size: Optional[int] = None, + lora_request: Optional[LoRARequest] = None, + use_beam_search: bool = False, + best_of: int = 1, +) -> SequenceGroup: + if not block_size: + block_size = prompt_length + + # Create dummy prompt sequence with tokens 0...block_size-1 + # and prompt "0 ... block_size". 
+ prompt_tokens = list(range(prompt_length)) + prompt_str = " ".join([str(t) for t in prompt_tokens]) + prompt = Sequence(int(request_id), prompt_str, prompt_tokens, block_size) + seq_group = SequenceGroup( + request_id, [prompt], + SamplingParams(use_beam_search=use_beam_search, best_of=best_of), + time.time(), lora_request) + + return seq_group + + +@pytest.fixture +def sample_outputs(): + return [ + SequenceGroupOutput(samples=[ + SequenceOutput(parent_seq_id=0, output_token=i, logprobs={}) + ], + prompt_logprobs=None) for i in range(5) + ] + + +@pytest.fixture +def sampler_output(sample_outputs): + return SamplerOutput(outputs=sample_outputs) + + +def test_sampler_output_initialization(sampler_output, sample_outputs): + assert len(sampler_output) == len(sample_outputs) + assert sampler_output.sampled_token_probs is None + assert sampler_output.sampled_token_ids is None + assert sampler_output.spec_decode_worker_metrics is None + + +def test_sampler_output_getitem(sampler_output, sample_outputs): + assert sampler_output[2] == sample_outputs[2] + + +def test_sampler_output_setitem(sampler_output): + new_output = SequenceGroupOutput(samples=[ + SequenceOutput(parent_seq_id=0, output_token=99, logprobs={}) + ], + prompt_logprobs=None) + sampler_output[2] = new_output + assert sampler_output[2] == new_output + + +def test_sampler_output_len(sampler_output, sample_outputs): + assert len(sampler_output) == len(sample_outputs) + + +def test_sampler_output_eq(sample_outputs): + sampler_output1 = SamplerOutput(outputs=sample_outputs) + sampler_output2 = SamplerOutput(outputs=sample_outputs.copy()) + sampler_output3 = SamplerOutput(outputs=sample_outputs[:-1]) + assert sampler_output1 == sampler_output2 + assert sampler_output1 != sampler_output3 + + +def test_sequence_data_prefill(): + seq_data = SequenceData(prompt_token_ids=[1, 2, 3, 4]) + assert seq_data.get_num_uncomputed_tokens() == 4 + assert seq_data.get_num_computed_tokens() == 0 + # advance by 2 + 
seq_data.update_num_computed_tokens(2) + assert seq_data.get_num_uncomputed_tokens() == 2 + assert seq_data.get_num_computed_tokens() == 2 + + # advance by 1 + seq_data.update_num_computed_tokens(1) + assert seq_data.get_num_uncomputed_tokens() == 1 + assert seq_data.get_num_computed_tokens() == 3 + + # append tokens and reset, simulating recompute + seq_data.append_token_id(1, logprob=0.0) + seq_data.reset_state_for_recompute() + assert seq_data.get_num_uncomputed_tokens() == 5 + assert seq_data.get_num_computed_tokens() == 0 + + +def test_sequence_group_stage(): + seq_group = create_dummy_prompt("1", 12) + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(6) + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(5) + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(1) + assert seq_group.is_prefill() is False + seqs = seq_group.get_seqs() + assert len(seqs) == 1 + seqs[0].data.append_token_id(1, logprob=0.0) + for seq in seq_group.get_seqs(): + seq.reset_state_for_recompute() + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(5) + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(7) + assert seq_group.is_prefill() is True + seq_group.update_num_computed_tokens(1) + assert seq_group.is_prefill() is False diff --git a/tests/tokenization/__init__.py b/tests/tokenization/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/tokenization/test_cached_tokenizer.py b/tests/tokenization/test_cached_tokenizer.py new file mode 100644 index 0000000..4c8238f --- /dev/null +++ b/tests/tokenization/test_cached_tokenizer.py @@ -0,0 +1,22 @@ +from copy import deepcopy + +from transformers import AutoTokenizer + +from vllm.transformers_utils.tokenizer import get_cached_tokenizer + + +def test_cached_tokenizer(): + reference_tokenizer = AutoTokenizer.from_pretrained("gpt2") + reference_tokenizer.add_special_tokens({"cls_token": 
""}) + reference_tokenizer.add_special_tokens( + {"additional_special_tokens": [""]}) + cached_tokenizer = get_cached_tokenizer(deepcopy(reference_tokenizer)) + + assert reference_tokenizer.encode("prompt") == cached_tokenizer.encode( + "prompt") + assert set(reference_tokenizer.all_special_ids) == set( + cached_tokenizer.all_special_ids) + assert set(reference_tokenizer.all_special_tokens) == set( + cached_tokenizer.all_special_tokens) + assert set(reference_tokenizer.all_special_tokens_extended) == set( + cached_tokenizer.all_special_tokens_extended) diff --git a/tests/tokenization/test_detokenize.py b/tests/tokenization/test_detokenize.py new file mode 100644 index 0000000..9bc9bec --- /dev/null +++ b/tests/tokenization/test_detokenize.py @@ -0,0 +1,208 @@ +from typing import Dict, List + +import pytest +from transformers import AutoTokenizer + +from vllm.sequence import Logprob, SamplingParams, Sequence, SequenceGroup +from vllm.transformers_utils.detokenizer import (Detokenizer, + detokenize_incrementally) +from vllm.transformers_utils.tokenizer_group import get_tokenizer_group + +TRUTH = [ + "Hello here, this is a simple test", + "vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. 
It is designed to be used in production environments, where inference and serving", # noqa + "我很感谢你的热情" +] +TOKENIZERS = [ + "facebook/opt-125m", + "gpt2", + "bigcode/tiny_starcoder_py", + "EleutherAI/gpt-j-6b", + "EleutherAI/pythia-70m", + "bigscience/bloom-560m", + "mosaicml/mpt-7b", + "tiiuae/falcon-7b", + "meta-llama/Llama-2-7b-hf", + "codellama/CodeLlama-7b-hf", +] + + +def _run_incremental_decode(tokenizer, all_input_ids, + skip_special_tokens: bool, starting_index: int): + decoded_text = "" + offset = 0 + token_offset = 0 + prev_tokens = None + for i in range(starting_index, len(all_input_ids)): + new_tokens, text, offset, token_offset = detokenize_incrementally( + tokenizer, + all_input_ids[:i + 1], + prev_tokens, + offset, + token_offset, + skip_special_tokens=skip_special_tokens) + decoded_text += text + if prev_tokens is None: + prev_tokens = new_tokens + else: + prev_tokens += new_tokens + return decoded_text + + +@pytest.mark.parametrize("truth", TRUTH) +@pytest.mark.parametrize("with_prompt", [True, False]) +@pytest.mark.parametrize("tokenizer_id", TOKENIZERS) +@pytest.mark.parametrize("skip_special_tokens", (True, False)) +def test_decode_streaming(tokenizer_id, truth, with_prompt, + skip_special_tokens): + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) + if with_prompt: + truth_tokens = tokenizer(truth, add_special_tokens=False)["input_ids"] + prompt_input_ids = truth_tokens[:len(truth) // 2] + generated_input_ids = truth_tokens[len(truth) // 2:] + all_input_ids = prompt_input_ids + generated_input_ids + starting_index = len(prompt_input_ids) + prompt = tokenizer.decode(prompt_input_ids, + skip_special_tokens=skip_special_tokens) + generated = truth[len(prompt):] + else: + generated = truth + starting_index = 0 + all_input_ids = tokenizer(truth, add_special_tokens=False)["input_ids"] + if skip_special_tokens: + if tokenizer.bos_token_id is not None: + all_input_ids = [tokenizer.bos_token_id] + all_input_ids + starting_index += 1 + 
all_input_ids = all_input_ids + [tokenizer.eos_token_id] + + decoded_text = _run_incremental_decode( + tokenizer, + all_input_ids, + skip_special_tokens=skip_special_tokens, + starting_index=starting_index) + + assert decoded_text == generated + + decoded_text = _run_incremental_decode( + tokenizer, [len(tokenizer)], + skip_special_tokens=skip_special_tokens, + starting_index=starting_index) + + assert decoded_text == '' + + +@pytest.fixture +def detokenizer(tokenizer_name: str) -> Detokenizer: + init_kwargs = dict( + tokenizer_id=tokenizer_name, + enable_lora=False, + max_num_seqs=100, + max_input_length=None, + tokenizer_mode="auto", + trust_remote_code=False, + revision=None, + ) + + tokenizer_group = get_tokenizer_group( + None, + **init_kwargs, + ) + + return Detokenizer(tokenizer_group) + + +@pytest.fixture(name="complete_sequence_token_ids") +def create_complete_sequence_token_ids(complete_sequence: str, + tokenizer_name: str) -> List[int]: + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + complete_sequence_token_ids = tokenizer(complete_sequence)["input_ids"] + return complete_sequence_token_ids + + +def create_sequence(prompt_token_ids=None): + prompt_token_ids = prompt_token_ids or [1] + return Sequence( + seq_id=0, + prompt="", + prompt_token_ids=prompt_token_ids, + block_size=16, + ) + + +def create_dummy_logprobs( + complete_sequence_token_ids: List[int]) -> List[Dict[int, Logprob]]: + return [{ + token_id: Logprob(logprob=0.0), + token_id + 1: Logprob(logprob=0.1) + } for token_id in complete_sequence_token_ids] + + +@pytest.mark.parametrize("complete_sequence", TRUTH) +@pytest.mark.parametrize("tokenizer_name", TOKENIZERS) +@pytest.mark.parametrize("skip_special_tokens", [True, False]) +def test_decode_sequence_logprobs(complete_sequence: str, + complete_sequence_token_ids: List[int], + detokenizer: Detokenizer, + skip_special_tokens: bool): + """Verify Detokenizer decodes logprobs correctly.""" + sampling_params = 
SamplingParams(skip_special_tokens=skip_special_tokens, + logprobs=2) + + # Run sequentially. + seq = create_sequence() + dummy_logprobs = create_dummy_logprobs(complete_sequence_token_ids) + sequential_logprobs_text_chosen_token = [] + sequential_logprobs_text_other_token = [] + for new_token, logprobs in zip(complete_sequence_token_ids, + dummy_logprobs): + seq.append_token_id(new_token, logprobs) + detokenizer.decode_sequence_inplace(seq, sampling_params) + sequential_logprobs_text_chosen_token.append( + seq.output_logprobs[-1][new_token].decoded_token) + sequential_logprobs_text_other_token.append( + seq.output_logprobs[-1][new_token + 1].decoded_token) + sequential_result = seq.output_text + + assert sequential_result == "".join(sequential_logprobs_text_chosen_token) + assert sequential_result != "".join(sequential_logprobs_text_other_token) + + if skip_special_tokens: + # Text for logprobs for the chosen token should be the same as the + # generated text. Note that this will only be true if we skip + # special tokens. + assert sequential_result == complete_sequence + + +@pytest.mark.parametrize("complete_sequence", TRUTH) +@pytest.mark.parametrize("tokenizer_name", TOKENIZERS) +@pytest.mark.parametrize("skip_special_tokens", [True]) +def test_decode_prompt_logprobs(complete_sequence: str, + complete_sequence_token_ids: List[int], + detokenizer: Detokenizer, + skip_special_tokens: bool): + """Verify Detokenizer decodes prompt logprobs correctly.""" + sampling_params = SamplingParams(skip_special_tokens=skip_special_tokens, + prompt_logprobs=1) + + # Run sequentially. 
+ seq = create_sequence(complete_sequence_token_ids) + seq_group = SequenceGroup(request_id="1", + seqs=[seq], + sampling_params=sampling_params, + arrival_time=0.0) + dummy_logprobs = create_dummy_logprobs(complete_sequence_token_ids) + detokenizer.decode_prompt_logprobs_inplace(seq_group, dummy_logprobs) + decoded_prompt_logprobs = dummy_logprobs + + if skip_special_tokens: + # Text for logprobs for the chosen token should be the same as the + # prompt text. Note that this will only be true if we skip + # special tokens. + assert complete_sequence == "".join([ + logprobs[token_id].decoded_token for token_id, logprobs in zip( + complete_sequence_token_ids, decoded_prompt_logprobs) + ]) + assert complete_sequence != "".join([ + logprobs[token_id + 1].decoded_token for token_id, logprobs in zip( + complete_sequence_token_ids, decoded_prompt_logprobs) + ]) diff --git a/tests/tokenization/test_tokenizer.py b/tests/tokenization/test_tokenizer.py new file mode 100644 index 0000000..8db7204 --- /dev/null +++ b/tests/tokenization/test_tokenizer.py @@ -0,0 +1,20 @@ +import pytest +from transformers import PreTrainedTokenizerBase + +from vllm.transformers_utils.tokenizer import get_tokenizer + +TOKENIZER_NAMES = [ + "facebook/opt-125m", + "gpt2", +] + + +@pytest.mark.parametrize("tokenizer_name", TOKENIZER_NAMES) +def test_tokenizer_revision(tokenizer_name: str): + # Assume that "main" branch always exists + tokenizer = get_tokenizer(tokenizer_name, revision="main") + assert isinstance(tokenizer, PreTrainedTokenizerBase) + + # Assume that "never" branch always does not exist + with pytest.raises(OSError, match='not a valid git identifier'): + get_tokenizer(tokenizer_name, revision="never") diff --git a/tests/tokenization/test_tokenizer_group.py b/tests/tokenization/test_tokenizer_group.py new file mode 100644 index 0000000..31571db --- /dev/null +++ b/tests/tokenization/test_tokenizer_group.py @@ -0,0 +1,102 @@ +import asyncio +import os +from unittest.mock import patch + 
+import pytest +from transformers import AutoTokenizer, PreTrainedTokenizerBase + +from vllm.transformers_utils.tokenizer_group import get_tokenizer_group +from vllm.transformers_utils.tokenizer_group.ray_tokenizer_group import ( + RayTokenizerGroupPool) +from vllm.transformers_utils.tokenizer_group.tokenizer_group import ( + TokenizerGroup) + +from ..conftest import get_tokenizer_pool_config + + +@pytest.mark.asyncio +@pytest.mark.parametrize("tokenizer_group_type", [None, "ray"]) +async def test_tokenizer_group(tokenizer_group_type): + reference_tokenizer = AutoTokenizer.from_pretrained("gpt2") + tokenizer_group = get_tokenizer_group( + get_tokenizer_pool_config(tokenizer_group_type), + tokenizer_id="gpt2", + enable_lora=False, + max_num_seqs=1, + max_input_length=None, + ) + assert reference_tokenizer.encode("prompt") == tokenizer_group.encode( + request_id="request_id", prompt="prompt", lora_request=None) + assert reference_tokenizer.encode( + "prompt") == await tokenizer_group.encode_async( + request_id="request_id", prompt="prompt", lora_request=None) + assert isinstance(tokenizer_group.get_lora_tokenizer(None), + PreTrainedTokenizerBase) + assert tokenizer_group.get_lora_tokenizer( + None) == await tokenizer_group.get_lora_tokenizer_async(None) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("tokenizer_group_type", ["ray"]) +async def test_tokenizer_group_pool(tokenizer_group_type): + reference_tokenizer = AutoTokenizer.from_pretrained("gpt2") + tokenizer_group_pool = get_tokenizer_group( + get_tokenizer_pool_config(tokenizer_group_type), + tokenizer_id="gpt2", + enable_lora=False, + max_num_seqs=1, + max_input_length=None, + ) + # Send multiple requests to the tokenizer group pool + # (more than the pool size) + # and check that all requests are processed correctly. 
+ num_requests = tokenizer_group_pool.pool_size * 5 + requests = [ + tokenizer_group_pool.encode_async(request_id=str(i), + prompt=f"prompt {i}", + lora_request=None) + for i in range(num_requests) + ] + results = await asyncio.gather(*requests) + expected_results = [ + reference_tokenizer.encode(f"prompt {i}") for i in range(num_requests) + ] + assert results == expected_results + + +@pytest.mark.asyncio +@pytest.mark.parametrize("tokenizer_group_type", ["ray"]) +async def test_tokenizer_group_ray_pool_env_var_propagation( + tokenizer_group_type): + """Test that env vars from caller process are propagated to + tokenizer Ray actors.""" + env_var = "MY_ENV_VAR" + + class EnvVarCheckerTokenizerGroup(TokenizerGroup): + + def ping(self): + assert os.environ.get(env_var) == "1" + return super().ping() + + class EnvVarCheckerRayTokenizerGroupPool(RayTokenizerGroupPool): + _worker_cls = EnvVarCheckerTokenizerGroup + + tokenizer_pool_config = get_tokenizer_pool_config(tokenizer_group_type) + tokenizer_pool = EnvVarCheckerRayTokenizerGroupPool.from_config( + tokenizer_pool_config, + tokenizer_id="gpt2", + enable_lora=False, + max_num_seqs=1, + max_input_length=None) + with pytest.raises(AssertionError): + tokenizer_pool.ping() + + with patch.dict(os.environ, {env_var: "1"}): + tokenizer_pool_config = get_tokenizer_pool_config(tokenizer_group_type) + tokenizer_pool = EnvVarCheckerRayTokenizerGroupPool.from_config( + tokenizer_pool_config, + tokenizer_id="gpt2", + enable_lora=False, + max_num_seqs=1, + max_input_length=None) + tokenizer_pool.ping() diff --git a/tests/worker/__init__.py b/tests/worker/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/worker/test_model_runner.py b/tests/worker/test_model_runner.py new file mode 100644 index 0000000..e7975d0 --- /dev/null +++ b/tests/worker/test_model_runner.py @@ -0,0 +1,357 @@ +import pytest +import torch + +from vllm.config import ModelConfig, SchedulerConfig +from vllm.distributed.parallel_state 
import init_distributed_environment +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata +from vllm.utils import get_open_port +from vllm.worker.model_runner import ModelRunner, _get_graph_batch_size + + +@pytest.mark.parametrize("batch_size", list(range(1, 257))) +def test_prepare_prompt(batch_size): + scheduler_config = SchedulerConfig(100000, + 100000, + 100000, + enable_chunked_prefill=False) + model_runner = ModelRunner(model_config=None, + parallel_config=None, + scheduler_config=scheduler_config, + device_config=None, + load_config=None, + lora_config=None) + model_runner.set_block_size(16) + + seq_lens = [] + seq_group_metadata_list = [] + block_tables = {0: [1]} + for i in range(batch_size): + # make sure all tokens fit into one block + seq_len = i % (model_runner.block_size - 1) + 1 + seq_lens.append(seq_len) + seq_data = SequenceData(list(range(seq_len))) + seq_group_metadata = SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: seq_data}, + sampling_params=SamplingParams(temperature=0), + block_tables=block_tables, + ) + assert seq_group_metadata.token_chunk_size == seq_data.get_len() + seq_group_metadata_list.append(seq_group_metadata) + + expected_selected_token_indices = [] + selected_token_start_idx = 0 + for seq_len in seq_lens: + expected_selected_token_indices.append(selected_token_start_idx + + seq_len - 1) + selected_token_start_idx += seq_len + (input_tokens, input_positions, attn_metadata, return_seq_lens, _, _, _, _, + _, slot_mapping) = (model_runner._prepare_prompt(seq_group_metadata_list)) + assert return_seq_lens == seq_lens + assert len(slot_mapping) == len(input_tokens) + + # Verify input metadata is correct for prompts. 
+ device = model_runner.device + assert attn_metadata.is_prompt is True + assert torch.allclose( + attn_metadata.seq_lens_tensor, + torch.tensor(seq_lens, device=device, dtype=torch.int)) + assert attn_metadata.seq_lens == seq_lens + assert attn_metadata.max_seq_len == max(seq_lens) + + # Test subquery start locs. + start_idx = 0 + start_loc = [start_idx] + for seq_len in seq_lens: + start_idx += seq_len + start_loc.append(start_idx) + assert torch.allclose( + attn_metadata.subquery_start_loc, + torch.tensor(start_loc, dtype=torch.int32, device=device)) + + # Test seq start locs. Note that for normal prefill it is + # equivalent to subquery_start_loc. + start_idx = 0 + seq_start_loc = [start_idx] + for seq_len in seq_lens: + start_idx += seq_len + seq_start_loc.append(start_idx) + + # NOTE(review): the assertion below compares against start_loc rather + # than the seq_start_loc list built just above; the two are equal for + # non-chunked prefill, but seq_start_loc was likely intended — confirm. + assert torch.allclose( + attn_metadata.seq_start_loc, + torch.tensor(start_loc, dtype=torch.int32, device=device)) + assert torch.allclose( + attn_metadata.context_lens_tensor, + torch.zeros(attn_metadata.context_lens_tensor.shape[0], + dtype=torch.int, + device=device)) + + expected = torch.tensor([[] for _ in range(len(seq_group_metadata_list))], + dtype=torch.int32, + device=model_runner.device) + assert torch.allclose(attn_metadata.block_tables, expected) + # Cuda graph should not be used for prefill. 
+ assert attn_metadata.use_cuda_graph is False + + assert len(input_tokens) == sum(seq_lens) + assert len(input_positions) == sum(seq_lens) + torch.testing.assert_close(input_tokens, input_positions) + + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=model_runner.device, + pin_memory=model_runner.pin_memory) + assert len(input_tokens) == sum(seq_lens) + assert len(input_positions) == sum(seq_lens) + actual = sampling_metadata.selected_token_indices + expected = torch.tensor(expected_selected_token_indices, + device=actual.device, + dtype=actual.dtype) + torch.testing.assert_close(actual, expected) + assert input_tokens == input_positions + + actual = sampling_metadata.selected_token_indices + expected = torch.tensor(expected_selected_token_indices, + device=actual.device, + dtype=actual.dtype) + torch.testing.assert_close(actual, expected) + + +@pytest.mark.parametrize("batch_size", list(range(1, 257))) +def test_prepare_decode_cuda_graph(batch_size): + model_config = ModelConfig( + "facebook/opt-125m", + "facebook/opt-125m", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + enforce_eager=False, + ) + scheduler_config = SchedulerConfig(100000, + 100000, + 100000, + enable_chunked_prefill=False) + model_runner = ModelRunner(model_config=model_config, + parallel_config=None, + scheduler_config=scheduler_config, + device_config=None, + load_config=None, + lora_config=None) + model_runner.set_block_size(16) + + seq_lens = [] + seq_group_metadata_list = [] + for i in range(batch_size): + # make sure all tokens fit into one block + seq_len = i % (model_runner.block_size - 1) + 1 + seq_lens.append(seq_len) + seq_data = list(range(seq_len)) + seq_data = SequenceData(seq_data) + seq_group_metadata = SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=False, + seq_data={0: seq_data}, + sampling_params=SamplingParams(temperature=0), + 
block_tables={0: [1]}, + ) + assert seq_group_metadata.token_chunk_size == 1 + seq_group_metadata_list.append(seq_group_metadata) + + input_tokens, input_positions, attn_metadata, _, _, _, slot_mapping = ( + model_runner._prepare_decode(seq_group_metadata_list)) + assert len(slot_mapping) == len(input_tokens) + + expected_bs = _get_graph_batch_size(len(seq_group_metadata_list)) + # Verify input metadata is correct for decode. + device = model_runner.device + assert attn_metadata.is_prompt is False + assert attn_metadata.seq_lens is None + assert attn_metadata.subquery_start_loc is None + assert attn_metadata.seq_start_loc is None + assert attn_metadata.max_seq_len == max(seq_lens) + assert torch.allclose( + attn_metadata.seq_lens_tensor[:len(seq_lens)], + torch.tensor(seq_lens, dtype=torch.int, device=device)) + + # block table's first index corresponds to each batch, meaning in + # decoding it is each token. + assert attn_metadata.block_tables.shape[0] == len(input_tokens) + # Block table's second dim corresponds to each token's block number. + # It is padded up to the maximum number of blocks per batch. + assert attn_metadata.block_tables.shape[1] == ( + model_runner.get_max_block_per_batch()) + # Cuda graph IS expected for this decode-only batch (the model is + # built with enforce_eager=False; see the assert below). 
+ assert attn_metadata.use_cuda_graph is True + + assert len(input_tokens) == expected_bs + assert len(input_positions) == expected_bs + assert input_tokens == input_positions + + # Verify Sampling + expected_selected_token_indices = [] + selected_token_start_idx = 0 + for seq_len in seq_lens: + expected_selected_token_indices.append(selected_token_start_idx) + selected_token_start_idx += 1 + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + query_lens=seq_lens, + device=model_runner.device, + pin_memory=model_runner.pin_memory) + actual = sampling_metadata.selected_token_indices + expected = torch.tensor(expected_selected_token_indices, + device=actual.device, + dtype=actual.dtype) + torch.testing.assert_close(actual, expected) + + +def test_empty_seq_group(): + """Verify prepare prompt and decode returns empty output.""" + model_config = ModelConfig( + "facebook/opt-125m", + "facebook/opt-125m", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + enforce_eager=False, + ) + model_runner = ModelRunner(model_config=model_config, + parallel_config=None, + scheduler_config=None, + device_config=None, + load_config=None, + lora_config=None) + model_runner.set_block_size(16) + seq_group_metadata_list = [] + input_tokens, input_positions, attn_metadata, _, _, _, slot_mapping = ( + model_runner._prepare_decode(seq_group_metadata_list)) + assert len(input_tokens) == 0 + assert len(input_positions) == 0 + assert attn_metadata is None + assert len(slot_mapping) == 0 + + (input_tokens, input_positions, attn_metadata, return_seq_lens, _, _, _, _, + _, slot_mapping) = (model_runner._prepare_prompt(seq_group_metadata_list)) + assert len(input_tokens) == 0 + assert len(input_positions) == 0 + assert attn_metadata is None + assert len(slot_mapping) == 0 + assert len(return_seq_lens) == 0 + + +@pytest.fixture +def distributed_init(): + init_distributed_environment( + world_size=1, + rank=0, + 
distributed_init_method=f"tcp://127.0.0.1:{get_open_port()}", + local_rank=0) + + +@pytest.mark.parametrize("batch_size", list(range(2, 128))) +@pytest.mark.parametrize("enforce_eager", [True, False]) +def test_hybrid_batches(batch_size, enforce_eager, distributed_init): + + model_config = ModelConfig( + "facebook/opt-125m", + "facebook/opt-125m", + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype="float16", + revision=None, + enforce_eager=enforce_eager, + ) + scheduler_config = SchedulerConfig(100000, + 100000, + 100000, + enable_chunked_prefill=True) + model_runner = ModelRunner(model_config=model_config, + parallel_config=None, + scheduler_config=scheduler_config, + device_config=None, + load_config=None, + lora_config=None, + is_driver_worker=True) + model_runner.set_block_size(16) + + # Add prefill requests. + seq_lens = [] + seq_group_metadata_list = [] + prefill_metadata_list = [] + decode_metadata_list = [] + block_tables = {0: [1]} + prefill_batch_size = batch_size // 2 + decode_batch_size = batch_size - prefill_batch_size + for i in range(prefill_batch_size): + # make sure all tokens fit into one block + seq_len = i % (model_runner.block_size - 1) + 1 + seq_lens.append(seq_len) + seq_data = SequenceData(list(range(seq_len))) + seq_group_metadata = SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=True, + seq_data={0: seq_data}, + sampling_params=SamplingParams(temperature=0), + block_tables=block_tables, + ) + assert seq_group_metadata.token_chunk_size == seq_data.get_len() + seq_group_metadata_list.append(seq_group_metadata) + prefill_metadata_list.append(seq_group_metadata) + + # Add decode requests + for i in range(prefill_batch_size, batch_size): + # make sure all tokens fit into one block + seq_len = i % (model_runner.block_size - 1) + 1 + prompt_toks = list(range(seq_len)) + seq_data = SequenceData(prompt_toks) + seq_group_metadata = SequenceGroupMetadata( + request_id=f"test_{i}", + is_prompt=False, + seq_data={0: 
seq_data}, + sampling_params=SamplingParams(temperature=0), + block_tables={0: [1]}, + ) + assert seq_group_metadata.token_chunk_size == 1 + seq_group_metadata_list.append(seq_group_metadata) + decode_metadata_list.append(seq_group_metadata) + + (input_tokens, input_positions, attn_metadata, _, _, _, + _) = model_runner.prepare_input_tensors(seq_group_metadata_list) + + prefill_meta_actual = attn_metadata.prefill_metadata + decode_meta_actual = attn_metadata.decode_metadata + + assert len(attn_metadata.slot_mapping) == len(input_tokens) + assert len(input_positions) == len(input_tokens) + assert attn_metadata.kv_cache_dtype == "auto" + assert attn_metadata.num_prefills == prefill_batch_size + if enforce_eager: + assert attn_metadata.num_decode_tokens == decode_batch_size + else: + assert attn_metadata.num_decode_tokens == _get_graph_batch_size( + decode_batch_size) + assert attn_metadata.num_prefill_tokens == sum(seq_lens) + + # Verify attn metadata is consistent. We don't need to test individual + # values here because they are tested above. + prefill_meta = model_runner._prepare_prompt( + prefill_metadata_list).attn_metadata + decode_meta = model_runner._prepare_decode( + decode_metadata_list).attn_metadata + + # NOTE(review): iterating vars(obj) yields attribute NAMES (strings), so + # attr_expected[1] below is the 2nd character of the same name on both + # sides — these asserts compare identical characters and are vacuous. + # They should iterate vars(...).items() and compare the values. + for attr_expected, attr_actual in zip(vars(prefill_meta), + vars(prefill_meta_actual)): + assert attr_expected[1] == attr_actual[1] + for attr_expected, attr_actual in zip(vars(decode_meta), + vars(decode_meta_actual)): + assert attr_expected[1] == attr_actual[1] diff --git a/tests/worker/test_swap.py b/tests/worker/test_swap.py new file mode 100644 index 0000000..07bcd34 --- /dev/null +++ b/tests/worker/test_swap.py @@ -0,0 +1,89 @@ +import torch + +from vllm.engine.arg_utils import EngineArgs +from vllm.sequence import ExecuteModelRequest +from vllm.utils import get_distributed_init_method, get_ip, get_open_port +from vllm.worker.worker import Worker + + +def test_swap() -> None: + # Configure the engine. 
+ engine_args = EngineArgs(model="facebook/opt-125m", + dtype="half", + load_format="dummy") + engine_config = engine_args.create_engine_config() + engine_config.cache_config.num_gpu_blocks = 1000 + engine_config.cache_config.num_cpu_blocks = 1000 + + # Create the worker. + distributed_init_method = get_distributed_init_method( + get_ip(), get_open_port()) + worker = Worker( + model_config=engine_config.model_config, + parallel_config=engine_config.parallel_config, + scheduler_config=engine_config.scheduler_config, + device_config=engine_config.device_config, + cache_config=engine_config.cache_config, + load_config=engine_config.load_config, + local_rank=0, + rank=0, + distributed_init_method=distributed_init_method, + is_driver_worker=True, + ) + + # Initialize the worker. + worker.init_device() + worker.load_model() + worker.initialize_cache( + num_gpu_blocks=engine_config.cache_config.num_gpu_blocks, + num_cpu_blocks=engine_config.cache_config.num_cpu_blocks) + + # Randomly initialize the cache. + gpu_cache = worker.cache_engine.gpu_cache + cpu_cache = worker.cache_engine.cpu_cache + num_layers = len(gpu_cache) + for i in range(num_layers): + gpu_key_cache, gpu_value_cache = gpu_cache[i] + gpu_key_cache.random_() + gpu_value_cache.random_() + cpu_key_cache, cpu_value_cache = cpu_cache[i] + cpu_key_cache.random_() + cpu_value_cache.random_() + + allclose = lambda a, b: torch.allclose( + a.cuda(), b.cuda(), rtol=0.0, atol=0.0) + + # Test swap out. 
+ blocks_to_swap_out = {3: 72, 56: 35, 84: 34} + execute_model_req = ExecuteModelRequest( + seq_group_metadata_list=[], + blocks_to_swap_in={}, + blocks_to_swap_out=blocks_to_swap_out, + blocks_to_copy={}, + ) + worker.execute_model(execute_model_req=execute_model_req) + + for i in range(num_layers): + gpu_key_cache, gpu_value_cache = gpu_cache[i] + cpu_key_cache, cpu_value_cache = cpu_cache[i] + for src, dst in blocks_to_swap_out.items(): + assert allclose(gpu_key_cache[src], cpu_key_cache[dst]) + assert allclose(gpu_value_cache[src], cpu_value_cache[dst]) + + # Test swap in. + execute_model_req.blocks_to_swap_out = {} + execute_model_req.blocks_to_swap_in = { + 19: 45, + 67: 23, + 12: 78, + 40: 99, + 1: 71 + } + worker.execute_model(execute_model_req=execute_model_req) + + for i in range(num_layers): + gpu_key_cache, gpu_value_cache = gpu_cache[i] + cpu_key_cache, cpu_value_cache = cpu_cache[i] + for src, dst in execute_model_req.blocks_to_swap_in.items(): + assert allclose(gpu_key_cache[dst], cpu_key_cache[src]) + assert allclose(gpu_value_cache[dst], cpu_value_cache[src]) diff --git a/vllm/__init__.py b/vllm/__init__.py new file mode 100644 index 0000000..59810da --- /dev/null +++ b/vllm/__init__.py @@ -0,0 +1,25 @@ +"""vLLM: a high-throughput and memory-efficient inference engine for LLMs""" + +from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.engine.llm_engine import LLMEngine +from vllm.entrypoints.llm import LLM +from vllm.executor.ray_utils import initialize_ray_cluster +from vllm.model_executor.models import ModelRegistry +from vllm.outputs import CompletionOutput, RequestOutput +from vllm.sampling_params import SamplingParams + +__version__ = "0.4.2" + +__all__ = [ + "LLM", + "ModelRegistry", + "SamplingParams", + "RequestOutput", + "CompletionOutput", + "LLMEngine", + "EngineArgs", + "AsyncLLMEngine", + "AsyncEngineArgs", + "initialize_ray_cluster", +] diff --git 
a/vllm/_custom_ops.py b/vllm/_custom_ops.py new file mode 100644 index 0000000..af95d1d --- /dev/null +++ b/vllm/_custom_ops.py @@ -0,0 +1,251 @@ +from typing import Dict, Optional, Tuple + +import torch + +try: + from vllm_C import cache_ops as vllm_cache_ops + from vllm_C import ops as vllm_ops +except ImportError: + pass + + +# activation ops +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + vllm_ops.silu_and_mul(out, x) + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + vllm_ops.gelu_and_mul(out, x) + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + vllm_ops.gelu_tanh_and_mul(out, x) + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + vllm_ops.gelu_fast(out, x) + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + vllm_ops.gelu_new(out, x) + + +# page attention ops +def paged_attention_v1( + out: torch.Tensor, + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + num_kv_heads: int, + scale: float, + block_tables: torch.Tensor, + seq_lens: torch.Tensor, + block_size: int, + max_seq_len: int, + alibi_slopes: Optional[torch.Tensor], + kv_cache_dtype: str, + kv_scale: float, +) -> None: + vllm_ops.paged_attention_v1(out, query, key_cache, value_cache, + num_kv_heads, scale, block_tables, seq_lens, + block_size, max_seq_len, alibi_slopes, + kv_cache_dtype, kv_scale) + + +def paged_attention_v2( + out: torch.Tensor, + exp_sum: torch.Tensor, + max_logits: torch.Tensor, + tmp_out: torch.Tensor, + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + num_kv_heads: int, + scale: float, + block_tables: torch.Tensor, + seq_lens: torch.Tensor, + block_size: int, + max_seq_len: int, + alibi_slopes: Optional[torch.Tensor], + kv_cache_dtype: str, + kv_scale: float, +) -> None: + vllm_ops.paged_attention_v2(out, exp_sum, max_logits, tmp_out, query, + key_cache, value_cache, num_kv_heads, scale, + block_tables, seq_lens, block_size, + max_seq_len, 
alibi_slopes, kv_cache_dtype, + kv_scale) + + +# pos encoding ops +def rotary_embedding( + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor, + head_size: int, + cos_sin_cache: torch.Tensor, + is_neox: bool, +) -> None: + vllm_ops.rotary_embedding(positions, query, key, head_size, cos_sin_cache, + is_neox) + + +def batched_rotary_embedding(positions: torch.Tensor, query: torch.Tensor, + key: torch.Tensor, head_size: int, + cos_sin_cache: torch.Tensor, is_neox: bool, + rot_dim: int, + cos_sin_cache_offsets: torch.Tensor) -> None: + vllm_ops.batched_rotary_embedding(positions, query, key, head_size, + cos_sin_cache, is_neox, rot_dim, + cos_sin_cache_offsets) + + +# layer norm ops +def rms_norm(out: torch.Tensor, input: torch.Tensor, weight: torch.Tensor, + epsilon: float) -> None: + vllm_ops.rms_norm(out, input, weight, epsilon) + + +def fused_add_rms_norm(input: torch.Tensor, residual: torch.Tensor, + weight: torch.Tensor, epsilon: float) -> None: + vllm_ops.fused_add_rms_norm(input, residual, weight, epsilon) + + +# quantization ops +# awq +def awq_dequantize(qweight: torch.Tensor, scales: torch.Tensor, + zeros: torch.Tensor, split_k_iters: int, thx: int, + thy: int) -> torch.Tensor: + return vllm_ops.awq_dequantize(qweight, scales, zeros, split_k_iters, thx, + thy) + + +def awq_gemm(input: torch.Tensor, qweight: torch.Tensor, qzeros: torch.Tensor, + scales: torch.Tensor, split_k_iters: int) -> torch.Tensor: + return vllm_ops.awq_gemm(input, qweight, qzeros, scales, split_k_iters) + + +# gptq +def gptq_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, + b_gptq_qzeros: torch.Tensor, b_gptq_scales: torch.Tensor, + b_g_idx: torch.Tensor, use_exllama: bool, + bit: int) -> torch.Tensor: + return vllm_ops.gptq_gemm(a, b_q_weight, b_gptq_qzeros, b_gptq_scales, + b_g_idx, use_exllama, bit) + + +def gptq_shuffle(q_weight: torch.Tensor, q_perm: torch.Tensor, + bit: int) -> None: + vllm_ops.gptq_shuffle(q_weight, q_perm, bit) + + +# squeezellm +def 
squeezellm_gemm(vec: torch.Tensor, mat: torch.Tensor, mul: torch.Tensor, + lookup_table: torch.Tensor) -> None: + vllm_ops.squeezellm_gemm(vec, mat, mul, lookup_table) + + +# marlin +def marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, + b_scales: torch.Tensor, workspace: torch.Tensor, size_m: int, + size_n: int, size_k: int) -> torch.Tensor: + return vllm_ops.marlin_gemm(a, b_q_weight, b_scales, workspace, size_m, + size_n, size_k) + + +# aqlm +def aqlm_gemm(input: torch.Tensor, codes: torch.Tensor, + codebooks: torch.Tensor, scales: torch.Tensor, + codebook_partition_sizes: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + return vllm_ops.aqlm_gemm(input, codes, codebooks, scales, + codebook_partition_sizes, bias) + + +def aqlm_dequant(codes: torch.Tensor, codebooks: torch.Tensor, + codebook_partition_sizes: torch.Tensor) -> torch.Tensor: + return vllm_ops.aqlm_dequant(codes, codebooks, codebook_partition_sizes) + + +# gptq_marlin +def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor, + size_k: int, size_n: int, + num_bits: int) -> torch.Tensor: + return vllm_ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n, + num_bits) + + +def gptq_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, + b_scales: torch.Tensor, g_idx: torch.Tensor, + perm: torch.Tensor, workspace: torch.Tensor, + num_bits: int, size_m: int, size_n: int, size_k: int, + is_k_full: bool) -> torch.Tensor: + return vllm_ops.gptq_marlin_gemm(a, b_q_weight, b_scales, g_idx, perm, + workspace, num_bits, size_m, size_n, + size_k, is_k_full) + + +# fp8 +def scaled_fp8_quant( + input: torch.Tensor, + scale: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + output = torch.empty_like(input, dtype=torch.float8_e4m3fn) + if scale is None: + scale = torch.zeros(1, device=input.device, dtype=torch.float32) + vllm_ops.dynamic_scaled_fp8_quant(output, input, scale) + else: + vllm_ops.static_scaled_fp8_quant(output, input, scale) + return output, 
scale + + +# moe +def moe_align_block_size(topk_ids: torch.Tensor, num_experts: int, + block_size: int, sorted_token_ids: torch.Tensor, + experts_ids: torch.Tensor, + num_tokens_post_pad: torch.Tensor) -> None: + vllm_ops.moe_align_block_size(topk_ids, num_experts, block_size, + sorted_token_ids, experts_ids, + num_tokens_post_pad) + + +def reshape_and_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, + kv_scale: float, +) -> None: + vllm_cache_ops.reshape_and_cache(key, value, key_cache, value_cache, + slot_mapping, kv_cache_dtype, kv_scale) + + +def reshape_and_cache_flash( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, +) -> None: + vllm_cache_ops.reshape_and_cache_flash(key, value, key_cache, value_cache, + slot_mapping, kv_cache_dtype) + + +def copy_blocks(key_caches: torch.Tensor, value_caches: torch.Tensor, + block_mapping: torch.Tensor) -> None: + vllm_cache_ops.copy_blocks(key_caches, value_caches, block_mapping) + + +def swap_blocks(src: torch.Tensor, dst: torch.Tensor, + block_mapping: Dict[int, int]) -> None: + vllm_cache_ops.swap_blocks(src, dst, block_mapping) + + +def convert_fp8(output: torch.Tensor, input: torch.Tensor) -> None: + vllm_cache_ops.convert_fp8(output, input) + + +#TODO: cuda_utils, custom_ar diff --git a/vllm/attention/__init__.py b/vllm/attention/__init__.py new file mode 100644 index 0000000..7636b34 --- /dev/null +++ b/vllm/attention/__init__.py @@ -0,0 +1,13 @@ +from vllm.attention.backends.abstract import (AttentionBackend, + AttentionMetadata, + AttentionMetadataPerStage) +from vllm.attention.layer import Attention +from vllm.attention.selector import get_attn_backend + +__all__ = [ + "AttentionBackend", + "AttentionMetadata", + "Attention", + "get_attn_backend", + "AttentionMetadataPerStage", +] diff --git 
a/vllm/attention/backends/__init__.py b/vllm/attention/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/attention/backends/abstract.py b/vllm/attention/backends/abstract.py new file mode 100644 index 0000000..61c9c81 --- /dev/null +++ b/vllm/attention/backends/abstract.py @@ -0,0 +1,127 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass, fields +from typing import (Any, Dict, Generic, List, Optional, Set, Tuple, Type, + TypeVar) + +import torch + + +class AttentionBackend(ABC): + """Abstract class for attention backends.""" + + @staticmethod + @abstractmethod + def get_impl_cls() -> Type["AttentionImpl"]: + raise NotImplementedError + + @staticmethod + @abstractmethod + def make_metadata(*args, **kwargs) -> "AttentionMetadataPerStage": + raise NotImplementedError + + @staticmethod + @abstractmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + raise NotImplementedError + + @staticmethod + @abstractmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + raise NotImplementedError + + @staticmethod + @abstractmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + raise NotImplementedError + + +@dataclass +class AttentionMetadataPerStage: + """Attention metadata for a specific stage. I.e., prefill or decode.""" + + def asdict_zerocopy(self, + skip_fields: Optional[Set[str]] = None + ) -> Dict[str, Any]: + """Similar to dataclasses.asdict, but avoids deepcopying.""" + if skip_fields is None: + skip_fields = set() + # Note that if we add dataclasses as fields, they will need + # similar handling. 
+ return { + field.name: getattr(self, field.name) + for field in fields(self) if field.name not in skip_fields + } + + +T = TypeVar("T", bound=AttentionMetadataPerStage) + + +@dataclass +class AttentionMetadata(Generic[T]): + """Attention metadata for prefill and decode batched together.""" + # Total number of prefill requests. + num_prefills: int + # Number of prefill tokens. + num_prefill_tokens: int + # Number of decode tokens. Note that it is equivalent to the number of + # decode requests. + num_decode_tokens: int + # The attention metadata for prefill requests in a batch. + # None if there's no prefill requests in a batch. + prefill_metadata: Optional[T] + # The attention metadata for decode requests in a batch. + # None if there's no decode requests in a batch. + decode_metadata: Optional[T] + # (num_tokens,). The indices of the token slots that input tokens will be + # stored into. E.g., if `slot_mapping` is [35, 2, 17] and the block size + # is 16, the three tokens are stored in the 3rd slot in block 2, 2nd slot + # in block 0, and 1st slot in block 1, respectively. + slot_mapping: torch.Tensor + # The kv cache's data type. 
+ kv_cache_dtype: str + + def __post_init__(self): + if self.num_prefill_tokens > 0: + assert self.num_prefills > 0 + assert self.prefill_metadata is not None + if self.num_decode_tokens > 0: + assert self.decode_metadata is not None + + +class AttentionImpl(ABC): + + @abstractmethod + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: Optional[int] = None, + alibi_slopes: Optional[List[float]] = None, + sliding_window: Optional[int] = None, + ) -> None: + raise NotImplementedError + + @abstractmethod + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + kv_scale: float, + ) -> torch.Tensor: + raise NotImplementedError diff --git a/vllm/attention/backends/flash_attn.py b/vllm/attention/backends/flash_attn.py new file mode 100644 index 0000000..195605d --- /dev/null +++ b/vllm/attention/backends/flash_attn.py @@ -0,0 +1,283 @@ +"""Attention layer with Flash and PagedAttention. + +NOTE(woosuk): At the moment, this file includes a lot of duplicated code from +XFormers backend. The duplicated code will be removed once we use flash-attn or +flashinfer for all the attention operations. 
+""" +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Type + +import torch +import torch_musa +from torch.nn.functional import scaled_dot_product_attention + +from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, + AttentionMetadata, + AttentionMetadataPerStage) +from vllm.attention.ops.paged_attn import (PagedAttention, + PagedAttentionMetadata) + + +class FlashAttentionBackend(AttentionBackend): + + @staticmethod + def get_impl_cls() -> Type["FlashAttentionImpl"]: + return FlashAttentionImpl + + @staticmethod + def make_metadata(*args, **kwargs) -> "FlashAttentionMetadata": + return FlashAttentionMetadata(*args, **kwargs) + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + return PagedAttention.get_kv_cache_shape(num_blocks, block_size, + num_kv_heads, head_size) + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst) + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + PagedAttention.copy_blocks(kv_caches, src_to_dists) + + +@dataclass +class FlashAttentionMetadata(AttentionMetadataPerStage, + PagedAttentionMetadata): + """Metadata for FlashAttentionBackend. + + NOTE: Any python object stored here is not updated when it is + cuda-graph replayed. If you have values that need to be changed + dynamically, it should be stored in tensor. The tensor has to be + updated from `CUDAGraphRunner.forward` API. + """ + # Currently, input sequences can only contain all prompts + # or all decoding. True if all sequences are prompts. + is_prompt: bool + # (batch_size,). The sequence length per sequence. Sequence length means + # the computed tokens + new tokens None if it is a decoding. 
+ seq_lens: Optional[List[int]] + # seq_lens stored as a tensor. + seq_lens_tensor: Optional[torch.Tensor] + + # NOTE(sang): Definition of context_len, query_len, and seq_len. + # |---------- N-1 iteration --------| + # |---------------- N iteration ---------------------| + # |- tokenA -|......................|-- newTokens ---| + # |---------- context_len ----------| + # |-------------------- seq_len ----------------------| + # |-- query_len ---| + + # Maximum query length in the batch. + max_query_len: Optional[int] + # Maximum sequence length in the batch. + max_seq_len: Optional[int] + # (batch_size + 1,). The cumulative subquery lengths of the sequences in + # the batch, used to index into subquery. E.g., if the subquery length + # is [4, 6], it is [0, 4, 10]. + subquery_start_loc: Optional[torch.Tensor] + # (batch_size + 1,). The cumulative sequence lengths of the sequences in + # the batch, used to index into sequence. E.g., if the sequence length is + # [4, 6], it is [0, 4, 10]. + seq_start_loc: Optional[torch.Tensor] + # (batch_size,) A tensor of context lengths (tokens that are computed + # so far). + context_lens_tensor: Optional[torch.Tensor] + + # Whether or not if cuda graph is enabled. + # Cuda-graph is currently enabled for decoding only. + # TODO(woosuk): Move `use_cuda_graph` out since it's unrelated to attention. + use_cuda_graph: bool + + +class FlashAttentionImpl(AttentionImpl): + """ + If the input tensors contain prompt tokens, the layout is as follows: + |<--------------- num_prefill_tokens ----------------->| + |<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->| + + Otherwise, the layout is as follows: + |<----------------- num_decode_tokens ------------------>| + |<--decode_0-->|..........|<--decode_M-1-->|<--padding-->| + + Generation tokens can contain padding when cuda-graph is used. + Currently, prompt tokens don't contain any padding. + + The prompts might have different lengths, while the generation tokens + always have length 1. 
+ + If chunked prefill is enabled, prefill tokens and decode tokens can be + batched together in a flattened 1D query. + + |<----- num_prefill_tokens ---->|<------- num_decode_tokens --------->| + |<-prefill_0->|...|<-prefill_N-1->|<--decode_0-->|...|<--decode_M-1-->| + + Currently, cuda graph is disabled for chunked prefill, meaning there's no + padding between prefill and decode tokens. + """ + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: Optional[int] = None, + alibi_slopes: Optional[List[float]] = None, + sliding_window: Optional[int] = None, + ) -> None: + self.num_heads = num_heads + self.head_size = head_size + self.scale = float(scale) + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + # NOTE(review): the sliding_window argument is ignored here — + # self.sliding_window is always the int -1, so the "is not None" + # check below is always True, and forward's self.sliding_window[0] + # would raise TypeError on an int if that path is reached — confirm + # whether sliding-window attention is intentionally unsupported. + self.sliding_window = -1 + if alibi_slopes is not None: + alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32) + self.alibi_slopes = alibi_slopes + + self.need_mask = (self.alibi_slopes is not None + or self.sliding_window is not None) + + assert self.num_heads % self.num_kv_heads == 0 + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + + suppored_head_sizes = PagedAttention.get_supported_head_sizes() + if head_size not in suppored_head_sizes: + raise ValueError( + f"Head size {head_size} is not supported by PagedAttention. " + f"Supported head sizes are: {suppored_head_sizes}.") + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata[FlashAttentionMetadata], + kv_scale: float, + ) -> torch.Tensor: + """Forward pass with FlashAttention and PagedAttention. + + Args: + query: shape = [num_tokens, num_heads * head_size] + key: shape = [num_tokens, num_kv_heads * head_size] + value: shape = [num_tokens, num_kv_heads * head_size] + kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size] + attn_metadata: Metadata for attention. 
+ Returns: + shape = [num_tokens, num_heads * head_size] + """ + num_tokens, hidden_size = query.shape + # Reshape the query, key, and value tensors. + query = query.view(-1, self.num_heads, self.head_size) + key = key.view(-1, self.num_kv_heads, self.head_size) + value = value.view(-1, self.num_kv_heads, self.head_size) + + # enable musa flash attention + torch.backends.cuda.enable_flash_sdp(True) + torch.backends.cuda.enable_math_sdp(False) + torch.backends.cuda.enable_mem_efficient_sdp(True) + + if kv_cache is not None: + key_cache, value_cache = PagedAttention.split_kv_cache( + kv_cache, self.num_kv_heads, self.head_size) + + # Reshape the input keys and values and store them in the cache. + # If kv_cache is not provided, the new key and value tensors are + # not cached. This happens during the initial memory profiling run. + PagedAttention.write_to_paged_cache(key, value, key_cache, + value_cache, + attn_metadata.slot_mapping, + attn_metadata.kv_cache_dtype, + kv_scale) + + num_prefill_tokens = attn_metadata.num_prefill_tokens + num_decode_tokens = attn_metadata.num_decode_tokens + assert key.shape[0] == num_prefill_tokens + num_decode_tokens + assert value.shape[0] == num_prefill_tokens + num_decode_tokens + + output = torch.empty_like(query) + # Query for decode. KV is not needed because it is already cached. + decode_query = query[num_prefill_tokens:] + # QKV for prefill. + query = query[:num_prefill_tokens] + key = key[:num_prefill_tokens] + value = value[:num_prefill_tokens] + query = query.movedim(0, query.dim() - 2).unsqueeze(0) + key = key.movedim(0, key.dim() - 2).unsqueeze(0) + value = value.movedim(0, value.dim() - 2).unsqueeze(0) + + assert decode_query.shape[0] == num_decode_tokens + + if prefill_meta := attn_metadata.prefill_metadata: + tensor = torch.full( + (1, 1, num_tokens, num_tokens), + dtype=torch.bool, + fill_value=1, + device=query.device) + att_mask = torch.tril(tensor, diagonal=0) + # Prompt run. 
+ if kv_cache is None or prefill_meta.block_tables.numel() == 0: + # normal attention + # When block_tables are not filled, it means q and k are the + # prompt, and they have the same length. + attn_output = scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=att_mask.contiguous(), + dropout_p=0.0, + is_causal=False, + ) + attn_output = attn_output.squeeze(0).permute(1, 0, 2).contiguous() + assert output[:num_prefill_tokens].shape == attn_output.shape + output[:num_prefill_tokens] = attn_output + else: + # prefix-enabled attention + # TODO(Hai) this triton kernel has regression issue (broke) to + # deal with different data types between KV and FP8 KV cache, + # to be addressed separately. + output[:num_prefill_tokens] = PagedAttention.forward_prefix( + query, + key, + value, + key_cache, + value_cache, + prefill_meta.block_tables, + prefill_meta.subquery_start_loc, + prefill_meta.seq_lens_tensor, + prefill_meta.context_lens_tensor, + prefill_meta.max_query_len, + self.alibi_slopes, + self.sliding_window[0], + ) + if decode_meta := attn_metadata.decode_metadata: + # Decoding run. + output[num_prefill_tokens:] = PagedAttention.forward_decode( + decode_query, + key_cache, + value_cache, + decode_meta.block_tables, + decode_meta.seq_lens_tensor, + decode_meta.max_seq_len, + attn_metadata.kv_cache_dtype, + self.num_kv_heads, + self.scale, + self.alibi_slopes, + kv_scale, + ) + + # Reshape the output tensor. 
+ return output.view(num_tokens, hidden_size) diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py new file mode 100644 index 0000000..8ab4b1f --- /dev/null +++ b/vllm/attention/backends/flashinfer.py @@ -0,0 +1,220 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Set, Tuple, Type + +try: + import flashinfer + from flash_attn import flash_attn_varlen_func + from flashinfer import BatchDecodeWithPagedKVCacheWrapper +except ImportError: + flashinfer = None + flash_attn_varlen_func = None + BatchDecodeWithPagedKVCacheWrapper = None + +import torch + +from vllm import _custom_ops as ops +from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, + AttentionMetadata, + AttentionMetadataPerStage) + + +class FlashInferBackend(AttentionBackend): + + @staticmethod + def get_impl_cls() -> Type["FlashInferImpl"]: + return FlashInferImpl + + @staticmethod + def make_metadata(*args, **kwargs) -> "FlashInferMetadata": + return FlashInferMetadata(*args, **kwargs) + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + return (num_blocks, 2, block_size, num_kv_heads, head_size) + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + raise NotImplementedError + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + raise NotImplementedError + + @staticmethod + def get_supported_head_sizes() -> List[int]: + return [64, 128, 256] + + +@dataclass +class FlashInferMetadata(AttentionMetadataPerStage): + + is_prompt: bool + + use_cuda_graph: bool = False + + decode_wrapper: Optional[BatchDecodeWithPagedKVCacheWrapper] = None + + # Metadata for the prefill stage since we still + # use flash attention for prefill. 
    seq_start_loc: Optional[torch.Tensor] = None
    max_seq_len: Optional[int] = None
    block_tables: Optional[torch.Tensor] = None

    # Metadata for the decode stage
    # Workspace buffer required by the kernel, the buffer should not
    # be allocated/deallocated by the FlashInferMetadata object.
    workspace_buffer: Optional[torch.Tensor] = None
    # An example for paged_kv_indices, paged_kv_indptr:
    # request 1, page indices [0, 5, 8]
    # request 2, page indices [1, 6, 7]
    # request 3, page indices [3, 4]
    # paged_kv_indices is a concatenation of page indices of all requests:
    # [0, 5, 8, 1, 6, 7, 3, 4]
    # paged_kv_indptr is used to index into paged_kv_indices:
    # [0, 3, 6, 8]
    # The indptr of the paged kv cache, shape: [batch_size + 1]
    paged_kv_indptr: Optional[torch.Tensor] = None
    # The page indices of the paged kv cache
    paged_kv_indices: Optional[torch.Tensor] = None
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size]
    paged_kv_last_page_len: Optional[torch.Tensor] = None
    # The number of query/output heads
    num_qo_heads: Optional[int] = None
    # The number of key/value heads
    num_kv_heads: Optional[int] = None
    # The dimension of the attention heads
    head_dim: Optional[int] = None
    # Block size of vllm
    page_size: Optional[int] = None
    # The data type of the paged kv cache
    data_type: torch.dtype = None

    def __post_init__(self):
        # Validate head_dim against what the FlashInfer kernels support.
        # Refer to
        # https://github.com/flashinfer-ai/flashinfer/blob/3d55c71a62052c590c130897d3a3db49b14fcc34/include/flashinfer/utils.cuh#L157
        supported_head_sizes = FlashInferBackend.get_supported_head_sizes()
        if self.head_dim is not None and self.head_dim \
                not in supported_head_sizes:
            # NOTE(review): the trailing comma makes this a two-argument
            # ValueError (args tuple of two strings), not one concatenated
            # message — probably unintended; verify before relying on str(e).
            raise ValueError(
                f"Only {supported_head_sizes} are supported for head_dim,",
                f"received {self.head_dim}.")

        # When using flashinfer, we are also creating the FlashInferMetadata,
        # which will also call post_init by default, here we want to skip the
        # post_init if it's the prefill phase.
        if not self.is_prompt:
            # NOTE(review): if the optional `flashinfer` import failed, this
            # is None and the call below raises AttributeError — an explicit
            # "flashinfer is not installed" error would be clearer; confirm
            # callers guarantee availability on the decode path.
            self.decode_wrapper = flashinfer.BatchDecodeWithPagedKVCacheWrapper(
                self.workspace_buffer, "NHD")
            self.decode_wrapper.begin_forward(
                self.paged_kv_indptr,
                self.paged_kv_indices,
                self.paged_kv_last_page_len,
                self.num_qo_heads,
                self.num_kv_heads,
                self.head_dim,
                self.page_size,
                # Disable flashinfer's pos encoding and use vllm's rope.
                pos_encoding_mode="NONE",
                data_type=self.data_type)

    def asdict_zerocopy(self,
                        skip_fields: Optional[Set[str]] = None
                        ) -> Dict[str, Any]:
        """Serialize to a dict for broadcast, excluding unpicklable fields."""
        if skip_fields is None:
            skip_fields = set()
        # We need to skip the decode_wrapper field since it cannot be
        # broadcasted with nccl when TP is enabled.
        skip_fields.add('decode_wrapper')
        return super().asdict_zerocopy(skip_fields)


class FlashInferImpl(AttentionImpl):
    """Attention impl using flash-attn for prefill, FlashInfer for decode."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: Optional[int] = None,
        alibi_slopes: Optional[List[float]] = None,
        sliding_window: Optional[int] = None,
    ) -> None:
        if sliding_window is not None:
            raise ValueError("Sliding window is not supported in FlashInfer.")
        # (-1, -1) means "no window" for flash_attn_varlen_func.
        self.sliding_window = (-1, -1)
        self.alibi_slopes = alibi_slopes
        self.scale = scale
        self.num_heads = num_heads
        self.head_size = head_size
        self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads

    def forward(self, query: torch.Tensor, key: torch.Tensor,
                value: torch.Tensor, kv_cache: Optional[torch.Tensor],
                attn_metadata: AttentionMetadata[FlashInferMetadata],
                kv_scale: float):
        """Run attention for an all-prefill or all-decode batch.

        Chunked prefill (mixing prefill and decode tokens) is rejected by
        the assertions below.
        """
        num_tokens, hidden_size = query.shape
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)

        if attn_metadata.num_prefill_tokens > 0:
            assert attn_metadata.num_decode_tokens == 0, (
                "Chunked prefill is not supported with flashinfer yet.")
        if attn_metadata.num_decode_tokens > 0:
            assert attn_metadata.num_prefill_tokens == 0, (
                "Chunked prefill is not supported with flashinfer yet.")

        if kv_cache is not None:
            # Use the same reshape and cache kernel as flash attention.
            ops.reshape_and_cache_flash(
                key,
                value,
                kv_cache[:, 0],
                kv_cache[:, 1],
                attn_metadata.slot_mapping.flatten(),
                attn_metadata.kv_cache_dtype,
            )

        if prefill_meta := attn_metadata.prefill_metadata:
            assert prefill_meta.block_tables is not None
            if kv_cache is None or prefill_meta.block_tables.numel() == 0:
                output = flash_attn_varlen_func(
                    q=query,
                    k=key,
                    v=value,
                    cu_seqlens_q=prefill_meta.seq_start_loc,
                    cu_seqlens_k=prefill_meta.seq_start_loc,
                    max_seqlen_q=prefill_meta.max_seq_len,
                    max_seqlen_k=prefill_meta.max_seq_len,
                    softmax_scale=self.scale,
                    causal=True,
                    window_size=self.sliding_window,
                    alibi_slopes=self.alibi_slopes,
                )
            else:
                raise NotImplementedError(
                    "Prefix caching is not supported with flashinfer yet.")
        else:
            assert attn_metadata.decode_metadata is not None
            assert attn_metadata.decode_metadata.decode_wrapper is not None
            query = query.contiguous(
            )  # Flashinfer requires query to be contiguous
            output = attn_metadata.decode_metadata.decode_wrapper.forward(
                query,
                kv_cache,
                sm_scale=self.scale,
            )
        return output.view(num_tokens, hidden_size)


"""Attention layer ROCm GPUs."""
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Type

import torch

import vllm.envs as envs
from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                              AttentionMetadata,
                                              AttentionMetadataPerStage)
from vllm.attention.ops.paged_attn import (PagedAttention,
                                           PagedAttentionMetadata)
from vllm.logger import init_logger

logger =
init_logger(__name__)


class ROCmFlashAttentionBackend(AttentionBackend):
    """Static backend descriptor for the ROCm flash-attention path."""

    @staticmethod
    def get_impl_cls() -> Type["ROCmFlashAttentionImpl"]:
        return ROCmFlashAttentionImpl

    @staticmethod
    def make_metadata(*args, **kwargs) -> "ROCmFlashAttentionMetadata":
        return ROCmFlashAttentionMetadata(*args, **kwargs)

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                                 num_kv_heads, head_size)

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: Dict[int, List[int]],
    ) -> None:
        PagedAttention.copy_blocks(kv_caches, src_to_dists)


@dataclass
class ROCmFlashAttentionMetadata(AttentionMetadataPerStage,
                                 PagedAttentionMetadata):
    """Metadata for FlashAttentionBackend.

    NOTE: Any python object stored here is not updated when it is
    cuda-graph replayed. If you have values that need to be changed
    dynamically, it should be stored in tensor. The tensor has to be
    updated from `CUDAGraphRunner.forward` API.
    """
    # Currently, input sequences can only contain all prompts
    # or all decoding. True if all sequences are prompts.
    is_prompt: bool
    # (batch_size,). The sequence length per sequence. Sequence length means
    # the computed tokens + new tokens None if it is a decoding.
    seq_lens: Optional[List[int]]
    # seq_lens stored as a tensor.
    seq_lens_tensor: Optional[torch.Tensor]

    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ----------------------|
    # |-- query_len ---|

    # Maximum query length in the batch.
    max_query_len: Optional[int]
    # Maximum sequence length in the batch.
    max_seq_len: Optional[int]
    # (batch_size + 1,). The cumulative subquery lengths of the sequences in
    # the batch, used to index into subquery. E.g., if the subquery length
    # is [4, 6], it is [0, 4, 10].
    subquery_start_loc: Optional[torch.Tensor]
    # (batch_size + 1,). The cumulative sequence lengths of the sequences in
    # the batch, used to index into sequence. E.g., if the sequence length is
    # [4, 6], it is [0, 4, 10].
    seq_start_loc: Optional[torch.Tensor]

    # Whether or not if cuda graph is enabled.
    # Cuda-graph is currently enabled for decoding only.
    # TODO(woosuk): Move `use_cuda_graph` out since it's unrelated to attention.
    use_cuda_graph: bool
    # (batch_size,) A tensor of context lengths (tokens that are computed
    # so far).
    # NOTE(review): field order differs from the other backends' metadata
    # (use_cuda_graph precedes context_lens_tensor here) — positional
    # construction via make_metadata(*args) depends on this exact order.
    context_lens_tensor: Optional[torch.Tensor]


class ROCmFlashAttentionImpl(AttentionImpl):
    """
    If the input tensors contain prompt tokens, the layout is as follows:
    |<--------------- num_prompt_tokens -------------->|
    |<--prompt_0-->|<--prompt_1-->|...|<--prompt_N-1-->|

    Otherwise, the layout is as follows:
    |<------------------ num_generation_tokens (M) ----------------->|
    |<--generation_0-->|..........|<--generation_M-1-->|<--padding-->|

    Generation tokens can contain padding when cuda-graph is used.
    Currently, prompt tokens don't contain any padding.

    The prompts might have different lengths, while the generation tokens
    always have length 1.

    If chunked prefill is enabled, prefill tokens and decode tokens can be
    batched together in a flattened 1D query.

    |<----- num_prefill_tokens ---->|<------- num_decode_tokens ----------->|
    |<-prompt_0->|...|<-prompt_N-1->|<-generation_0->|...|<-generation_M-1->|

    Currently, cuda graph is disabled for chunked prefill, meaning there's no
    padding between prefill and decode tokens.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: Optional[int] = None,
        alibi_slopes: Optional[List[float]] = None,
        sliding_window: Optional[int] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
        # (left, right) window; (-1, -1) disables windowing.
        self.sliding_window = ((sliding_window, sliding_window)
                               if sliding_window is not None else (-1, -1))
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes

        assert self.num_heads % self.num_kv_heads == 0
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        suppored_head_sizes = PagedAttention.get_supported_head_sizes()
        if head_size not in suppored_head_sizes:
            raise ValueError(
                f"Head size {head_size} is not supported by PagedAttention. "
                f"Supported head sizes are: {suppored_head_sizes}.")

        # Select the prefill kernel: Triton, CK flash-attn, or naive torch.
        self.use_naive_attn = False
        # NOTE: Allow for switching between Triton and CK. Defaulting to triton.
        self.use_triton_flash_attn = envs.VLLM_USE_TRITON_FLASH_ATTN
        if self.use_triton_flash_attn:
            from vllm.attention.ops.triton_flash_attention import (  # noqa: F401
                triton_attention)
            self.attn_func = triton_attention
            logger.debug("Using Triton FA in ROCmBackend")
        else:
            # if not using triton, navi3x not use flash-attn either
            if torch.cuda.get_device_capability()[0] == 11:
                self.use_naive_attn = True
            else:
                try:
                    from flash_attn import flash_attn_varlen_func  # noqa: F401
                    self.attn_func = flash_attn_varlen_func
                    logger.debug("Using CK FA in ROCmBackend")
                except ModuleNotFoundError:
                    self.use_naive_attn = True

            if self.use_naive_attn:
                self.attn_func = _naive_attention
                logger.debug("Using naive attention in ROCmBackend")

    def repeat_kv(self, x: torch.Tensor, n_rep: int) -> torch.Tensor:
        """torch.repeat_interleave(x, dim=1, repeats=n_rep)"""
        tokens, n_kv_heads, head_dim = x.shape
        return (x[:, :,
                  None, :].expand(tokens, n_kv_heads, n_rep,
                                  head_dim).reshape(tokens, n_kv_heads * n_rep,
                                                    head_dim))

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata[ROCmFlashAttentionMetadata],
        kv_scale: float = 1.0,
    ) -> torch.Tensor:
        """Forward pass with FlashAttention and PagedAttention.

        Args:
            query: shape = [num_tokens, num_heads * head_size]
            key: shape = [num_tokens, num_kv_heads * head_size]
            value: shape = [num_tokens, num_kv_heads * head_size]
            kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        num_tokens, hidden_size = query.shape
        # Reshape the query, key, and value tensors.
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)

        if kv_cache is not None:
            key_cache, value_cache = PagedAttention.split_kv_cache(
                kv_cache, self.num_kv_heads, self.head_size)

            # Reshape the input keys and values and store them in the cache.
            # If kv_cache is not provided, the new key and value tensors are
            # not cached. This happens during the initial memory profiling run.
            PagedAttention.write_to_paged_cache(
                key,
                value,
                key_cache,
                value_cache,
                attn_metadata.slot_mapping,
                attn_metadata.kv_cache_dtype,
                kv_scale,
            )

        num_prefill_tokens = attn_metadata.num_prefill_tokens
        num_decode_tokens = attn_metadata.num_decode_tokens
        assert key.shape[0] == num_prefill_tokens + num_decode_tokens
        assert value.shape[0] == num_prefill_tokens + num_decode_tokens

        output = torch.empty_like(query)
        # Query for decode. KV is not needed because it is already cached.
        decode_query = query[num_prefill_tokens:]
        # QKV for prefill.
        query = query[:num_prefill_tokens]
        key = key[:num_prefill_tokens]
        value = value[:num_prefill_tokens]

        assert query.shape[0] == num_prefill_tokens
        assert decode_query.shape[0] == num_decode_tokens

        if prefill_meta := attn_metadata.prefill_metadata:
            # Prompt run.
            assert prefill_meta.seq_lens is not None
            if kv_cache is None or prefill_meta.block_tables.numel() == 0:
                # triton attention
                # When block_tables are not filled, it means q and k are the
                # prompt, and they have the same length.
                if self.use_triton_flash_attn:
                    out, _ = self.attn_func(
                        query,
                        key,
                        value,
                        None,
                        prefill_meta.seq_start_loc,
                        prefill_meta.seq_start_loc,
                        prefill_meta.max_seq_len,
                        prefill_meta.max_seq_len,
                        True,
                        self.scale,
                    )
                elif self.use_naive_attn:
                    if self.num_kv_heads != self.num_heads:
                        # Interleave for MQA workaround.
                        key = self.repeat_kv(key, self.num_queries_per_kv)
                        value = self.repeat_kv(value, self.num_queries_per_kv)
                    out = self.attn_func(
                        query,
                        key,
                        value,
                        prefill_meta.seq_lens,
                        self.scale,
                    )
                else:
                    out = self.attn_func(
                        q=query,
                        k=key,
                        v=value,
                        cu_seqlens_q=prefill_meta.seq_start_loc,
                        cu_seqlens_k=prefill_meta.seq_start_loc,
                        max_seqlen_q=prefill_meta.max_seq_len,
                        max_seqlen_k=prefill_meta.max_seq_len,
                        softmax_scale=self.scale,
                        causal=True,
                    )

                # common code for prefill
                assert output[:num_prefill_tokens].shape == out.shape
                output[:num_prefill_tokens] = out
            else:
                # prefix-enabled attention
                output[:num_prefill_tokens] = PagedAttention.forward_prefix(
                    query,
                    key,
                    value,
                    key_cache,
                    value_cache,
                    prefill_meta.block_tables,
                    prefill_meta.subquery_start_loc,
                    prefill_meta.seq_lens_tensor,
                    prefill_meta.context_lens_tensor,
                    prefill_meta.max_query_len,
                    self.alibi_slopes,
                    self.sliding_window[0],
                )

        if decode_meta := attn_metadata.decode_metadata:
            # Decoding run.
            output[num_prefill_tokens:] = PagedAttention.forward_decode(
                decode_query,
                key_cache,
                value_cache,
                decode_meta.block_tables,
                decode_meta.seq_lens_tensor,
                decode_meta.max_seq_len,
                attn_metadata.kv_cache_dtype,
                self.num_kv_heads,
                self.scale,
                self.alibi_slopes,
                kv_scale,
            )

        # Reshape the output tensor.
        return output.view(num_tokens, hidden_size)


def _naive_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    seq_lens: List[int],
    scale: float,
) -> torch.Tensor:
    """Per-sequence causal attention fallback implemented in pure torch."""
    output = torch.empty_like(query)
    start = 0
    for _, seq_len in enumerate(seq_lens):
        end = start + seq_len
        out = _naive_masked_attention(
            query[start:end],
            key[start:end],
            value[start:end],
            scale,
        )
        # TODO(woosuk): Unnecessary copy. Optimize.
        output[start:end].copy_(out)
        start += seq_len

    return output


def _naive_masked_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
) -> torch.Tensor:
    """Causal softmax(QK^T * scale)V for a single sequence, in float32."""
    # NOTE(review): the second dim here is the number of heads, despite the
    # name `head_size`; the einsums below treat dims as (q/k, head, dim).
    seq_len, head_size, head_dim = query.shape
    # Upper-triangular mask of large negative values blocks future positions.
    attn_mask = torch.triu(torch.ones(seq_len,
                                      seq_len,
                                      dtype=query.dtype,
                                      device=query.device),
                           diagonal=1)
    attn_mask = attn_mask * torch.finfo(query.dtype).min
    attn_weights = scale * torch.einsum("qhd,khd->hqk", query, key).float()
    attn_weights = attn_weights + attn_mask.float()
    attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype)
    out = torch.einsum("hqk,khd->qhd", attn_weights, value)
    return out


""" Attention layer with torch scaled_dot_product_attention
    and PagedAttention."""
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Type

import torch
import torch_musa
from torch.nn.functional import scaled_dot_product_attention

from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                              AttentionMetadata,
                                              AttentionMetadataPerStage)
from vllm.attention.ops.paged_attn import (PagedAttention,
                                           PagedAttentionMetadata)


class TorchSDPABackend(AttentionBackend):
    """Static backend descriptor for the torch-SDPA attention path."""

    @staticmethod
    def get_impl_cls() -> Type["TorchSDPABackendImpl"]:
        return TorchSDPABackendImpl

    @staticmethod
    def make_metadata(*args, **kwargs) -> "TorchSDPAMetadata":
        return TorchSDPAMetadata(*args, **kwargs)

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                                 num_kv_heads, head_size)

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: Dict[int, List[int]],
    ) -> None:
        PagedAttention.copy_blocks(kv_caches, src_to_dists)


@dataclass
class TorchSDPAMetadata(PagedAttentionMetadata, AttentionMetadata):
    """Metadata for TorchSDPABackend.
    """
    # Currently, input sequences can only contain all prompts
    # or all decoding. True if all sequences are prompts.
    is_prompt: bool
    slot_mapping: torch.Tensor
    seq_lens: Optional[List[int]]

    def __post_init__(self):
        # Set during the execution of the first attention op.
        # It is a list because it is needed to set per prompt
        # when alibi slopes is used. It is because of the limitation
        # from xformer API.
        # will not appear in the __repr__ and __init__
        self.attn_bias: Optional[List[torch.Tensor]] = None


class TorchSDPABackendImpl(AttentionImpl):
    """Attention impl using torch SDPA for prompts, PagedAttention for decode."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: Optional[int] = None,
        alibi_slopes: Optional[List[float]] = None,
        sliding_window: Optional[int] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
        self.sliding_window = sliding_window
        if alibi_slopes is not None:
            assert len(alibi_slopes) == num_heads
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        # True when an explicit attention bias (alibi or sliding window)
        # must be passed to SDPA instead of its built-in causal masking.
        self.need_mask = (self.alibi_slopes is not None
                          or self.sliding_window is not None)

        assert self.num_heads % self.num_kv_heads == 0
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        suppored_head_sizes = PagedAttention.get_supported_head_sizes()
        if head_size not in suppored_head_sizes:
            raise ValueError(
                f"Head size {head_size} is not supported by PagedAttention. 
"
                f"Supported head sizes are: {suppored_head_sizes}.")

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: Optional[torch.Tensor],
        attn_metadata: TorchSDPAMetadata,  # type: ignore
        kv_scale: float,
    ) -> torch.Tensor:
        """Forward pass with torch SDPA and PagedAttention.

        Args:
            query: shape = [num_tokens, num_heads * head_size]
            key: shape = [num_tokens, num_kv_heads * head_size]
            value: shape = [num_tokens, num_kv_heads * head_size]
            kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        num_tokens, hidden_size = query.shape
        # Reshape the query, key, and value tensors.
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)

        torch.backends.cuda.enable_flash_sdp(True)
        torch.backends.cuda.enable_math_sdp(False)
        torch.backends.cuda.enable_mem_efficient_sdp(True)

        if kv_cache is not None:
            key_cache, value_cache = PagedAttention.split_kv_cache(
                kv_cache, self.num_kv_heads, self.head_size)
            PagedAttention.write_to_paged_cache(key, value, key_cache,
                                                value_cache,
                                                attn_metadata.slot_mapping,
                                                attn_metadata.kv_cache_dtype,
                                                kv_scale)
        if attn_metadata.is_prompt:
            assert attn_metadata.seq_lens is not None
            if (kv_cache is None or attn_metadata.block_tables.numel() == 0):
                if self.num_kv_heads != self.num_heads:
                    # Expand KV heads to match Q heads (GQA/MQA).
                    key = key.repeat_interleave(self.num_queries_per_kv, dim=1)
                    value = value.repeat_interleave(self.num_queries_per_kv,
                                                    dim=1)

                # NOTE(review): this branch reads `attn_metadata.attn_bias` /
                # `.seq_lens` directly in some places and via
                # `attn_metadata.prefill_metadata.*` in others — confirm both
                # resolve to the same object, otherwise the cached bias is
                # never seen by the check above.
                if attn_metadata.attn_bias is None:
                    if self.alibi_slopes is not None:
                        att_masks = _make_alibi_bias(
                            self.alibi_slopes, query.dtype,
                            attn_metadata.seq_lens)  # type: ignore
                    elif self.sliding_window is not None:
                        att_masks = _make_sliding_window_bias(
                            attn_metadata.prefill_metadata.seq_lens,
                            self.sliding_window,
                            query.dtype)  # type: ignore
                    else:
                        att_masks = [None] * len(attn_metadata.prefill_metadata.seq_lens)
                    attn_metadata.prefill_metadata.attn_bias = att_masks

                # SDPA layout: (batch=1, heads, tokens, head_size).
                query = query.movedim(0, query.dim() - 2).unsqueeze(0)
                key = key.movedim(0, key.dim() - 2).unsqueeze(0)
                value = value.movedim(0, value.dim() - 2).unsqueeze(0)

                start = 0
                # NOTE(review): allocated without device=query.device — on a
                # GPU/MUSA run this is a CPU tensor while sub_out is on
                # device; confirm intended.
                output = torch.empty(
                    (1, num_tokens, self.num_heads, self.head_size),
                    dtype=query.dtype)
                for seq_len, mask in zip(attn_metadata.prefill_metadata.seq_lens,
                                         attn_metadata.prefill_metadata.attn_bias):
                    end = start + seq_len
                    sub_out = scaled_dot_product_attention(
                        query[:, :, start:end, :],
                        key[:, :, start:end, :],
                        value[:, :, start:end, :],
                        attn_mask=mask,
                        dropout_p=0.0,
                        is_causal=not self.need_mask,
                        scale=self.scale).movedim(query.dim() - 2, 0)
                    # NOTE(review): output's first dim has size 1 (batch), so
                    # `output[start:end, :, :]` slices the batch dim, not the
                    # token dim — looks like it should be
                    # `output[0, start:end]`; verify against a runnable setup.
                    output[start:end, :, :] = sub_out[0]
                    start = end
            else:
                # prefix-enabled attention
                raise RuntimeError(
                    "Torch SDPA backend doesn't support prefix decoding.")

        else:
            # Decoding run.
            output = PagedAttention.forward_decode(
                query,
                key_cache,
                value_cache,
                attn_metadata.block_tables,
                attn_metadata.seq_lens_tensor,
                attn_metadata.max_seq_len,
                attn_metadata.kv_cache_dtype,
                self.num_kv_heads,
                self.scale,
                self.alibi_slopes,
                kv_scale,
            )

        # Reshape the output tensor.
        return output.view(-1, self.num_heads * self.head_size)


def _make_alibi_bias(
    alibi_slopes: torch.Tensor,
    dtype: torch.dtype,
    seq_lens: List[int],
) -> List[torch.Tensor]:
    """Build one (num_heads, seq_len, seq_len) ALiBi bias per sequence."""
    attn_biases = []
    for seq_len in seq_lens:
        bias = torch.arange(seq_len, dtype=dtype)
        # NOTE(zhuohan): HF uses
        #     `bias = bias[None, :].repeat(seq_len, 1)`
        # here. We find that both biases give the same results, but
        # the bias below more accurately follows the original ALiBi
        # paper.
        bias = bias[None, :] - bias[:, None]

        num_heads = alibi_slopes.shape[0]
        bias = bias[None, :].repeat((num_heads, 1, 1))
        bias.mul_(alibi_slopes[:, None, None])
        # -inf above the diagonal enforces causality on top of the bias.
        inf_mask = torch.empty(
            (1, seq_len, seq_len),
            dtype=bias.dtype).fill_(-torch.inf).triu_(diagonal=1)
        attn_biases.append((bias + inf_mask).to(dtype))

    return attn_biases


def _make_sliding_window_bias(
    seq_lens: List[int],
    window_size: Optional[int],
    dtype: torch.dtype,
) -> List[torch.Tensor]:
    """Build one additive causal (optionally windowed) mask per sequence.

    log(1) = 0 keeps allowed positions; log(0) = -inf blocks the rest.
    """
    attn_biases = []
    for seq_len in seq_lens:
        tensor = torch.full(
            (1, seq_len, seq_len),
            dtype=dtype,
            fill_value=1,
        )
        shift = 0
        mask = torch.tril(tensor, diagonal=shift).to(dtype)  # type: ignore
        if window_size is not None:
            mask = torch.triu(mask, diagonal=shift - window_size + 1)
        mask = torch.log(mask)
        attn_biases.append(mask.to(dtype))

    return attn_biases


"""Attention layer with xFormers and PagedAttention."""
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Type

import torch
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import (AttentionBias,
                                         BlockDiagonalCausalMask,
                                         LowerTriangularMaskWithTensorBias)

from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                              AttentionMetadata,
                                              AttentionMetadataPerStage)
from vllm.attention.ops.paged_attn import (PagedAttention,
                                           PagedAttentionMetadata)
from vllm.logger import init_logger

logger = init_logger(__name__)


class XFormersBackend(AttentionBackend):
    """Static backend descriptor for the xFormers attention path."""

    @staticmethod
    def get_impl_cls() -> Type["XFormersImpl"]:
        return XFormersImpl

    @staticmethod
    def make_metadata(*args, **kwargs) -> "XFormersMetadata":
        return XFormersMetadata(*args, **kwargs)

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                                 num_kv_heads, head_size)

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: Dict[int, List[int]],
    ) -> None:
        PagedAttention.copy_blocks(kv_caches, src_to_dists)


@dataclass
class XFormersMetadata(AttentionMetadataPerStage, PagedAttentionMetadata):
    """Metadata for XFormersBackend.

    NOTE: Any python object stored here is not updated when it is
    cuda-graph replayed. If you have values that need to be changed
    dynamically, it should be stored in tensor. The tensor has to be
    updated from `CUDAGraphRunner.forward` API.
    """
    # Currently, input sequences can only contain all prompts
    # or all decoding. True if all sequences are prompts.
    is_prompt: bool
    # (batch_size,). The sequence length per sequence. Sequence length means
    # the computed tokens + new tokens None if it is a decoding.
    seq_lens: Optional[List[int]]
    # seq_lens stored as a tensor.
    seq_lens_tensor: Optional[torch.Tensor]

    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ----------------------|
    # |-- query_len ---|

    # Maximum query length in the batch.
    max_query_len: Optional[int]
    # FIXME: It is for flash attn.
    # Maximum sequence length in the batch.
    max_seq_len: Optional[int]
    # (batch_size + 1,). The cumulative subquery lengths of the sequences in
    # the batch, used to index into subquery. E.g., if the subquery length
    # is [4, 6], it is [0, 4, 10].
    subquery_start_loc: Optional[torch.Tensor]
    # FIXME: It is for flash attn.
    # (batch_size + 1,). The cumulative sequence lengths of the sequences in
    # the batch, used to index into sequence. E.g., if the sequence length is
    # [4, 6], it is [0, 4, 10].
    seq_start_loc: Optional[torch.Tensor]
    # (batch_size,) A tensor of context lengths (tokens that are computed
    # so far).
    context_lens_tensor: Optional[torch.Tensor]

    # Whether or not if cuda graph is enabled.
    # Cuda-graph is currently enabled for decoding only.
    # TODO(woosuk): Move `use_cuda_graph` out since it's unrelated to attention.
    use_cuda_graph: bool

    def __post_init__(self):
        # Set during the execution of the first attention op.
        # It is a list because it is needed to set per prompt
        # when alibi slopes is used. It is because of the limitation
        # from xformer API.
        # will not appear in the __repr__ and __init__
        self.attn_bias: Optional[List[AttentionBias]] = None


class XFormersImpl(AttentionImpl):
    """
    If the input tensors contain prompt tokens, the layout is as follows:
    |<--------------- num_prefill_tokens ----------------->|
    |<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->|

    Otherwise, the layout is as follows:
    |<----------------- num_decode_tokens ------------------>|
    |<--decode_0-->|..........|<--decode_M-1-->|<--padding-->|

    Generation tokens can contain padding when cuda-graph is used.
    Currently, prompt tokens don't contain any padding.

    The prompts might have different lengths, while the generation tokens
    always have length 1.

    If chunked prefill is enabled, prefill tokens and decode tokens can be
    batched together in a flattened 1D query.

    |<----- num_prefill_tokens ---->|<------- num_decode_tokens --------->|
    |<-prefill_0->|...|<-prefill_N-1->|<--decode_0-->|...|<--decode_M-1-->|

    Currently, cuda graph is disabled for chunked prefill, meaning there's no
    padding between prefill and decode tokens.
+ """ + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: Optional[int] = None, + alibi_slopes: Optional[List[float]] = None, + sliding_window: Optional[int] = None, + ) -> None: + self.num_heads = num_heads + self.head_size = head_size + self.scale = float(scale) + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + self.sliding_window = sliding_window + if alibi_slopes is not None: + alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32) + self.alibi_slopes = alibi_slopes + + assert self.num_heads % self.num_kv_heads == 0 + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + + suppored_head_sizes = PagedAttention.get_supported_head_sizes() + if head_size not in suppored_head_sizes: + raise ValueError( + f"Head size {head_size} is not supported by PagedAttention. " + f"Supported head sizes are: {suppored_head_sizes}.") + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: Optional[torch.Tensor], + attn_metadata: AttentionMetadata[XFormersMetadata], + kv_scale: float, + ) -> torch.Tensor: + """Forward pass with xFormers and PagedAttention. + + Args: + query: shape = [num_tokens, num_heads * head_size] + key: shape = [num_tokens, num_kv_heads * head_size] + value: shape = [num_tokens, num_kv_heads * head_size] + kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size] + attn_metadata: Metadata for attention. + Returns: + shape = [num_tokens, num_heads * head_size] + """ + num_tokens, hidden_size = query.shape + query = query.view(-1, self.num_heads, self.head_size) + key = key.view(-1, self.num_kv_heads, self.head_size) + value = value.view(-1, self.num_kv_heads, self.head_size) + + if kv_cache is not None: + key_cache, value_cache = PagedAttention.split_kv_cache( + kv_cache, self.num_kv_heads, self.head_size) + + # Reshape the input keys and values and store them in the cache. 
+ # If kv_cache is not provided, the new key and value tensors are + # not cached. This happens during the initial memory profiling run. + PagedAttention.write_to_paged_cache(key, value, key_cache, + value_cache, + attn_metadata.slot_mapping, + attn_metadata.kv_cache_dtype, + kv_scale) + + num_prefill_tokens = attn_metadata.num_prefill_tokens + num_decode_tokens = attn_metadata.num_decode_tokens + assert key.shape[0] == num_prefill_tokens + num_decode_tokens + assert value.shape[0] == num_prefill_tokens + num_decode_tokens + + output = torch.empty_like(query) + # Query for decode. KV is not needed because it is already cached. + decode_query = query[num_prefill_tokens:] + # QKV for prefill. + query = query[:num_prefill_tokens] + key = key[:num_prefill_tokens] + value = value[:num_prefill_tokens] + + assert query.shape[0] == num_prefill_tokens + assert decode_query.shape[0] == num_decode_tokens + + if prefill_meta := attn_metadata.prefill_metadata: + # Prompt run. + if kv_cache is None or prefill_meta.block_tables.numel() == 0: + # normal attention. + # block tables are empty if the prompt does not have a cached + # prefix. + out = self._run_memory_efficient_xformers_forward( + query, key, value, prefill_meta) + assert out.shape == output[:num_prefill_tokens].shape + output[:num_prefill_tokens] = out + else: + # prefix-enabled attention + # TODO(Hai) this triton kernel has regression issue (broke) to + # deal with different data types between KV and FP8 KV cache, + # to be addressed separately. 
+ out = PagedAttention.forward_prefix( + query, + key, + value, + key_cache, + value_cache, + prefill_meta.block_tables, + prefill_meta.subquery_start_loc, + prefill_meta.seq_lens_tensor, + prefill_meta.context_lens_tensor, + prefill_meta.max_query_len, + self.alibi_slopes, + self.sliding_window, + ) + assert output[:num_prefill_tokens].shape == out.shape + output[:num_prefill_tokens] = out + + if decode_meta := attn_metadata.decode_metadata: + output[num_prefill_tokens:] = PagedAttention.forward_decode( + decode_query, + key_cache, + value_cache, + decode_meta.block_tables, + decode_meta.seq_lens_tensor, + decode_meta.max_seq_len, + attn_metadata.kv_cache_dtype, + self.num_kv_heads, + self.scale, + self.alibi_slopes, + kv_scale, + ) + + # Reshape the output tensor. + return output.view(-1, self.num_heads * self.head_size) + + def _run_memory_efficient_xformers_forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_metadata: XFormersMetadata, + ) -> torch.Tensor: + """Attention for 1D query of multiple prompts. Multiple prompt + tokens are flattened in to `query` input. + + See https://facebookresearch.github.io/xformers/components/ops.html + for API spec. + + Args: + output: shape = [num_prefill_tokens, num_heads, head_size] + query: shape = [num_prefill_tokens, num_heads, head_size] + key: shape = [num_prefill_tokens, num_kv_heads, head_size] + value: shape = [num_prefill_tokens, num_kv_heads, head_size] + attn_metadata: Metadata for attention. + """ + assert attn_metadata.seq_lens is not None + original_query = query + if self.num_kv_heads != self.num_heads: + # GQA/MQA requires the shape [B, M, G, H, K]. + # Note that the output also has the same shape (which is different + # from a spec from the doc). 
+ query = query.view(query.shape[0], self.num_kv_heads, + self.num_queries_per_kv, query.shape[-1]) + key = key[:, :, + None, :].expand(key.shape[0], self.num_kv_heads, + self.num_queries_per_kv, key.shape[-1]) + value = value[:, :, + None, :].expand(value.shape[0], self.num_kv_heads, + self.num_queries_per_kv, + value.shape[-1]) + # Set attention bias if not provided. This typically happens at + # the very attention layer of every iteration. + # FIXME(woosuk): This is a hack. + if attn_metadata.attn_bias is None: + if self.alibi_slopes is None: + attn_bias = BlockDiagonalCausalMask.from_seqlens( + attn_metadata.seq_lens) + if self.sliding_window is not None: + attn_bias = attn_bias.make_local_attention( + self.sliding_window) + attn_metadata.attn_bias = [attn_bias] + else: + attn_metadata.attn_bias = _make_alibi_bias( + self.alibi_slopes, self.num_kv_heads, query.dtype, + attn_metadata.seq_lens) + + # No alibi slopes. + # TODO(woosuk): Too many view operations. Let's try to reduce + # them in the future for code readability. + if self.alibi_slopes is None: + # Add the batch dimension. + query = query.unsqueeze(0) + key = key.unsqueeze(0) + value = value.unsqueeze(0) + out = xops.memory_efficient_attention_forward( + query, + key, + value, + attn_bias=attn_metadata.attn_bias[0], + p=0.0, + scale=self.scale) + return out.view_as(original_query) + + # Attention with alibi slopes. + # FIXME(woosuk): Because xformers does not support dynamic sequence + # lengths with custom attention bias, we process each prompt one by + # one. This is inefficient, especially when we have many short prompts. + output = torch.empty_like(original_query) + start = 0 + for i, seq_len in enumerate(attn_metadata.seq_lens): + end = start + seq_len + out = xops.memory_efficient_attention_forward( + query[None, start:end], + key[None, start:end], + value[None, start:end], + attn_bias=attn_metadata.attn_bias[i], + p=0.0, + scale=self.scale) + # TODO(woosuk): Unnecessary copy. Optimize. 
def _make_alibi_bias(
    alibi_slopes: torch.Tensor,
    num_kv_heads: int,
    dtype: torch.dtype,
    seq_lens: List[int],
) -> List[AttentionBias]:
    # Fixed return annotation: the function returns a *list* of biases
    # (one per prompt), not a single LowerTriangularMaskWithTensorBias.
    """Create one ALiBi attention bias per prompt.

    Args:
        alibi_slopes: (num_heads,) per-head ALiBi slopes.
        num_kv_heads: Number of KV heads; when it differs from num_heads the
            head dim is unflattened into (num_kv_heads, queries_per_kv) to
            match the GQA/MQA query layout.
        dtype: dtype of the bias tensors.
        seq_lens: Prompt lengths in the batch.
    """
    attn_biases = []
    for seq_len in seq_lens:
        bias = torch.arange(seq_len, dtype=dtype)
        # NOTE(zhuohan): HF uses
        #     `bias = bias[None, :].repeat(seq_len, 1)`
        # here. We find that both biases give the same results, but
        # the bias below more accurately follows the original ALiBi
        # paper.
        # Calculate a matrix where each element represents ith element- jth
        # element.
        bias = bias[None, :] - bias[:, None]

        # Pad the key dim to a multiple of 8, then slice back; the storage
        # alignment presumably matches xformers' bias requirements --
        # TODO confirm.
        padded_len = (seq_len + 7) // 8 * 8
        num_heads = alibi_slopes.shape[0]
        bias = torch.empty(
            1,  # batch size
            num_heads,
            seq_len,
            padded_len,
            device=alibi_slopes.device,
            dtype=dtype,
        )[:, :, :, :seq_len].copy_(bias)
        bias.mul_(alibi_slopes[:, None, None])
        if num_heads != num_kv_heads:
            bias = bias.unflatten(1, (num_kv_heads, num_heads // num_kv_heads))
        attn_biases.append(LowerTriangularMaskWithTensorBias(bias))

    return attn_biases
+ """ + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: Optional[int] = None, + alibi_slopes: Optional[List[float]] = None, + sliding_window: Optional[int] = None, + ) -> None: + super().__init__() + self.backend = get_attn_backend(torch.get_default_dtype()) + impl_cls = self.backend.get_impl_cls() + self.impl = impl_cls(num_heads, head_size, scale, num_kv_heads, + alibi_slopes, sliding_window) + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: Optional[torch.Tensor], + attn_metadata: AttentionMetadata[AttentionMetadataPerStage], + kv_scale: float = 1.0, + ) -> torch.Tensor: + return self.impl.forward(query, key, value, kv_cache, attn_metadata, + kv_scale) + + def extra_repr(self) -> str: + s = f"head_size={self.impl.head_size}" # type: ignore + s += f", num_heads={self.impl.num_heads}" # type: ignore + s += f", num_kv_heads={self.impl.num_kv_heads}" # type: ignore + s += f", scale={self.impl.scale}" # type: ignore + return s diff --git a/vllm/attention/ops/__init__.py b/vllm/attention/ops/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/attention/ops/paged_attn.py b/vllm/attention/ops/paged_attn.py new file mode 100644 index 0000000..00a0f10 --- /dev/null +++ b/vllm/attention/ops/paged_attn.py @@ -0,0 +1,216 @@ +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch + +from vllm import _custom_ops as ops +from vllm.attention.ops.prefix_prefill import context_attention_fwd + +# Should be the same as PARTITION_SIZE in `paged_attention_v2_launcher`. +_PARTITION_SIZE = 512 + + +@dataclass +class PagedAttentionMetadata: + """Metadata for PagedAttention.""" + # (batch_size,). The length of sequences (entire tokens seen so far) per + # sequence. + seq_lens_tensor: Optional[torch.Tensor] + # Maximum sequence length in the batch. + max_seq_len: Optional[int] + # (batch_size, max_blocks_per_seq). 
class PagedAttention:
    """Stateless helpers implementing paged attention over the block-based
    KV cache (shared by the attention backends)."""

    @staticmethod
    def get_supported_head_sizes() -> List[int]:
        # Head sizes the paged-attention kernels accept.
        return [64, 80, 96, 112, 128, 256]

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        # Leading dim of 2: index 0 holds the key cache, index 1 the value
        # cache, each stored flat per block.
        return (2, num_blocks, block_size * num_kv_heads * head_size)

    @staticmethod
    def split_kv_cache(
        kv_cache: torch.Tensor,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Split the combined cache into key and value views.

        The key cache is tiled along the head dim in chunks of `x` elements
        (16 bytes worth of the cache dtype) to match the kernels' expected
        layout.
        """
        x = 16 // kv_cache.element_size()
        num_blocks = kv_cache.shape[1]

        key_cache = kv_cache[0]
        key_cache = key_cache.view(num_blocks, num_kv_heads, head_size // x,
                                   -1, x)
        value_cache = kv_cache[1]
        value_cache = value_cache.view(num_blocks, num_kv_heads, head_size, -1)
        return key_cache, value_cache

    @staticmethod
    def write_to_paged_cache(
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        slot_mapping: torch.Tensor,
        kv_cache_dtype: str,
        kv_scale: float,
    ) -> None:
        """Scatter new key/value tokens into the paged cache at the slots
        given by `slot_mapping` (custom op)."""
        ops.reshape_and_cache(
            key,
            value,
            key_cache,
            value_cache,
            slot_mapping.flatten(),
            kv_cache_dtype,
            kv_scale,
        )

    @staticmethod
    def forward_decode(
        query: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        seq_lens: torch.Tensor,
        max_seq_len: int,
        kv_cache_dtype: str,
        num_kv_heads: int,
        scale: float,
        alibi_slopes: Optional[torch.Tensor],
        kv_scale: float,
    ) -> torch.Tensor:
        """Single-token decode attention against the paged KV cache.

        Dispatches to the V1 or V2 paged-attention kernel based on the
        heuristic below.
        """
        output = torch.empty_like(query)

        block_size = value_cache.shape[3]
        num_seqs, num_heads, head_size = query.shape
        max_num_partitions = ((max_seq_len + _PARTITION_SIZE - 1) //
                              _PARTITION_SIZE)
        # NOTE(woosuk): We use a simple heuristic to decide whether to use
        # PagedAttention V1 or V2. If the number of partitions is 1, we use
        # V1 to avoid the overhead of reduction. Also, if the number of
        # sequences or heads is large, we use V1 since there is enough work
        # to parallelize.
        # TODO(woosuk): Tune this heuristic.
        # For context len > 8192, use V2 kernel to avoid shared memory
        # shortage.
        use_v1 = (max_seq_len <= 8192
                  and (max_num_partitions == 1 or num_seqs * num_heads > 512))
        if use_v1:
            # Run PagedAttention V1.
            ops.paged_attention_v1(
                output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                kv_scale,
            )
        else:
            # Run PagedAttention V2: partial results per partition plus a
            # reduction pass, so scratch buffers are required.
            assert _PARTITION_SIZE % block_size == 0
            tmp_output = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions, head_size),
                dtype=output.dtype,
                device=output.device,
            )
            exp_sums = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions),
                dtype=torch.float32,
                device=output.device,
            )
            max_logits = torch.empty_like(exp_sums)
            ops.paged_attention_v2(
                output,
                exp_sums,
                max_logits,
                tmp_output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                kv_scale,
            )
        return output

    @staticmethod
    def forward_prefix(
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        subquery_start_loc: torch.Tensor,
        seq_lens_tensor: torch.Tensor,
        context_lens: torch.Tensor,
        max_query_len: int,
        alibi_slopes: Optional[torch.Tensor],
        sliding_window: Optional[int],
    ) -> torch.Tensor:
        """Prefill attention when part of the prompt is already cached
        (prefix caching), using the triton context-attention kernel."""
        output = torch.empty_like(query)
        context_attention_fwd(
            query,
            key,
            value,
            output,
            key_cache,
            value_cache,
            block_tables,
            # subquery_start_loc is (batch_size + 1,); the kernel takes the
            # per-sequence start offsets only.
            subquery_start_loc[:-1],
            seq_lens_tensor,
            context_lens,
            max_query_len,
            alibi_slopes,
            sliding_window,
        )
        return output

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        # Key and value planes are swapped independently with the same
        # block mapping.
        src_key_cache = src_kv_cache[0]
        dst_key_cache = dst_kv_cache[0]
        ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst)

        src_value_cache = src_kv_cache[1]
        dst_value_cache = dst_kv_cache[1]
        ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: Dict[int, List[int]],
    ) -> None:
        # One fused custom op copies blocks across all layers' caches.
        key_caches = [kv_cache[0] for kv_cache in kv_caches]
        value_caches = [kv_cache[1] for kv_cache in kv_caches]
        ops.copy_blocks(key_caches, value_caches, src_to_dists)
# The kernels in this file are adapted from LightLLM's context_attention_fwd:
# https://github.com/ModelTC/lightllm/blob/main/lightllm/models/llama/triton_kernel/context_flashattention_nopad.py

import torch
import triton
import triton.language as tl

if triton.__version__ >= "2.1.0":

    # Context (prefix-cached) prefill attention, online-softmax style:
    # phase 1 attends the query block to the cached context KV (no causal
    # mask), phase 2 attends it to the in-batch query KV with a causal
    # mask. Supports an optional sliding window via SLIDING_WINDOW.
    @triton.jit
    def _fwd_kernel(
        Q,
        K,
        V,
        K_cache,
        V_cache,
        B_Loc,
        sm_scale,
        B_Start_Loc,
        B_Seqlen,
        B_Ctxlen,
        block_size,
        x,
        Out,
        stride_b_loc_b,
        stride_b_loc_s,
        stride_qbs,
        stride_qh,
        stride_qd,
        stride_kbs,
        stride_kh,
        stride_kd,
        stride_vbs,
        stride_vh,
        stride_vd,
        stride_obs,
        stride_oh,
        stride_od,
        stride_k_cache_bs,
        stride_k_cache_h,
        stride_k_cache_d,
        stride_k_cache_bl,
        stride_k_cache_x,
        stride_v_cache_bs,
        stride_v_cache_h,
        stride_v_cache_d,
        stride_v_cache_bl,
        num_queries_per_kv: int,
        BLOCK_M: tl.constexpr,
        BLOCK_DMODEL: tl.constexpr,  # head size
        BLOCK_DMODEL_PADDED: tl.constexpr,  # head size padded to a power of 2
        BLOCK_N: tl.constexpr,
        SLIDING_WINDOW: tl.constexpr,
    ):
        # Program ids: (sequence in batch, query head, query block).
        cur_batch = tl.program_id(0)
        cur_head = tl.program_id(1)
        start_m = tl.program_id(2)

        cur_kv_head = cur_head // num_queries_per_kv

        cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
        cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
        cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
        cur_batch_query_len = cur_batch_seq_len - cur_batch_ctx_len

        # start position inside of the query
        # generally, N goes over kv, while M goes over query_len
        block_start_loc = BLOCK_M * start_m

        # initialize offsets
        # [N]; starts at 0
        offs_n = tl.arange(0, BLOCK_N)
        # [D]; starts at 0
        offs_d = tl.arange(0, BLOCK_DMODEL_PADDED)
        # [M]; starts at current position in query
        offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
        # [M,D]
        off_q = (
            (cur_batch_in_all_start_index + offs_m[:, None]) * stride_qbs +
            cur_head * stride_qh + offs_d[None, :] * stride_qd)

        # Masks out the padded tail of the head dimension.
        dim_mask = tl.where(
            tl.arange(0, BLOCK_DMODEL_PADDED) < BLOCK_DMODEL, 1,
            0).to(tl.int1)  # [D]

        q = tl.load(Q + off_q,
                    mask=dim_mask[None, :] &
                    (offs_m[:, None] < cur_batch_query_len),
                    other=0.0)  # [M,D]

        # initialize pointer to m and l (online-softmax running max / sum)
        m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")  # [M]
        l_i = tl.zeros([BLOCK_M], dtype=tl.float32)  # [M]
        acc = tl.zeros([BLOCK_M, BLOCK_DMODEL_PADDED],
                       dtype=tl.float32)  # [M,D]

        # compute query against context (no causal mask here)
        for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
            start_n = tl.multiple_of(start_n, BLOCK_N)
            # -- compute qk ----
            # bn: physical block ids for this KV tile, via the block table.
            bn = tl.load(B_Loc + cur_batch * stride_b_loc_b +
                         ((start_n + offs_n) // block_size) * stride_b_loc_s,
                         mask=(start_n + offs_n) < cur_batch_ctx_len,
                         other=0)  # [N]
            # [D,N]
            off_k = (bn[None, :] * stride_k_cache_bs +
                     cur_kv_head * stride_k_cache_h +
                     (offs_d[:, None] // x) * stride_k_cache_d +
                     ((start_n + offs_n[None, :]) % block_size) *
                     stride_k_cache_bl +
                     (offs_d[:, None] % x) * stride_k_cache_x)
            # [N,D]
            off_v = (
                bn[:, None] * stride_v_cache_bs +
                cur_kv_head * stride_v_cache_h +
                offs_d[None, :] * stride_v_cache_d +
                (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl)
            k = tl.load(K_cache + off_k,
                        mask=dim_mask[:, None] &
                        ((start_n + offs_n[None, :]) < cur_batch_ctx_len),
                        other=0.0)  # [D,N]

            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)  # [M,N]
            qk += tl.dot(q, k)
            qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk,
                          float("-inf"))
            qk *= sm_scale
            if SLIDING_WINDOW > 0:
                # (cur_batch_ctx_len + offs_m[:, None]) are the positions of
                # Q entries in sequence
                # (start_n + offs_n[None, :]) are the positions of
                # KV entries in sequence
                # So the condition makes sure each entry in Q only attends
                # to KV entries not more than SLIDING_WINDOW away.
                #
                # We can't use -inf here, because the
                # sliding window may lead to the entire row being masked.
                # This then makes m_ij contain -inf, which causes NaNs in
                # exp().
                qk = tl.where((cur_batch_ctx_len + offs_m[:, None]) -
                              (start_n + offs_n[None, :]) < SLIDING_WINDOW, qk,
                              -10000)

            # -- compute m_ij, p, l_ij
            m_ij = tl.max(qk, 1)  # [M]
            p = tl.exp(qk - m_ij[:, None])  # [M,N]
            l_ij = tl.sum(p, 1)  # [M]
            # -- update m_i and l_i
            m_i_new = tl.maximum(m_i, m_ij)  # [M]
            alpha = tl.exp(m_i - m_i_new)  # [M]
            beta = tl.exp(m_ij - m_i_new)  # [M]
            l_i_new = alpha * l_i + beta * l_ij  # [M]

            # -- update output accumulator --
            # scale p
            p_scale = beta / l_i_new
            p = p * p_scale[:, None]
            # scale acc
            acc_scale = l_i / l_i_new * alpha
            acc = acc * acc_scale[:, None]
            # update acc
            v = tl.load(V_cache + off_v,
                        mask=dim_mask[None, :] &
                        ((start_n + offs_n[:, None]) < cur_batch_ctx_len),
                        other=0.0)  # [N,D]

            p = p.to(v.dtype)
            acc += tl.dot(p, v)
            # # update m_i and l_i
            l_i = l_i_new
            m_i = m_i_new

        off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh +
                 offs_d[:, None] * stride_kd)
        off_v = (offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh +
                 offs_d[None, :] * stride_vd)
        k_ptrs = K + off_k
        v_ptrs = V + off_v

        # block_mask is 0 when we're already past the current query length
        block_mask = tl.where(block_start_loc < cur_batch_query_len, 1, 0)

        # compute query against itself (with causal mask)
        for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
            start_n = tl.multiple_of(start_n, BLOCK_N)
            # -- compute qk ----
            k = tl.load(k_ptrs +
                        (cur_batch_in_all_start_index + start_n) * stride_kbs,
                        mask=dim_mask[:, None] &
                        ((start_n + offs_n[None, :]) < cur_batch_query_len),
                        other=0.0)

            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
            qk += tl.dot(q, k)
            qk *= sm_scale
            # apply causal mask
            qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk,
                          float("-inf"))
            if SLIDING_WINDOW > 0:
                qk = tl.where(
                    offs_m[:, None] -
                    (start_n + offs_n[None, :]) < SLIDING_WINDOW, qk, -10000)

            # -- compute m_ij, p, l_ij
            m_ij = tl.max(qk, 1)
            p = tl.exp(qk - m_ij[:, None])
            l_ij = tl.sum(p, 1)
            # -- update m_i and l_i
            m_i_new = tl.maximum(m_i, m_ij)
            alpha = tl.exp(m_i - m_i_new)
            beta = tl.exp(m_ij - m_i_new)
            l_i_new = alpha * l_i + beta * l_ij
            # -- update output accumulator --
            # scale p
            p_scale = beta / l_i_new
            p = p * p_scale[:, None]
            # scale acc
            acc_scale = l_i / l_i_new * alpha
            acc = acc * acc_scale[:, None]
            # update acc
            v = tl.load(v_ptrs +
                        (cur_batch_in_all_start_index + start_n) * stride_vbs,
                        mask=dim_mask[None, :] &
                        ((start_n + offs_n[:, None]) < cur_batch_query_len),
                        other=0.0)

            p = p.to(v.dtype)
            acc += tl.dot(p, v)
            # update m_i and l_i
            l_i = l_i_new
            m_i = m_i_new
        # initialize pointers to output
        off_o = (
            (cur_batch_in_all_start_index + offs_m[:, None]) * stride_obs +
            cur_head * stride_oh + offs_d[None, :] * stride_od)
        out_ptrs = Out + off_o
        tl.store(out_ptrs,
                 acc,
                 mask=dim_mask[None, :] &
                 (offs_m[:, None] < cur_batch_query_len))
        return
    # Flash-attention-v2 style variant of the context prefill kernel: the
    # accumulator is only rescaled by alpha and the probabilities are not
    # divided by l_i inside the loop.
    # NOTE(review): the final `acc /= l_i[:, None]` is commented out, so the
    # stored output is the *unnormalized* accumulator — presumably the
    # caller performs the normalization; verify before use.
    @triton.jit
    def _fwd_kernel_flash_attn_v2(
        Q,
        K,
        V,
        K_cache,
        V_cache,
        B_Loc,
        sm_scale,
        B_Start_Loc,
        B_Seqlen,
        B_Ctxlen,
        block_size,
        x,
        Out,
        stride_b_loc_b,
        stride_b_loc_s,
        stride_qbs,
        stride_qh,
        stride_qd,
        stride_kbs,
        stride_kh,
        stride_kd,
        stride_vbs,
        stride_vh,
        stride_vd,
        stride_obs,
        stride_oh,
        stride_od,
        stride_k_cache_bs,
        stride_k_cache_h,
        stride_k_cache_d,
        stride_k_cache_bl,
        stride_k_cache_x,
        stride_v_cache_bs,
        stride_v_cache_h,
        stride_v_cache_d,
        stride_v_cache_bl,
        num_queries_per_kv: int,
        BLOCK_M: tl.constexpr,
        BLOCK_DMODEL: tl.constexpr,
        BLOCK_N: tl.constexpr,
    ):
        # Program ids: (sequence in batch, query head, query block).
        cur_batch = tl.program_id(0)
        cur_head = tl.program_id(1)
        start_m = tl.program_id(2)

        cur_kv_head = cur_head // num_queries_per_kv

        cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
        cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
        cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)

        block_start_loc = BLOCK_M * start_m

        # initialize offsets
        offs_n = tl.arange(0, BLOCK_N)
        offs_d = tl.arange(0, BLOCK_DMODEL)
        offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
        off_q = (
            (cur_batch_in_all_start_index + offs_m[:, None]) * stride_qbs +
            cur_head * stride_qh + offs_d[None, :] * stride_qd)

        q = tl.load(
            Q + off_q,
            mask=offs_m[:, None] < cur_batch_seq_len - cur_batch_ctx_len,
            other=0.0)

        # # initialize pointer to m and l (online-softmax running max / sum)
        m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
        l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
        acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)

        # Phase 1: attend to the cached context KV (no causal mask needed).
        for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
            start_n = tl.multiple_of(start_n, BLOCK_N)
            # -- compute qk ----
            # bn: physical block ids for this KV tile, via the block table.
            bn = tl.load(B_Loc + cur_batch * stride_b_loc_b +
                         ((start_n + offs_n) // block_size) * stride_b_loc_s,
                         mask=(start_n + offs_n) < cur_batch_ctx_len,
                         other=0)
            off_k = (bn[None, :] * stride_k_cache_bs +
                     cur_kv_head * stride_k_cache_h +
                     (offs_d[:, None] // x) * stride_k_cache_d +
                     ((start_n + offs_n[None, :]) % block_size) *
                     stride_k_cache_bl +
                     (offs_d[:, None] % x) * stride_k_cache_x)
            off_v = (
                bn[:, None] * stride_v_cache_bs +
                cur_kv_head * stride_v_cache_h +
                offs_d[None, :] * stride_v_cache_d +
                (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl)
            k = tl.load(K_cache + off_k,
                        mask=(start_n + offs_n[None, :]) < cur_batch_ctx_len,
                        other=0.0)

            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
            qk += tl.dot(q, k)
            qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk,
                          float("-inf"))
            qk *= sm_scale

            # -- compute m_ij, p, l_ij
            m_ij = tl.max(qk, 1)
            m_i_new = tl.maximum(m_i, m_ij)
            p = tl.math.exp(qk - m_i_new[:, None])
            l_ij = tl.sum(p, 1)
            # -- update m_i and l_i

            alpha = tl.math.exp(m_i - m_i_new)
            l_i_new = alpha * l_i + l_ij
            # -- update output accumulator --
            # scale p
            # scale acc
            acc_scale = alpha
            # acc_scale = l_i / l_i_new * alpha
            acc = acc * acc_scale[:, None]
            # update acc
            v = tl.load(V_cache + off_v,
                        mask=(start_n + offs_n[:, None]) < cur_batch_ctx_len,
                        other=0.0)

            p = p.to(v.dtype)
            acc += tl.dot(p, v)
            # update m_i and l_i
            l_i = l_i_new
            m_i = m_i_new

        off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh +
                 offs_d[:, None] * stride_kd)
        off_v = (offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh +
                 offs_d[None, :] * stride_vd)
        k_ptrs = K + off_k
        v_ptrs = V + off_v

        # 0 when the whole query block lies past the current query length.
        block_mask = tl.where(
            block_start_loc < cur_batch_seq_len - cur_batch_ctx_len, 1, 0)

        # Phase 2: attend to the in-batch query KV with a causal mask.
        for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
            start_n = tl.multiple_of(start_n, BLOCK_N)
            # -- compute qk ----
            k = tl.load(k_ptrs +
                        (cur_batch_in_all_start_index + start_n) * stride_kbs,
                        mask=(start_n + offs_n[None, :]) <
                        cur_batch_seq_len - cur_batch_ctx_len,
                        other=0.0)

            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
            qk += tl.dot(q, k)
            qk *= sm_scale
            qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk,
                          float("-inf"))

            # -- compute m_ij, p, l_ij
            m_ij = tl.max(qk, 1)
            m_i_new = tl.maximum(m_i, m_ij)
            p = tl.math.exp(qk - m_i_new[:, None])
            l_ij = tl.sum(p, 1)
            # -- update m_i and l_i

            alpha = tl.math.exp(m_i - m_i_new)
            l_i_new = alpha * l_i + l_ij
            # -- update output accumulator --
            # scale p
            # scale acc
            acc_scale = alpha
            # acc_scale = l_i / l_i_new * alpha
            acc = acc * acc_scale[:, None]
            # update acc
            v = tl.load(v_ptrs +
                        (cur_batch_in_all_start_index + start_n) * stride_vbs,
                        mask=(start_n + offs_n[:, None]) <
                        cur_batch_seq_len - cur_batch_ctx_len,
                        other=0.0)

            p = p.to(v.dtype)
            acc += tl.dot(p, v)
            # update m_i and l_i
            l_i = l_i_new
            m_i = m_i_new

        # acc /= l_i[:, None]
        # initialize pointers to output
        off_o = (
            (cur_batch_in_all_start_index + offs_m[:, None]) * stride_obs +
            cur_head * stride_oh + offs_d[None, :] * stride_od)
        out_ptrs = Out + off_o
        tl.store(out_ptrs,
                 acc,
                 mask=offs_m[:, None] < cur_batch_seq_len - cur_batch_ctx_len)
        return
cur_batch_seq_len - cur_batch_ctx_len) + return + + @triton.jit + def _fwd_kernel_alibi( + Q, + K, + V, + K_cache, + V_cache, + B_Loc, + sm_scale, + B_Start_Loc, + B_Seqlen, + B_Ctxlen, + Alibi_slopes, + block_size, + x, + Out, + stride_b_loc_b, + stride_b_loc_s, + stride_qbs, + stride_qh, + stride_qd, + stride_kbs, + stride_kh, + stride_kd, + stride_vbs, + stride_vh, + stride_vd, + stride_obs, + stride_oh, + stride_od, + stride_k_cache_bs, + stride_k_cache_h, + stride_k_cache_d, + stride_k_cache_bl, + stride_k_cache_x, + stride_v_cache_bs, + stride_v_cache_h, + stride_v_cache_d, + stride_v_cache_bl, + num_queries_per_kv: int, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + ): + # attn_bias[] + cur_batch = tl.program_id(0) + cur_head = tl.program_id(1) + start_m = tl.program_id(2) + + cur_kv_head = cur_head // num_queries_per_kv + + # cur_batch_seq_len: the length of prompts + # cur_batch_ctx_len: the length of prefix + # cur_batch_in_all_start_index: the start id of the dim=0 + cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch) + cur_batch_seq_len = tl.load(B_Seqlen + cur_batch) + cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch) + + block_start_loc = BLOCK_M * start_m + + # initialize offsets + offs_n = tl.arange(0, BLOCK_N) + offs_d = tl.arange(0, BLOCK_DMODEL) + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + off_q = ( + (cur_batch_in_all_start_index + offs_m[:, None]) * stride_qbs + + cur_head * stride_qh + offs_d[None, :] * stride_qd) + + q = tl.load( + Q + off_q, + mask=offs_m[:, None] < cur_batch_seq_len - cur_batch_ctx_len, + other=0.0) + + # # initialize pointer to m and l + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + + alibi_slope = tl.load(Alibi_slopes + cur_head) + alibi_start_q = tl.arange( + 0, BLOCK_M) + block_start_loc + cur_batch_ctx_len + alibi_start_k = 0 + for start_n in 
range(0, cur_batch_ctx_len, BLOCK_N): + start_n = tl.multiple_of(start_n, BLOCK_N) + # -- compute qk ---- + bn = tl.load(B_Loc + cur_batch * stride_b_loc_b + + ((start_n + offs_n) // block_size) * stride_b_loc_s, + mask=(start_n + offs_n) < cur_batch_ctx_len, + other=0) + off_k = (bn[None, :] * stride_k_cache_bs + + cur_kv_head * stride_k_cache_h + + (offs_d[:, None] // x) * stride_k_cache_d + + ((start_n + offs_n[None, :]) % block_size) * + stride_k_cache_bl + + (offs_d[:, None] % x) * stride_k_cache_x) + off_v = ( + bn[:, None] * stride_v_cache_bs + + cur_kv_head * stride_v_cache_h + + offs_d[None, :] * stride_v_cache_d + + (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl) + k = tl.load(K_cache + off_k, + mask=(start_n + offs_n[None, :]) < cur_batch_ctx_len, + other=0.0) + + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + qk += tl.dot(q, k) + qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk, + float("-inf")) + qk *= sm_scale + + # load alibi + alibi = (tl.arange(0, BLOCK_N)[None, :] + alibi_start_k - + alibi_start_q[:, None]) * alibi_slope + alibi = tl.where( + (alibi <= 0) & (alibi_start_q[:, None] < cur_batch_seq_len), + alibi, float("-inf")) + qk += alibi + alibi_start_k += BLOCK_N + + # -- compute m_ij, p, l_ij + m_ij = tl.max(qk, 1) + m_i_new = tl.maximum(m_i, m_ij) + p = tl.math.exp(qk - m_i_new[:, None]) + l_ij = tl.sum(p, 1) + # -- update m_i and l_i + + alpha = tl.math.exp(m_i - m_i_new) + l_i_new = alpha * l_i + l_ij + # -- update output accumulator -- + # scale p + # scale acc + acc_scale = alpha + # acc_scale = l_i / l_i_new * alpha + acc = acc * acc_scale[:, None] + # update acc + v = tl.load(V_cache + off_v, + mask=(start_n + offs_n[:, None]) < cur_batch_ctx_len, + other=0.0) + + p = p.to(v.dtype) + acc += tl.dot(p, v, allow_tf32=False) + # update m_i and l_i + l_i = l_i_new + m_i = m_i_new + + off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh + + offs_d[:, None] * stride_kd) + off_v = (offs_n[:, 
None] * stride_vbs + cur_kv_head * stride_vh + + offs_d[None, :] * stride_vd) + k_ptrs = K + off_k + v_ptrs = V + off_v + + block_mask = tl.where( + block_start_loc < cur_batch_seq_len - cur_batch_ctx_len, 1, 0) + + # init alibi + alibi_slope = tl.load(Alibi_slopes + cur_head) + alibi_start_q = tl.arange( + 0, BLOCK_M) + block_start_loc + cur_batch_ctx_len + alibi_start_k = cur_batch_ctx_len + # # init debugger + # offset_db_q = tl.arange(0, BLOCK_M) + block_start_loc + # offset_db_k = tl.arange(0, BLOCK_N) + # calc q[BLOCK_M, BLOCK_MODEL] mul k[prefix_len: , BLOCK_DMODEL] + for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N): + start_n = tl.multiple_of(start_n, BLOCK_N) + # -- compute qk ---- + k = tl.load(k_ptrs + + (cur_batch_in_all_start_index + start_n) * stride_kbs, + mask=(start_n + offs_n[None, :]) < + cur_batch_seq_len - cur_batch_ctx_len, + other=0.0) + + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + qk += tl.dot(q, k, allow_tf32=False) + qk *= sm_scale + qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, + float("-inf")) + + # load alibi + alibi = (tl.arange(0, BLOCK_N)[None, :] + alibi_start_k - + alibi_start_q[:, None]) * alibi_slope + alibi = tl.where( + (alibi <= 0) & (alibi_start_q[:, None] < cur_batch_seq_len), + alibi, float("-inf")) + qk += alibi + alibi_start_k += BLOCK_N + + # -- compute m_ij, p, l_ij + m_ij = tl.max(qk, 1) + m_i_new = tl.maximum(m_i, m_ij) + p = tl.math.exp(qk - m_i_new[:, None]) + l_ij = tl.sum(p, 1) + # -- update m_i and l_i + + alpha = tl.math.exp(m_i - m_i_new) + l_i_new = alpha * l_i + l_ij + # -- update output accumulator -- + # scale p + # scale acc + acc_scale = alpha + # acc_scale = l_i / l_i_new * alpha + acc = acc * acc_scale[:, None] + # update acc + v = tl.load(v_ptrs + + (cur_batch_in_all_start_index + start_n) * stride_vbs, + mask=(start_n + offs_n[:, None]) < + cur_batch_seq_len - cur_batch_ctx_len, + other=0.0) + + p = p.to(v.dtype) + acc += tl.dot(p, v, 
@torch.inference_mode()
def context_attention_fwd(q,
                          k,
                          v,
                          o,
                          k_cache,
                          v_cache,
                          b_loc,
                          b_start_loc,
                          b_seq_len,
                          b_ctx_len,
                          max_input_len,
                          alibi_slopes=None,
                          sliding_window=None):
    """Prefix-prefill attention: attend over cached prefix KV plus new KV.

    Dispatches to ``_fwd_kernel_alibi`` when ``alibi_slopes`` is given,
    otherwise to ``_fwd_kernel``. The result is written into ``o`` in place;
    the function returns None.

    Args:
        q/k/v: packed new-token tensors, shape (num_tokens, num_heads, head_size).
        o: output tensor, same shape as q.
        k_cache/v_cache: paged KV cache tensors.
        b_loc: per-sequence block tables into the paged cache.
        b_start_loc/b_seq_len/b_ctx_len: per-sequence start offsets, total
            lengths, and cached-prefix lengths.
        max_input_len: max new-token length, used to size the launch grid.
        alibi_slopes: optional per-head ALiBi slopes.
        sliding_window: optional window size (non-ALiBi kernel only).
    """
    cap = torch.musa.get_device_capability()
    BLOCK = 128 if cap[0] >= 8 else 64
    # shape constraints
    Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
    assert Lq == Lk and Lk == Lv
    # round up Lk to a power of 2 - this is required for Triton block size
    Lk_padded = triton.next_power_of_2(Lk)

    sm_scale = 1.0 / (Lq**0.5)
    batch, head = b_seq_len.shape[0], q.shape[1]
    num_queries_per_kv = q.shape[1] // k.shape[1]

    grid = (batch, head, triton.cdiv(max_input_len, BLOCK))  # batch, head,

    # FIX: the original `8 if Lk <= 64 else 8` was a degenerate conditional
    # (both branches were 8); fold it to the constant it always produced.
    num_warps = 8

    if alibi_slopes is not None:
        # The ALiBi kernel has no BLOCK_DMODEL_PADDED support, so the head
        # size must already be a power of two here.
        # NOTE(review): this path ignores `sliding_window` — confirm intended.
        assert Lk == Lk_padded
        _fwd_kernel_alibi[grid](
            q,
            k,
            v,
            k_cache,
            v_cache,
            b_loc,
            sm_scale,
            b_start_loc,
            b_seq_len,
            b_ctx_len,
            alibi_slopes,
            v_cache.shape[3],
            8,  # x — presumably the k_cache packing factor; TODO confirm
            o,
            b_loc.stride(0),
            b_loc.stride(1),
            q.stride(0),
            q.stride(1),
            q.stride(2),
            k.stride(0),
            k.stride(1),
            k.stride(2),
            v.stride(0),
            v.stride(1),
            v.stride(2),
            o.stride(0),
            o.stride(1),
            o.stride(2),
            k_cache.stride(0),
            k_cache.stride(1),
            k_cache.stride(2),
            k_cache.stride(3),
            k_cache.stride(
                4
            ),  #[num_blocks, num_kv_heads, head_size/x, block_size, x]
            v_cache.stride(0),
            v_cache.stride(1),
            v_cache.stride(2),
            v_cache.stride(
                3),  #[num_blocks, num_kv_heads, head_size, block_size]
            num_queries_per_kv=num_queries_per_kv,
            BLOCK_M=BLOCK,
            BLOCK_DMODEL=Lk,
            BLOCK_N=BLOCK,
            num_warps=num_warps,
            num_stages=1,
        )
        return

    _fwd_kernel[grid](
        q,
        k,
        v,
        k_cache,
        v_cache,
        b_loc,
        sm_scale,
        b_start_loc,
        b_seq_len,
        b_ctx_len,
        v_cache.shape[3],
        8,  # x — presumably the k_cache packing factor; TODO confirm
        o,
        b_loc.stride(0),
        b_loc.stride(1),
        q.stride(0),
        q.stride(1),
        q.stride(2),
        k.stride(0),
        k.stride(1),
        k.stride(2),
        v.stride(0),
        v.stride(1),
        v.stride(2),
        o.stride(0),
        o.stride(1),
        o.stride(2),
        k_cache.stride(0),
        k_cache.stride(1),
        k_cache.stride(2),
        k_cache.stride(3),
        k_cache.stride(
            4),  #[num_blocks, num_kv_heads, head_size/x, block_size, x]
        v_cache.stride(0),
        v_cache.stride(1),
        v_cache.stride(2),
        v_cache.stride(
            3),  #[num_blocks, num_kv_heads, head_size, block_size]
        num_queries_per_kv=num_queries_per_kv,
        BLOCK_M=BLOCK,
        BLOCK_DMODEL=Lk,
        BLOCK_DMODEL_PADDED=Lk_padded,
        BLOCK_N=BLOCK,
        SLIDING_WINDOW=sliding_window if sliding_window is not None else 0,
        num_warps=num_warps,
        num_stages=1,
    )
    return
#!/usr/bin/env python
"""
Fused Attention
===============

This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao
(https://tridao.me/publications/flash2/flash2.pdf)
Credits: OpenAI kernel team, AMD ML Frameworks Triton team

Features supported:

1) Fwd with causal masking
2) Any sequence lengths without padding (currently fwd kernel only)
3) Support for different sequence lengths for q and k
4) Nested tensor API currently does not support dropout or bias.

Not currently supported:

1) Non power of two head dims

"""

import torch
import triton
import triton.language as tl

# Host-side dtype used by the wrapper code in this module.
torch_dtype: tl.constexpr = torch.float16


# Ceiling division, usable inside Triton kernels.
@triton.jit
def cdiv_fn(x, y):
    return (x + y - 1) // y


@triton.jit
def max_fn(x, y):
    return tl.math.max(x, y)


# Per-element philox offsets for an (m, n) dropout tile.
# NOTE(review): `philox_seed` and `dropout_p` are unused here; kept for
# signature symmetry with dropout_rng/dropout_mask.
@triton.jit
def dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride):
    ms = tl.arange(0, m)
    ns = tl.arange(0, n)
    return philox_offset + ms[:, None] * stride + ns[None, :]


@triton.jit
def dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride):
    rng_offsets = dropout_offsets(philox_seed, philox_offset, dropout_p, m, n,
                                  stride).to(tl.uint32)
    # TODO: use tl.randint for better performance
    return tl.rand(philox_seed, rng_offsets)


# Boolean keep-mask for dropout: True where the element survives.
@triton.jit
def dropout_mask(philox_seed, philox_offset, dropout_p, m, n, stride):
    rng_output = dropout_rng(philox_seed, philox_offset, dropout_p, m, n,
                             stride)
    rng_keep = rng_output > dropout_p
    return rng_keep


# Load through a block pointer. `first`/`second` select which dims get
# boundary-checked; out-of-range lanes read `pad`.
@triton.jit
def load_fn(block_ptr, first, second, pad):
    if first and second:
        tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad)
    elif first:
        tensor = tl.load(block_ptr, boundary_check=(0, ), padding_option=pad)
    elif second:
        tensor = tl.load(block_ptr, boundary_check=(1, ), padding_option=pad)
    else:
        tensor = tl.load(block_ptr)
    return tensor


# Inner loop of the FA-2 forward pass: consumes K/V blocks in
# [block_min, block_max) and returns the updated (acc, l_i, m_i)
# online-softmax state. Uses exp2 throughout; the caller pre-scales q
# by sm_scale * log2(e).
@triton.jit
def _attn_fwd_inner(
    acc,
    l_i,
    m_i,
    q,
    K_block_ptr,
    V_block_ptr,
    start_m,
    actual_seqlen_k,
    dropout_p,
    philox_seed,
    batch_philox_offset,
    encoded_softmax_block_ptr,
    block_min,
    block_max,
    offs_n_causal,
    masked_blocks,
    n_extra_tokens,
    bias_ptr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_DMODEL: tl.constexpr,
    BLOCK_N: tl.constexpr,
    OFFS_M: tl.constexpr,
    OFFS_N: tl.constexpr,
    PRE_LOAD_V: tl.constexpr,
    MASK_STEPS: tl.constexpr,
    ENABLE_DROPOUT: tl.constexpr,
    RETURN_ENCODED_SOFTMAX: tl.constexpr,
    PADDED_HEAD: tl.constexpr,
):
    # loop over k, v, and update accumulator
    for start_n in range(block_min, block_max, BLOCK_N):
        # For padded blocks, we will overrun the tensor size if
        # we load all BLOCK_N. For others, the blocks are all within range.
        k = load_fn(
            K_block_ptr,
            PADDED_HEAD,
            MASK_STEPS and (n_extra_tokens != 0),
            "zero",
        )
        if PRE_LOAD_V:
            v = load_fn(
                V_block_ptr,
                MASK_STEPS and (n_extra_tokens != 0),
                PADDED_HEAD,
                "zero",
            )
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        # We start from end of seqlen_k so only the first iteration would need
        # to be checked for padding if it is not a multiple of block_n
        # TODO: This can be optimized to only be true for the padded block.
        if MASK_STEPS:  # noqa: SIM102
            # If this is the last block / iteration, we want to
            # mask if the sequence length is not a multiple of block size
            # a solution is to always do BLOCK_M // BLOCK_N + 1 steps
            # if not is_modulo_mn. last step might get wasted but that is okay.
            # check if this masking works for that case.
            if (start_n + BLOCK_N == block_max) and (n_extra_tokens != 0):
                boundary_m = tl.full([BLOCK_M],
                                     actual_seqlen_k,
                                     dtype=tl.int32)
                size_n = start_n + OFFS_N[None, :]
                mask = size_n < boundary_m[:, None]
                qk = tl.where(mask, qk, float("-inf"))
        if IS_CAUSAL:
            causal_boundary = start_n + offs_n_causal
            causal_mask = OFFS_M[:, None] >= causal_boundary[None, :]
            qk = tl.where(causal_mask, qk, float("-inf"))
        # -- compute qk ----
        qk += tl.dot(q, k)
        if bias_ptr is not None:
            bias = load_fn(bias_ptr, False, MASK_STEPS
                           and (n_extra_tokens != 0), "zero")
            # While bias is added after multiplying qk with sm_scale, our
            # optimization to use 2^x instead of e^x results in an additional
            # scale factor of log2(e) which we must also multiply the bias with.
            qk += bias * 1.44269504089
        # Online-softmax update: new running max, then exp2-renormalize.
        m_ij = tl.maximum(m_i, tl.max(qk, 1))
        qk = qk - m_ij[:, None]
        p = tl.math.exp2(qk)

        # CAVEAT: Must update l_ij before applying dropout
        l_ij = tl.sum(p, 1)
        if ENABLE_DROPOUT:
            philox_offset = (batch_philox_offset +
                             start_m * BLOCK_M * actual_seqlen_k + start_n -
                             BLOCK_N)
            keep = dropout_mask(
                philox_seed,
                philox_offset,
                dropout_p,
                BLOCK_M,
                BLOCK_N,
                actual_seqlen_k,
            )
            if RETURN_ENCODED_SOFTMAX:
                # Dropped entries are stored negated so the mask can be
                # recovered from the encoded softmax.
                tl.store(
                    encoded_softmax_block_ptr,
                    tl.where(keep, p,
                             -p).to(encoded_softmax_block_ptr.type.element_ty),
                )
            p = tl.where(keep, p, 0.0)
        elif RETURN_ENCODED_SOFTMAX:
            tl.store(
                encoded_softmax_block_ptr,
                p.to(encoded_softmax_block_ptr.type.element_ty),
            )
        # -- update output accumulator --
        alpha = tl.math.exp2(m_i - m_ij)
        acc = acc * alpha[:, None]
        if not PRE_LOAD_V:
            v = load_fn(
                V_block_ptr,
                MASK_STEPS and (n_extra_tokens != 0),
                PADDED_HEAD,
                "zero",
            )
        # -- update m_i and l_i
        l_i = l_i * alpha + l_ij
        # update m_i and l_i
        m_i = m_ij
        acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
        if bias_ptr is not None:
            bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N))
        if RETURN_ENCODED_SOFTMAX:
            encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr,
                                                   (0, BLOCK_N))
    return acc, l_i, m_i
+ qk += bias * 1.44269504089 + m_ij = tl.maximum(m_i, tl.max(qk, 1)) + qk = qk - m_ij[:, None] + p = tl.math.exp2(qk) + + # CAVEAT: Must update l_ij before applying dropout + l_ij = tl.sum(p, 1) + if ENABLE_DROPOUT: + philox_offset = (batch_philox_offset + + start_m * BLOCK_M * actual_seqlen_k + start_n - + BLOCK_N) + keep = dropout_mask( + philox_seed, + philox_offset, + dropout_p, + BLOCK_M, + BLOCK_N, + actual_seqlen_k, + ) + if RETURN_ENCODED_SOFTMAX: + tl.store( + encoded_softmax_block_ptr, + tl.where(keep, p, + -p).to(encoded_softmax_block_ptr.type.element_ty), + ) + p = tl.where(keep, p, 0.0) + elif RETURN_ENCODED_SOFTMAX: + tl.store( + encoded_softmax_block_ptr, + p.to(encoded_softmax_block_ptr.type.element_ty), + ) + # -- update output accumulator -- + alpha = tl.math.exp2(m_i - m_ij) + acc = acc * alpha[:, None] + if not PRE_LOAD_V: + v = load_fn( + V_block_ptr, + MASK_STEPS and (n_extra_tokens != 0), + PADDED_HEAD, + "zero", + ) + # -- update m_i and l_i + l_i = l_i * alpha + l_ij + # update m_i and l_i + m_i = m_ij + acc += tl.dot(p.to(V_block_ptr.type.element_ty), v) + V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) + K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) + if bias_ptr is not None: + bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N)) + if RETURN_ENCODED_SOFTMAX: + encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr, + (0, BLOCK_N)) + return acc, l_i, m_i + + +@triton.autotune( + configs=[ + triton.Config( + { + "BLOCK_M": 256, + "BLOCK_N": 64, + "waves_per_eu": 2, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=8, + ), + triton.Config( + { + "BLOCK_M": 128, + "BLOCK_N": 128, + "waves_per_eu": 2, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=4, + ), + triton.Config( + { + "BLOCK_M": 256, + "BLOCK_N": 128, + "waves_per_eu": 2, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=8, + ), + triton.Config( + { + "BLOCK_M": 128, + "BLOCK_N": 64, + "waves_per_eu": 3, + "PRE_LOAD_V": True, + }, + num_stages=1, + 
num_warps=4, + ), + triton.Config( + { + "BLOCK_M": 128, + "BLOCK_N": 64, + "waves_per_eu": 3, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=4, + ), + triton.Config( + { + "BLOCK_M": 64, + "BLOCK_N": 64, + "waves_per_eu": 4, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=8, + ), + triton.Config( + { + "BLOCK_M": 32, + "BLOCK_N": 32, + "waves_per_eu": 4, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=8, + ), + # TODO: This config fails with head_size not pow2 with data mismatches. + # triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 1, + # 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), + triton.Config( + { + "BLOCK_M": 16, + "BLOCK_N": 16, + "waves_per_eu": 1, + "PRE_LOAD_V": False, + }, + num_stages=1, + num_warps=4, + ), + ], + key=['IS_CAUSAL', 'dropout_p', 'BLOCK_DMODEL'], +) +@triton.jit +def attn_fwd( + Q, + K, + V, + bias, + sm_scale, + L, + Out, + stride_qz, + stride_qh, + stride_qm, + stride_qk, + stride_kz, + stride_kh, + stride_kn, + stride_kk, + stride_vz, + stride_vh, + stride_vk, + stride_vn, + stride_oz, + stride_oh, + stride_om, + stride_on, + stride_bz, + stride_bh, + stride_bm, + stride_bn, + cu_seqlens_q, + cu_seqlens_k, + dropout_p, + philox_seed, + philox_offset_base, + encoded_softmax, + HQ: tl.constexpr, + HK: tl.constexpr, + ACTUAL_BLOCK_DMODEL: tl.constexpr, + MAX_SEQLENS_Q: tl.constexpr, + MAX_SEQLENS_K: tl.constexpr, + VARLEN: tl.constexpr, + IS_CAUSAL: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + PRE_LOAD_V: tl.constexpr, + BIAS_TYPE: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + RETURN_ENCODED_SOFTMAX: tl.constexpr, +): + start_m = tl.program_id(0) + off_h_q = tl.program_id(1) + off_z = tl.program_id(2) + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + if VARLEN: + cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z) + cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1) + seqlen_q = cu_seqlens_q_end - 
cu_seqlens_q_start + # We have a one-size-fits-all grid in id(0). Some seqlens might be too + # small for all start_m so for those we return early. + if start_m * BLOCK_M > seqlen_q: + return + cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z) + cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1) + seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start + else: + cu_seqlens_q_start = 0 + cu_seqlens_k_start = 0 + seqlen_q = MAX_SEQLENS_Q + seqlen_k = MAX_SEQLENS_K + + # Now we compute whether we need to exit early due to causal masking. + # This is because for seqlen_q > seqlen_k, M rows of the attn scores + # are completely masked, resulting in 0s written to the output, and + # inf written to LSE. We don't need to do any GEMMs in this case. + # This block of code determines what N is, and if this WG is operating + # on those M rows. + n_blocks = cdiv_fn(seqlen_k, BLOCK_N) + if IS_CAUSAL: + # If seqlen_q == seqlen_k, the attn scores are a square matrix. + # If seqlen_q != seqlen_k, attn scores are rectangular which means + # the causal mask boundary is bottom right aligned, and ends at either + # the top edge (seqlen_q < seqlen_k) or left edge. + # This captures the decrease in n_blocks if we have a rectangular attn + # matrix + n_blocks_seqlen = cdiv_fn( + (start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N) + # This is what adjusts the block_max for the current WG, only + # if IS_CAUSAL. Otherwise we want to always iterate through all n_blocks + n_blocks = min(n_blocks, n_blocks_seqlen) + # If we have no blocks after adjusting for seqlen deltas, this WG is + # part of the blocks that are all 0. We exit early. 
+ if n_blocks <= 0: + o_offset = (off_z * stride_oz + cu_seqlens_q_start * stride_om + + off_h_q * stride_oh) + O_block_ptr = tl.make_block_ptr( + base=Out + o_offset, + shape=(seqlen_q, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty) + # We still need to write 0s to the result + # tl.store(O_block_ptr, + # acc.to(Out.type.element_ty), boundary_check=(0,1)) + # l_ptrs = L + off_z * HQ * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q + # + offs_m + # We store inf to LSE, not -inf because in the bwd pass, + # we subtract this + # from qk which makes it -inf, such that exp(qk - inf) = 0 + # for these masked blocks. + # l = tl.full([BLOCK_M], value=float("inf"), dtype=tl.float32) + # tl.store(l_ptrs, l) + # TODO: Should dropout and return encoded softmax be handled here? + return + + # If MQA / GQA, set the K and V head offsets appropriately. + GROUP_SIZE: tl.constexpr = HQ // HK + off_h_k = off_h_q // GROUP_SIZE if GROUP_SIZE != 1 else off_h_q + + n_extra_tokens = 0 + if seqlen_k < BLOCK_N: + n_extra_tokens = BLOCK_N - seqlen_k + elif seqlen_k % BLOCK_N: + n_extra_tokens = seqlen_k % BLOCK_N + padded_head = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL + + # Compute pointers for all the tensors used in this kernel. 
+ q_offset = (off_z * stride_qz + off_h_q * stride_qh + + cu_seqlens_q_start * stride_qm) + Q_block_ptr = tl.make_block_ptr( + base=Q + q_offset, + shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + k_offset = (off_z * stride_kz + off_h_k * stride_kh + + cu_seqlens_k_start * stride_kn) + K_block_ptr = tl.make_block_ptr( + base=K + k_offset, + shape=(ACTUAL_BLOCK_DMODEL, seqlen_k), + strides=(stride_kk, stride_kn), + offsets=(0, 0), + block_shape=(BLOCK_DMODEL, BLOCK_N), + order=(0, 1), + ) + v_offset = (off_z * stride_vz + off_h_k * stride_vh + + cu_seqlens_k_start * stride_vk) + V_block_ptr = tl.make_block_ptr( + base=V + v_offset, + shape=(seqlen_k, ACTUAL_BLOCK_DMODEL), + strides=(stride_vk, stride_vn), + offsets=(0, 0), + block_shape=(BLOCK_N, BLOCK_DMODEL), + order=(1, 0), + ) + if BIAS_TYPE != 0: + bias_ptr = tl.make_block_ptr( + base=bias + off_h_q * stride_bh, + shape=(seqlen_q, seqlen_k), + strides=(stride_bm, stride_bn), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_N), + order=(1, 0), + ) + else: + bias_ptr = None + if ENABLE_DROPOUT: + batch_philox_offset = philox_offset_base \ + + (off_z * HQ + off_h_q) \ + * seqlen_q * seqlen_k + else: + batch_philox_offset = 0 + # We can ask to return the dropout mask without actually doing any dropout. + # In this case, we return an invalid pointer so indicate the mask is not i + # valid. + # TODO: Fix encoded softmax. It currently uses just h_q in the base offset. 
+ if RETURN_ENCODED_SOFTMAX: + encoded_softmax_block_ptr = tl.make_block_ptr( + base=encoded_softmax + off_h_q * seqlen_q * seqlen_k, + shape=(seqlen_q, seqlen_k), + strides=(seqlen_k, 1), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_N), + order=(1, 0), + ) + else: + encoded_softmax_block_ptr = 0 + # initialize pointer to m and l + m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32) + l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + # scale sm_scale by log_2(e) and use 2^x in the loop as we do not + # have native e^x support in HW. + qk_scale = sm_scale * 1.44269504089 + # Q is loaded once at the beginning and shared by all N blocks. + q = load_fn(Q_block_ptr, True, padded_head, "zero") + q = (q * qk_scale).to(Q_block_ptr.type.element_ty) + + # Here we compute how many full and masked blocks we have. + padded_block_k = n_extra_tokens != 0 + is_modulo_mn = not padded_block_k and (seqlen_q % BLOCK_M == 0) + if IS_CAUSAL: + # There are always at least BLOCK_M // BLOCK_N masked blocks. + # Additionally there might be one more due to dissimilar seqlens. + masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn) + else: + # Padding on Q does not need to be masked in the FA loop. + masked_blocks = padded_block_k + # if IS_CAUSAL, not is_modulo_mn does not always result in an additional + # block. In this case we might exceed n_blocks so pick the min. + masked_blocks = min(masked_blocks, n_blocks) + n_full_blocks = n_blocks - masked_blocks + block_min = 0 + block_max = n_blocks * BLOCK_N + # Compute for full blocks. Here we set causal to false regardless of its + # value because there is no masking. Similarly we do not need padding. 
+ if n_full_blocks > 0: + block_max = (n_blocks - masked_blocks) * BLOCK_N + acc, l_i, m_i = _attn_fwd_inner( + acc, + l_i, + m_i, + q, + K_block_ptr, + V_block_ptr, + start_m, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + encoded_softmax_block_ptr, + # _, _, offs_n_causal, masked_blocks, n_extra_tokens, _ + block_min, + block_max, + 0, + 0, + 0, + bias_ptr, + # IS_CAUSAL, .... + False, + BLOCK_M, + BLOCK_DMODEL, + BLOCK_N, + offs_m, + offs_n, + # _, MASK_STEPS, ... + PRE_LOAD_V, + False, + ENABLE_DROPOUT, + RETURN_ENCODED_SOFTMAX, + padded_head, + ) + block_min = block_max + block_max = n_blocks * BLOCK_N + + tl.debug_barrier() + # Remaining blocks, if any, are full / not masked. + if masked_blocks > 0: + offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0 + K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N)) + V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0)) + if bias_ptr is not None: + bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N)) + if RETURN_ENCODED_SOFTMAX: + encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr, + (0, n_full_blocks)) + acc, l_i, m_i = _attn_fwd_inner( + acc, + l_i, + m_i, + q, + K_block_ptr, + V_block_ptr, + start_m, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + encoded_softmax_block_ptr, + block_min, + block_max, + offs_n_causal, + masked_blocks, + n_extra_tokens, + bias_ptr, + IS_CAUSAL, + BLOCK_M, + BLOCK_DMODEL, + BLOCK_N, + offs_m, + offs_n, + # _, MASK_STEPS, ... + PRE_LOAD_V, + True, + ENABLE_DROPOUT, + RETURN_ENCODED_SOFTMAX, + padded_head, + ) + # epilogue + acc = acc / l_i[:, None] + if ENABLE_DROPOUT: + acc = acc / (1 - dropout_p) + # If seqlen_q > seqlen_k but the delta is not a multiple of BLOCK_M, + # then we have one block with a row of all NaNs which come from computing + # softmax over a row of all -infs (-inf - inf = NaN). 
We check for that here + # and store 0s where there are NaNs as these rows should've been zeroed out. + end_m_idx = (start_m + 1) * BLOCK_M + start_m_idx = start_m * BLOCK_M + causal_start_idx = seqlen_q - seqlen_k + acc = acc.to(Out.type.element_ty) + if IS_CAUSAL: # noqa: SIM102 + if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx: + out_mask_boundary = tl.full((BLOCK_DMODEL, ), + causal_start_idx, + dtype=tl.int32) + mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M) + out_ptrs_mask = (mask_m_offsets[:, None] >= + out_mask_boundary[None, :]) + z = 0.0 + acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty)) + # write back LSE + # l_ptrs = L + off_z * HQ * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q + offs_m + # If seqlen_q not multiple of BLOCK_M, we need to mask out the last + # few rows. This is only true for the last M block. For others, + # overflow_size will be -ve + # overflow_size = end_m_idx - seqlen_q + # if overflow_size > 0: + # boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=tl.int32) + # # This is a > check because mask being 0 blocks the store. + # l_ptrs_mask = boundary > tl.arange(0, BLOCK_M) + # tl.store(l_ptrs, m_i + tl.math.log2(l_i), mask=l_ptrs_mask) + # else: + # tl.store(l_ptrs, m_i + tl.math.log2(l_i)) + + # write back O + o_offset = (off_z * stride_oz + cu_seqlens_q_start * stride_om + + off_h_q * stride_oh) + O_block_ptr = tl.make_block_ptr( + base=Out + o_offset, + shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + # Need boundary check on this to make sure the padding from the + # Q and KV tensors in both dims are not part of what we store back. + # TODO: Do the boundary check optionally. 
def check_args(
    q,
    k,
    v,
    o,
    varlen=True,
    max_seqlens=None,
    cu_seqlens_q=None,
    cu_seqlens_k=None,
):
    """Validate q/k/v/o shapes and dtypes for the fused attention kernel.

    For varlen layout the tensors are (total_tokens, nheads, head_size) and
    cumulative sequence-length tensors are required; for batched layout they
    are (batch, nheads, seqlen, head_size). Raises AssertionError on any
    violation; returns None.
    """
    assert q.dim() == k.dim() and q.dim() == v.dim()
    if varlen:
        assert q.dim() == 3
        total_q, nheads_q, head_size = q.shape
        total_k, nheads_k, _ = k.shape
        assert cu_seqlens_q is not None
        assert cu_seqlens_k is not None
        assert len(cu_seqlens_q) == len(cu_seqlens_k)
    else:
        assert q.dim() == 4
        batch, nheads_q, seqlen_q, head_size = q.shape
        _, nheads_k, seqlen_k, _ = k.shape
        assert max_seqlens > 0
    assert k.shape == v.shape
    assert q.shape[-1] == k.shape[-1] and q.shape[-1] == v.shape[-1]
    # TODO: Change assert if we support qkl f8 and v f16
    assert q.dtype == k.dtype and q.dtype == v.dtype
    assert head_size <= 256
    assert o.shape == q.shape
    # GQA/MQA: query heads must be an exact multiple of kv heads.
    assert (nheads_q % nheads_k) == 0


class _attention(torch.autograd.Function):
    """Autograd wrapper around the varlen fused-attention forward kernel."""

    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        o,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlens_q,
        max_seqlens_k,
        causal=False,
        sm_scale=1.0,
        bias=None,
    ):
        """Run the varlen FA-2 forward kernel; returns (o, encoded_softmax).

        `o` is allocated when None. Only the packed varlen layout is
        supported by this entry point. Dropout is disabled; the philox
        seed/offset constants only make testing reproducible.
        """
        if o is None:
            o = torch.empty_like(q, dtype=v.dtype)

        check_args(
            q,
            k,
            v,
            o,
            varlen=True,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
        )
        # FIX: this forward only implements the varlen layout; the original
        # guarded this with `if True:  # varlen` and carried an unreachable
        # batched-layout branch. The dead branch is removed.
        total_q, nheads_q, head_size = q.shape
        total_k, nheads_k, _ = k.shape
        batch = len(cu_seqlens_q) - 1
        # Batch stride is 0 because sequences are packed along dim 0.
        q_strides = (0, q.stride(1), q.stride(0), q.stride(2))
        k_strides = (0, k.stride(1), k.stride(0), k.stride(2))
        v_strides = (0, v.stride(1), v.stride(0), v.stride(2))
        o_strides = (0, o.stride(1), o.stride(0), o.stride(2))

        # Get closest power of 2 over or equal to 32.
        # FIX: iterate the candidates in ascending order. The original
        # iterated a `set`, whose iteration order is unspecified, so a
        # non-power-of-two head size could get padded to 128/256 instead
        # of the smallest sufficient block.
        unpadded_head_dims = (32, 64, 128, 256)
        if head_size not in unpadded_head_dims:
            padded_d_model = None
            for i in unpadded_head_dims:
                if i > head_size:
                    padded_d_model = i
                    break
            assert padded_d_model is not None
        else:
            padded_d_model = head_size

        grid = lambda META: (
            triton.cdiv(max_seqlens_q, META["BLOCK_M"]),
            nheads_q,
            batch,
        )

        encoded_softmax = None

        # Seed the RNG so we get reproducible results for testing.
        philox_seed = 0x1BF52
        philox_offset = 0x1D4B42

        if bias is not None:
            bias_strides = (
                bias.stride(0),
                bias.stride(1),
                bias.stride(2),
                bias.stride(3),
            )
        else:
            bias_strides = (0, 0, 0, 0)

        attn_fwd[grid](
            q,
            k,
            v,
            bias,
            sm_scale,
            None,
            o,
            *q_strides,
            *k_strides,
            *v_strides,
            *o_strides,
            *bias_strides,
            cu_seqlens_q,
            cu_seqlens_k,
            dropout_p=0.0,
            philox_seed=philox_seed,
            philox_offset_base=philox_offset,
            encoded_softmax=encoded_softmax,
            HQ=nheads_q,
            HK=nheads_k,
            ACTUAL_BLOCK_DMODEL=head_size,
            MAX_SEQLENS_Q=max_seqlens_q,
            MAX_SEQLENS_K=max_seqlens_k,
            IS_CAUSAL=causal,
            VARLEN=True,
            BLOCK_DMODEL=padded_d_model,
            BIAS_TYPE=0 if bias is None else 1,
            ENABLE_DROPOUT=False,
            RETURN_ENCODED_SOFTMAX=False,
        )

        ctx.grid = grid
        ctx.sm_scale = sm_scale
        ctx.BLOCK_DMODEL = head_size
        ctx.causal = causal
        ctx.dropout_p = 0.0
        ctx.philox_seed = philox_seed
        ctx.philox_offset = philox_offset
        ctx.encoded_softmax = encoded_softmax
        ctx.return_encoded_softmax = False
        return o, encoded_softmax


triton_attention = _attention.apply
import enum
from functools import lru_cache
from typing import Type

import torch

import vllm.envs as envs
from vllm.attention.backends.abstract import AttentionBackend
from vllm.logger import init_logger
from vllm.utils import is_cpu, is_hip, is_musa

logger = init_logger(__name__)


class _Backend(enum.Enum):
    """Attention backend implementations this build can select from."""
    FLASH_ATTN = enum.auto()
    XFORMERS = enum.auto()
    ROCM_FLASH = enum.auto()
    TORCH_SDPA = enum.auto()
    FLASHINFER = enum.auto()


@lru_cache(maxsize=None)
def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]:
    """Resolve and lazily import the attention backend class for `dtype`.

    The result is cached per dtype; backend modules are imported only when
    selected so unavailable backends do not break import of this module.
    """
    backend = _which_attn_to_use(dtype)
    if backend == _Backend.FLASH_ATTN:
        logger.info("Using FlashAttention-2 backend.")
        from vllm.attention.backends.flash_attn import (  # noqa: F401
            FlashAttentionBackend)
        return FlashAttentionBackend
    elif backend == _Backend.XFORMERS:
        logger.info("Using XFormers backend.")
        from vllm.attention.backends.xformers import (  # noqa: F401
            XFormersBackend)
        return XFormersBackend
    elif backend == _Backend.ROCM_FLASH:
        logger.info("Using ROCmFlashAttention backend.")
        from vllm.attention.backends.rocm_flash_attn import (  # noqa: F401
            ROCmFlashAttentionBackend)
        return ROCmFlashAttentionBackend
    elif backend == _Backend.TORCH_SDPA:
        logger.info("Using Torch SDPA backend.")
        from vllm.attention.backends.torch_sdpa import TorchSDPABackend
        return TorchSDPABackend
    elif backend == _Backend.FLASHINFER:
        logger.info("Using Flashinfer backend.")
        logger.warning("Eager mode is enforced for the Flashinfer backend. ")
        from vllm.attention.backends.flashinfer import FlashInferBackend
        return FlashInferBackend
    else:
        raise ValueError("Invalid attention backend.")


def _which_attn_to_use(dtype: torch.dtype) -> _Backend:
    """Returns which flash attention backend to use.

    Platform checks (CPU, MUSA, ROCm) run first, then NVIDIA capability,
    dtype and flash_attn availability checks.
    """
    if is_cpu():
        return _Backend.TORCH_SDPA

    # NOTE(review): this early return skips the dtype / flash_attn checks
    # below, and also bypasses the VLLM_ATTENTION_BACKEND override for
    # MUSA devices — confirm that is intended.
    if is_musa():
        return _Backend.FLASH_ATTN

    if is_hip():
        # AMD GPUs.
        if torch.cuda.get_device_capability()[0] != 9:
            # not Instinct series GPUs.
            # FIX: corrected the misspelled "flash_atten" in the log message.
            logger.info("flash_attn is not supported on NAVI GPUs.")
        return _Backend.ROCM_FLASH

    # NVIDIA GPUs.
    if torch.cuda.get_device_capability()[0] < 8:
        # Volta and Turing NVIDIA GPUs.
        logger.info("Cannot use FlashAttention-2 backend for Volta and Turing "
                    "GPUs.")
        return _Backend.XFORMERS

    if dtype not in (torch.float16, torch.bfloat16):
        logger.info("Cannot use FlashAttention-2 backend for dtype other than "
                    "torch.float16 or torch.bfloat16.")
        return _Backend.XFORMERS

    try:
        import flash_attn  # noqa: F401
    except ImportError:
        logger.info(
            "Cannot use FlashAttention-2 backend because the flash_attn "
            "package is not found. Please install it for better performance.")
        return _Backend.XFORMERS

    # NOTE(review): the env-var override is consulted only after every
    # heuristic above has passed, so a user-requested backend is ignored
    # whenever any fallback path triggers — verify against upstream intent.
    backend_by_env_var = envs.VLLM_ATTENTION_BACKEND
    if backend_by_env_var is not None:
        return _Backend[backend_by_env_var]

    # Default case.
    return _Backend.FLASH_ATTN
# Sentinel stored in slots that have not received a token yet.
_BLANK_TOKEN_ID = -1

# Placeholder timestamp meaning "never accessed".
DEFAULT_LAST_ACCESSED_TIME = -1


class LogicalTokenBlock:
    """A block holding a contiguous run of token ids, filled left to right.

    Logical blocks mirror the state of the corresponding physical blocks
    in the KV cache.
    """

    def __init__(
        self,
        block_number: int,
        block_size: int,
    ) -> None:
        self.block_number = block_number
        self.block_size = block_size

        # Pre-allocated slot array; only the first `num_tokens` entries
        # are meaningful, the rest hold the blank sentinel.
        self.token_ids = [_BLANK_TOKEN_ID] * block_size
        self.num_tokens = 0

    def is_empty(self) -> bool:
        """True when no token has been appended yet."""
        return self.num_tokens == 0

    def get_num_empty_slots(self) -> int:
        """Number of slots still available in this block."""
        return self.block_size - self.num_tokens

    def is_full(self) -> bool:
        """True once every slot holds a token."""
        return self.num_tokens == self.block_size

    def append_tokens(self, token_ids: List[int]) -> None:
        """Copy ``token_ids`` into the next free slots (must fit)."""
        count = len(token_ids)
        assert count <= self.get_num_empty_slots()
        start = self.num_tokens
        self.token_ids[start:start + count] = token_ids
        self.num_tokens = start + count

    def get_token_ids(self) -> List[int]:
        """Return only the populated prefix of the slot array."""
        return self.token_ids[:self.num_tokens]

    def get_last_token_id(self) -> int:
        """Return the most recently appended token id (block non-empty)."""
        assert self.num_tokens > 0
        return self.token_ids[self.num_tokens - 1]


class PhysicalTokenBlock:
    """Represents the state of a block in the KV cache."""

    def __init__(
        self,
        device: "Device",
        block_number: int,
        block_size: int,
        block_hash: int,
        num_hashed_tokens: int,
    ) -> None:
        self.device = device
        self.block_number = block_number
        self.block_size = block_size
        self.block_hash = block_hash
        self.num_hashed_tokens = num_hashed_tokens

        # Number of sequences currently mapped to this block.
        self.ref_count = 0
        # Eviction-policy timestamp; starts as "never accessed".
        self.last_accessed = DEFAULT_LAST_ACCESSED_TIME

        # Whether the KV entries of this block have already been computed.
        self.computed = False

    def __repr__(self) -> str:
        return (f'PhysicalTokenBlock(device={self.device}, '
                f'block_number={self.block_number}, '
                f'num_hashed_tokens={self.num_hashed_tokens}, '
                f'ref_count={self.ref_count}, '
                f'last_accessed={self.last_accessed}, '
                f'computed={self.computed})')
import enum
import json
from dataclasses import dataclass, field, fields
from typing import TYPE_CHECKING, ClassVar, List, Optional, Union

import torch
from packaging.version import Version
from transformers import PretrainedConfig

from vllm.logger import init_logger
from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS,
                                                     get_quantization_config)
from vllm.transformers_utils.config import get_config, get_hf_text_config
from vllm.utils import (get_cpu_memory, get_mcc_musa_version, is_cpu, is_hip,
                        is_neuron)

# Resolved lazily via the quantization registry so the heavy kernel modules
# are not imported at config time.
GPTQMarlinConfig = get_quantization_config("gptq_marlin")

if TYPE_CHECKING:
    from ray.util.placement_group import PlacementGroup

    from vllm.model_executor.model_loader.loader import BaseModelLoader

logger = init_logger(__name__)

# One gibibyte, used to convert GiB-valued knobs to bytes.
_GB = 1 << 30


class ModelConfig:
    """Configuration for the model.

    Args:
        model: Name or path of the huggingface model to use.
            It is also used as the content for `model_name` tag in metrics
            output when `served_model_name` is not specified.
        tokenizer: Name or path of the huggingface tokenizer to use.
        tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
            available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        dtype: Data type for model weights and activations. The "auto" option
            will use FP16 precision for FP32 and FP16 models, and BF16
            precision for BF16 models.
        seed: Random seed for reproducibility.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id. If unspecified, will use the default
            version.
        code_revision: The specific revision to use for the model code on
            Hugging Face Hub. It can be a branch name, a tag name, or a
            commit id. If unspecified, will use the default version.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id. If unspecified, will use
            the default version.
        max_model_len: Maximum length of a sequence (including prompt and
            output). If None, will be derived from the model.
        quantization: Quantization method that was used to quantize the model
            weights. If None, we assume the model weights are not quantized.
        quantization_param_path: Path to JSON file containing scaling factors.
            Used to load KV cache scaling factors into the model when KV cache
            type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also
            be used to load activation and weight scaling factors when the
            model dtype is FP8_E4M3 on ROCm.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        max_context_len_to_capture: Maximum context len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode (DEPRECATED. Use max_seq_len_to_capture instead).
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer.
        served_model_name: The model name used in metrics tag `model_name`,
            matches the model name exposed via the APIs. If multiple model
            names provided, the first name will be used. If not specified,
            the model name will be the same as `model`.
    """

    def __init__(
        self,
        model: str,
        tokenizer: str,
        tokenizer_mode: str,
        trust_remote_code: bool,
        dtype: Union[str, torch.dtype],
        seed: int,
        revision: Optional[str] = None,
        code_revision: Optional[str] = None,
        tokenizer_revision: Optional[str] = None,
        max_model_len: Optional[int] = None,
        quantization: Optional[str] = None,
        quantization_param_path: Optional[str] = None,
        enforce_eager: bool = False,
        max_context_len_to_capture: Optional[int] = None,
        max_seq_len_to_capture: Optional[int] = None,
        max_logprobs: int = 5,
        skip_tokenizer_init: bool = False,
        served_model_name: Optional[Union[str, List[str]]] = None,
    ) -> None:
        self.model = model
        self.tokenizer = tokenizer
        self.tokenizer_mode = tokenizer_mode
        self.trust_remote_code = trust_remote_code
        self.seed = seed
        self.revision = revision
        self.code_revision = code_revision
        self.tokenizer_revision = tokenizer_revision
        self.quantization = quantization
        self.quantization_param_path = quantization_param_path
        self.enforce_eager = enforce_eager
        self.max_context_len_to_capture = max_context_len_to_capture
        # The deprecated knob is rejected outright: any non-None value
        # raises, so below this point it is always None and the `or`
        # fallback is effectively dead code kept for safety.
        if self.max_context_len_to_capture is not None:
            raise ValueError("`max_context_len_to_capture` is deprecated. "
                             "Use `max_seq_len_to_capture` instead.")
        self.max_seq_len_to_capture = (max_seq_len_to_capture
                                       or max_context_len_to_capture)
        self.max_logprobs = max_logprobs
        self.skip_tokenizer_init = skip_tokenizer_init

        # Fetch the HF config (network/disk access happens here) and derive
        # dtype / max length from it.
        self.hf_config = get_config(self.model, trust_remote_code, revision,
                                    code_revision)
        self.hf_text_config = get_hf_text_config(self.hf_config)
        self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
        self.max_model_len = _get_and_verify_max_len(self.hf_text_config,
                                                     max_model_len)
        self.served_model_name = get_served_model_name(model,
                                                       served_model_name)
        # Tokenizer checks are skippable; quantization/CUDA-graph checks
        # always run.
        if not self.skip_tokenizer_init:
            self._verify_tokenizer_mode()
        self._verify_quantization()
        self._verify_cuda_graph()

    def _verify_tokenizer_mode(self) -> None:
        # Normalize to lowercase and reject anything but "auto"/"slow".
        tokenizer_mode = self.tokenizer_mode.lower()
        if tokenizer_mode not in ["auto", "slow"]:
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                "either 'auto' or 'slow'.")
        self.tokenizer_mode = tokenizer_mode

    def _verify_quantization(self) -> None:
        # Reconcile the user-requested quantization with what the HF config
        # declares, possibly upgrading "gptq" to the Marlin kernels.
        supported_quantization = [*QUANTIZATION_METHODS]
        rocm_supported_quantization = ["gptq", "squeezellm"]
        if self.quantization is not None:
            self.quantization = self.quantization.lower()

        # Parse quantization method from the HF model config, if available.
        quant_cfg = getattr(self.hf_config, "quantization_config", None)
        if quant_cfg is not None:
            quant_method = quant_cfg.get("quant_method", "").lower()
            # compat: autogptq >=0.8.0 use checkpoint_format: str
            # compat: autogptq <=0.7.1 is_marlin_format: bool
            is_format_marlin = (quant_cfg.get("checkpoint_format") == "marlin"
                                or quant_cfg.get("is_marlin_format", False))

            # Check which LinearMethod the GPTQ model should use.
            if quant_method == "gptq":
                # If serialized in Marlin format, use MarlinLinearMethod.
                # TODO (@robertgshaw): migrate under GPTQMarlinLinearMethod.
                if is_format_marlin:
                    logger.info("The model is serialized in Marlin format. "
                                "Using Marlin kernel.")
                    quant_method = "marlin"
                    if self.quantization == "gptq":
                        self.quantization = quant_method

                # If convertible to Marlin format, use GPTQMarlinLinearMethod
                # unless the user explicitly specified GPTQLinearMethod.
                elif GPTQMarlinConfig.is_marlin_compatible(quant_cfg):
                    if self.quantization == "gptq":
                        logger.warning(
                            "The model is convertible to Marlin format, but "
                            "you specified quantization=gptq. Use "
                            "quantization=marlin for faster inference.")
                    else:
                        logger.info(
                            "The model is convertible to Marlin format. "
                            "Using Marlin kernel.")
                        quant_method = "gptq_marlin"
                        if self.quantization == "marlin":
                            self.quantization = quant_method

            # Verify quantization configurations.
            if self.quantization is None:
                self.quantization = quant_method
            elif self.quantization != quant_method:
                raise ValueError(
                    "Quantization method specified in the model config "
                    f"({quant_method}) does not match the quantization "
                    f"method specified in the `quantization` argument "
                    f"({self.quantization}).")

        if self.quantization is not None:
            if self.quantization not in supported_quantization:
                raise ValueError(
                    f"Unknown quantization method: {self.quantization}. Must "
                    f"be one of {supported_quantization}.")
            if is_hip(
            ) and self.quantization not in rocm_supported_quantization:
                raise ValueError(
                    f"{self.quantization} quantization is currently not "
                    f"supported in ROCm.")
            if (self.quantization not in ["marlin", "gptq_marlin"]):
                logger.warning(
                    "%s quantization is not fully "
                    "optimized yet. The speed can be slower than "
                    "non-quantized models.", self.quantization)

    def _verify_cuda_graph(self) -> None:
        # CUDA-graph capture length defaults to, and is clamped by,
        # the model's max sequence length.
        if self.max_seq_len_to_capture is None:
            self.max_seq_len_to_capture = self.max_model_len
        self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
                                          self.max_model_len)

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        # Heads must shard evenly across TP ranks, layers across PP stages.
        total_num_attention_heads = self.hf_text_config.num_attention_heads
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if total_num_attention_heads % tensor_parallel_size != 0:
            raise ValueError(
                f"Total number of attention heads ({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}).")

        total_num_hidden_layers = self.hf_text_config.num_hidden_layers
        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        if total_num_hidden_layers % pipeline_parallel_size != 0:
            raise ValueError(
                f"Total number of hidden layers ({total_num_hidden_layers}) "
                "must be divisible by pipeline parallel size "
                f"({pipeline_parallel_size}).")

    def get_sliding_window(self) -> Optional[int]:
        """Get the sliding window size, or None if disabled.
        """

        # Some models, like Qwen2 and Qwen1.5, use `use_sliding_window` in
        # addition to sliding window size. We check if that field is present
        # and if it's False, return None.
        if (hasattr(self.hf_text_config, "use_sliding_window")
                and not self.hf_text_config.use_sliding_window):
            return None
        return getattr(self.hf_text_config, "sliding_window", None)

    def get_vocab_size(self) -> int:
        return self.hf_text_config.vocab_size

    def get_hidden_size(self) -> int:
        return self.hf_text_config.hidden_size

    def get_head_size(self) -> int:
        # Prefer an explicit head_dim; otherwise assume hidden size divides
        # evenly across the attention heads.
        if hasattr(self.hf_text_config, "head_dim"):
            return self.hf_text_config.head_dim
        # FIXME(woosuk): This may not be true for all models.
        return (self.hf_text_config.hidden_size //
                self.hf_text_config.num_attention_heads)

    def get_total_num_kv_heads(self) -> int:
        """Returns the total number of KV heads."""
        # For GPTBigCode & Falcon:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
        new_decoder_arch_falcon = (
            self.hf_config.model_type in falcon_model_types
            and getattr(self.hf_config, "new_decoder_architecture", False))
        if not new_decoder_arch_falcon and getattr(self.hf_text_config,
                                                   "multi_query", False):
            # Multi-query attention, only one KV head.
            # Currently, tensor parallelism is not supported in this case.
            return 1

        # For DBRX and MPT
        if self.hf_config.model_type in ["dbrx", "mpt"]:
            return getattr(self.hf_config.attn_config, "kv_n_heads",
                           self.hf_config.num_attention_heads)

        # Attribute names differ per model family; take the first present.
        attributes = [
            # For Falcon:
            "n_head_kv",
            "num_kv_heads",
            # For LLaMA-2:
            "num_key_value_heads",
            # For ChatGLM:
            "multi_query_group_num",
        ]
        for attr in attributes:
            num_kv_heads = getattr(self.hf_text_config, attr, None)
            if num_kv_heads is not None:
                return num_kv_heads

        # For non-grouped-query attention models, the number of KV heads is
        # equal to the number of attention heads.
        return self.hf_text_config.num_attention_heads

    def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
        """Returns the number of KV heads per GPU."""
        total_num_kv_heads = self.get_total_num_kv_heads()
        # If tensor parallelism is used, we divide the number of KV heads by
        # the tensor parallel size. We will replicate the KV heads in the
        # case where the number of KV heads is smaller than the tensor
        # parallel size so each GPU has at least one KV head.
        return max(1,
                   total_num_kv_heads // parallel_config.tensor_parallel_size)

    def get_num_attention_heads(self,
                                parallel_config: "ParallelConfig") -> int:
        # Query heads per TP rank (assumed divisible; verified in
        # verify_with_parallel_config).
        return self.hf_text_config.num_attention_heads // \
            parallel_config.tensor_parallel_size

    def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
        # Hidden layers per pipeline stage.
        total_num_hidden_layers = self.hf_text_config.num_hidden_layers
        return total_num_hidden_layers // parallel_config.pipeline_parallel_size
class CacheConfig:
    """Configuration for the KV cache.

    Args:
        block_size: Size of a cache block in number of tokens.
        gpu_memory_utilization: Fraction of GPU memory to use for the
            vLLM execution.
        swap_space: Size of the CPU swap space per GPU (in GiB).
        cache_dtype: Data type for kv cache storage.
        num_gpu_blocks_override: Number of GPU blocks to use. This overrides
            the profiled num_gpu_blocks if specified. Does nothing if None.
        sliding_window: Sliding-window size in tokens, or None if disabled.
        enable_prefix_caching: Whether to enable prefix caching of KV blocks.
    """

    def __init__(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        swap_space: int,
        cache_dtype: str,
        num_gpu_blocks_override: Optional[int] = None,
        sliding_window: Optional[int] = None,
        enable_prefix_caching: bool = False,
    ) -> None:
        self.block_size = block_size
        self.gpu_memory_utilization = gpu_memory_utilization
        # `swap_space` is given in GiB; store it in bytes.
        self.swap_space_bytes = swap_space * _GB
        self.num_gpu_blocks_override = num_gpu_blocks_override
        self.cache_dtype = cache_dtype
        self.sliding_window = sliding_window
        self.enable_prefix_caching = enable_prefix_caching
        self._verify_args()
        self._verify_cache_dtype()

        # Will be set after profiling.
        self.num_gpu_blocks = None
        self.num_cpu_blocks = None

    def metrics_info(self):
        # convert cache_config to dict(key: str, value: str) for prometheus
        # metrics info
        return {key: str(value) for key, value in self.__dict__.items()}

    def _verify_args(self) -> None:
        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must be less than 1.0. Got "
                f"{self.gpu_memory_utilization}.")

    def _verify_cache_dtype(self) -> None:
        if self.cache_dtype == "auto":
            pass
        elif self.cache_dtype == "fp8":
            if not is_hip():
                # NOTE(review): on MUSA this compares the MCC toolchain
                # version against the CUDA 11.8 threshold — confirm the
                # version spaces are actually comparable.
                nvcc_cuda_version = get_mcc_musa_version()
                if nvcc_cuda_version is not None \
                        and nvcc_cuda_version < Version("11.8"):
                    # BUGFIX: the two adjacent literals previously
                    # concatenated to "...version islower than 11.8."
                    # (missing space after "is").
                    raise ValueError(
                        "FP8 is not supported when cuda version is "
                        "lower than 11.8.")
            logger.info(
                "Using fp8 data type to store kv cache. It reduces the GPU "
                "memory footprint and boosts the performance. "
                "But it may cause slight accuracy drop without scaling "
                "factors. FP8_E5M2 (without scaling) is only supported on "
                "cuda version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 "
                "is instead supported for common inference criteria.")
        else:
            raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span multiple nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / _GB:.2f} GiB out of "
               f"the {total_cpu_memory / _GB:.2f} GiB total CPU memory is "
               "allocated for the swap space.")
        # Hard-fail above 70% of host RAM, warn above 40%.
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. %s", msg)
@dataclass
class TokenizerPoolConfig:
    """Configuration for the tokenizer pool.

    Args:
        pool_size: Number of tokenizer workers in the pool.
        pool_type: Type of the pool.
        extra_config: Additional config for the pool.
            The way the config will be used depends on the
            pool type.
    """
    pool_size: int
    pool_type: str
    extra_config: dict

    def __post_init__(self):
        # Only the Ray-backed pool is implemented today.
        if self.pool_type not in ("ray", ):
            raise ValueError(f"Unknown pool type: {self.pool_type}")
        if not isinstance(self.extra_config, dict):
            raise ValueError("extra_config must be a dictionary.")

    @classmethod
    def create_config(
        cls, tokenizer_pool_size: int, tokenizer_pool_type: str,
        tokenizer_pool_extra_config: Optional[Union[str, dict]]
    ) -> Optional["TokenizerPoolConfig"]:
        """Create a TokenizerPoolConfig from the given parameters.

        If tokenizer_pool_size is 0, return None.

        Args:
            tokenizer_pool_size: Number of tokenizer workers in the pool.
            tokenizer_pool_type: Type of the pool.
            tokenizer_pool_extra_config: Additional config for the pool.
                The way the config will be used depends on the
                pool type. This can be a JSON string (will be parsed).
        """
        # A zero/falsy pool size means "no pool" -> synchronous tokenization.
        if not tokenizer_pool_size:
            return None

        if isinstance(tokenizer_pool_extra_config, str):
            # JSON string form: parse into a dict.
            parsed_extra_config = json.loads(tokenizer_pool_extra_config)
        else:
            parsed_extra_config = tokenizer_pool_extra_config or {}
        return cls(tokenizer_pool_size, tokenizer_pool_type,
                   parsed_extra_config)


class LoadFormat(str, enum.Enum):
    AUTO = "auto"
    PT = "pt"
    SAFETENSORS = "safetensors"
    NPCACHE = "npcache"
    DUMMY = "dummy"
    TENSORIZER = "tensorizer"
@dataclass
class LoadConfig:
    """
    download_dir: Directory to download and load the weights, default to the
        default cache directory of huggingface.
    load_format: The format of the model weights to load:
        "auto" will try to load the weights in the safetensors format and
            fall back to the pytorch bin format if safetensors format is
            not available.
        "pt" will load the weights in the pytorch bin format.
        "safetensors" will load the weights in the safetensors format.
        "npcache" will load the weights in pytorch format and store
            a numpy cache to speed up the loading.
        "dummy" will initialize the weights with random values, which is
            mainly for profiling.
        "tensorizer" will use CoreWeave's tensorizer library for
            fast weight loading.
    """

    load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
    download_dir: Optional[str] = None
    model_loader_extra_config: Optional[Union[str, dict]] = field(
        default_factory=dict)

    def __post_init__(self):
        raw_extra_config = self.model_loader_extra_config or {}
        if isinstance(raw_extra_config, str):
            # JSON string form: parse into a dict. (NOTE: a non-string
            # falsy value such as an explicit None is left untouched.)
            self.model_loader_extra_config = json.loads(raw_extra_config)
        self._verify_load_format()

    def _verify_load_format(self) -> None:
        # Already a LoadFormat member or a custom BaseModelLoader instance.
        if not isinstance(self.load_format, str):
            return

        normalized = self.load_format.lower()
        # Raises ValueError for names outside the LoadFormat enum.
        self.load_format = LoadFormat(normalized)

        # Currently empty: every format is supported on ROCm.
        rocm_not_supported_load_format: List[str] = []
        if is_hip() and normalized in rocm_not_supported_load_format:
            supported_on_rocm = [
                f for f in LoadFormat.__members__
                if (f not in rocm_not_supported_load_format)
            ]
            raise ValueError(
                f"load format '{normalized}' is not supported in ROCm. "
                f"Supported load formats are "
                f"{supported_on_rocm}")
class ParallelConfig:
    """Configuration for the distributed execution.

    Args:
        pipeline_parallel_size: Number of pipeline parallel groups.
        tensor_parallel_size: Number of tensor parallel groups.
        worker_use_ray: Whether to use Ray for model workers. Will be set to
            True if either pipeline_parallel_size or tensor_parallel_size is
            greater than 1.
        max_parallel_loading_workers: Maximum number of multiple batches
            when load model sequentially. To avoid RAM OOM when using tensor
            parallel and large models.
        disable_custom_all_reduce: Disable the custom all-reduce kernel and
            fall back to NCCL.
        tokenizer_pool_config: Config for the tokenizer pool.
            If None, will use synchronous tokenization.
        ray_workers_use_nsight: Whether to profile Ray workers with nsight, see
            https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler.
    """

    def __init__(
        self,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
        worker_use_ray: bool,
        max_parallel_loading_workers: Optional[int] = None,
        disable_custom_all_reduce: bool = False,
        tokenizer_pool_config: Optional["TokenizerPoolConfig"] = None,
        ray_workers_use_nsight: bool = False,
        placement_group: Optional["PlacementGroup"] = None,
    ) -> None:
        self.pipeline_parallel_size = pipeline_parallel_size
        self.tensor_parallel_size = tensor_parallel_size
        self.worker_use_ray = worker_use_ray
        self.max_parallel_loading_workers = max_parallel_loading_workers
        self.disable_custom_all_reduce = disable_custom_all_reduce
        self.tokenizer_pool_config = tokenizer_pool_config
        self.ray_workers_use_nsight = ray_workers_use_nsight
        self.placement_group = placement_group

        self.world_size = pipeline_parallel_size * self.tensor_parallel_size
        # Any multi-GPU deployment is forced onto Ray workers.
        if self.world_size > 1:
            self.worker_use_ray = True
        self._verify_args()

    def _verify_args(self) -> None:
        if self.pipeline_parallel_size > 1:
            raise NotImplementedError(
                "Pipeline parallelism is not supported yet.")
        # Custom all-reduce is only usable on multi-GPU NVIDIA/MUSA setups;
        # silently fall back to NCCL elsewhere.
        if not self.disable_custom_all_reduce and self.world_size > 1:
            if is_hip():
                self.disable_custom_all_reduce = True
                logger.info(
                    "Disabled the custom all-reduce kernel because it is not "
                    "supported on AMD GPUs.")
            elif self.pipeline_parallel_size > 1:
                self.disable_custom_all_reduce = True
                logger.info(
                    "Disabled the custom all-reduce kernel because it is not "
                    "supported with pipeline parallelism.")
        if self.ray_workers_use_nsight and not self.worker_use_ray:
            raise ValueError("Unable to use nsight profiling unless workers "
                             "run with Ray.")
class SchedulerConfig:
    """Scheduler configuration.

    Args:
        max_num_batched_tokens: Maximum number of tokens to be processed in
            a single iteration.
        max_num_seqs: Maximum number of sequences to be processed in a single
            iteration.
        max_model_len: Maximum length of a sequence (including prompt
            and generated text).
        use_v2_block_manager: Whether to use the BlockSpaceManagerV2 or not.
        num_lookahead_slots: The number of slots to allocate per sequence per
            step, beyond the known token ids. This is used in speculative
            decoding to store KV activations of tokens which may or may not be
            accepted.
        delay_factor: Apply a delay (of delay factor multiplied by previous
            prompt latency) before scheduling next prompt.
        enable_chunked_prefill: If True, prefill requests can be chunked based
            on the remaining max_num_batched_tokens.
    """

    def __init__(
        self,
        max_num_batched_tokens: Optional[int],
        max_num_seqs: int,
        max_model_len: int,
        use_v2_block_manager: bool = False,
        num_lookahead_slots: int = 0,
        delay_factor: float = 0.0,
        enable_chunked_prefill: bool = False,
    ) -> None:
        if max_num_batched_tokens is None:
            # Derive a default token budget.
            if enable_chunked_prefill:
                # Best ITL/TTFT balance measured on A100; not tuned for
                # throughput.
                max_num_batched_tokens = 512
            else:
                # Never drop below 2048 even for short-context models, for
                # higher throughput.
                max_num_batched_tokens = max(max_model_len, 2048)
        self.max_num_batched_tokens = max_num_batched_tokens
        if enable_chunked_prefill:
            logger.info("Chunked prefill is enabled (EXPERIMENTAL).")

        self.max_num_seqs = max_num_seqs
        self.max_model_len = max_model_len
        self.use_v2_block_manager = use_v2_block_manager
        self.num_lookahead_slots = num_lookahead_slots
        self.delay_factor = delay_factor
        self.chunked_prefill_enabled = enable_chunked_prefill

        self._verify_args()

    def _verify_args(self) -> None:
        # Without chunked prefill, the token budget caps the usable sequence
        # length, so it must cover max_model_len.
        if (self.max_num_batched_tokens < self.max_model_len
                and not self.chunked_prefill_enabled):
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes vLLM reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")

        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")

        if self.num_lookahead_slots < 0:
            raise ValueError(
                "num_lookahead_slots "
                f"({self.num_lookahead_slots}) must be greater than or "
                "equal to 0.")


class DeviceConfig:

    def __init__(self, device: str = "auto") -> None:
        if device != "auto":
            # Device type is assigned explicitly.
            self.device_type = device
        elif is_neuron():
            self.device_type = "neuron"
        elif is_cpu():
            self.device_type = "cpu"
        else:
            # Assume CUDA without calling torch.cuda.is_available(), to
            # avoid initializing CUDA before workers are forked.
            self.device_type = "cuda"

        # Some device types require processing inputs on CPU.
        if self.device_type in ["neuron"]:
            self.device = torch.device("cpu")
        else:
            # Set device with device type.
            self.device = torch.device(self.device_type)
for speculative decoding. + + The configuration is currently specialized to draft-model speculative + decoding with top-1 proposals. + """ + + @staticmethod + def maybe_create_spec_config( + target_model_config: ModelConfig, + target_parallel_config: ParallelConfig, + target_dtype: str, + speculative_model: Optional[str], + num_speculative_tokens: Optional[int], + speculative_max_model_len: Optional[int], + enable_chunked_prefill: bool, + use_v2_block_manager: bool, + ngram_prompt_lookup_max: Optional[int], + ngram_prompt_lookup_min: Optional[int], + ) -> Optional["SpeculativeConfig"]: + """Create a SpeculativeConfig if possible, else return None. + + This function attempts to create a SpeculativeConfig object based on the + provided parameters. If the necessary conditions are met, it returns an + instance of SpeculativeConfig. Otherwise, it returns None. + + Args: + target_model_config (ModelConfig): The configuration of the target + model. + target_parallel_config (ParallelConfig): The parallel configuration + for the target model. + target_dtype (str): The data type used for the target model. + speculative_model (Optional[str]): The name of the speculative + model, if provided. + num_speculative_tokens (Optional[int]): The number of speculative + tokens, if provided. + speculative_max_model_len (Optional[int]): The maximum model len of + the speculative model. Used when testing the ability to skip + speculation for some sequences. + enable_chunked_prefill (bool): Whether vLLM is configured to use + chunked prefill or not. Used for raising an error since its not + yet compatible with spec decode. + use_v2_block_manager (bool): Whether vLLM is configured to use the + v2 block manager or not. Used for raising an error since the v2 + block manager is required with spec decode. + ngram_prompt_lookup_max (Optional[int]): Max size of ngram token + window, if provided. + ngram_prompt_lookup_min (Optional[int]): Min size of ngram token + window, if provided. 
+ + Returns: + Optional["SpeculativeConfig"]: An instance of SpeculativeConfig if + the necessary conditions are met, else None. + """ + + if (speculative_model is None and num_speculative_tokens is None): + return None + + if speculative_model is not None and num_speculative_tokens is None: + raise ValueError( + "Expected both speculative_model and " + "num_speculative_tokens to be provided, but found " + f"{speculative_model=} and {num_speculative_tokens=}.") + + assert (speculative_model is not None + and num_speculative_tokens is not None) + + if enable_chunked_prefill: + raise ValueError( + "Speculative decoding and chunked prefill are " + f"currently mutually exclusive ({enable_chunked_prefill=}).") + + if not use_v2_block_manager: + raise ValueError( + "Speculative decoding requires usage of the V2 " + "block manager. Enable it with --use-v2-block-manager.") + + # TODO: The user should be able to specify revision/quantization/max + # model len for the draft model. It is not currently supported. + draft_revision = None + draft_code_revision = None + draft_quantization = None + + if speculative_model == "[ngram]": + assert (ngram_prompt_lookup_max is not None + and ngram_prompt_lookup_max > 0) + if ngram_prompt_lookup_min is None: + ngram_prompt_lookup_min = 0 + else: + assert ngram_prompt_lookup_max > ngram_prompt_lookup_min + + # TODO: current we still need extract vocab_size from target model + # config, in future, we may try refactor it out, and set + # draft related config as None here. 
+ draft_model_config = target_model_config + draft_parallel_config = target_parallel_config + else: + ngram_prompt_lookup_max = 0 + ngram_prompt_lookup_min = 0 + draft_model_config = ModelConfig( + model=speculative_model, + tokenizer=target_model_config.tokenizer, + tokenizer_mode=target_model_config.tokenizer_mode, + trust_remote_code=target_model_config.trust_remote_code, + dtype=target_model_config.dtype, + seed=target_model_config.seed, + revision=draft_revision, + code_revision=draft_code_revision, + tokenizer_revision=target_model_config.tokenizer_revision, + max_model_len=None, + quantization=draft_quantization, + enforce_eager=target_model_config.enforce_eager, + max_seq_len_to_capture=target_model_config. + max_seq_len_to_capture, + max_logprobs=target_model_config.max_logprobs, + ) + + draft_model_config.max_model_len = ( + SpeculativeConfig._maybe_override_draft_max_model_len( + speculative_max_model_len, + draft_model_config.max_model_len, + target_model_config.max_model_len, + )) + + draft_parallel_config = ( + SpeculativeConfig.create_draft_parallel_config( + target_parallel_config)) + + return SpeculativeConfig( + draft_model_config, + draft_parallel_config, + num_speculative_tokens, + ngram_prompt_lookup_max, + ngram_prompt_lookup_min, + ) + + @staticmethod + def _maybe_override_draft_max_model_len( + speculative_max_model_len: Optional[int], + draft_max_model_len: int, + target_max_model_len: int, + ) -> int: + """Determine the max sequence len for the draft model. This is usually + the draft_max_model_len, but may be the target_max_model_len if it is + less than the draft_max_model_len, or may be speculative_max_model_len + if it is specified. + + This is necessary so that sequences do not exceed the capacity of the + draft model or the target model. + + speculative_max_model_len is mainly used for testing that sequences can + skip speculation. 
+ """ + + if speculative_max_model_len is not None: + + if speculative_max_model_len > draft_max_model_len: + raise ValueError(f"{speculative_max_model_len=} cannot be " + f"larger than {draft_max_model_len=}") + + if speculative_max_model_len > target_max_model_len: + raise ValueError(f"{speculative_max_model_len=} cannot be " + f"larger than {target_max_model_len=}") + + return speculative_max_model_len + + return min( + draft_max_model_len, + target_max_model_len, + ) + + @staticmethod + def create_draft_parallel_config( + target_parallel_config: ParallelConfig) -> ParallelConfig: + """Create a parallel config for use by the draft worker. + + This is mostly a copy of the target parallel config. In the future the + draft worker can have a different parallel strategy, e.g. TP=1. + """ + draft_parallel_config = ParallelConfig( + pipeline_parallel_size=target_parallel_config. + pipeline_parallel_size, + tensor_parallel_size=target_parallel_config.tensor_parallel_size, + worker_use_ray=target_parallel_config.worker_use_ray, + max_parallel_loading_workers=target_parallel_config. + max_parallel_loading_workers, + disable_custom_all_reduce=target_parallel_config. + disable_custom_all_reduce, + tokenizer_pool_config=target_parallel_config.tokenizer_pool_config, + ray_workers_use_nsight=target_parallel_config. + ray_workers_use_nsight, + placement_group=target_parallel_config.placement_group, + ) + + return draft_parallel_config + + def __init__( + self, + draft_model_config: ModelConfig, + draft_parallel_config: ParallelConfig, + num_speculative_tokens: int, + ngram_prompt_lookup_max: int, + ngram_prompt_lookup_min: int, + ): + """Create a SpeculativeConfig object. + + Args: + draft_model_config: ModelConfig for the draft model. + draft_parallel_config: ParallelConfig for the draft model. + num_speculative_tokens: The number of tokens to sample from the + draft model before scoring with the target model. 
@dataclass
class LoRAConfig:
    """Configuration for serving LoRA (Low-Rank Adaptation) adapters."""

    # Maximum rank of any served LoRA adapter.
    max_lora_rank: int
    # Maximum number of adapters resident on the GPU at once.
    max_loras: int
    fully_sharded_loras: bool = False
    # CPU-side adapter cache size; defaults to ``max_loras`` when unset.
    max_cpu_loras: Optional[int] = None
    # dtype of LoRA weights; ``None``/"auto" follows the model dtype.
    lora_dtype: Optional[torch.dtype] = None
    lora_extra_vocab_size: int = 256
    # This is a constant.
    lora_vocab_padding_size: ClassVar[int] = 256

    def __post_init__(self):
        """Validate field values and default the CPU cache size.

        Raises:
            ValueError: On an unsupported rank, extra-vocab size, or an
                inconsistent ``max_loras``/``max_cpu_loras`` combination.
        """
        # Keep this in sync with csrc/punica/bgmv/bgmv_config.h
        allowed_ranks = (8, 16, 32, 64)
        allowed_extra_vocab = (0, 256, 512)
        if self.max_lora_rank not in allowed_ranks:
            raise ValueError(
                f"max_lora_rank ({self.max_lora_rank}) must be one of "
                f"{allowed_ranks}.")
        if self.lora_extra_vocab_size not in allowed_extra_vocab:
            raise ValueError(
                f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
                f"must be one of {allowed_extra_vocab}.")
        if self.max_loras < 1:
            raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
        if self.max_cpu_loras is None:
            # Default the CPU cache to the GPU capacity.
            self.max_cpu_loras = self.max_loras
        elif self.max_cpu_loras < self.max_loras:
            raise ValueError(
                f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
                f"max_loras ({self.max_loras})")

    def verify_with_model_config(self, model_config: ModelConfig):
        """Resolve ``lora_dtype`` against the model and flag untested
        quantization methods."""
        if self.lora_dtype in (None, "auto"):
            self.lora_dtype = model_config.dtype
        elif isinstance(self.lora_dtype, str):
            # Translate a dtype name like "float16" into the torch dtype.
            self.lora_dtype = getattr(torch, self.lora_dtype)
        quant = model_config.quantization
        if quant and quant not in ["awq", "gptq"]:
            # TODO support marlin and squeezellm
            logger.warning("%s quantization is not tested with LoRA yet.",
                           quant)

    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
        """Reject scheduler settings the LoRA CUDA kernel cannot handle.

        Raises:
            ValueError: If ``max_num_batched_tokens`` exceeds the kernel's
                65528-token limit.
        """
        if scheduler_config.max_num_batched_tokens > 65528:
            raise ValueError(
                "Due to limitations of the custom LoRA CUDA kernel, "
                "max_num_batched_tokens must be <= 65528 when "
                "LoRA is enabled.")
+ + An image roughly goes through the following transformation: + Raw image --> pixel values --> image features --> image embeddings. + + The difference between different image input types is where the + image encoder (pixel values --> image features) is run. + Different image input types also correspond to different tensor shapes. + + For example, for Llava, PIXEL_VALUES: (1, 3, 336, 336). + IMAGE_FEATURES: (1, 576, 1024). + """ + PIXEL_VALUES = enum.auto() + IMAGE_FEATURES = enum.auto() + + image_input_type: ImageInputType + # The input id corresponding to image token. + image_token_id: int + # Used for running `run_prefill_max_token`. + # For models that support varying resolution, this corresponds to + # worst case scenario (biggest supported resolution). + image_input_shape: tuple + image_feature_size: int + + @classmethod + def get_image_input_enum_type( + cls, value: str) -> "VisionLanguageConfig.ImageInputType": + """Get the image input type from a string.""" + try: + return cls.ImageInputType[value.upper()] + except KeyError as e: + raise ValueError(f"{value} is not a valid choice. " + f"Expecting to choose from " + f"{[x.name for x in cls.ImageInputType]}.") from e + + +_STR_DTYPE_TO_TORCH_DTYPE = { + "half": torch.float16, + "float16": torch.float16, + "float": torch.float32, + "float32": torch.float32, + "bfloat16": torch.bfloat16, +} + +_ROCM_NOT_SUPPORTED_DTYPE = ["float", "float32"] + + +def _get_and_verify_dtype( + config: PretrainedConfig, + dtype: Union[str, torch.dtype], +) -> torch.dtype: + # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct + # because config.torch_dtype can be None. + config_dtype = getattr(config, "torch_dtype", None) + if config_dtype is None: + config_dtype = torch.float32 + + if isinstance(dtype, str): + dtype = dtype.lower() + if dtype == "auto": + if config_dtype == torch.float32: + # Following the common practice, we use float16 for float32 + # models. 
def _get_and_verify_max_len(
    hf_config: PretrainedConfig,
    max_model_len: Optional[int],
) -> int:
    """Get and verify the model's maximum length.

    Derives the maximum context length from the HF config (taking the
    smallest of the known length keys, adjusted for rope scaling) and
    reconciles it with the user-specified ``max_model_len``.

    Raises:
        ValueError: If the user-specified length exceeds what the model
            supports (unless permitted by ``model_max_length``).
    """
    derived_max_model_len = float("inf")
    possible_keys = [
        # OPT
        "max_position_embeddings",
        # GPT-2
        "n_positions",
        # MPT
        "max_seq_len",
        # ChatGLM2
        "seq_length",
        # Command-R
        "model_max_length",
        # Others
        "max_sequence_length",
        "max_seq_length",
        "seq_len",
    ]
    # Track which key provided the (smallest) derived length, for the
    # error message below.
    max_len_key = None
    for key in possible_keys:
        max_len = getattr(hf_config, key, None)
        if max_len is not None:
            max_len_key = key if max_len < derived_max_model_len \
                else max_len_key
            derived_max_model_len = min(derived_max_model_len, max_len)
    if derived_max_model_len == float("inf"):
        if max_model_len is not None:
            # If max_model_len is specified, we use it.
            return max_model_len

        default_max_len = 2048
        # Bug fix: possible_keys is a list, so it must be formatted with
        # %s — the original %d raised a logging formatting error.
        logger.warning(
            "The model's config.json does not contain any of the following "
            "keys to determine the original maximum length of the model: "
            "%s. Assuming the model's maximum length is %d.", possible_keys,
            default_max_len)
        derived_max_model_len = default_max_len

    rope_scaling = getattr(hf_config, "rope_scaling", None)
    if rope_scaling is not None and rope_scaling["type"] != "su":
        assert "factor" in rope_scaling
        scaling_factor = rope_scaling["factor"]
        if rope_scaling["type"] == "yarn":
            # yarn scales from the original (pre-extension) length.
            derived_max_model_len = rope_scaling[
                "original_max_position_embeddings"]
        derived_max_model_len *= scaling_factor

    if max_model_len is None:
        max_model_len = int(derived_max_model_len)
    elif max_model_len > derived_max_model_len:
        # Some models might have a separate key for specifying model_max_length
        # that will be bigger than derived_max_model_len. We compare user input
        # with model_max_length and allow this override when it's smaller.
        model_max_length = getattr(hf_config, "model_max_length", None)
        if model_max_length is not None and max_model_len <= model_max_length:
            pass
        else:
            raise ValueError(
                f"User-specified max_model_len ({max_model_len}) is greater "
                "than the derived max_model_len "
                f"({max_len_key}={derived_max_model_len} or model_max_length="
                f"{model_max_length} in model's config.json). This may lead "
                "to incorrect model outputs or CUDA errors. Make sure the "
                "value is correct and within the model context size.")
    return int(max_model_len)
+ """ + if not served_model_name: + return model + if isinstance(served_model_name, list): + return served_model_name[0] + return served_model_name + + +@dataclass +class DecodingConfig: + """Dataclass which contains the decoding strategy of the engine""" + + # Which guided decoding algo to use. 'outlines' / 'lm-format-enforcer' + guided_decoding_backend: str = 'outlines' + + def __post_init__(self): + valid_guided_backends = ['outlines', 'lm-format-enforcer'] + backend = self.guided_decoding_backend + if backend not in valid_guided_backends: + raise ValueError(f"Invalid guided_decoding_backend '{backend}," + f"must be one of {valid_guided_backends}") + + +@dataclass(frozen=True) +class EngineConfig: + """Dataclass which contains all engine-related configuration. This + simplifies passing around the distinct configurations in the codebase. + """ + + model_config: ModelConfig + cache_config: CacheConfig + parallel_config: ParallelConfig + scheduler_config: SchedulerConfig + device_config: DeviceConfig + load_config: LoadConfig + lora_config: Optional[LoRAConfig] + vision_language_config: Optional[VisionLanguageConfig] + speculative_config: Optional[SpeculativeConfig] + decoding_config: Optional[DecodingConfig] + + def __post_init__(self): + """Verify configs are valid & consistent with each other. + """ + self.model_config.verify_with_parallel_config(self.parallel_config) + self.cache_config.verify_with_parallel_config(self.parallel_config) + + if self.lora_config: + self.lora_config.verify_with_model_config(self.model_config) + self.lora_config.verify_with_scheduler_config( + self.scheduler_config) + + def to_dict(self): + """Return the configs as a dictionary, for use in **kwargs. 
class BlockTable:
    """Maps one sequence's tokens onto a list of memory blocks.

    Each block holds at most ``block_size`` tokens; allocating and freeing
    the underlying memory is delegated to a ``DeviceAwareBlockAllocator``.
    The table is append-only: tokens can be added but never removed.

    Args:
        block_size: Maximum number of tokens a single block can hold.
        block_allocator: Allocator that owns the block memory.
        _blocks: Optional pre-existing blocks to seed the table with
            (used by ``fork``); an empty table is created otherwise.
    """

    def __init__(
        self,
        block_size: int,
        block_allocator: DeviceAwareBlockAllocator,
        _blocks: Optional[List[Block]] = None,
    ):
        self._block_size = block_size
        self._allocator = block_allocator
        self._blocks: List[Block] = [] if _blocks is None else _blocks

        # Use helper method instead of directly calculating, as blocks
        # may not be allocated.
        self._num_full_slots = len(self._get_all_token_ids())

    @staticmethod
    def get_num_required_blocks(token_ids: List[int], block_size: int) -> int:
        """Worst-case number of blocks needed to store ``token_ids``
        (assumes every block is a fresh allocation, i.e. ignores prefix
        caching)."""
        return cdiv(len(token_ids), block_size)

    def allocate(self,
                 token_ids: List[int],
                 device: Device = Device.GPU) -> None:
        """Allocate blocks on ``device`` holding ``token_ids``.

        Must be called on a table that has no blocks yet, with a
        non-empty token list.
        """
        assert not self._is_allocated
        assert token_ids
        self._blocks = self._allocate_blocks_for_token_ids(
            prev_block=None, token_ids=token_ids, device=device)
        self._num_full_slots = len(token_ids)

    def append_token_ids(self,
                         token_ids: List[int],
                         num_lookahead_slots: int = 0) -> None:
        """Append ``token_ids`` to the table, growing it as needed.

        Capacity for ``num_lookahead_slots`` additional slots is also
        reserved, so speculative tokens can be written without another
        allocation.
        """
        assert self._is_allocated
        assert len(self._blocks) > 0

        self.ensure_num_empty_slots(num_empty_slots=len(token_ids) +
                                    num_lookahead_slots)

        # Write each block-sized chunk into the blocks starting at the
        # first one that still has room.
        first_writable = self._num_full_slots // self._block_size
        writable_blocks = self._blocks[first_writable:]
        chunks = self._chunk_token_blocks_for_append(token_ids)
        for target_block, chunk in zip(writable_blocks, chunks):
            target_block.append_token_ids(chunk)

        self._num_full_slots += len(token_ids)

    def ensure_num_empty_slots(self, num_empty_slots: int) -> None:
        """Grow the table until at least ``num_empty_slots`` free token
        slots are available, allocating new GPU blocks as necessary."""
        # Currently the block table only supports
        # appending tokens to GPU blocks.
        device = Device.GPU
        assert self._is_allocated

        missing_slots = num_empty_slots - self._num_empty_slots
        if missing_slots <= 0:
            return

        for _ in range(cdiv(missing_slots, self._block_size)):
            assert len(self._blocks) > 0
            self._blocks.append(
                self._allocator.allocate_mutable(prev_block=self._blocks[-1],
                                                 device=device))

    def fork(self) -> "BlockTable":
        """Return a new BlockTable sharing this table's block memory.

        The fork has its own block list but refers to the same underlying
        allocations (copy-on-write semantics are handled by the allocator).
        """
        assert self._is_allocated
        assert len(self._blocks) > 0
        return BlockTable(
            block_size=self._block_size,
            block_allocator=self._allocator,
            _blocks=self._allocator.fork(self._blocks[-1]),
        )

    def free(self) -> None:
        """Release every block back to the allocator and empty the table."""
        assert self._is_allocated
        for blk in self._blocks:
            self._allocator.free(blk)
        self._blocks = []

    @property
    def physical_block_ids(self) -> List[Optional[int]]:
        """The physical block ids backing this table, in sequence order."""
        assert self._is_allocated
        return [blk.block_id for blk in self._blocks]

    def get_unseen_token_ids(self, sequence_token_ids: List[int]) -> List[int]:
        """Return the suffix of ``sequence_token_ids`` not yet appended.

        The table is append-only, so the unseen tokens are exactly those
        past the ``num_full_slots`` already stored.
        """
        return sequence_token_ids[self.num_full_slots:]

    def _allocate_blocks_for_token_ids(self, prev_block: Optional[Block],
                                       token_ids: List[int],
                                       device: Device) -> List[Block]:
        # Full chunks become immutable blocks; a trailing partial chunk
        # becomes a mutable block filled in place.
        allocated: List[Block] = []
        for chunk in chunk_list(token_ids, self._block_size):
            if len(chunk) == self._block_size:
                prev_block = self._allocator.allocate_immutable(
                    prev_block, token_ids=chunk, device=device)
            else:
                prev_block = self._allocator.allocate_mutable(
                    prev_block=prev_block, device=device)
                prev_block.append_token_ids(chunk)
            allocated.append(prev_block)
        return allocated

    def _get_all_token_ids(self) -> List[int]:
        # NOTE: This function is O(seq_len); use sparingly.
        if not self._is_allocated:
            return []
        token_ids: List[int] = []
        for blk in self._blocks:
            token_ids.extend(blk.token_ids)
        return token_ids

    @property
    def _is_allocated(self) -> bool:
        return len(self._blocks) > 0

    @property
    def _num_empty_slots(self) -> int:
        assert self._is_allocated
        return len(self._blocks) * self._block_size - self._num_full_slots

    @property
    def num_full_slots(self) -> int:
        """Total number of tokens currently stored in the BlockTable."""
        return self._num_full_slots

    def get_num_blocks_touched_by_append_slots(
            self, token_ids: List[int], num_lookahead_slots: int) -> int:
        """Number of blocks that appending ``token_ids`` (plus lookahead
        slots) would touch.

        Used by the scheduler to decide whether a sequence can continue
        generation or must be preempted.
        """
        padded = token_ids + [-1] * num_lookahead_slots
        return len(self._chunk_token_blocks_for_append(padded))

    def _chunk_token_blocks_for_append(
            self, token_ids: List[int]) -> List[List[int]]:
        """Split ``token_ids`` into per-block chunks.

        The first chunk may be smaller than ``block_size``: it fills the
        remaining space of the last partially-full block.
        """
        first_chunk_size = self._block_size - (self._num_full_slots %
                                               self._block_size)
        return [token_ids[:first_chunk_size]] + chunk_list(
            token_ids[first_chunk_size:], self._block_size)
+ """ + + def __init__(self, all_block_indices: Iterable[BlockId]): + deduped = set(all_block_indices) + self._refcounts: Dict[BlockId, + RefCount] = {index: 0 + for index in deduped} + + def incr(self, block_id: BlockId) -> RefCount: + assert block_id in self._refcounts + pre_incr_refcount = self._refcounts[block_id] + + assert pre_incr_refcount >= 0 + + post_incr_refcount = pre_incr_refcount + 1 + self._refcounts[block_id] = post_incr_refcount + return post_incr_refcount + + def decr(self, block_id: BlockId) -> RefCount: + assert block_id in self._refcounts + refcount = self._refcounts[block_id] + + assert refcount > 0 + refcount -= 1 + + self._refcounts[block_id] = refcount + + return refcount + + def get(self, block_id: BlockId) -> RefCount: + assert block_id in self._refcounts + return self._refcounts[block_id] + + def as_readonly(self) -> "ReadOnlyRefCounter": + return ReadOnlyRefCounter(self) + + +class ReadOnlyRefCounter(RefCounterProtocol): + """A read-only view of the RefCounter class. + + The ReadOnlyRefCounter class provides a read-only interface to access the + reference counts maintained by a RefCounter instance. It does not allow + modifications to the reference counts. + + Args: + refcounter (RefCounter): The RefCounter instance to create a read-only + view for. + """ + + def __init__(self, refcounter: RefCounter): + self._refcounter = refcounter + + def incr(self, block_id: BlockId) -> RefCount: + raise ValueError("Incr not allowed") + + def decr(self, block_id: BlockId) -> RefCount: + raise ValueError("Decr not allowed") + + def get(self, block_id: BlockId) -> RefCount: + return self._refcounter.get(block_id) + + +class CopyOnWriteTracker: + """A class for tracking and managing copy-on-write operations for blocks. + + The CopyOnWriteTracker class maintains a mapping of source block indices to + their corresponding copy-on-write destination block indices. 
It works in + conjunction with a RefCounter and a BlockAllocator to handle reference + counting and block allocation. + + Args: + refcounter (RefCounter): The reference counter used to track block + reference counts. + allocator (BlockAllocator): The block allocator used to allocate and + free blocks. + """ + + def __init__( + self, + refcounter: RefCounterProtocol, + allocator: BlockAllocator, + ): + self._copy_on_writes: Dict[BlockId, List[BlockId]] = defaultdict(list) + self._refcounter = refcounter + self._allocator = allocator + + def cow_block_if_not_appendable(self, block: Block) -> Optional[BlockId]: + """Performs a copy-on-write operation on the given block if it is not + appendable. + + This method checks the reference count of the given block. If the + reference count is greater than 1, indicating that the block is shared, + a copy-on-write operation is performed. The original block is freed, + and a new block is allocated with the same content. The new block index + is returned. + + Args: + block (Block): The block to check for copy-on-write. + + Returns: + Optional[BlockId]: The block index of the new block if a copy-on + -write operation was performed, or the original block index if + no copy-on-write was necessary. + """ + block_id = block.block_id + if block_id is None: + return block_id + + refcount = self._refcounter.get(block_id) + assert refcount != 0 + if refcount > 1: + src_block_id = block_id + + # Decrement refcount of the old block. + self._allocator.free(block) + + # Allocate a fresh new block. + block_id = self._allocator.allocate_mutable( + prev_block=block.prev_block).block_id + + # Track src/dst copy. + assert src_block_id is not None + assert block_id is not None + self._copy_on_writes[src_block_id].append(block_id) + + return block_id + + def clear_cows(self) -> Dict[BlockId, List[BlockId]]: + """Clears the copy-on-write tracking information and returns the current + state. 
+ + This method returns a dictionary mapping source block indices to lists + of destination block indices for the current copy-on-write operations. + It then clears the internal tracking information. + + Returns: + Dict[BlockId, List[BlockId]]: A dictionary mapping source + block indices to lists of destination block indices for the + current copy-on-write operations. + """ + cows = dict(self._copy_on_writes) + self._copy_on_writes.clear() + return cows + + +def get_all_blocks_recursively(last_block: Block) -> List[Block]: + """Retrieves all the blocks in a sequence starting from the last block. + + This function recursively traverses the sequence of blocks in reverse order, + starting from the given last block, and returns a list of all the blocks in + the sequence. + + Args: + last_block (Block): The last block in the sequence. + + Returns: + List[Block]: A list of all the blocks in the sequence, in the order they + appear. + """ + + def recurse(block: Block, lst: List[Block]) -> None: + if block.prev_block is not None: + recurse(block.prev_block, lst) + lst.append(block) + + all_blocks: List[Block] = [] + recurse(last_block, all_blocks) + return all_blocks diff --git a/vllm/core/block/cpu_gpu_block_allocator.py b/vllm/core/block/cpu_gpu_block_allocator.py new file mode 100644 index 0000000..5b25e1b --- /dev/null +++ b/vllm/core/block/cpu_gpu_block_allocator.py @@ -0,0 +1,228 @@ +from typing import Dict, FrozenSet, List, Optional + +from vllm.core.block.interfaces import (Block, BlockAllocator, BlockId, + DeviceAwareBlockAllocator) +from vllm.core.block.naive_block import NaiveBlock, NaiveBlockAllocator +from vllm.core.block.prefix_caching_block import PrefixCachingBlockAllocator +from vllm.utils import Device + + +class CpuGpuBlockAllocator(DeviceAwareBlockAllocator): + """A block allocator that can allocate blocks on both CPU and GPU memory. 
+ + This class implements the `DeviceAwareBlockAllocator` interface and provides + functionality for allocating and managing blocks of memory on both CPU and + GPU devices. + + The `CpuGpuBlockAllocator` maintains separate memory pools for CPU and GPU + blocks, and allows for allocation, deallocation, forking, and swapping of + blocks across these memory pools. + """ + + @staticmethod + def create( + allocator_type: str, + num_gpu_blocks: int, + num_cpu_blocks: int, + block_size: int, + ) -> DeviceAwareBlockAllocator: + """Creates a CpuGpuBlockAllocator instance with the specified + configuration. + + This static method creates and returns a CpuGpuBlockAllocator instance + based on the provided parameters. It initializes the CPU and GPU block + allocators with the specified number of blocks, block size, and + allocator type. + + Args: + allocator_type (str): The type of block allocator to use for CPU + and GPU blocks. Currently supported values are "naive" and + "prefix_caching". + num_gpu_blocks (int): The number of blocks to allocate for GPU + memory. + num_cpu_blocks (int): The number of blocks to allocate for CPU + memory. + block_size (int): The size of each block in number of tokens. + + Returns: + DeviceAwareBlockAllocator: A CpuGpuBlockAllocator instance with the + specified configuration. + + Notes: + - The block IDs are assigned contiguously, with GPU block IDs coming + before CPU block IDs. 
+ """ + block_ids = list(range(num_gpu_blocks + num_cpu_blocks)) + gpu_block_ids = block_ids[:num_gpu_blocks] + cpu_block_ids = block_ids[num_gpu_blocks:] + + if allocator_type == "naive": + gpu_allocator: BlockAllocator = NaiveBlockAllocator( + create_block=NaiveBlock, # type: ignore + num_blocks=num_gpu_blocks, + block_size=block_size, + block_ids=gpu_block_ids, + ) + + cpu_allocator: BlockAllocator = NaiveBlockAllocator( + create_block=NaiveBlock, # type: ignore + num_blocks=num_cpu_blocks, + block_size=block_size, + block_ids=cpu_block_ids, + ) + elif allocator_type == "prefix_caching": + gpu_allocator = PrefixCachingBlockAllocator( + num_blocks=num_gpu_blocks, + block_size=block_size, + block_ids=gpu_block_ids, + ) + + cpu_allocator = PrefixCachingBlockAllocator( + num_blocks=num_cpu_blocks, + block_size=block_size, + block_ids=cpu_block_ids, + ) + else: + raise ValueError(f"Unknown allocator type {allocator_type=}") + + return CpuGpuBlockAllocator( + cpu_block_allocator=cpu_allocator, + gpu_block_allocator=gpu_allocator, + ) + + def __init__( + self, + cpu_block_allocator: BlockAllocator, + gpu_block_allocator: BlockAllocator, + ): + assert not ( + cpu_block_allocator.all_block_ids + & gpu_block_allocator.all_block_ids + ), "cpu and gpu block allocators can't have intersection of block ids" + + self._allocators = { + Device.CPU: cpu_block_allocator, + Device.GPU: gpu_block_allocator, + } + + self._block_ids_to_allocator: Dict[int, BlockAllocator] = {} + for _, allocator in self._allocators.items(): + for block_id in allocator.all_block_ids: + self._block_ids_to_allocator[block_id] = allocator + + def allocate_mutable(self, prev_block: Optional[Block], + device: Device) -> Block: + """Allocates a new mutable block on the specified device. + + Args: + prev_block (Optional[Block]): The previous block to in the sequence. + Used for prefix hashing. + device (Device): The device on which to allocate the new block. 
+ + Returns: + Block: The newly allocated mutable block. + """ + return self._allocators[device].allocate_mutable(prev_block) + + def allocate_immutable(self, prev_block: Optional[Block], + token_ids: List[int], device: Device) -> Block: + """Allocates a new immutable block with the provided token IDs on the + specified device. + + Args: + prev_block (Optional[Block]): The previous block in the sequence. + Used for prefix hashing. + token_ids (List[int]): The list of token IDs to be stored in the new + block. + device (Device): The device on which to allocate the new block. + + Returns: + Block: The newly allocated immutable block containing the provided + token IDs. + """ + return self._allocators[device].allocate_immutable( + prev_block, token_ids) + + def free(self, block: Block) -> None: + """Frees the memory occupied by the given block. + + Args: + block (Block): The block to be freed. + """ + block_id = block.block_id + assert block_id is not None + allocator = self._block_ids_to_allocator[block_id] + return allocator.free(block) + + def fork(self, last_block: Block) -> List[Block]: + """Creates a new sequence of blocks that shares the same underlying + memory as the original sequence. + + Args: + last_block (Block): The last block in the original sequence. + + Returns: + List[Block]: A new list of blocks that shares the same memory as the + original sequence. + """ + block_id = last_block.block_id + assert block_id is not None + allocator = self._block_ids_to_allocator[block_id] + return allocator.fork(last_block) + + def get_num_free_blocks(self, device: Device) -> int: + """Returns the number of free blocks available on the specified device. + + Args: + device (Device): The device for which to query the number of free + blocks. AssertionError is raised if None is passed. + + Returns: + int: The number of free blocks available on the specified device. 
+ """ + return self._allocators[device].get_num_free_blocks() + + def get_num_total_blocks(self, device: Device) -> int: + return self._allocators[device].get_num_total_blocks() + + def clear_copy_on_writes(self) -> Dict[int, List[int]]: + """Clears the copy-on-write (CoW) state and returns the mapping of + source to destination block IDs. + + Returns: + Dict[int, List[int]]: A dictionary mapping source block IDs to lists + of destination block IDs. + """ + # CoW only supported on GPU + device = Device.GPU + return self._allocators[device].clear_copy_on_writes() + + def mark_blocks_as_accessed(self, block_ids: List[int], + now: float) -> None: + """Mark blocks as accessed, only use for prefix caching.""" + # Prefix caching only supported on GPU. + device = Device.GPU + return self._allocators[device].mark_blocks_as_accessed(block_ids, now) + + def mark_blocks_as_computed(self, block_ids: List[int]) -> None: + """Mark blocks as accessed, only use for prefix caching.""" + # Prefix caching only supported on GPU. + device = Device.GPU + return self._allocators[device].mark_blocks_as_computed(block_ids) + + def get_common_computed_block_ids( + self, seq_block_ids: List[List[int]]) -> List[int]: + # Prefix caching only supported on GPU. 
+ device = Device.GPU + return self._allocators[device].get_common_computed_block_ids( + seq_block_ids) + + @property + def all_block_ids(self) -> FrozenSet[int]: + return frozenset(self._block_ids_to_allocator.keys()) + + def promote_to_immutable_block(self, block: Block) -> BlockId: + raise NotImplementedError + + def cow_block_if_not_appendable(self, block: Block) -> Optional[BlockId]: + raise NotImplementedError diff --git a/vllm/core/block/interfaces.py b/vllm/core/block/interfaces.py new file mode 100644 index 0000000..634c401 --- /dev/null +++ b/vllm/core/block/interfaces.py @@ -0,0 +1,205 @@ +from abc import ABC, abstractmethod +from typing import Dict, FrozenSet, List, Optional, Protocol + +from vllm.utils import Device + +BlockId = int + + +class Block(ABC): + + @abstractmethod + def append_token_ids(self, token_ids: List[int]) -> None: + pass + + @property + @abstractmethod + def block_id(self) -> Optional[int]: + pass + + @block_id.setter + @abstractmethod + def block_id(self, value: Optional[int]) -> None: + """NOTE: Do not use this API outside Block.""" + self._block_id = value + + @property + @abstractmethod + def token_ids(self) -> List[int]: + pass + + @property + @abstractmethod + def num_empty_slots(self) -> int: + pass + + @property + @abstractmethod + def is_full(self) -> bool: + pass + + @property + @abstractmethod + def prev_block(self) -> Optional["Block"]: + pass + + @property + @abstractmethod + def computed(self) -> bool: + raise NotImplementedError + + @computed.setter + @abstractmethod + def computed(self, value) -> bool: + """Should be only used by PrefixCacingAllocator""" + raise NotImplementedError + + @property + @abstractmethod + def last_accessed(self) -> float: + raise NotImplementedError + + @last_accessed.setter + @abstractmethod + def last_accessed(self, last_accessed_ts: float): + raise NotImplementedError + + class Factory(Protocol): + + @abstractmethod + def __call__( + self, + prev_block: Optional["Block"], + token_ids: 
List[int], + block_size: int, + allocator: "BlockAllocator", + block_id: Optional[int] = None, + ) -> "Block": + pass + + @property + @abstractmethod + def content_hash(self) -> Optional[int]: + """Return the content-based hash of the current block, or None if it is + not yet defined or not supported. + + For the content-based hash to be defined, the current block must be + full. + """ + return None + + +class BlockAllocator(ABC): + + @abstractmethod + def allocate_mutable(self, prev_block: Optional[Block]) -> Block: + pass + + @abstractmethod + def allocate_immutable(self, prev_block: Optional[Block], + token_ids: List[int]) -> Block: + pass + + @abstractmethod + def free(self, block: Block) -> None: + pass + + @abstractmethod + def fork(self, last_block: Block) -> List[Block]: + pass + + @abstractmethod + def get_num_total_blocks(self) -> int: + pass + + @abstractmethod + def get_num_free_blocks(self) -> int: + pass + + @property + @abstractmethod + def all_block_ids(self) -> FrozenSet[int]: + pass + + @abstractmethod + def clear_copy_on_writes(self) -> Dict[int, List[int]]: + pass + + @abstractmethod + def mark_blocks_as_accessed(self, block_ids: List[int], + now: float) -> None: + pass + + @abstractmethod + def mark_blocks_as_computed(self, block_ids: List[int]) -> None: + pass + + @abstractmethod + def get_common_computed_block_ids( + self, seq_block_ids: List[List[int]]) -> List[int]: + pass + + @abstractmethod + def cow_block_if_not_appendable(self, block: Block) -> Optional["BlockId"]: + """NOTE: This should not be used besides Block""" + pass + + @abstractmethod + def promote_to_immutable_block(self, block: Block) -> BlockId: + """NOTE: This should not be used besides Block""" + pass + + class NoFreeBlocksError(ValueError): + pass + + +class DeviceAwareBlockAllocator(ABC): + + @abstractmethod + def allocate_mutable(self, prev_block: Optional[Block], + device: Device) -> Block: + pass + + @abstractmethod + def allocate_immutable(self, prev_block: 
Optional[Block], + token_ids: List[int], device: Device) -> Block: + pass + + @abstractmethod + def get_num_free_blocks(self, device: Device) -> int: + pass + + @abstractmethod + def get_num_total_blocks(self, device: Device) -> int: + pass + + @abstractmethod + def free(self, block: Block) -> None: + pass + + @abstractmethod + def fork(self, last_block: Block) -> List[Block]: + pass + + @property + @abstractmethod + def all_block_ids(self) -> FrozenSet[int]: + pass + + @abstractmethod + def clear_copy_on_writes(self) -> Dict[int, List[int]]: + pass + + @abstractmethod + def mark_blocks_as_accessed(self, block_ids: List[int], + now: float) -> None: + pass + + @abstractmethod + def mark_blocks_as_computed(self, block_ids: List[int]) -> None: + pass + + @abstractmethod + def get_common_computed_block_ids( + self, seq_block_ids: List[List[int]]) -> List[int]: + pass diff --git a/vllm/core/block/naive_block.py b/vllm/core/block/naive_block.py new file mode 100644 index 0000000..a1b901b --- /dev/null +++ b/vllm/core/block/naive_block.py @@ -0,0 +1,318 @@ +from typing import Dict, FrozenSet, Iterable, List, Optional, Set + +from vllm.core.block.common import (CopyOnWriteTracker, RefCounter, + get_all_blocks_recursively) +from vllm.core.block.interfaces import Block, BlockAllocator, BlockId, Device + +Refcount = int + + +class NaiveBlockAllocator(BlockAllocator): + """A simple block allocator that manages blocks of memory without prefix + caching. + + Args: + create_block (Block.Factory): A factory function for creating new + blocks. This is used when a NaiveBlockAllocator is composed within + a prefix caching allocator -- the naive block allocator must + construct prefix caching blocks (but shouldn't know anything else + about them). + num_blocks (int): The total number of blocks to manage. + block_size (int): The size of each block in tokens. + block_ids (Optional[Iterable[int]], optional): An optional iterable of + block IDs. 
If not provided, block IDs will be assigned sequentially + from 0 to num_blocks - 1. + """ + + def __init__( + self, + create_block: Block.Factory, + num_blocks: int, + block_size: int, + block_ids: Optional[Iterable[int]] = None, + ): + if block_ids is None: + block_ids = range(num_blocks) + + self._free_block_indices: Set[BlockId] = set(block_ids) + self._all_block_indices = frozenset(block_ids) + assert len(self._all_block_indices) == num_blocks + + self._refcounter = RefCounter( + all_block_indices=self._free_block_indices) + self._create_block = create_block + self._block_size = block_size + + self._cow_tracker = CopyOnWriteTracker( + refcounter=self._refcounter.as_readonly(), + allocator=self, + ) + + def allocate_immutable(self, + prev_block: Optional[Block], + token_ids: List[int], + device: Optional[Device] = None) -> Block: + """Allocates a new immutable block with the given token IDs, linked to + the previous block. + + Args: + prev_block (Optional[Block]): The previous block in the sequence. If + None, then the block to be allocated is the first block in the + sequence. + token_ids (List[int]): The token IDs to be stored in the new block. + + Returns: + Block: The newly allocated immutable block. + """ + assert device is None + block = self.allocate_mutable(prev_block=prev_block) + block.append_token_ids(token_ids) + return block + + def allocate_mutable(self, + prev_block: Optional[Block], + device: Optional[Device] = None) -> Block: + """Allocates a new mutable block, linked to the previous block. + + Args: + prev_block (Optional[Block]): The previous block in the sequence. If + None, then the block to be allocated is the first block in the + sequence. + + Returns: + Block: The newly allocated mutable block. 
+ """ + assert device is None + block_id = self._allocate_new_block_id() + return self._create_block( + prev_block=prev_block, + token_ids=[], + block_id=block_id, + block_size=self._block_size, + allocator=self, + ) + + def free(self, block: Block) -> None: + assert block.block_id is not None + self._free_block_id(block.block_id) + + # Mark the block as having no allocation. + block.block_id = None + + def fork(self, last_block: Block) -> List[Block]: + """Creates a new sequence of blocks that shares the same underlying + memory as the original sequence. + + Args: + last_block (Block): The last block in the original sequence. + + Returns: + List[Block]: The new sequence of blocks that shares the same memory + as the original sequence. + """ + source_blocks = get_all_blocks_recursively(last_block) + + forked_blocks = [] + prev_block = None + for block in source_blocks: + + # Increment refcount for each block. + assert block.block_id is not None + refcount = self._refcounter.incr(block.block_id) + assert refcount != 1, "can't fork free'd block" + + forked_blocks.append( + self._create_block( + prev_block=prev_block, + token_ids=block.token_ids, + block_id=block.block_id, + block_size=self._block_size, + allocator=self, + )) + prev_block = forked_blocks[-1] + + return forked_blocks + + def get_num_free_blocks(self) -> int: + return len(self._free_block_indices) + + def get_num_total_blocks(self) -> int: + return len(self._all_block_indices) + + def _allocate_new_block_id(self) -> BlockId: + if not self._free_block_indices: + raise BlockAllocator.NoFreeBlocksError() + + block_id = next(iter(self._free_block_indices)) + self._refcounter.incr(block_id) + self._free_block_indices.remove(block_id) + return block_id + + def _free_block_id(self, block_id: BlockId) -> None: + refcount = self._refcounter.decr(block_id) + if refcount == 0: + self._free_block_indices.add(block_id) + + @property + def refcounter(self): + return self._refcounter + + @property + def 
all_block_ids(self) -> FrozenSet[int]: + return self._all_block_indices + + def cow_block_if_not_appendable(self, block: Block) -> Optional[BlockId]: + """Performs a copy-on-write operation on the given block if it is not + appendable. + + Args: + block (Block): The block to check for copy-on-write. + + Returns: + Optional[BlockId]: The block index of the new block if a copy-on + -write operation was performed, or the original block index if + no copy-on-write was necessary. + """ + return self._cow_tracker.cow_block_if_not_appendable(block) + + def clear_copy_on_writes(self) -> Dict[BlockId, List[BlockId]]: + """Returns the copy-on-write source->destination mapping and clears it. + + Returns: + Dict[BlockId, List[BlockId]]: A dictionary mapping source + block indices to lists of destination block indices. + """ + return self._cow_tracker.clear_cows() + + def mark_blocks_as_accessed(self, block_ids: List[int], + now: float) -> None: + """Mark blocks as accessed, used in prefix caching. + + Since the naive allocator does not implement prefix caching, we do + nothing. + """ + pass + + def mark_blocks_as_computed(self, block_ids: List[int]) -> None: + """Mark blocks as computed, used in prefix caching. + + Since the naive allocator does not implement prefix caching, we do + nothing. + """ + pass + + def get_common_computed_block_ids( + self, seq_block_ids: List[List[int]]) -> List[int]: + """Determine blocks that can be skipped in prefill. + + Since the naive allocator does not support prefix caching, always return + an empty list. + """ + return [] + + def promote_to_immutable_block(self, block: Block) -> BlockId: + raise NotImplementedError + + +class NaiveBlock(Block): + """An implementation of the Block class that does not support prefix + caching. + + The NaiveBlock class represents a block of token IDs with a fixed size. It + provides methods for appending token IDs to the block and manages copy-on + -write operations when necessary. 
+ + Args: + prev_block (Block): The previous block in the sequence. + token_ids (List[int]): The initial token IDs to be stored in the block. + block_size (int): The maximum number of token IDs that can be stored in + the block. + allocator (BlockAllocator): The block allocator associated with this + block. + block_id (Optional[int], optional): The physical block index + of this block. Defaults to None, which means no allocation has been + made. + _cow_target (Optional[Block], optional): The copy-on-write target block. + If not provided, it defaults to self. + """ + + def __init__(self, + prev_block: Optional[Block], + token_ids: List[int], + block_size: int, + allocator: BlockAllocator, + block_id: Optional[int] = None, + _cow_target: Optional[Block] = None): + self._token_ids: List[int] = [] + self._block_size = block_size + self._prev_block = prev_block + self._block_id = block_id + self._allocator = allocator + self._cow_target = _cow_target if _cow_target is not None else self + + self._append_token_ids_no_cow(token_ids) + + def append_token_ids(self, token_ids: List[int]) -> None: + """Appends the given token IDs to the block, instructing the allocator + to perform a copy-on-write if necessary. + + Args: + token_ids (List[int]): The token IDs to be appended to the block. 
+ """ + self._append_token_ids_no_cow(token_ids) + + if self._block_id is not None: + self._block_id = (self._allocator.cow_block_if_not_appendable( + self._cow_target)) + + def _append_token_ids_no_cow(self, token_ids: List[int]) -> None: + assert self.num_empty_slots >= len(token_ids) + self._token_ids.extend(token_ids) + + @property + def computed(self) -> bool: + raise NotImplementedError + + @computed.setter + def computed(self, value) -> None: + raise NotImplementedError + + @property + def last_accessed(self) -> float: + raise NotImplementedError + + @last_accessed.setter + def last_accessed(self, last_accessed_ts: float): + raise NotImplementedError + + @property + def block_id(self) -> Optional[int]: + return self._block_id + + @block_id.setter + def block_id(self, value: Optional[int]) -> None: + self._block_id = value + + @property + def is_full(self) -> bool: + return self.num_empty_slots == 0 + + @property + def num_empty_slots(self) -> int: + return self._block_size - len(self._token_ids) + + @property + def token_ids(self) -> List[int]: + return self._token_ids + + @property + def block_size(self) -> int: + return self._block_size + + @property + def prev_block(self) -> Optional["Block"]: + return self._prev_block + + @property + def content_hash(self) -> Optional[int]: + return None diff --git a/vllm/core/block/prefix_caching_block.py b/vllm/core/block/prefix_caching_block.py new file mode 100644 index 0000000..4a37e8f --- /dev/null +++ b/vllm/core/block/prefix_caching_block.py @@ -0,0 +1,606 @@ +"""Token blocks.""" +from itertools import takewhile +from os.path import commonprefix +from typing import Dict, FrozenSet, Iterable, List, Optional + +from vllm.core.block.common import (CopyOnWriteTracker, + get_all_blocks_recursively) +from vllm.core.block.interfaces import Block, BlockAllocator, BlockId, Device +from vllm.core.block.naive_block import NaiveBlock, NaiveBlockAllocator +from vllm.core.evictor_v2 import EvictionPolicy, Evictor, make_evictor 
+ +PrefixHash = int + +# By default, we init our block access time as _DEFAULT_LAST_ACCESSED_TIME +# so that if we find one block is still hold _DEFAULT_LAST_ACCESSED_TIME, +# then we know this block hasn't been accessed yet. +_DEFAULT_LAST_ACCESSED_TIME = -1 + + +class PrefixCachingBlockAllocator(BlockAllocator): + """A block allocator that implements prefix caching. + + The PrefixCachingBlockAllocator maintains a cache of blocks based on their + content hash. It reuses blocks with the same content hash to avoid redundant + memory allocation. The allocator also supports copy-on-write operations. + + Args: + num_blocks (int): The total number of blocks to manage. + block_size (int): The size of each block in tokens. + block_ids(Optional[Iterable[int]], optional): An optional iterable of + block IDs. If not provided, block IDs will be assigned sequentially + from 0 to num_blocks - 1. + """ + + def __init__( + self, + num_blocks: int, + block_size: int, + block_ids: Optional[Iterable[int]] = None, + eviction_policy: EvictionPolicy = EvictionPolicy.LRU, + ): + # A mapping of prefix hash to block index. All blocks which have a + # prefix hash will be in this dict, even if they have refcount 0. + self._cached_blocks: Dict[PrefixHash, BlockId] = {} + + # A mapping of blockId to Block to track those cached blocks + self._blocks: Dict[BlockId, Block] = {} + + # An allocator for blocks that do not have prefix hashes. + self._hashless_allocator = NaiveBlockAllocator( + create_block=self._create_block, # type: ignore + num_blocks=num_blocks, + block_size=block_size, + block_ids=block_ids, + ) + + self._block_size = block_size + + # Evitor used to maintain how we want to handle those computed blocks + # if we find memory pressure is high. + self.evictor: Evictor = make_evictor(eviction_policy) + + # We share the refcounter between allocators. This allows us to promote + # blocks originally allocated in the hashless allocator to immutable + # blocks. 
+ self._refcounter = self._hashless_allocator.refcounter + + self._cow_tracker = CopyOnWriteTracker( + refcounter=self._refcounter.as_readonly(), + allocator=self, + ) + + # Implements Block.Factory. + def _create_block( + self, + prev_block: Optional[Block], + token_ids: List[int], + block_size: int, + allocator: BlockAllocator, + block_id: Optional[int] = None, + computed: bool = False, + ) -> Block: + # Bind block to self. + allocator = self + + return PrefixCachingBlock( + prev_block=prev_block, + token_ids=token_ids, + block_size=block_size, + block_id=block_id, + prefix_caching_allocator=allocator, + computed=computed, + ) + + def allocate_immutable(self, + prev_block: Optional[Block], + token_ids: List[int], + device: Optional[Device] = None) -> Block: + """Allocates an immutable block with the given token IDs, reusing cached + blocks if possible. + + Args: + prev_block (Optional[Block]): The previous block in the sequence. + token_ids (List[int]): The token IDs to be stored in the block. + + Returns: + Block: The allocated immutable block. + """ + assert device is None + assert_prefix_caching_block_or_none(prev_block) + + block = self._create_block( + prev_block=prev_block, + token_ids=token_ids, + block_size=self._block_size, + allocator=self, + ) + assert block.content_hash is not None + + cached_block_id = self._cached_blocks.get(block.content_hash, None) + if cached_block_id is not None: + block.block_id = cached_block_id + self._incr_refcount_cached_block(block, block.block_id) + return block + + block = self.allocate_mutable(prev_block) + block.append_token_ids(token_ids) + assert block.content_hash is not None + + return block + + def allocate_mutable(self, + prev_block: Optional[Block], + device: Optional[Device] = None) -> Block: + """Allocates a mutable block. If there are no free blocks, this will + evict unused cached blocks. + + Args: + prev_block (Block): The previous block in the sequence. + None is not allowed unlike it is super class. 
+ + Returns: + Block: The allocated mutable block. + """ + assert device is None + assert_prefix_caching_block_or_none(prev_block) + + try: + block = self._hashless_allocator.allocate_mutable( + prev_block=prev_block) + + assert block.block_id not in self._blocks + assert block.block_id is not None + self._blocks[block.block_id] = block + return block + except BlockAllocator.NoFreeBlocksError: + # We must check the unused cached blocks before raising OOM. + pass + + # If the evictor has blocks available for eviction, evict a block + # and return it. + if self.evictor.num_blocks > 0: + block_id, content_hash_to_evict = self.evictor.evict() + + # Here we may have scenario that several blocks have + # the same content hash, but due to the latter coming block + # is coming from mutable to immutable path, their physical + # block is added into evictor. + # However in this case, we shall not pop the _cached_blocks, + # as the same content is still used by others, which means + # we need to check ref before decide to pop the list. + + _block_id = self._cached_blocks[content_hash_to_evict] + refcount = self._refcounter.get(_block_id) + if refcount == 1: + self._cached_blocks.pop(content_hash_to_evict) + assert _block_id == block_id + + self._refcounter.incr(block_id) + + # the block comes from evictor already contain computed result + block = self._create_block( + prev_block=prev_block, + token_ids=[], + block_size=self._block_size, + allocator=self, + block_id=block_id, + computed=True, + ) + assert block.content_hash is None + + assert block.block_id not in self._blocks + assert block.block_id is not None + self._blocks[block.block_id] = block + return block + + # No block available in hashless allocator, nor in unused cache blocks. 
+ raise BlockAllocator.NoFreeBlocksError() + + def _incr_refcount_cached_block(self, block: Block, + block_id: BlockId) -> None: + # since block is already computed, mark it + block.computed = True + + refcount = self._refcounter.incr(block_id) + if refcount == 1: + # if block get referred, then it shall not be in evictor + # and put it into _blocks for tracking + if block_id in self.evictor: + self.evictor.remove(block_id) + self._blocks[block_id] = block + + def free(self, block: Block) -> None: + """Decrement the refcount of the block. If the decremented refcount is + zero, store the block in the freelist. + + If the block has a content hash (meaning it is immutable), then we will + keep the block around in case future allocations require it. + """ + assert (block.block_id + is not None), "freeing unallocated block is undefined" + + self._free_block_id_for_block(block.block_id, block) + + block.block_id = None + + def _free_block_id_for_block(self, block_id: BlockId, + block: Block) -> None: + assert isinstance(block, PrefixCachingBlock) + + if block.content_hash is None: + refcount = self._refcounter.get(block_id) + # We have fork case where block would get more than one ref, + # so we cannot free it from tracking if ref cnt large than 1 + if refcount <= 1: + assert block.block_id is not None + del self._blocks[block.block_id] + return self._hashless_allocator.free(block) + + refcount = self._refcounter.decr(block_id) + + # If no longer used, add the block to the evictor. + if refcount == 0: + assert block.content_hash in self._cached_blocks + assert block.block_id is not None + del self._blocks[block.block_id] + self.evictor.add(block.block_id, block.content_hash, + block.num_tokens_total, block.last_accessed) + + def fork(self, last_block: Block) -> List[Block]: + """Creates a new sequence of blocks that shares the same underlying + memory as the original sequence. + + Args: + last_block (Block): The last block in the original sequence. 
+ + Returns: + List[Block]: The new sequence of blocks that shares the same memory + as the original sequence. + """ + source_blocks = get_all_blocks_recursively(last_block) + + forked_blocks = [] + prev_block = None + for block in source_blocks: + refcount = self._refcounter.incr(block.block_id) + assert refcount != 1, "can't fork free'd block" + + forked_blocks.append( + self._create_block( + prev_block=prev_block, + token_ids=block.token_ids, + block_id=block.block_id, + block_size=self._block_size, + allocator=self, + )) + prev_block = forked_blocks[-1] + + return forked_blocks + + def get_num_free_blocks(self, device: Optional[Device] = None) -> int: + assert device is None + # The number of free blocks is the number of hashless free blocks + # plus the number of blocks evictor could free from its list. + return self._hashless_allocator.get_num_free_blocks( + ) + self.evictor.num_blocks + + def get_num_total_blocks(self) -> int: + return self._hashless_allocator.get_num_total_blocks() + + @property + def all_block_ids(self) -> FrozenSet[int]: + return self._hashless_allocator.all_block_ids + + def promote_to_immutable_block(self, block: Block) -> BlockId: + """Once a mutable block is full, it can be promoted to an immutable + block. This means that its content can be referenced by future blocks + having the same prefix. + + Note that if we already have a cached block with the same content, we + will replace the newly-promoted block's mapping with the existing cached + block. + + Args: + block: The mutable block to be promoted. + + Returns: + BlockId: Either the original block index, or the block index of + the previously cached block matching the same content. + """ + assert block.content_hash is not None + assert block.block_id is not None + assert self._refcounter.get(block.block_id) > 0 + + # If the content hash does not have a corresponding cached block, + # set this block as the cached block. 
+ if block.content_hash not in self._cached_blocks: + self._cached_blocks[block.content_hash] = block.block_id + else: + self._free_block_id_for_block(block.block_id, block) + self._incr_refcount_cached_block( + block, self._cached_blocks[block.content_hash]) + + return self._cached_blocks[block.content_hash] + + def cow_block_if_not_appendable(self, block: Block) -> Optional[BlockId]: + """Performs a copy-on-write operation on the given block if it is not + appendable. + + Args: + block (Block): The block to check for copy-on-write. + + Returns: + Optional[BlockId]: The block index of the new block if a copy-on + -write operation was performed, or the original block index if + no copy-on-write was necessary. + """ + return self._cow_tracker.cow_block_if_not_appendable(block) + + def clear_copy_on_writes(self) -> Dict[BlockId, List[BlockId]]: + """Returns the copy-on-write source->destination mapping and clears it. + + Returns: + Dict[BlockId, List[BlockId]]: A dictionary mapping source + block indices to lists of destination block indices. + """ + return self._cow_tracker.clear_cows() + + def mark_blocks_as_accessed(self, block_ids: List[int], + now: float) -> None: + """Mark blocks as accessed, used in prefix caching. + + If the block is added into evictor, we need to update corresponding + info in evictor's metadata. 
+ """ + + for block_id in block_ids: + if block_id in self._blocks: + self._blocks[block_id].last_accessed = now + elif block_id in self.evictor: + self.evictor.update(block_id, now) + else: + raise ValueError( + "Mark block as accessed which is not belonged to GPU") + + def mark_blocks_as_computed(self, block_ids: List[int]) -> None: + """Mark blocks as computed, used in prefix caching.""" + + for block_id in block_ids: + if block_id in self._blocks: + # only those full block is valid for prefix caching + if self._blocks[block_id].is_full: + self._blocks[block_id].computed = True + elif block_id not in self.evictor: + raise ValueError(f"Mark {block_id=} as computed which " + "is not belonged to GPU") + + def block_is_computed(self, block_id: int) -> bool: + if block_id in self._blocks: + return self._blocks[block_id].computed + else: + return block_id in self.evictor + + def get_common_computed_block_ids( + self, seq_block_ids: List[List[int]]) -> List[int]: + """Return the block ids that are common for a given sequence group. + + Only those blocks that are immutable and already be marked + compyted would be taken consideration. + """ + + # NOTE We exclude the last block to avoid the case where the entire + # prompt is cached. This would cause erroneous behavior in model + # runner. + + ids_list = [ + list( + takewhile(lambda block_id: self.block_is_computed(block_id), + seq[:-1])) for seq in seq_block_ids + ] + # It returns a list of int although type annotation says list of string. + return commonprefix([ + ids for ids in ids_list # type: ignore + if ids != [] + ]) + + +class PrefixCachingBlock(Block): + """A block implementation that supports prefix caching. + + The PrefixCachingBlock class represents a block of token IDs with prefix + caching capabilities. It wraps a NaiveBlock internally and provides + additional functionality for content hashing and promoting immutable blocks + with the prefix caching allocator. 
class PrefixCachingBlock(Block):
    """A block implementation that supports prefix caching.

    Wraps a NaiveBlock internally and adds content-based hashing so that
    full, immutable blocks can be deduplicated and shared between sequences
    by the prefix caching allocator.

    Args:
        prev_block: The previous block in the sequence, or None if this is
            the first block.
        token_ids: The initial token IDs to be stored in the block.
        block_size: The maximum number of token IDs that can be stored in
            the block.
        prefix_caching_allocator: The prefix caching block allocator
            associated with this block.
        block_id: The physical block index of this block. Defaults to None.
        computed: Whether this block's KV content has already been computed.
    """

    def __init__(
        self,
        prev_block: Optional[Block],
        token_ids: List[int],
        block_size: int,
        prefix_caching_allocator: BlockAllocator,
        block_id: Optional[int] = None,
        computed: bool = False,
    ):
        assert isinstance(prefix_caching_allocator,
                          PrefixCachingBlockAllocator), (
                              "Currently this class is only tested with "
                              "PrefixCachingBlockAllocator.")
        assert_prefix_caching_block_or_none(prev_block)

        self._prev_block = prev_block
        # Lazily computed; see content_hash / num_tokens_total.
        self._cached_content_hash: Optional[int] = None
        self._cached_num_tokens_total: Optional[int] = None
        self._prefix_caching_allocator = prefix_caching_allocator
        self._last_accessed: float = _DEFAULT_LAST_ACCESSED_TIME
        self._computed = computed

        # The wrapped naive block handles token storage and copy-on-write.
        self._block = NaiveBlock(
            prev_block=prev_block,
            token_ids=token_ids,
            block_size=block_size,
            block_id=block_id,
            allocator=prefix_caching_allocator,
            _cow_target=self,
        )

    @property
    def computed(self) -> bool:
        return self._computed

    @computed.setter
    def computed(self, value: bool) -> None:
        self._computed = value

    @property
    def last_accessed(self) -> float:
        return self._last_accessed

    @last_accessed.setter
    def last_accessed(self, last_accessed_ts: float) -> None:
        self._last_accessed = last_accessed_ts

    def append_token_ids(self, token_ids: List[int]) -> None:
        """Appends the given token IDs to the block and registers the block
        as immutable if the block becomes full.

        Internally, the naive block handles CoW.

        Args:
            token_ids: The token IDs to be appended to the block.
        """
        assert token_ids

        # naive block handles CoW.
        self._block.append_token_ids(token_ids)

        # If the content hash is present, then the block can be made
        # immutable. Register ourselves with the allocator, potentially
        # replacing the physical block index.
        if self.content_hash is not None:
            self.block_id = (self._prefix_caching_allocator.
                             promote_to_immutable_block(self))

    @property
    def block_id(self) -> Optional[int]:
        return self._block.block_id

    @block_id.setter
    def block_id(self, value: Optional[int]) -> None:
        self._block.block_id = value

    @property
    def is_full(self) -> bool:
        return self._block.is_full

    @property
    def num_empty_slots(self) -> int:
        return self._block.num_empty_slots

    @property
    def num_tokens_total(self) -> int:
        """Return the total number of tokens in this block and all of its
        predecessors.

        BUGFIX: the previous implementation cached the total on the first
        call even when this block was not yet full; a later
        append_token_ids() would then make the cached value stale. We now
        only cache once the block is full (its token count can no longer
        change).
        """
        if self._cached_num_tokens_total is not None:
            return self._cached_num_tokens_total

        # TODO: walking the chain is O(N) per call (O(N^2) overall before
        # the cache kicks in); a running total maintained on append would
        # make this O(1).
        total = 0
        _block: Optional[Block] = self
        while _block is not None:
            total += len(_block.token_ids)
            _block = _block.prev_block

        # Only cache once the count can no longer change.
        if self.is_full:
            self._cached_num_tokens_total = total
        return total

    @property
    def block_size(self) -> int:
        return self._block.block_size

    @property
    def token_ids(self) -> List[int]:
        return self._block.token_ids

    @property
    def prev_block(self) -> Optional[Block]:
        return self._prev_block

    @property
    def content_hash(self) -> Optional[int]:
        """Return the content-based hash of the current block, or None if it
        is not yet defined.

        For the content-based hash to be defined, the current block must be
        full and every preceding block must already have a hash.
        """
        # If the hash is already computed, return it.
        if self._cached_content_hash is not None:
            return self._cached_content_hash

        # We cannot compute a hash for the current block because it is not
        # full.
        if not self.is_full:
            return None

        is_first_block = self._prev_block is None
        prev_block_hash = (
            None if is_first_block else
            self._prev_block.content_hash  # type: ignore
        )

        # Previous block exists but does not yet have a hash.
        # Return no hash in this case.
        if prev_block_hash is None and not is_first_block:
            return None

        self._cached_content_hash = PrefixCachingBlock.hash_block_tokens(
            is_first_block,
            prev_block_hash,
            cur_block_token_ids=self.token_ids)
        return self._cached_content_hash

    @staticmethod
    def hash_block_tokens(is_first_block: bool, prev_block_hash: Optional[int],
                          cur_block_token_ids: List[int]) -> int:
        """Computes a hash value corresponding to the contents of a block and
        the contents of the preceding block(s). The hash value is used for
        prefix caching.

        NOTE: Content-based hashing does not yet support LoRA.

        Parameters:
        - is_first_block (bool): A flag indicating if the block is the first
          in the sequence.
        - prev_block_hash (Optional[int]): The hash of the previous block.
          None if this is the first block.
        - cur_block_token_ids (List[int]): A list of token ids in the current
          block. The current block is assumed to be full.

        Returns:
        - int: The computed hash value for the block.
        """
        assert (prev_block_hash is None) == is_first_block
        return hash((is_first_block, prev_block_hash, *cur_block_token_ids))


def assert_prefix_caching_block_or_none(block: Optional[Block]):
    # None is allowed (first block in a chain); anything else must be a
    # PrefixCachingBlock so that content_hash chaining works.
    if block is None:
        return
    assert isinstance(block, PrefixCachingBlock)
+ """ + + @abstractmethod + def __init__(self, + device: Device, + block_size: int, + num_blocks: int, + eviction_policy: EvictionPolicy = EvictionPolicy.LRU): + pass + + @abstractmethod + def allocate(self, + block_hash: Optional[int] = None, + num_hashed_tokens: int = 0) -> PhysicalTokenBlock: + pass + + @abstractmethod + def free(self, block: PhysicalTokenBlock) -> None: + pass + + @abstractmethod + def get_num_free_blocks(self) -> int: + pass + + @abstractmethod + def get_num_total_blocks(self) -> int: + pass + + @abstractmethod + def contains_block(self, block_hash: int) -> bool: + pass + + @abstractmethod + def update_hash(self, block_hash: int, block: PhysicalTokenBlock): + pass + + +class CachedBlockAllocator(BlockAllocatorBase): + """Manages free physical token blocks for a device. + + The allocator maintains a list of free blocks and allocates a block when + requested. When a block is freed, its reference count is decremented. If + the reference count becomes zero, the block is added back to the free list. 
+ """ + + def __init__(self, + device: Device, + block_size: int, + num_blocks: int, + eviction_policy: EvictionPolicy = EvictionPolicy.LRU) -> None: + self.device = device + self.block_size = block_size + self.num_blocks = num_blocks + + self.current_num_blocks = 0 + self.cached_blocks: Dict[int, PhysicalTokenBlock] = {} + + self.evictor: Evictor = make_evictor(eviction_policy) + + self.default_hash_ctr = count() + + def allocate_block(self, block_hash: int, + num_hashed_tokens: int) -> PhysicalTokenBlock: + if self.current_num_blocks == self.num_blocks: + block = self.evictor.evict() + block.block_hash = block_hash + block.num_hashed_tokens = num_hashed_tokens + return block + block = PhysicalTokenBlock(device=self.device, + block_number=self.current_num_blocks, + block_size=self.block_size, + block_hash=block_hash, + num_hashed_tokens=num_hashed_tokens) + self.current_num_blocks += 1 + return block + + def allocate(self, + block_hash: Optional[int] = None, + num_hashed_tokens: int = 0) -> PhysicalTokenBlock: + if block_hash is None: + block_hash = next(self.default_hash_ctr) + if block_hash in self.evictor: + assert block_hash not in self.cached_blocks + block = self.evictor.remove(block_hash) + assert block.ref_count == 0 + self.cached_blocks[block_hash] = block + block.ref_count += 1 + assert block.block_hash == block_hash + return block + if block_hash not in self.cached_blocks: + self.cached_blocks[block_hash] = self.allocate_block( + block_hash, num_hashed_tokens) + block = self.cached_blocks[block_hash] + assert block.block_hash == block_hash + block.ref_count += 1 + return block + + def free(self, block: PhysicalTokenBlock) -> None: + if block.ref_count == 0: + raise ValueError(f"Double free! 
{block} is already freed.") + block.ref_count -= 1 + if block.ref_count == 0: + assert block.block_hash not in self.evictor + self.evictor.add(block) + + # Remove the block from the cached_blocks + del self.cached_blocks[block.block_hash] + + def get_num_free_blocks(self) -> int: + return (self.num_blocks - self.current_num_blocks + + self.evictor.num_blocks) + + def get_num_total_blocks(self) -> int: + return self.num_blocks + + def contains_block(self, block_hash: int) -> bool: + return block_hash in self.cached_blocks or block_hash in self.evictor + + def update_hash(self, block_hash: int, block: PhysicalTokenBlock): + # Update the hash of block and the cached_blocks dictionary. + assert not self.contains_block(block_hash) + old_hash = block.block_hash + block.block_hash = block_hash + del self.cached_blocks[old_hash] + self.cached_blocks[block_hash] = block + + +class UncachedBlockAllocator(BlockAllocatorBase): + """Manages free physical token blocks for a device. + + The allocator maintains a list of free blocks and allocates a block when + requested. When a block is freed, its reference count is decremented. If + the reference count becomes zero, the block is added back to the free list. + """ + + def __init__( + self, + device: Device, + block_size: int, + num_blocks: int, + ) -> None: + self.device = device + self.block_size = block_size + self.num_blocks = num_blocks + + # Initialize the free blocks. + self.free_blocks: BlockTable = [] + for i in range(num_blocks): + block = PhysicalTokenBlock(device=device, + block_number=i, + block_size=block_size, + block_hash=-1, + num_hashed_tokens=0) + self.free_blocks.append(block) + + def allocate(self, + block_hash: Optional[int] = None, + num_hashed_tokens: int = 0) -> PhysicalTokenBlock: + if not self.free_blocks: + raise ValueError("Out of memory! 
No free blocks are available.") + block = self.free_blocks.pop() + block.ref_count = 1 + return block + + def free(self, block: PhysicalTokenBlock) -> None: + if block.ref_count == 0: + raise ValueError(f"Double free! {block} is already freed.") + block.ref_count -= 1 + if block.ref_count == 0: + self.free_blocks.append(block) + + def get_num_free_blocks(self) -> int: + return len(self.free_blocks) + + def get_num_total_blocks(self) -> int: + return self.num_blocks + + def contains_block(self, block_hash: int) -> bool: + raise NotImplementedError( + "Invalid codepath for uncached block allocator.") + + def update_hash(self, block_hash: int, block: PhysicalTokenBlock): + raise NotImplementedError( + "Invalid codepath for uncached block allocator.") + + +class BlockSpaceManagerV1(BlockSpaceManager): + """Manages the mapping between logical and physical token blocks.""" + + def __init__( + self, + block_size: int, + num_gpu_blocks: int, + num_cpu_blocks: int, + watermark: float = 0.01, + sliding_window: Optional[int] = None, + enable_caching: bool = False, + ) -> None: + self.block_size = block_size + self.num_total_gpu_blocks = num_gpu_blocks + self.num_total_cpu_blocks = num_cpu_blocks + + if enable_caching and sliding_window is not None: + raise NotImplementedError( + "Sliding window is not allowed with prefix caching enabled!") + + self.block_sliding_window = None + if sliding_window is not None: + # Round up to nearest block size to regularize sliding window + # allocation sizes. 
class BlockSpaceManagerV1(BlockSpaceManager):
    """Manages the mapping between logical and physical token blocks."""

    def __init__(
        self,
        block_size: int,
        num_gpu_blocks: int,
        num_cpu_blocks: int,
        watermark: float = 0.01,
        sliding_window: Optional[int] = None,
        enable_caching: bool = False,
    ) -> None:
        self.block_size = block_size
        self.num_total_gpu_blocks = num_gpu_blocks
        self.num_total_cpu_blocks = num_cpu_blocks

        if enable_caching and sliding_window is not None:
            raise NotImplementedError(
                "Sliding window is not allowed with prefix caching enabled!")

        self.block_sliding_window = None
        if sliding_window is not None:
            # Round up to nearest block size to regularize sliding window
            # allocation sizes.
            self.block_sliding_window = math.ceil(sliding_window / block_size)

        self.watermark = watermark
        assert watermark >= 0.0

        self.enable_caching = enable_caching

        # Blocks kept free as headroom to avoid frequent cache eviction.
        self.watermark_blocks = int(watermark * num_gpu_blocks)

        if self.enable_caching:
            logger.info("Automatic prefix caching is enabled.")
            self.gpu_allocator: BlockAllocatorBase = CachedBlockAllocator(
                Device.GPU, block_size, num_gpu_blocks)
            self.cpu_allocator: BlockAllocatorBase = CachedBlockAllocator(
                Device.CPU, block_size, num_cpu_blocks)
        else:
            self.gpu_allocator = UncachedBlockAllocator(
                Device.GPU, block_size, num_gpu_blocks)
            self.cpu_allocator = UncachedBlockAllocator(
                Device.CPU, block_size, num_cpu_blocks)
        # Mapping: seq_id -> BlockTable.
        self.block_tables: Dict[int, BlockTable] = {}

    def can_allocate(self, seq_group: SequenceGroup) -> AllocStatus:
        """Check whether the group's prompt blocks fit in GPU memory."""
        # FIXME(woosuk): Here we assume that all sequences in the group share
        # the same prompt. This may not be true for preempted sequences.
        seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0]
        num_required_blocks = len(seq.logical_token_blocks)

        if self.block_sliding_window is not None:
            num_required_blocks = min(num_required_blocks,
                                      self.block_sliding_window)
        num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()

        # Use watermark to avoid frequent cache eviction.
        if (self.num_total_gpu_blocks - num_required_blocks <
                self.watermark_blocks):
            return AllocStatus.NEVER
        if num_free_gpu_blocks - num_required_blocks >= self.watermark_blocks:
            return AllocStatus.OK
        else:
            return AllocStatus.LATER

    def allocate(self, seq_group: SequenceGroup) -> None:
        """Allocate physical blocks for the group's prompt tokens and assign
        the (shared) block table to every waiting sequence."""
        # NOTE: Here we assume that all sequences in the group have the same
        # prompt.
        seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0]

        # Allocate new physical token blocks that will store the prompt
        # tokens.
        num_prompt_blocks = len(seq.logical_token_blocks)

        block_table: BlockTable = []
        for logical_idx in range(num_prompt_blocks):
            if (self.block_sliding_window is not None
                    and logical_idx >= self.block_sliding_window):
                # Sliding window: cycle back over the window's blocks.
                block = block_table[logical_idx % self.block_sliding_window]
                # Set the reference counts of the token blocks.
                block.ref_count = seq_group.num_seqs()
            elif self.enable_caching:
                # Hash-addressed allocation enables prefix sharing.
                block = self.gpu_allocator.allocate(
                    seq.hash_of_block(logical_idx),
                    seq.num_hashed_tokens_of_block(logical_idx))
            else:
                block = self.gpu_allocator.allocate()
                # Set the reference counts of the token blocks.
                block.ref_count = seq_group.num_seqs()
            block_table.append(block)

        # Assign the block table for each sequence.
        for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
            self.block_tables[seq.seq_id] = block_table.copy()

    def can_append_slots(self,
                         seq_group: SequenceGroup,
                         num_lookahead_slots: int = 0) -> bool:
        assert (num_lookahead_slots == 0
                ), "lookahead allocation not supported in BlockSpaceManagerV1"

        # Simple heuristic: If there is at least one free block
        # for each sequence, we can append.
        num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()
        num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING)
        return num_seqs <= num_free_gpu_blocks

    def _promote_last_block(
        self,
        seq: Sequence,
        last_block: PhysicalTokenBlock,
    ) -> PhysicalTokenBlock:
        """Rekey the (now full) last block under its content hash so other
        sequences can share it; dedupe against an existing cached block."""
        assert self.enable_caching

        # Compute a new hash for the block so that it can be shared by other
        # Sequences
        new_hash = seq.hash_of_block(len(seq.logical_token_blocks) - 1)

        # if new_hash is already in the cached table, then free last_block
        # and return the cached version
        if self.gpu_allocator.contains_block(new_hash):
            self.gpu_allocator.free(last_block)
            return self.gpu_allocator.allocate(new_hash)
        else:
            self.gpu_allocator.update_hash(new_hash, last_block)
            return last_block

    def _is_last_block_full(
        self,
        seq: Sequence,
    ) -> bool:
        # True when the sequence length lands exactly on a block boundary.
        token_ids_len = seq.data.get_len()
        return token_ids_len > 0 and token_ids_len % seq.block_size == 0

    def _maybe_promote_last_block(
        self,
        seq: Sequence,
        last_block: PhysicalTokenBlock,
    ) -> PhysicalTokenBlock:
        if self._is_last_block_full(seq):
            return self._promote_last_block(seq, last_block)
        else:
            return last_block

    def _allocate_last_physical_block(
        self,
        seq: Sequence,
    ) -> PhysicalTokenBlock:
        """Allocate the physical block to be appended for `seq`."""
        # Called before a new block is appended.
        # This is in charge of allocating a new physical block (to be
        # appended).

        # None if the last block is not full. Otherwise, we set it to the
        # content hash.
        if not self.enable_caching:
            return self.gpu_allocator.allocate()
        block_hash: Optional[int] = None
        if (self._is_last_block_full(seq)):
            block_hash = seq.hash_of_block(len(seq.logical_token_blocks) - 1)
        num_hashed_tokens = seq.num_hashed_tokens_of_block(
            len(seq.logical_token_blocks) - 1)

        # num_hashed_tokens is used to compute future hashes
        # (e.g. in the hashing function, it is used to ask the sequence for
        # prefix tokens)
        new_block = self.gpu_allocator.allocate(block_hash, num_hashed_tokens)

        # If the block hash is None, then the block is not full.
        # If the block is not full, then we expect it to have a refcount of 1.
        if block_hash is None:
            assert new_block.ref_count == 1
        return new_block

    def append_slots(
        self,
        seq: Sequence,
        num_lookahead_slots: int = 0,
    ) -> Dict[int, List[int]]:
        """Allocate a physical slot for a new token.

        Returns a mapping of source block number -> list of destination
        block numbers for any copy-on-write triggered.
        """
        logical_blocks = seq.logical_token_blocks
        block_table = self.block_tables[seq.seq_id]
        # If we need to allocate a new physical block
        if len(block_table) < len(logical_blocks):
            # Currently this code only supports adding one physical block
            assert len(block_table) == len(logical_blocks) - 1

            if (self.block_sliding_window
                    and len(block_table) >= self.block_sliding_window):
                # reuse a block
                block_table.append(block_table[len(block_table) %
                                               self.block_sliding_window])
            else:
                # The sequence has a new logical block.
                # Allocate a new physical block.
                new_block = self._allocate_last_physical_block(seq)
                block_table.append(new_block)
                return {}

        # We want to append the token to the last physical block.
        last_block = block_table[-1]
        assert last_block.device == Device.GPU
        if last_block.ref_count == 1:
            # Not shared with other sequences. Appendable.
            if self.enable_caching:
                # If the last block is now complete, we may reuse an old block
                # to save memory.
                maybe_new_block = self._maybe_promote_last_block(
                    seq, last_block)
                block_table[-1] = maybe_new_block
            return {}
        else:
            # The last block is shared with other sequences.
            # Copy on Write: Allocate a new block and copy the tokens.
            new_block = self._allocate_last_physical_block(seq)

            block_table[-1] = new_block
            self.gpu_allocator.free(last_block)
            return {last_block.block_number: [new_block.block_number]}

    def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None:
        """Share the parent's block table with the child (copy-on-write)."""
        # NOTE: fork does not allocate a new physical block.
        # Thus, it is always safe from OOM.
        src_block_table = self.block_tables[parent_seq.seq_id]
        self.block_tables[child_seq.seq_id] = src_block_table.copy()
        # When using a sliding window, blocks will be eventually reused.
        # In this case the block tables will contain repeated blocks.
        # When forking, we must make sure that each block's `ref_count`
        # is only incremented by one, so we deduplicate them by wrapping
        # them in a set.
        for block in set(src_block_table):
            block.ref_count += 1

    def _get_physical_blocks(
            self, seq_group: SequenceGroup) -> List[PhysicalTokenBlock]:
        """Deduplicated physical blocks used by the group's live sequences."""
        # NOTE: Here, we assume that the physical blocks are only shared by
        # the sequences in the same group.
        blocks: Set[PhysicalTokenBlock] = set()
        for seq in seq_group.get_seqs():
            if seq.is_finished():
                continue
            blocks.update(self.block_tables[seq.seq_id])
        return list(blocks)

    def can_swap_in(self,
                    seq_group: SequenceGroup,
                    num_lookahead_slots: int = 0) -> AllocStatus:
        assert (num_lookahead_slots == 0
                ), "BlockSpaceManagerV1 does not support lookahead allocation"
        blocks = self._get_physical_blocks(seq_group)
        num_swapped_seqs = seq_group.num_seqs(status=SequenceStatus.SWAPPED)
        num_free_blocks = self.gpu_allocator.get_num_free_blocks()
        # NOTE: Conservatively, we assume that every sequence will allocate
        # at least one free block right after the swap-in.
        # NOTE: This should match the logic in can_append_slot().
        num_required_blocks = len(blocks) + num_swapped_seqs
        if self.gpu_allocator.get_num_total_blocks() < num_required_blocks:
            return AllocStatus.NEVER
        elif num_free_blocks - num_required_blocks >= self.watermark_blocks:
            return AllocStatus.OK
        else:
            return AllocStatus.LATER

    def swap_in(self,
                seq_group: SequenceGroup,
                num_lookahead_slots: int = 0) -> Dict[int, int]:
        """Move the group's blocks CPU -> GPU; returns block number mapping."""
        assert (num_lookahead_slots == 0
                ), "BlockSpaceManagerV1 does not support lookahead allocation"

        # CPU block -> GPU block.
        mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {}
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            new_block_table: BlockTable = []
            block_table = self.block_tables[seq.seq_id]

            for cpu_block in block_table:
                if cpu_block in mapping:
                    # Block shared within the group: only bump the refcount.
                    gpu_block = mapping[cpu_block]
                    gpu_block.ref_count += 1
                else:
                    gpu_block = self.gpu_allocator.allocate(
                        cpu_block.block_hash, cpu_block.num_hashed_tokens)
                    mapping[cpu_block] = gpu_block
                new_block_table.append(gpu_block)
                # Free the CPU block swapped in to GPU.
                self.cpu_allocator.free(cpu_block)
            self.block_tables[seq.seq_id] = new_block_table

        block_number_mapping = {
            cpu_block.block_number: gpu_block.block_number
            for cpu_block, gpu_block in mapping.items()
        }
        return block_number_mapping

    def can_swap_out(self, seq_group: SequenceGroup) -> bool:
        blocks = self._get_physical_blocks(seq_group)
        return len(blocks) <= self.cpu_allocator.get_num_free_blocks()

    def swap_out(self, seq_group: SequenceGroup) -> Dict[int, int]:
        """Move the group's blocks GPU -> CPU; returns block number mapping."""
        # GPU block -> CPU block.
        mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {}
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            new_block_table: BlockTable = []
            block_table = self.block_tables[seq.seq_id]

            for gpu_block in block_table:
                if gpu_block in mapping:
                    # Block shared within the group: only bump the refcount.
                    cpu_block = mapping[gpu_block]
                    cpu_block.ref_count += 1
                else:
                    cpu_block = self.cpu_allocator.allocate(
                        gpu_block.block_hash, gpu_block.num_hashed_tokens)
                    mapping[gpu_block] = cpu_block
                new_block_table.append(cpu_block)
                # Free the GPU block swapped out to CPU.
                self.gpu_allocator.free(gpu_block)
            self.block_tables[seq.seq_id] = new_block_table

        block_number_mapping = {
            gpu_block.block_number: cpu_block.block_number
            for gpu_block, cpu_block in mapping.items()
        }
        return block_number_mapping

    def _free_block_table(self, block_table: BlockTable) -> None:
        # when using a sliding window, each seq will only use up
        # to `self.block_sliding_window` blocks. When freeing
        # the block table, we must make sure to not free blocks more
        # than once. If no sliding window is used, there is no block
        # reuse in the block table, so we must free all blocks.
        blocks_to_free = (block_table[-self.block_sliding_window:]
                          if self.block_sliding_window is not None else
                          block_table)
        for block in set(blocks_to_free):
            if block.device == Device.GPU:
                self.gpu_allocator.free(block)
            else:
                self.cpu_allocator.free(block)

    def free(self, seq: Sequence) -> None:
        """Release all physical blocks held by `seq` (idempotent)."""
        if seq.seq_id not in self.block_tables:
            # Already freed or haven't been scheduled yet.
            return
        block_table = self.block_tables[seq.seq_id]
        self._free_block_table(block_table)
        del self.block_tables[seq.seq_id]

    def reset(self) -> None:
        """Free every tracked block table."""
        for block_table in self.block_tables.values():
            self._free_block_table(block_table)
        self.block_tables.clear()

    def get_block_table(self, seq: Sequence) -> List[int]:
        block_table = self.block_tables[seq.seq_id]
        return [block.block_number for block in block_table]

    def get_num_free_gpu_blocks(self) -> int:
        return self.gpu_allocator.get_num_free_blocks()

    def get_num_free_cpu_blocks(self) -> int:
        return self.cpu_allocator.get_num_free_blocks()

    def access_all_blocks_in_seq(
        self,
        seq: Sequence,
        access_time: float,
    ) -> None:
        if self.enable_caching:
            # Update the last accessed time of all the blocks accessed
            # in this step.
            block_table = self.block_tables[seq.seq_id]
            for block in block_table:
                block.last_accessed = access_time

    def compute_full_blocks_in_seq(self, seq: Sequence):
        """Mark the sequence's full blocks as computed (prefix caching)."""
        if seq.seq_id not in self.block_tables:
            return
        max_full_block = seq.get_len() // self.block_size - 1
        block_table = self.block_tables[seq.seq_id]
        if max_full_block == -1:
            return
        # Walk backwards and stop at the first already-computed block: all
        # earlier blocks must have been marked in a previous step.
        for i in reversed(range(max_full_block)):
            if block_table[i].computed:
                break
            block_table[i].computed = True

    def get_all_computed_blocks(self, seq: Sequence) -> List[int]:
        if seq.seq_id not in self.block_tables:
            return []
        block_table = self.block_tables[seq.seq_id]
        # NOTE We exclude the last block to avoid the case where the entire
        # prompt is cached. This would cause erroneous behavior in model
        # runner.
        return [
            b.block_number
            for b in takewhile(lambda b: b.computed, block_table[:-1])
        ]

    def get_common_computed_block_ids(
            self, seqs: List[Sequence]) -> GenericSequence[int]:
        """Return the block ids that are common for a given sequence group.

        Used in prefill (can skip prefill of some blocks).
        """
        # Can return non-empty result only with prefix caching enabled.
        if not self.enable_caching:
            return []

        ids_list = [self.get_all_computed_blocks(seq) for seq in seqs]
        return commonprefix([ids for ids in ids_list if ids != []])

    def mark_blocks_as_computed(self, seq_group: SequenceGroup):
        if self.enable_caching:
            for seq in seq_group.seqs_dict.values():
                self.compute_full_blocks_in_seq(seq)
class BlockSpaceManagerV2(BlockSpaceManager):
    """BlockSpaceManager which manages the allocation of KV cache.

    It owns responsibility for allocation, swapping, allocating memory for
    autoregressively-generated tokens, and other advanced features such as
    prefix caching, forking/copy-on-write, and sliding-window memory
    allocation.

    The current implementation is partial; in particular prefix caching and
    sliding-window are not feature complete. This class implements the design
    described in https://github.com/vllm-project/vllm/pull/3492.

    Lookahead slots
        The block manager has the notion of a "lookahead slot". These are
        slots in the KV cache that are allocated for a sequence. Unlike the
        other allocated slots, the content of these slots is undefined -- the
        worker may use the memory allocations in any way.

        In practice, a worker could use these lookahead slots to run multiple
        forward passes for a single scheduler invocation. Each successive
        forward pass would write KV activations to the corresponding lookahead
        slot. This allows low inter-token latency use-cases, where the
        overhead of continuous batching scheduling is amortized over >1
        generated tokens.

        Speculative decoding uses lookahead slots to store KV activations of
        proposal tokens.

        See https://github.com/vllm-project/vllm/pull/3250 for more
        information on lookahead scheduling.

    Args:
        block_size (int): The size of each memory block.
        num_gpu_blocks (int): The number of memory blocks allocated on GPU.
        num_cpu_blocks (int): The number of memory blocks allocated on CPU.
        watermark (float, optional): The threshold used for memory swapping.
            Defaults to 0.01.
        sliding_window (Optional[int], optional): The size of the sliding
            window. Defaults to None.
        enable_caching (bool, optional): Flag indicating whether caching is
            enabled. Defaults to False.
    """

    def __init__(
        self,
        block_size: int,
        num_gpu_blocks: int,
        num_cpu_blocks: int,
        watermark: float = 0.01,
        sliding_window: Optional[int] = None,
        enable_caching: bool = False,
    ) -> None:
        self.block_size = block_size
        self.num_total_gpu_blocks = num_gpu_blocks
        self.num_total_cpu_blocks = num_cpu_blocks

        assert sliding_window is None, "Sliding window not yet supported"

        self.block_sliding_window = None

        self.watermark = watermark
        assert watermark >= 0.0

        self.enable_caching = enable_caching

        # Blocks kept free as headroom to avoid frequent cache eviction.
        self.watermark_blocks = int(watermark * num_gpu_blocks)

        self.block_allocator = CpuGpuBlockAllocator.create(
            allocator_type="prefix_caching" if enable_caching else "naive",
            num_gpu_blocks=num_gpu_blocks,
            num_cpu_blocks=num_cpu_blocks,
            block_size=block_size,
        )

        self.block_tables: Dict[SeqId, BlockTable] = {}

    def can_allocate(self, seq_group: SequenceGroup) -> AllocStatus:
        """Check whether the group's prompt blocks fit in GPU memory."""
        # FIXME(woosuk): Here we assume that all sequences in the group share
        # the same prompt. This may not be true for preempted sequences.
        seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0]

        num_required_blocks = BlockTable.get_num_required_blocks(
            seq.get_token_ids(),
            block_size=self.block_size,
        )

        assert self.block_sliding_window is None
        if self.block_sliding_window is not None:
            num_required_blocks = min(num_required_blocks,
                                      self.block_sliding_window)

        num_free_gpu_blocks = self.block_allocator.get_num_free_blocks(
            device=Device.GPU)

        # Use watermark to avoid frequent cache eviction.
        if (self.num_total_gpu_blocks - num_required_blocks <
                self.watermark_blocks):
            return AllocStatus.NEVER
        if num_free_gpu_blocks - num_required_blocks >= self.watermark_blocks:
            return AllocStatus.OK
        else:
            return AllocStatus.LATER

    def allocate(self, seq_group: SequenceGroup) -> None:
        """Allocate a block table for the group's prompt and fork it to
        every other waiting sequence in the group."""
        waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING)
        assert not (set(seq.seq_id for seq in waiting_seqs)
                    & self.block_tables.keys()), "block table already exists"

        # NOTE: Here we assume that all sequences in the group have the same
        # prompt.
        seq = waiting_seqs[0]

        block_table = BlockTable(
            block_size=self.block_size,
            block_allocator=self.block_allocator,
        )
        assert self.block_sliding_window is None
        block_table.allocate(seq.get_token_ids())
        self.block_tables[seq.seq_id] = block_table

        # Assign the block table for each sequence.
        for seq in waiting_seqs[1:]:
            self.block_tables[seq.seq_id] = block_table.fork()

    def can_append_slots(self, seq_group: SequenceGroup,
                         num_lookahead_slots: int) -> bool:
        """Determine if there is enough space in the GPU KV cache to continue
        generation of the specified sequence group.

        We use a worst-case heuristic: assume each touched block will require
        a new allocation (either via CoW or new block). We can append slots if
        the number of touched blocks is less than the number of free blocks.

        "Lookahead slots" are slots that are allocated in addition to the
        slots for known tokens. The contents of the lookahead slots are not
        defined. This is used by speculative decoding when speculating future
        tokens.
        """

        num_touched_blocks = 0
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            block_table = self.block_tables[seq.seq_id]

            num_touched_blocks += (
                block_table.get_num_blocks_touched_by_append_slots(
                    token_ids=block_table.get_unseen_token_ids(
                        seq.get_token_ids()),
                    num_lookahead_slots=num_lookahead_slots,
                ))

        num_free_gpu_blocks = self.block_allocator.get_num_free_blocks(
            Device.GPU)
        return num_touched_blocks <= num_free_gpu_blocks

    def append_slots(
        self,
        seq: Sequence,
        num_lookahead_slots: int,
    ) -> Dict[int, List[int]]:
        """Append slots for the sequence's new tokens plus lookahead slots.

        Returns a mapping of source block id -> destination block ids for any
        copy-on-writes triggered by the append.
        """
        block_table = self.block_tables[seq.seq_id]

        block_table.append_token_ids(
            token_ids=block_table.get_unseen_token_ids(seq.get_token_ids()),
            num_lookahead_slots=num_lookahead_slots,
        )

        # Return any new copy-on-writes.
        new_cows = self.block_allocator.clear_copy_on_writes()
        return new_cows

    def free(self, seq: Sequence) -> None:
        """Release all blocks held by `seq` (idempotent)."""
        if seq.seq_id not in self.block_tables:
            # Already freed or haven't been scheduled yet.
            return
        self.block_tables[seq.seq_id].free()
        del self.block_tables[seq.seq_id]

    def get_block_table(self, seq: Sequence) -> List[int]:
        assert seq.seq_id in self.block_tables
        block_ids = self.block_tables[seq.seq_id].physical_block_ids
        assert all(b is not None for b in block_ids)
        return block_ids  # type: ignore

    def access_all_blocks_in_seq(self, seq: Sequence, now: float):
        # Update the last accessed time of all the blocks accessed
        # in this step.
        # The access time is only useful for prefix caching, where the
        # evictor uses it to decide which cached blocks to keep so cached
        # content can be reused to the maximum extent.
        if self.enable_caching:
            block_table = self.block_tables[seq.seq_id]
            # PERF: materialize the ids directly instead of a manual
            # append loop.
            block_ids = list(block_table.physical_block_ids)
            self.block_allocator.mark_blocks_as_accessed(
                block_ids,  # type: ignore
                now)

    def mark_blocks_as_computed(self, seq_group: SequenceGroup):
        # The only need for mark block as computed is for prefix caching,
        # while currently we could determine whether one block is computed
        # or not by checking whether it has a content hash.
        # So this function is a no-op for block manager v2.
        pass

    def get_common_computed_block_ids(
            self, seqs: List[Sequence]) -> GenericSequence[int]:
        """Determine which blocks for which we skip prefill.

        With prefix caching we can skip prefill for previously-generated
        blocks. Currently, the attention implementation only supports skipping
        cached blocks if they are a contiguous prefix of cached blocks.

        This method determines which blocks can be safely skipped for all
        sequences in the sequence group.
        """
        seq_block_ids = [
            self.block_tables[seq.seq_id].physical_block_ids for seq in seqs
        ]
        # NOTE(sang): This assumes seq_block_ids doesn't contain any None.
        return self.block_allocator.get_common_computed_block_ids(
            seq_block_ids)  # type: ignore

    def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None:
        """Share the parent's block table with the child via fork (CoW)."""
        src_block_table = self.block_tables[parent_seq.seq_id]
        self.block_tables[child_seq.seq_id] = src_block_table.fork()

    def can_swap_in(self, seq_group: SequenceGroup,
                    num_lookahead_slots: int) -> AllocStatus:
        # Swapping is not implemented in v2 yet; always defer.
        return AllocStatus.LATER

    def swap_in(self, seq_group: SequenceGroup,
                num_lookahead_slots: int) -> Dict[int, int]:
        raise NotImplementedError

    def can_swap_out(self, seq_group: SequenceGroup) -> bool:
        return False

    def swap_out(self, seq_group: SequenceGroup) -> Dict[int, int]:
        raise NotImplementedError

    def get_num_free_gpu_blocks(self) -> int:
        return self.block_allocator.get_num_free_blocks(Device.GPU)

    def get_num_free_cpu_blocks(self) -> int:
        return self.block_allocator.get_num_free_blocks(Device.CPU)
+ return self.block_allocator.get_common_computed_block_ids( + seq_block_ids) # type: ignore + + def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None: + src_block_table = self.block_tables[parent_seq.seq_id] + self.block_tables[child_seq.seq_id] = src_block_table.fork() + + def can_swap_in(self, seq_group: SequenceGroup, + num_lookahead_slots: int) -> AllocStatus: + return AllocStatus.LATER + + def swap_in(self, seq_group: SequenceGroup, + num_lookahead_slots: int) -> Dict[int, int]: + raise NotImplementedError + + def can_swap_out(self, seq_group: SequenceGroup) -> bool: + return False + + def swap_out(self, seq_group: SequenceGroup) -> Dict[int, int]: + raise NotImplementedError + + def get_num_free_gpu_blocks(self) -> int: + return self.block_allocator.get_num_free_blocks(Device.GPU) + + def get_num_free_cpu_blocks(self) -> int: + return self.block_allocator.get_num_free_blocks(Device.CPU) diff --git a/vllm/core/evictor_v1.py b/vllm/core/evictor_v1.py new file mode 100644 index 0000000..aa51dd6 --- /dev/null +++ b/vllm/core/evictor_v1.py @@ -0,0 +1,105 @@ +import enum +from abc import ABC, abstractmethod, abstractproperty +from typing import OrderedDict + +from vllm.block import PhysicalTokenBlock + + +class EvictionPolicy(enum.Enum): + """Enum for eviction policy used by make_evictor to instantiate the correct + Evictor subclass. + """ + LRU = enum.auto() + + +class Evictor(ABC): + """The Evictor subclasses should be used by the BlockAllocator class to + handle eviction of freed PhysicalTokenBlocks. 
+ """ + + @abstractmethod + def __init__(self): + pass + + @abstractmethod + def __contains__(self, block_hash: int) -> bool: + pass + + @abstractmethod + def evict(self) -> PhysicalTokenBlock: + """Runs the eviction algorithm and returns the evicted block""" + pass + + @abstractmethod + def add(self, block: PhysicalTokenBlock): + """Adds block to the evictor, making it a candidate for eviction""" + pass + + @abstractmethod + def remove(self, block_hash: int) -> PhysicalTokenBlock: + """Simply removes the block with the hash value block_hash from the + evictor. Caller is responsible for making sure that block_hash is + contained in the evictor before calling remove. Should be used to + "bring back" blocks that have been freed but not evicted yet. + """ + pass + + @abstractproperty + def num_blocks(self) -> int: + pass + + +class LRUEvictor(Evictor): + """Evicts in a least-recently-used order using the last_accessed timestamp + that's recorded in the PhysicalTokenBlock. If there are multiple blocks with + the same last_accessed time, then the one with the largest num_hashed_tokens + will be evicted. If two blocks each have the lowest last_accessed time and + highest num_hashed_tokens value, then one will be chose arbitrarily + """ + + def __init__(self): + self.free_table: OrderedDict[int, PhysicalTokenBlock] = OrderedDict() + + def __contains__(self, block_hash: int) -> bool: + return block_hash in self.free_table + + def evict(self) -> PhysicalTokenBlock: + if len(self.free_table) == 0: + raise ValueError("No usable cache memory left") + + evicted_block = next(iter(self.free_table.values())) + # The blocks with the lowest timestamps should be placed consecutively + # at the start of OrderedDict. Loop through all these blocks to + # find the one with maximum number of hashed tokens. 
+ for _, block in self.free_table.items(): + if evicted_block.last_accessed < block.last_accessed: + break + if evicted_block.num_hashed_tokens < block.num_hashed_tokens: + evicted_block = block + + self.free_table.pop(evicted_block.block_hash) + + evicted_block.computed = False + return evicted_block + + def add(self, block: PhysicalTokenBlock): + self.free_table[block.block_hash] = block + + def remove(self, block_hash: int) -> PhysicalTokenBlock: + if block_hash not in self.free_table: + raise ValueError( + "Attempting to remove block that's not in the evictor") + block: PhysicalTokenBlock = self.free_table[block_hash] + self.free_table.pop(block_hash) + return block + + @property + def num_blocks(self) -> int: + return len(self.free_table) + + +def make_evictor(eviction_policy: EvictionPolicy) -> Evictor: + if eviction_policy == EvictionPolicy.LRU: + return LRUEvictor() + else: + raise ValueError(f"Unknown cache eviction policy: {eviction_policy}") diff --git a/vllm/core/evictor_v2.py b/vllm/core/evictor_v2.py new file mode 100644 index 0000000..57759b2 --- /dev/null +++ b/vllm/core/evictor_v2.py @@ -0,0 +1,127 @@ +import enum +from abc import ABC, abstractmethod, abstractproperty +from typing import OrderedDict, Tuple + + +class EvictionPolicy(enum.Enum): + """Enum for eviction policy used by make_evictor to instantiate the correct + Evictor subclass. + """ + LRU = enum.auto() + + +class Evictor(ABC): + """The Evictor subclasses should be used by the BlockAllocator class to + handle eviction of freed PhysicalTokenBlocks. 
+ """ + + @abstractmethod + def __init__(self): + pass + + @abstractmethod + def __contains__(self, block_id: int) -> bool: + pass + + @abstractmethod + def evict(self) -> Tuple[int, int]: + """Runs the eviction algorithm and returns the evicted block's + content hash along with physical block id along with physical block id + """ + pass + + @abstractmethod + def add(self, block_id: int, content_hash: int, num_hashed_tokens: int, + last_accessed: float): + """Adds block to the evictor, making it a candidate for eviction""" + pass + + @abstractmethod + def update(self, block_id: int, last_accessed: float): + """Update corresponding block's access time in metadata""" + pass + + @abstractmethod + def remove(self, block_id: int): + """Remove a given block id from the cache.""" + pass + + @abstractproperty + def num_blocks(self) -> int: + pass + + +class BlockMetaData(): + """Data structure for storing key data describe cached block, so that + evitor could use to make its decision which one to choose for eviction + + Here we use physical block id as the dict key, as there maybe several + blocks with the same content hash, but their physical id is unique. + """ + + def __init__(self, content_hash: int, num_hashed_tokens: int, + last_accessed: float): + self.content_hash = content_hash + self.num_hashed_tokens = num_hashed_tokens + self.last_accessed = last_accessed + + +class LRUEvictor(Evictor): + """Evicts in a least-recently-used order using the last_accessed timestamp + that's recorded in the PhysicalTokenBlock. If there are multiple blocks with + the same last_accessed time, then the one with the largest num_hashed_tokens + will be evicted. 
If two blocks each have the lowest last_accessed time and + highest num_hashed_tokens value, then one will be chose arbitrarily + """ + + def __init__(self): + self.free_table: OrderedDict[int, BlockMetaData] = OrderedDict() + + def __contains__(self, block_id: int) -> bool: + return block_id in self.free_table + + def evict(self) -> Tuple[int, int]: + if len(self.free_table) == 0: + raise ValueError("No usable cache memory left") + + evicted_block = next(iter(self.free_table.values())) + evicted_block_id = next(iter(self.free_table.keys())) + # The blocks with the lowest timestamps should be placed consecutively + # at the start of OrderedDict. Loop through all these blocks to + # find the one with maximum number of hashed tokens. + for _id, block in self.free_table.items(): + if evicted_block.last_accessed > block.last_accessed or ( + evicted_block.last_accessed == block.last_accessed and + evicted_block.num_hashed_tokens < block.num_hashed_tokens): + evicted_block = block + evicted_block_id = _id + + self.free_table.pop(evicted_block_id) + + return evicted_block_id, evicted_block.content_hash + + def add(self, block_id: int, content_hash: int, num_hashed_tokens: int, + last_accessed: float): + self.free_table[block_id] = BlockMetaData(content_hash, + num_hashed_tokens, + last_accessed) + + def update(self, block_id: int, last_accessed: float): + self.free_table[block_id].last_accessed = last_accessed + + def remove(self, block_id: int): + if block_id not in self.free_table: + raise ValueError( + "Attempting to remove block that's not in the evictor") + self.free_table.pop(block_id) + + @property + def num_blocks(self) -> int: + return len(self.free_table) + + +def make_evictor(eviction_policy: EvictionPolicy) -> Evictor: + if eviction_policy == EvictionPolicy.LRU: + return LRUEvictor() + else: + raise ValueError(f"Unknown cache eviction policy: {eviction_policy}") diff --git a/vllm/core/interfaces.py b/vllm/core/interfaces.py new file mode 100644 index 
class AllocStatus(enum.Enum):
    """Result for BlockSpaceManager.can_allocate

    1. Ok: seq_group can be allocated now.
    2. Later: seq_group cannot be allocated.
        The capacity of allocator is larger than seq_group required.
    3. Never: seq_group can never be allocated.
        The seq_group is too large to allocated in GPU.
    """
    OK = enum.auto()
    LATER = enum.auto()
    NEVER = enum.auto()


class BlockSpaceManager(ABC):
    """Abstract interface for managing KV-cache block space."""

    @staticmethod
    def get_block_space_manager_class(version: str):
        """Map a version string ("v1" or "v2") to the implementing class."""
        version = version.lower()

        if version == "v1":
            from vllm.core.block_manager_v1 import BlockSpaceManagerV1
            return BlockSpaceManagerV1

        if version == "v2":
            from vllm.core.block_manager_v2 import BlockSpaceManagerV2
            return BlockSpaceManagerV2

        raise ValueError(f"Unknown version {version=}")

    @abstractmethod
    def can_allocate(self, seq_group: "SequenceGroup") -> AllocStatus:
        pass

    @abstractmethod
    def allocate(self, seq_group: "SequenceGroup") -> None:
        pass

    @abstractmethod
    def can_append_slots(self, seq_group: "SequenceGroup",
                         num_lookahead_slots: int) -> bool:
        pass

    @abstractmethod
    def append_slots(
        self,
        seq: "Sequence",
        num_lookahead_slots: int,
    ) -> Dict[int, List[int]]:
        pass

    @abstractmethod
    def fork(self, parent_seq: "Sequence", child_seq: "Sequence") -> None:
        pass

    @abstractmethod
    def can_swap_in(self, seq_group: "SequenceGroup",
                    num_lookahead_slots: int) -> AllocStatus:
        pass

    @abstractmethod
    def swap_in(self, seq_group: "SequenceGroup",
                num_lookahead_slots: int) -> Dict[int, int]:
        pass

    @abstractmethod
    def can_swap_out(self, seq_group: "SequenceGroup") -> bool:
        pass

    @abstractmethod
    def swap_out(self, seq_group: "SequenceGroup") -> Dict[int, int]:
        pass

    @abstractmethod
    def free(self, seq: "Sequence") -> None:
        pass

    @abstractmethod
    def get_block_table(self, seq: "Sequence") -> List[int]:
        pass

    @abstractmethod
    def get_num_free_gpu_blocks(self) -> int:
        pass

    @abstractmethod
    def get_num_free_cpu_blocks(self) -> int:
        pass

    @abstractmethod
    def access_all_blocks_in_seq(
        self,
        seq: "Sequence",
        access_time: float,
    ) -> None:
        pass

    @abstractmethod
    def get_common_computed_block_ids(
            self, seqs: List["Sequence"]) -> GenericSequence[int]:
        pass

    @abstractmethod
    def mark_blocks_as_computed(self, seq_group: "SequenceGroup"):
        pass


class Policy:
    """Ordering strategy for scheduler queues."""

    def get_priority(
        self,
        now: float,
        seq_group: "SequenceGroup",
    ) -> float:
        raise NotImplementedError

    def sort_by_priority(
        self,
        now: float,
        seq_groups: Deque["SequenceGroup"],
    ) -> Deque["SequenceGroup"]:
        """Return a new deque ordered by descending priority."""
        ranked = sorted(
            seq_groups,
            key=lambda seq_group: self.get_priority(now, seq_group),
            reverse=True,
        )
        return deque(ranked)


class FCFS(Policy):
    """First-come-first-served: the longest-waiting group wins."""

    def get_priority(
        self,
        now: float,
        seq_group: "SequenceGroup",
    ) -> float:
        # Priority is simply the time spent waiting since arrival.
        return now - seq_group.metrics.arrival_time


class PolicyFactory:
    """Registry-based construction of scheduling policies."""

    _POLICY_REGISTRY = {'fcfs': FCFS}

    @classmethod
    def get_policy(cls, policy_name: str, **kwargs) -> Policy:
        """Look up the named policy class and construct it."""
        return cls._POLICY_REGISTRY[policy_name](**kwargs)
# Test-only. If configured, decode is preempted with
# ARTIFICIAL_PREEMPTION_PROB% probability.
# NOTE(review): bool(os.getenv(...)) is truthy for ANY non-empty string,
# including "0" or "false" -- confirm that is the intended switch behavior.
ENABLE_ARTIFICIAL_PREEMPT = bool(
    os.getenv("VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT", False))  # noqa
ARTIFICIAL_PREEMPTION_PROB = 0.5
ARTIFICIAL_PREEMPTION_MAX_CNT = 500


class PreemptionMode(enum.Enum):
    """Preemption modes.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU
       memory and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
       recompute them when the sequences are resumed, treating the
       sequences as new prompts.
    """
    SWAP = enum.auto()
    RECOMPUTE = enum.auto()


@dataclass
class SchedulingBudget:
    """The available token/sequence slots for one scheduling step.

    The budget is request_id-aware: repeated add calls for the same request
    id are ignored, because on the normal scheduling path RUNNING num_seqs
    is updated ahead of time and could otherwise be counted more than once.
    TODO(sang): this feature can be dropped from the API once chunked
    prefill scheduling is the only path.
    """
    token_budget: int
    max_num_seqs: int
    # Request ids already accounted for (fixes the original "requeset"
    # typo; these fields are private bookkeeping).
    _request_ids_num_batched_tokens: Set[str] = field(default_factory=set)
    _request_ids_num_curr_seqs: Set[str] = field(default_factory=set)
    _num_batched_tokens: int = 0
    _num_curr_seqs: int = 0

    def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int):
        """True when both the token and sequence budgets can absorb the
        proposed new work."""
        assert num_new_tokens != 0
        assert num_new_seqs != 0
        return (self.num_batched_tokens + num_new_tokens <= self.token_budget
                and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs)

    def remaining_token_budget(self):
        """Tokens still available in this step."""
        return self.token_budget - self.num_batched_tokens

    def add_num_batched_tokens(self, req_id: str, num_batched_tokens: int):
        # Idempotent per request id (see class docstring).
        if req_id in self._request_ids_num_batched_tokens:
            return

        self._request_ids_num_batched_tokens.add(req_id)
        self._num_batched_tokens += num_batched_tokens

    def subtract_num_batched_tokens(self, req_id: str,
                                    num_batched_tokens: int):
        if req_id in self._request_ids_num_batched_tokens:
            self._request_ids_num_batched_tokens.remove(req_id)
            self._num_batched_tokens -= num_batched_tokens

    def add_num_seqs(self, req_id: str, num_curr_seqs: int):
        # Idempotent per request id (see class docstring).
        if req_id in self._request_ids_num_curr_seqs:
            return

        self._request_ids_num_curr_seqs.add(req_id)
        self._num_curr_seqs += num_curr_seqs

    def subtract_num_seqs(self, req_id: str, num_curr_seqs: int):
        if req_id in self._request_ids_num_curr_seqs:
            self._request_ids_num_curr_seqs.remove(req_id)
            self._num_curr_seqs -= num_curr_seqs

    @property
    def num_batched_tokens(self):
        return self._num_batched_tokens

    @property
    def num_curr_seqs(self):
        return self._num_curr_seqs


@dataclass
class ScheduledSequenceGroup:
    # A sequence group that's scheduled.
    seq_group: "SequenceGroup"
    # The total chunk size (number of tokens) to process for next iteration:
    # 1 for decoding; the prompt length for prefill, or less when the
    # prefill is chunked.
    token_chunk_size: int
@dataclass
class SchedulerOutputs:
    """The scheduling decision made from a scheduler."""
    # Scheduled sequence groups.
    scheduled_seq_groups: Iterable["ScheduledSequenceGroup"]
    # Number of prefill groups scheduled.
    num_prefill_groups: int
    # Total number of batched tokens.
    num_batched_tokens: int
    # CPU -> GPU block numbers to swap in.
    blocks_to_swap_in: Dict[int, int]
    # GPU -> CPU block numbers to swap out.
    blocks_to_swap_out: Dict[int, int]
    # Source block -> list of destination blocks to copy.
    blocks_to_copy: Dict[int, List[int]]
    # Sequence groups dropped from scheduling.
    ignored_seq_groups: List["SequenceGroup"]
    # Slots reserved for lookahead (speculative) decoding.
    num_lookahead_slots: int
    # Size of the running queue when this decision was made.
    running_queue_size: int

    def __post_init__(self):
        # A single step never swaps in and out at the same time.
        assert not (self.blocks_to_swap_in and self.blocks_to_swap_out)

        self.num_loras: int = len(self.lora_requests)
        if self.num_loras > 0:
            self._sort_by_lora_ids()

    def is_empty(self) -> bool:
        # NOTE: ignored sequence groups are not considered.
        return not (self.scheduled_seq_groups or self.blocks_to_swap_in
                    or self.blocks_to_swap_out or self.blocks_to_copy)

    def _sort_by_lora_ids(self):
        # Group scheduled work by LoRA id, then request id, for the runner.
        self.scheduled_seq_groups = sorted(
            self.scheduled_seq_groups,
            key=lambda g: (g.seq_group.lora_int_id, g.seq_group.request_id))

    @property
    def lora_requests(self) -> Set["LoRARequest"]:
        """The distinct LoRA requests carried by the scheduled groups."""
        return {
            group.seq_group.lora_request
            for group in self.scheduled_seq_groups
            if group.seq_group.lora_request is not None
        }


@dataclass
class SchedulerRunningOutputs:
    """The requests scheduled from the running queue.

    May hold decodes or chunked prefills.  When memory runs short, groups
    are preempted (for recompute) or swapped out.
    """
    # Running groups in a decoding phase.
    decode_seq_groups: List["SequenceGroup"]
    # Running groups still in a prefill phase (i.e. chunked prefill).
    prefill_seq_groups: List["SequenceGroup"]
    # Groups preempted for recomputation.
    preempted: List["SequenceGroup"]
    # Groups swapped out to CPU.
    swapped_out: List["SequenceGroup"]
    # GPU -> CPU block numbers to swap out.
    blocks_to_swap_out: Dict[int, int]
    # Source block -> list of destination blocks to copy.
    blocks_to_copy: Dict[int, List[int]]
    # Slots reserved for lookahead decoding.
    num_lookahead_slots: int

    @classmethod
    def create_empty(cls) -> "SchedulerRunningOutputs":
        return cls(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            preempted=[],
            swapped_out=[],
            blocks_to_swap_out={},
            blocks_to_copy={},
            num_lookahead_slots=0,
        )


@dataclass
class SchedulerSwappedInOutputs:
    """The requests scheduled from the swapped queue.

    May hold decodes or chunked prefills.
    """
    # Swapped-in groups in a decoding phase.
    decode_seq_groups: List["SequenceGroup"]
    # Swapped-in groups still in a prefill phase (chunked prefill).
    prefill_seq_groups: List["SequenceGroup"]
    # CPU -> GPU block numbers to swap in.
    blocks_to_swap_in: Dict[int, int]
    # Source block -> list of destination blocks to copy.
    blocks_to_copy: Dict[int, List[int]]
    # Slots reserved for lookahead decoding.
    num_lookahead_slots: int
    # Groups that can never fit and are failed outright.
    infeasible_seq_groups: List["SequenceGroup"]

    @classmethod
    def create_empty(cls) -> "SchedulerSwappedInOutputs":
        return cls(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            blocks_to_swap_in={},
            blocks_to_copy={},
            num_lookahead_slots=0,
            infeasible_seq_groups=[],
        )
@dataclass
class SchedulerPrefillOutputs:
    """The requests scheduled from the waiting queue.

    May hold fresh prefill requests or preempted requests that must be
    recomputed from scratch.
    """
    # Groups selected for prefill.
    seq_groups: List["SequenceGroup"]
    # Groups dropped from scheduling.
    ignored_seq_groups: List["SequenceGroup"]
    num_lookahead_slots: int

    @classmethod
    def create_empty(cls) -> "SchedulerPrefillOutputs":
        return cls(
            seq_groups=[],
            ignored_seq_groups=[],
            num_lookahead_slots=0,
        )


class Scheduler:

    def __init__(
        self,
        scheduler_config: "SchedulerConfig",
        cache_config: "CacheConfig",
        lora_config: Optional["LoRAConfig"],
    ) -> None:
        self.scheduler_config = scheduler_config
        self.cache_config = cache_config
        # Note for LoRA scheduling: the current policy is extremely simple
        # and NOT fair -- it can starve some LoRAs and should be improved.
        self.lora_config = lora_config

        # With chunked prefill a prompt may span several batches, so only
        # the model length bounds it; otherwise the batch budget does too.
        if self.scheduler_config.chunked_prefill_enabled:
            self.prompt_limit = self.scheduler_config.max_model_len
        else:
            self.prompt_limit = min(
                self.scheduler_config.max_model_len,
                self.scheduler_config.max_num_batched_tokens)

        version = "v2" if self.scheduler_config.use_v2_block_manager else "v1"
        BlockSpaceManagerImpl = BlockSpaceManager.get_block_space_manager_class(
            version=version)

        # Create the block space manager.
        self.block_manager = BlockSpaceManagerImpl(
            block_size=self.cache_config.block_size,
            num_gpu_blocks=self.cache_config.num_gpu_blocks,
            num_cpu_blocks=self.cache_config.num_cpu_blocks,
            sliding_window=self.cache_config.sliding_window,
            enable_caching=self.cache_config.enable_prefix_caching)

        # Scheduling queues: WAITING holds new prefills and preempted
        # requests, RUNNING holds decodes, SWAPPED holds decodes whose
        # blocks currently live on the CPU.
        self.waiting: Deque["SequenceGroup"] = deque()
        self.running: Deque["SequenceGroup"] = deque()
        self.swapped: Deque["SequenceGroup"] = deque()

        # Timing of the previous scheduling step.
        self.prev_time = 0.0
        # Whether the previous step scheduled a prompt.
        self.prev_prompt = False
        # Latency of the last prompt step.
        self.last_prompt_latency = 0.0

        # Test-only knobs for injecting artificial preemption.
        self.enable_artificial_preemption = ENABLE_ARTIFICIAL_PREEMPT
        self.artificial_preempt_cnt = (ARTIFICIAL_PREEMPTION_MAX_CNT
                                       if self.enable_artificial_preemption
                                       else 0)

    @property
    def lora_enabled(self) -> bool:
        return bool(self.lora_config)

    @property
    def num_decoding_tokens_per_seq(self) -> int:
        """The number of new tokens generated per sequence per step."""
        return 1

    def add_seq_group(self, seq_group: "SequenceGroup") -> None:
        """Queue a new sequence group for scheduling."""
        self.waiting.append(seq_group)
+ state_queue.remove(aborted_group) + for seq in aborted_group.get_seqs(): + if seq.is_finished(): + continue + seq.status = SequenceStatus.FINISHED_ABORTED + self.free_seq(seq) + + def has_unfinished_seqs(self) -> bool: + return len(self.waiting) != 0 or len(self.running) != 0 or len( + self.swapped) != 0 + + def get_num_unfinished_seq_groups(self) -> int: + return len(self.waiting) + len(self.running) + len(self.swapped) + + def _schedule_running( + self, + running_queue: deque, + budget: SchedulingBudget, + curr_loras: Optional[Set[int]], + policy: Policy, + enable_chunking: bool = False, + ) -> Tuple[deque, SchedulerRunningOutputs]: + """Schedule sequence groups that are running. + + Running queue should include decode and chunked prefill requests. + + Args: + running_queue: The queue that contains running requests (i.e., + decodes). The given arguments are NOT in-place modified. + budget: The scheduling budget. The argument is in-place updated + when any decodes are preempted. + curr_loras: Currently batched lora request ids. The argument is + in-place updated when any decodes are preempted. + policy: The sorting policy to sort running_queue. + enable_chunking: If True, seq group can be chunked and only a + chunked number of tokens are scheduled if + `budget.num_batched_tokens` has not enough capacity to schedule + all tokens. + + Returns: + A tuple of remaining running queue (should be always 0) after + scheduling and SchedulerRunningOutputs. + """ + # Blocks that need to be swapped or copied before model execution. + blocks_to_swap_out: Dict[int, int] = {} + blocks_to_copy: Dict[int, List[int]] = {} + + decode_seq_groups: List[ScheduledSequenceGroup] = [] + prefill_seq_groups: List[ScheduledSequenceGroup] = [] + preempted: List[SequenceGroup] = [] + swapped_out: List[SequenceGroup] = [] + + # NOTE(woosuk): Preemption happens only when there is no available slot + # to keep all the sequence groups in the RUNNING state. 
+ # In this case, the policy is responsible for deciding which sequence + # groups to preempt. + now = time.time() + running_queue = policy.sort_by_priority(now, running_queue) + while running_queue: + seq_group = running_queue[0] + num_running_tokens = self._get_num_new_tokens( + seq_group, SequenceStatus.RUNNING, enable_chunking, budget) + + if num_running_tokens == 0: + break + + running_queue.popleft() + while not self._can_append_slots(seq_group): + budget.subtract_num_batched_tokens(seq_group.request_id, + num_running_tokens) + num_running_seqs = seq_group.get_max_num_running_seqs() + budget.subtract_num_seqs(seq_group.request_id, + num_running_seqs) + if curr_loras is not None and seq_group.lora_int_id > 0: + curr_loras.remove(seq_group.lora_int_id) + + if running_queue: + # Preempt the lowest-priority sequence groups. + victim_seq_group = running_queue.pop() + preempted_mode = self._preempt(victim_seq_group, + blocks_to_swap_out) + if preempted_mode == PreemptionMode.RECOMPUTE: + preempted.append(victim_seq_group) + else: + swapped_out.append(victim_seq_group) + else: + # No other sequence groups can be preempted. + # Preempt the current sequence group. + preempted_mode = self._preempt(seq_group, + blocks_to_swap_out) + if preempted_mode == PreemptionMode.RECOMPUTE: + preempted.append(seq_group) + else: + swapped_out.append(seq_group) + break + else: + self._append_slots(seq_group, blocks_to_copy) + is_prefill = seq_group.is_prefill() + if is_prefill: + prefill_seq_groups.append( + ScheduledSequenceGroup( + seq_group=seq_group, + token_chunk_size=num_running_tokens)) + else: + decode_seq_groups.append( + ScheduledSequenceGroup(seq_group=seq_group, + token_chunk_size=1)) + budget.add_num_batched_tokens(seq_group.request_id, + num_running_tokens) + # OPTIMIZATION: Note that get_max_num_running_seqs is + # expensive. 
For the default scheduling chase where + # enable_chunking is False, num_seqs are updated before running + # this method, so we don't have to update it again here. + if enable_chunking: + num_running_seqs = seq_group.get_max_num_running_seqs() + budget.add_num_seqs(seq_group.request_id, num_running_seqs) + if curr_loras is not None and seq_group.lora_int_id > 0: + curr_loras.add(seq_group.lora_int_id) + + return running_queue, SchedulerRunningOutputs( + decode_seq_groups=decode_seq_groups, + prefill_seq_groups=prefill_seq_groups, + preempted=preempted, + swapped_out=swapped_out, + blocks_to_swap_out=blocks_to_swap_out, + blocks_to_copy=blocks_to_copy, + num_lookahead_slots=self._get_num_lookahead_slots( + is_prefill=False)) + + def _schedule_swapped( + self, + swapped_queue: deque, + budget: SchedulingBudget, + curr_loras: Optional[Set[int]], + policy: Policy, + enable_chunking: bool = False, + ) -> Tuple[deque, SchedulerSwappedInOutputs]: + """Schedule sequence groups that are swapped out. + + It schedules swapped requests as long as it fits `budget` and + curr_loras <= max_lora from the scheduling config. The input arguments + `budget` and `curr_loras` are updated based on scheduled seq_groups. + + Args: + swapped_queue: The queue that contains swapped out requests. + The given arguments are NOT in-place modified. + budget: The scheduling budget. The argument is in-place updated + when any requests are swapped in. + curr_loras: Currently batched lora request ids. The argument is + in-place updated when any requests are swapped in. + policy: The sorting policy to sort swapped_queue. + enable_chunking: If True, seq group can be chunked and only a + chunked number of tokens are scheduled if + `budget.num_batched_tokens` has not enough capacity to schedule + all tokens. + + Returns: + A tuple of remaining swapped_queue after scheduling and + SchedulerSwappedInOutputs. + """ + # Blocks that need to be swapped or copied before model execution. 
+ blocks_to_swap_in: Dict[int, int] = {} + blocks_to_copy: Dict[int, List[int]] = {} + decode_seq_groups: List[ScheduledSequenceGroup] = [] + prefill_seq_groups: List[ScheduledSequenceGroup] = [] + now = time.time() + swapped_queue = policy.sort_by_priority(now, swapped_queue) + infeasible_seq_groups: List[SequenceGroup] = [] + + leftover_swapped: Deque[SequenceGroup] = deque() + while swapped_queue: + seq_group = swapped_queue[0] + + # If the sequence group cannot be swapped in, stop. + alloc_status = self.block_manager.can_swap_in(seq_group) + if alloc_status == AllocStatus.LATER: + break + elif alloc_status == AllocStatus.NEVER: + logger.warning( + "Failing the request %s because there's not enough kv " + "cache blocks to run the entire sequence.", + seq_group.request_id) + for seq in seq_group.get_seqs(): + seq.status = SequenceStatus.FINISHED_IGNORED + infeasible_seq_groups.append(seq_group) + swapped_queue.popleft() + continue + + lora_int_id = 0 + if self.lora_enabled: + lora_int_id = seq_group.lora_int_id + assert curr_loras is not None + assert self.lora_config is not None + if (lora_int_id > 0 and (lora_int_id not in curr_loras) + and len(curr_loras) >= self.lora_config.max_loras): + # We don't have a space for another LoRA, so + # we ignore this request for now. + leftover_swapped.appendleft(seq_group) + swapped_queue.popleft() + continue + + # The total number of sequences in the RUNNING state should not + # exceed the maximum number of sequences. 
+ num_new_seqs = seq_group.get_max_num_running_seqs() + num_new_tokens = self._get_num_new_tokens(seq_group, + SequenceStatus.SWAPPED, + enable_chunking, budget) + + if (num_new_tokens == 0 + or not budget.can_schedule(num_new_tokens=num_new_tokens, + num_new_seqs=num_new_seqs)): + break + + if lora_int_id > 0 and curr_loras is not None: + curr_loras.add(lora_int_id) + swapped_queue.popleft() + self._swap_in(seq_group, blocks_to_swap_in) + self._append_slots(seq_group, blocks_to_copy) + is_prefill = seq_group.is_prefill() + if is_prefill: + prefill_seq_groups.append( + ScheduledSequenceGroup(seq_group, + token_chunk_size=num_new_tokens)) + else: + decode_seq_groups.append( + ScheduledSequenceGroup(seq_group, token_chunk_size=1)) + budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_seqs(seq_group.request_id, num_new_seqs) + + swapped_queue.extendleft(leftover_swapped) + + return swapped_queue, SchedulerSwappedInOutputs( + decode_seq_groups=decode_seq_groups, + prefill_seq_groups=prefill_seq_groups, + blocks_to_swap_in=blocks_to_swap_in, + blocks_to_copy=blocks_to_copy, + num_lookahead_slots=self._get_num_lookahead_slots( + is_prefill=False), + infeasible_seq_groups=infeasible_seq_groups, + ) + + def _schedule_prefills( + self, + waiting_queue: deque, + budget: SchedulingBudget, + curr_loras: Optional[Set[int]], + enable_chunking: bool = False, + ) -> Tuple[deque, SchedulerPrefillOutputs]: + """Schedule sequence groups that are in prefill stage. + + Note that the current scheduler treats PREEMPTED_FOR_RECOMPUTE + as a new prefill (that starts from beginning -> most recently generated + tokens). + + It schedules waiting requests as long as it fits `budget` and + curr_loras <= max_lora from the scheduling config. The input arguments + `budget` and `curr_loras` are updated based on scheduled seq_groups. + + Args: + waiting_queue: The queue that contains prefill requests. + The given arguments are NOT in-place modified. 
+ budget: The scheduling budget. The argument is in-place updated + when any requests are scheduled. + curr_loras: Currently batched lora request ids. The argument is + in-place updated when any requests are scheduled. + enable_chunking: If True, seq group can be chunked and only a + chunked number of tokens are scheduled if + `budget.num_batched_tokens` has not enough capacity to schedule + all tokens. + + Returns: + A tuple of remaining waiting_queue after scheduling and + SchedulerPrefillOutputs. + """ + ignored_seq_groups: List[SequenceGroup] = [] + seq_groups: List[SequenceGroup] = [] + # We don't sort waiting queue because we assume it is sorted. + # Copy the queue so that the input queue is not modified. + waiting_queue = deque([s for s in waiting_queue]) + + leftover_waiting_sequences: Deque[SequenceGroup] = deque() + while self._passed_delay(time.time()) and waiting_queue: + seq_group = waiting_queue[0] + + waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING) + assert len(waiting_seqs) == 1, ( + "Waiting sequence group should have only one prompt " + "sequence.") + num_new_tokens = self._get_num_new_tokens(seq_group, + SequenceStatus.WAITING, + enable_chunking, budget) + if not enable_chunking: + num_prompt_tokens = waiting_seqs[0].get_len() + assert num_new_tokens == num_prompt_tokens + + if num_new_tokens > self.prompt_limit: + logger.warning( + "Input prompt (%d tokens) is too long" + " and exceeds limit of %d", num_new_tokens, + self.prompt_limit) + for seq in waiting_seqs: + seq.status = SequenceStatus.FINISHED_IGNORED + ignored_seq_groups.append(seq_group) + waiting_queue.popleft() + continue + + # If the sequence group cannot be allocated, stop. 
+ can_allocate = self.block_manager.can_allocate(seq_group) + if can_allocate == AllocStatus.LATER: + break + elif can_allocate == AllocStatus.NEVER: + logger.warning( + "Input prompt (%d tokens) is too long" + " and exceeds the capacity of block_manager", + num_new_tokens) + for seq in waiting_seqs: + seq.status = SequenceStatus.FINISHED_IGNORED + ignored_seq_groups.append(seq_group) + waiting_queue.popleft() + continue + + lora_int_id = 0 + if self.lora_enabled: + lora_int_id = seq_group.lora_int_id + assert curr_loras is not None + assert self.lora_config is not None + if (self.lora_enabled and lora_int_id > 0 + and lora_int_id not in curr_loras + and len(curr_loras) >= self.lora_config.max_loras): + # We don't have a space for another LoRA, so + # we ignore this request for now. + leftover_waiting_sequences.appendleft(seq_group) + waiting_queue.popleft() + continue + + num_new_seqs = seq_group.get_max_num_running_seqs() + if (num_new_tokens == 0 + or not budget.can_schedule(num_new_tokens=num_new_tokens, + num_new_seqs=num_new_seqs)): + break + + # Can schedule this request. + if curr_loras is not None and lora_int_id > 0: + curr_loras.add(lora_int_id) + waiting_queue.popleft() + self._allocate_and_set_running(seq_group) + seq_groups.append( + ScheduledSequenceGroup(seq_group=seq_group, + token_chunk_size=num_new_tokens)) + budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_seqs(seq_group.request_id, num_new_seqs) + + # Queue requests that couldn't be scheduled. + waiting_queue.extendleft(leftover_waiting_sequences) + if len(seq_groups) > 0: + self.prev_prompt = True + + return waiting_queue, SchedulerPrefillOutputs( + seq_groups=seq_groups, + ignored_seq_groups=ignored_seq_groups, + num_lookahead_slots=self._get_num_lookahead_slots(is_prefill=True)) + + def _schedule_default(self) -> SchedulerOutputs: + """Schedule queued requests. + + The current policy is designed to optimize the throughput. 
First, + it batches as many prefill requests as possible. And it schedules + decodes. If there's a pressure on GPU memory, decode requests can + be swapped or preempted. + """ + # Include running requests to the budget. + budget = SchedulingBudget( + token_budget=self.scheduler_config.max_num_batched_tokens, + max_num_seqs=self.scheduler_config.max_num_seqs, + ) + # Make sure we include num running seqs before scheduling prefill, + # so that we don't schedule beyond max_num_seqs for prefill. + for seq_group in self.running: + budget.add_num_seqs(seq_group.request_id, + seq_group.get_max_num_running_seqs()) + curr_loras = set( + seq_group.lora_int_id + for seq_group in self.running) if self.lora_enabled else None + + remaining_waiting, prefills = (self.waiting, + SchedulerPrefillOutputs.create_empty()) + remaining_running, running_scheduled = ( + self.running, SchedulerRunningOutputs.create_empty()) + remaining_swapped, swapped_in = ( + self.swapped, SchedulerSwappedInOutputs.create_empty()) + + # If any requests are swapped, prioritize swapped requests. + if not self.swapped: + remaining_waiting, prefills = self._schedule_prefills( + self.waiting, budget, curr_loras, enable_chunking=False) + + fcfs_policy = PolicyFactory.get_policy(policy_name="fcfs") + # Don't schedule decodes if prefills are scheduled. + # NOTE: If `_schedule_prefills` doesn't enable chunking, self.running + # only contains decode requests, not chunked prefills. + if len(prefills.seq_groups) == 0: + remaining_running, running_scheduled = self._schedule_running( + self.running, + budget, + curr_loras, + fcfs_policy, + enable_chunking=False) + + # If any sequence group is preempted, do not swap in any sequence + # group, because it means there's no slot for new running requests. 
+ if len(running_scheduled.preempted) + len( + running_scheduled.swapped_out) == 0: + remaining_swapped, swapped_in = self._schedule_swapped( + self.swapped, budget, curr_loras, fcfs_policy) + + assert (budget.num_batched_tokens <= + self.scheduler_config.max_num_batched_tokens) + assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs + + # Update waiting requests. + self.waiting = remaining_waiting + self.waiting.extendleft(running_scheduled.preempted) + # Update new running requests. + self.running = remaining_running + self.running.extend([s.seq_group for s in prefills.seq_groups]) + self.running.extend( + [s.seq_group for s in running_scheduled.decode_seq_groups]) + self.running.extend( + [s.seq_group for s in swapped_in.decode_seq_groups]) + # Update swapped requests. + self.swapped = remaining_swapped + self.swapped.extend(running_scheduled.swapped_out) + + # There should be no prefill from running queue because this policy + # doesn't allow chunked prefills. + assert len(running_scheduled.prefill_seq_groups) == 0 + assert len(swapped_in.prefill_seq_groups) == 0 + return SchedulerOutputs( + scheduled_seq_groups=(prefills.seq_groups + + running_scheduled.decode_seq_groups + + swapped_in.decode_seq_groups), + num_prefill_groups=len(prefills.seq_groups), + num_batched_tokens=budget.num_batched_tokens, + blocks_to_swap_in=swapped_in.blocks_to_swap_in, + blocks_to_swap_out=running_scheduled.blocks_to_swap_out, + blocks_to_copy=merge_dicts(running_scheduled.blocks_to_copy, + swapped_in.blocks_to_copy), + ignored_seq_groups=prefills.ignored_seq_groups + + swapped_in.infeasible_seq_groups, + num_lookahead_slots=running_scheduled.num_lookahead_slots, + running_queue_size=len(self.running), + ) + + def _schedule_chunked_prefill(self): + """Schedule queued requests. + + Chunked prefill allows to chunk prefill requests, batch them together + with decode requests. This policy 1. schedule as many decoding requests + as possible. 2. 
schedule chunked prefill requests that are not + finished. 3. schedule swapped requests. 4. schedule new prefill + requests. + + The policy can sustain the high GPU utilization because it can put + prefill and decode requests to the same batch, while it improves + inter token latency because decode requests don't need to be blocked + by prefill requests. + """ + budget = SchedulingBudget( + token_budget=self.scheduler_config.max_num_batched_tokens, + max_num_seqs=self.scheduler_config.max_num_seqs, + ) + curr_loras: Set[int] = set() + + remaining_waiting, prefills = (self.waiting, + SchedulerPrefillOutputs.create_empty()) + remaining_running, running_scheduled = ( + self.running, SchedulerRunningOutputs.create_empty()) + remaining_swapped, swapped_in = ( + self.swapped, SchedulerSwappedInOutputs.create_empty()) + + # Decoding should be always scheduled first by fcfs. + fcfs_policy = PolicyFactory.get_policy(policy_name="fcfs") + remaining_running, running_scheduled = self._schedule_running( + self.running, + budget, + curr_loras, + fcfs_policy, + enable_chunking=True) + + # Schedule swapped out requests. + # If preemption happens, it means we don't have space for swap-in. + if len(running_scheduled.preempted) + len( + running_scheduled.swapped_out) == 0: + remaining_swapped, swapped_in = self._schedule_swapped( + self.swapped, budget, curr_loras, fcfs_policy) + + # Schedule new prefills. + remaining_waiting, prefills = self._schedule_prefills( + self.waiting, budget, curr_loras, enable_chunking=True) + + assert (budget.num_batched_tokens <= + self.scheduler_config.max_num_batched_tokens) + assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs + + # Update waiting requests. + self.waiting = remaining_waiting + self.waiting.extendleft(running_scheduled.preempted) + # Update new running requests. 
+ self.running = remaining_running + self.running.extend([s.seq_group for s in prefills.seq_groups]) + self.running.extend( + [s.seq_group for s in running_scheduled.decode_seq_groups]) + self.running.extend( + [s.seq_group for s in running_scheduled.prefill_seq_groups]) + self.running.extend( + [s.seq_group for s in swapped_in.decode_seq_groups]) + self.running.extend( + [s.seq_group for s in swapped_in.prefill_seq_groups]) + # Update swapped requests. + self.swapped = remaining_swapped + self.swapped.extend(running_scheduled.swapped_out) + return SchedulerOutputs( + scheduled_seq_groups=(prefills.seq_groups + + running_scheduled.prefill_seq_groups + + swapped_in.prefill_seq_groups + + running_scheduled.decode_seq_groups + + swapped_in.decode_seq_groups), + num_prefill_groups=(len(prefills.seq_groups) + + len(swapped_in.prefill_seq_groups) + + len(running_scheduled.prefill_seq_groups)), + num_batched_tokens=budget.num_batched_tokens, + blocks_to_swap_in=swapped_in.blocks_to_swap_in, + blocks_to_swap_out=running_scheduled.blocks_to_swap_out, + blocks_to_copy=merge_dicts(running_scheduled.blocks_to_copy, + swapped_in.blocks_to_copy), + ignored_seq_groups=prefills.ignored_seq_groups, + num_lookahead_slots=running_scheduled.num_lookahead_slots, + running_queue_size=len(self.running), + ) + + def _schedule(self) -> SchedulerOutputs: + """Schedule queued requests.""" + if self.scheduler_config.chunked_prefill_enabled: + return self._schedule_chunked_prefill() + else: + return self._schedule_default() + + def _can_append_slots(self, seq_group: SequenceGroup) -> bool: + """Determine whether or not we have enough space in the KV cache to + continue generation of the sequence group. + """ + # It is True only for testing case to trigger artificial preemption. 
+ if (self.enable_artificial_preemption + and random.uniform(0, 1) < ARTIFICIAL_PREEMPTION_PROB + and self.artificial_preempt_cnt > 0): + self.artificial_preempt_cnt -= 1 + return False + + # Appending slots only occurs in decoding. + is_prefill = False + + return self.block_manager.can_append_slots( + seq_group=seq_group, + num_lookahead_slots=self._get_num_lookahead_slots(is_prefill), + ) + + def schedule(self) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs]: + # Schedule sequence groups. + # This function call changes the internal states of the scheduler + # such as self.running, self.swapped, and self.waiting. + scheduler_outputs = self._schedule() + now = time.time() + + # Create input data structures. + seq_group_metadata_list: List[SequenceGroupMetadata] = [] + for i, scheduled_seq_group in enumerate( + scheduler_outputs.scheduled_seq_groups): + seq_group = scheduled_seq_group.seq_group + token_chunk_size = scheduled_seq_group.token_chunk_size + seq_group.maybe_set_first_scheduled_time(now) + + # seq_id -> SequenceData + seq_data: Dict[int, SequenceData] = {} + # seq_id -> physical block numbers + block_tables: Dict[int, List[int]] = {} + + for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): + seq_id = seq.seq_id + seq_data[seq_id] = seq.data + block_tables[seq_id] = self.block_manager.get_block_table(seq) + self.block_manager.access_all_blocks_in_seq(seq, now) + + common_computed_block_nums = ( + self.block_manager.get_common_computed_block_ids( + seq_group.get_seqs(status=SequenceStatus.RUNNING))) + + do_sample = True + if seq_group.is_prefill(): + seqs = seq_group.get_seqs() + # Prefill has only 1 sequence. + assert len(seqs) == 1 + # In the next iteration, all prompt tokens are not computed. + # It means the prefill is chunked, and we don't need sampling. + # NOTE: We use get_len instead of get_prompt_len because when + # a sequence is preempted, prefill includes previous generated + # output tokens. 
+ if (token_chunk_size + seqs[0].data.get_num_computed_tokens() < + seqs[0].data.get_len()): + do_sample = False + + # It assumes the scheduled_seq_groups is ordered by + # prefill < decoding. + is_prompt = seq_group.is_prefill() + seq_group_metadata = SequenceGroupMetadata( + request_id=seq_group.request_id, + is_prompt=is_prompt, + seq_data=seq_data, + sampling_params=seq_group.sampling_params, + block_tables=block_tables, + do_sample=do_sample, + token_chunk_size=token_chunk_size, + lora_request=seq_group.lora_request, + computed_block_nums=common_computed_block_nums, + state=seq_group.state, + # `multi_modal_data` will only be present for the 1st comm + # between engine and worker. + # the subsequent comms can still use delta, but + # `multi_modal_data` will be None. + multi_modal_data=seq_group.multi_modal_data + if scheduler_outputs.num_prefill_groups > 0 else None, + ) + seq_group_metadata_list.append(seq_group_metadata) + + # Now that the batch has been created, we can assume all blocks in the + # batch will have been computed before the next scheduling invocation. + # This is because the engine assumes that a failure in model execution + # will crash the vLLM instance / will not retry. 
+ for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups: + self.block_manager.mark_blocks_as_computed( + scheduled_seq_group.seq_group) + + return seq_group_metadata_list, scheduler_outputs + + def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) -> None: + self.block_manager.fork(parent_seq, child_seq) + + def free_seq(self, seq: Sequence) -> None: + """Free a sequence from a block table.""" + self.block_manager.free(seq) + + def free_finished_seq_groups(self) -> None: + self.running = deque(seq_group for seq_group in self.running + if not seq_group.is_finished()) + + def _allocate_and_set_running(self, seq_group: SequenceGroup) -> None: + self.block_manager.allocate(seq_group) + for seq in seq_group.get_seqs(status=SequenceStatus.WAITING): + seq.status = SequenceStatus.RUNNING + + def _append_slots( + self, + seq_group: SequenceGroup, + blocks_to_copy: Dict[int, List[int]], + ) -> None: + """Appends new slots to the sequences in the given sequence group. + + Args: + seq_group (SequenceGroup): The sequence group containing the + sequences to append slots to. + blocks_to_copy (Dict[int, List[int]]): A dictionary mapping source + block indices to lists of destination block indices. This + dictionary is updated with the new source and destination block + indices for the appended slots. + """ + num_lookahead_slots = self._get_num_lookahead_slots(is_prefill=False) + + for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): + cows = self.block_manager.append_slots(seq, num_lookahead_slots) + + for src, dests in cows.items(): + if src not in blocks_to_copy: + blocks_to_copy[src] = [] + blocks_to_copy[src].extend(dests) + + def _preempt( + self, + seq_group: SequenceGroup, + blocks_to_swap_out: Dict[int, int], + preemption_mode: Optional[PreemptionMode] = None, + ) -> PreemptionMode: + # If preemption mode is not specified, we determine the mode as follows: + # We use recomputation by default since it incurs lower overhead than + # swapping. 
However, when the sequence group has multiple sequences + # (e.g., beam search), recomputation is not currently supported. In + # such a case, we use swapping instead. + # FIXME(woosuk): This makes our scheduling policy a bit bizarre. + # As swapped sequences are prioritized over waiting sequences, + # sequence groups with multiple sequences are implicitly prioritized + # over sequence groups with a single sequence. + # TODO(woosuk): Support recomputation for sequence groups with multiple + # sequences. This may require a more sophisticated CUDA kernel. + if preemption_mode is None: + if seq_group.get_max_num_running_seqs() == 1: + preemption_mode = PreemptionMode.RECOMPUTE + else: + preemption_mode = PreemptionMode.SWAP + if preemption_mode == PreemptionMode.RECOMPUTE: + self._preempt_by_recompute(seq_group) + elif preemption_mode == PreemptionMode.SWAP: + self._preempt_by_swap(seq_group, blocks_to_swap_out) + else: + raise AssertionError("Invalid preemption mode.") + return preemption_mode + + def _preempt_by_recompute( + self, + seq_group: SequenceGroup, + ) -> None: + seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING) + assert len(seqs) == 1 + for seq in seqs: + seq.status = SequenceStatus.WAITING + self.free_seq(seq) + seq.reset_state_for_recompute() + + def _preempt_by_swap( + self, + seq_group: SequenceGroup, + blocks_to_swap_out: Dict[int, int], + ) -> None: + self._swap_out(seq_group, blocks_to_swap_out) + + def _swap_in( + self, + seq_group: SequenceGroup, + blocks_to_swap_in: Dict[int, int], + ) -> None: + mapping = self.block_manager.swap_in(seq_group) + blocks_to_swap_in.update(mapping) + for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED): + seq.status = SequenceStatus.RUNNING + + def _swap_out( + self, + seq_group: SequenceGroup, + blocks_to_swap_out: Dict[int, int], + ) -> None: + if not self.block_manager.can_swap_out(seq_group): + # FIXME(woosuk): Abort the sequence group instead of aborting the + # entire engine. 
+ raise RuntimeError( + "Aborted due to the lack of CPU swap space. Please increase " + "the swap space to avoid this error.") + mapping = self.block_manager.swap_out(seq_group) + blocks_to_swap_out.update(mapping) + for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): + seq.status = SequenceStatus.SWAPPED + + def _passed_delay(self, now: float) -> bool: + if self.prev_prompt: + self.last_prompt_latency = now - self.prev_time + self.prev_time, self.prev_prompt = now, False + # Delay scheduling prompts to let waiting queue fill up + if self.scheduler_config.delay_factor > 0 and self.waiting: + earliest_arrival_time = min( + [e.metrics.arrival_time for e in self.waiting]) + passed_delay = ( + (now - earliest_arrival_time) > + (self.scheduler_config.delay_factor * self.last_prompt_latency) + or not self.running) + else: + passed_delay = True + return passed_delay + + def _get_num_lookahead_slots(self, is_prefill: bool) -> int: + """The number of slots to allocate per sequence per step, beyond known + token ids. Speculative decoding uses these slots to store KV activations + of tokens which may or may not be accepted. + + Speculative decoding does not yet support prefill, so we do not perform + lookahead allocation for prefill. + """ + if is_prefill: + return 0 + + return self.scheduler_config.num_lookahead_slots + + def _get_num_new_tokens(self, seq_group: SequenceGroup, + status: SequenceStatus, enable_chunking: bool, + budget: SchedulingBudget) -> int: + """Get the next new tokens to compute for a given sequence group + that's in a given `status`. + + The API could chunk the number of tokens to compute based on `budget` + if `enable_chunking` is True. If a sequence group has multiple + sequences (e.g., running beam search), it means it is in decoding + phase, so chunking doesn't happen. + + Returns 0 if the new token cannot be computed due to token budget. 
+ """ + num_new_tokens = 0 + seqs = seq_group.get_seqs(status=status) + for seq in seqs: + num_new_tokens += seq.get_num_new_tokens() + assert num_new_tokens > 0 + # Chunk if a running request cannot fit in. + # If number of seq > 1, it means it is doing beam search in a + # decode phase. Do not chunk in that case. + if enable_chunking and len(seqs) == 1: + num_new_tokens = min(num_new_tokens, + budget.remaining_token_budget()) + return num_new_tokens diff --git a/vllm/distributed/__init__.py b/vllm/distributed/__init__.py new file mode 100644 index 0000000..db325cf --- /dev/null +++ b/vllm/distributed/__init__.py @@ -0,0 +1,3 @@ +from .communication_op import * +from .parallel_state import * +from .utils import * diff --git a/vllm/distributed/communication_op.py b/vllm/distributed/communication_op.py new file mode 100644 index 0000000..69db75b --- /dev/null +++ b/vllm/distributed/communication_op.py @@ -0,0 +1,237 @@ +from collections import namedtuple +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch.distributed import ProcessGroup + +from .parallel_state import (get_cpu_world_group, + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + is_pynccl_enabled_for_all_reduce) + + +def tensor_model_parallel_all_reduce(input_: torch.Tensor) -> torch.Tensor: + """All-reduce the input tensor across model parallel group. + + NOTE: This operation will be applied in-place on the input tensor if + disable_custom_all_reduce is set to True. Otherwise, this operation may or + may not be applied in place depending on whether custom all reduce is + invoked for a particular tensor, which further depends on the tensor size + and GPU topology. + + TLDR: always assume this function modifies its input, but use the return + value as the output. 
+ """ + from vllm.distributed.device_communicators import pymccl_utils + from vllm.distributed.device_communicators.custom_all_reduce import ( + custom_all_reduce) + + # Bypass the function if we are using only 1 GPU. + if get_tensor_model_parallel_world_size() == 1: + return input_ + out = custom_all_reduce(input_) + if out is not None: + return out + if is_pynccl_enabled_for_all_reduce(): + pymccl_utils.all_reduce(input_) + else: + torch.distributed.all_reduce(input_, + group=get_tensor_model_parallel_group()) + return input_ + + +def tensor_model_parallel_all_gather(input_: torch.Tensor, + dim: int = -1) -> torch.Tensor: + """All-gather the input tensor across model parallel group.""" + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + assert -input_.dim() <= dim < input_.dim(), ( + f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") + if dim < 0: + # Convert negative dim to positive. + dim += input_.dim() + input_size = input_.size() + # Allocate output tensor. + output_tensor = torch.empty((world_size, ) + input_size, + dtype=input_.dtype, + device=input_.device) + # All-gather. + torch.distributed.all_gather_into_tensor( + output_tensor, input_, group=get_tensor_model_parallel_group()) + # Reshape + output_tensor = output_tensor.movedim(0, dim) + output_tensor = output_tensor.reshape(input_size[:dim] + + (world_size * input_size[dim], ) + + input_size[dim + 1:]) + return output_tensor + + +def tensor_model_parallel_gather(input_: torch.Tensor, + dst: int = 0, + dim: int = -1) -> torch.Tensor: + """Gather the input tensor across model parallel group. + + NOTE: We assume that the input tensor is on the same device across + all the ranks. + """ + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. 
+ if world_size == 1: + return input_ + assert -input_.dim() <= dim < input_.dim(), ( + f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") + if dim < 0: + # Convert negative dim to positive. + dim += input_.dim() + # Allocate output tensor. + if get_tensor_model_parallel_rank() == dst: + gather_list = [torch.empty_like(input_) for _ in range(world_size)] + else: + gather_list = None + # Gather. + torch.distributed.gather(input_, + gather_list, + dst=dst, + group=get_tensor_model_parallel_group()) + if get_tensor_model_parallel_rank() == dst: + output_tensor = torch.cat(gather_list, dim=dim) + else: + output_tensor = None + return output_tensor + + +def broadcast(input_: torch.Tensor, + src: int = 0, + group: Optional[ProcessGroup] = None): + """Broadcast the input tensor.""" + group = group or torch.distributed.group.WORLD + ranks = torch.distributed.get_process_group_ranks(group) + assert src in ranks, f"Invalid src rank ({src})" + + # Bypass the function if we are using only 1 GPU. + world_size = torch.distributed.get_world_size(group=group) + if world_size == 1: + return input_ + # Broadcast. + torch.distributed.broadcast(input_, src=src, group=group) + return input_ + + +def broadcast_object_list(obj_list: List[Any], + src: int = 0, + group: Optional[ProcessGroup] = None): + """Broadcast the input object list.""" + group = group or torch.distributed.group.WORLD + ranks = torch.distributed.get_process_group_ranks(group) + assert src in ranks, f"Invalid src rank ({src})" + + # Bypass the function if we are using only 1 GPU. + world_size = torch.distributed.get_world_size(group=group) + if world_size == 1: + return obj_list + # Broadcast. 
+ torch.distributed.broadcast_object_list(obj_list, src=src, group=group) + return obj_list + + +TensorMetadata = namedtuple("TensorMetadata", ["dtype", "size"]) + + +def _split_tensor_dict( + tensor_dict: Dict[Any, Union[torch.Tensor, Any]] +) -> Tuple[List[Tuple[str, Any]], List[torch.Tensor]]: + """Split the tensor dictionary into two parts: + 1. A list of (key, value) pairs. If the value is a tensor, it is replaced + by its metadata. + 2. A list of tensors. + """ + metadata_list = [] + tensor_list = [] + for key, value in tensor_dict.items(): + if isinstance(value, torch.Tensor): + # Note(youkaichao): currently this only supports broadcasting + # tensors on musa. In the future, we can add device as a field in + # TensorMetadata to support broadcasting tensors on different + # devices. + assert value.is_musa, ( + f"Tensor {key}: {value} is not on musa. Currently we only " + f"support broadcasting tensors on musa.") + metadata_list.append((key, TensorMetadata(value.dtype, + value.size()))) + tensor_list.append(value) + else: + metadata_list.append((key, value)) + return metadata_list, tensor_list + + +def broadcast_tensor_dict( + tensor_dict: Optional[Dict[Any, Union[torch.Tensor, Any]]] = None, + src: int = 0, + group: Optional[ProcessGroup] = None, + metadata_group: Optional[ProcessGroup] = None +) -> Optional[Dict[Any, Union[torch.Tensor, Any]]]: + """Broadcast the input tensor dictionary. + `group` is used to broadcast the tensors, while `metadata_group` is used + to broadcast the metadata of the dict (e.g. dict structure, tensor sizes, + dtypes). + """ + group = group or torch.distributed.group.WORLD + metadata_group = metadata_group or get_cpu_world_group() + ranks = torch.distributed.get_process_group_ranks(group) + assert src in ranks, f"Invalid src rank ({src})" + + # Bypass the function if we are using only 1 GPU. 
+ world_size = torch.distributed.get_world_size(group=group) + if world_size == 1: + return tensor_dict + + rank = torch.distributed.get_rank() + if rank == src: + metadata_list: List[Tuple[Any, Any]] = [] + assert isinstance( + tensor_dict, + dict), (f"Expecting a dictionary, got {type(tensor_dict)}") + metadata_list, tensor_list = _split_tensor_dict(tensor_dict) + # `metadata_list` lives in CPU memory. + # `broadcast_object_list` involves serialization and deserialization, + # all happening on CPU. Therefore, we can use the CPU group. + torch.distributed.broadcast_object_list([metadata_list], + src=src, + group=metadata_group) + async_handles = [] + for tensor in tensor_list: + async_handles.append( + torch.distributed.broadcast(tensor, + src=src, + group=group, + async_op=True)) + for async_handle in async_handles: + async_handle.wait() + + else: + recv_metadata_list = [None] + torch.distributed.broadcast_object_list(recv_metadata_list, + src=src, + group=metadata_group) + assert recv_metadata_list[0] is not None + tensor_dict = {} + async_handles = [] + for key, value in recv_metadata_list[0]: + if isinstance(value, TensorMetadata): + tensor = torch.empty(value.size, + dtype=value.dtype, + device="musa") + async_handle = torch.distributed.broadcast(tensor, + src=src, + async_op=True, + group=group) + async_handles.append(async_handle) + tensor_dict[key] = tensor + else: + tensor_dict[key] = value + for async_handle in async_handles: + async_handle.wait() + return tensor_dict diff --git a/vllm/distributed/device_communicators/__init__.py b/vllm/distributed/device_communicators/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py new file mode 100644 index 0000000..7d0cf17 --- /dev/null +++ b/vllm/distributed/device_communicators/custom_all_reduce.py @@ -0,0 +1,274 @@ +from contextlib import contextmanager +from typing import 
Any, List, Optional + +import torch +import torch.distributed as dist + +import vllm.envs as envs +from vllm.logger import init_logger + +try: + import pynvml + + from vllm_C import custom_ar +except ImportError: + # For AMD GPUs + custom_ar = None + pynvml = None + +logger = init_logger(__name__) + +_CA_HANDLE: Optional["CustomAllreduce"] = None +_IS_CAPTURING = False +_SUPPORTED_WORLD_SIZES = [2, 4, 6, 8] + + +def init_custom_ar() -> None: + from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) + + global _CA_HANDLE + if _CA_HANDLE is not None: + return + rank = get_tensor_model_parallel_rank() + world_size = get_tensor_model_parallel_world_size() + if world_size == 1: + # No need to initialize custom allreduce for single GPU case. + return + + if world_size not in _SUPPORTED_WORLD_SIZES: + logger.warning( + "Custom allreduce is disabled due to an unsupported world size: " + "%d. Supported world sizes: %s. To silence this warning, specify" + " disable_custom_all_reduce=True explicitly.", world_size, + str(_SUPPORTED_WORLD_SIZES)) + return + num_dev = torch.musa.device_count() + # note: num dev can be larger than world_size if we're only using + # first few GPUs + if num_dev < world_size: + logger.warning( + "Cannot test GPU P2P because not all GPUs are visible to the " + "current process. This might be the case if 'CUDA_VISIBLE_DEVICES'" + " is set.") + return + # test nvlink first, this will filter out most of the cases + # where custom allreduce is not supported + cuda_visible_devices = envs.CUDA_VISIBLE_DEVICES + if cuda_visible_devices: + device_ids = list(map(int, cuda_visible_devices.split(","))) + else: + device_ids = list(range(num_dev)) + # this checks hardware and driver support for NVLink + full_nvlink = _is_full_nvlink(device_ids) + if world_size > 2 and not full_nvlink: + logger.warning( + "Custom allreduce is disabled because it's not supported on more" + " than two PCIe-only GPUs. 
To silence this warning, specify" + " disable_custom_all_reduce=True explicitly.") + return + # test P2P capability, this checks software/cudaruntime support + # this is expensive to compute at the first time + # then we cache the result + if not _can_p2p(rank, world_size): + logger.warning( + "Custom allreduce is disabled because your platform lacks GPU P2P" + " capability or P2P test failed. To silence this warning, specify" + " disable_custom_all_reduce=True explicitly.") + return + _CA_HANDLE = CustomAllreduce(rank, world_size, full_nvlink) + + +def begin_capture() -> None: + global _IS_CAPTURING + _IS_CAPTURING = True + + +def end_capture() -> None: + global _IS_CAPTURING + _IS_CAPTURING = False + + +def is_capturing() -> bool: + return _IS_CAPTURING and _CA_HANDLE is not None + + +def get_handle() -> Optional["CustomAllreduce"]: + return _CA_HANDLE + + +def is_initialized() -> bool: + return _CA_HANDLE is not None + + +@contextmanager +def capture(): + try: + begin_capture() + yield + finally: + end_capture() + handle = get_handle() + if handle is not None: + handle.register_graph_buffers() + + +def custom_all_reduce(input: torch.Tensor) -> Optional[torch.Tensor]: + ca_handle = get_handle() + # when custom allreduce is disabled, this will be None + if ca_handle is None: + return None + if is_capturing(): + if torch.cuda.is_current_stream_capturing(): + if ca_handle.should_custom_ar(input): + return ca_handle.all_reduce_reg(input) + else: + if ca_handle.should_custom_ar(input): + # if warm up, mimic the allocation pattern + # since custom allreduce is out-of-place + return torch.empty_like(input) + else: + # note: outside of cuda graph context, + # custom allreduce incurs a cost of cudaMemcpy, which should + # be small(<=1% of overall latency) compared to the performance + # gains of using custom kernels + if ca_handle.should_custom_ar(input): + return ca_handle.all_reduce_unreg(input) + + return None + + +@contextmanager +def _nvml(): + try: + 
pynvml.nvmlInit() + yield + finally: + pynvml.nvmlShutdown() + + +@_nvml() +def _is_full_nvlink(device_ids: List[int]) -> bool: + """ + query if the set of gpus are fully connected by nvlink (1 hop) + Note that `pynvml` is not affected by `CUDA_VISIBLE_DEVICES`, + so it works on real physical device ids. + """ + handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in device_ids] + for i, handle in enumerate(handles): + for j, peer_handle in enumerate(handles): + if i < j: + try: + p2p_status = pynvml.nvmlDeviceGetP2PStatus( + handle, peer_handle, pynvml.NVML_P2P_CAPS_INDEX_NVLINK) + if p2p_status != pynvml.NVML_P2P_STATUS_OK: + return False + except pynvml.NVMLError as error: + logger.error( + "NVLink detection failed. This is normal if your" + " machine has no NVLink equipped.", + exc_info=error) + return False + return True + + +def _can_p2p(rank: int, world_size: int) -> bool: + from vllm.distributed.utils import gpu_p2p_access_check + for i in range(world_size): + if i == rank: + continue + if not gpu_p2p_access_check(rank, i): + return False + return True + + +class CustomAllreduce: + + # max_size: max supported allreduce size + def __init__(self, + rank, + world_size, + full_nvlink, + max_size=8192 * 1024) -> None: + # buffers memory are owned by this Python class and passed to C++ + # meta data composes of two parts: meta data for synchronization + # (256 bytes) and a temporary buffer for storing intermediate + # allreduce results. + self.meta = torch.zeros(custom_ar.meta_size() + max_size, + dtype=torch.uint8, + device="musa") + # This is a pre-registered IPC buffer. In eager mode, input tensors + # are first copied into this buffer before allreduce is performed + self.buffer = torch.empty(max_size, dtype=torch.uint8, device="musa") + # This is a buffer for storing the tuples of pointers pointing to + # IPC buffers from all ranks. Each registered tuple has size of + # 8*world_size bytes where world_size is at most 8. 
Allocating 8MB + # is enough for 131072 such tuples. The largest model I've seen only + # needs less than 10000 of registered tuples. + self.rank_data = torch.empty(8 * 1024 * 1024, + dtype=torch.uint8, + device="musa") + self.max_size = max_size + self.world_size = world_size + handles, offsets = self._get_ipc_meta(self.meta) + self.full_nvlink = full_nvlink + self._ptr = custom_ar.init_custom_ar(self.meta, self.rank_data, + handles, offsets, rank, + self.full_nvlink) + self.register_buffer(self.buffer) + + def _get_ipc_meta(self, inp: torch.Tensor): + data = inp.untyped_storage()._share_cuda_() + shard_data = ( + data[1], # ipc handle to base ptr + data[3], # offset of base ptr + ) + return self._gather_ipc_meta(shard_data) + + def _gather_ipc_meta(self, shard_data): + all_data: List[Optional[Any]] = [None] * self.world_size + dist.all_gather_object(all_data, shard_data) + + handles = [] + offsets = [] + for i in range(len(all_data)): + handles.append(all_data[i][0]) # type: ignore + offsets.append(all_data[i][1]) # type: ignore + return handles, offsets + + def register_buffer(self, inp: torch.Tensor): + handles, offsets = self._get_ipc_meta(inp) + custom_ar.register_buffer(self._ptr, inp, handles, offsets) + + def register_graph_buffers(self): + handle, offset = custom_ar.get_graph_buffer_ipc_meta(self._ptr) + handles, offsets = self._gather_ipc_meta((bytes(handle), offset)) + logger.info("Registering %d cuda graph addresses", len(offset)) + custom_ar.register_graph_buffers(self._ptr, handles, offsets) + + def should_custom_ar(self, inp: torch.Tensor): + return custom_ar.should_custom_ar(inp, self.max_size, self.world_size, + self.full_nvlink) + + # all reduce, assuming inp tensor is IPC registered with register_buffer, + # or, in the context of cuda graphs, register_graph_buffers + def all_reduce_reg(self, inp: torch.Tensor, out: torch.Tensor = None): + if out is None: + out = torch.empty_like(inp) + custom_ar.all_reduce_reg(self._ptr, inp, out) + return out + 
+ # all reduce, assuming inp tensor is NOT IPC registered + def all_reduce_unreg(self, inp: torch.Tensor, out: torch.Tensor = None): + if out is None: + out = torch.empty_like(inp) + custom_ar.all_reduce_unreg(self._ptr, inp, self.buffer, out) + return out + + def close(self): + if self._ptr: + custom_ar.dispose(self._ptr) + self._ptr = 0 + + def __del__(self): + self.close() diff --git a/vllm/distributed/device_communicators/pymccl.py b/vllm/distributed/device_communicators/pymccl.py new file mode 100644 index 0000000..981d1e8 --- /dev/null +++ b/vllm/distributed/device_communicators/pymccl.py @@ -0,0 +1,284 @@ +# This file is a pure Python wrapper for the MCCL library. +# The main purpose is to use MCCL combined with MUSA graph. +# Before writing this script, we tried the following approach: +# 1. We tried to use `cupy`, it calls MCCL correctly, but `cupy` itself +# often gets stuck when initializing the MCCL communicator. +# 2. We tried to use `torch.distributed`, but `torch.distributed.all_reduce` +# contains many other potential musa APIs, that are not allowed during +# capturing the MUSA graph. For further details, please check +# https://discuss.pytorch.org/t/pytorch-musagraph-with-mccl-operation-failed/ . +# +# Another rejected idea is to write a C/C++ binding for MCCL. It is usually +# doable, but we often encounter issues related with mccl versions, and need +# to switch between different versions of MCCL. See +# https://github.com/NVIDIA/mccl/issues/1234 for more details. +# A C/C++ binding is not flexible enough to handle this. It requires +# recompilation of the code every time we want to switch between different +# versions. This current implementation, with a **pure** Python wrapper, is +# more flexible. We can easily switch between different versions of MCCL by +# changing the environment variable `VLLM_MCCL_SO_PATH`, or the `so_file` +# variable in the code. 
+ +import ctypes +import platform +from typing import Optional, Union + +# ===================== import region ===================== +import torch +import torch_musa +import torch.distributed as dist +from torch.distributed import ProcessGroup, ReduceOp + +from vllm.distributed.parallel_state import get_cpu_world_group, get_local_rank +from vllm.logger import init_logger +from vllm.utils import find_mccl_library, mccl_integrity_check + +logger = init_logger(__name__) + +so_file = find_mccl_library() + +try: + # load the library in another process. + # if it core dumps, it will not crash the current process + mccl_integrity_check(so_file) + mccl = ctypes.CDLL(so_file) +except Exception as e: + logger.error( + "Failed to load MCCL library from %s ." + "It is expected if you are not running on NVIDIA/AMD GPUs." + "Otherwise, the mccl library might not exist, be corrupted " + "or it does not support the current platform %s." + "One solution is to download libmccl2 version 2.18 from " + "https://developer.download.nvidia.com/compute/musa/repos/ " + "and extract the libmccl.so.2 file. 
If you already have the " + "library, please set the environment variable VLLM_MCCL_SO_PATH" + " to point to the correct mccl library path.", so_file, + platform.platform()) + raise e + +# === export types and functions from mccl to Python === +# for the original mccl definition, please check +# https://github.com/NVIDIA/mccl/blob/master/src/mccl.h.in + +mcclResult_t = ctypes.c_int + +_c_mcclGetErrorString = mccl.mcclGetErrorString +_c_mcclGetErrorString.restype = ctypes.c_char_p +_c_mcclGetErrorString.argtypes = [mcclResult_t] + + +def MCCL_CHECK(result: mcclResult_t) -> None: + if result != 0: + error_str = _c_mcclGetErrorString(result) + error_str = error_str.decode("utf-8") + raise RuntimeError(f"MCCL error: {error_str}") + + +# equivalent to c declaration: +# mcclResult_t mcclGetVersion(int *version); +_c_mcclGetVersion = mccl.mcclGetVersion +_c_mcclGetVersion.restype = ctypes.c_int +_c_mcclGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int)] + + +def mcclGetVersion() -> str: + version = ctypes.c_int() + MCCL_CHECK(_c_mcclGetVersion(ctypes.byref(version))) + version_str = str(version.value) + return version_str + + +class McclUniqueId(ctypes.Structure): + _fields_ = [("internal", ctypes.c_byte * 128)] + + +# equivalent to c declaration: +# mcclResult_t mcclGetUniqueId(mcclUniqueId* uniqueId); +_c_mcclGetUniqueId = mccl.mcclGetUniqueId +_c_mcclGetUniqueId.restype = ctypes.c_int +_c_mcclGetUniqueId.argtypes = [ctypes.POINTER(McclUniqueId)] + + +def mcclGetUniqueId() -> McclUniqueId: + unique_id = McclUniqueId() + MCCL_CHECK(_c_mcclGetUniqueId(ctypes.byref(unique_id))) + return unique_id + + +# equivalent to c declaration: +# mcclResult_t mcclCommInitRank( +# mcclComm_t* comm, int nranks, mcclUniqueId commId, int rank); +# note that mcclComm_t is a pointer type, so the first argument +# is a pointer to a pointer +_c_mcclCommInitRank = mccl.mcclCommInitRank +_c_mcclCommInitRank.restype = ctypes.c_int +_c_mcclCommInitRank.argtypes = [ + 
ctypes.POINTER(ctypes.c_void_p), ctypes.c_int, McclUniqueId, ctypes.c_int +] + +mcclDataType_t = ctypes.c_int + + +class mcclDataTypeEnum: + mcclInt8 = 0 + mcclChar = 0 + mcclUint8 = 1 + mcclInt32 = 2 + mcclInt = 2 + mcclUint32 = 3 + mcclInt64 = 4 + mcclUint64 = 5 + mcclFloat16 = 6 + mcclHalf = 6 + mcclFloat32 = 7 + mcclFloat = 7 + mcclFloat64 = 8 + mcclDouble = 8 + mcclBfloat16 = 9 + mcclNumTypes = 10 + + @classmethod + def from_torch(cls, dtype: torch.dtype) -> int: + if dtype == torch.int8: + return cls.mcclInt8 + if dtype == torch.uint8: + return cls.mcclUint8 + if dtype == torch.int32: + return cls.mcclInt32 + if dtype == torch.int64: + return cls.mcclInt64 + if dtype == torch.float16: + return cls.mcclFloat16 + if dtype == torch.float32: + return cls.mcclFloat32 + if dtype == torch.float64: + return cls.mcclFloat64 + if dtype == torch.bfloat16: + return cls.mcclBfloat16 + raise ValueError(f"Unsupported dtype: {dtype}") + + +mcclRedOp_t = ctypes.c_int + + +class mcclRedOpTypeEnum: + mcclSum = 0 + mcclProd = 1 + mcclMax = 2 + mcclMin = 3 + mcclAvg = 4 + mcclNumOps = 5 + + @classmethod + def from_torch(cls, op: ReduceOp) -> int: + if op == ReduceOp.SUM: + return cls.mcclSum + if op == ReduceOp.PRODUCT: + return cls.mcclProd + if op == ReduceOp.MAX: + return cls.mcclMax + if op == ReduceOp.MIN: + return cls.mcclMin + if op == ReduceOp.AVG: + return cls.mcclAvg + raise ValueError(f"Unsupported op: {op}") + + +# equivalent to c declaration: +# mcclResult_t mcclAllReduce( +# const void* sendbuff, void* recvbuff, size_t count, +# mcclDataType_t datatype, mcclRedOp_t op, mcclComm_t comm, +# udaStream_t stream); +# note that musaStream_t is a pointer type, so the last argument is a pointer +_c_mcclAllReduce = mccl.mcclAllReduce +_c_mcclAllReduce.restype = ctypes.c_int +_c_mcclAllReduce.argtypes = [ + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, mcclRedOp_t, + mcclDataType_t, ctypes.c_void_p, ctypes.c_void_p +] + +# be cautious! 
this is a collective call, it will block until all +# processes in the communicator have called this function. +# because Python object destruction can happen in random order, +# it is better not to call it at all. +# equivalent to c declaration: +# mcclResult_t mcclCommDestroy(mcclComm_t comm); +_c_mcclCommDestroy = mccl.mcclCommDestroy +_c_mcclCommDestroy.restype = ctypes.c_int +_c_mcclCommDestroy.argtypes = [ctypes.c_void_p] + + +class MCCLCommunicator: + + def __init__( + self, + group: Optional[ProcessGroup] = None, + device: Optional[Union[int, str, torch.device]] = None, + ): + """ + Args: + group: the process group to work on. If None, it will use the + default process group. + device: the device to bind the MCCLCommunicator to. If None, + it will be bind to f"musa:{local_rank}". + It is the caller's responsibility to make sure each communicator + is bind to a unique device. + """ + assert dist.is_initialized() + group = get_cpu_world_group() if group is None else group + assert dist.get_backend(group) != dist.Backend.MCCL, ( + "MCCLCommunicator should be attached to a non-MCCL group.") + self.group = group + # note: this rank is the rank in the group + self.rank = dist.get_rank(group) + self.world_size = dist.get_world_size(group) + if self.rank == 0: + self.unique_id = mcclGetUniqueId() + else: + self.unique_id = McclUniqueId() + tensor = torch.ByteTensor(list(self.unique_id.internal)) + ranks = dist.get_process_group_ranks(group) + # arg `src` in `broadcast` is the global rank + dist.broadcast(tensor, src=ranks[0], group=group) + byte_list = tensor.tolist() + for i, byte in enumerate(byte_list): + self.unique_id.internal[i] = byte + self.comm = ctypes.c_void_p() + if device is None: + local_rank = get_local_rank() + device = torch.device(f"musa:{local_rank}") + elif isinstance(device, int): + device = torch.device(f"musa:{device}") + elif isinstance(device, str): + device = torch.device(device) + # now `device` is a `torch.device` object + assert 
isinstance(device, torch.device) + self.device = device + # mccl communicator and stream will use this device + # `torch.musa.device` is a context manager that changes the + # current musa device to the specified one + with torch.musa.device(device): + MCCL_CHECK( + _c_mcclCommInitRank(ctypes.byref(self.comm), self.world_size, + self.unique_id, self.rank)) + self.stream = torch.musa.Stream() + + def all_reduce(self, + tensor: torch.Tensor, + op: ReduceOp = ReduceOp.SUM, + stream=None): + # mccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert tensor.device == self.device, ( + f"this mccl communicator is created to work on {self.device}, " + f"but the input tensor is on {tensor.device}") + if stream is None: + stream = self.stream + MCCL_CHECK( + _c_mcclAllReduce(ctypes.c_void_p(tensor.data_ptr()), + ctypes.c_void_p(tensor.data_ptr()), + tensor.numel(), + mcclDataTypeEnum.from_torch(tensor.dtype), + mcclRedOpTypeEnum.from_torch(op), self.comm, + ctypes.c_void_p(stream.musa_stream))) diff --git a/vllm/distributed/device_communicators/pymccl_utils.py b/vllm/distributed/device_communicators/pymccl_utils.py new file mode 100644 index 0000000..13ee525 --- /dev/null +++ b/vllm/distributed/device_communicators/pymccl_utils.py @@ -0,0 +1,66 @@ +import contextlib +from typing import Optional + +import torch +from torch.distributed import ProcessGroup, ReduceOp + +from vllm.logger import init_logger + +logger = init_logger(__name__) + +try: + from vllm.distributed.device_communicators.pymccl import (MCCLCommunicator, + mcclGetVersion) +except Exception as e: + # in non-MTHREADS environments, we can't import the mccl module + # e.g. 
when running on machines with AMD GPUs + logger.info("Failed to import MCCL library: %s", e) + logger.info("It is expected if you are not running on Mthreads GPUs.") + pass + +comm: Optional["MCCLCommunicator"] = None + + +def is_initialized() -> bool: + """Returns whether the NCCL backend is initialized.""" + return comm is not None + + +@contextlib.contextmanager +def set_pymccl_stream(stream: torch.cuda.Stream): + """Set the cuda stream for communication""" + try: + assert comm is not None + comm.stream = stream + yield + finally: + pass + + +def init_process_group(group: Optional[ProcessGroup] = None) -> None: + assert not is_initialized() + global comm + logger.info("vLLM is using nccl==%s", mcclGetVersion()) + comm = MCCLCommunicator(group=group) + + +def all_reduce(input_: torch.Tensor, op=ReduceOp.SUM) -> None: + """All-reduces the input tensor across the process group.""" + assert input_.is_musa, f"{input_} should be a musa tensor" + assert comm is not None + comm.all_reduce(input_, op) + + +def destroy_process_group() -> None: + global comm + comm = None + + +def get_world_size() -> int: + """Returns the world size.""" + assert comm is not None + return comm.world_size + + +def get_nccl_backend() -> Optional["MCCLCommunicator"]: + return comm diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py new file mode 100644 index 0000000..7589943 --- /dev/null +++ b/vllm/distributed/device_communicators/pynccl.py @@ -0,0 +1,287 @@ +# This file is a pure Python wrapper for the NCCL library. +# The main purpose is to use NCCL combined with CUDA graph. +# Before writing this script, we tried the following approach: +# 1. We tried to use `cupy`, it calls NCCL correctly, but `cupy` itself +# often gets stuck when initializing the NCCL communicator. +# 2. 
We tried to use `torch.distributed`, but `torch.distributed.all_reduce` +# contains many other potential cuda APIs, that are not allowed during +# capturing the CUDA graph. For further details, please check +# https://discuss.pytorch.org/t/pytorch-cudagraph-with-nccl-operation-failed/ . +# +# Another rejected idea is to write a C/C++ binding for NCCL. It is usually +# doable, but we often encounter issues related with nccl versions, and need +# to switch between different versions of NCCL. See +# https://github.com/NVIDIA/nccl/issues/1234 for more details. +# A C/C++ binding is not flexible enough to handle this. It requires +# recompilation of the code every time we want to switch between different +# versions. This current implementation, with a **pure** Python wrapper, is +# more flexible. We can easily switch between different versions of NCCL by +# changing the environment variable `VLLM_NCCL_SO_PATH`, or the `so_file` +# variable in the code. + +import ctypes +import platform +from typing import Optional, Union + +# ===================== import region ===================== +import torch +import torch.distributed as dist +from torch.distributed import ProcessGroup, ReduceOp + +from vllm.distributed.parallel_state import get_cpu_world_group, get_local_rank +from vllm.logger import init_logger +from vllm.utils import find_nccl_library, nccl_integrity_check + +logger = init_logger(__name__) + +so_file = find_nccl_library() + +try: + # load the library in another process. + # if it core dumps, it will not crash the current process + nccl_integrity_check(so_file) + nccl = ctypes.CDLL(so_file) +except Exception as e: + logger.error( + "Failed to load NCCL library from %s ." + "It is expected if you are not running on NVIDIA/AMD GPUs." + "Otherwise, the nccl library might not exist, be corrupted " + "or it does not support the current platform %s." 
+ "One solution is to download libnccl2 version 2.18 from " + "https://developer.download.nvidia.com/compute/cuda/repos/ " + "and extract the libnccl.so.2 file. If you already have the " + "library, please set the environment variable VLLM_NCCL_SO_PATH" + " to point to the correct nccl library path.", so_file, + platform.platform()) + raise e + +# === export types and functions from nccl to Python === +# for the original nccl definition, please check +# https://github.com/NVIDIA/nccl/blob/master/src/nccl.h.in + +ncclResult_t = ctypes.c_int + +_c_ncclGetErrorString = nccl.ncclGetErrorString +_c_ncclGetErrorString.restype = ctypes.c_char_p +_c_ncclGetErrorString.argtypes = [ncclResult_t] + + +def NCCL_CHECK(result: ncclResult_t) -> None: + if result != 0: + error_str = _c_ncclGetErrorString(result) + error_str = error_str.decode("utf-8") + raise RuntimeError(f"NCCL error: {error_str}") + + +# equivalent to c declaration: +# ncclResult_t ncclGetVersion(int *version); +_c_ncclGetVersion = nccl.ncclGetVersion +_c_ncclGetVersion.restype = ctypes.c_int +_c_ncclGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int)] + + +def ncclGetVersion() -> str: + version = ctypes.c_int() + NCCL_CHECK(_c_ncclGetVersion(ctypes.byref(version))) + # something like 21903 --> "2.19.3" + version_str = str(version.value) + major = version_str[0].lstrip("0") + minor = version_str[1:3].lstrip("0") + patch = version_str[3:].lstrip("0") + return f"{major}.{minor}.{patch}" + + +class NcclUniqueId(ctypes.Structure): + _fields_ = [("internal", ctypes.c_byte * 128)] + + +# equivalent to c declaration: +# ncclResult_t ncclGetUniqueId(ncclUniqueId* uniqueId); +_c_ncclGetUniqueId = nccl.ncclGetUniqueId +_c_ncclGetUniqueId.restype = ctypes.c_int +_c_ncclGetUniqueId.argtypes = [ctypes.POINTER(NcclUniqueId)] + + +def ncclGetUniqueId() -> NcclUniqueId: + unique_id = NcclUniqueId() + NCCL_CHECK(_c_ncclGetUniqueId(ctypes.byref(unique_id))) + return unique_id + + +# equivalent to c declaration: +# ncclResult_t 
ncclCommInitRank( +# ncclComm_t* comm, int nranks, ncclUniqueId commId, int rank); +# note that ncclComm_t is a pointer type, so the first argument +# is a pointer to a pointer +_c_ncclCommInitRank = nccl.ncclCommInitRank +_c_ncclCommInitRank.restype = ctypes.c_int +_c_ncclCommInitRank.argtypes = [ + ctypes.POINTER(ctypes.c_void_p), ctypes.c_int, NcclUniqueId, ctypes.c_int +] + +ncclDataType_t = ctypes.c_int + + +class ncclDataTypeEnum: + ncclInt8 = 0 + ncclChar = 0 + ncclUint8 = 1 + ncclInt32 = 2 + ncclInt = 2 + ncclUint32 = 3 + ncclInt64 = 4 + ncclUint64 = 5 + ncclFloat16 = 6 + ncclHalf = 6 + ncclFloat32 = 7 + ncclFloat = 7 + ncclFloat64 = 8 + ncclDouble = 8 + ncclBfloat16 = 9 + ncclNumTypes = 10 + + @classmethod + def from_torch(cls, dtype: torch.dtype) -> int: + if dtype == torch.int8: + return cls.ncclInt8 + if dtype == torch.uint8: + return cls.ncclUint8 + if dtype == torch.int32: + return cls.ncclInt32 + if dtype == torch.int64: + return cls.ncclInt64 + if dtype == torch.float16: + return cls.ncclFloat16 + if dtype == torch.float32: + return cls.ncclFloat32 + if dtype == torch.float64: + return cls.ncclFloat64 + if dtype == torch.bfloat16: + return cls.ncclBfloat16 + raise ValueError(f"Unsupported dtype: {dtype}") + + +ncclRedOp_t = ctypes.c_int + + +class ncclRedOpTypeEnum: + ncclSum = 0 + ncclProd = 1 + ncclMax = 2 + ncclMin = 3 + ncclAvg = 4 + ncclNumOps = 5 + + @classmethod + def from_torch(cls, op: ReduceOp) -> int: + if op == ReduceOp.SUM: + return cls.ncclSum + if op == ReduceOp.PRODUCT: + return cls.ncclProd + if op == ReduceOp.MAX: + return cls.ncclMax + if op == ReduceOp.MIN: + return cls.ncclMin + if op == ReduceOp.AVG: + return cls.ncclAvg + raise ValueError(f"Unsupported op: {op}") + + +# equivalent to c declaration: +# ncclResult_t ncclAllReduce( +# const void* sendbuff, void* recvbuff, size_t count, +# ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, +# udaStream_t stream); +# note that cudaStream_t is a pointer type, so the last 
argument is a pointer +_c_ncclAllReduce = nccl.ncclAllReduce +_c_ncclAllReduce.restype = ctypes.c_int +_c_ncclAllReduce.argtypes = [ + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ncclRedOp_t, + ncclDataType_t, ctypes.c_void_p, ctypes.c_void_p +] + +# be cautious! this is a collective call, it will block until all +# processes in the communicator have called this function. +# because Python object destruction can happen in random order, +# it is better not to call it at all. +# equivalent to c declaration: +# ncclResult_t ncclCommDestroy(ncclComm_t comm); +_c_ncclCommDestroy = nccl.ncclCommDestroy +_c_ncclCommDestroy.restype = ctypes.c_int +_c_ncclCommDestroy.argtypes = [ctypes.c_void_p] + + +class NCCLCommunicator: + + def __init__( + self, + group: Optional[ProcessGroup] = None, + device: Optional[Union[int, str, torch.device]] = None, + ): + """ + Args: + group: the process group to work on. If None, it will use the + default process group. + device: the device to bind the NCCLCommunicator to. If None, + it will be bind to f"cuda:{local_rank}". + It is the caller's responsibility to make sure each communicator + is bind to a unique device. 
+ """ + assert dist.is_initialized() + group = get_cpu_world_group() if group is None else group + assert dist.get_backend(group) != dist.Backend.NCCL, ( + "NCCLCommunicator should be attached to a non-NCCL group.") + self.group = group + # note: this rank is the rank in the group + self.rank = dist.get_rank(group) + self.world_size = dist.get_world_size(group) + if self.rank == 0: + self.unique_id = ncclGetUniqueId() + else: + self.unique_id = NcclUniqueId() + tensor = torch.ByteTensor(list(self.unique_id.internal)) + ranks = dist.get_process_group_ranks(group) + # arg `src` in `broadcast` is the global rank + dist.broadcast(tensor, src=ranks[0], group=group) + byte_list = tensor.tolist() + for i, byte in enumerate(byte_list): + self.unique_id.internal[i] = byte + self.comm = ctypes.c_void_p() + if device is None: + local_rank = get_local_rank() + device = torch.device(f"cuda:{local_rank}") + elif isinstance(device, int): + device = torch.device(f"cuda:{device}") + elif isinstance(device, str): + device = torch.device(device) + # now `device` is a `torch.device` object + assert isinstance(device, torch.device) + self.device = device + # nccl communicator and stream will use this device + # `torch.cuda.device` is a context manager that changes the + # current cuda device to the specified one + with torch.cuda.device(device): + NCCL_CHECK( + _c_ncclCommInitRank(ctypes.byref(self.comm), self.world_size, + self.unique_id, self.rank)) + self.stream = torch.cuda.Stream() + + def all_reduce(self, + tensor: torch.Tensor, + op: ReduceOp = ReduceOp.SUM, + stream=None): + # nccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {tensor.device}") + if stream is None: + stream = self.stream + NCCL_CHECK( + 
_c_ncclAllReduce(ctypes.c_void_p(tensor.data_ptr()), + ctypes.c_void_p(tensor.data_ptr()), + tensor.numel(), + ncclDataTypeEnum.from_torch(tensor.dtype), + ncclRedOpTypeEnum.from_torch(op), self.comm, + ctypes.c_void_p(stream.cuda_stream))) diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py new file mode 100644 index 0000000..4258be7 --- /dev/null +++ b/vllm/distributed/parallel_state.py @@ -0,0 +1,341 @@ +# Copyright 2023 The vLLM team. +# Adapted from +# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py +# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +"""Tensor and pipeline parallel groups.""" +import contextlib +from typing import Optional + +import torch +import torch_musa + +import vllm.envs as envs +from vllm.logger import init_logger + +logger = init_logger(__name__) + +# Tensor model parallel group that the current rank belongs to. +_TP_DEVICE_GROUP = None +_TP_CPU_GROUP = None +# Pipeline model parallel group that the current rank belongs to. +_PIPELINE_MODEL_PARALLEL_GROUP = None + +# when people blindly call `torch.distributed.all_reduce` etc, +# it will use this group. It is initialized with the `backend` +# parameter of `init_distributed_environment` below. +# Essentially, this is `torch.distributed.group.WORLD`. +# We leave a line here to note that this is device-specific. +# Note that this variable is not safe to use, because when users +# call `init_distributed_environment` first, and then destroy +# the process group themselves, this variable will keep a reference to the +# destroyed process group, which is not useful. +_DEVICE_WORLD_GROUP = None + +# duing `init_distributed_environment`, we will also initialize a +# group with `gloo` backend, to allow direct coordination between +# processes through the CPU. 
# Group handle for CPU (gloo) collectives, created alongside the default
# device group in `init_distributed_environment`.
_CPU_WORLD_GROUP = None

# In summary, after calling `init_distributed_environment`, we will
# always have two groups: one for device-specific (and is the default)
# and one for CPU. All processes will be part of both groups.

# A list of global ranks for each pipeline group to ease calculation of the
# source rank when broadcasting from the first or last pipeline stage.
_PIPELINE_GLOBAL_RANKS = None

# Local (per-node) rank of this process; -1 until
# `init_distributed_environment` has recorded it.
_LOCAL_RANK = -1


def get_local_rank():
    """Return the local (per-node) rank recorded by
    `init_distributed_environment`, or -1 if not yet initialized."""
    # NOTE: read-only access; no `global` statement needed.
    return _LOCAL_RANK


def init_distributed_environment(
    world_size: int = -1,
    rank: int = -1,
    distributed_init_method: str = "env://",
    local_rank: int = -1,
    backend: str = "nccl",
):
    """Initialize the default (device) process group plus a companion CPU
    (gloo) group over the same ranks, and record the local rank.

    No-op if torch.distributed is already initialized.
    """
    logger.debug(
        "world_size=%d rank=%d local_rank=%d "
        "distributed_init_method=%s backend=%s", world_size, rank, local_rank,
        distributed_init_method, backend)
    if not torch.distributed.is_initialized():
        assert distributed_init_method is not None, (
            "distributed_init_method must be provided when initializing "
            "distributed environment")
        # this backend is used for WORLD
        torch.distributed.init_process_group(
            backend=backend,
            init_method=distributed_init_method,
            world_size=world_size,
            rank=rank)
        global _DEVICE_WORLD_GROUP, _CPU_WORLD_GROUP
        _DEVICE_WORLD_GROUP = torch.distributed.group.WORLD
        ranks = list(range(torch.distributed.get_world_size()))
        _CPU_WORLD_GROUP = torch.distributed.new_group(ranks=ranks,
                                                       backend="gloo")
        # set the local rank
        # local_rank is not available in torch ProcessGroup,
        # see https://github.com/pytorch/pytorch/issues/122816
        if local_rank == -1 and distributed_init_method == "env://":
            local_rank = envs.LOCAL_RANK
        global _LOCAL_RANK
        _LOCAL_RANK = local_rank


def initialize_model_parallel(
    tensor_model_parallel_size: int = 1,
    pipeline_model_parallel_size: int = 1,
    backend: Optional[str] = None,
) -> None:
    """
    Initialize model parallel groups.

    Arguments:
        tensor_model_parallel_size: number of GPUs used for tensor model
            parallelism.
        pipeline_model_parallel_size: number of GPUs used for pipeline model
            parallelism.

    Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
        4 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7]
        2 pipeline model-parallel groups:
            [g0, g2, g4, g6], [g1, g3, g5, g7]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    world_size: int = torch.distributed.get_world_size()
    # get the backend of _DEVICE_WORLD_GROUP
    backend = backend or torch.distributed.get_backend()

    if (world_size !=
            tensor_model_parallel_size * pipeline_model_parallel_size):
        raise RuntimeError(
            f"world_size ({world_size}) is not equal to "
            f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
            f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")

    num_tensor_model_parallel_groups: int = (world_size //
                                             tensor_model_parallel_size)
    num_pipeline_model_parallel_groups: int = (world_size //
                                               pipeline_model_parallel_size)
    rank = torch.distributed.get_rank()

    # Build the tensor model-parallel groups.
    # NOTE: new_group() is collective, so every rank creates every group but
    # keeps only the group it belongs to.
    global _TP_DEVICE_GROUP, _TP_CPU_GROUP
    assert _TP_DEVICE_GROUP is None, (
        "tensor model parallel group is already initialized")
    for i in range(num_tensor_model_parallel_groups):
        ranks = range(i * tensor_model_parallel_size,
                      (i + 1) * tensor_model_parallel_size)
        group = torch.distributed.new_group(ranks, backend=backend)
        cpu_group = torch.distributed.new_group(ranks, backend="gloo")
        if rank in ranks:
            _TP_DEVICE_GROUP = group
            _TP_CPU_GROUP = cpu_group

    # Build the pipeline model-parallel groups.
    global _PIPELINE_MODEL_PARALLEL_GROUP
    global _PIPELINE_GLOBAL_RANKS
    assert _PIPELINE_MODEL_PARALLEL_GROUP is None, (
        "pipeline model parallel group is already initialized")
    for i in range(num_pipeline_model_parallel_groups):
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        group = torch.distributed.new_group(ranks, backend=backend)
        if rank in ranks:
            _PIPELINE_MODEL_PARALLEL_GROUP = group
            _PIPELINE_GLOBAL_RANKS = ranks


def ensure_model_parallel_initialized(
    tensor_model_parallel_size: int,
    pipeline_model_parallel_size: int,
    backend: Optional[str] = None,
) -> None:
    """Helper to initialize model parallel groups if they are not initialized,
    or ensure tensor-parallel and pipeline-parallel sizes are equal to expected
    values if the model parallel groups are initialized.
    """
    # get the backend of _DEVICE_WORLD_GROUP
    backend = backend or torch.distributed.get_backend()
    if not model_parallel_is_initialized():
        initialize_model_parallel(tensor_model_parallel_size,
                                  pipeline_model_parallel_size, backend)
        return

    assert (
        get_tensor_model_parallel_world_size() == tensor_model_parallel_size
    ), ("tensor parallel group already initialized, but of unexpected size: "
        f"{get_tensor_model_parallel_world_size()=} vs. "
        f"{tensor_model_parallel_size=}")
    assert (get_pipeline_model_parallel_world_size(
    ) == pipeline_model_parallel_size), (
        "pipeline parallel group already initialized, but of unexpected size: "
        f"{get_pipeline_model_parallel_world_size()=} vs. "
        f"{pipeline_model_parallel_size=}")


def model_parallel_is_initialized():
    """Check if tensor and pipeline parallel groups are initialized."""
    return (_TP_DEVICE_GROUP is not None
            and _PIPELINE_MODEL_PARALLEL_GROUP is not None)


def get_cpu_world_group():
    """Get the CPU world group."""
    assert _CPU_WORLD_GROUP is not None, ("CPU world group is not initialized")
    return _CPU_WORLD_GROUP


def get_tensor_model_parallel_group():
    """Get the tensor model parallel group the caller rank belongs to."""
    assert _TP_DEVICE_GROUP is not None, (
        "tensor model parallel group is not initialized")
    return _TP_DEVICE_GROUP


def get_tensor_model_parallel_cpu_group():
    """Get the tensor model parallel cpu group the caller rank belongs to."""
    assert _TP_CPU_GROUP is not None, (
        "tensor model parallel cpu group is not initialized")
    return _TP_CPU_GROUP


def get_pipeline_model_parallel_group():
    """Get the pipeline model parallel group the caller rank belongs to."""
    assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, (
        "pipeline model parallel group is not initialized")
    return _PIPELINE_MODEL_PARALLEL_GROUP


def get_tensor_model_parallel_world_size():
    """Return world size for the tensor model parallel group."""
    return torch.distributed.get_world_size(
        group=get_tensor_model_parallel_group())


def get_pipeline_model_parallel_world_size():
    """Return world size for the pipeline model parallel group."""
    return torch.distributed.get_world_size(
        group=get_pipeline_model_parallel_group())


def get_tensor_model_parallel_rank():
    """Return my rank for the tensor model parallel group."""
    return torch.distributed.get_rank(group=get_tensor_model_parallel_group())


def get_pipeline_model_parallel_rank():
    """Return my rank for the pipeline model parallel group."""
    return torch.distributed.get_rank(
        group=get_pipeline_model_parallel_group())


def get_tensor_model_parallel_src_rank():
    """Calculate the global rank corresponding to the first local rank
    in the tensor model parallel group."""
    global_rank = torch.distributed.get_rank()
    local_world_size = get_tensor_model_parallel_world_size()
    return (global_rank // local_world_size) * local_world_size


def get_pipeline_model_parallel_first_rank():
    """Return the global rank of the first process in the pipeline for the
    current tensor parallel group"""
    assert _PIPELINE_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    return _PIPELINE_GLOBAL_RANKS[0]


def get_pipeline_model_parallel_last_rank():
    """Return the global rank of the last process in the pipeline for the
    current tensor parallel group"""
    assert _PIPELINE_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    last_rank_local = get_pipeline_model_parallel_world_size() - 1
    return _PIPELINE_GLOBAL_RANKS[last_rank_local]


def get_pipeline_model_parallel_next_rank():
    """Return the global rank that follows the caller in the pipeline"""
    assert _PIPELINE_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    rank_in_pipeline = get_pipeline_model_parallel_rank()
    world_size = get_pipeline_model_parallel_world_size()
    return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]


def get_pipeline_model_parallel_prev_rank():
    """Return the global rank that precedes the caller in the pipeline"""
    assert _PIPELINE_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    rank_in_pipeline = get_pipeline_model_parallel_rank()
    world_size = get_pipeline_model_parallel_world_size()
    return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]


def destroy_model_parallel():
    """Set the groups to none and destroy them."""
    # BUGFIX: compare against None explicitly; relying on ProcessGroup
    # truthiness is fragile and non-idiomatic.
    global _TP_DEVICE_GROUP
    if _TP_DEVICE_GROUP is not None:
        torch.distributed.destroy_process_group(_TP_DEVICE_GROUP)
    _TP_DEVICE_GROUP = None
    global _TP_CPU_GROUP
    if _TP_CPU_GROUP is not None:
        torch.distributed.destroy_process_group(_TP_CPU_GROUP)
    _TP_CPU_GROUP = None
    global _PIPELINE_MODEL_PARALLEL_GROUP
    if _PIPELINE_MODEL_PARALLEL_GROUP is not None:
        torch.distributed.destroy_process_group(_PIPELINE_MODEL_PARALLEL_GROUP)
    _PIPELINE_MODEL_PARALLEL_GROUP = None
    global _PIPELINE_GLOBAL_RANKS
    _PIPELINE_GLOBAL_RANKS = None
    from vllm.distributed.device_communicators import pymccl_utils

    # Destroy the pynccl states if any.
    pymccl_utils.destroy_process_group()


# Whether to use pynccl for nccl all reduce.
# We use pynccl for all reduce when using CUDA graph, because torch.distributed
# is not well supported by CUDA graph.
_ENABLE_PYNCCL_FOR_ALL_REDUCE = False


@contextlib.contextmanager
def with_pynccl_for_all_reduce():
    """use pynccl instead of torch.distributed for all reduce"""
    # BUGFIX: the import used to precede the docstring (making the string a
    # dead statement), and the flag was not restored if the body raised.
    from vllm.distributed.device_communicators import pymccl_utils
    tp_size = get_tensor_model_parallel_world_size()
    if tp_size == 1:
        # No-op.
        # NOTE(woosuk): We don't initialize pynccl when tp_size is 1.
        yield
    else:
        global _ENABLE_PYNCCL_FOR_ALL_REDUCE
        old = _ENABLE_PYNCCL_FOR_ALL_REDUCE
        _ENABLE_PYNCCL_FOR_ALL_REDUCE = True

        stream = torch.musa.current_stream()
        try:
            with pymccl_utils.set_pymccl_stream(stream):
                yield
        finally:
            # Always restore the previous value, even on exception.
            _ENABLE_PYNCCL_FOR_ALL_REDUCE = old


def is_pynccl_enabled_for_all_reduce():
    """check if pynccl is enabled for all reduce"""
    return _ENABLE_PYNCCL_FOR_ALL_REDUCE


# === new file: vllm/distributed/utils.py ===
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/utils.py
# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import json
import os
from typing import Dict, Optional, Sequence

import torch
import torch.distributed as dist

import vllm.envs as envs
from vllm.logger import init_logger

from .parallel_state import get_cpu_world_group, get_local_rank

logger = init_logger(__name__)


def ensure_divisibility(numerator, denominator):
    """Assert that *numerator* is an exact multiple of *denominator*."""
    remainder = numerator % denominator
    assert remainder == 0, "{} is not divisible by {}".format(
        numerator, denominator)


def divide(numerator, denominator):
    """Exact integer division: assert divisibility, then floor-divide."""
    ensure_divisibility(numerator, denominator)
    return numerator // denominator


def split_tensor_along_last_dim(
    tensor: torch.Tensor,
    num_partitions: int,
    contiguous_split_chunks: bool = False,
) -> Sequence[torch.Tensor]:
    """ Split a tensor along its last dimension.

        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                                     in memory.

        Returns:
            A list of Tensors
    """
    # Figure out the last axis and the per-chunk size along it.
    axis = tensor.dim() - 1
    chunk_size = divide(tensor.size()[axis], num_partitions)
    # Perform the split.
    chunks = torch.split(tensor, chunk_size, dim=axis)
    # NOTE: torch.split does not create contiguous tensors by default.
    if contiguous_split_chunks:
        return tuple(piece.contiguous() for piece in chunks)

    return chunks


# code partly borrowed from
# https://github.com/turboderp/exllamav2/blob/1c67f97f3d2a968605a9c31ab791a05c85bb7879/exllamav2/compat.py#L10
# License: MIT
def _can_actually_p2p(idx_a, idx_b):
    # Round-trip a tensor through the peer device and verify it survives
    # unchanged; some platforms report P2P support that is actually broken.
    src_dev = f"musa:{idx_a}"
    dst_dev = f"musa:{idx_b}"
    original = torch.randn(5, device=src_dev) + 123.0
    round_trip = original.to(dst_dev).to(src_dev)
    return torch.all(original == round_trip).cpu().item()


# why do we need this cache?
# 1. we can have runtime checks for P2P access, where every process checks
# P2P access to all other GPUs. Unfortunately, the test might cost many
# (world_size * world_size) cuda context, and reduce the memory available
# for the model. see https://github.com/vllm-project/vllm/issues/3821
# 2. alternatively, we can have a p2p map that is generated by the master
# process and broadcasted to all other processes. This still requires
# #world_size of cuda context, belonging to the master process, on each GPU.
# 3. we can have a cache file, that records the p2p access status. The first
# time the master process checks the p2p access, it will generate the cache
# file, at the cost of #world_size of cuda context. Later on, all processes
# can read the cache file to check the p2p access status without any cost of
# additional cuda context.
# Note that the cache file is suffixed by the CUDA_VISIBLE_DEVICES, so that we
# can have different cache files for different CUDA_VISIBLE_DEVICES settings,
# e.g. used by different vllm engines. The device id in the cache file is a
# **local** device id, i.e. from 0 to num_dev-1, where num_dev is the number
# of visible devices in the vllm engine.
# Lazily-populated cache mapping "i->j" to whether GPU i can P2P-access GPU j.
_gpu_p2p_access_cache: Optional[Dict[str, bool]] = None


def gpu_p2p_access_check(i: int, j: int) -> bool:
    """Check if GPU i can access GPU j.

    The result is computed once (by the local master process), persisted to a
    JSON cache file keyed by CUDA_VISIBLE_DEVICES, and read by all processes.
    """

    # if the cache variable is already calculated,
    # read from the cache instead of checking it again
    global _gpu_p2p_access_cache
    if _gpu_p2p_access_cache is not None:
        return _gpu_p2p_access_cache[f"{i}->{j}"]

    is_distributed = dist.is_initialized()

    num_dev = torch.musa.device_count()
    cuda_visible_devices = envs.CUDA_VISIBLE_DEVICES
    if cuda_visible_devices is None:
        # BUGFIX: the generator variable used to be named `i`, shadowing the
        # function parameter `i` inside this expression.
        cuda_visible_devices = ",".join(str(dev) for dev in range(num_dev))
    VLLM_CONFIG_ROOT = envs.VLLM_CONFIG_ROOT
    path = os.path.expanduser(
        f"{VLLM_CONFIG_ROOT}/vllm/gpu_p2p_access_cache_for_{cuda_visible_devices}.json"
    )
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if (not is_distributed or get_local_rank() == 0) \
            and (not os.path.exists(path)):
        # only the local master process (with local_rank == 0) can
        # enter this block to calculate the cache
        # BUGFIX: log message read "cache for in %s".
        logger.info("generating GPU P2P access cache in %s", path)
        cache = {}
        for _i in range(num_dev):
            for _j in range(num_dev):
                # on some platforms, P2P support might be buggy and we need
                # additional checks. See also:
                # https://github.com/vllm-project/vllm/issues/2728
                cache[f"{_i}->{_j}"] = torch.musa.can_device_access_peer(
                    _i, _j) and _can_actually_p2p(_i, _j)
        with open(path, "w") as f:
            json.dump(cache, f, indent=4)
    if is_distributed:
        # Make non-master ranks wait until the cache file exists.
        cpu_world_group = get_cpu_world_group()
        dist.barrier(cpu_world_group)
    logger.info("reading GPU P2P access cache from %s", path)
    with open(path, "r") as f:
        cache = json.load(f)
    _gpu_p2p_access_cache = cache
    return _gpu_p2p_access_cache[f"{i}->{j}"]


# === new file: vllm/engine/__init__.py (empty) ===

# === new file: vllm/engine/arg_utils.py ===
import argparse
import dataclasses
from dataclasses import dataclass
from typing import List, Optional, Union

from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig,
                         EngineConfig, LoadConfig, LoRAConfig, ModelConfig,
                         ParallelConfig, SchedulerConfig, SpeculativeConfig,
                         TokenizerPoolConfig, VisionLanguageConfig)
from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
from vllm.utils import str_to_int_tuple


def nullable_str(val: str):
    """argparse type helper: map "" and the literal "None" to None."""
    if not val or val == "None":
        return None
    return val


@dataclass
class EngineArgs:
    """Arguments for vLLM engine."""
    model: str
    # BUGFIX: annotation was Optional[Union[List[str]]] — a malformed
    # single-argument Union; a plain string is also accepted here.
    served_model_name: Optional[Union[str, List[str]]] = None
    tokenizer: Optional[str] = None
    skip_tokenizer_init: bool = False
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    kv_cache_dtype: str = 'auto'
    quantization_param_path: Optional[str] = None
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
enable_prefix_caching: bool = False + use_v2_block_manager: bool = False + swap_space: int = 4 # GiB + gpu_memory_utilization: float = 0.90 + max_num_batched_tokens: Optional[int] = None + max_num_seqs: int = 256 + max_logprobs: int = 5 # OpenAI default value + disable_log_stats: bool = False + revision: Optional[str] = None + code_revision: Optional[str] = None + tokenizer_revision: Optional[str] = None + quantization: Optional[str] = None + enforce_eager: bool = False + max_context_len_to_capture: Optional[int] = None + max_seq_len_to_capture: int = 8192 + disable_custom_all_reduce: bool = False + tokenizer_pool_size: int = 0 + tokenizer_pool_type: str = "ray" + tokenizer_pool_extra_config: Optional[dict] = None + enable_lora: bool = False + max_loras: int = 1 + max_lora_rank: int = 16 + fully_sharded_loras: bool = False + lora_extra_vocab_size: int = 256 + lora_dtype = 'auto' + max_cpu_loras: Optional[int] = None + device: str = 'auto' + ray_workers_use_nsight: bool = False + num_gpu_blocks_override: Optional[int] = None + num_lookahead_slots: int = 0 + model_loader_extra_config: Optional[dict] = None + + # Related to Vision-language models such as llava + image_input_type: Optional[str] = None + image_token_id: Optional[int] = None + image_input_shape: Optional[str] = None + image_feature_size: Optional[int] = None + scheduler_delay_factor: float = 0.0 + enable_chunked_prefill: bool = False + + guided_decoding_backend: str = 'outlines' + # Speculative decoding configuration. 
+ speculative_model: Optional[str] = None + num_speculative_tokens: Optional[int] = None + speculative_max_model_len: Optional[int] = None + ngram_prompt_lookup_max: Optional[int] = None + ngram_prompt_lookup_min: Optional[int] = None + + def __post_init__(self): + if self.tokenizer is None: + self.tokenizer = self.model + + @staticmethod + def add_cli_args( + parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + """Shared CLI arguments for vLLM engine.""" + + # Model arguments + parser.add_argument( + '--model', + type=str, + default='facebook/opt-125m', + help='Name or path of the huggingface model to use.') + parser.add_argument( + '--tokenizer', + type=nullable_str, + default=EngineArgs.tokenizer, + help='Name or path of the huggingface tokenizer to use.') + parser.add_argument( + '--skip-tokenizer-init', + action='store_true', + help='Skip initialization of tokenizer and detokenizer') + parser.add_argument( + '--revision', + type=nullable_str, + default=None, + help='The specific model version to use. It can be a branch ' + 'name, a tag name, or a commit id. If unspecified, will use ' + 'the default version.') + parser.add_argument( + '--code-revision', + type=nullable_str, + default=None, + help='The specific revision to use for the model code on ' + 'Hugging Face Hub. It can be a branch name, a tag name, or a ' + 'commit id. If unspecified, will use the default version.') + parser.add_argument( + '--tokenizer-revision', + type=nullable_str, + default=None, + help='The specific tokenizer version to use. It can be a branch ' + 'name, a tag name, or a commit id. 
If unspecified, will use ' + 'the default version.') + parser.add_argument( + '--tokenizer-mode', + type=str, + default=EngineArgs.tokenizer_mode, + choices=['auto', 'slow'], + help='The tokenizer mode.\n\n* "auto" will use the ' + 'fast tokenizer if available.\n* "slow" will ' + 'always use the slow tokenizer.') + parser.add_argument('--trust-remote-code', + action='store_true', + help='Trust remote code from huggingface.') + parser.add_argument('--download-dir', + type=nullable_str, + default=EngineArgs.download_dir, + help='Directory to download and load the weights, ' + 'default to the default cache dir of ' + 'huggingface.') + parser.add_argument( + '--load-format', + type=str, + default=EngineArgs.load_format, + choices=[ + 'auto', 'pt', 'safetensors', 'npcache', 'dummy', 'tensorizer' + ], + help='The format of the model weights to load.\n\n' + '* "auto" will try to load the weights in the safetensors format ' + 'and fall back to the pytorch bin format if safetensors format ' + 'is not available.\n' + '* "pt" will load the weights in the pytorch bin format.\n' + '* "safetensors" will load the weights in the safetensors format.\n' + '* "npcache" will load the weights in pytorch format and store ' + 'a numpy cache to speed up the loading.\n' + '* "dummy" will initialize the weights with random values, ' + 'which is mainly for profiling.\n' + '* "tensorizer" will load the weights using tensorizer from ' + 'CoreWeave which assumes tensorizer_uri is set to the location of ' + 'the serialized weights.') + parser.add_argument( + '--dtype', + type=str, + default=EngineArgs.dtype, + choices=[ + 'auto', 'half', 'float16', 'bfloat16', 'float', 'float32' + ], + help='Data type for model weights and activations.\n\n' + '* "auto" will use FP16 precision for FP32 and FP16 models, and ' + 'BF16 precision for BF16 models.\n' + '* "half" for FP16. 
Recommended for AWQ quantization.\n' + '* "float16" is the same as "half".\n' + '* "bfloat16" for a balance between precision and range.\n' + '* "float" is shorthand for FP32 precision.\n' + '* "float32" for FP32 precision.') + parser.add_argument( + '--kv-cache-dtype', + type=str, + choices=['auto', 'fp8'], + default=EngineArgs.kv_cache_dtype, + help='Data type for kv cache storage. If "auto", will use model ' + 'data type. FP8_E5M2 (without scaling) is only supported on cuda ' + 'version greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead ' + 'supported for common inference criteria.') + parser.add_argument( + '--quantization-param-path', + type=nullable_str, + default=None, + help='Path to the JSON file containing the KV cache ' + 'scaling factors. This should generally be supplied, when ' + 'KV cache dtype is FP8. Otherwise, KV cache scaling factors ' + 'default to 1.0, which may cause accuracy issues. ' + 'FP8_E5M2 (without scaling) is only supported on cuda version' + 'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead ' + 'supported for common inference criteria.') + parser.add_argument('--max-model-len', + type=int, + default=EngineArgs.max_model_len, + help='Model context length. If unspecified, will ' + 'be automatically derived from the model config.') + parser.add_argument( + '--guided-decoding-backend', + type=str, + default='outlines', + choices=['outlines', 'lm-format-enforcer'], + help='Which engine will be used for guided decoding' + ' (JSON schema / regex etc) by default. Currently support ' + 'https://github.com/outlines-dev/outlines and ' + 'https://github.com/noamgat/lm-format-enforcer.' 
+ ' Can be overridden per request via guided_decoding_backend' + ' parameter.') + # Parallel arguments + parser.add_argument('--worker-use-ray', + action='store_true', + help='Use Ray for distributed serving, will be ' + 'automatically set when using more than 1 GPU.') + parser.add_argument('--pipeline-parallel-size', + '-pp', + type=int, + default=EngineArgs.pipeline_parallel_size, + help='Number of pipeline stages.') + parser.add_argument('--tensor-parallel-size', + '-tp', + type=int, + default=EngineArgs.tensor_parallel_size, + help='Number of tensor parallel replicas.') + parser.add_argument( + '--max-parallel-loading-workers', + type=int, + default=EngineArgs.max_parallel_loading_workers, + help='Load model sequentially in multiple batches, ' + 'to avoid RAM OOM when using tensor ' + 'parallel and large models.') + parser.add_argument( + '--ray-workers-use-nsight', + action='store_true', + help='If specified, use nsight to profile Ray workers.') + # KV cache arguments + parser.add_argument('--block-size', + type=int, + default=EngineArgs.block_size, + choices=[8, 16, 32], + help='Token block size for contiguous chunks of ' + 'tokens.') + + parser.add_argument('--enable-prefix-caching', + action='store_true', + help='Enables automatic prefix caching.') + parser.add_argument('--use-v2-block-manager', + action='store_true', + help='Use BlockSpaceMangerV2.') + parser.add_argument( + '--num-lookahead-slots', + type=int, + default=EngineArgs.num_lookahead_slots, + help='Experimental scheduling config necessary for ' + 'speculative decoding. 
This will be replaced by ' + 'speculative config in the future; it is present ' + 'to enable correctness tests until then.') + + parser.add_argument('--seed', + type=int, + default=EngineArgs.seed, + help='Random seed for operations.') + parser.add_argument('--swap-space', + type=int, + default=EngineArgs.swap_space, + help='CPU swap space size (GiB) per GPU.') + parser.add_argument( + '--gpu-memory-utilization', + type=float, + default=EngineArgs.gpu_memory_utilization, + help='The fraction of GPU memory to be used for the model ' + 'executor, which can range from 0 to 1. For example, a value of ' + '0.5 would imply 50%% GPU memory utilization. If unspecified, ' + 'will use the default value of 0.9.') + parser.add_argument( + '--num-gpu-blocks-override', + type=int, + default=None, + help='If specified, ignore GPU profiling result and use this number' + 'of GPU blocks. Used for testing preemption.') + parser.add_argument('--max-num-batched-tokens', + type=int, + default=EngineArgs.max_num_batched_tokens, + help='Maximum number of batched tokens per ' + 'iteration.') + parser.add_argument('--max-num-seqs', + type=int, + default=EngineArgs.max_num_seqs, + help='Maximum number of sequences per iteration.') + parser.add_argument( + '--max-logprobs', + type=int, + default=EngineArgs.max_logprobs, + help=('Max number of log probs to return logprobs is specified in' + ' SamplingParams.')) + parser.add_argument('--disable-log-stats', + action='store_true', + help='Disable logging statistics.') + # Quantization settings. + parser.add_argument('--quantization', + '-q', + type=nullable_str, + choices=[*QUANTIZATION_METHODS, None], + default=EngineArgs.quantization, + help='Method used to quantize the weights. If ' + 'None, we first check the `quantization_config` ' + 'attribute in the model config file. 
If that is ' + 'None, we assume the model weights are not ' + 'quantized and use `dtype` to determine the data ' + 'type of the weights.') + parser.add_argument('--enforce-eager', + action='store_true', + help='Always use eager-mode PyTorch. If False, ' + 'will use eager mode and CUDA graph in hybrid ' + 'for maximal performance and flexibility.') + parser.add_argument('--max-context-len-to-capture', + type=int, + default=EngineArgs.max_context_len_to_capture, + help='Maximum context length covered by CUDA ' + 'graphs. When a sequence has context length ' + 'larger than this, we fall back to eager mode. ' + '(DEPRECATED. Use --max-seq_len-to-capture instead' + ')') + parser.add_argument('--max-seq_len-to-capture', + type=int, + default=EngineArgs.max_seq_len_to_capture, + help='Maximum sequence length covered by CUDA ' + 'graphs. When a sequence has context length ' + 'larger than this, we fall back to eager mode.') + parser.add_argument('--disable-custom-all-reduce', + action='store_true', + default=EngineArgs.disable_custom_all_reduce, + help='See ParallelConfig.') + parser.add_argument('--tokenizer-pool-size', + type=int, + default=EngineArgs.tokenizer_pool_size, + help='Size of tokenizer pool to use for ' + 'asynchronous tokenization. If 0, will ' + 'use synchronous tokenization.') + parser.add_argument('--tokenizer-pool-type', + type=str, + default=EngineArgs.tokenizer_pool_type, + help='Type of tokenizer pool to use for ' + 'asynchronous tokenization. Ignored ' + 'if tokenizer_pool_size is 0.') + parser.add_argument('--tokenizer-pool-extra-config', + type=nullable_str, + default=EngineArgs.tokenizer_pool_extra_config, + help='Extra config for tokenizer pool. ' + 'This should be a JSON string that will be ' + 'parsed into a dictionary. 
Ignored if ' + 'tokenizer_pool_size is 0.') + # LoRA related configs + parser.add_argument('--enable-lora', + action='store_true', + help='If True, enable handling of LoRA adapters.') + parser.add_argument('--max-loras', + type=int, + default=EngineArgs.max_loras, + help='Max number of LoRAs in a single batch.') + parser.add_argument('--max-lora-rank', + type=int, + default=EngineArgs.max_lora_rank, + help='Max LoRA rank.') + parser.add_argument( + '--lora-extra-vocab-size', + type=int, + default=EngineArgs.lora_extra_vocab_size, + help=('Maximum size of extra vocabulary that can be ' + 'present in a LoRA adapter (added to the base ' + 'model vocabulary).')) + parser.add_argument( + '--lora-dtype', + type=str, + default=EngineArgs.lora_dtype, + choices=['auto', 'float16', 'bfloat16', 'float32'], + help=('Data type for LoRA. If auto, will default to ' + 'base model dtype.')) + parser.add_argument( + '--max-cpu-loras', + type=int, + default=EngineArgs.max_cpu_loras, + help=('Maximum number of LoRAs to store in CPU memory. ' + 'Must be >= than max_num_seqs. ' + 'Defaults to max_num_seqs.')) + parser.add_argument( + '--fully-sharded-loras', + action='store_true', + help=('By default, only half of the LoRA computation is ' + 'sharded with tensor parallelism. ' + 'Enabling this will use the fully sharded layers. ' + 'At high sequence length, max rank or ' + 'tensor parallel size, this is likely faster.')) + parser.add_argument("--device", + type=str, + default=EngineArgs.device, + choices=["auto", "cuda", "neuron", "cpu", "musa"], + help='Device type for vLLM execution.') + # Related to Vision-language models such as llava + parser.add_argument( + '--image-input-type', + type=nullable_str, + default=None, + choices=[ + t.name.lower() for t in VisionLanguageConfig.ImageInputType + ], + help=('The image input type passed into vLLM. 
' + 'Should be one of "pixel_values" or "image_features".')) + parser.add_argument('--image-token-id', + type=int, + default=None, + help=('Input id for image token.')) + parser.add_argument( + '--image-input-shape', + type=nullable_str, + default=None, + help=('The biggest image input shape (worst for memory footprint) ' + 'given an input type. Only used for vLLM\'s profile_run.')) + parser.add_argument( + '--image-feature-size', + type=int, + default=None, + help=('The image feature size along the context dimension.')) + parser.add_argument( + '--scheduler-delay-factor', + type=float, + default=EngineArgs.scheduler_delay_factor, + help='Apply a delay (of delay factor multiplied by previous' + 'prompt latency) before scheduling next prompt.') + parser.add_argument( + '--enable-chunked-prefill', + action='store_true', + help='If set, the prefill requests can be chunked based on the ' + 'max_num_batched_tokens.') + + parser.add_argument( + '--speculative-model', + type=nullable_str, + default=EngineArgs.speculative_model, + help= + 'The name of the draft model to be used in speculative decoding.') + + parser.add_argument( + '--num-speculative-tokens', + type=int, + default=EngineArgs.num_speculative_tokens, + help='The number of speculative tokens to sample from ' + 'the draft model in speculative decoding.') + + parser.add_argument( + '--speculative-max-model-len', + type=int, + default=EngineArgs.speculative_max_model_len, + help='The maximum sequence length supported by the ' + 'draft model. 
Sequences over this length will skip ' + 'speculation.') + + parser.add_argument( + '--ngram-prompt-lookup-max', + type=int, + default=EngineArgs.ngram_prompt_lookup_max, + help='Max size of window for ngram prompt lookup in speculative ' + 'decoding.') + + parser.add_argument( + '--ngram-prompt-lookup-min', + type=int, + default=EngineArgs.ngram_prompt_lookup_min, + help='Min size of window for ngram prompt lookup in speculative ' + 'decoding.') + + parser.add_argument('--model-loader-extra-config', + type=nullable_str, + default=EngineArgs.model_loader_extra_config, + help='Extra config for model loader. ' + 'This will be passed to the model loader ' + 'corresponding to the chosen load_format. ' + 'This should be a JSON string that will be ' + 'parsed into a dictionary.') + + parser.add_argument( + "--served-model-name", + nargs="+", + type=str, + default=None, + help="The model name(s) used in the API. If multiple " + "names are provided, the server will respond to any " + "of the provided names. The model name in the model " + "field of a response will be the first name in this " + "list. If not specified, the model name will be the " + "same as the `--model` argument. Noted that this name(s)" + "will also be used in `model_name` tag content of " + "prometheus metrics, if multiple names provided, metrics" + "tag will take the first one.") + + return parser + + @classmethod + def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs': + # Get the list of attributes of this dataclass. + attrs = [attr.name for attr in dataclasses.fields(cls)] + # Set the attributes from the parsed arguments. 
+ engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) + return engine_args + + def create_engine_config(self, ) -> EngineConfig: + device_config = DeviceConfig(self.device) + model_config = ModelConfig( + self.model, self.tokenizer, self.tokenizer_mode, + self.trust_remote_code, self.dtype, self.seed, self.revision, + self.code_revision, self.tokenizer_revision, self.max_model_len, + self.quantization, self.quantization_param_path, + self.enforce_eager, self.max_context_len_to_capture, + self.max_seq_len_to_capture, self.max_logprobs, + self.skip_tokenizer_init, self.served_model_name) + cache_config = CacheConfig(self.block_size, + self.gpu_memory_utilization, + self.swap_space, self.kv_cache_dtype, + self.num_gpu_blocks_override, + model_config.get_sliding_window(), + self.enable_prefix_caching) + parallel_config = ParallelConfig( + self.pipeline_parallel_size, self.tensor_parallel_size, + self.worker_use_ray, self.max_parallel_loading_workers, + self.disable_custom_all_reduce, + TokenizerPoolConfig.create_config( + self.tokenizer_pool_size, + self.tokenizer_pool_type, + self.tokenizer_pool_extra_config, + ), self.ray_workers_use_nsight) + + speculative_config = SpeculativeConfig.maybe_create_spec_config( + target_model_config=model_config, + target_parallel_config=parallel_config, + target_dtype=self.dtype, + speculative_model=self.speculative_model, + num_speculative_tokens=self.num_speculative_tokens, + speculative_max_model_len=self.speculative_max_model_len, + enable_chunked_prefill=self.enable_chunked_prefill, + use_v2_block_manager=self.use_v2_block_manager, + ngram_prompt_lookup_max=self.ngram_prompt_lookup_max, + ngram_prompt_lookup_min=self.ngram_prompt_lookup_min, + ) + + scheduler_config = SchedulerConfig( + self.max_num_batched_tokens, + self.max_num_seqs, + model_config.max_model_len, + self.use_v2_block_manager, + num_lookahead_slots=(self.num_lookahead_slots + if speculative_config is None else + 
speculative_config.num_lookahead_slots), + delay_factor=self.scheduler_delay_factor, + enable_chunked_prefill=self.enable_chunked_prefill, + ) + lora_config = LoRAConfig( + max_lora_rank=self.max_lora_rank, + max_loras=self.max_loras, + fully_sharded_loras=self.fully_sharded_loras, + lora_extra_vocab_size=self.lora_extra_vocab_size, + lora_dtype=self.lora_dtype, + max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras + and self.max_cpu_loras > 0 else None) if self.enable_lora else None + + load_config = LoadConfig( + load_format=self.load_format, + download_dir=self.download_dir, + model_loader_extra_config=self.model_loader_extra_config, + ) + + if self.image_input_type: + if (not self.image_token_id or not self.image_input_shape + or not self.image_feature_size): + raise ValueError( + 'Specify `image_token_id`, `image_input_shape` and ' + '`image_feature_size` together with `image_input_type`.') + vision_language_config = VisionLanguageConfig( + image_input_type=VisionLanguageConfig. 
+ get_image_input_enum_type(self.image_input_type), + image_token_id=self.image_token_id, + image_input_shape=str_to_int_tuple(self.image_input_shape), + image_feature_size=self.image_feature_size, + ) + else: + vision_language_config = None + + decoding_config = DecodingConfig( + guided_decoding_backend=self.guided_decoding_backend) + + return EngineConfig(model_config=model_config, + cache_config=cache_config, + parallel_config=parallel_config, + scheduler_config=scheduler_config, + device_config=device_config, + lora_config=lora_config, + vision_language_config=vision_language_config, + speculative_config=speculative_config, + load_config=load_config, + decoding_config=decoding_config) + + +@dataclass +class AsyncEngineArgs(EngineArgs): + """Arguments for asynchronous vLLM engine.""" + engine_use_ray: bool = False + disable_log_requests: bool = False + max_log_len: Optional[int] = None + + @staticmethod + def add_cli_args(parser: argparse.ArgumentParser, + async_args_only: bool = False) -> argparse.ArgumentParser: + if not async_args_only: + parser = EngineArgs.add_cli_args(parser) + parser.add_argument('--engine-use-ray', + action='store_true', + help='Use Ray to start the LLM engine in a ' + 'separate process as the server process.') + parser.add_argument('--disable-log-requests', + action='store_true', + help='Disable logging requests.') + parser.add_argument('--max-log-len', + type=int, + default=None, + help='Max number of prompt characters or prompt ' + 'ID numbers being printed in log.' 
+ '\n\nDefault: Unlimited') + return parser + + +# These functions are used by sphinx to build the documentation +def _engine_args_parser(): + return EngineArgs.add_cli_args(argparse.ArgumentParser()) + + +def _async_engine_args_parser(): + return AsyncEngineArgs.add_cli_args(argparse.ArgumentParser(), + async_args_only=True) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py new file mode 100644 index 0000000..9f72a0d --- /dev/null +++ b/vllm/engine/async_llm_engine.py @@ -0,0 +1,737 @@ +import asyncio +import time +from functools import partial +from typing import (Any, AsyncIterator, Callable, Dict, Iterable, List, + Optional, Set, Tuple, Type, Union) + +from transformers import PreTrainedTokenizer + +import vllm.envs as envs +from vllm.config import DecodingConfig, ModelConfig +from vllm.core.scheduler import SchedulerOutputs +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.llm_engine import LLMEngine +from vllm.executor.ray_utils import initialize_ray_cluster, ray +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.outputs import RequestOutput +from vllm.sampling_params import SamplingParams +from vllm.sequence import ExecuteModelRequest, MultiModalData, SamplerOutput +from vllm.usage.usage_lib import UsageContext + +logger = init_logger(__name__) +ENGINE_ITERATION_TIMEOUT_S = envs.VLLM_ENGINE_ITERATION_TIMEOUT_S + + +class AsyncEngineDeadError(RuntimeError): + pass + + +def _raise_exception_on_finish( + task: asyncio.Task, error_callback: Callable[[Exception], + None]) -> None: + msg = ("Task finished unexpectedly. This should never happen! 
" + "Please open an issue on Github.") + + exception = None + try: + task.result() + # NOTE: This will be thrown if task exits normally (which it should not) + raise AsyncEngineDeadError(msg) + except Exception as e: + exception = e + logger.error("Engine background task failed", exc_info=e) + error_callback(exception) + raise AsyncEngineDeadError( + msg + " See stack trace above for the actual cause.") from e + + +class AsyncStream: + """A stream of RequestOutputs for a request that can be + iterated over asynchronously.""" + + def __init__(self, request_id: str) -> None: + self.request_id = request_id + self._queue: asyncio.Queue = asyncio.Queue() + self._finished = False + + def put(self, item: Union[RequestOutput, Exception]) -> None: + if self._finished: + return + self._queue.put_nowait(item) + + def finish(self) -> None: + self._queue.put_nowait(StopAsyncIteration()) + self._finished = True + + @property + def finished(self) -> bool: + return self._finished + + def __aiter__(self): + return self + + async def __anext__(self) -> RequestOutput: + result = await self._queue.get() + if isinstance(result, Exception): + raise result + return result + + +class RequestTracker: + """Synchronous abstraction for tracking requests.""" + + def __init__(self) -> None: + self._request_streams: Dict[str, AsyncStream] = {} + self._finished_requests: asyncio.Queue[str] = asyncio.Queue() + self._new_requests: asyncio.Queue[Tuple[AsyncStream, + dict]] = asyncio.Queue() + self.new_requests_event = asyncio.Event() + + def __contains__(self, item): + return item in self._request_streams + + def __len__(self) -> int: + return len(self._request_streams) + + def propagate_exception(self, + exc: Exception, + request_id: Optional[str] = None) -> None: + """Propagate an exception to request streams + (all if request_id is None).""" + if request_id is not None: + self._request_streams[request_id].put(exc) + self.abort_request(request_id) + else: + for rid, stream in 
self._request_streams.items(): + stream.put(exc) + self.abort_request(rid) + + def process_request_output(self, + request_output: RequestOutput, + *, + verbose: bool = False) -> None: + """Process a request output from the engine.""" + request_id = request_output.request_id + + self._request_streams[request_id].put(request_output) + if request_output.finished: + if verbose: + logger.info("Finished request %s.", request_id) + self.abort_request(request_id) + + def process_exception(self, + request_id: str, + exception: Exception, + *, + verbose: bool = False) -> None: + """Propagate an exception from the engine.""" + self._request_streams[request_id].put(exception) + if verbose: + logger.info("Finished request %s.", request_id) + self.abort_request(request_id) + + def add_request(self, request_id: str, + **engine_add_request_kwargs) -> AsyncStream: + """Add a request to be sent to the engine on the next background + loop iteration.""" + if request_id in self._request_streams: + raise KeyError(f"Request {request_id} already exists.") + + stream = AsyncStream(request_id) + self._new_requests.put_nowait((stream, { + "request_id": request_id, + **engine_add_request_kwargs + })) + + self.new_requests_event.set() + + return stream + + def abort_request(self, request_id: str, *, verbose: bool = False) -> None: + """Abort a request during next background loop iteration.""" + if verbose: + logger.info("Aborted request %s.", request_id) + + self._finished_requests.put_nowait(request_id) + + if request_id not in self._request_streams or self._request_streams[ + request_id].finished: + # The request has already finished or been aborted. 
    def get_new_and_finished_requests(self) -> Tuple[List[Dict], Set[str]]:
        """Get the new requests and finished requests to be
        sent to the engine.

        Drains both internal queues. Finished ids are drained FIRST so a
        request that was added and then aborted before the loop ran is
        never handed to the engine: its stream is finished immediately
        in the second loop below.
        """
        new_requests: List[Dict] = []
        finished_requests: Set[str] = set()

        while not self._finished_requests.empty():
            request_id = self._finished_requests.get_nowait()
            finished_requests.add(request_id)
            # Drop the stream mapping; no-op if the id was never tracked.
            self._request_streams.pop(request_id, None)

        while not self._new_requests.empty():
            stream, new_request = self._new_requests.get_nowait()
            if stream.request_id in finished_requests:
                # The request has already been aborted.
                stream.finish()
                continue
            self._request_streams[stream.request_id] = stream
            new_requests.append(new_request)

        return new_requests, finished_requests
    async def encode_request_async(
        self,
        request_id: str,  # pylint: disable=unused-argument
        prompt: Optional[str],
        prompt_token_ids: Optional[List[int]] = None,
        lora_request: Optional[LoRARequest] = None,
    ) -> List[int]:
        """Tokenize ``prompt`` asynchronously unless token ids are given.

        Args:
            request_id: Id of the request (forwarded to the tokenizer group).
            prompt: Prompt text; must not be None when ``prompt_token_ids``
                is None.
            prompt_token_ids: Pre-tokenized prompt; returned unchanged when
                provided.
            lora_request: Optional LoRA adapter affecting tokenization.

        Returns:
            The prompt token ids.
        """
        if prompt_token_ids is None:
            assert prompt is not None
            prompt_token_ids = await self.tokenizer.encode_async(
                request_id=request_id,
                prompt=prompt,
                lora_request=lora_request)
        return prompt_token_ids
arrival_time=arrival_time, + lora_request=lora_request, + multi_modal_data=multi_modal_data) + + async def check_health_async(self) -> None: + self.model_executor.check_health() + + +class AsyncLLMEngine: + """An asynchronous wrapper for LLMEngine. + + This class is used to wrap the LLMEngine class to make it asynchronous. It + uses asyncio to create a background loop that keeps processing incoming + requests. The LLMEngine is kicked by the generate method when there + are requests in the waiting queue. The generate method yields the outputs + from the LLMEngine to the caller. + + NOTE: For the comprehensive list of arguments, see `LLMEngine`. + + Args: + worker_use_ray: Whether to use Ray for model workers. Required for + distributed execution. Should be the same as + `parallel_config.worker_use_ray`. + engine_use_ray: Whether to make LLMEngine a Ray actor. If so, the + async frontend will be executed in a separate process as the + model workers. + log_requests: Whether to log the requests. + max_log_len: Maximum number of prompt characters or prompt ID numbers + being printed in log. + start_engine_loop: If True, the background task to run the engine + will be automatically started in the generate call. + *args: Arguments for LLMEngine. + *kwargs: Arguments for LLMEngine. 
+ """ + + _engine_class: Type[_AsyncLLMEngine] = _AsyncLLMEngine + + def __init__(self, + worker_use_ray: bool, + engine_use_ray: bool, + *args, + log_requests: bool = True, + max_log_len: Optional[int] = None, + start_engine_loop: bool = True, + **kwargs) -> None: + self.worker_use_ray = worker_use_ray + self.engine_use_ray = engine_use_ray + self.log_requests = log_requests + self.max_log_len = max_log_len + self.engine = self._init_engine(*args, **kwargs) + + self.background_loop: Optional[asyncio.Future] = None + # We need to keep a reference to unshielded + # task as well to prevent it from being garbage + # collected + self._background_loop_unshielded: Optional[asyncio.Task[Any]] = None + self.start_engine_loop = start_engine_loop + self._errored_with: Optional[BaseException] = None + + # Lazy initialized fields + self._request_tracker: RequestTracker + + @classmethod + def from_engine_args( + cls, + engine_args: AsyncEngineArgs, + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, + ) -> "AsyncLLMEngine": + """Creates an async LLM engine from the engine arguments.""" + # Create the engine configs. 
+ engine_config = engine_args.create_engine_config() + + if engine_config.device_config.device_type == "neuron": + from vllm.executor.neuron_executor import NeuronExecutorAsync + executor_class = NeuronExecutorAsync + elif engine_config.device_config.device_type == "cpu": + assert not engine_config.parallel_config.worker_use_ray, ( + "Ray is not supported with the CPU backend.") + from vllm.executor.cpu_executor import CPUExecutorAsync + executor_class = CPUExecutorAsync + elif engine_config.parallel_config.worker_use_ray: + initialize_ray_cluster(engine_config.parallel_config) + from vllm.executor.ray_gpu_executor import RayGPUExecutorAsync + executor_class = RayGPUExecutorAsync + else: + assert engine_config.parallel_config.world_size == 1, ( + "Ray is required if parallel_config.world_size > 1.") + from vllm.executor.gpu_executor import GPUExecutorAsync + executor_class = GPUExecutorAsync + # Create the async LLM engine. + engine = cls( + engine_config.parallel_config.worker_use_ray, + engine_args.engine_use_ray, + **engine_config.to_dict(), + executor_class=executor_class, + log_requests=not engine_args.disable_log_requests, + log_stats=not engine_args.disable_log_stats, + max_log_len=engine_args.max_log_len, + start_engine_loop=start_engine_loop, + usage_context=usage_context, + ) + return engine + + @property + def is_running(self) -> bool: + return (self.background_loop is not None + and self._background_loop_unshielded is not None + and not self._background_loop_unshielded.done()) + + @property + def is_stopped(self) -> bool: + return self.errored or (self.background_loop is not None and + self._background_loop_unshielded is not None + and self._background_loop_unshielded.done()) + + @property + def errored(self) -> bool: + return self._errored_with is not None + + def set_errored(self, exc: Exception) -> None: + self._errored_with = exc + + def _error_callback(self, exc: Exception) -> None: + self.set_errored(exc) + 
self._request_tracker.propagate_exception(exc) + + async def get_tokenizer(self) -> "PreTrainedTokenizer": + if self.engine_use_ray: + return await self.engine.get_tokenizer.remote() # type: ignore + else: + return self.engine.get_tokenizer() + + def start_background_loop(self) -> None: + """Start the background loop.""" + if self.errored: + raise AsyncEngineDeadError( + "Background loop has errored already.") from self._errored_with + if self.is_running: + raise RuntimeError("Background loop is already running.") + # Initialize the RequestTracker here so it uses the right event loop. + self._request_tracker = RequestTracker() + + self._background_loop_unshielded = asyncio.get_event_loop( + ).create_task(self.run_engine_loop()) + self._background_loop_unshielded.add_done_callback( + partial(_raise_exception_on_finish, + error_callback=self._error_callback)) + self.background_loop = asyncio.shield(self._background_loop_unshielded) + + def _init_engine(self, *args, + **kwargs) -> Union[_AsyncLLMEngine, "ray.ObjectRef"]: + if not self.engine_use_ray: + engine_class = self._engine_class + elif self.worker_use_ray: + engine_class = ray.remote(num_cpus=0)(self._engine_class).remote + else: + # FIXME(woosuk): This is a bit hacky. Be careful when changing the + # order of the arguments. + cache_config = kwargs["cache_config"] + parallel_config = kwargs["parallel_config"] + if parallel_config.tensor_parallel_size == 1: + num_gpus = cache_config.gpu_memory_utilization + else: + num_gpus = 1 + engine_class = ray.remote(num_gpus=num_gpus)( + self._engine_class).remote + return engine_class(*args, **kwargs) + + async def engine_step(self) -> bool: + """Kick the engine to process the waiting requests. + + Returns True if there are in-progress requests.""" + + new_requests, finished_requests = ( + self._request_tracker.get_new_and_finished_requests()) + + for new_request in new_requests: + # Add the request into the vLLM engine's waiting queue. 
+ # TODO: Maybe add add_request_batch to reduce Ray overhead + try: + if self.engine_use_ray: + await self.engine.add_request.remote( # type: ignore + **new_request) + else: + await self.engine.add_request_async(**new_request) + except ValueError as e: + # TODO: use a vLLM specific error for failed validation + self._request_tracker.process_exception( + new_request["request_id"], + e, + verbose=self.log_requests, + ) + + if finished_requests: + await self._engine_abort(finished_requests) + + if self.engine_use_ray: + request_outputs = await self.engine.step.remote() # type: ignore + else: + request_outputs = await self.engine.step_async() + + # Put the outputs into the corresponding streams. + for request_output in request_outputs: + self._request_tracker.process_request_output( + request_output, verbose=self.log_requests) + + return len(request_outputs) > 0 + + async def _engine_abort(self, request_ids: Iterable[str]): + if self.engine_use_ray: + await self.engine.abort_request.remote(request_ids) # type: ignore + else: + self.engine.abort_request(request_ids) + + async def run_engine_loop(self): + has_requests_in_progress = False + while True: + if not has_requests_in_progress: + logger.debug("Waiting for new requests...") + await self._request_tracker.wait_for_new_requests() + logger.debug("Got new requests!") + + # Abort if iteration takes too long due to unrecoverable errors + # (eg. NCCL timeouts). + try: + has_requests_in_progress = await asyncio.wait_for( + self.engine_step(), ENGINE_ITERATION_TIMEOUT_S) + except asyncio.TimeoutError as exc: + logger.error( + "Engine iteration timed out. 
This should never happen!") + self.set_errored(exc) + raise + await asyncio.sleep(0) + + async def add_request( + self, + request_id: str, + prompt: Optional[str], + sampling_params: SamplingParams, + prompt_token_ids: Optional[List[int]] = None, + arrival_time: Optional[float] = None, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> AsyncStream: + if self.log_requests: + shortened_prompt = prompt + shortened_token_ids = prompt_token_ids + if self.max_log_len is not None: + if shortened_prompt is not None: + shortened_prompt = shortened_prompt[:self.max_log_len] + if shortened_token_ids is not None: + shortened_token_ids = shortened_token_ids[:self. + max_log_len] + logger.info( + "Received request %s: prompt: %r, " + "sampling_params: %s, prompt_token_ids: %s, " + "lora_request: %s.", request_id, shortened_prompt, + sampling_params, shortened_token_ids, lora_request) + + if not self.is_running: + if self.start_engine_loop: + self.start_background_loop() + else: + raise AsyncEngineDeadError( + "Background loop is not running. 
If it was running, " + "inspect the output to find the stacktrace of the " + "error that caused the background loop to stop " + "(AsyncEngineDeadError).") + + if arrival_time is None: + arrival_time = time.time() + + if self.engine_use_ray: + prompt_token_ids = await ( + self.engine.encode_request_async.remote( # type: ignore + request_id=request_id, + prompt=prompt, + prompt_token_ids=prompt_token_ids, + lora_request=lora_request)) + else: + prompt_token_ids = await self.engine.encode_request_async( + request_id=request_id, + prompt=prompt, + prompt_token_ids=prompt_token_ids, + lora_request=lora_request) + + stream = self._request_tracker.add_request( + request_id, + prompt=prompt, + sampling_params=sampling_params, + prompt_token_ids=prompt_token_ids, + arrival_time=arrival_time, + lora_request=lora_request, + multi_modal_data=multi_modal_data, + ) + + return stream + + async def generate( + self, + prompt: Optional[str], + sampling_params: SamplingParams, + request_id: str, + prompt_token_ids: Optional[List[int]] = None, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None + ) -> AsyncIterator[RequestOutput]: + """Generate outputs for a request. + + Generate outputs for a request. This method is a coroutine. It adds the + request into the waiting queue of the LLMEngine and streams the outputs + from the LLMEngine to the caller. + + Args: + prompt: The prompt string. Can be None if prompt_token_ids is + provided. + sampling_params: The sampling parameters of the request. + request_id: The unique id of the request. + prompt_token_ids: The token IDs of the prompt. If None, we + use the tokenizer to convert the prompts to token IDs. + lora_request: LoRA request to use for generation, if any. + multi_modal_data: Multi modal data per request. + + Yields: + The output `RequestOutput` objects from the LLMEngine for the + request. 
+ + Details: + - If the engine is not running, start the background loop, + which iteratively invokes + :meth:`~vllm.engine.async_llm_engine.AsyncLLMEngine.engine_step` + to process the waiting requests. + - Add the request to the engine's `RequestTracker`. + On the next background loop, this request will be sent to + the underlying engine. + Also, a corresponding `AsyncStream` will be created. + - Wait for the request outputs from `AsyncStream` and yield them. + + Example: + >>> # Please refer to entrypoints/api_server.py for + >>> # the complete example. + >>> + >>> # initialize the engine and the example input + >>> engine = AsyncLLMEngine.from_engine_args(engine_args) + >>> example_input = { + >>> "prompt": "What is LLM?", + >>> "stream": False, # assume the non-streaming case + >>> "temperature": 0.0, + >>> "request_id": 0, + >>> } + >>> + >>> # start the generation + >>> results_generator = engine.generate( + >>> example_input["prompt"], + >>> SamplingParams(temperature=example_input["temperature"]), + >>> example_input["request_id"]) + >>> + >>> # get the results + >>> final_output = None + >>> async for request_output in results_generator: + >>> if await request.is_disconnected(): + >>> # Abort the request if the client disconnects. + >>> await engine.abort(request_id) + >>> # Return or raise an error + >>> ... + >>> final_output = request_output + >>> + >>> # Process and return the final output + >>> ... + """ + # Preprocess the request. + arrival_time = time.time() + + try: + stream = await self.add_request( + request_id, + prompt, + sampling_params, + prompt_token_ids=prompt_token_ids, + arrival_time=arrival_time, + lora_request=lora_request, + multi_modal_data=multi_modal_data, + ) + + async for request_output in stream: + yield request_output + except (Exception, asyncio.CancelledError) as e: + # If there is an exception or coroutine is cancelled, abort the + # request. 
+ self._abort(request_id) + raise e + + async def abort(self, request_id: str) -> None: + """Abort a request. + + Abort a submitted request. If the request is finished or not found, + this method will be a no-op. + + Args: + request_id: The unique id of the request. + """ + if not self.is_running: + raise AsyncEngineDeadError( + "Background loop is not running. If it was running, " + "inspect the output to find the stacktrace of the " + "error that caused the background loop to stop " + "(AsyncEngineDeadError).") + + return self._abort(request_id) + + def _abort(self, request_id: str) -> None: + """Abort a request. + + Abort a submitted request. If the request is finished or not found, + this method will be a no-op. + + Args: + request_id: The unique id of the request. + """ + self._request_tracker.abort_request(request_id, + verbose=self.log_requests) + + async def get_model_config(self) -> ModelConfig: + """Get the model configuration of the vLLM engine.""" + if self.engine_use_ray: + return await self.engine.get_model_config.remote() # type: ignore + else: + return self.engine.get_model_config() + + async def get_decoding_config(self) -> DecodingConfig: + """Get the decoding configuration of the vLLM engine.""" + if self.engine_use_ray: + return await self.engine.get_decoding_config.remote( # type: ignore + ) + else: + return self.engine.get_decoding_config() + + async def do_log_stats( + self, + scheduler_outputs: Optional[SchedulerOutputs] = None, + model_output: Optional[List[SamplerOutput]] = None) -> None: + if self.engine_use_ray: + await self.engine.do_log_stats.remote( # type: ignore + scheduler_outputs, model_output) + else: + self.engine.do_log_stats() + + async def check_health(self) -> None: + """Raises an error if engine is unhealthy.""" + t = time.perf_counter() + logger.debug("Starting health check...") + if self.is_stopped: + raise AsyncEngineDeadError("Background loop is stopped.") + + if self.engine_use_ray: + try: + await 
def _load_generation_config_dict(model_config: ModelConfig):
    """Load the model's generation config as a diff dict.

    Returns the non-default fields of the model's
    ``generation_config.json``, or an empty dict when the model repo does
    not ship one (``OSError`` from ``from_pretrained``).
    """
    try:
        generation_config = GenerationConfig.from_pretrained(
            model_config.model,
            revision=model_config.revision,
        )
        return generation_config.to_diff_dict()
    except OSError:
        # The model has no generation_config.json; nothing to merge.
        return {}
+ usage_context: Specified entry point, used for usage info collection + """ + + def __init__( + self, + model_config: ModelConfig, + cache_config: CacheConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + load_config: LoadConfig, + lora_config: Optional[LoRAConfig], + vision_language_config: Optional[VisionLanguageConfig], + speculative_config: Optional[SpeculativeConfig], + decoding_config: Optional[DecodingConfig], + executor_class: Type[ExecutorBase], + log_stats: bool, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, + ) -> None: + logger.info( + "Initializing an LLM engine (v%s) with config: " + "model=%r, speculative_config=%r, tokenizer=%r, " + "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " + "tokenizer_revision=%s, trust_remote_code=%s, dtype=%s, " + "max_seq_len=%d, download_dir=%r, load_format=%s, " + "tensor_parallel_size=%d, disable_custom_all_reduce=%s, " + "quantization=%s, enforce_eager=%s, kv_cache_dtype=%s, " + "quantization_param_path=%s, device_config=%s, " + "decoding_config=%r, seed=%d, served_model_name=%s)", + vllm.__version__, + model_config.model, + speculative_config, + model_config.tokenizer, + model_config.skip_tokenizer_init, + model_config.tokenizer_mode, + model_config.revision, + model_config.tokenizer_revision, + model_config.trust_remote_code, + model_config.dtype, + model_config.max_model_len, + load_config.download_dir, + load_config.load_format, + parallel_config.tensor_parallel_size, + parallel_config.disable_custom_all_reduce, + model_config.quantization, + model_config.enforce_eager, + cache_config.cache_dtype, + model_config.quantization_param_path, + device_config.device, + decoding_config, + model_config.seed, + model_config.served_model_name, + ) + # TODO(woosuk): Print more configs in debug mode. 
+ + self.model_config = model_config + self.cache_config = cache_config + self.lora_config = lora_config + self.vision_language_config = vision_language_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.speculative_config = speculative_config + self.load_config = load_config + self.decoding_config = decoding_config or DecodingConfig() + self.log_stats = log_stats + + if not self.model_config.skip_tokenizer_init: + self.tokenizer: BaseTokenizerGroup + self._init_tokenizer() + self.detokenizer = Detokenizer(self.tokenizer) + else: + self.detokenizer = None + self.tokenizer = None + + self.seq_counter = Counter() + self.generation_config_fields = _load_generation_config_dict( + model_config) + + self.model_executor = executor_class( + model_config=model_config, + cache_config=cache_config, + parallel_config=parallel_config, + scheduler_config=scheduler_config, + device_config=device_config, + lora_config=lora_config, + vision_language_config=vision_language_config, + speculative_config=speculative_config, + load_config=load_config, + ) + + self._initialize_kv_caches() + + # If usage stat is enabled, collect relevant info. 
+ if is_usage_stats_enabled(): + from vllm.model_executor.model_loader import ( + get_architecture_class_name) + usage_message.report_usage( + get_architecture_class_name(model_config), + usage_context, + extra_kvs={ + # Common configuration + "dtype": + str(model_config.dtype), + "tensor_parallel_size": + parallel_config.tensor_parallel_size, + "block_size": + cache_config.block_size, + "gpu_memory_utilization": + cache_config.gpu_memory_utilization, + + # Quantization + "quantization": + model_config.quantization, + "kv_cache_dtype": + cache_config.cache_dtype, + + # Feature flags + "enable_lora": + bool(lora_config), + "enable_prefix_caching": + cache_config.enable_prefix_caching, + "enforce_eager": + model_config.enforce_eager, + "disable_custom_all_reduce": + parallel_config.disable_custom_all_reduce, + }) + + if self.tokenizer: + # Ping the tokenizer to ensure liveness if it runs in a + # different process. + self.tokenizer.ping() + + # Create the scheduler. + # NOTE: the cache_config here have been updated with the numbers of + # GPU and CPU blocks, which are profiled in the distributed executor. + self.scheduler = Scheduler(scheduler_config, cache_config, lora_config) + + # Metric Logging. + if self.log_stats: + self.stat_logger = StatLogger( + local_interval=_LOCAL_LOGGING_INTERVAL_SEC, + labels=dict(model_name=model_config.served_model_name), + max_model_len=self.model_config.max_model_len) + self.stat_logger.info("cache_config", self.cache_config) + + # Create sequence output processor, e.g. for beam search or + # speculative decoding. + self.output_processor = ( + SequenceGroupOutputProcessor.create_output_processor( + self.scheduler_config, + self.detokenizer, + self.scheduler, + self.seq_counter, + self.get_tokenizer_for_seq, + stop_checker=StopChecker( + self.scheduler_config.max_model_len, + self.get_tokenizer_for_seq, + ), + )) + + def _initialize_kv_caches(self) -> None: + """Initialize the KV cache in the worker(s). 
+ + The workers will determine the number of blocks in both the GPU cache + and the swap CPU cache. + """ + num_gpu_blocks, num_cpu_blocks = ( + self.model_executor.determine_num_available_blocks()) + + if self.cache_config.num_gpu_blocks_override is not None: + num_gpu_blocks_override = self.cache_config.num_gpu_blocks_override + logger.info( + "Overriding num_gpu_blocks=%d with " + "num_gpu_blocks_override=%d", num_gpu_blocks, + num_gpu_blocks_override) + num_gpu_blocks = num_gpu_blocks_override + + self.cache_config.num_gpu_blocks = num_gpu_blocks + self.cache_config.num_cpu_blocks = num_cpu_blocks + + self.model_executor.initialize_cache(num_gpu_blocks, num_cpu_blocks) + + @classmethod + def from_engine_args( + cls, + engine_args: EngineArgs, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, + ) -> "LLMEngine": + """Creates an LLM engine from the engine arguments.""" + # Create the engine configs. + engine_config = engine_args.create_engine_config() + + # Initialize the cluster and specify the executor class. + if engine_config.device_config.device_type == "neuron": + from vllm.executor.neuron_executor import NeuronExecutor + executor_class = NeuronExecutor + elif engine_config.device_config.device_type == "cpu": + from vllm.executor.cpu_executor import CPUExecutor + executor_class = CPUExecutor + elif engine_config.parallel_config.worker_use_ray: + initialize_ray_cluster(engine_config.parallel_config) + from vllm.executor.ray_gpu_executor import RayGPUExecutor + executor_class = RayGPUExecutor + else: + assert engine_config.parallel_config.world_size == 1, ( + "Ray is required if parallel_config.world_size > 1.") + from vllm.executor.gpu_executor import GPUExecutor + executor_class = GPUExecutor + + # Create the LLM engine. 
+ engine = cls( + **engine_config.to_dict(), + executor_class=executor_class, + log_stats=not engine_args.disable_log_stats, + usage_context=usage_context, + ) + return engine + + def __reduce__(self): + # This is to ensure that the LLMEngine is not referenced in + # the closure used to initialize Ray worker actors + raise RuntimeError("LLMEngine should not be pickled!") + + def __del__(self): + # Shutdown model executor when engine is garbage collected + # Use getattr since __init__ can fail before the field is set + if model_executor := getattr(self, "model_executor", None): + model_executor.shutdown() + + def get_tokenizer(self) -> "PreTrainedTokenizer": + return self.tokenizer.get_lora_tokenizer(None) + + def get_tokenizer_for_seq(self, + sequence: Sequence) -> "PreTrainedTokenizer": + return self.tokenizer.get_lora_tokenizer(sequence.lora_request) + + def _init_tokenizer(self, **tokenizer_init_kwargs): + init_kwargs = dict( + tokenizer_id=self.model_config.tokenizer, + enable_lora=bool(self.lora_config), + max_num_seqs=self.scheduler_config.max_num_seqs, + max_input_length=None, + tokenizer_mode=self.model_config.tokenizer_mode, + trust_remote_code=self.model_config.trust_remote_code, + revision=self.model_config.tokenizer_revision) + init_kwargs.update(tokenizer_init_kwargs) + self.tokenizer = get_tokenizer_group( + self.parallel_config.tokenizer_pool_config, **init_kwargs) + + def _verify_args(self) -> None: + self.model_config.verify_with_parallel_config(self.parallel_config) + self.cache_config.verify_with_parallel_config(self.parallel_config) + if self.lora_config: + self.lora_config.verify_with_model_config(self.model_config) + self.lora_config.verify_with_scheduler_config( + self.scheduler_config) + + def encode_request( + self, + request_id: str, # pylint: disable=unused-argument + prompt: Optional[str], + prompt_token_ids: Optional[List[int]] = None, + lora_request: Optional[LoRARequest] = None, + ): + if prompt_token_ids is None: + assert prompt is 
not None + prompt_token_ids = self.tokenizer.encode(request_id=request_id, + prompt=prompt, + lora_request=lora_request) + return prompt_token_ids + + def add_request( + self, + request_id: str, + prompt: Optional[str], + sampling_params: SamplingParams, + prompt_token_ids: Optional[List[int]] = None, + arrival_time: Optional[float] = None, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> None: + """Add a request to the engine's request pool. + + The request is added to the request pool and will be processed by the + scheduler as `engine.step()` is called. The exact scheduling policy is + determined by the scheduler. + + Args: + request_id: The unique ID of the request. + prompt: The prompt string. Can be None if prompt_token_ids is + provided. + sampling_params: The sampling parameters for text generation. + prompt_token_ids: The token IDs of the prompt. If None, we + use the tokenizer to convert the prompts to token IDs. + arrival_time: The arrival time of the request. If None, we use + the current monotonic time. + multi_modal_data: Multi modal data per request. + + Details: + - Set arrival_time to the current time if it is None. + - Set prompt_token_ids to the encoded prompt if it is None. + - Create `best_of` number of :class:`~vllm.Sequence` objects. + - Create a :class:`~vllm.SequenceGroup` object + from the list of :class:`~vllm.Sequence`. + - Add the :class:`~vllm.SequenceGroup` object to the scheduler. + + Example: + >>> # initialize engine + >>> engine = LLMEngine.from_engine_args(engine_args) + >>> # set request arguments + >>> example_prompt = "Who is the president of the United States?" + >>> sampling_params = SamplingParams(temperature=0.0) + >>> request_id = 0 + >>> + >>> # add the request to the engine + >>> engine.add_request( + >>> str(request_id), + >>> example_prompt, + >>> SamplingParams(temperature=0.0)) + >>> # continue the request processing + >>> ... 
+ """ + if lora_request is not None and not self.lora_config: + raise ValueError(f"Got lora_request {lora_request} but LoRA is " + "not enabled!") + max_logprobs = self.get_model_config().max_logprobs + if (sampling_params.logprobs + and sampling_params.logprobs > max_logprobs) or ( + sampling_params.prompt_logprobs + and sampling_params.prompt_logprobs > max_logprobs): + raise ValueError(f"Cannot request more than " + f"{max_logprobs} logprobs.") + if arrival_time is None: + arrival_time = time.time() + prompt_token_ids = self.encode_request( + request_id=request_id, + prompt=prompt, + prompt_token_ids=prompt_token_ids, + lora_request=lora_request) + + # Create the sequences. + block_size = self.cache_config.block_size + seq_id = next(self.seq_counter) + eos_token_id = None + if self.tokenizer: + eos_token_id = self.tokenizer.get_lora_tokenizer( + lora_request).eos_token_id + else: + logger.warning("Use None for EOS token id because tokenizer is " + "not initialized") + seq = Sequence(seq_id, prompt, prompt_token_ids, block_size, + eos_token_id, lora_request) + + # Defensive copy of SamplingParams, which are used by the sampler, + # this doesn't deep-copy LogitsProcessor objects + sampling_params = sampling_params.clone() + # Add the eos token id into the sampling_params to support min_tokens + # processing + if seq.eos_token_id is not None: + sampling_params.all_stop_token_ids.add(seq.eos_token_id) + sampling_params.update_from_generation_config( + self.generation_config_fields) + + # Create the sequence group. + seq_group = SequenceGroup(request_id, [seq], sampling_params, + arrival_time, lora_request, multi_modal_data) + + # Add the sequence group to the scheduler. + self.scheduler.add_seq_group(seq_group) + + def abort_request(self, request_id: Union[str, Iterable[str]]) -> None: + """Aborts a request(s) with the given ID. + + Args: + request_id: The ID(s) of the request to abort. 
+ + Details: + - Refer to the + :meth:`~vllm.core.scheduler.Scheduler.abort_seq_group` + from class :class:`~vllm.core.scheduler.Scheduler`. + + Example: + >>> # initialize engine and add a request with request_id + >>> request_id = str(0) + >>> # abort the request + >>> engine.abort_request(request_id) + """ + self.scheduler.abort_seq_group(request_id) + + def get_model_config(self) -> ModelConfig: + """Gets the model configuration.""" + return self.model_config + + def get_decoding_config(self) -> DecodingConfig: + """Gets the decoding configuration.""" + return self.decoding_config + + def get_num_unfinished_requests(self) -> int: + """Gets the number of unfinished requests.""" + return self.scheduler.get_num_unfinished_seq_groups() + + def has_unfinished_requests(self) -> bool: + """Returns True if there are unfinished requests.""" + return self.scheduler.has_unfinished_seqs() + + def _process_model_outputs( + self, + output: List[SamplerOutput], + scheduled_seq_groups: List[ScheduledSequenceGroup], + ignored_seq_groups: List[SequenceGroup], + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> List[RequestOutput]: + """Apply the model output to the sequences in the scheduled seq groups. + + Returns RequestOutputs that can be returned to the client. + """ + + now = time.time() + + # Organize outputs by [sequence group][step] instead of + # [step][sequence group]. + output_by_sequence_group = create_output_by_sequence_group( + sampler_outputs=output, num_seq_groups=len(scheduled_seq_groups)) + + # Update the scheduled sequence groups with the model outputs. 
+ for scheduled_seq_group, outputs, seq_group_meta in zip( + scheduled_seq_groups, output_by_sequence_group, + seq_group_metadata_list): + seq_group = scheduled_seq_group.seq_group + seq_group.update_num_computed_tokens( + scheduled_seq_group.token_chunk_size) + + self.output_processor.process_prompt_logprob(seq_group, outputs) + if seq_group_meta.do_sample: + self.output_processor.process_outputs(seq_group, outputs) + + # Free the finished sequence groups. + self.scheduler.free_finished_seq_groups() + + # Create the outputs. + request_outputs: List[RequestOutput] = [] + for scheduled_seq_group in scheduled_seq_groups: + seq_group = scheduled_seq_group.seq_group + seq_group.maybe_set_first_token_time(now) + request_output = RequestOutput.from_seq_group(seq_group) + request_outputs.append(request_output) + for seq_group in ignored_seq_groups: + request_output = RequestOutput.from_seq_group(seq_group) + request_outputs.append(request_output) + return request_outputs + + def step(self) -> List[RequestOutput]: + """Performs one decoding iteration and returns newly generated results. + + .. figure:: https://i.imgur.com/sv2HssD.png + :alt: Overview of the step function + :align: center + + Overview of the step function. + + Details: + - Step 1: Schedules the sequences to be executed in the next + iteration and the token blocks to be swapped in/out/copy. + + - Depending on the scheduling policy, + sequences may be `preempted/reordered`. + - A Sequence Group (SG) refer to a group of sequences + that are generated from the same prompt. + + - Step 2: Calls the distributed executor to execute the model. + - Step 3: Processes the model output. This mainly includes: + + - Decodes the relevant outputs. + - Updates the scheduled sequence groups with model outputs + based on its `sampling parameters` (`use_beam_search` or not). + - Frees the finished sequence groups. + + - Finally, it creates and returns the newly generated results. 
+ + Example: + >>> # Please see the example/ folder for more detailed examples. + >>> + >>> # initialize engine and request arguments + >>> engine = LLMEngine.from_engine_args(engine_args) + >>> example_inputs = [(0, "What is LLM?", + >>> SamplingParams(temperature=0.0))] + >>> + >>> # Start the engine with an event loop + >>> while True: + >>> if example_inputs: + >>> req_id, prompt, sampling_params = example_inputs.pop(0) + >>> engine.add_request(str(req_id), prompt, sampling_params) + >>> + >>> # continue the request processing + >>> request_outputs = engine.step() + >>> for request_output in request_outputs: + >>> if request_output.finished: + >>> # return or show the request output + >>> + >>> if not (engine.has_unfinished_requests() or example_inputs): + >>> break + """ + seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule() + + if not scheduler_outputs.is_empty(): + execute_model_req = ExecuteModelRequest( + seq_group_metadata_list=seq_group_metadata_list, + blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in, + blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out, + blocks_to_copy=scheduler_outputs.blocks_to_copy, + num_lookahead_slots=scheduler_outputs.num_lookahead_slots, + running_queue_size=scheduler_outputs.running_queue_size, + ) + output = self.model_executor.execute_model( + execute_model_req=execute_model_req) + else: + output = [] + + request_outputs = self._process_model_outputs( + output, scheduler_outputs.scheduled_seq_groups, + scheduler_outputs.ignored_seq_groups, seq_group_metadata_list) + + # Log stats. 
+ self.do_log_stats(scheduler_outputs, output) + + return request_outputs + + def do_log_stats( + self, + scheduler_outputs: Optional[SchedulerOutputs] = None, + model_output: Optional[List[SamplerOutput]] = None) -> None: + """Forced log when no requests active.""" + if self.log_stats: + self.stat_logger.log( + self._get_stats(scheduler_outputs, model_output)) + + def _get_stats( + self, + scheduler_outputs: Optional[SchedulerOutputs], + model_output: Optional[List[SamplerOutput]] = None) -> Stats: + """Get Stats to be Logged to Prometheus. + + Args: + scheduler_outputs: Optional, used to populate metrics related to + the scheduled batch, + model_output: Optional, used to emit speculative decoding metrics + which are created by the workers. + """ + now = time.time() + + # System State + # Scheduler State + num_running_sys = len(self.scheduler.running) + num_swapped_sys = len(self.scheduler.swapped) + num_waiting_sys = len(self.scheduler.waiting) + + # KV Cache Usage in % + num_total_gpu = self.cache_config.num_gpu_blocks + num_free_gpu = self.scheduler.block_manager.get_num_free_gpu_blocks() + gpu_cache_usage_sys = 1.0 - (num_free_gpu / num_total_gpu) + + num_total_cpu = self.cache_config.num_cpu_blocks + cpu_cache_usage_sys = 0. + if num_total_cpu > 0: + num_free_cpu = self.scheduler.block_manager.get_num_free_cpu_blocks( + ) + cpu_cache_usage_sys = 1.0 - (num_free_cpu / num_total_cpu) + + # Iteration stats + num_prompt_tokens_iter = 0 + num_generation_tokens_iter = 0 + time_to_first_tokens_iter: List[float] = [] + time_per_output_tokens_iter: List[float] = [] + + # Request stats + # Latency + time_e2e_requests: List[float] = [] + # Metadata + num_prompt_tokens_requests: List[int] = [] + num_generation_tokens_requests: List[int] = [] + best_of_requests: List[int] = [] + n_requests: List[int] = [] + finished_reason_requests: List[str] = [] + + # NOTE: This loop assumes prefill seq_groups are before + # decode seq_groups in scheduled_seq_groups. 
+ if scheduler_outputs is not None: + num_generation_tokens_from_prefill_groups = 0. + # NOTE: if scheduler_outputs.num_prefill_groups > 0 and + # the len of scheduler_outputs.scheduled_seq_groups is != + # scheduler_outputs.num_prefill_groups, this means that + # chunked prefills have been detected. + + for idx, scheduled_seq_group in enumerate( + scheduler_outputs.scheduled_seq_groups): + group_was_prefill = idx < scheduler_outputs.num_prefill_groups + seq_group = scheduled_seq_group.seq_group + + # NOTE: a seq_group that completed all of its prefill tokens + # in the last iteration will have seq_group.is_prefill() = False + # with group_was_prefill = True + if group_was_prefill: + # Number of prompt tokens. + num_prompt_tokens_iter += ( + scheduled_seq_group.token_chunk_size) + + # If the seq_group just finished the prefill state + # get TTFT. + if not seq_group.is_prefill(): + latency = seq_group.get_last_latency(now) + time_to_first_tokens_iter.append(latency) + + # One generation token per finished prefill. + num_generation_tokens_from_prefill_groups += ( + seq_group.num_seqs()) + else: + # TPOTs. + latency = seq_group.get_last_latency(now) + time_per_output_tokens_iter.append(latency) + + # Because of chunked prefill, we can have a single sequence + # group that does multiple prompt_runs. To prevent logging + # the same metadata more than once per request, we standardize + # on logging request level information for finished requests, + # which can only happen once. 
+ if seq_group.is_finished(): + # Latency timings + time_e2e_requests.append(now - + seq_group.metrics.arrival_time) + + # Metadata + num_prompt_tokens_requests.append( + len(seq_group.prompt_token_ids)) + num_generation_tokens_requests.extend([ + seq.get_output_len() + for seq in seq_group.get_finished_seqs() + ]) + best_of_requests.append(seq_group.sampling_params.best_of) + n_requests.append(seq_group.sampling_params.n) + finished_reason_requests.extend([ + SequenceStatus.get_finished_reason(seq.status) + for seq in seq_group.get_finished_seqs() + ]) + + # Number of generation tokens. + # num_batched_tokens equals the number of prompt_tokens plus the + # number of decode_tokens in a single iteration. So, + # num_generation_tokens = num_batched_tokens - num_prompt_tokens + # + num_generation_tokens_from_prefill_groups (since we generate + # one token on prefills on iters where the prefill finishes). + num_generation_tokens_iter = ( + scheduler_outputs.num_batched_tokens - num_prompt_tokens_iter + + num_generation_tokens_from_prefill_groups) + + # Spec decode, if enabled, emits specialized metrics from the worker in + # sampler output. 
+ if model_output and (model_output[0].spec_decode_worker_metrics + is not None): + spec_decode_metrics = model_output[0].spec_decode_worker_metrics + else: + spec_decode_metrics = None + + return Stats( + now=now, + + # System stats + # Scheduler State + num_running_sys=num_running_sys, + num_swapped_sys=num_swapped_sys, + num_waiting_sys=num_waiting_sys, + # KV Cache Usage in % + gpu_cache_usage_sys=gpu_cache_usage_sys, + cpu_cache_usage_sys=cpu_cache_usage_sys, + + # Iteration stats + num_prompt_tokens_iter=num_prompt_tokens_iter, + num_generation_tokens_iter=num_generation_tokens_iter, + time_to_first_tokens_iter=time_to_first_tokens_iter, + time_per_output_tokens_iter=time_per_output_tokens_iter, + spec_decode_metrics=spec_decode_metrics, + + # Request stats + # Latency + time_e2e_requests=time_e2e_requests, + # Metadata + num_prompt_tokens_requests=num_prompt_tokens_requests, + num_generation_tokens_requests=num_generation_tokens_requests, + best_of_requests=best_of_requests, + n_requests=n_requests, + finished_reason_requests=finished_reason_requests, + ) + + def add_lora(self, lora_request: LoRARequest) -> bool: + return self.model_executor.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + return self.model_executor.remove_lora(lora_id) + + def list_loras(self) -> List[int]: + return self.model_executor.list_loras() + + def check_health(self) -> None: + self.model_executor.check_health() diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py new file mode 100644 index 0000000..3c4aac9 --- /dev/null +++ b/vllm/engine/metrics.py @@ -0,0 +1,368 @@ +import time +from dataclasses import dataclass +from typing import TYPE_CHECKING +from typing import Counter as CollectionsCounter +from typing import Dict, List, Optional, Protocol, Union + +import numpy as np +from prometheus_client import (REGISTRY, Counter, Gauge, Histogram, Info, + disable_created_metrics) + +from vllm.logger import init_logger + +if TYPE_CHECKING: + from 
vllm.spec_decode.metrics import SpecDecodeWorkerMetrics + +logger = init_logger(__name__) + +disable_created_metrics() + +# The begin-* and end* here are used by the documentation generator +# to extract the metrics definitions. + + +# begin-metrics-definitions +class Metrics: + labelname_finish_reason = "finished_reason" + + def __init__(self, labelnames: List[str], max_model_len: int): + # Unregister any existing vLLM collectors + for collector in list(REGISTRY._collector_to_names): + if hasattr(collector, "_name") and "vllm" in collector._name: + REGISTRY.unregister(collector) + + # Config Information + self.info_cache_config = Info( + name='vllm:cache_config', + documentation='information of cache_config') + + # System stats + # Scheduler State + self.gauge_scheduler_running = Gauge( + name="vllm:num_requests_running", + documentation="Number of requests currently running on GPU.", + labelnames=labelnames) + self.gauge_scheduler_waiting = Gauge( + name="vllm:num_requests_waiting", + documentation="Number of requests waiting to be processed.", + labelnames=labelnames) + self.gauge_scheduler_swapped = Gauge( + name="vllm:num_requests_swapped", + documentation="Number of requests swapped to CPU.", + labelnames=labelnames) + # KV Cache Usage in % + self.gauge_gpu_cache_usage = Gauge( + name="vllm:gpu_cache_usage_perc", + documentation="GPU KV-cache usage. 1 means 100 percent usage.", + labelnames=labelnames) + self.gauge_cpu_cache_usage = Gauge( + name="vllm:cpu_cache_usage_perc", + documentation="CPU KV-cache usage. 
1 means 100 percent usage.", + labelnames=labelnames) + + # Iteration stats + self.counter_prompt_tokens = Counter( + name="vllm:prompt_tokens_total", + documentation="Number of prefill tokens processed.", + labelnames=labelnames) + self.counter_generation_tokens = Counter( + name="vllm:generation_tokens_total", + documentation="Number of generation tokens processed.", + labelnames=labelnames) + self.histogram_time_to_first_token = Histogram( + name="vllm:time_to_first_token_seconds", + documentation="Histogram of time to first token in seconds.", + labelnames=labelnames, + buckets=[ + 0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5, + 0.75, 1.0, 2.5, 5.0, 7.5, 10.0 + ]) + self.histogram_time_per_output_token = Histogram( + name="vllm:time_per_output_token_seconds", + documentation="Histogram of time per output token in seconds.", + labelnames=labelnames, + buckets=[ + 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, + 1.0, 2.5 + ]) + + # Request stats + # Latency + self.histogram_e2e_time_request = Histogram( + name="vllm:e2e_request_latency_seconds", + documentation="Histogram of end to end request latency in seconds.", + labelnames=labelnames, + buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0]) + # Metadata + self.histogram_num_prompt_tokens_request = Histogram( + name="vllm:request_prompt_tokens", + documentation="Number of prefill tokens processed.", + labelnames=labelnames, + buckets=build_1_2_5_buckets(max_model_len), + ) + self.histogram_num_generation_tokens_request = Histogram( + name="vllm:request_generation_tokens", + documentation="Number of generation tokens processed.", + labelnames=labelnames, + buckets=build_1_2_5_buckets(max_model_len), + ) + self.histogram_best_of_request = Histogram( + name="vllm:request_params_best_of", + documentation="Histogram of the best_of request parameter.", + labelnames=labelnames, + buckets=[1, 2, 5, 10, 20], + ) + self.histogram_n_request = Histogram( + 
name="vllm:request_params_n", + documentation="Histogram of the n request parameter.", + labelnames=labelnames, + buckets=[1, 2, 5, 10, 20], + ) + self.counter_request_success = Counter( + name="vllm:request_success_total", + documentation="Count of successfully processed requests.", + labelnames=labelnames + [Metrics.labelname_finish_reason]) + + # Deprecated in favor of vllm:prompt_tokens_total + self.gauge_avg_prompt_throughput = Gauge( + name="vllm:avg_prompt_throughput_toks_per_s", + documentation="Average prefill throughput in tokens/s.", + labelnames=labelnames, + ) + # Deprecated in favor of vllm:generation_tokens_total + self.gauge_avg_generation_throughput = Gauge( + name="vllm:avg_generation_throughput_toks_per_s", + documentation="Average generation throughput in tokens/s.", + labelnames=labelnames, + ) + + +# end-metrics-definitions + + +def build_1_2_5_buckets(max_value: int): + """ + Builds a list of buckets with increasing powers of 10 multiplied by + mantissa values (1, 2, 5) until the value exceeds the specified maximum. 
+ + Example: + >>> build_1_2_5_buckets(100) + [1, 2, 5, 10, 20, 50, 100] + """ + mantissa_lst = [1, 2, 5] + exponent = 0 + buckets = [] + while True: + for m in mantissa_lst: + value = m * 10**exponent + if value <= max_value: + buckets.append(value) + else: + return buckets + exponent += 1 + + +@dataclass +class Stats: + """Created by LLMEngine for use by StatLogger.""" + now: float + + # System stats (should have _sys suffix) + # Scheduler State + num_running_sys: int + num_waiting_sys: int + num_swapped_sys: int + # KV Cache Usage in % + gpu_cache_usage_sys: float + cpu_cache_usage_sys: float + + # Iteration stats (should have _iter suffix) + num_prompt_tokens_iter: int + num_generation_tokens_iter: int + time_to_first_tokens_iter: List[float] + time_per_output_tokens_iter: List[float] + + # Request stats (should have _requests suffix) + # Latency + time_e2e_requests: List[float] + # Metadata + num_prompt_tokens_requests: List[int] + num_generation_tokens_requests: List[int] + best_of_requests: List[int] + n_requests: List[int] + finished_reason_requests: List[str] + + spec_decode_metrics: Optional["SpecDecodeWorkerMetrics"] = None + + +class SupportsMetricsInfo(Protocol): + + def metrics_info(self) -> Dict[str, str]: + ... + + +class StatLogger: + """StatLogger is used LLMEngine to log to Promethus and Stdout.""" + + def __init__(self, local_interval: float, labels: Dict[str, str], + max_model_len: int) -> None: + # Metadata for logging locally. + self.last_local_log = time.time() + self.local_interval = local_interval + + # Tracked stats over current local logging interval. 
+ self.num_prompt_tokens: List[int] = [] + self.num_generation_tokens: List[int] = [] + + # Prometheus metrics + self.labels = labels + self.metrics = Metrics(labelnames=list(labels.keys()), + max_model_len=max_model_len) + + def info(self, type: str, obj: SupportsMetricsInfo) -> None: + if type == "cache_config": + self.metrics.info_cache_config.info(obj.metrics_info()) + + def _get_throughput(self, tracked_stats: List[int], now: float) -> float: + return float(np.sum(tracked_stats) / (now - self.last_local_log)) + + def _local_interval_elapsed(self, now: float) -> bool: + elapsed_time = now - self.last_local_log + return elapsed_time > self.local_interval + + def _log_prometheus(self, stats: Stats) -> None: + # System state data + self._log_gauge(self.metrics.gauge_scheduler_running, + stats.num_running_sys) + self._log_gauge(self.metrics.gauge_scheduler_swapped, + stats.num_swapped_sys) + self._log_gauge(self.metrics.gauge_scheduler_waiting, + stats.num_waiting_sys) + self._log_gauge(self.metrics.gauge_gpu_cache_usage, + stats.gpu_cache_usage_sys) + self._log_gauge(self.metrics.gauge_cpu_cache_usage, + stats.cpu_cache_usage_sys) + + # Iteration level data + self._log_counter(self.metrics.counter_prompt_tokens, + stats.num_prompt_tokens_iter) + self._log_counter(self.metrics.counter_generation_tokens, + stats.num_generation_tokens_iter) + self._log_histogram(self.metrics.histogram_time_to_first_token, + stats.time_to_first_tokens_iter) + self._log_histogram(self.metrics.histogram_time_per_output_token, + stats.time_per_output_tokens_iter) + + # Request level data + # Latency + self._log_histogram(self.metrics.histogram_e2e_time_request, + stats.time_e2e_requests) + # Metadata + finished_reason_counter = CollectionsCounter( + stats.finished_reason_requests) + self._log_counter_labels(self.metrics.counter_request_success, + finished_reason_counter, + Metrics.labelname_finish_reason) + self._log_histogram(self.metrics.histogram_num_prompt_tokens_request, + 
stats.num_prompt_tokens_requests) + self._log_histogram( + self.metrics.histogram_num_generation_tokens_request, + stats.num_generation_tokens_requests) + self._log_histogram(self.metrics.histogram_n_request, stats.n_requests) + self._log_histogram(self.metrics.histogram_best_of_request, + stats.best_of_requests) + + def _log_gauge(self, gauge: Gauge, data: Union[int, float]) -> None: + # Convenience function for logging to gauge. + gauge.labels(**self.labels).set(data) + + def _log_counter(self, counter: Counter, data: Union[int, float]) -> None: + # Convenience function for logging to counter. + counter.labels(**self.labels).inc(data) + + def _log_counter_labels(self, counter: Counter, data: CollectionsCounter, + label_key: str) -> None: + # Convenience function for collection counter of labels. + for label, count in data.items(): + counter.labels(**{**self.labels, label_key: label}).inc(count) + + def _log_histogram(self, histogram: Histogram, + data: Union[List[int], List[float]]) -> None: + # Convenience function for logging list to histogram. + for datum in data: + histogram.labels(**self.labels).observe(datum) + + def _log_prometheus_interval(self, prompt_throughput: float, + generation_throughput: float) -> None: + # Logs metrics to prometheus that are computed every logging_interval. + # Support legacy gauge metrics that make throughput calculations on + # the vLLM side. Moving forward, we should use counters like + # counter_prompt_tokens, counter_generation_tokens + # Which log raw data and calculate summaries using rate() on the + # grafana/prometheus side. See + # https://github.com/vllm-project/vllm/pull/2316#discussion_r1464204666 + self.metrics.gauge_avg_prompt_throughput.labels( + **self.labels).set(prompt_throughput) + self.metrics.gauge_avg_generation_throughput.labels( + **self.labels).set(generation_throughput) + + def log(self, stats: Stats) -> None: + """Called by LLMEngine. + Logs to prometheus and tracked stats every iteration. 
+ Logs to Stdout every self.local_interval seconds.""" + + # Log to prometheus. + self._log_prometheus(stats) + + # Save tracked stats for token counters. + self.num_prompt_tokens.append(stats.num_prompt_tokens_iter) + self.num_generation_tokens.append(stats.num_generation_tokens_iter) + + # Log locally every local_interval seconds. + if self._local_interval_elapsed(stats.now): + # Compute summary metrics for tracked stats (and log them + # to promethus if applicable). + prompt_throughput = self._get_throughput(self.num_prompt_tokens, + now=stats.now) + generation_throughput = self._get_throughput( + self.num_generation_tokens, now=stats.now) + self._log_prometheus_interval( + prompt_throughput=prompt_throughput, + generation_throughput=generation_throughput) + + # Log to stdout. + logger.info( + "Avg prompt throughput: %.1f tokens/s, " + "Avg generation throughput: %.1f tokens/s, " + "Running: %d reqs, Swapped: %d reqs, " + "Pending: %d reqs, GPU KV cache usage: %.1f%%, " + "CPU KV cache usage: %.1f%%", + prompt_throughput, + generation_throughput, + stats.num_running_sys, + stats.num_swapped_sys, + stats.num_waiting_sys, + stats.gpu_cache_usage_sys * 100, + stats.cpu_cache_usage_sys * 100, + ) + + # Reset tracked stats for next interval. 
+ self.num_prompt_tokens = [] + self.num_generation_tokens = [] + self.last_local_log = stats.now + + if stats.spec_decode_metrics is not None: + logger.info( + self._format_spec_decode_metrics_str( + stats.spec_decode_metrics)) + + def _format_spec_decode_metrics_str( + self, metrics: "SpecDecodeWorkerMetrics") -> str: + + return ("Speculative metrics: " + f"Draft acceptance rate: {metrics.draft_acceptance_rate:.3f}, " + f"System efficiency: {metrics.system_efficiency:.3f}, " + f"Number of speculative tokens: {metrics.num_spec_tokens}, " + f"Number of accepted tokens: {metrics.accepted_tokens}, " + f"Number of draft tokens tokens: {metrics.draft_tokens}, " + f"Number of emitted tokens tokens: {metrics.emitted_tokens}.") diff --git a/vllm/engine/output_processor/__init__.py b/vllm/engine/output_processor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/engine/output_processor/interfaces.py b/vllm/engine/output_processor/interfaces.py new file mode 100644 index 0000000..9ddb6a3 --- /dev/null +++ b/vllm/engine/output_processor/interfaces.py @@ -0,0 +1,76 @@ +from abc import ABC, abstractmethod +from typing import Callable, List + +from transformers import PreTrainedTokenizer + +from vllm.config import SchedulerConfig +from vllm.core.scheduler import Scheduler +from vllm.engine.output_processor.stop_checker import StopChecker +from vllm.sequence import Sequence, SequenceGroup, SequenceGroupOutput +from vllm.transformers_utils.detokenizer import Detokenizer +from vllm.utils import Counter + + +class SequenceGroupOutputProcessor(ABC): + """Interface for logic that processes new token ids in sequence groups, + managing detokenization, stop checking, and freeing/forking sequences with + the scheduler. + + This is highly coupled with the LLMEngine and should be seen as an extension + of it. 
The logic is separated to simplify the LLMEngine class and allow + separate implementations for single-step decoding (which supports beam + search sequence forking) and multi-step decoding (which does not support + beam search, but does support speculative decoding). + """ + + @staticmethod + def create_output_processor( + scheduler_config: SchedulerConfig, + detokenizer: Detokenizer, + scheduler: Scheduler, + seq_counter: Counter, + get_tokenizer_for_seq: Callable[[Sequence], PreTrainedTokenizer], + stop_checker: "StopChecker", + ): + """Create an output processor. + + This returns a single-step output processor if num_lookahead_slots is + zero, else returns a multi-step output processor. + """ + if scheduler_config.num_lookahead_slots == 0: + # Importing here to avoid cycle. + from vllm.engine.output_processor.single_step import ( + SingleStepOutputProcessor) + return SingleStepOutputProcessor( + scheduler_config, + detokenizer, + scheduler, + seq_counter, + stop_checker, + ) + else: + # Importing here to avoid cycle. + from vllm.engine.output_processor.multi_step import ( + MultiStepOutputProcessor) + return MultiStepOutputProcessor( + detokenizer, + scheduler, + seq_counter, + get_tokenizer_for_seq, + stop_checker, + ) + + @abstractmethod + def process_outputs(self, sequence_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + """Process new token ids for the sequence group. Handles logic such as + detokenization, stop checking, and freeing/forking sequences in the + scheduler. 
+ """ + pass + + @abstractmethod + def process_prompt_logprob(self, seq_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + """Update prompt logprobs received from outputs to seq_group.""" + pass diff --git a/vllm/engine/output_processor/multi_step.py b/vllm/engine/output_processor/multi_step.py new file mode 100644 index 0000000..5f2f433 --- /dev/null +++ b/vllm/engine/output_processor/multi_step.py @@ -0,0 +1,142 @@ +import functools +from typing import Callable, List + +from transformers import PreTrainedTokenizer + +from vllm.core.scheduler import Scheduler +from vllm.engine.output_processor.interfaces import ( + SequenceGroupOutputProcessor) +from vllm.engine.output_processor.stop_checker import StopChecker +from vllm.logger import init_logger +from vllm.sampling_params import SamplingParams +from vllm.sequence import (Sequence, SequenceGroup, SequenceGroupOutput, + SequenceOutput, SequenceStatus) +from vllm.transformers_utils.detokenizer import Detokenizer +from vllm.utils import Counter + +logger = init_logger(__name__) + + +class MultiStepOutputProcessor(SequenceGroupOutputProcessor): + """SequenceGroupOutputProcessor which handles logic related to + detokenization and stopping conditions. It specializes to "multi-step + decoding", where vLLM's worker may generate multiple tokens per invocation. + This is currently mutually exclusive with advanced sampling techniques like + beam search, which motivates the separation of this logic from the single + step output processor. + + This class is responsible for things such as correctly appending all new + token ids to their sequence, detokenizing new token ids, truncating new + output tokens after an eos token, and correctly handling the case where the + number of new output tokens per sequence differs in a single batch. 
+ """ + + def __init__( + self, + detokenizer: Detokenizer, + scheduler: Scheduler, + seq_counter: Counter, + get_tokenizer_for_seq: Callable[[Sequence], PreTrainedTokenizer], + stop_checker: StopChecker, + ): + self.detokenizer = detokenizer + self.scheduler = scheduler + self.seq_counter = seq_counter + self.get_tokenizer_for_seq = get_tokenizer_for_seq + self.stop_checker = stop_checker + + def process_prompt_logprob(self, seq_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + # TODO(sang): Prompt logprob currently not implemented in multi step + # workers. + self._log_prompt_logprob_unsupported_warning_once() + + @staticmethod + @functools.lru_cache() + def _log_prompt_logprob_unsupported_warning_once(): + logger.warning( + "Prompt logprob is not supported by multi step workers. " + "(e.g., speculative decode uses multi step workers).") + + def process_outputs(self, sequence_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + """Append new tokens in the outputs to sequences in the sequence group. + + This only supports sequence groups of size 1. It supports greater than + one new token per sequence. + + This applies logic like stop condition checking and detokenization, + including freeing finished sequences. It also handles cases where there + are tokens emitted after the EOS token. + """ + seqs = sequence_group.get_seqs(status=SequenceStatus.RUNNING) + + assert seqs, "expected running sequences" + assert len(seqs) == 1, ( + "Beam search not supported in multi-step decoding.") + seq = seqs[0] + + # Since there's only one sequence per sequence group, we can take the + # first sample. + samples = [outputs[step].samples[0] for step in range(len(outputs))] + + # -1 means the output token is not valid (eg. due to spec decode + # rejecting tokens). 
+ valid_samples = [ + sample for sample in samples if sample.output_token != -1 + ] + assert valid_samples + + self._process_seq_outputs(seq, valid_samples, + sequence_group.sampling_params) + + def _process_seq_outputs(self, seq: Sequence, + valid_samples: List[SequenceOutput], + sampling_params: SamplingParams) -> None: + output_token_ids = [sample.output_token for sample in valid_samples] + output_logprobs = [sample.logprobs for sample in valid_samples] + + # Truncate to max_tokens if necessary. + remaining_tokens = sampling_params.max_tokens - (seq.get_output_len() + + len(output_token_ids)) + if remaining_tokens < 0: + valid_samples = valid_samples[:remaining_tokens] + output_token_ids = output_token_ids[:remaining_tokens] + + # Truncate any tokens after EOS. This is required as spec decode + # generates a fixed number of tokens without evaluating stopping + # conditions within the block. This can cause an eos token to be + # unintentionally ignored. + if not sampling_params.ignore_eos: + eos_token_id = self.get_tokenizer_for_seq(seq).eos_token_id + # Avoiding .index calls as exception throwing in the happy path + # is expensive. + for i in range(len(output_token_ids)): + if output_token_ids[i] == eos_token_id: + output_token_ids = output_token_ids[:i + 1] + valid_samples = valid_samples[:i + 1] + break + + # Incrementally append tokens to the sequence, as if we had only one new + # token. 
+ for output_token_id, output_logprob in zip(output_token_ids, + output_logprobs): + seq.append_token_id( + token_id=output_token_id, + logprobs=output_logprob, + ) + + new_char_count = 0 + if sampling_params.detokenize: + new_char_count = self.detokenizer.decode_sequence_inplace( + seq, sampling_params) + + self.stop_checker.maybe_stop_sequence( + seq, + new_char_count=new_char_count, + sampling_params=sampling_params) + if seq.is_finished(): + break + + if seq.is_finished(): + self.scheduler.free_seq(seq) diff --git a/vllm/engine/output_processor/single_step.py b/vllm/engine/output_processor/single_step.py new file mode 100644 index 0000000..07b1405 --- /dev/null +++ b/vllm/engine/output_processor/single_step.py @@ -0,0 +1,284 @@ +from typing import Dict, List, Tuple, Union + +from vllm.config import SchedulerConfig +from vllm.core.scheduler import Scheduler +from vllm.engine.output_processor.interfaces import ( + SequenceGroupOutputProcessor) +from vllm.engine.output_processor.stop_checker import StopChecker +from vllm.logger import init_logger +from vllm.sampling_params import SamplingParams +from vllm.sequence import (Sequence, SequenceGroup, SequenceGroupOutput, + SequenceOutput, SequenceStatus) +from vllm.transformers_utils.detokenizer import Detokenizer +from vllm.utils import Counter + +logger = init_logger(__name__) + + +class SingleStepOutputProcessor(SequenceGroupOutputProcessor): + """SequenceGroupOutputProcessor which handles "output processing" logic, + which happens after the model returns generated token ids and before + scheduling of the next batch. Output processing logic includes + detokenization, and determining if a sequence is finished (e.g. via max len + or eos token). + + The SingleStepOutputProcessor is specialized to the case where the model + emits at most a single token per invocation, which precludes configurations + such as speculative decoding or multi-step decoding. 
This enables beam + search sampling, which requires forking/finishing/freeing sequences in a way + that is currently difficult to schedule multiple steps ahead of time. + """ + + def __init__( + self, + scheduler_config: SchedulerConfig, + detokenizer: Detokenizer, + scheduler: Scheduler, + seq_counter: Counter, + stop_checker: StopChecker, + ): + self.scheduler_config = scheduler_config + self.detokenizer = detokenizer + self.scheduler = scheduler + self.seq_counter = seq_counter + self.stop_checker = stop_checker + + def process_outputs(self, sequence_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + """Append all new tokens to sequences in the sequence group. Fork any + surviving beam candidates; free any unsurviving ones. + + Invokes detokenizer to detokenize new tokens, and also marks sequences + as finished if they meet stop conditions. + """ + assert (len(outputs) == 1 + ), f"{type(self)} does not support multiple outputs per step" + return self._process_sequence_group_outputs(sequence_group, outputs[0]) + + def process_prompt_logprob(self, seq_group: SequenceGroup, + outputs: List[SequenceGroupOutput]) -> None: + assert len(outputs) == 1, ("Single step should only has 1 output.") + output = outputs[0] + prompt_logprobs = output.prompt_logprobs + if (prompt_logprobs is not None + and seq_group.sampling_params.detokenize and self.detokenizer): + self.detokenizer.decode_prompt_logprobs_inplace( + seq_group, prompt_logprobs) + if not seq_group.prompt_logprobs: + # The first prompt token's logprob is None because it doesn't + # have tokens that are precedent. 
+ seq_group.prompt_logprobs = [None] + seq_group.prompt_logprobs.extend(prompt_logprobs) + + def _process_sequence_group_outputs(self, seq_group: SequenceGroup, + outputs: SequenceGroupOutput) -> None: + # Process samples + samples = outputs.samples + parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING) + existing_finished_seqs = seq_group.get_finished_seqs() + parent_child_dict: Dict[int, List[SequenceOutput]] = { + parent_seq.seq_id: [] + for parent_seq in parent_seqs + } + for sample in samples: + parent_child_dict[sample.parent_seq_id].append(sample) + # List of (child, parent) + child_seqs: List[Tuple[Sequence, Sequence]] = [] + + # Process the child samples for each parent sequence + for parent in parent_seqs: + child_samples: List[SequenceOutput] = parent_child_dict[ + parent.seq_id] + if len(child_samples) == 0: + # This parent sequence has no children samples. Remove + # the parent sequence from the sequence group since it will + # not be used in the future iterations. + parent.status = SequenceStatus.FINISHED_ABORTED + seq_group.remove(parent.seq_id) + self.scheduler.free_seq(parent) + continue + # Fork the parent sequence if there are multiple child samples. + for child_sample in child_samples[:-1]: + new_child_seq_id: int = next(self.seq_counter) + child = parent.fork(new_child_seq_id) + child.append_token_id(child_sample.output_token, + child_sample.logprobs) + child_seqs.append((child, parent)) + # Continue the parent sequence for the last child sample. + # We reuse the parent sequence here to reduce redundant memory + # copies, especially when using non-beam search sampling methods. 
+ last_child_sample = child_samples[-1] + parent.append_token_id(last_child_sample.output_token, + last_child_sample.logprobs) + child_seqs.append((parent, parent)) + + for seq, _ in child_seqs: + if seq_group.sampling_params.detokenize and self.detokenizer: + new_char_count = self.detokenizer.decode_sequence_inplace( + seq, seq_group.sampling_params) + else: + new_char_count = 0 + self.stop_checker.maybe_stop_sequence(seq, new_char_count, + seq_group.sampling_params) + + # Non-beam search case + if not seq_group.sampling_params.use_beam_search: + # For newly created child sequences, add them to the sequence group + # and fork them in block manager if they are not finished. + for seq, parent in child_seqs: + if seq is not parent: + seq_group.add(seq) + if not seq.is_finished(): + self.scheduler.fork_seq(parent, seq) + + # Free the finished and selected parent sequences' memory in block + # manager. Keep them in the sequence group as candidate output. + # NOTE: we need to fork the new sequences before freeing the + # old sequences. + for seq, parent in child_seqs: + if seq is parent and seq.is_finished(): + self.scheduler.free_seq(seq) + return + + # Beam search case + # Select the child sequences to keep in the sequence group. + selected_child_seqs = [] + unselected_child_seqs = [] + beam_width = seq_group.sampling_params.best_of + length_penalty = seq_group.sampling_params.length_penalty + + # Select the newly finished sequences with the highest scores + # to replace existing finished sequences. + # Tuple of (seq, parent, is_new) + existing_finished_seqs = [(seq, None, False) + for seq in existing_finished_seqs] + new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs + if seq.is_finished()] + all_finished_seqs = existing_finished_seqs + new_finished_seqs + # Sort the finished sequences by their scores. 
+ all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score( + length_penalty=length_penalty, eos_token_id=x[0].eos_token_id), + reverse=True) + for seq, parent, is_new in all_finished_seqs[:beam_width]: + if is_new: + # A newly generated child sequence finishes and has a high + # score, so we will add it into the sequence group. + selected_child_seqs.append((seq, parent)) + for seq, parent, is_new in all_finished_seqs[beam_width:]: + if is_new: + # A newly generated child sequence finishes but has a low + # score, so we will not add it into the sequence group. + # Additionally, if this sequence is a continuation of a + # parent sequence, we will need remove the parent sequence + # from the sequence group. + unselected_child_seqs.append((seq, parent)) + else: + # An existing finished sequence has a low score, so we will + # remove it from the sequence group. + seq_group.remove(seq.seq_id) + + # select the top beam_width sequences from the running + # sequences for the next iteration to continue the beam + # search. + running_child_seqs = [(seq, parent) for seq, parent in child_seqs + if not seq.is_finished()] + # Sort the running sequences by their scores. + running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score( + length_penalty=length_penalty, eos_token_id=x[0].eos_token_id), + reverse=True) + + # Check if we can stop the beam search. + if len(running_child_seqs) == 0: + # No running sequences, stop the beam search. + stop_beam_search = True + elif len(all_finished_seqs) < beam_width: + # Not enough finished sequences, continue the beam search. 
+ stop_beam_search = False + else: + # Check the early stopping criteria + best_running_seq = running_child_seqs[0][0] + current_worst_seq = all_finished_seqs[beam_width - 1][0] + stop_beam_search = self._check_beam_search_early_stopping( + seq_group.sampling_params.early_stopping, + seq_group.sampling_params, best_running_seq, current_worst_seq) + + if stop_beam_search: + # Stop the beam search and remove all the running sequences from + # the sequence group. + unselected_child_seqs.extend(running_child_seqs) + else: + # Continue the beam search and select the top beam_width sequences + # to continue the beam search. + selected_child_seqs.extend(running_child_seqs[:beam_width]) + # The remaining running sequences will not be used in the next + # iteration. Again, if these sequences are continuations of + # parent sequences, we will need to remove the parent sequences + # from the sequence group. + unselected_child_seqs.extend(running_child_seqs[beam_width:]) + + # For newly created child sequences, add them to the sequence group + # and fork them in block manager if they are not finished. + for seq, parent in selected_child_seqs: + if seq is not parent: + seq_group.add(seq) + if not seq.is_finished(): + self.scheduler.fork_seq(parent, seq) + + # Free the finished and selected parent sequences' memory in block + # manager. Keep them in the sequence group as candidate output. + for seq, parent in selected_child_seqs: + if seq is parent and seq.is_finished(): + self.scheduler.free_seq(seq) + + # Remove the unselected parent sequences from the sequence group and + # free their memory in block manager. 
+ for seq, parent in unselected_child_seqs: + if seq is parent: + # Remove the parent sequence if it is not selected for next + # iteration + seq_group.remove(seq.seq_id) + self.scheduler.free_seq(seq) + + def _check_beam_search_early_stopping( + self, + early_stopping: Union[bool, str], + sampling_params: SamplingParams, + best_running_seq: Sequence, + current_worst_seq: Sequence, + ) -> bool: + assert sampling_params.use_beam_search + length_penalty = sampling_params.length_penalty + if early_stopping is True: + return True + + current_worst_score = current_worst_seq.get_beam_search_score( + length_penalty=length_penalty, + eos_token_id=current_worst_seq.eos_token_id) + if early_stopping is False: + highest_attainable_score = best_running_seq.get_beam_search_score( + length_penalty=length_penalty, + eos_token_id=best_running_seq.eos_token_id) + else: + assert early_stopping == "never" + if length_penalty > 0.0: + # If length_penalty > 0.0, beam search will prefer longer + # sequences. The highest attainable score calculation is + # based on the longest possible sequence length in this case. + max_possible_length = max( + best_running_seq.get_prompt_len() + + sampling_params.max_tokens, + self.scheduler_config.max_model_len) + highest_attainable_score = ( + best_running_seq.get_beam_search_score( + length_penalty=length_penalty, + eos_token_id=best_running_seq.eos_token_id, + seq_len=max_possible_length)) + else: + # Otherwise, beam search will prefer shorter sequences. The + # highest attainable score calculation is based on the current + # sequence length. 
+ highest_attainable_score = ( + best_running_seq.get_beam_search_score( + length_penalty=length_penalty, + eos_token_id=best_running_seq.eos_token_id)) + return current_worst_score >= highest_attainable_score diff --git a/vllm/engine/output_processor/stop_checker.py b/vllm/engine/output_processor/stop_checker.py new file mode 100644 index 0000000..66deb9b --- /dev/null +++ b/vllm/engine/output_processor/stop_checker.py @@ -0,0 +1,101 @@ +from typing import Callable, Optional + +from transformers import PreTrainedTokenizer + +from vllm.sampling_params import SamplingParams +from vllm.sequence import Sequence, SequenceStatus + + +class StopChecker: + """LLMEngine helper class which separates out the logic involving stop + checking. This checks things such as: whether the eos token was emitted, + whether the max_tokens has been consumed, whether a stop string has been + emitted, or if we have exceeded the max model len. + """ + + def __init__(self, max_model_len: int, + get_tokenizer_for_seq: Callable[[Sequence], + PreTrainedTokenizer]): + self.max_model_len = max_model_len + self.get_tokenizer_for_seq = get_tokenizer_for_seq + + def maybe_stop_sequence(self, seq: Sequence, new_char_count: int, + sampling_params: SamplingParams) -> None: + """Stop the finished sequences. + + new_char_count is the number of chars added to the + sequence's output text for the newly generated token + """ + + # Check if the minimum number of tokens has been generated yet; + # skip the stop string/token checks if not + if seq.get_output_len() < sampling_params.min_tokens: + return + + # Check if the sequence has generated the EOS token. + if ((not sampling_params.ignore_eos) + and seq.get_last_token_id() == seq.eos_token_id): + seq.status = SequenceStatus.FINISHED_STOPPED + return + + # Check if a stop token was encountered. + # This assumes a single token produced per step. 
+ last_token_id = seq.get_last_token_id() + if last_token_id in sampling_params.stop_token_ids: + if new_char_count and ( + not sampling_params.include_stop_str_in_output): + # Remove last token + seq.output_text = seq.output_text[:-new_char_count] + seq.status = SequenceStatus.FINISHED_STOPPED + seq.stop_reason = last_token_id + return + + # Check if any stop strings are matched. + stop_str = self._check_stop_strings(seq, new_char_count, + sampling_params) + if stop_str is not None: + seq.status = SequenceStatus.FINISHED_STOPPED + seq.stop_reason = stop_str + return + + # Check if the sequence has reached max_model_len. + if seq.get_len() > self.max_model_len: + seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED + return + + # Check if the sequence has reached max_tokens. + if seq.get_output_len() == sampling_params.max_tokens: + seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED + return + + @staticmethod + def _check_stop_strings(seq: Sequence, new_char_count: int, + sampling_params: SamplingParams) -> Optional[str]: + """Check if any stop strings are matched and truncate sequence + output text accordingly. + + Returns the stop string if matched or else None. + """ + if not new_char_count: + return None + + for stop_str in sampling_params.stop: + stop_string_len = len(stop_str) + # Avoid searching already-searched text. + stop_index = seq.output_text.find( + stop_str, -new_char_count - stop_string_len) + if stop_index == -1: + continue + + if sampling_params.include_stop_str_in_output: + # Truncate to end of stop string. + stop_index += stop_string_len + if stop_index >= len(seq.output_text): + # No truncation required. + return stop_str + + # Truncate the output text to either the beginning + # or end of the stop string. 
+ seq.output_text = seq.output_text[:stop_index] + return stop_str + return None diff --git a/vllm/engine/output_processor/util.py b/vllm/engine/output_processor/util.py new file mode 100644 index 0000000..9816e96 --- /dev/null +++ b/vllm/engine/output_processor/util.py @@ -0,0 +1,19 @@ +from typing import List + +from vllm.sequence import SamplerOutput, SequenceGroupOutput + + +def create_output_by_sequence_group( + sampler_outputs: List[SamplerOutput], + num_seq_groups: int) -> List[List[SequenceGroupOutput]]: + """Helper method which transforms a 2d list organized by + [step][sequence group] into [sequence group][step]. + """ + output_by_sequence_group: List[List[SamplerOutput]] = [ + [] for _ in range(num_seq_groups) + ] + for step in sampler_outputs: + for i, sequence_group_output in enumerate(step): + output_by_sequence_group[i].append(sequence_group_output) + + return output_by_sequence_group diff --git a/vllm/entrypoints/__init__.py b/vllm/entrypoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py new file mode 100644 index 0000000..075de0b --- /dev/null +++ b/vllm/entrypoints/api_server.py @@ -0,0 +1,119 @@ +""" +NOTE: This API server is used only for demonstrating usage of AsyncEngine +and simple performance benchmarks. It is not intended for production use. +For production use, we recommend using our OpenAI compatible server. +We are also not going to accept PRs modifying this file, please +change `vllm/entrypoints/openai/api_server.py` instead. 
+""" + +import argparse +import json +import ssl +from typing import AsyncGenerator + +import uvicorn +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse, Response, StreamingResponse + +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.sampling_params import SamplingParams +from vllm.usage.usage_lib import UsageContext +from vllm.utils import random_uuid + +TIMEOUT_KEEP_ALIVE = 5 # seconds. +app = FastAPI() +engine = None + + +@app.get("/health") +async def health() -> Response: + """Health check.""" + return Response(status_code=200) + + +@app.post("/generate") +async def generate(request: Request) -> Response: + """Generate completion for the request. + + The request should be a JSON object with the following fields: + - prompt: the prompt to use for the generation. + - stream: whether to stream the results or not. + - other fields: the sampling parameters (See `SamplingParams` for details). + """ + request_dict = await request.json() + prompt = request_dict.pop("prompt") + stream = request_dict.pop("stream", False) + sampling_params = SamplingParams(**request_dict) + request_id = random_uuid() + + assert engine is not None + results_generator = engine.generate(prompt, sampling_params, request_id) + + # Streaming case + async def stream_results() -> AsyncGenerator[bytes, None]: + async for request_output in results_generator: + prompt = request_output.prompt + text_outputs = [ + prompt + output.text for output in request_output.outputs + ] + ret = {"text": text_outputs} + yield (json.dumps(ret) + "\0").encode("utf-8") + + if stream: + return StreamingResponse(stream_results()) + + # Non-streaming case + final_output = None + async for request_output in results_generator: + if await request.is_disconnected(): + # Abort the request if the client disconnects. 
+ await engine.abort(request_id) + return Response(status_code=499) + final_output = request_output + + assert final_output is not None + prompt = final_output.prompt + text_outputs = [prompt + output.text for output in final_output.outputs] + ret = {"text": text_outputs} + return JSONResponse(ret) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default=None) + parser.add_argument("--port", type=int, default=8000) + parser.add_argument("--ssl-keyfile", type=str, default=None) + parser.add_argument("--ssl-certfile", type=str, default=None) + parser.add_argument("--ssl-ca-certs", + type=str, + default=None, + help="The CA certificates file") + parser.add_argument( + "--ssl-cert-reqs", + type=int, + default=int(ssl.CERT_NONE), + help="Whether client certificate is required (see stdlib ssl module's)" + ) + parser.add_argument( + "--root-path", + type=str, + default=None, + help="FastAPI root_path when app is behind a path based routing proxy") + parser.add_argument("--log-level", type=str, default="debug") + parser = AsyncEngineArgs.add_cli_args(parser) + args = parser.parse_args() + engine_args = AsyncEngineArgs.from_cli_args(args) + engine = AsyncLLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.API_SERVER) + + app.root_path = args.root_path + uvicorn.run(app, + host=args.host, + port=args.port, + log_level=args.log_level, + timeout_keep_alive=TIMEOUT_KEEP_ALIVE, + ssl_keyfile=args.ssl_keyfile, + ssl_certfile=args.ssl_certfile, + ssl_ca_certs=args.ssl_ca_certs, + ssl_cert_reqs=args.ssl_cert_reqs) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py new file mode 100644 index 0000000..3ed660e --- /dev/null +++ b/vllm/entrypoints/llm.py @@ -0,0 +1,259 @@ +from typing import List, Optional, Union + +import torch +from tqdm import tqdm +from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast + +from vllm.engine.arg_utils import EngineArgs +from 
vllm.engine.llm_engine import LLMEngine +from vllm.lora.request import LoRARequest +from vllm.outputs import RequestOutput +from vllm.sampling_params import SamplingParams +from vllm.sequence import MultiModalData +from vllm.usage.usage_lib import UsageContext +from vllm.utils import Counter + + +class LLM: + """An LLM for generating texts from given prompts and sampling parameters. + + This class includes a tokenizer, a language model (possibly distributed + across multiple GPUs), and GPU memory space allocated for intermediate + states (aka KV cache). Given a batch of prompts and sampling parameters, + this class generates texts from the model, using an intelligent batching + mechanism and efficient memory management. + + NOTE: This class is intended to be used for offline inference. For online + serving, use the `AsyncLLMEngine` class instead. + NOTE: For the comprehensive list of arguments, see `EngineArgs`. + + Args: + model: The name or path of a HuggingFace Transformers model. + tokenizer: The name or path of a HuggingFace Transformers tokenizer. + tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer + if available, and "slow" will always use the slow tokenizer. + skip_tokenizer_init: If true, skip initialization of tokenizer and + detokenizer. Expect valid prompt_token_ids and None for prompt + from the input. + trust_remote_code: Trust remote code (e.g., from HuggingFace) when + downloading the model and tokenizer. + tensor_parallel_size: The number of GPUs to use for distributed + execution with tensor parallelism. + dtype: The data type for the model weights and activations. Currently, + we support `float32`, `float16`, and `bfloat16`. If `auto`, we use + the `torch_dtype` attribute specified in the model config file. + However, if the `torch_dtype` in the config is `float32`, we will + use `float16` instead. + quantization: The method used to quantize the model weights. 
Currently, + we support "awq", "gptq", "squeezellm", and "fp8" (experimental). + If None, we first check the `quantization_config` attribute in the + model config file. If that is None, we assume the model weights are + not quantized and use `dtype` to determine the data type of + the weights. + revision: The specific model version to use. It can be a branch name, + a tag name, or a commit id. + tokenizer_revision: The specific tokenizer version to use. It can be a + branch name, a tag name, or a commit id. + seed: The seed to initialize the random number generator for sampling. + gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to + reserve for the model weights, activations, and KV cache. Higher + values will increase the KV cache size and thus improve the model's + throughput. However, if the value is too high, it may cause out-of- + memory (OOM) errors. + swap_space: The size (GiB) of CPU memory per GPU to use as swap space. + This can be used for temporarily storing the states of the requests + when their `best_of` sampling parameters are larger than 1. If all + requests will have `best_of=1`, you can safely set this to 0. + Otherwise, too small values may cause out-of-memory (OOM) errors. + enforce_eager: Whether to enforce eager execution. If True, we will + disable CUDA graph and always execute the model in eager mode. + If False, we will use CUDA graph and eager execution in hybrid. + max_context_len_to_capture: Maximum context len covered by CUDA graphs. + When a sequence has context length larger than this, we fall back + to eager mode (DEPRECATED. Use `max_seq_len_to_capture` instead). + max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs. + When a sequence has context length larger than this, we fall back + to eager mode. 
+ disable_custom_all_reduce: See ParallelConfig + """ + + def __init__( + self, + model: str, + tokenizer: Optional[str] = None, + tokenizer_mode: str = "auto", + skip_tokenizer_init: bool = False, + trust_remote_code: bool = False, + tensor_parallel_size: int = 1, + dtype: str = "auto", + quantization: Optional[str] = None, + revision: Optional[str] = None, + tokenizer_revision: Optional[str] = None, + seed: int = 0, + gpu_memory_utilization: float = 0.9, + swap_space: int = 4, + enforce_eager: bool = False, + max_context_len_to_capture: Optional[int] = None, + max_seq_len_to_capture: int = 8192, + disable_custom_all_reduce: bool = False, + **kwargs, + ) -> None: + if "disable_log_stats" not in kwargs: + kwargs["disable_log_stats"] = True + engine_args = EngineArgs( + model=model, + tokenizer=tokenizer, + tokenizer_mode=tokenizer_mode, + skip_tokenizer_init=skip_tokenizer_init, + trust_remote_code=trust_remote_code, + tensor_parallel_size=tensor_parallel_size, + dtype=dtype, + quantization=quantization, + revision=revision, + tokenizer_revision=tokenizer_revision, + seed=seed, + gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + enforce_eager=enforce_eager, + max_context_len_to_capture=max_context_len_to_capture, + max_seq_len_to_capture=max_seq_len_to_capture, + disable_custom_all_reduce=disable_custom_all_reduce, + **kwargs, + ) + self.llm_engine = LLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.LLM_CLASS) + self.request_counter = Counter() + + def get_tokenizer( + self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: + return self.llm_engine.tokenizer.tokenizer + + def set_tokenizer( + self, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + ) -> None: + self.llm_engine.tokenizer.tokenizer = tokenizer + + def generate( + self, + prompts: Optional[Union[str, List[str]]] = None, + sampling_params: Optional[Union[SamplingParams, + List[SamplingParams]]] = None, + prompt_token_ids: 
Optional[List[List[int]]] = None, + use_tqdm: bool = True, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> List[RequestOutput]: + """Generates the completions for the input prompts. + + NOTE: This class automatically batches the given prompts, considering + the memory constraint. For the best performance, put all of your prompts + into a single list and pass it to this method. + + Args: + prompts: A list of prompts to generate completions for. + sampling_params: The sampling parameters for text generation. If + None, we use the default sampling parameters. + When it is a single value, it is applied to every prompt. + When it is a list, the list must have the same length as the + prompts and it is paired one by one with the prompt. + prompt_token_ids: A list of token IDs for the prompts. If None, we + use the tokenizer to convert the prompts to token IDs. + use_tqdm: Whether to use tqdm to display the progress bar. + lora_request: LoRA request to use for generation, if any. + multi_modal_data: Multi modal data. + + Returns: + A list of `RequestOutput` objects containing the generated + completions in the same order as the input prompts. + """ + if prompts is None and prompt_token_ids is None: + raise ValueError("Either prompts or prompt_token_ids must be " + "provided.") + if self.llm_engine.model_config.skip_tokenizer_init \ + and prompts is not None: + raise ValueError("prompts must be None if skip_tokenizer_init " + "is True") + if isinstance(prompts, str): + # Convert a single prompt to a list. 
+ prompts = [prompts] + if (prompts is not None and prompt_token_ids is not None + and len(prompts) != len(prompt_token_ids)): + raise ValueError("The lengths of prompts and prompt_token_ids " + "must be the same.") + + if prompts is not None: + num_requests = len(prompts) + else: + assert prompt_token_ids is not None + num_requests = len(prompt_token_ids) + + if sampling_params is None: + # Use default sampling params. + sampling_params = SamplingParams() + + elif isinstance(sampling_params, + list) and len(sampling_params) != num_requests: + raise ValueError("The lengths of prompts and sampling_params " + "must be the same.") + if multi_modal_data: + multi_modal_data.data = multi_modal_data.data.to(torch.float16) + + # Add requests to the engine. + for i in range(num_requests): + prompt = prompts[i] if prompts is not None else None + token_ids = None if prompt_token_ids is None else prompt_token_ids[ + i] + self._add_request( + prompt, + sampling_params[i] + if isinstance(sampling_params, list) else sampling_params, + token_ids, + lora_request=lora_request, + # Get ith image while maintaining the batch dim. + multi_modal_data=MultiModalData( + type=multi_modal_data.type, + data=multi_modal_data.data[i].unsqueeze(0)) + if multi_modal_data else None, + ) + return self._run_engine(use_tqdm) + + def _add_request( + self, + prompt: Optional[str], + sampling_params: SamplingParams, + prompt_token_ids: Optional[List[int]], + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> None: + request_id = str(next(self.request_counter)) + self.llm_engine.add_request(request_id, + prompt, + sampling_params, + prompt_token_ids, + lora_request=lora_request, + multi_modal_data=multi_modal_data) + + def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]: + # Initialize tqdm. 
+ if use_tqdm: + num_requests = self.llm_engine.get_num_unfinished_requests() + pbar = tqdm(total=num_requests, + desc="Processed prompts", + dynamic_ncols=True) + # Run the engine. + outputs: List[RequestOutput] = [] + while self.llm_engine.has_unfinished_requests(): + step_outputs = self.llm_engine.step() + for output in step_outputs: + if output.finished: + outputs.append(output) + if use_tqdm: + pbar.update(1) + if use_tqdm: + pbar.close() + # Sort the outputs by request ID. + # This is necessary because some requests may be finished earlier than + # its previous requests. + outputs = sorted(outputs, key=lambda x: int(x.request_id)) + return outputs \ No newline at end of file diff --git a/vllm/entrypoints/openai/__init__.py b/vllm/entrypoints/openai/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py new file mode 100644 index 0000000..f9e294a --- /dev/null +++ b/vllm/entrypoints/openai/api_server.py @@ -0,0 +1,186 @@ +import asyncio +import importlib +import inspect +import re +from contextlib import asynccontextmanager +from http import HTTPStatus +from typing import Any, Set + +import fastapi +import uvicorn +from fastapi import Request +from fastapi.exceptions import RequestValidationError +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse, Response, StreamingResponse +from prometheus_client import make_asgi_app +from starlette.routing import Mount + +import vllm +import vllm.envs as envs +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.entrypoints.openai.cli_args import make_arg_parser +from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + ChatCompletionResponse, + CompletionRequest, ErrorResponse) +from vllm.entrypoints.openai.serving_chat import OpenAIServingChat +from vllm.entrypoints.openai.serving_completion import 
OpenAIServingCompletion +from vllm.logger import init_logger +from vllm.usage.usage_lib import UsageContext + +TIMEOUT_KEEP_ALIVE = 5 # seconds + +openai_serving_chat: OpenAIServingChat +openai_serving_completion: OpenAIServingCompletion +logger = init_logger(__name__) + +_running_tasks: Set[asyncio.Task[Any]] = set() + + +@asynccontextmanager +async def lifespan(app: fastapi.FastAPI): + + async def _force_log(): + while True: + await asyncio.sleep(10) + await engine.do_log_stats() + + if not engine_args.disable_log_stats: + task = asyncio.create_task(_force_log()) + _running_tasks.add(task) + task.add_done_callback(_running_tasks.remove) + + yield + + +app = fastapi.FastAPI(lifespan=lifespan) + + +def parse_args(): + parser = make_arg_parser() + return parser.parse_args() + + +# Add prometheus asgi middleware to route /metrics requests +route = Mount("/metrics", make_asgi_app()) +# Workaround for 307 Redirect for /metrics +route.path_regex = re.compile('^/metrics(?P<path>.*)$') +app.routes.append(route) + + +@app.exception_handler(RequestValidationError) +async def validation_exception_handler(_, exc): + err = openai_serving_chat.create_error_response(message=str(exc)) + return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST) + + +@app.get("/health") +async def health() -> Response: + """Health check.""" + await openai_serving_chat.engine.check_health() + return Response(status_code=200) + + +@app.get("/v1/models") +async def show_available_models(): + models = await openai_serving_chat.show_available_models() + return JSONResponse(content=models.model_dump()) + + +@app.get("/version") +async def show_version(): + ver = {"version": vllm.__version__} + return JSONResponse(content=ver) + + +@app.post("/v1/chat/completions") +async def create_chat_completion(request: ChatCompletionRequest, + raw_request: Request): + generator = await openai_serving_chat.create_chat_completion( + request, raw_request) + if isinstance(generator, ErrorResponse): + return 
JSONResponse(content=generator.model_dump(), + status_code=generator.code) + if request.stream: + return StreamingResponse(content=generator, + media_type="text/event-stream") + else: + assert isinstance(generator, ChatCompletionResponse) + return JSONResponse(content=generator.model_dump()) + + +@app.post("/v1/completions") +async def create_completion(request: CompletionRequest, raw_request: Request): + generator = await openai_serving_completion.create_completion( + request, raw_request) + if isinstance(generator, ErrorResponse): + return JSONResponse(content=generator.model_dump(), + status_code=generator.code) + if request.stream: + return StreamingResponse(content=generator, + media_type="text/event-stream") + else: + return JSONResponse(content=generator.model_dump()) + + +if __name__ == "__main__": + args = parse_args() + + app.add_middleware( + CORSMiddleware, + allow_origins=args.allowed_origins, + allow_credentials=args.allow_credentials, + allow_methods=args.allowed_methods, + allow_headers=args.allowed_headers, + ) + + if token := envs.VLLM_API_KEY or args.api_key: + + @app.middleware("http") + async def authentication(request: Request, call_next): + root_path = "" if args.root_path is None else args.root_path + if not request.url.path.startswith(f"{root_path}/v1"): + return await call_next(request) + if request.headers.get("Authorization") != "Bearer " + token: + return JSONResponse(content={"error": "Unauthorized"}, + status_code=401) + return await call_next(request) + + for middleware in args.middleware: + module_path, object_name = middleware.rsplit(".", 1) + imported = getattr(importlib.import_module(module_path), object_name) + if inspect.isclass(imported): + app.add_middleware(imported) + elif inspect.iscoroutinefunction(imported): + app.middleware("http")(imported) + else: + raise ValueError(f"Invalid middleware {middleware}. 
" + f"Must be a function or a class.") + + logger.info("vLLM API server version %s", vllm.__version__) + logger.info("args: %s", args) + + if args.served_model_name is not None: + served_model_names = args.served_model_name + else: + served_model_names = [args.model] + engine_args = AsyncEngineArgs.from_cli_args(args) + engine = AsyncLLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.OPENAI_API_SERVER) + openai_serving_chat = OpenAIServingChat(engine, served_model_names, + args.response_role, + args.lora_modules, + args.chat_template) + openai_serving_completion = OpenAIServingCompletion( + engine, served_model_names, args.lora_modules) + + app.root_path = args.root_path + uvicorn.run(app, + host=args.host, + port=args.port, + log_level=args.uvicorn_log_level, + timeout_keep_alive=TIMEOUT_KEEP_ALIVE, + ssl_keyfile=args.ssl_keyfile, + ssl_certfile=args.ssl_certfile, + ssl_ca_certs=args.ssl_ca_certs, + ssl_cert_reqs=args.ssl_cert_reqs) diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py new file mode 100644 index 0000000..4c0cb1e --- /dev/null +++ b/vllm/entrypoints/openai/cli_args.py @@ -0,0 +1,115 @@ +""" +This file contains the command line arguments for the vLLM's +OpenAI-compatible server. It is kept in a separate file for documentation +purposes. 
+""" + +import argparse +import json +import ssl + +from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str +from vllm.entrypoints.openai.serving_engine import LoRAModulePath + + +class LoRAParserAction(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + lora_list = [] + for item in values: + name, path = item.split('=') + lora_list.append(LoRAModulePath(name, path)) + setattr(namespace, self.dest, lora_list) + + +def make_arg_parser(): + parser = argparse.ArgumentParser( + description="vLLM OpenAI-Compatible RESTful API server.") + parser.add_argument("--host", + type=nullable_str, + default=None, + help="host name") + parser.add_argument("--port", type=int, default=8000, help="port number") + parser.add_argument( + "--uvicorn-log-level", + type=str, + default="info", + choices=['debug', 'info', 'warning', 'error', 'critical', 'trace'], + help="log level for uvicorn") + parser.add_argument("--allow-credentials", + action="store_true", + help="allow credentials") + parser.add_argument("--allowed-origins", + type=json.loads, + default=["*"], + help="allowed origins") + parser.add_argument("--allowed-methods", + type=json.loads, + default=["*"], + help="allowed methods") + parser.add_argument("--allowed-headers", + type=json.loads, + default=["*"], + help="allowed headers") + parser.add_argument("--api-key", + type=nullable_str, + default=None, + help="If provided, the server will require this key " + "to be presented in the header.") + parser.add_argument( + "--lora-modules", + type=nullable_str, + default=None, + nargs='+', + action=LoRAParserAction, + help="LoRA module configurations in the format name=path. 
" + "Multiple modules can be specified.") + parser.add_argument("--chat-template", + type=nullable_str, + default=None, + help="The file path to the chat template, " + "or the template in single-line form " + "for the specified model") + parser.add_argument("--response-role", + type=nullable_str, + default="assistant", + help="The role name to return if " + "`request.add_generation_prompt=true`.") + parser.add_argument("--ssl-keyfile", + type=nullable_str, + default=None, + help="The file path to the SSL key file") + parser.add_argument("--ssl-certfile", + type=nullable_str, + default=None, + help="The file path to the SSL cert file") + parser.add_argument("--ssl-ca-certs", + type=nullable_str, + default=None, + help="The CA certificates file") + parser.add_argument( + "--ssl-cert-reqs", + type=int, + default=int(ssl.CERT_NONE), + help="Whether client certificate is required (see stdlib ssl module's)" + ) + parser.add_argument( + "--root-path", + type=nullable_str, + default=None, + help="FastAPI root_path when app is behind a path based routing proxy") + parser.add_argument( + "--middleware", + type=nullable_str, + action="append", + default=[], + help="Additional ASGI middleware to apply to the app. " + "We accept multiple --middleware arguments. " + "The value should be an import path. " + "If a function is provided, vLLM will add it to the server " + "using @app.middleware('http'). " + "If a class is provided, vLLM will add it to the server " + "using app.add_middleware(). 
") + + parser = AsyncEngineArgs.add_cli_args(parser) + return parser diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py new file mode 100644 index 0000000..3cd9dda --- /dev/null +++ b/vllm/entrypoints/openai/protocol.py @@ -0,0 +1,460 @@ +# Adapted from +# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py +import time +from typing import Dict, List, Literal, Optional, Union + +import torch +from openai.types.chat import ChatCompletionMessageParam +from pydantic import BaseModel, ConfigDict, Field, model_validator +from typing_extensions import Annotated + +from vllm.sampling_params import SamplingParams +from vllm.utils import random_uuid + + +class OpenAIBaseModel(BaseModel): + # OpenAI API does not allow extra fields + model_config = ConfigDict(extra="forbid") + + +class ErrorResponse(OpenAIBaseModel): + object: str = "error" + message: str + type: str + param: Optional[str] = None + code: int + + +class ModelPermission(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}") + object: str = "model_permission" + created: int = Field(default_factory=lambda: int(time.time())) + allow_create_engine: bool = False + allow_sampling: bool = True + allow_logprobs: bool = True + allow_search_indices: bool = False + allow_view: bool = True + allow_fine_tuning: bool = False + organization: str = "*" + group: Optional[str] = None + is_blocking: bool = False + + +class ModelCard(OpenAIBaseModel): + id: str + object: str = "model" + created: int = Field(default_factory=lambda: int(time.time())) + owned_by: str = "vllm" + root: Optional[str] = None + parent: Optional[str] = None + permission: List[ModelPermission] = Field(default_factory=list) + + +class ModelList(OpenAIBaseModel): + object: str = "list" + data: List[ModelCard] = Field(default_factory=list) + + +class UsageInfo(OpenAIBaseModel): + prompt_tokens: int = 0 + total_tokens: int 
= 0 + completion_tokens: Optional[int] = 0 + + +class ResponseFormat(OpenAIBaseModel): + # type must be "json_object" or "text" + type: Literal["text", "json_object"] + + +class ChatCompletionRequest(OpenAIBaseModel): + # Ordered by official OpenAI API documentation + # https://platform.openai.com/docs/api-reference/chat/create + messages: List[ChatCompletionMessageParam] + model: str + frequency_penalty: Optional[float] = 0.0 + logit_bias: Optional[Dict[str, float]] = None + logprobs: Optional[bool] = False + top_logprobs: Optional[int] = None + max_tokens: Optional[int] = None + n: Optional[int] = 1 + presence_penalty: Optional[float] = 0.0 + response_format: Optional[ResponseFormat] = None + seed: Optional[int] = Field(None, + ge=torch.iinfo(torch.long).min, + le=torch.iinfo(torch.long).max) + stop: Optional[Union[str, List[str]]] = Field(default_factory=list) + stream: Optional[bool] = False + temperature: Optional[float] = 0.7 + top_p: Optional[float] = 1.0 + user: Optional[str] = None + + # doc: begin-chat-completion-sampling-params + best_of: Optional[int] = None + use_beam_search: Optional[bool] = False + top_k: Optional[int] = -1 + min_p: Optional[float] = 0.0 + repetition_penalty: Optional[float] = 1.0 + length_penalty: Optional[float] = 1.0 + early_stopping: Optional[bool] = False + ignore_eos: Optional[bool] = False + min_tokens: Optional[int] = 0 + stop_token_ids: Optional[List[int]] = Field(default_factory=list) + skip_special_tokens: Optional[bool] = True + spaces_between_special_tokens: Optional[bool] = True + # doc: end-chat-completion-sampling-params + + # doc: begin-chat-completion-extra-params + echo: Optional[bool] = Field( + default=False, + description=( + "If true, the new message will be prepended with the last message " + "if they belong to the same role."), + ) + add_generation_prompt: Optional[bool] = Field( + default=True, + description= + ("If true, the generation prompt will be added to the chat template. 
" + "This is a parameter used by chat template in tokenizer config of the " + "model."), + ) + include_stop_str_in_output: Optional[bool] = Field( + default=False, + description=( + "Whether to include the stop string in the output. " + "This is only applied when the stop or stop_token_ids is set."), + ) + guided_json: Optional[Union[str, dict, BaseModel]] = Field( + default=None, + description=("If specified, the output will follow the JSON schema."), + ) + guided_regex: Optional[str] = Field( + default=None, + description=( + "If specified, the output will follow the regex pattern."), + ) + guided_choice: Optional[List[str]] = Field( + default=None, + description=( + "If specified, the output will be exactly one of the choices."), + ) + guided_grammar: Optional[str] = Field( + default=None, + description=( + "If specified, the output will follow the context free grammar."), + ) + guided_decoding_backend: Optional[str] = Field( + default=None, + description=( + "If specified, will override the default guided decoding backend " + "of the server for this specific request. 
If set, must be either " + "'outlines' / 'lm-format-enforcer'")) + guided_whitespace_pattern: Optional[str] = Field( + default=None, + description=( + "If specified, will override the default whitespace pattern " + "for guided json decoding.")) + + # doc: end-chat-completion-extra-params + + def to_sampling_params(self) -> SamplingParams: + if self.logprobs and not self.top_logprobs: + raise ValueError("Top logprobs must be set when logprobs is.") + + logits_processors = None + if self.logit_bias: + + def logit_bias_logits_processor( + token_ids: List[int], + logits: torch.Tensor) -> torch.Tensor: + assert self.logit_bias is not None + for token_id, bias in self.logit_bias.items(): + # Clamp the bias between -100 and 100 per OpenAI API spec + bias = min(100, max(-100, bias)) + logits[int(token_id)] += bias + return logits + + logits_processors = [logit_bias_logits_processor] + + return SamplingParams( + n=self.n, + presence_penalty=self.presence_penalty, + frequency_penalty=self.frequency_penalty, + repetition_penalty=self.repetition_penalty, + temperature=self.temperature, + top_p=self.top_p, + min_p=self.min_p, + seed=self.seed, + stop=self.stop, + stop_token_ids=self.stop_token_ids, + max_tokens=self.max_tokens, + min_tokens=self.min_tokens, + logprobs=self.top_logprobs if self.logprobs else None, + prompt_logprobs=self.top_logprobs if self.echo else None, + best_of=self.best_of, + top_k=self.top_k, + ignore_eos=self.ignore_eos, + use_beam_search=self.use_beam_search, + early_stopping=self.early_stopping, + skip_special_tokens=self.skip_special_tokens, + spaces_between_special_tokens=self.spaces_between_special_tokens, + include_stop_str_in_output=self.include_stop_str_in_output, + length_penalty=self.length_penalty, + logits_processors=logits_processors, + ) + + @model_validator(mode="before") + @classmethod + def check_guided_decoding_count(cls, data): + guide_count = sum([ + "guided_json" in data and data["guided_json"] is not None, + "guided_regex" in data 
and data["guided_regex"] is not None, + "guided_choice" in data and data["guided_choice"] is not None + ]) + if guide_count > 1: + raise ValueError( + "You can only use one kind of guided decoding " + "('guided_json', 'guided_regex' or 'guided_choice').") + return data + + +class CompletionRequest(OpenAIBaseModel): + # Ordered by official OpenAI API documentation + # https://platform.openai.com/docs/api-reference/completions/create + model: str + prompt: Union[List[int], List[List[int]], str, List[str]] + best_of: Optional[int] = None + echo: Optional[bool] = False + frequency_penalty: Optional[float] = 0.0 + logit_bias: Optional[Dict[str, float]] = None + logprobs: Optional[int] = None + max_tokens: Optional[int] = 16 + n: int = 1 + presence_penalty: Optional[float] = 0.0 + seed: Optional[int] = Field(None, + ge=torch.iinfo(torch.long).min, + le=torch.iinfo(torch.long).max) + stop: Optional[Union[str, List[str]]] = Field(default_factory=list) + stream: Optional[bool] = False + suffix: Optional[str] = None + temperature: Optional[float] = 1.0 + top_p: Optional[float] = 1.0 + user: Optional[str] = None + + # doc: begin-completion-sampling-params + use_beam_search: Optional[bool] = False + top_k: Optional[int] = -1 + min_p: Optional[float] = 0.0 + repetition_penalty: Optional[float] = 1.0 + length_penalty: Optional[float] = 1.0 + early_stopping: Optional[bool] = False + stop_token_ids: Optional[List[int]] = Field(default_factory=list) + ignore_eos: Optional[bool] = False + min_tokens: Optional[int] = 0 + skip_special_tokens: Optional[bool] = True + spaces_between_special_tokens: Optional[bool] = True + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None + # doc: end-completion-sampling-params + + # doc: begin-completion-extra-params + include_stop_str_in_output: Optional[bool] = Field( + default=False, + description=( + "Whether to include the stop string in the output. 
" + "This is only applied when the stop or stop_token_ids is set."), + ) + response_format: Optional[ResponseFormat] = Field( + default=None, + description= + ("Similar to chat completion, this parameter specifies the format of " + "output. Only {'type': 'json_object'} or {'type': 'text' } is " + "supported."), + ) + guided_json: Optional[Union[str, dict, BaseModel]] = Field( + default=None, + description=("If specified, the output will follow the JSON schema."), + ) + guided_regex: Optional[str] = Field( + default=None, + description=( + "If specified, the output will follow the regex pattern."), + ) + guided_choice: Optional[List[str]] = Field( + default=None, + description=( + "If specified, the output will be exactly one of the choices."), + ) + guided_grammar: Optional[str] = Field( + default=None, + description=( + "If specified, the output will follow the context free grammar."), + ) + guided_decoding_backend: Optional[str] = Field( + default=None, + description=( + "If specified, will override the default guided decoding backend " + "of the server for this specific request. 
If set, must be one of " + "'outlines' / 'lm-format-enforcer'")) + guided_whitespace_pattern: Optional[str] = Field( + default=None, + description=( + "If specified, will override the default whitespace pattern " + "for guided json decoding.")) + + # doc: end-completion-extra-params + + def to_sampling_params(self): + echo_without_generation = self.echo and self.max_tokens == 0 + + logits_processors = None + if self.logit_bias: + + def logit_bias_logits_processor( + token_ids: List[int], + logits: torch.Tensor) -> torch.Tensor: + assert self.logit_bias is not None + for token_id, bias in self.logit_bias.items(): + # Clamp the bias between -100 and 100 per OpenAI API spec + bias = min(100, max(-100, bias)) + logits[int(token_id)] += bias + return logits + + logits_processors = [logit_bias_logits_processor] + + return SamplingParams( + n=self.n, + best_of=self.best_of, + presence_penalty=self.presence_penalty, + frequency_penalty=self.frequency_penalty, + repetition_penalty=self.repetition_penalty, + temperature=self.temperature, + top_p=self.top_p, + top_k=self.top_k, + min_p=self.min_p, + seed=self.seed, + stop=self.stop, + stop_token_ids=self.stop_token_ids, + ignore_eos=self.ignore_eos, + max_tokens=self.max_tokens if not echo_without_generation else 1, + min_tokens=self.min_tokens, + logprobs=self.logprobs, + use_beam_search=self.use_beam_search, + early_stopping=self.early_stopping, + prompt_logprobs=self.logprobs if self.echo else None, + skip_special_tokens=self.skip_special_tokens, + spaces_between_special_tokens=(self.spaces_between_special_tokens), + include_stop_str_in_output=self.include_stop_str_in_output, + length_penalty=self.length_penalty, + logits_processors=logits_processors, + truncate_prompt_tokens=self.truncate_prompt_tokens, + ) + + @model_validator(mode="before") + @classmethod + def check_guided_decoding_count(cls, data): + guide_count = sum([ + "guided_json" in data and data["guided_json"] is not None, + "guided_regex" in data and 
data["guided_regex"] is not None, + "guided_choice" in data and data["guided_choice"] is not None + ]) + if guide_count > 1: + raise ValueError( + "You can only use one kind of guided decoding " + "('guided_json', 'guided_regex' or 'guided_choice').") + return data + + +class LogProbs(OpenAIBaseModel): + text_offset: List[int] = Field(default_factory=list) + token_logprobs: List[Optional[float]] = Field(default_factory=list) + tokens: List[str] = Field(default_factory=list) + top_logprobs: Optional[List[Optional[Dict[str, float]]]] = None + + +class CompletionResponseChoice(OpenAIBaseModel): + index: int + text: str + logprobs: Optional[LogProbs] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( + default=None, + description=( + "The stop string or token id that caused the completion " + "to stop, None if the completion finished for some other reason " + "including encountering the EOS token"), + ) + + +class CompletionResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}") + object: str = "text_completion" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[CompletionResponseChoice] + usage: UsageInfo + + +class CompletionResponseStreamChoice(OpenAIBaseModel): + index: int + text: str + logprobs: Optional[LogProbs] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( + default=None, + description=( + "The stop string or token id that caused the completion " + "to stop, None if the completion finished for some other reason " + "including encountering the EOS token"), + ) + + +class CompletionStreamResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}") + object: str = "text_completion" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[CompletionResponseStreamChoice] + usage: Optional[UsageInfo] = Field(default=None) + + 
+class ChatMessage(OpenAIBaseModel): + role: str + content: str + + +class ChatCompletionResponseChoice(OpenAIBaseModel): + index: int + message: ChatMessage + logprobs: Optional[LogProbs] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None + + +class ChatCompletionResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}") + object: str = "chat.completion" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[ChatCompletionResponseChoice] + usage: UsageInfo + + +class DeltaMessage(OpenAIBaseModel): + role: Optional[str] = None + content: Optional[str] = None + + +class ChatCompletionResponseStreamChoice(OpenAIBaseModel): + index: int + delta: DeltaMessage + logprobs: Optional[LogProbs] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None + + +class ChatCompletionStreamResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}") + object: str = "chat.completion.chunk" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[ChatCompletionResponseStreamChoice] + usage: Optional[UsageInfo] = Field(default=None) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py new file mode 100644 index 0000000..c8f4a6b --- /dev/null +++ b/vllm/entrypoints/openai/serving_chat.py @@ -0,0 +1,392 @@ +import asyncio +import codecs +import time +from typing import (AsyncGenerator, AsyncIterator, Awaitable, Iterable, List, + Optional, Tuple, TypedDict, Union, final) + +from fastapi import Request +from openai.types.chat import (ChatCompletionContentPartParam, + ChatCompletionRole) + +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.entrypoints.openai.protocol import ( + ChatCompletionRequest, ChatCompletionResponse, + ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, + 
ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse, + UsageInfo) +from vllm.entrypoints.openai.serving_engine import (LoRAModulePath, + OpenAIServing) +from vllm.logger import init_logger +from vllm.model_executor.guided_decoding import ( + get_guided_decoding_logits_processor) +from vllm.outputs import RequestOutput +from vllm.utils import random_uuid + +logger = init_logger(__name__) + + +@final # So that it should be compatible with Dict[str, str] +class ConversationMessage(TypedDict): + role: str + content: str + + +class OpenAIServingChat(OpenAIServing): + + def __init__(self, + engine: AsyncLLMEngine, + served_model_names: List[str], + response_role: str, + lora_modules: Optional[List[LoRAModulePath]] = None, + chat_template: Optional[str] = None): + super().__init__(engine=engine, + served_model_names=served_model_names, + lora_modules=lora_modules, + await_post_init=self._load_chat_template( + chat_template=chat_template)) + + self.response_role = response_role + + def _parse_chat_message_content( + self, + role: ChatCompletionRole, + content: Optional[Union[str, + Iterable[ChatCompletionContentPartParam]]], + ) -> Tuple[List[ConversationMessage], List[Awaitable[object]]]: + if content is None: + return [], [] + if isinstance(content, str): + return [ConversationMessage(role=role, content=content)], [] + + texts: List[str] = [] + for _, part in enumerate(content): + if part["type"] == "text": + text = part["text"] + + texts.append(text) + else: + raise NotImplementedError(f"Unknown part type: {part['type']}") + + return [ConversationMessage(role=role, content="\n".join(texts))], [] + + async def create_chat_completion( + self, request: ChatCompletionRequest, raw_request: Request + ) -> Union[ErrorResponse, AsyncGenerator[str, None], + ChatCompletionResponse]: + """Completion API similar to OpenAI's API. + + See https://platform.openai.com/docs/api-reference/chat/create + for the API specification. 
This API mimics the OpenAI + ChatCompletion API. + + NOTE: Currently we do not support the following feature: + - function_call (Users should implement this by themselves) + """ + error_check_ret = await self._check_model(request) + if error_check_ret is not None: + return error_check_ret + + try: + conversation: List[ConversationMessage] = [] + + for m in request.messages: + messages, _ = self._parse_chat_message_content( + m["role"], m["content"]) + + conversation.extend(messages) + + prompt = self.tokenizer.apply_chat_template( + conversation=conversation, + tokenize=False, + add_generation_prompt=request.add_generation_prompt, + ) + except Exception as e: + logger.error("Error in applying chat template from request: %s", e) + return self.create_error_response(str(e)) + + request_id = f"cmpl-{random_uuid()}" + try: + # Tokenize/detokenize depending on prompt format (string/token list) + prompt_ids, prompt_text = self._validate_prompt_and_tokenize( + request, prompt=prompt) + sampling_params = request.to_sampling_params() + lora_request = self._maybe_get_lora(request) + decoding_config = await self.engine.get_decoding_config() + guided_decoding_backend = request.guided_decoding_backend \ + or decoding_config.guided_decoding_backend + guided_decode_logits_processor = ( + await get_guided_decoding_logits_processor( + guided_decoding_backend, request, await + self.engine.get_tokenizer())) + if guided_decode_logits_processor: + if sampling_params.logits_processors is None: + sampling_params.logits_processors = [] + sampling_params.logits_processors.append( + guided_decode_logits_processor) + except ValueError as e: + return self.create_error_response(str(e)) + + result_generator = self.engine.generate(prompt_text, sampling_params, + request_id, prompt_ids, + lora_request) + # Streaming response + if request.stream: + return self.chat_completion_stream_generator( + request, result_generator, request_id, conversation) + else: + try: + return await 
self.chat_completion_full_generator( + request, raw_request, result_generator, request_id, + conversation) + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + def get_chat_request_role(self, request: ChatCompletionRequest) -> str: + if request.add_generation_prompt: + return self.response_role + else: + return request.messages[-1]["role"] + + async def chat_completion_stream_generator( + self, request: ChatCompletionRequest, + result_generator: AsyncIterator[RequestOutput], request_id: str, + conversation: List[ConversationMessage] + ) -> AsyncGenerator[str, None]: + model_name = self.served_model_names[0] + created_time = int(time.time()) + chunk_object_type = "chat.completion.chunk" + first_iteration = True + + # Send response for each token for each request.n (index) + assert request.n is not None + previous_texts = [""] * request.n + previous_num_tokens = [0] * request.n + finish_reason_sent = [False] * request.n + try: + async for res in result_generator: + # We need to do it here, because if there are exceptions in + # the result_generator, it needs to be sent as the FIRST + # response (by the try...catch). 
+ if first_iteration: + # Send first response for each request.n (index) with + # the role + role = self.get_chat_request_role(request) + for i in range(request.n): + choice_data = ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage(role=role), + logprobs=None, + finish_reason=None) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + + # Send response to echo the input portion of the + # last message + if request.echo: + last_msg_content = "" + if conversation and conversation[-1].get( + "content") and conversation[-1].get( + "role") == role: + last_msg_content = conversation[-1]["content"] + + if last_msg_content: + for i in range(request.n): + choice_data = ( + ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage( + content=last_msg_content), + finish_reason=None)) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + logprobs=None, + model=model_name) + data = chunk.model_dump_json( + exclude_unset=True) + yield f"data: {data}\n\n" + first_iteration = False + + for output in res.outputs: + i = output.index + + if finish_reason_sent[i]: + continue + + delta_token_ids = output.token_ids[previous_num_tokens[i]:] + top_logprobs = output.logprobs[ + previous_num_tokens[i]:] if output.logprobs else None + + if request.logprobs: + logprobs = self._create_logprobs( + token_ids=delta_token_ids, + top_logprobs=top_logprobs, + num_output_top_logprobs=request.logprobs, + initial_text_offset=len(previous_texts[i]), + ) + else: + logprobs = None + + delta_text = output.text[len(previous_texts[i]):] + previous_texts[i] = output.text + previous_num_tokens[i] = len(output.token_ids) + if output.finish_reason is None: + # Send token-by-token response for each request.n + choice_data = 
ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage(content=delta_text), + logprobs=logprobs, + finish_reason=None) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + else: + # Send the finish response for each request.n only once + prompt_tokens = len(res.prompt_token_ids) + final_usage = UsageInfo( + prompt_tokens=prompt_tokens, + completion_tokens=previous_num_tokens[i], + total_tokens=prompt_tokens + + previous_num_tokens[i], + ) + choice_data = ChatCompletionResponseStreamChoice( + index=i, + delta=DeltaMessage(content=delta_text), + logprobs=logprobs, + finish_reason=output.finish_reason, + stop_reason=output.stop_reason) + chunk = ChatCompletionStreamResponse( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + if final_usage is not None: + chunk.usage = final_usage + data = chunk.model_dump_json(exclude_unset=True, + exclude_none=True) + yield f"data: {data}\n\n" + finish_reason_sent[i] = True + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + # Send the final done message after all response.n are finished + yield "data: [DONE]\n\n" + + async def chat_completion_full_generator( + self, request: ChatCompletionRequest, raw_request: Request, + result_generator: AsyncIterator[RequestOutput], request_id: str, + conversation: List[ConversationMessage] + ) -> Union[ErrorResponse, ChatCompletionResponse]: + + model_name = self.served_model_names[0] + created_time = int(time.time()) + final_res: Optional[RequestOutput] = None + + async for res in result_generator: + if await raw_request.is_disconnected(): + # Abort the request if the client disconnects. 
+ await self.engine.abort(request_id) + return self.create_error_response("Client disconnected") + final_res = res + assert final_res is not None + + choices = [] + + role = self.get_chat_request_role(request) + for output in final_res.outputs: + token_ids = output.token_ids + top_logprobs = output.logprobs + + if request.logprobs: + logprobs = self._create_logprobs( + token_ids=token_ids, + top_logprobs=top_logprobs, + num_output_top_logprobs=request.logprobs, + ) + else: + logprobs = None + + choice_data = ChatCompletionResponseChoice( + index=output.index, + message=ChatMessage(role=role, content=output.text), + logprobs=logprobs, + finish_reason=output.finish_reason, + stop_reason=output.stop_reason, + ) + choices.append(choice_data) + + if request.echo: + last_msg_content = "" + if conversation and conversation[-1].get( + "content") and conversation[-1].get("role") == role: + last_msg_content = conversation[-1]["content"] + + for choice in choices: + full_message = last_msg_content + choice.message.content + choice.message.content = full_message + + num_prompt_tokens = len(final_res.prompt_token_ids) + num_generated_tokens = sum( + len(output.token_ids) for output in final_res.outputs) + usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=num_generated_tokens, + total_tokens=num_prompt_tokens + num_generated_tokens, + ) + response = ChatCompletionResponse( + id=request_id, + created=created_time, + model=model_name, + choices=choices, + usage=usage, + ) + + return response + + async def _load_chat_template(self, chat_template: Optional[str]): + while self.tokenizer is None: + # Give the parent class time to load the tokenizer + await asyncio.sleep(0.1) + tokenizer = self.tokenizer + + if chat_template is not None: + try: + with open(chat_template, "r") as f: + tokenizer.chat_template = f.read() + except OSError as e: + JINJA_CHARS = "{}\n" + if not any(c in chat_template for c in JINJA_CHARS): + msg = (f"The supplied chat template 
({chat_template}) " + f"looks like a file path, but it failed to be " + f"opened. Reason: {e}") + raise ValueError(msg) from e + + # If opening a file fails, set chat template to be args to + # ensure we decode so our escape are interpreted correctly + tokenizer.chat_template = codecs.decode( + chat_template, "unicode_escape") + + logger.info("Using supplied chat template:\n%s", + tokenizer.chat_template) + elif tokenizer.chat_template is not None: + logger.info("Using default chat template:\n%s", + tokenizer.chat_template) + else: + logger.warning( + "No chat template provided. Chat API will not work.") diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py new file mode 100644 index 0000000..6a7f29c --- /dev/null +++ b/vllm/entrypoints/openai/serving_completion.py @@ -0,0 +1,347 @@ +import time +from typing import (AsyncGenerator, AsyncIterator, Callable, Dict, List, + Optional, Tuple) + +from fastapi import Request + +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.entrypoints.openai.protocol import (CompletionRequest, + CompletionResponse, + CompletionResponseChoice, + CompletionResponseStreamChoice, + CompletionStreamResponse, + LogProbs, UsageInfo) +from vllm.entrypoints.openai.serving_engine import (LoRAModulePath, + OpenAIServing) +from vllm.logger import init_logger +from vllm.model_executor.guided_decoding import ( + get_guided_decoding_logits_processor) +from vllm.outputs import RequestOutput +from vllm.utils import merge_async_iterators, random_uuid + +logger = init_logger(__name__) + +TypeTokenIDs = List[int] +TypeTopLogProbs = List[Optional[Dict[int, float]]] +TypeCreateLogProbsFn = Callable[ + [TypeTokenIDs, TypeTopLogProbs, Optional[int], int], LogProbs] + + +def parse_prompt_format(prompt) -> Tuple[bool, list]: + # get the prompt, openai supports the following + # "a string, array of strings, array of tokens, or array of token arrays." 
+ prompt_is_tokens = False + prompts = [prompt] # case 1: a string + if isinstance(prompt, list): + if len(prompt) == 0: + raise ValueError("please provide at least one prompt") + elif isinstance(prompt[0], str): + prompt_is_tokens = False + prompts = prompt # case 2: array of strings + elif isinstance(prompt[0], int): + prompt_is_tokens = True + prompts = [prompt] # case 3: array of tokens + elif isinstance(prompt[0], list) and isinstance(prompt[0][0], int): + prompt_is_tokens = True + prompts = prompt # case 4: array of token arrays + else: + raise ValueError("prompt must be a string, array of strings, " + "array of tokens, or array of token arrays") + return prompt_is_tokens, prompts + + +class OpenAIServingCompletion(OpenAIServing): + + def __init__(self, + engine: AsyncLLMEngine, + served_model_names: List[str], + lora_modules: Optional[List[LoRAModulePath]] = None): + super().__init__(engine=engine, + served_model_names=served_model_names, + lora_modules=lora_modules) + + async def create_completion(self, request: CompletionRequest, + raw_request: Request): + """Completion API similar to OpenAI's API. + + See https://platform.openai.com/docs/api-reference/completions/create + for the API specification. This API mimics the OpenAI Completion API. + + NOTE: Currently we do not support the following feature: + - suffix (the language models we currently support do not support + suffix) + """ + error_check_ret = await self._check_model(request) + if error_check_ret is not None: + return error_check_ret + + # Return error for unsupported features. + if request.suffix is not None: + return self.create_error_response( + "suffix is not currently supported") + + model_name = self.served_model_names[0] + request_id = f"cmpl-{random_uuid()}" + created_time = int(time.time()) + + # Schedule the request and get the result generator. 
+ generators: List[AsyncIterator[RequestOutput]] = [] + try: + sampling_params = request.to_sampling_params() + lora_request = self._maybe_get_lora(request) + decoding_config = await self.engine.get_decoding_config() + guided_decoding_backend = request.guided_decoding_backend \ + or decoding_config.guided_decoding_backend + guided_decode_logit_processor = ( + await get_guided_decoding_logits_processor( + guided_decoding_backend, request, await + self.engine.get_tokenizer())) + if guided_decode_logit_processor is not None: + if sampling_params.logits_processors is None: + sampling_params.logits_processors = [] + sampling_params.logits_processors.append( + guided_decode_logit_processor) + prompt_is_tokens, prompts = parse_prompt_format(request.prompt) + + for i, prompt in enumerate(prompts): + if prompt_is_tokens: + prompt_formats = self._validate_prompt_and_tokenize( + request, + prompt_ids=prompt, + truncate_prompt_tokens=sampling_params. + truncate_prompt_tokens) + else: + prompt_formats = self._validate_prompt_and_tokenize( + request, + prompt=prompt, + truncate_prompt_tokens=sampling_params. + truncate_prompt_tokens) + prompt_ids, prompt_text = prompt_formats + + generators.append( + self.engine.generate(prompt_text, + sampling_params, + f"{request_id}-{i}", + prompt_token_ids=prompt_ids, + lora_request=lora_request)) + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + result_generator: AsyncIterator[Tuple[ + int, RequestOutput]] = merge_async_iterators(*generators) + + # Similar to the OpenAI API, when n != best_of, we do not stream the + # results. In addition, we do not stream the results when use + # beam search. 
+ stream = (request.stream + and (request.best_of is None or request.n == request.best_of) + and not request.use_beam_search) + + # Streaming response + if stream: + return self.completion_stream_generator(request, + raw_request, + result_generator, + request_id, + created_time, + model_name, + num_prompts=len(prompts)) + + # Non-streaming response + final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts) + try: + async for i, res in result_generator: + if await raw_request.is_disconnected(): + # Abort the request if the client disconnects. + await self.engine.abort(f"{request_id}-{i}") + return self.create_error_response("Client disconnected") + final_res_batch[i] = res + response = self.request_output_to_completion_response( + final_res_batch, request, request_id, created_time, model_name) + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + # When user requests streaming but we don't stream, we still need to + # return a streaming response with a single event. + if request.stream: + response_json = response.model_dump_json() + + async def fake_stream_generator() -> AsyncGenerator[str, None]: + yield f"data: {response_json}\n\n" + yield "data: [DONE]\n\n" + + return fake_stream_generator() + + return response + + async def completion_stream_generator( + self, + request: CompletionRequest, + raw_request: Request, + result_generator: AsyncIterator[Tuple[int, RequestOutput]], + request_id: str, + created_time: int, + model_name: str, + num_prompts: int, + ) -> AsyncGenerator[str, None]: + assert request.n is not None + previous_texts = [""] * request.n * num_prompts + previous_num_tokens = [0] * request.n * num_prompts + has_echoed = [False] * request.n * num_prompts + + try: + async for prompt_idx, res in result_generator: + + # Abort the request if the client disconnects. 
+ if await raw_request.is_disconnected(): + await self.engine.abort(f"{request_id}-{prompt_idx}") + raise StopAsyncIteration() + + for output in res.outputs: + i = output.index + prompt_idx * request.n + # TODO(simon): optimize the performance by avoiding full + # text O(n^2) sending. + + assert request.max_tokens is not None + if request.echo and request.max_tokens == 0: + # only return the prompt + delta_text = res.prompt + delta_token_ids = res.prompt_token_ids + top_logprobs = res.prompt_logprobs + has_echoed[i] = True + elif (request.echo and request.max_tokens > 0 + and not has_echoed[i]): + # echo the prompt and first token + delta_text = res.prompt + output.text + delta_token_ids = (res.prompt_token_ids + + output.token_ids) + top_logprobs = res.prompt_logprobs + (output.logprobs + or []) + has_echoed[i] = True + else: + # return just the delta + delta_text = output.text[len(previous_texts[i]):] + delta_token_ids = output.token_ids[ + previous_num_tokens[i]:] + top_logprobs = output.logprobs[previous_num_tokens[ + i]:] if output.logprobs else None + + if request.logprobs is not None: + logprobs = self._create_logprobs( + token_ids=delta_token_ids, + top_logprobs=top_logprobs, + num_output_top_logprobs=request.logprobs, + initial_text_offset=len(previous_texts[i]), + ) + else: + logprobs = None + + previous_texts[i] = output.text + previous_num_tokens[i] = len(output.token_ids) + finish_reason = output.finish_reason + stop_reason = output.stop_reason + if output.finish_reason is not None: # return final usage + prompt_tokens = len(res.prompt_token_ids) + completion_tokens = len(output.token_ids) + final_usage = UsageInfo( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + else: + final_usage = None + response_json = CompletionStreamResponse( + id=request_id, + created=created_time, + model=model_name, + choices=[ + CompletionResponseStreamChoice( + index=i, + text=delta_text, + 
logprobs=logprobs, + finish_reason=finish_reason, + stop_reason=stop_reason, + ) + ], + usage=final_usage, + ).model_dump_json(exclude_unset=True) + yield f"data: {response_json}\n\n" + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + yield "data: [DONE]\n\n" + + def request_output_to_completion_response( + self, + final_res_batch: List[RequestOutput], + request: CompletionRequest, + request_id: str, + created_time: int, + model_name: str, + ) -> CompletionResponse: + choices: List[CompletionResponseChoice] = [] + num_prompt_tokens = 0 + num_generated_tokens = 0 + for final_res in final_res_batch: + assert final_res is not None + prompt_token_ids = final_res.prompt_token_ids + prompt_logprobs = final_res.prompt_logprobs + prompt_text = final_res.prompt + + for output in final_res.outputs: + assert request.max_tokens is not None + if request.echo and request.max_tokens == 0: + token_ids = prompt_token_ids + top_logprobs = prompt_logprobs + output_text = prompt_text + elif request.echo and request.max_tokens > 0: + token_ids = prompt_token_ids + output.token_ids + top_logprobs = (prompt_logprobs + output.logprobs + if request.logprobs else None) + output_text = prompt_text + output.text + else: + token_ids = output.token_ids + top_logprobs = output.logprobs + output_text = output.text + + if request.logprobs is not None: + assert top_logprobs is not None, ( + "top_logprobs must be provided when logprobs " + "is requested") + logprobs = self._create_logprobs( + token_ids=token_ids, + top_logprobs=top_logprobs, + num_output_top_logprobs=request.logprobs, + ) + else: + logprobs = None + + choice_data = CompletionResponseChoice( + index=len(choices), + text=output_text, + logprobs=logprobs, + finish_reason=output.finish_reason, + stop_reason=output.stop_reason, + ) + choices.append(choice_data) + + num_prompt_tokens += len(prompt_token_ids) + num_generated_tokens += 
sum( + len(output.token_ids) for output in final_res.outputs) + + usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=num_generated_tokens, + total_tokens=num_prompt_tokens + num_generated_tokens, + ) + + return CompletionResponse( + id=request_id, + created=created_time, + model=model_name, + choices=choices, + usage=usage, + ) diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py new file mode 100644 index 0000000..21baea2 --- /dev/null +++ b/vllm/entrypoints/openai/serving_engine.py @@ -0,0 +1,234 @@ +import asyncio +import json +from dataclasses import dataclass +from http import HTTPStatus +from typing import Any, Awaitable, Dict, List, Optional, Tuple, Union + +from pydantic import Field +from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast +from typing_extensions import Annotated + +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + CompletionRequest, ErrorResponse, + LogProbs, ModelCard, ModelList, + ModelPermission) +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.sequence import Logprob +from vllm.transformers_utils.tokenizer import get_tokenizer + +logger = init_logger(__name__) + + +@dataclass +class LoRAModulePath: + name: str + local_path: str + + +class OpenAIServing: + + def __init__(self, + engine: AsyncLLMEngine, + served_model_names: List[str], + lora_modules: Optional[List[LoRAModulePath]], + await_post_init: Optional[Awaitable[Any]] = None): + self.engine = engine + self.served_model_names = served_model_names + if lora_modules is None: + self.lora_requests = [] + else: + self.lora_requests = [ + LoRARequest( + lora_name=lora.name, + lora_int_id=i, + lora_local_path=lora.local_path, + ) for i, lora in enumerate(lora_modules, start=1) + ] + + self.max_model_len = 0 + # Lazy initialized + self.tokenizer: Union[PreTrainedTokenizer, 
PreTrainedTokenizerFast] + + try: + event_loop = asyncio.get_running_loop() + except RuntimeError: + event_loop = None + + if event_loop is not None and event_loop.is_running(): + # If the current is instanced by Ray Serve, + # there is already a running event loop + event_loop.create_task(self._post_init(await_post_init)) + else: + # When using single vLLM without engine_use_ray + asyncio.run(self._post_init(await_post_init)) + + async def _post_init(self, await_post_init): + engine_model_config = await self.engine.get_model_config() + self.max_model_len = engine_model_config.max_model_len + + # A separate tokenizer to map token IDs to strings. + self.tokenizer = get_tokenizer( + engine_model_config.tokenizer, + tokenizer_mode=engine_model_config.tokenizer_mode, + tokenizer_revision=engine_model_config.tokenizer_revision, + trust_remote_code=engine_model_config.trust_remote_code, + truncation_side="left") + + if await_post_init is not None: + await await_post_init + + async def show_available_models(self) -> ModelList: + """Show available models. 
Right now we only have one model.""" + model_cards = [ + ModelCard(id=served_model_name, + root=self.served_model_names[0], + permission=[ModelPermission()]) + for served_model_name in self.served_model_names + ] + lora_cards = [ + ModelCard(id=lora.lora_name, + root=self.served_model_names[0], + permission=[ModelPermission()]) + for lora in self.lora_requests + ] + model_cards.extend(lora_cards) + return ModelList(data=model_cards) + + def _create_logprobs( + self, + token_ids: List[int], + top_logprobs: List[Optional[Dict[int, Logprob]]], + num_output_top_logprobs: Optional[int] = None, + initial_text_offset: int = 0, + ) -> LogProbs: + """Create OpenAI-style logprobs.""" + logprobs = LogProbs() + last_token_len = 0 + if num_output_top_logprobs: + logprobs.top_logprobs = [] + + for i, token_id in enumerate(token_ids): + step_top_logprobs = top_logprobs[i] + if step_top_logprobs is None: + token = self.tokenizer.decode(token_id) + logprobs.tokens.append(token) + logprobs.token_logprobs.append(None) + assert logprobs.top_logprobs is not None + logprobs.top_logprobs.append(None) + else: + token_logprob = step_top_logprobs[token_id].logprob + token = step_top_logprobs[token_id].decoded_token + logprobs.tokens.append(token) + logprobs.token_logprobs.append(token_logprob) + + if num_output_top_logprobs: + assert logprobs.top_logprobs is not None + logprobs.top_logprobs.append({ + # Convert float("-inf") to the + # JSON-serializable float that OpenAI uses + p.decoded_token: max(p.logprob, -9999.0) + for i, p in step_top_logprobs.items() + } if step_top_logprobs else None) + + if len(logprobs.text_offset) == 0: + logprobs.text_offset.append(initial_text_offset) + else: + logprobs.text_offset.append(logprobs.text_offset[-1] + + last_token_len) + last_token_len = len(token) + return logprobs + + def create_error_response( + self, + message: str, + err_type: str = "BadRequestError", + status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> ErrorResponse: + return 
ErrorResponse(message=message, + type=err_type, + code=status_code.value) + + def create_streaming_error_response( + self, + message: str, + err_type: str = "BadRequestError", + status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> str: + json_str = json.dumps({ + "error": + self.create_error_response(message=message, + err_type=err_type, + status_code=status_code).model_dump() + }) + return json_str + + async def _check_model( + self, request: Union[CompletionRequest, ChatCompletionRequest] + ) -> Optional[ErrorResponse]: + if request.model in self.served_model_names: + return None + if request.model in [lora.lora_name for lora in self.lora_requests]: + return None + return self.create_error_response( + message=f"The model `{request.model}` does not exist.", + err_type="NotFoundError", + status_code=HTTPStatus.NOT_FOUND) + + def _maybe_get_lora( + self, request: Union[CompletionRequest, ChatCompletionRequest] + ) -> Optional[LoRARequest]: + if request.model in self.served_model_names: + return None + for lora in self.lora_requests: + if request.model == lora.lora_name: + return lora + # if _check_model has been called earlier, this will be unreachable + raise ValueError(f"The model `{request.model}` does not exist.") + + def _validate_prompt_and_tokenize( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + prompt: Optional[str] = None, + prompt_ids: Optional[List[int]] = None, + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None + ) -> Tuple[List[int], str]: + if not (prompt or prompt_ids): + raise ValueError("Either prompt or prompt_ids should be provided.") + if (prompt and prompt_ids): + raise ValueError( + "Only one of prompt or prompt_ids should be provided.") + + if prompt_ids is None: + tokenizer_kwargs = {} if truncate_prompt_tokens is None else { + "truncation": True, + "max_length": truncate_prompt_tokens, + } + input_ids = self.tokenizer(prompt, **tokenizer_kwargs).input_ids + elif truncate_prompt_tokens is not None: + 
# NOTE(review): the tail of the OpenAI-serving prompt/max_tokens validation
# helper that opened this span begins outside this chunk and is not
# reproduced here.

# --- vllm/envs.py -------------------------------------------------------
"""Lazily evaluated environment-variable accessors for vLLM.

Each entry in ``environment_variables`` maps a name to a zero-argument
callable; the module-level ``__getattr__`` below invokes the callable on
attribute access, so ``vllm.envs.FOO`` always reflects the *current*
process environment.
"""
import os
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional

if TYPE_CHECKING:
    # Static stubs for type checkers only; runtime values come from the
    # lambdas in `environment_variables` below.
    VLLM_HOST_IP: str = ""
    VLLM_USE_MODELSCOPE: bool = False
    VLLM_INSTANCE_ID: Optional[str] = None
    VLLM_NCCL_SO_PATH: Optional[str] = None
    LD_LIBRARY_PATH: Optional[str] = None
    VLLM_USE_TRITON_FLASH_ATTN: bool = False
    LOCAL_RANK: int = 0
    CUDA_VISIBLE_DEVICES: Optional[str] = None
    VLLM_ENGINE_ITERATION_TIMEOUT_S: int = 60
    VLLM_API_KEY: Optional[str] = None
    S3_ACCESS_KEY_ID: Optional[str] = None
    S3_SECRET_ACCESS_KEY: Optional[str] = None
    S3_ENDPOINT_URL: Optional[str] = None
    VLLM_CONFIG_ROOT: str = ""
    VLLM_USAGE_STATS_SERVER: str = "https://stats.vllm.ai"
    VLLM_NO_USAGE_STATS: bool = False
    VLLM_DO_NOT_TRACK: bool = False
    VLLM_USAGE_SOURCE: str = ""
    VLLM_CONFIGURE_LOGGING: int = 1
    VLLM_LOGGING_CONFIG_PATH: Optional[str] = None
    VLLM_TRACE_FUNCTION: int = 0
    VLLM_ATTENTION_BACKEND: Optional[str] = None
    VLLM_CPU_KVCACHE_SPACE: int = 0
    VLLM_USE_RAY_COMPILED_DAG: bool = False
    VLLM_WORKER_MULTIPROC_METHOD: str = "spawn"
    # NOTE(review): stub default is "musa" while the runtime lambda below
    # defaults to "cuda" -- confirm which default this MUSA port intends.
    VLLM_TARGET_DEVICE: str = "musa"
    MAX_JOBS: Optional[str] = None
    NVCC_THREADS: Optional[str] = None
    VLLM_BUILD_WITH_NEURON: bool = False
    VLLM_USE_PRECOMPILED: bool = False
    VLLM_INSTALL_PUNICA_KERNELS: bool = False
    CMAKE_BUILD_TYPE: Optional[str] = None
    VERBOSE: bool = False
    # FIX: stub was missing although "CUDA_HOME" is defined in the dict.
    CUDA_HOME: Optional[str] = None

# The begin-* and end* here are used by the documentation generator
# to extract the used env vars.

# begin-env-vars-definition

environment_variables: Dict[str, Callable[[], Any]] = {

    # ================== Installation Time Env Vars ==================

    # Target device of vLLM, supporting [cuda (by default), rocm, neuron, cpu]
    "VLLM_TARGET_DEVICE":
    lambda: os.getenv("VLLM_TARGET_DEVICE", "cuda"),

    # Maximum number of compilation jobs to run in parallel.
    # By default this is the number of CPUs
    "MAX_JOBS":
    lambda: os.getenv("MAX_JOBS", None),

    # Number of threads to use for nvcc
    # By default this is 1.
    # If set, `MAX_JOBS` will be reduced to avoid oversubscribing the CPU.
    "NVCC_THREADS":
    lambda: os.getenv("NVCC_THREADS", None),

    # If set, vllm will build with Neuron support
    # FIX: bool() on the raw env string made "0" and "false" truthy;
    # parse as an int flag like VLLM_INSTALL_PUNICA_KERNELS below.
    "VLLM_BUILD_WITH_NEURON":
    lambda: bool(int(os.environ.get("VLLM_BUILD_WITH_NEURON", "0"))),

    # If set, vllm will use precompiled binaries (*.so)
    "VLLM_USE_PRECOMPILED":
    lambda: bool(os.environ.get("VLLM_USE_PRECOMPILED")),

    # If set, vllm will install Punica kernels
    "VLLM_INSTALL_PUNICA_KERNELS":
    lambda: bool(int(os.getenv("VLLM_INSTALL_PUNICA_KERNELS", "0"))),

    # CMake build type
    # If not set, defaults to "Debug" or "RelWithDebInfo"
    # Available options: "Debug", "Release", "RelWithDebInfo"
    "CMAKE_BUILD_TYPE":
    lambda: os.getenv("CMAKE_BUILD_TYPE"),

    # If set, vllm will print verbose logs during installation
    "VERBOSE":
    lambda: bool(int(os.getenv('VERBOSE', '0'))),

    # Root directory for VLLM configuration files
    # Note that this not only affects how vllm finds its configuration files
    # during runtime, but also affects how vllm installs its configuration
    # files during **installation**.
    "VLLM_CONFIG_ROOT":
    lambda: os.environ.get("VLLM_CONFIG_ROOT", None) or os.getenv(
        "XDG_CONFIG_HOME", None) or os.path.expanduser("~/.config"),

    # ================== Runtime Env Vars ==================

    # used in distributed environment to determine the master address
    'VLLM_HOST_IP':
    lambda: os.getenv('VLLM_HOST_IP', "") or os.getenv("HOST_IP", ""),

    # If true, will load models from ModelScope instead of Hugging Face Hub.
    # note that the value is true or false, not numbers
    "VLLM_USE_MODELSCOPE":
    lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true",

    # Instance id represents an instance of the VLLM. All processes in the same
    # instance should have the same instance id.
    "VLLM_INSTANCE_ID":
    lambda: os.environ.get("VLLM_INSTANCE_ID", None),

    # path to cudatoolkit home directory, under which should be bin, include,
    # and lib directories.
    "CUDA_HOME":
    lambda: os.environ.get("CUDA_HOME", None),

    # Path to the NCCL library file. It is needed because nccl>=2.19 brought
    # by PyTorch contains a bug: https://github.com/NVIDIA/nccl/issues/1234
    "VLLM_NCCL_SO_PATH":
    lambda: os.environ.get("VLLM_NCCL_SO_PATH", None),

    # when `VLLM_NCCL_SO_PATH` is not set, vllm will try to find the nccl
    # library file in the locations specified by `LD_LIBRARY_PATH`
    "LD_LIBRARY_PATH":
    lambda: os.environ.get("LD_LIBRARY_PATH", None),

    # flag to control if vllm should use triton flash attention
    "VLLM_USE_TRITON_FLASH_ATTN":
    lambda: (os.environ.get("VLLM_USE_TRITON_FLASH_ATTN", "True").lower() in
             ("true", "1")),

    # local rank of the process in the distributed setting, used to determine
    # the GPU device id
    "LOCAL_RANK":
    lambda: int(os.environ.get("LOCAL_RANK", "0")),

    # used to control the visible devices in the distributed setting
    "CUDA_VISIBLE_DEVICES":
    lambda: os.environ.get("CUDA_VISIBLE_DEVICES", None),

    # timeout for each iteration in the engine
    "VLLM_ENGINE_ITERATION_TIMEOUT_S":
    lambda: int(os.environ.get("VLLM_ENGINE_ITERATION_TIMEOUT_S", "60")),

    # API key for VLLM API server
    "VLLM_API_KEY":
    lambda: os.environ.get("VLLM_API_KEY", None),

    # S3 access information, used for tensorizer to load model from S3
    # FIX: previously read "S3_ACCESS_KEY", so the documented
    # "S3_ACCESS_KEY_ID" setting was silently ignored.
    "S3_ACCESS_KEY_ID":
    lambda: os.environ.get("S3_ACCESS_KEY_ID", None),
    "S3_SECRET_ACCESS_KEY":
    lambda: os.environ.get("S3_SECRET_ACCESS_KEY", None),
    "S3_ENDPOINT_URL":
    lambda: os.environ.get("S3_ENDPOINT_URL", None),

    # Usage stats collection
    "VLLM_USAGE_STATS_SERVER":
    lambda: os.environ.get("VLLM_USAGE_STATS_SERVER", "https://stats.vllm.ai"),
    "VLLM_NO_USAGE_STATS":
    lambda: os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1",
    "VLLM_DO_NOT_TRACK":
    lambda: (os.environ.get("VLLM_DO_NOT_TRACK", None) or os.environ.get(
        "DO_NOT_TRACK", None) or "0") == "1",
    "VLLM_USAGE_SOURCE":
    lambda: os.environ.get("VLLM_USAGE_SOURCE", "production"),

    # Logging configuration
    # If set to 0, vllm will not configure logging
    # If set to 1, vllm will configure logging using the default configuration
    # or the configuration file specified by VLLM_LOGGING_CONFIG_PATH
    "VLLM_CONFIGURE_LOGGING":
    lambda: int(os.getenv("VLLM_CONFIGURE_LOGGING", "1")),
    "VLLM_LOGGING_CONFIG_PATH":
    lambda: os.getenv("VLLM_LOGGING_CONFIG_PATH"),

    # Trace function calls
    # If set to 1, vllm will trace function calls
    # Useful for debugging
    "VLLM_TRACE_FUNCTION":
    lambda: int(os.getenv("VLLM_TRACE_FUNCTION", "0")),

    # Backend for attention computation
    # Available options:
    # - "TORCH_SDPA": use torch.nn.MultiheadAttention
    # - "FLASH_ATTN": use FlashAttention
    # - "XFORMERS": use XFormers
    # - "ROCM_FLASH": use ROCmFlashAttention
    "VLLM_ATTENTION_BACKEND":
    lambda: os.getenv("VLLM_ATTENTION_BACKEND", None),

    # CPU key-value cache space (in GiB); 0 means "unset" and the CPU
    # backend falls back to its 4 GiB default.
    "VLLM_CPU_KVCACHE_SPACE":
    lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0")),

    # If the env var is set, it uses the Ray's compiled DAG API
    # which optimizes the control plane overhead.
    # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it.
    # FIX: bool() on the raw env string made "0" truthy; parse as int flag.
    "VLLM_USE_RAY_COMPILED_DAG":
    lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG", "0"))),

    # Use dedicated multiprocess context for workers.
    # Both spawn and fork work
    "VLLM_WORKER_MULTIPROC_METHOD":
    lambda: os.getenv("VLLM_WORKER_MULTIPROC_METHOD", "spawn"),
}

# end-env-vars-definition


def __getattr__(name):
    """Lazily evaluate an environment variable on attribute access."""
    if name in environment_variables:
        return environment_variables[name]()
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return list(environment_variables.keys())
# --- vllm/executor/cpu_executor.py --------------------------------------
from typing import List, Set, Tuple

import torch

import vllm.envs as envs
from vllm.config import CacheConfig, ModelConfig, SchedulerConfig
from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import ExecuteModelRequest, SamplerOutput
from vllm.utils import (get_distributed_init_method, get_ip, get_open_port,
                        make_async)

logger = init_logger(__name__)


class CPUExecutor(ExecutorBase):
    """Executor that runs the model in a single driver worker on the host CPU."""

    def _init_executor(self) -> None:
        assert self.device_config.device_type == "cpu"
        assert self.lora_config is None, "cpu backend doesn't support LoRA"
        # Rewrite any config options the CPU backend cannot honor as-is.
        self.model_config = _verify_and_get_model_config(self.model_config)
        self.cache_config = _verify_and_get_cache_config(self.cache_config)
        self.scheduler_config = _verify_and_get_scheduler_config(
            self.scheduler_config)

        # Instantiate the worker and load the model to CPU.
        self._init_worker()

    def _init_worker(self):
        """Create the single driver worker and load the model onto it."""
        from vllm.worker.cpu_worker import CPUWorker

        assert self.parallel_config.world_size == 1, (
            "CPUExecutor only supports single CPU socket currently.")

        distributed_init_method = get_distributed_init_method(
            get_ip(), get_open_port())
        self.driver_worker = CPUWorker(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=0,
            rank=0,
            distributed_init_method=distributed_init_method,
            lora_config=self.lora_config,
            vision_language_config=self.vision_language_config,
            kv_cache_dtype=self.cache_config.cache_dtype,
            is_driver_worker=True,
        )
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Delegate KV-block counting to the underlying worker."""
        return self.driver_worker.determine_num_available_blocks()

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker."""
        # NOTE: `cpu block` for CPU backend is located on CPU memory but is
        # referred as `gpu block`. Because we want to reuse the existing block
        # management procedure.
        logger.info("# CPU blocks: %d", num_gpu_blocks)
        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)

    def execute_model(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        return self.driver_worker.execute_model(execute_model_req)

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.driver_worker.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.driver_worker.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.driver_worker.list_loras()

    def check_health(self) -> None:
        # A local in-process worker is always healthy while running.
        return


class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        return await make_async(self.driver_worker.execute_model
                                )(execute_model_req=execute_model_req, )

    async def check_health_async(self) -> None:
        # A local in-process worker is always healthy while running.
        return


def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig:
    """Force CPU-compatible dtype and eager execution."""
    if config.dtype == torch.float16:
        logger.warning("float16 is not supported on CPU, casting to bfloat16.")
        config.dtype = torch.bfloat16
    if not config.enforce_eager:
        logger.warning(
            "CUDA graph is not supported on CPU, fallback to the eager "
            "mode.")
        config.enforce_eager = True
    return config


def _verify_and_get_scheduler_config(
        config: SchedulerConfig) -> SchedulerConfig:
    """Disable scheduler features the CPU backend does not implement."""
    if config.chunked_prefill_enabled:
        logger.warning("Chunked prefill is not supported on CPU, disable it.")
        config.chunked_prefill_enabled = False
    return config


def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig:
    """Size the CPU KV cache from VLLM_CPU_KVCACHE_SPACE (GiB)."""
    _GB = 1 << 30
    if config.enable_prefix_caching:
        logger.warning("Prefix caching is not supported on CPU, disable it.")
        config.enable_prefix_caching = False

    kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE
    if kv_cache_space < 0:
        raise RuntimeError(
            "Invalid environment variable VLLM_CPU_KVCACHE_SPACE"
            f" {kv_cache_space}, expect a positive integer value.")
    if kv_cache_space == 0:
        # Unset: fall back to the 4 GiB default.
        config.cpu_kvcache_space_bytes = 4 * _GB  # type: ignore
        logger.warning("Environment variable VLLM_CPU_KVCACHE_SPACE (GB) "
                       "for CPU backend is not set, using 4 by default.")
    else:
        config.cpu_kvcache_space_bytes = kv_cache_space * _GB  # type: ignore
    return config


# --- vllm/executor/distributed_gpu_executor.py --------------------------
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Set, Tuple

from vllm.executor.executor_base import ExecutorAsyncBase
from vllm.executor.gpu_executor import GPUExecutor
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import SamplerOutput

logger = init_logger(__name__)
class DistributedGPUExecutor(GPUExecutor):
    """Abstract superclass of multi-GPU executor implementations."""

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks.

        Invokes `determine_num_available_blocks` on every worker and takes
        the elementwise minimum, guaranteeing that the selected cache sizes
        are compatible with all workers.

        Returns:
            tuple[num_gpu_blocks, num_cpu_blocks]
        """
        num_blocks = self._run_workers("determine_num_available_blocks", )
        # A shared centralized controller needs cache sizes every worker can
        # satisfy, hence the min over all workers in each dimension.
        return (min(b[0] for b in num_blocks), min(b[1] for b in num_blocks))

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache in all workers."""
        # Log once here rather than per-worker; the engine is not used for
        # this because not all executors have GPUs.
        logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks,
                    num_cpu_blocks)

        self.cache_config.num_gpu_blocks = num_gpu_blocks
        self.cache_config.num_cpu_blocks = num_cpu_blocks

        self._run_workers("initialize_cache",
                          num_gpu_blocks=num_gpu_blocks,
                          num_cpu_blocks=num_cpu_blocks)

    def execute_model(self, *args, **kwargs) -> List[SamplerOutput]:
        outputs = self._run_workers("execute_model",
                                    driver_args=args,
                                    driver_kwargs=kwargs)
        # Only the driver worker returns the sampling results.
        return outputs[0]

    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return self._run_workers(
            "add_lora",
            lora_request=lora_request,
        )

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self._run_workers(
            "remove_lora",
            lora_id=lora_id,
        )

    def list_loras(self) -> Set[int]:
        return self._run_workers("list_loras")

    @abstractmethod
    def _run_workers(
        self,
        method: str,
        *args,
        driver_args: Optional[Tuple[Any, ...]] = None,
        driver_kwargs: Optional[Dict[str, Any]] = None,
        max_concurrent_workers: Optional[int] = None,
        **kwargs,
    ) -> Any:
        """Runs the given method on all workers."""
        raise NotImplementedError


class DistributedGPUExecutorAsync(DistributedGPUExecutor, ExecutorAsyncBase):

    @abstractmethod
    async def _run_workers_async(
        self,
        method: str,
        *args,
        driver_args: Optional[Tuple[Any, ...]] = None,
        driver_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> Any:
        """Runs the given method on all workers."""
        raise NotImplementedError

    async def execute_model_async(self, *args,
                                  **kwargs) -> List[SamplerOutput]:
        outputs = await self._run_workers_async("execute_model",
                                                driver_args=args,
                                                driver_kwargs=kwargs)
        # Only the driver worker returns the sampling results.
        return outputs[0]


# --- vllm/executor/executor_base.py -------------------------------------
from abc import ABC, abstractmethod
from typing import List, Optional, Set, Tuple

from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig,
                         ModelConfig, ParallelConfig, SchedulerConfig,
                         SpeculativeConfig, VisionLanguageConfig)
from vllm.lora.request import LoRARequest
from vllm.sequence import ExecuteModelRequest, SamplerOutput
class ExecutorBase(ABC):
    """Base class for all executors.

    An executor is responsible for executing the model on a specific device
    type (e.g., CPU, GPU, Neuron, etc.). Or it can be a distributed executor
    that can execute the model on multiple devices.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        cache_config: CacheConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        load_config: LoadConfig,
        lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig],
        speculative_config: Optional[SpeculativeConfig],
    ) -> None:
        # Stash every config; concrete subclasses read these from
        # `_init_executor`, which runs at the end of construction.
        self.model_config = model_config
        self.cache_config = cache_config
        self.lora_config = lora_config
        self.load_config = load_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.vision_language_config = vision_language_config
        self.speculative_config = speculative_config

        self._init_executor()

    @abstractmethod
    def _init_executor(self) -> None:
        # Device-specific setup; invoked once at the end of __init__.
        pass

    @abstractmethod
    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available blocks for the GPU KV cache and
        swappable CPU KV cache.

        Normally, this should simply delegate to the underlying Worker. Some
        ExecutorBase may require modification of the result, e.g. to ensure the
        selected cache sizes are compatible with all workers.

        Returns a Tuple[num_gpu_blocks, num_cpu_blocks], where num_gpu_blocks
        are blocks that are "active" on the device and can be appended to.
        num_cpu_blocks refers to "swapped" blocks in CPU memory and cannot be
        appended to.
        """
        raise NotImplementedError

    @abstractmethod
    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache with the given size in blocks.
        """
        raise NotImplementedError

    @abstractmethod
    def execute_model(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        """Executes at least one model step on the given sequences."""
        raise NotImplementedError

    @abstractmethod
    def add_lora(self, lora_request: LoRARequest) -> bool:
        raise NotImplementedError

    @abstractmethod
    def remove_lora(self, lora_id: int) -> bool:
        raise NotImplementedError

    @abstractmethod
    def list_loras(self) -> Set[int]:
        raise NotImplementedError

    @abstractmethod
    def check_health(self) -> None:
        """Checks if the executor is healthy. If not, it should raise an
        exception."""
        raise NotImplementedError

    def shutdown(self) -> None:
        """Shutdown the executor."""
        return

    def __del__(self):
        # Best-effort cleanup; subclasses override shutdown() as needed.
        self.shutdown()


class ExecutorAsyncBase(ExecutorBase):
    """ExecutorBase variant exposing async model execution and health check."""

    @abstractmethod
    async def execute_model_async(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        """Executes one model step on the given sequences."""
        raise NotImplementedError

    async def check_health_async(self) -> None:
        """Checks if the executor is healthy. If not, it should raise an
        exception."""
        # Default: the synchronous health check is authoritative.
        self.check_health()


# --- vllm/executor/gpu_executor.py --------------------------------------
from typing import Any, Dict, List, Optional, Set, Tuple

from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import ExecuteModelRequest, SamplerOutput
from vllm.utils import (get_distributed_init_method, get_ip, get_open_port,
                        make_async)
from vllm.worker.worker_base import WorkerWrapperBase

logger = init_logger(__name__)
class GPUExecutor(ExecutorBase):
    """Single-GPU executor; optionally wraps a speculative-decoding worker."""

    def _init_executor(self) -> None:
        """Initialize the worker and load the model.

        If speculative decoding is enabled, we instead create the speculative
        worker.
        """
        if self.speculative_config is None:
            self._init_non_spec_worker()
        else:
            self._init_spec_worker()

    def _get_worker_kwargs(
        self,
        local_rank: int = 0,
        rank: int = 0,
        distributed_init_method: Optional[str] = None) -> Dict[str, Any]:
        """Return worker init args for a given rank."""
        if distributed_init_method is None:
            distributed_init_method = get_distributed_init_method(
                get_ip(), get_open_port())
        return dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            lora_config=self.lora_config,
            vision_language_config=self.vision_language_config,
            is_driver_worker=rank == 0,
        )

    def _create_worker(self,
                       local_rank: int = 0,
                       rank: int = 0,
                       distributed_init_method: Optional[str] = None):
        """Build a Worker through WorkerWrapperBase (defers heavy imports)."""
        wrapper = WorkerWrapperBase(
            worker_module_name="vllm.worker.worker",
            worker_class_name="Worker",
        )
        wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank,
                                                      distributed_init_method))
        return wrapper.worker

    def _init_non_spec_worker(self):
        # Plain single-GPU path: one driver worker owns the device and model.
        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = self._create_worker()
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _init_spec_worker(self):
        """Initialize a SpecDecodeWorker, using a draft model for proposals.
        """
        assert self.speculative_config is not None

        from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker

        target_worker = self._create_worker()

        draft_worker_kwargs = self._get_worker_kwargs()
        # Override draft-model specific worker args.
        draft_worker_kwargs.update(
            model_config=self.speculative_config.draft_model_config,
            parallel_config=self.speculative_config.draft_parallel_config,
            # TODO allow draft-model specific load config.
            #load_config=self.load_config,
        )

        spec_decode_worker = SpecDecodeWorker.create_worker(
            scorer_worker=target_worker,
            draft_worker_kwargs=draft_worker_kwargs,
        )

        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = spec_decode_worker

        # Load model handled in spec decode worker.
        self.driver_worker.init_device()

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_worker.determine_num_available_blocks()

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker.

        FIX: `num_cpu_blocks` was missing its `int` annotation, unlike the
        same signature in every sibling executor and in ExecutorBase.
        """
        # NOTE: This is logged in the executor because there can be >1 worker
        # with other executors. We could log in the engine level, but work
        # remains to abstract away the device for non-GPU configurations.
        logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks,
                    num_cpu_blocks)

        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)

    def execute_model(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        output = self.driver_worker.execute_model(execute_model_req)
        return output

    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.driver_worker.list_loras()

    def check_health(self) -> None:
        # GPUExecutor will always be healthy as long as it's running.
        return


class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[SamplerOutput]:
        output = await make_async(self.driver_worker.execute_model
                                  )(execute_model_req=execute_model_req, )
        return output


# --- vllm/executor/multiproc_worker_utils.py ----------------------------
import asyncio
import multiprocessing
import os
import sys
import threading
import traceback
import uuid
from dataclasses import dataclass
from multiprocessing import Queue
from multiprocessing.connection import wait
from multiprocessing.process import BaseProcess
from typing import (Any, Callable, Dict, Generic, List, Optional, TextIO,
                    TypeVar, Union)

import vllm.envs as envs
from vllm.logger import init_logger

logger = init_logger(__name__)

T = TypeVar('T')

_TERMINATE = "TERMINATE"  # sentinel

# ANSI color codes
CYAN = '\033[1;36m'
RESET = '\033[0;0m'

JOIN_TIMEOUT_S = 2

mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD
mp = multiprocessing.get_context(mp_method)


@dataclass
class Result(Generic[T]):
    """Result of task dispatched to worker"""

    task_id: uuid.UUID
    value: Optional[T] = None
    exception: Optional[BaseException] = None


class ResultFuture(threading.Event, Generic[T]):
    """Synchronous future for non-async case"""

    def __init__(self):
        super().__init__()
        self.result: Optional[Result[T]] = None

    def set_result(self, result: Result[T]):
        self.result = result
        self.set()

    def get(self) -> T:
        self.wait()
        assert self.result is not None
        if self.result.exception is not None:
            raise self.result.exception
        return self.result.value  # type: ignore[return-value]
def _set_future_result(future: Union[ResultFuture, asyncio.Future],
                       result: Result):
    # Deliver `result` to either kind of future; asyncio futures must be
    # completed on their own event loop, hence call_soon_threadsafe.
    if isinstance(future, ResultFuture):
        future.set_result(result)
        return
    loop = future.get_loop()
    if result.exception is not None:
        loop.call_soon_threadsafe(future.set_exception, result.exception)
    else:
        loop.call_soon_threadsafe(future.set_result, result.value)


class ResultHandler(threading.Thread):
    """Handle results from all workers (in background thread)"""

    def __init__(self) -> None:
        super().__init__(daemon=True)
        self.result_queue = mp.Queue()
        self.tasks: Dict[uuid.UUID, Union[ResultFuture, asyncio.Future]] = {}

    def run(self):
        # Drain results until the _TERMINATE sentinel arrives on the queue.
        for result in iter(self.result_queue.get, _TERMINATE):
            future = self.tasks.pop(result.task_id)
            _set_future_result(future, result)
        # Ensure that all waiters will receive an exception
        for task_id, future in self.tasks.items():
            _set_future_result(
                future,
                Result(task_id=task_id,
                       exception=ChildProcessError("worker died")))

    def close(self):
        self.result_queue.put(_TERMINATE)


class WorkerMonitor(threading.Thread):
    """Monitor worker status (in background thread)"""

    def __init__(self, workers: List['ProcessWorkerWrapper'],
                 result_handler: ResultHandler):
        super().__init__(daemon=True)
        self.workers = workers
        self.result_handler = result_handler
        self._close = False

    def run(self) -> None:
        # Blocks until any worker exits
        dead_sentinels = wait([w.process.sentinel for w in self.workers])
        if not self._close:
            self._close = True

            # Kill / cleanup all workers
            for worker in self.workers:
                process = worker.process
                if process.sentinel in dead_sentinels:
                    process.join(JOIN_TIMEOUT_S)
                if process.exitcode is not None and process.exitcode != 0:
                    logger.error("Worker %s pid %s died, exit code: %s",
                                 process.name, process.pid, process.exitcode)
            # Cleanup any remaining workers
            logger.info("Killing local vLLM worker processes")
            for worker in self.workers:
                worker.kill_worker()
            # Must be done after worker task queues are all closed
            self.result_handler.close()

        for worker in self.workers:
            worker.process.join(JOIN_TIMEOUT_S)

    def close(self):
        if self._close:
            return
        self._close = True
        logger.info("Terminating local vLLM worker processes")
        for worker in self.workers:
            worker.terminate_worker()
        # Must be done after worker task queues are all closed
        self.result_handler.close()


class ProcessWorkerWrapper:
    """Local process wrapper for vllm.worker.Worker,
    for handling single-node multi-GPU tensor parallel."""

    def __init__(self, result_handler: ResultHandler,
                 worker_factory: Callable[[], Any]) -> None:
        self._task_queue = mp.Queue()
        self.result_queue = result_handler.result_queue
        self.tasks = result_handler.tasks
        self.process: BaseProcess = mp.Process(  # type: ignore[attr-defined]
            target=_run_worker_process,
            name="VllmWorkerProcess",
            kwargs=dict(
                worker_factory=worker_factory,
                task_queue=self._task_queue,
                result_queue=self.result_queue,
            ),
            daemon=True)

        self.process.start()

    def _enqueue_task(self, future: Union[ResultFuture, asyncio.Future],
                      method: str, args, kwargs):
        # Register the future before enqueueing so the result handler can
        # always find it; roll back the registration if the put fails.
        task_id = uuid.uuid4()
        self.tasks[task_id] = future
        try:
            self._task_queue.put((task_id, method, args, kwargs))
        except BaseException as e:
            del self.tasks[task_id]
            raise ChildProcessError("worker died") from e

    def execute_method(self, method: str, *args, **kwargs):
        future: ResultFuture = ResultFuture()
        self._enqueue_task(future, method, args, kwargs)
        return future

    async def execute_method_async(self, method: str, *args, **kwargs):
        future = asyncio.get_running_loop().create_future()
        self._enqueue_task(future, method, args, kwargs)
        return await future

    def terminate_worker(self):
        # Graceful stop: send the sentinel; fall back to kill if the queue
        # is already unusable.
        try:
            self._task_queue.put(_TERMINATE)
        except ValueError:
            self.process.kill()
        self._task_queue.close()

    def kill_worker(self):
        self._task_queue.close()
        self.process.kill()


def _run_worker_process(
    worker_factory: Callable[[], Any],
    task_queue: Queue,
    result_queue: Queue,
) -> None:
    """Worker process event loop"""

    # Add process-specific prefix to stdout and stderr
    process_name = mp.current_process().name
    pid = os.getpid()
    _add_prefix(sys.stdout, process_name, pid)
    _add_prefix(sys.stderr, process_name, pid)

    # Initialize worker
    worker = worker_factory()
    del worker_factory

    # Accept tasks from the engine in task_queue
    # and return task output in result_queue
    logger.info("Worker ready; awaiting tasks")
    try:
        for items in iter(task_queue.get, _TERMINATE):
            output = None
            exception = None
            task_id, method, args, kwargs = items
            try:
                executor = getattr(worker, method)
                output = executor(*args, **kwargs)
            except BaseException as e:
                tb = traceback.format_exc()
                logger.error(
                    "Exception in worker %s while processing method %s: %s, %s",
                    process_name, method, e, tb)
                exception = e
            result_queue.put(
                Result(task_id=task_id, value=output, exception=exception))
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception("Worker failed")

    logger.info("Worker exiting")


def _add_prefix(file: TextIO, worker_name: str, pid: int) -> None:
    """Prepend each output line with process-specific prefix"""

    prefix = f"{CYAN}({worker_name} pid={pid}){RESET} "
    file_write = file.write

    def write_with_prefix(s: str):
        # Emit the prefix after every newline; track whether the next write
        # starts a fresh line via the `start_new_line` attribute on `file`.
        if not s:
            return
        if file.start_new_line:  # type: ignore[attr-defined]
            file_write(prefix)
        idx = 0
        while (next_idx := s.find('\n', idx)) != -1:
            next_idx += 1
            file_write(s[idx:next_idx])
            if next_idx == len(s):
                file.start_new_line = True  # type: ignore[attr-defined]
                return
            file_write(prefix)
            idx = next_idx
        file_write(s[idx:])
        file.start_new_line = False  # type: ignore[attr-defined]

    file.start_new_line = True  # type: ignore[attr-defined]
    file.write = write_with_prefix  # type: ignore[method-assign]


# --- vllm/executor/neuron_executor.py -----------------------------------
from typing import List, Set, Tuple

from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import ExecuteModelRequest, SamplerOutput
from vllm.utils import make_async

logger = init_logger(__name__)
+ """ + self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks) + + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + assert (execute_model_req.blocks_to_swap_in == {} + and execute_model_req.blocks_to_swap_out == {} + and execute_model_req.blocks_to_copy == {}), ( + "Cache operations are not supported for Neuron backend.") + assert execute_model_req.num_lookahead_slots == 0, ( + "lookahead not supported for Neuron backend.") + + output = self.driver_worker.execute_model( + execute_model_req.seq_group_metadata_list) + return output + + def add_lora(self, lora_request: LoRARequest) -> bool: + return self.driver_worker.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + return self.driver_worker.remove_lora(lora_id) + + def list_loras(self) -> Set[int]: + return self.driver_worker.list_loras() + + def check_health(self) -> None: + # NeuronExecutor will always be healthy as long as + # it's running. + return + + +class NeuronExecutorAsync(NeuronExecutor, ExecutorAsyncBase): + + async def execute_model_async( + self, + execute_model_req: ExecuteModelRequest, + ) -> List[SamplerOutput]: + output = await make_async( + self.driver_worker.execute_model + )(seq_group_metadata_list=execute_model_req.seq_group_metadata_list, ) + return output + + async def check_health_async(self) -> None: + # NeuronExecutor will always be healthy as long as + # it's running. 
+ return diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py new file mode 100644 index 0000000..afc1c88 --- /dev/null +++ b/vllm/executor/ray_gpu_executor.py @@ -0,0 +1,327 @@ +import asyncio +import os +import pickle +from collections import defaultdict +from itertools import islice, repeat +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple + +import vllm.envs as envs +from vllm.executor.distributed_gpu_executor import ( # yapf: disable + DistributedGPUExecutor, DistributedGPUExecutorAsync) +from vllm.executor.ray_utils import RayWorkerWrapper, ray +from vllm.logger import init_logger +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, + get_vllm_instance_id, make_async) + +if ray is not None: + from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy + +if TYPE_CHECKING: + from ray.util.placement_group import PlacementGroup + +logger = init_logger(__name__) + +USE_RAY_COMPILED_DAG = envs.VLLM_USE_RAY_COMPILED_DAG + + +class RayGPUExecutor(DistributedGPUExecutor): + + def _init_executor(self) -> None: + assert (not self.speculative_config + ), "Speculative decoding not yet supported for RayGPU backend." + + assert self.parallel_config.worker_use_ray + placement_group = self.parallel_config.placement_group + + # Disable Ray usage stats collection. + ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0") + if ray_usage != "1": + os.environ["RAY_USAGE_STATS_ENABLED"] = "0" + + # Create the parallel GPU workers. + self._init_workers_ray(placement_group) + + self.forward_dag = None + if USE_RAY_COMPILED_DAG: + self.forward_dag = self._compiled_ray_dag() + + def _configure_ray_workers_use_nsight(self, + ray_remote_kwargs) -> Dict[str, Any]: + # If nsight profiling is enabled, we need to set the profiling + # configuration for the ray workers as runtime env. 
+ runtime_env = ray_remote_kwargs.setdefault("runtime_env", {}) + runtime_env.update({ + "nsight": { + "t": "cuda,cudnn,cublas", + "o": "'worker_process_%p'", + "cuda-graph-trace": "node", + } + }) + + return ray_remote_kwargs + + def _init_workers_ray(self, placement_group: "PlacementGroup", + **ray_remote_kwargs): + if self.parallel_config.tensor_parallel_size == 1: + # For single GPU case, we use a ray worker with constrained memory. + num_gpus = self.cache_config.gpu_memory_utilization + else: + # Otherwise, the ray workers are allocated with a full GPU. + num_gpus = 1 + + # The driver dummy worker does not actually use any resources. + # It holds the resource for the driver worker. + self.driver_dummy_worker: Optional[RayWorkerWrapper] = None + # The remaining workers are the actual ray actors. + self.workers: List[RayWorkerWrapper] = [] + + if self.parallel_config.ray_workers_use_nsight: + ray_remote_kwargs = self._configure_ray_workers_use_nsight( + ray_remote_kwargs) + + # Create the workers. + driver_ip = get_ip() + for bundle_id, bundle in enumerate(placement_group.bundle_specs): + if not bundle.get("GPU", 0): + continue + scheduling_strategy = PlacementGroupSchedulingStrategy( + placement_group=placement_group, + placement_group_capture_child_tasks=True, + placement_group_bundle_index=bundle_id, + ) + worker = ray.remote( + num_cpus=0, + num_gpus=num_gpus, + scheduling_strategy=scheduling_strategy, + **ray_remote_kwargs, + )(RayWorkerWrapper).remote( + worker_module_name="vllm.worker.worker", + worker_class_name="Worker", + trust_remote_code=self.model_config.trust_remote_code, + ) + + worker_ip = ray.get(worker.get_node_ip.remote()) + if worker_ip == driver_ip and self.driver_dummy_worker is None: + # If the worker is on the same node as the driver, we use it + # as the resource holder for the driver process. 
+ self.driver_dummy_worker = worker + self.driver_worker = RayWorkerWrapper( + worker_module_name="vllm.worker.worker", + worker_class_name="Worker", + trust_remote_code=self.model_config.trust_remote_code, + ) + else: + # Else, added to the list of workers. + self.workers.append(worker) + + if self.driver_dummy_worker is None: + raise ValueError( + "Ray does not allocate any GPUs on the driver node. Consider " + "adjusting the Ray placement group or running the driver on a " + "GPU node.") + + # Get the set of GPU IDs used on each node. + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) + + node_workers = defaultdict(list) + node_gpus = defaultdict(list) + + for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids): + node_workers[node_id].append(i) + node_gpus[node_id].extend(gpu_ids) + for node_id, gpu_ids in node_gpus.items(): + node_gpus[node_id] = sorted(gpu_ids) + + VLLM_INSTANCE_ID = get_vllm_instance_id() + + # Set environment variables for the driver and workers. + all_args_to_update_environment_variables = [({ + "CUDA_VISIBLE_DEVICES": + ",".join(map(str, node_gpus[node_id])), + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, + "VLLM_TRACE_FUNCTION": + str(envs.VLLM_TRACE_FUNCTION), + }, ) for (node_id, _) in worker_node_and_gpu_ids] + self._run_workers("update_environment_variables", + all_args=all_args_to_update_environment_variables) + + distributed_init_method = get_distributed_init_method( + driver_ip, get_open_port()) + + # Initialize the actual workers inside worker wrapper. + init_worker_all_kwargs = [ + self._get_worker_kwargs( + local_rank=node_workers[node_id].index(rank), + rank=rank, + distributed_init_method=distributed_init_method, + ) for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids) + ] + self._run_workers("init_worker", all_kwargs=init_worker_all_kwargs) + + self._run_workers("init_device") + self._run_workers("load_model", + max_concurrent_workers=self.parallel_config. 
+ max_parallel_loading_workers) + + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + all_outputs = self._run_workers( + "execute_model", + driver_kwargs={"execute_model_req": execute_model_req}, + use_ray_compiled_dag=USE_RAY_COMPILED_DAG) + + # Only the driver worker returns the sampling results. + return all_outputs[0] + + def _run_workers( + self, + method: str, + *args, + driver_args: Optional[Tuple[Any, ...]] = None, + driver_kwargs: Optional[Dict[str, Any]] = None, + all_args: Optional[List[Tuple[Any, ...]]] = None, + all_kwargs: Optional[List[Dict[str, Any]]] = None, + use_dummy_driver: bool = False, + max_concurrent_workers: Optional[int] = None, + use_ray_compiled_dag: bool = False, + **kwargs, + ) -> Any: + """Runs the given method on all workers. Can be used in the following + ways: + + - args/kwargs: All workers share the same args/kwargs + - args/kwargs and driver_args/driver_kwargs: Driver worker has + different args + - all_args/all_kwargs: args/kwargs for each worker are specified + individually + """ + + if max_concurrent_workers: + raise NotImplementedError( + "max_concurrent_workers is not supported yet.") + + if driver_args is None: + driver_args = args if all_args is None else all_args[0] + if driver_kwargs is None: + driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] + + count = len(self.workers) + all_worker_args = repeat(args, count) if all_args is None \ + else islice(all_args, 1, None) + all_worker_kwargs = repeat(kwargs, count) if all_kwargs is None \ + else islice(all_kwargs, 1, None) + + if use_ray_compiled_dag: + # Right now, compiled DAG can only accept a single + # input. TODO(sang): Fix it. + assert self.forward_dag is not None + output_channels = self.forward_dag.execute(1) + else: + # Start the ray workers first. 
+ ray_worker_outputs = [ + worker.execute_method.remote(method, *worker_args, + **worker_kwargs) + for (worker, worker_args, worker_kwargs + ) in zip(self.workers, all_worker_args, all_worker_kwargs) + ] + + # Start the driver worker after all the ray workers. + if not use_dummy_driver: + driver_worker_output = self.driver_worker.execute_method( + method, *driver_args, **driver_kwargs) + else: + assert self.driver_dummy_worker is not None + driver_worker_output = ray.get( + self.driver_dummy_worker.execute_method.remote( + method, *driver_args, **driver_kwargs)) + # Get the results of the ray workers. + if self.workers: + if use_ray_compiled_dag: + try: + ray_worker_outputs = [ + pickle.loads(chan.begin_read()) + for chan in output_channels + ] + finally: + # Has to call end_read in order to reuse the DAG. + for chan in output_channels: + chan.end_read() + else: + ray_worker_outputs = ray.get(ray_worker_outputs) + + return [driver_worker_output] + ray_worker_outputs + + def _compiled_ray_dag(self): + import pkg_resources + required_version = "2.9" + current_version = pkg_resources.get_distribution("ray").version + if current_version < required_version: + raise ValueError(f"Ray version {required_version} or greater is " + f"required, but found {current_version}") + + from ray.dag import InputNode, MultiOutputNode + assert self.parallel_config.worker_use_ray + + # Right now, compiled DAG requires at least 1 arg. We send + # a dummy value for now. It will be fixed soon. + with InputNode() as input_data: + forward_dag = MultiOutputNode([ + worker.execute_model_compiled_dag_remote. 
+ bind( # type: ignore[attr-defined] + input_data) for worker in self.workers + ]) + return forward_dag.experimental_compile() + + def check_health(self) -> None: + """Raises an error if engine is unhealthy.""" + self._check_if_any_actor_is_dead() + + def _check_if_any_actor_is_dead(self): + if not self.workers: + return + + dead_actors = [] + for actor in self.workers: + actor_state = ray.state.actors(actor._ray_actor_id.hex()) # pylint: disable=protected-access + if actor_state["State"] == "DEAD": + dead_actors.append(actor) + if dead_actors: + raise RuntimeError("At least one Worker is dead. " + f"Dead Workers: {dead_actors}. ") + + +class RayGPUExecutorAsync(RayGPUExecutor, DistributedGPUExecutorAsync): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.driver_executor = make_async(self.driver_worker.execute_method) + + async def _run_workers_async( + self, + method: str, + *args, + driver_args: Optional[Tuple[Any, ...]] = None, + driver_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ) -> Any: + """Runs the given method on all workers.""" + coros = [] + + if driver_args is None: + driver_args = args + if driver_kwargs is None: + driver_kwargs = kwargs + + coros.append( + self.driver_executor(method, *driver_args, **driver_kwargs)) + + # Run the ray workers asynchronously. 
+ for worker in self.workers: + coros.append(worker.execute_method.remote(method, *args, **kwargs)) + + all_outputs = await asyncio.gather(*coros) + return all_outputs diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py new file mode 100644 index 0000000..4a22216 --- /dev/null +++ b/vllm/executor/ray_utils.py @@ -0,0 +1,119 @@ +import pickle +from typing import List, Optional, Tuple + +from vllm.config import ParallelConfig +from vllm.logger import init_logger +from vllm.utils import get_ip, is_hip +from vllm.worker.worker_base import WorkerWrapperBase + +logger = init_logger(__name__) + +try: + import ray + + class RayWorkerWrapper(WorkerWrapperBase): + """Ray wrapper for vllm.worker.Worker, allowing Worker to be + lazily initialized after Ray sets CUDA_VISIBLE_DEVICES.""" + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + # Since the compiled DAG runs a main execution + # in a different thread that calls cuda.set_device. + # The flag indicates if set_device is called on + # that thread. + self.compiled_dag_cuda_device_set = False + + def get_node_ip(self) -> str: + return get_ip() + + def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]: + node_id = ray.get_runtime_context().get_node_id() + gpu_ids = ray.get_gpu_ids() + return node_id, gpu_ids + + def execute_model_compiled_dag_remote(self, ignored): + """Used only when compiled DAG is enabled.""" + import torch + if not self.compiled_dag_cuda_device_set: + torch.musa.set_device(self.worker.device) + self.compiled_dag_cuda_device_set = True + + output = self.worker.execute_model() + output = pickle.dumps(output) + return output + +except ImportError as e: + logger.warning( + "Failed to import Ray with %r. 
For distributed inference, " + "please install Ray with `pip install ray`.", e) + ray = None # type: ignore + RayWorkerWrapper = None # type: ignore + + +def initialize_ray_cluster( + parallel_config: ParallelConfig, + ray_address: Optional[str] = None, +): + """Initialize the distributed cluster with Ray. + + it will connect to the Ray cluster and create a placement group + for the workers, which includes the specification of the resources + for each distributed worker. + + Args: + parallel_config: The configurations for parallel execution. + ray_address: The address of the Ray cluster. If None, uses + the default Ray cluster address. + """ + if ray is None: + raise ImportError( + "Ray is not installed. Please install Ray to use distributed " + "serving.") + + # Connect to a ray cluster. + if is_hip(): + ray.init(address=ray_address, + ignore_reinit_error=True, + num_gpus=parallel_config.world_size) + else: + ray.init(address=ray_address, ignore_reinit_error=True) + + if parallel_config.placement_group: + # Placement group is already set. + return + + # Create placement group for worker processes + current_placement_group = ray.util.get_current_placement_group() + if current_placement_group: + # We are in a placement group + bundles = current_placement_group.bundle_specs + # Verify that we can use the placement group. 
+ gpu_bundles = 0 + for bundle in bundles: + bundle_gpus = bundle.get("GPU", 0) + if bundle_gpus > 1: + raise ValueError( + "Placement group bundle cannot have more than 1 GPU.") + if bundle_gpus: + gpu_bundles += 1 + if parallel_config.world_size > gpu_bundles: + raise ValueError( + "The number of required GPUs exceeds the total number of " + "available GPUs in the placement group.") + else: + num_gpus_in_cluster = ray.cluster_resources().get("GPU", 0) + if parallel_config.world_size > num_gpus_in_cluster: + raise ValueError( + "The number of required GPUs exceeds the total number of " + "available GPUs in the cluster.") + # Create a new placement group + placement_group_specs = ([{"GPU": 1}] * parallel_config.world_size) + current_placement_group = ray.util.placement_group( + placement_group_specs) + # Wait until PG is ready - this will block until all + # requested resources are available, and will timeout + # if they cannot be provisioned. + ray.get(current_placement_group.ready(), timeout=1800) + + # Set the placement group in the parallel config + parallel_config.placement_group = current_placement_group diff --git a/vllm/logger.py b/vllm/logger.py new file mode 100644 index 0000000..153cdfb --- /dev/null +++ b/vllm/logger.py @@ -0,0 +1,153 @@ +"""Logging configuration for vLLM.""" +import datetime +import json +import logging +import os +import sys +from functools import partial +from logging import Logger +from logging.config import dictConfig +from os import path +from typing import Dict, Optional + +import vllm.envs as envs + +VLLM_CONFIGURE_LOGGING = envs.VLLM_CONFIGURE_LOGGING +VLLM_LOGGING_CONFIG_PATH = envs.VLLM_LOGGING_CONFIG_PATH + +_FORMAT = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" +_DATE_FORMAT = "%m-%d %H:%M:%S" + +DEFAULT_LOGGING_CONFIG = { + "formatters": { + "vllm": { + "class": "vllm.logging.NewLineFormatter", + "datefmt": _DATE_FORMAT, + "format": _FORMAT, + }, + }, + "handlers": { + "vllm": { + "class": 
"logging.StreamHandler", + "formatter": "vllm", + "level": "INFO", + "stream": "ext://sys.stdout", + }, + }, + "loggers": { + "vllm": { + "handlers": ["vllm"], + "level": "DEBUG", + "propagate": False, + }, + }, + "version": 1, +} + + +def _configure_vllm_root_logger() -> None: + logging_config: Optional[Dict] = None + + if not VLLM_CONFIGURE_LOGGING and VLLM_LOGGING_CONFIG_PATH: + raise RuntimeError( + "VLLM_CONFIGURE_LOGGING evaluated to false, but " + "VLLM_LOGGING_CONFIG_PATH was given. VLLM_LOGGING_CONFIG_PATH " + "implies VLLM_CONFIGURE_LOGGING. Please enable " + "VLLM_CONFIGURE_LOGGING or unset VLLM_LOGGING_CONFIG_PATH.") + + if VLLM_CONFIGURE_LOGGING: + logging_config = DEFAULT_LOGGING_CONFIG + + if VLLM_LOGGING_CONFIG_PATH: + if not path.exists(VLLM_LOGGING_CONFIG_PATH): + raise RuntimeError( + "Could not load logging config. File does not exist: %s", + VLLM_LOGGING_CONFIG_PATH) + with open(VLLM_LOGGING_CONFIG_PATH, encoding="utf-8", + mode="r") as file: + custom_config = json.loads(file.read()) + + if not isinstance(custom_config, dict): + raise ValueError("Invalid logging config. Expected Dict, got %s.", + type(custom_config).__name__) + logging_config = custom_config + + if logging_config: + dictConfig(logging_config) + + +def init_logger(name: str) -> Logger: + """The main purpose of this function is to ensure that loggers are + retrieved in such a way that we can be sure the root vllm logger has + already been configured.""" + + return logging.getLogger(name) + + +# The root logger is initialized when the module is imported. +# This is thread-safe as the module is only imported once, +# guaranteed by the Python GIL. 
+_configure_vllm_root_logger() + +logger = init_logger(__name__) + + +def _trace_calls(log_path, root_dir, frame, event, arg=None): + if event in ['call', 'return']: + # Extract the filename, line number, function name, and the code object + filename = frame.f_code.co_filename + lineno = frame.f_lineno + func_name = frame.f_code.co_name + if not filename.startswith(root_dir): + # only log the functions in the vllm root_dir + return + # Log every function call or return + try: + last_frame = frame.f_back + if last_frame is not None: + last_filename = last_frame.f_code.co_filename + last_lineno = last_frame.f_lineno + last_func_name = last_frame.f_code.co_name + else: + # initial frame + last_filename = "" + last_lineno = 0 + last_func_name = "" + with open(log_path, 'a') as f: + if event == 'call': + f.write(f"{datetime.datetime.now()} Call to" + f" {func_name} in {filename}:{lineno}" + f" from {last_func_name} in {last_filename}:" + f"{last_lineno}\n") + else: + f.write(f"{datetime.datetime.now()} Return from" + f" {func_name} in {filename}:{lineno}" + f" to {last_func_name} in {last_filename}:" + f"{last_lineno}\n") + except NameError: + # modules are deleted during shutdown + pass + return partial(_trace_calls, log_path, root_dir) + + +def enable_trace_function_call(log_file_path: str, + root_dir: Optional[str] = None): + """ + Enable tracing of every function call in code under `root_dir`. + This is useful for debugging hangs or crashes. + `log_file_path` is the path to the log file. + `root_dir` is the root directory of the code to trace. If None, it is the + vllm root directory. + + Note that this call is thread-level, any threads calling this function + will have the trace enabled. Other threads will not be affected. + """ + logger.warning( + "VLLM_TRACE_FUNCTION is enabled. It will record every" + " function executed by Python. This will slow down the code. 
It " + "is suggested to be used for debugging hang or crashes only.") + logger.info("Trace frame log is saved to %s", log_file_path) + if root_dir is None: + # by default, this is the vllm root directory + root_dir = os.path.dirname(os.path.dirname(__file__)) + sys.settrace(partial(_trace_calls, log_file_path, root_dir)) diff --git a/vllm/logging/__init__.py b/vllm/logging/__init__.py new file mode 100644 index 0000000..b9aec38 --- /dev/null +++ b/vllm/logging/__init__.py @@ -0,0 +1,5 @@ +from vllm.logging.formatter import NewLineFormatter + +__all__ = [ + "NewLineFormatter", +] diff --git a/vllm/logging/formatter.py b/vllm/logging/formatter.py new file mode 100644 index 0000000..b24b4e1 --- /dev/null +++ b/vllm/logging/formatter.py @@ -0,0 +1,15 @@ +import logging + + +class NewLineFormatter(logging.Formatter): + """Adds logging prefix to newlines to align multi-line messages.""" + + def __init__(self, fmt, datefmt=None, style="%"): + logging.Formatter.__init__(self, fmt, datefmt, style) + + def format(self, record): + msg = logging.Formatter.format(self, record) + if record.message != "": + parts = msg.split(record.message) + msg = msg.replace("\n", "\r\n" + parts[0]) + return msg diff --git a/vllm/lora/__init__.py b/vllm/lora/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py new file mode 100644 index 0000000..1720566 --- /dev/null +++ b/vllm/lora/fully_sharded_layers.py @@ -0,0 +1,262 @@ +# pylint: disable=unused-argument +from typing import TYPE_CHECKING, List, Optional + +import torch +import torch.nn as nn +from transformers import PretrainedConfig + +from vllm.config import LoRAConfig +from vllm.distributed.communication_op import ( + tensor_model_parallel_all_gather, tensor_model_parallel_all_reduce) +from vllm.distributed.parallel_state import get_tensor_model_parallel_rank +from vllm.lora.layers import (ColumnParallelLinearWithLoRA, + 
MergedColumnParallelLinearWithLoRA, + MergedQKVParallelLinearWithLora, + RowParallelLinearWithLoRA) +from vllm.lora.punica import bgmv, dispatch_bgmv_low_level + +if TYPE_CHECKING: + pass + + +def _fully_sharded_can_replace(can_replace): + """ + decorator which adds the condition of fully sharded loras + intended to wrap can_replace_layer() + """ + + def dec(*args, **kwargs): + return (can_replace(*args, **kwargs) + and kwargs['lora_config'].fully_sharded_loras) + + return dec + + +# these layers are based on the tensor parallelism strategy given in +# Y. Sheng et al., S-LoRA: Serving Thousands of Concurrent LoRA Adapters. 2023, +# https://arxiv.org/abs/2311.03285. + + +class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): + """ + Differs from ColumnParallelLinearWithLoRA by slicing LoRA A also. + + Based on S-LoRA, slicing happens along the rank dim. + """ + + def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.lora_a_stacked.shape[2] + start_idx = tp_rank * shard_size + lora_a = lora_a[:, start_idx:start_idx + shard_size] + return lora_a + + def apply_weights(self, x: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + output = self.base_layer.linear_method.apply_weights( + self.base_layer, x, bias) + + x = x.view(-1, x.shape[-1]) + output, out_orig_shape = output.view(-1, + output.shape[-1]), output.shape + buffer = torch.zeros((x.shape[0], self.lora_a_stacked.shape[2]), + dtype=torch.float32, + device=x.device) + + bgmv(buffer, x, self.lora_a_stacked, + self.indices[:self.indices_len[0]], 0, 1.0) + buffer = tensor_model_parallel_all_gather(buffer) + bgmv(output, buffer, self.lora_b_stacked, + self.indices[:self.indices_len[0]], 0, 1.0) + # now have column partitioned output + + output = output.view(*out_orig_shape) + return output + + @classmethod + @_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, 
packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + # specifying kwargs so they can be easily accessed in decorator + return super().can_replace_layer( + source_layer=source_layer, + lora_config=lora_config, + packed_modules_list=packed_modules_list, + model_config=model_config, + decorate=False, + ) + + +def _mcp_apply_weights(x, bias, layer): + """ + MergedColumnParallelLinearWithShardedLoRA and + QKVParallelLinearWithShardedLora share the same + LoRa weight application method. + + The main difference is the step by shard_size for lora_b which can + vary for QKVParallelLinearWithShardedLora but is constant for + MergedColumnParallelLinearWithShardedLoRA. + """ + # expecting 2 for column parallel and 3 for qkv + n = len(layer.lora_a_stacked) + output = layer.base_layer.linear_method.apply_weights( + layer.base_layer, x, bias) + + x = x.view(-1, x.shape[-1]) + output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape + buffers = torch.zeros((n, x.shape[0], layer.lora_a_stacked[0].shape[2]), + dtype=torch.float32, + device=x.device) + for idx in range(n): + bgmv(buffers[idx], x, layer.lora_a_stacked[idx], + layer.indices[:layer.indices_len[0]], 0, 1.0) + + buffers = tensor_model_parallel_all_gather(buffers) + left_offset = 0 + for idx in range(n): + shard_size = layer.lora_b_stacked[idx].shape[2] + dispatch_bgmv_low_level(output, buffers[idx], + layer.lora_b_stacked[idx], + layer.indices[:layer.indices_len[0]], 0, 1.0, + left_offset, shard_size) + left_offset += shard_size + + output = output.view(*out_orig_shape) + # now have column partitioned and packed output + return output + + +class MergedColumnParallelLinearWithShardedLoRA( + MergedColumnParallelLinearWithLoRA): + """ + Differs from MergedColumnParallelLinearWithLoRA by slicing the + LoRA A's also. + + Based on S-LoRA, slicing happens along the rank dim. 
+ """ + + def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]: + output_shard_size = self.lora_a_stacked[0].shape[2] + output_start_idx = self.tp_rank * output_shard_size + lora_a = [ + lora_a[i][:, output_start_idx:output_start_idx + output_shard_size] + for i in range(2) + ] + return lora_a + + def apply_weights(self, x: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + return _mcp_apply_weights(x, bias, self) + + @classmethod + @_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + # specifying kwargs so they can be easily accessed in decorator + return super().can_replace_layer( + source_layer=source_layer, + lora_config=lora_config, + packed_modules_list=packed_modules_list, + model_config=model_config, + decorate=False, + ) + + +class MergedQKVParallelLinearWithShardedLora(MergedQKVParallelLinearWithLora): + """ + Differs from QKVParallelLinearWithLora by slicing the + LoRA A's also. + + Based on S-LoRA, slicing happens along the rank dim. 
+ """ + + def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]: + shard_size = [self.lora_a_stacked[i].shape[2] for i in range(3)] + start_idx = [self.tp_rank * shard_size[i] for i in range(3)] + lora_a = [ + lora_a[i][:, start_idx[i]:start_idx[i] + + shard_size[i]] if lora_a[i] is not None else None + for i in range(3) + ] + return lora_a + + def apply_weights(self, x: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + return _mcp_apply_weights(x, bias, self) + + @classmethod + @_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + # specifying kwargs so they can be easily accessed in decorator + return super().can_replace_layer( + source_layer=source_layer, + lora_config=lora_config, + packed_modules_list=packed_modules_list, + model_config=model_config, + decorate=False, + ) + + +class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA): + """ + Differs from RowParallelLinearWithLoRA by slicing the + LoRA B's also. + + Based on S-LoRA, slicing happens along the output dim. + This yields a combined partial sum from the row parallel base + layer and column partitioned output from the LoRA. 
+ """ + + def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: + shard_size = self.lora_b_stacked.shape[2] + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size + lora_b = lora_b[:, start_idx:end_idx] + return lora_b + + def apply_weights(self, x: torch.Tensor) -> torch.Tensor: + output = self.base_layer.linear_method.apply_weights( + self.base_layer, x) + + x = x.view(-1, x.shape[-1]) + output, out_orig_shape = output.view(-1, + output.shape[-1]), output.shape + buffer = torch.zeros((x.shape[0], self.lora_a_stacked.shape[2]), + dtype=torch.float32, + device=x.device) + bgmv(buffer, x, self.lora_a_stacked, + self.indices[:self.indices_len[0]], 0, 1.0) + buffer = tensor_model_parallel_all_reduce(buffer) + + # following S-LoRA, allows the fusing of all_gather and all_reduce + # by adding the column partitioned lora output to a slice of output + # tensor, which is a partial sum due to row parallel. All that + # remains is a standard all_reduce. User should be aware though that + # the output is not the same as a normal row_parallel, it should be + # reduced before being used + shard_size = self.lora_b_stacked.shape[2] + start_idx = self.tp_rank * shard_size + dispatch_bgmv_low_level(output, buffer, self.lora_b_stacked, + self.indices[:self.indices_len[0]], 0, 1.0, + start_idx, shard_size) + + output = output.view(*out_orig_shape) + return output + + @classmethod + @_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + # specifying kwargs so they can be easily accessed in decorator + return super().can_replace_layer( + source_layer=source_layer, + lora_config=lora_config, + packed_modules_list=packed_modules_list, + model_config=model_config, + decorate=False, + ) diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py new file mode 100644 index 0000000..b360966 --- /dev/null +++ 
b/vllm/lora/layers.py @@ -0,0 +1,1181 @@ +# pylint: disable=unused-argument +import math +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers import PretrainedConfig + +from vllm.config import LoRAConfig +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + split_tensor_along_last_dim, + tensor_model_parallel_all_gather, + tensor_model_parallel_all_reduce, + tensor_model_parallel_gather) +from vllm.distributed.utils import divide +from vllm.lora.punica import add_lora, add_lora_slice, bgmv +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) + +if TYPE_CHECKING: + pass + + +def _get_lora_device(base_layer: nn.Module) -> torch.device: + # code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34 + """Returns the device for where to place the LoRA tensors.""" + # unquantizedLinear + if hasattr(base_layer, "weight"): + return base_layer.weight.device + # GPTQ/AWQ/SqueezeLLM + elif hasattr(base_layer, "qweight"): + return base_layer.qweight.device + # marlin + elif hasattr(base_layer, "B"): + return base_layer.B.device + else: + raise ValueError(f"Unsupported base layer: {base_layer}") + + +def _not_fully_sharded_can_replace(can_replace): + """ + decorator which adds the condition of not using fully sharded loras + intended to wrap can_replace_layer() + """ + + def dec(*args, **kwargs): + decorate = kwargs.pop('decorate') if 'decorate' in kwargs else True + condition = (not kwargs['lora_config'].fully_sharded_loras + if decorate else True) + return can_replace(*args, **kwargs) and 
condition

    return dec


def _apply_lora(
    x: torch.Tensor,
    lora_a_stacked: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    indices: torch.Tensor,
    output: torch.Tensor,
):
    """Applies lora to each input.

    This method applies all loras to each input. It uses the
    indices vector to determine which lora yields the
    correct output. An index of -1 means no lora should be
    applied. This method adds the final lora results to the
    output.

    Input shapes:
        x: (batch_size, hidden_dim)
        lora_a_stacked: (num_loras, lora_rank, hidden_dim)
        lora_b_stacked: (num_loras, output_dim, lora_rank)
        indices: (batch_size)
        output: (batch_size, output_dim)
    """
    # Keep a handle on the caller's tensor: the flattening below produces
    # views, so add_lora writes land in the original storage and we only
    # need view_as() to restore the caller's shape.
    org_output = output
    # Collapse any leading (e.g. batch x seq) dims to a single token dim.
    x = x.view(-1, x.shape[-1])
    output = output.view(-1, output.shape[-1])
    indices = indices.view(-1)
    # add_lora accumulates x @ A^T @ B^T into `output` in place
    # (offset 0, scale 1.0).
    add_lora(output, x, lora_a_stacked, lora_b_stacked, indices, 0, 1.0)
    return output.view_as(org_output)


def _apply_lora_packed_nslice(
    x: torch.Tensor,
    lora_a_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    lora_b_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    indices: torch.Tensor,
    output: torch.Tensor,
    output_slices: Tuple[int, ...],
):
    """Applies lora to each input.

    This method applies all loras to each input. It uses the
    indices vector to determine which lora yields the
    correct output. An index of -1 means no lora should be
    applied. This method adds the final lora results to the
    output.

    This method is used for layers that are composed of multiple sublayers
    (slices) packed together.
+ + Input shapes: + x: (batch_size, hidden_dim) + lora_a_stacked: 3 element tuple of (num_loras, lora_rank, hidden_dim) + lora_b_stacked: 3 element tuple of (num_loras, output_dim, lora_rank) + indices: (batch_size) + output: (batch_size, q_slice_size + 2*kv_slice_size) + output_slices: n-1 element tuple of (slice_size...), + where n is number of slices + """ + org_output = output + x = x.view(-1, x.shape[-1]) + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + offset_left = 0 + for slice_idx in range(len(output_slices)): + add_lora_slice(output, x, lora_a_stacked[slice_idx], + lora_b_stacked[slice_idx], indices, 0, 1.0, offset_left, + output_slices[slice_idx]) + offset_left += output_slices[slice_idx] + return output.view_as(org_output) + + +@dataclass +class LoRAMapping: + # Per every token in input_ids: + index_mapping: Tuple[int, ...] + # Per sampled token: + prompt_mapping: Tuple[int, ...] + + def __post_init__(self): + self.index_mapping = tuple(self.index_mapping) + self.prompt_mapping = tuple(self.prompt_mapping) + + +class BaseLayerWithLoRA(nn.Module): + + def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + """Slice lora a if splitting for tensor parallelism.""" + ... + + def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: + """Slice lora b if splitting with tensor parallelism.""" + ... + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + """Initializes lora matrices.""" + ... + + def reset_lora(self, index: int): + """Resets the lora weights at index back to 0.""" + ... + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + """Overwrites lora tensors at index.""" + ... 
+ + def set_mapping( + self, + base_indices: torch.Tensor, + sampler_indices: torch.Tensor, + sampler_indices_padded: torch.Tensor, + embeddings_indices: torch.Tensor, + indices_len: List[int], + ): + """Sets the mapping indices.""" + ... + + @classmethod + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + """Returns True if the layer can be replaced by this LoRA layer.""" + raise NotImplementedError + + +class VocabParallelEmbeddingWithLoRA(BaseLayerWithLoRA): + + def __init__(self, base_layer: VocabParallelEmbedding) -> None: + super().__init__() + self.base_layer = base_layer + self.embeddings_slice: Optional[Tuple[int, int]] + self.embeddings_weights: Optional[torch.Tensor] + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + + lora_vocab_start_idx = self.base_layer.org_vocab_size + weights_idx = None + if self.base_layer.vocab_end_index > lora_vocab_start_idx: + # We can start adding lora weights + weights_idx = max( + lora_vocab_start_idx - self.base_layer.vocab_start_index, 0) + self.embeddings_slice = (self.base_layer.vocab_start_index - + self.base_layer.org_vocab_size + + weights_idx, + self.base_layer.vocab_end_index - + self.base_layer.org_vocab_size) + self.embeddings_weights = self.base_layer.weight.data[weights_idx:] + self.embeddings_weights.fill_(0) + else: + self.embeddings_slice = None + self.embeddings_weights = None + + self.embeddings_tensors = torch.zeros( + ( + max_loras, + lora_config.lora_extra_vocab_size, + self.base_layer.embedding_dim, + ), + dtype=self.base_layer.weight.dtype, + device=self.base_layer.weight.device, + ) + self.lora_a_stacked = torch.zeros( + ( + max_loras, + self.base_layer.org_vocab_size + + lora_config.lora_extra_vocab_size, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + 
device=self.base_layer.weight.device, + ) + self.lora_b_stacked = torch.zeros( + ( + max_loras, + 1, + self.base_layer.embedding_dim, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.base_layer.weight.device, + ) + self.lora_a_stacked_2d = self.lora_a_stacked.view( + self.lora_a_stacked.shape[0] * self.lora_a_stacked.shape[1], + self.lora_a_stacked.shape[2], + ) + # Lazily initialized. + self.indices: torch.Tensor + self.indices_len: List[int] + self.embeddings_indices: torch.Tensor + + def reset_lora(self, index: int): + self.lora_a_stacked[index] = 0 + self.lora_b_stacked[index] = 0 + self.embeddings_tensors[index] = 0 + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + self.lora_a_stacked[index, :lora_a.shape[0], :lora_a.shape[1]].copy_( + lora_a, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if embeddings_tensor is not None: + self.embeddings_tensors[ + index, :embeddings_tensor.shape[0], :embeddings_tensor. 
+ shape[1]].copy_(embeddings_tensor, non_blocking=True) + if self.embeddings_slice is not None: + # TODO(yard1): Optimize this copy, we don't need to copy + # everything, just the modified part + embeddings = self.embeddings_tensors.view( + self.embeddings_tensors.shape[0] * + self.embeddings_tensors.shape[1], + self.embeddings_tensors.shape[2] + )[self.embeddings_slice[0]:self.embeddings_slice[1]] + assert self.embeddings_weights is not None + self.embeddings_weights[:embeddings.shape[0]].copy_(embeddings) + + def set_mapping( + self, + base_indices: torch.Tensor, + sampler_indices: torch.Tensor, + sampler_indices_padded: torch.Tensor, + embeddings_indices: torch.Tensor, + indices_len: List[int], + ): + self.indices = base_indices + self.embeddings_indices = embeddings_indices + self.indices_len = indices_len + + def forward(self, x: torch.Tensor) -> torch.Tensor: + added_tokens_mask = x > self.base_layer.org_vocab_size - 1 + embedding_len = self.indices_len[3] + indices = self.embeddings_indices[1][:embedding_len].view_as(x) + full_lora_a_embeddings = F.embedding( + x + indices, + self.lora_a_stacked_2d, + ) + indices = self.embeddings_indices[0][:embedding_len].view_as(x) + full_output = self.base_layer.forward( + x.add_(indices * added_tokens_mask)) + + full_output_org = full_output + if full_output.ndim == 3: + full_output = full_output.view( + full_output.shape[0] * full_output.shape[1], -1) + if full_lora_a_embeddings.ndim == 3: + full_lora_a_embeddings = full_lora_a_embeddings.view( + full_lora_a_embeddings.shape[0] * + full_lora_a_embeddings.shape[1], -1) + bgmv(full_output, full_lora_a_embeddings, self.lora_b_stacked, + self.indices[:self.indices_len[0]], 0, 1.0) + return full_output.view_as(full_output_org) + + @classmethod + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is VocabParallelEmbedding + + +class 
ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): + """ + LoRA on top of ColumnParallelLinear layer. + + LoRA B is sliced for tensor parallelism. + """ + + def __init__(self, base_layer: ColumnParallelLinear) -> None: + super().__init__() + self.base_layer = base_layer + self.tp_size = get_tensor_model_parallel_world_size() + self.input_size = self.base_layer.input_size + self.output_size = self.base_layer.output_size_per_partition + self.device = _get_lora_device(self.base_layer) + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + self.lora_config = lora_config + self.tp_size = get_tensor_model_parallel_world_size() + lora_a_output_size_per_partition = ( + lora_config.max_lora_rank if not lora_config.fully_sharded_loras + else divide(lora_config.max_lora_rank, self.tp_size)) + self.lora_a_stacked = torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.lora_b_stacked = torch.zeros( + max_loras, + 1, + self.output_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.output_dim = self.lora_b_stacked.shape[2] + + # lazily initialized. 
        # Lazily initialized by set_mapping().
        self.indices: torch.Tensor
        self.indices_len: List[int]

    def reset_lora(self, index: int):
        """Zero out the LoRA A/B slots for adapter `index`."""
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        # LoRA A is replicated for column-parallel layers; only B is sharded.
        return lora_a

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        # Take this rank's column shard of lora_b (columns are the output
        # dim, which is what ColumnParallelLinear partitions).
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        shard_size = self.output_dim
        start_idx = tensor_model_parallel_rank * shard_size
        end_idx = (tensor_model_parallel_rank + 1) * shard_size
        lora_b = lora_b[:, start_idx:end_idx]
        return lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        """Copy one adapter's (possibly TP-sliced) weights into slot `index`.

        Weights are stored transposed relative to the incoming tensors, so
        the stacked buffers hold A as (rank, in) and B as (out, rank).
        """
        self.reset_lora(index)

        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        self.lora_a_stacked[index,
                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
                                lora_a.T, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        # Linear layers only need the per-token base indices; the sampler/
        # embedding index tensors are used by other layer types.
        self.indices = base_indices
        self.indices_len = indices_len

    def apply(self, x: torch.Tensor,
              bias: Optional[torch.Tensor]) -> torch.Tensor:
        # Base (possibly quantized) matmul first, then add the LoRA delta
        # into `output` in place.
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
        _apply_lora(
            x,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[0]],
            output,
        )
        return output

    def forward(self, input_):
        """Forward of ColumnParallelLinear

        Args:
            input_: Tensor whose last dimension is `input_size`.

        Returns:
            - output
            - bias
        """
        bias = (self.base_layer.bias
                if not self.base_layer.skip_bias_add else None)

        # Matrix multiply.
+ output_parallel = self.apply(input_, bias) + if self.base_layer.gather_output: + # All-gather across the partitions. + output = tensor_model_parallel_all_gather(output_parallel) + else: + output = output_parallel + output_bias = (self.base_layer.bias + if self.base_layer.skip_bias_add else None) + return output, output_bias + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is ColumnParallelLinear or ( + type(source_layer) is MergedColumnParallelLinear + and len(packed_modules_list) == 1) + + +class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): + """ColumnParallelLinear layer that is composed of 2 sublayers (slices) + packed together (eg. gate_proj + up_proj -> gate_up_proj). + + This means we have 2 LoRAs, each applied to one half of the layer. + + Both slices must have the same size. 
+ """ + + def __init__(self, base_layer: MergedColumnParallelLinear) -> None: + super().__init__(base_layer) + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + self.lora_config = lora_config + n_slices = 2 + if not (len(self.base_layer.output_sizes) == n_slices + and self.base_layer.output_sizes[0] + == self.base_layer.output_sizes[1]): + raise ValueError( + "LoRAColumnParallelLinear2Slice requires 2 slices with " + "the same size.") + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + + lora_a_output_size_per_partition = ( + lora_config.max_lora_rank if not lora_config.fully_sharded_loras + else divide(lora_config.max_lora_rank, self.tp_size)) + + self.lora_a_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(n_slices)) + self.lora_b_stacked = tuple( + torch.zeros( + max_loras, + 1, + self.output_size // 2, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(n_slices)) + + self.output_dim = self.lora_b_stacked[0].shape[2] + # Lazily initialized. 
+ self.indices: torch.Tensor + + def reset_lora(self, index: int): + self.lora_a_stacked[0][index] = 0 + self.lora_a_stacked[1][index] = 0 + self.lora_b_stacked[0][index] = 0 + self.lora_b_stacked[1][index] = 0 + + def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]: + return lora_a + + def slice_lora_b(self, lora_b: List[torch.Tensor]) -> List[torch.Tensor]: + shard_size = self.output_dim + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size + lora_b = [ + lora_b[0][:, start_idx:end_idx], lora_b[1][:, start_idx:end_idx] + ] + return lora_b + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + + if lora_a[0] is not None: + self.lora_a_stacked[0][ + index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( + lora_a[0].T, non_blocking=True) + self.lora_b_stacked[0][ + index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_( + lora_b[0].T, non_blocking=True) + if lora_a[1] is not None: + self.lora_a_stacked[1][ + index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( + lora_a[1].T, non_blocking=True) + self.lora_b_stacked[1][ + index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_( + lora_b[1].T, non_blocking=True) + + def apply(self, x: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + output = self.base_layer.quant_method.apply(self.base_layer, x, bias) + _apply_lora_packed_nslice( + x, + self.lora_a_stacked, + self.lora_b_stacked, + self.indices[:self.indices_len[0]], + output, + (self.output_dim, self.output_dim), + ) + return output + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is 
MergedColumnParallelLinear and len( + packed_modules_list) == 2 + + +class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): + """ + ColumnParallelLinear layer that is specifically designed for + qkv_proj. Certain models, such as chtglm3 and baichuan-7b, + only contains a single LoRA within their qkv_proj layer. + + During inference with Tensor Parallel, the weights of lora_b + must be accurately partitioned according to the respective ranks. + + Q slice may have different shape than K and V slices (which both have + the same shape). + """ + + def __init__(self, base_layer: QKVParallelLinear) -> None: + super().__init__(base_layer) + self.tp_size = get_tensor_model_parallel_world_size() + self.q_proj_total_size = (self.base_layer.total_num_heads * + self.base_layer.head_size) + self.q_proj_shard_size = (self.base_layer.num_heads * + self.base_layer.head_size) + self.kv_proj_shard_size = (self.base_layer.num_kv_heads * + self.base_layer.head_size) + self.kv_proj_total_size = (self.base_layer.total_num_kv_heads * + self.base_layer.head_size) + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + if self.tp_size > 1: + tp_rank = get_tensor_model_parallel_rank() + self.q_shard_id = tp_rank + self.kv_shard_id = tp_rank // self.base_layer.num_kv_head_replicas + lora_b_q = lora_b[:, self.q_proj_shard_size * + self.q_shard_id:self.q_proj_shard_size * + (self.q_shard_id + 1)] + k_offset = self.q_proj_total_size + lora_b_k = lora_b[:, k_offset + self.kv_proj_shard_size * + self.kv_shard_id:k_offset + + self.kv_proj_shard_size * (self.kv_shard_id + 1)] + v_offset = k_offset + self.kv_proj_total_size + lora_b_v = lora_b[:, v_offset + self.kv_proj_shard_size * + self.kv_shard_id:v_offset + + self.kv_proj_shard_size * (self.kv_shard_id + 1)] + lora_b = torch.cat([lora_b_q, lora_b_k, lora_b_v], dim=1) + + self.lora_a_stacked[index, + 0, :lora_a.shape[1], 
:lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + + @classmethod + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is QKVParallelLinear and len( + packed_modules_list) == 1 + + +class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): + """ColumnParallelLinear layer that is composed of 3 sublayers (slices) + packed together in qkv proj fashion + (q_proj + k_proj + v_proj -> qkv_proj). + + This means we have 3 LoRAs, each applied to one slice of the layer. + + Q slice may have different shape than K and V slices (which both have + the same shape). + """ + + def __init__(self, base_layer: QKVParallelLinear) -> None: + super().__init__(base_layer) + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + self.lora_config = lora_config + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + self.q_proj_shard_size = (self.base_layer.num_heads * + self.base_layer.head_size) + self.kv_proj_shard_size = (self.base_layer.num_kv_heads * + self.base_layer.head_size) + self.q_shard_id = self.tp_rank + self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas + + lora_a_output_size_per_partition = ( + lora_config.max_lora_rank if not lora_config.fully_sharded_loras + else divide(lora_config.max_lora_rank, self.tp_size)) + # q, k, v + self.lora_a_stacked = ( + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + 
max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + ) + self.lora_b_stacked = ( + torch.zeros( + max_loras, + 1, + self.q_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + ) + + self.output_slices = (self.q_proj_shard_size, self.kv_proj_shard_size, + self.kv_proj_shard_size) + self.packed_indices: Optional[torch.Tensor] = None + self.standard_indices: Optional[torch.Tensor] = None + # lazily initialized. + self.indices_len: List[int] + + def reset_lora(self, index: int): + self.lora_a_stacked[0][index] = 0 + self.lora_b_stacked[0][index] = 0 + self.lora_a_stacked[1][index] = 0 + self.lora_b_stacked[1][index] = 0 + self.lora_a_stacked[2][index] = 0 + self.lora_b_stacked[2][index] = 0 + + def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]: + return lora_a + + def slice_lora_b(self, lora_b: List[torch.Tensor]) -> List[torch.Tensor]: + if lora_b[0] is not None: + lora_b_q = lora_b[0][:, self.q_proj_shard_size * + self.q_shard_id:self.q_proj_shard_size * + (self.q_shard_id + 1)] + if lora_b[1] is not None: + lora_b_k = lora_b[1][:, self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1)] + if lora_b[2] is not None: + lora_b_v = lora_b[2][:, self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1)] + lora_b = [lora_b_q, lora_b_k, lora_b_v] + return lora_b + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + + if self.tp_size > 1: + 
lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + + if lora_b[0] is not None: + lora_b_q = lora_b[0] + self.lora_b_stacked[0][ + index, 0, :lora_b_q.shape[1], :lora_b_q.shape[0]].copy_( + lora_b_q.T, non_blocking=True) + if lora_b[1] is not None: + lora_b_k = lora_b[1] + self.lora_b_stacked[1][ + index, 0, :lora_b_k.shape[1], :lora_b_k.shape[0]].copy_( + lora_b_k.T, non_blocking=True) + if lora_b[2] is not None: + lora_b_v = lora_b[2] + self.lora_b_stacked[2][ + index, 0, :lora_b_v.shape[1], :lora_b_v.shape[0]].copy_( + lora_b_v.T, non_blocking=True) + + if lora_a[0] is not None: + self.lora_a_stacked[0][ + index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( + lora_a[0].T, non_blocking=True) + if lora_a[1] is not None: + self.lora_a_stacked[1][ + index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( + lora_a[1].T, non_blocking=True) + if lora_a[2] is not None: + self.lora_a_stacked[2][ + index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_( + lora_a[2].T, non_blocking=True) + + def apply(self, x: torch.Tensor, + bias: Optional[torch.Tensor]) -> torch.Tensor: + output = self.base_layer.quant_method.apply(self.base_layer, x, bias) + _apply_lora_packed_nslice( + x, + self.lora_a_stacked, + self.lora_b_stacked, + self.indices[:self.indices_len[0]], + output, + self.output_slices, + ) + return output + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is QKVParallelLinear and len( + packed_modules_list) == 3 + + +class RowParallelLinearWithLoRA(BaseLayerWithLoRA): + + def __init__(self, base_layer: RowParallelLinear) -> None: + super().__init__() + self.base_layer = base_layer + self.input_size = self.base_layer.input_size_per_partition + self.output_size = self.base_layer.output_size + self.device = _get_lora_device(self.base_layer) + + def 
create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None) -> None: + self.lora_config = lora_config + self.tp_rank = get_tensor_model_parallel_rank() + self.lora_a_stacked = torch.zeros( + ( + max_loras, + 1, + lora_config.max_lora_rank, + self.input_size, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + tp_size = get_tensor_model_parallel_world_size() + lora_b_output_size_per_partition = ( + self.output_size if not lora_config.fully_sharded_loras else + divide(self.output_size, tp_size)) + + self.lora_b_stacked = torch.zeros( + ( + max_loras, + 1, + lora_b_output_size_per_partition, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + # Lazily initialized + self.indices: torch.Tensor + self.indices_len: List[int] + + def reset_lora(self, index: int): + self.lora_a_stacked[index] = 0 + self.lora_b_stacked[index] = 0 + + def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.input_size + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + lora_a = lora_a[start_idx:end_idx, :] + return lora_a + + def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: + return lora_b + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + + if self.base_layer.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + + self.lora_a_stacked[index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + + def set_mapping( + self, + base_indices: torch.Tensor, + sampler_indices: torch.Tensor, + sampler_indices_padded: torch.Tensor, + 
embeddings_indices: torch.Tensor, + indices_len: List[int], + ): + self.indices = base_indices + self.indices_len = indices_len + + def apply(self, x: torch.Tensor) -> torch.Tensor: + output = self.base_layer.quant_method.apply(self.base_layer, x) + _apply_lora( + x, + self.lora_a_stacked, + self.lora_b_stacked, + self.indices[:self.indices_len[0]], + output, + ) + return output + + def forward(self, input_): + """Forward of RowParallelLinear + + Args: + input_: tensor whose last dimension is `input_size`. If + `input_is_parallel` is set, then the last dimension + is `input_size // tp_size`. + + Returns: + - output + - bias + """ + # Set up backprop all-reduce. + if self.base_layer.input_is_parallel: + input_parallel = input_ + else: + # TODO: simplify code below + tp_rank = get_tensor_model_parallel_rank() + splitted_input = split_tensor_along_last_dim( + input_, num_partitions=self.base_layer.tp_size) + input_parallel = splitted_input[tp_rank].contiguous() + + # Matrix multiply. + output_parallel = self.apply(input_parallel) + if self.base_layer.reduce_results and self.base_layer.tp_size > 1: + output_ = tensor_model_parallel_all_reduce(output_parallel) + else: + output_ = output_parallel + + if not self.base_layer.skip_bias_add: + output = (output_ + self.base_layer.bias + if self.base_layer.bias is not None else output_) + output_bias = None + else: + output = output_ + output_bias = self.base_layer.bias + return output, output_bias + + @property + def weight(self): + + return self.base_layer.weight if hasattr( + self.base_layer, "weight") else self.base_layer.qweight + + @classmethod + @_not_fully_sharded_can_replace + def can_replace_layer(cls, source_layer: nn.Module, + lora_config: LoRAConfig, packed_modules_list: List, + model_config: Optional[PretrainedConfig]) -> bool: + return type(source_layer) is RowParallelLinear + + +class LogitsProcessorWithLoRA(BaseLayerWithLoRA): + + def __init__( + self, + base_layer: LogitsProcessor, + hidden_size: int, + 
dtype: torch.dtype, + device: torch.device, + ) -> None: + super().__init__() + self.base_layer = base_layer + self.hidden_size = hidden_size + self.dtype = dtype + self.device = device + + @property + def logits_as_input(self): + return self.base_layer.logits_as_input + + @property + def vocab_size(self): + return self.base_layer.vocab_size + + @property + def scale(self): + return self.base_layer.scale + + @property + def org_vocab_size(self): + return self.base_layer.org_vocab_size + + @property + def include_gpu_probs_tensor(self): + return self.base_layer.include_gpu_probs_tensor + + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + # Keep this in sync with csrc/punica/bgmv/bgmv_config.h + if 32000 < self.base_layer.vocab_size > 128512: + raise ValueError("When using LoRA, vocab size must be " + "32000 >= vocab_size <= 128512") + self.lora_a_stacked = torch.zeros( + ( + max_loras, + 1, + lora_config.max_lora_rank, + self.hidden_size, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.lora_b_stacked = torch.zeros( + ( + max_loras, + 1, + # Pad for kernel compatibility + math.ceil(self.base_layer.vocab_size / + lora_config.lora_vocab_padding_size) * + lora_config.lora_vocab_padding_size, + lora_config.max_lora_rank, + ), + dtype=lora_config.lora_dtype, + device=self.device, + ) + self.embeddings_tensors = torch.full( + (max_loras, lora_config.lora_extra_vocab_size, self.hidden_size), + fill_value=float("-inf"), + dtype=self.dtype, + device=self.device, + ) + # Lazily initialized. 
+ self.indices: torch.Tensor + self.indices_len: List[int] + self.indices_padded: torch.Tensor + + def reset_lora(self, index: int): + self.lora_a_stacked[index] = 0 + self.lora_b_stacked[index] = 0 + self.embeddings_tensors[index] = float("-inf") + + def set_lora( + self, + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + ): + self.reset_lora(index) + self.lora_a_stacked[index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if embeddings_tensor is not None: + self.embeddings_tensors[ + index, :embeddings_tensor.shape[0], :embeddings_tensor. + shape[1], ] = embeddings_tensor + + def set_mapping( + self, + base_indices: torch.Tensor, + sampler_indices: torch.Tensor, + sampler_indices_padded: torch.Tensor, + embeddings_indices: torch.Tensor, + indices_len: List[int], + ): + self.indices = sampler_indices + self.indices_padded = sampler_indices_padded + self.indices_len = indices_len + + def _get_logits( + self, + hidden_states: torch.Tensor, + embedding: torch.Tensor, + embedding_bias: Optional[torch.Tensor] = None, + ) -> Optional[torch.Tensor]: + # Get the logits for the next tokens. 
        logits = torch.matmul(hidden_states, embedding.t())
        if embedding_bias is not None:
            logits += embedding_bias
        # Gather sharded logits; non-root ranks receive None and bail out.
        logits = tensor_model_parallel_gather(logits)
        if logits is None:
            return None

        # (num_loras + 1, extra_vocab, num_tokens); the final extra row is
        # filled with -inf and serves as the target for padded indices.
        lora_logits = torch.empty(
            self.embeddings_tensors.shape[0] + 1,
            self.embeddings_tensors.shape[1],
            hidden_states.shape[0],
            dtype=self.embeddings_tensors.dtype,
            device=self.embeddings_tensors.device,
        )
        torch.matmul(self.embeddings_tensors,
                     hidden_states.T,
                     out=lora_logits[:-1])
        lora_logits[-1] = float("-inf")
        # Transpose the last two dims -> (num_loras + 1, num_tokens,
        # extra_vocab), then flatten the first two so each (lora, token)
        # pair is one row that indices_padded can select.
        lora_logits = lora_logits.mT
        lora_logits = (lora_logits.reshape(
            lora_logits.shape[0] * lora_logits.shape[1],
            lora_logits.shape[2],
        ).index_select(0,
                       self.indices_padded[:self.indices_len[2]]).nan_to_num_(
                           nan=float("-inf"),
                           posinf=float("inf"),
                           neginf=float("-inf")))
        # Overwrite the extra-vocab region of the base logits with the
        # per-token LoRA extra-vocab logits selected above.
        logits[:,
               self.base_layer.org_vocab_size:self.base_layer.org_vocab_size +
               lora_logits.shape[1]] = lora_logits

        # Add the LoRA delta (A then B) for the sampled tokens in place.
        _apply_lora(
            hidden_states,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[1]],
            logits,
        )

        # Remove paddings in vocab (if any).
        logits = logits[:, :self.base_layer.vocab_size]

        return logits

    def forward(self, *args, **kwargs):
        # Delegate to the base class's unbound forward so its logic runs
        # with this wrapper as `self` (and thus this _get_logits).
        return type(self.base_layer).forward(self, *args, **kwargs)

    @classmethod
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        # Special handling for the LogitsProcessor.
+ return False diff --git a/vllm/lora/lora.py b/vllm/lora/lora.py new file mode 100644 index 0000000..d7794aa --- /dev/null +++ b/vllm/lora/lora.py @@ -0,0 +1,167 @@ +from typing import List, Optional + +import torch + +from vllm.utils import is_pin_memory_available + + +class LoRALayerWeights: + """LoRA weights for a layer composed of two low rank matrixes.""" + + def __init__( + self, + module_name: str, + rank: int, + lora_alpha: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor] = None, + scaling: Optional[float] = None, + ) -> None: + self.module_name = module_name + self.rank = rank + self.lora_alpha = lora_alpha + self.lora_a = lora_a + self.lora_b = lora_b + self.embeddings_tensor = embeddings_tensor + + if scaling is None: + self.scaling = self.lora_alpha / self.rank + else: + self.scaling = scaling + + def optimize(self) -> "LoRALayerWeights": + """Optimize the LoRA by merging the scaling into lora_b.""" + if self.scaling == 1: + return self + self.lora_b *= self.scaling + self.scaling = 1 + return self + + @property + def input_dim(self) -> int: + return self.lora_a.shape[0] + + @property + def output_dim(self) -> int: + return self.lora_b.shape[1] + + @property + def is_packed(self) -> bool: + return False + + @property + def extra_vocab_size(self) -> int: + return self.embeddings_tensor.shape[ + 0] if self.embeddings_tensor is not None else 0 + + @classmethod + def create_dummy_lora_weights( + cls, + module_name: str, + input_dim: int, + output_dim: int, + rank: int, + dtype: torch.dtype, + device: torch.device, + embeddings_tensor_dim: Optional[int] = None) -> "LoRALayerWeights": + pin_memory = str(device) == "cpu" and is_pin_memory_available() + lora_a = torch.zeros([input_dim, rank], + dtype=dtype, + device=device, + pin_memory=pin_memory) + lora_b = torch.zeros([rank, output_dim], + dtype=dtype, + device=device, + pin_memory=pin_memory) + embeddings_tensor = torch.rand( + 10, + embeddings_tensor_dim, + 
class PackedLoRALayerWeights(LoRALayerWeights):
    """LoRA used for packed layers (eg. qkv_proj)."""

    def __init__(
        self,
        module_name: str,
        rank: int,
        lora_alphas: List[Optional[int]],
        lora_a: List[Optional[torch.Tensor]],
        lora_b: List[Optional[torch.Tensor]],
        scaling: Optional[List[float]] = None,
    ) -> None:
        # lora_alpha=0 is a placeholder: the scalar alpha of the base class
        # is meaningless for a packed layer, which tracks one alpha per
        # sub-lora in `lora_alphas` instead.
        super().__init__(
            module_name=module_name,
            rank=rank,
            lora_alpha=0,
            lora_a=lora_a,
            lora_b=lora_b,
            scaling=scaling,  # type: ignore
            embeddings_tensor=None,
        )
        self.lora_alphas = lora_alphas
        if scaling is None:
            # Per-sublora scaling: alpha_i / rank for each packed component.
            self.scaling = [  # type: ignore
                lora_alpha / self.rank  # type: ignore # noqa
                for lora_alpha in self.lora_alphas
            ]

    @classmethod
    def pack(
        cls, loras: List[Optional["LoRALayerWeights"]]
    ) -> "PackedLoRALayerWeights":
        """Pack a list of LoRAs into a single LoRA.

        If LoRA is None, it signifies that the submodule does not have a LoRA.
        """
        first_lora = next(lora for lora in loras if lora is not None)
        for lora in loras:
            if lora is None:
                continue
            # Fold each sublora's scaling into its lora_b up front, so the
            # packed object can use a uniform scaling of 1 below.
            lora.optimize()
        rank = first_lora.rank
        module_name = first_lora.module_name
        obj = cls(
            module_name,
            rank,
            [lora.lora_alpha if lora is not None else None for lora in loras],
            [lora.lora_a if lora is not None else None for lora in loras],
            [lora.lora_b if lora is not None else None for lora in loras],
            scaling=[
                1 if lora is not None else None  # type: ignore
                for lora in loras
            ])
        return obj

    def optimize(self) -> "PackedLoRALayerWeights":
        """Optimize the LoRA by merging the scaling into lora_b."""
        for i in range(len(self.lora_b)):
            if self.scaling[i] == 1 or self.lora_b[i] is None:  # type: ignore
                continue
            self.lora_b[i] *= self.scaling[i]  # type: ignore
            self.scaling[i] = 1  # type: ignore
        return self

    @property
    def input_dim(self) -> int:
        # Ambiguous for a packed layer (one dim per sub-lora).
        raise NotImplementedError()

    @property
    def output_dim(self) -> int:
        # Ambiguous for a packed layer (one dim per sub-lora).
        raise NotImplementedError()

    @property
    def is_packed(self) -> bool:
        return True
extra_vocab_size: int +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, List[int]]: + """Converts LoRAMapping to index tensors. + + Args: + mapping: LoRAMapping mapping rows in a batch to LoRA ids. + lora_index_to_id: List mapping LoRA ids to LoRA indices. + max_loras: Maximum number of LoRAs. + vocab_size: Model vocab size. + extra_vocab_size: Extra vocab size each LoRA can have. + + Returns: + A tuple of tensors: + base_indices: Tensor of shape [batch_size] mapping batch rows to + LoRA indices. + sampler_indices: Tensor of shape [batch_size] mapping requests to + LoRA indices for sampler. For generation, this will be the + same as base_indicies. For prefill, this will map requests + to LoRA indices. + sampler_indices_padded: Tensor of shape [batch_size] mapping + requests to LoRA indices for sampler with padding. + Same as sampler_indicies, but -1 is replaced with + max_loras. + embeddings_indices: Tensor of shape [2, batch_size] mapping + requests to embedding indices. First row is for embeddings + added by the LoRAs, second row is for the LoRA.lora_a + embeddings. + indices_len: List of lengths of the above tensors. + """ + index_mapping_indices: List[int] = list(mapping.index_mapping).copy() + embedding_indices = index_mapping_indices.copy() + lora_indices = index_mapping_indices.copy() + prompt_mapping: List[int] = [ + lora_index_to_id.index(x) if x > 0 else -1 + for x in mapping.prompt_mapping + ] + lora_idx = None + for i in range(len(index_mapping_indices)): + # TODO index can be slow. 
def get_lora_id():
    """Return a fresh, process-wide unique LoRA id (monotonically increasing,
    starting at 1)."""
    global _GLOBAL_LORA_ID
    _GLOBAL_LORA_ID += 1
    return _GLOBAL_LORA_ID


class LoRAModel:
    """A LoRA fine-tuned model."""

    def __init__(
        self,
        lora_model_id: int,
        rank: int,
        loras: Dict[str, LoRALayerWeights],
    ) -> None:
        # id: unique positive integer identifying this adapter.
        # loras: per-module weights, keyed by module name.
        self.id = lora_model_id
        assert (lora_model_id >
                0), f"a valid lora id should be greater than 0, got {self.id}"
        self.rank = rank
        self.loras: Dict[str, LoRALayerWeights] = loras

    @property
    def extra_vocab_size(self) -> int:
        # Largest extra vocab carried by any per-module LoRA; 0 when empty.
        return max(lora.extra_vocab_size
                   for lora in self.loras.values()) if self.loras else 0

    def get_lora(self, module_name: str) -> Optional[LoRALayerWeights]:
        """Get LoRA for a given module by name"""
        return self.loras.get(module_name, None)

    # (yard1): TODO see if we can derive target_embedding_padding automatically
    @classmethod
    def from_lora_tensors(
        cls,
        lora_model_id: int,
        rank: int,
        lora_alpha: int,
        tensors: Dict[str, torch.Tensor],
        device: str = "cuda",
        dtype: Optional[torch.dtype] = None,
        embeddings: Optional[Dict[str, torch.Tensor]] = None,
        target_embedding_padding: Optional[int] = None,
        embedding_modules: Optional[Dict[str, str]] = None,
        embedding_padding_modules: Optional[List[str]] = None,
    ) -> "LoRAModel":
        """Create a LoRAModel from a dictionary of tensors."""
        pin_memory = str(device) == "cpu" and is_pin_memory_available()
        loras: Dict[str, LoRALayerWeights] = {}
        for tensor_name, tensor in tensors.items():
            module_name, is_lora_a = parse_fine_tuned_lora_name(tensor_name)
            if module_name not in loras:
                # First tensor seen for this module: create the holder and
                # attach any new-embeddings tensor for embedding modules.
                lora_embeddings_tensor = None
                if embeddings:
                    assert embedding_modules is not None
                    embeddings_module = next(
                        (k for k in embedding_modules if k in module_name),
                        None)
                    if embeddings_module:
                        lora_embeddings_tensor = embeddings[
                            embedding_modules[embeddings_module]].to(
                                device=device, dtype=dtype)
                        if pin_memory:
                            lora_embeddings_tensor = (
                                lora_embeddings_tensor.pin_memory())
                loras[module_name] = LoRALayerWeights(module_name, rank,
                                                      lora_alpha, None, None,
                                                      lora_embeddings_tensor)
            if is_lora_a:
                # Stored transposed (.t()) to match the layout the LoRA
                # layers expect.
                loras[module_name].lora_a = tensor.to(device=device,
                                                      dtype=dtype).t()
                if pin_memory:
                    loras[module_name].lora_a = loras[
                        module_name].lora_a.pin_memory()
            else:
                loras[module_name].lora_b = tensor.to(device=device,
                                                      dtype=dtype).t()
                assert embedding_padding_modules is not None
                if any(name in module_name
                       for name in embedding_padding_modules
                       ) and target_embedding_padding is not None:
                    # Pad lora_b's output dim up to the padded vocab size.
                    lora_b = loras[module_name].lora_b
                    assert target_embedding_padding >= lora_b.shape[1]
                    addition = target_embedding_padding - lora_b.shape[1]
                    loras[module_name].lora_b = torch.nn.functional.pad(
                        lora_b, (0, addition))
                if pin_memory:
                    loras[module_name].lora_b = loras[
                        module_name].lora_b.pin_memory()

        for lora in loras.values():
            lora.optimize()
        return cls(lora_model_id, rank, loras)
class LoRAModelManager:
    """A manager that manages multiple LoRA-fine-tuned models."""

    def __init__(
        self,
        model: nn.Module,
        max_num_seqs: int,
        max_num_batched_tokens: int,
        vocab_size: int,
        lora_config: LoRAConfig,
    ):
        """Create a LoRAModelManager and adapter for a given model.

        Args:
            model: the model to be adapted.
            max_num_seqs: the maximum number of sequences model can run in a
                single batch.
            max_num_batched_tokens: the maximum number of tokens model can run
                in a single batch.
            vocab_size: the vocab size of the model.
            lora_config: the LoRA configuration.
        """
        self.lora_config = lora_config
        self.max_num_seqs = max_num_seqs
        assert self.capacity >= self.lora_slots
        # Round up to a multiple of 8 — presumably for kernel alignment;
        # TODO(review) confirm against the punica kernel requirements.
        self.max_num_batched_tokens = math.ceil(max_num_batched_tokens / 8) * 8
        self.lora_index_to_id: List[Optional[int]] = [None] * self.lora_slots
        self.vocab_size = vocab_size
        # Persistent GPU index buffers, sliced and refreshed per batch by
        # _set_lora_mapping.
        self.base_indices = torch.empty(self.max_num_batched_tokens,
                                        dtype=torch.long,
                                        device="cuda")
        self.sampler_indices = torch.empty(self.max_num_batched_tokens,
                                           dtype=torch.long,
                                           device="cuda")
        self.sampler_indices_padded = torch.empty(self.max_num_batched_tokens,
                                                  dtype=torch.long,
                                                  device="cuda")
        self.embeddings_indices = torch.empty(2,
                                              self.max_num_batched_tokens,
                                              dtype=torch.long,
                                              device="cuda")
        # 4 is the number of indicies tensors defined above
        # base_indices, sampler_indices, sampler_indices_padded,
        # embeddings_indices
        self.indices_len: List[Optional[int]] = [None] * 4

        self.model: nn.Module = model
        if hasattr(self.model, "supported_lora_modules"):
            self.supported_lora_modules = copy.deepcopy(
                self.model.supported_lora_modules)
            self.packed_modules_mapping = copy.deepcopy(
                self.model.packed_modules_mapping)
        self.packed_modules: Dict[str, List[str]] = {}
        self.modules: Dict[str, "BaseLayerWithLoRA"] = {}
        self._registered_loras: Dict[int, LoRAModel] = {}
        # Dict instead of a Set for compatibility with LRUCache.
        self._active_loras: Dict[int, None] = {}
        self._last_mapping: Optional[LoRAMapping] = None
        self._create_lora_modules()
        self.model.lora_manager = self

    @property
    def capacity(self) -> int:
        # Max adapters resident in CPU memory.
        return self.lora_config.max_cpu_loras

    @property
    def lora_slots(self) -> int:
        # Max adapters simultaneously active on GPU.
        return self.lora_config.max_loras

    def __len__(self) -> int:
        return len(self._registered_loras)

    def activate_lora(
        self,
        lora_id: int,
    ) -> bool:
        """Move LoRA into a GPU buffer to be used in the forward pass."""
        if lora_id in self._active_loras:
            return False
        # First-fit: pick the lowest-numbered empty GPU slot.
        first_free_slot = next(
            ((i, lora_id) for i, lora_id in enumerate(self.lora_index_to_id)
             if lora_id is None), None)
        if first_free_slot is None:
            raise ValueError("No free lora slots")
        index, _ = first_free_slot
        self._active_loras[lora_id] = None
        lora_model = self._registered_loras[lora_id]
        logger.debug("Activating LoRA. int id: %d, slot index: %d",
                     lora_model.id, index)
        self.lora_index_to_id[index] = lora_model.id
        for module_name, module in self.modules.items():
            module_lora = lora_model.get_lora(module_name)
            if module_lora:
                module_lora.optimize()
                module.set_lora(index, module_lora.lora_a, module_lora.lora_b,
                                module_lora.embeddings_tensor)
            else:
                # Modules without weights for this adapter get their slot
                # zeroed so stale weights cannot leak across adapters.
                module.reset_lora(index)
        return True

    def _deactivate_lora(self, lora_id: int):
        # Free the GPU slot; ValueError means the id was not active.
        try:
            index = self.lora_index_to_id.index(lora_id)
            self.lora_index_to_id[index] = None
        except ValueError:
            pass

    def deactivate_lora(self, lora_id: int) -> bool:
        """Remove a LoRA from a GPU buffer."""
        if lora_id in self._active_loras:
            self._deactivate_lora(lora_id)
            self._active_loras.pop(lora_id)
            return True
        return False

    def _add_lora(self, lora: LoRAModel):
        self._create_merged_loras_inplace(lora)
        self._registered_loras[lora.id] = lora

    def add_lora(self, lora: LoRAModel) -> bool:
        """Add a LoRAModel to the manager CPU cache."""
        if lora.id not in self._registered_loras:
            if len(self._registered_loras) >= self.capacity:
                raise RuntimeError("No free LoRA slots.")
            self._add_lora(lora)
            return True
        return False

    def remove_lora(self, lora_id: int) -> bool:
        """Remove a LoRAModel from the manager CPU cache."""
        # TODO: should we check active lora?
        self.deactivate_lora(lora_id)
        return bool(self._registered_loras.pop(lora_id, None))

    # TODO see if this can be vectorized
    def _set_lora_mapping(self, mapping: LoRAMapping) -> None:
        (base_indices, sampler_indices, sampler_indices_padded,
         embeddings_indices,
         indices_len) = convert_mapping(mapping, self.lora_index_to_id,
                                        self.lora_slots + 1, self.vocab_size,
                                        self.lora_config.lora_extra_vocab_size)
        # Copy into the persistent GPU buffers in place — LoRA layers hold
        # references to these tensors (see _create_lora_modules).
        self.base_indices[:base_indices.shape[0]].copy_(base_indices)
        self.sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices)
        self.sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_(
            sampler_indices_padded)
        self.embeddings_indices[:embeddings_indices.
                                shape[0], :embeddings_indices.shape[1]].copy_(
                                    embeddings_indices)
        # Maintain the reference
        self.indices_len[:] = indices_len

    def set_lora_mapping(self, lora_mapping: LoRAMapping) -> None:
        # Skip the GPU copies when the mapping is unchanged from last batch.
        if self._last_mapping != lora_mapping:
            self._set_lora_mapping(lora_mapping)
        self._last_mapping = lora_mapping

    def list_loras(self) -> Dict[int, LoRAModel]:
        """List all registered LoRAModels."""
        return dict(self._registered_loras)

    def get_lora(self, lora_id: int) -> Optional[LoRAModel]:
        return self._registered_loras.get(lora_id, None)

    def remove_all_loras(self):
        """Remove all LoRAModels from the manager."""
        self._registered_loras.clear()
        self.lora_index_to_id = [None] * self.lora_slots
        self._active_loras.clear()

    def _create_lora_modules(self):
        # Replace every supported submodule with its LoRA-wrapped variant.
        for module_name, module in self.model.named_modules():
            if not self._match_target_modules(module_name):
                continue
            parts = module_name.split(".")[-1]
            packed_moduled_lst = self.packed_modules_mapping.get(parts, [])
            new_module = replace_submodule(
                self.model, module_name,
                from_layer(module, self.lora_slots, self.lora_config,
                           packed_moduled_lst, self.model.config))
            # (yard1): TODO make this more robust
            if "lm_head" in module_name:
                logits_processor_module = self.model.get_submodule(
                    "logits_processor")
                new_module = replace_submodule(
                    self.model, "logits_processor",
                    from_layer_logits_processor(logits_processor_module,
                                                module, self.lora_slots,
                                                self.lora_config,
                                                self.model.config))
            self.register_module(module_name, new_module)
            self._register_packed_modules(module_name)
            # Share the manager's index buffers with the new module; later
            # in-place updates in _set_lora_mapping are then visible to it.
            new_module.set_mapping(self.base_indices, self.sampler_indices,
                                   self.sampler_indices_padded,
                                   self.embeddings_indices, self.indices_len)

    def register_module(self, module_name: str, module: "BaseLayerWithLoRA"):
        assert isinstance(module, BaseLayerWithLoRA)
        self.modules[module_name] = module

    def create_dummy_lora(
            self,
            lora_id: int,
            rank: int,
            embedding_modules: Optional[Dict[str, str]] = None) -> LoRAModel:
        """Create zero-initialized LoRAModel for warmup."""
        model = LoRAModel(lora_id, rank, {})
        for module_name, module in self.model.named_modules():
            if not self._match_target_modules(module_name) or not isinstance(
                    module, BaseLayerWithLoRA):
                continue
            parts = module_name.split(".")
            if module_name not in self.packed_modules:
                assert embedding_modules is not None
                if parts[-1] in embedding_modules:
                    # Embedding-like modules: derive dims from the vocab
                    # metadata when present, else from the raw weight shape.
                    input_dim = (module.base_layer.org_vocab_size +
                                 self.lora_config.lora_extra_vocab_size if
                                 hasattr(module.base_layer, "org_vocab_size")
                                 else module.base_layer.weight.shape[1])
                    output_dim = module.base_layer.embedding_dim if hasattr(
                        module.base_layer,
                        "embedding_dim") else module.base_layer.weight.shape[0]
                    embeddings_tensor_dim = (module.base_layer.embedding_dim if
                                             hasattr(module.base_layer,
                                                     "embedding_dim") else
                                             module.base_layer.weight.shape[1])
                    lora = LoRALayerWeights.create_dummy_lora_weights(
                        module_name,
                        input_dim,
                        output_dim,
                        rank,
                        module.lora_a_stacked.dtype,
                        "cpu",
                        embeddings_tensor_dim=embeddings_tensor_dim)
                else:
                    lora = LoRALayerWeights.create_dummy_lora_weights(
                        module_name,
                        module.lora_a_stacked.shape[-1],
                        module.lora_b_stacked.shape[-2],
                        rank,
                        module.lora_a_stacked.dtype,
                        "cpu",
                    )
                lora.optimize()
            else:
                # Packed module: make one dummy sublora per replacement,
                # then pack them.
                parts = module_name.split(".")
                replacements = self.packed_modules_mapping[parts[-1]]
                subloras: List[Optional["LoRALayerWeights"]] = []
                for i, r in enumerate(replacements):
                    lora = LoRALayerWeights.create_dummy_lora_weights(
                        module_name + "." + r,
                        module.lora_a_stacked[i].shape[-1],
                        module.lora_b_stacked[i].shape[-2],
                        rank,
                        module.lora_a_stacked[i].dtype,
                        "cpu",
                    )
                    lora.optimize()
                    subloras.append(lora)
                lora = PackedLoRALayerWeights.pack(subloras)
            model.loras[module_name] = lora
        return model

    def _match_target_modules(self, module_name: str):
        # Match either a dotted suffix (".<target>") or an exact name.
        return any(
            re.match(
                r".*\.{target_module}$".format(target_module=target_module),
                module_name) or target_module == module_name
            for target_module in self.supported_lora_modules)

    def _register_packed_modules(self, module_full_name: str) -> None:
        parts = module_full_name.split(".")
        module_name = parts[-1]
        replacements = self.packed_modules_mapping.get(module_name, [])
        # When replacements is less than or equal to 1, it indicates that this
        # module is not a packed module.
        if len(replacements) <= 1:
            return
        prefix = ".".join(parts[:-1])
        self.packed_modules[module_full_name] = [
            prefix + "." + r if prefix else r for r in replacements
        ]

    def _create_merged_loras_inplace(self, lora_model: LoRAModel) -> None:
        # Merge the per-submodule LoRAs of each packed module into one
        # PackedLoRALayerWeights keyed by the packed module's name.
        for module_name, new_module_names in self.packed_modules.items():
            replacement_loras: List[Optional[LoRALayerWeights]] = []
            has_replacement = False
            for r in new_module_names:
                lora = lora_model.get_lora(r)
                replacement_loras.append(lora)
                if lora:
                    has_replacement = True
            if not has_replacement:
                continue
            # NOTE(review): this loop is a no-op — falsy entries are
            # reassigned to None, which they already are; kept as-is.
            for i in range(len(replacement_loras)):
                if replacement_loras[i]:
                    continue
                replacement_loras[i] = None
            lora_model.loras[module_name] = PackedLoRALayerWeights.pack(
                replacement_loras)
return was_added + + def activate_lora( + self, + lora_id: int, + ) -> bool: + if lora_id not in self._active_loras and len( + self._active_loras) >= self.lora_slots: + self._active_loras.remove_oldest() + result = super().activate_lora(lora_id) + # We always touch to update the LRU cache order + self._active_loras.touch(lora_id) + return result + + def remove_oldest_lora(self) -> bool: + if len(self._registered_loras) > 0: + self._registered_loras.remove_oldest() + return True + return False + + +def create_lora_manager( + model: nn.Module, + max_num_seqs: int, + max_num_batched_tokens: int, + vocab_size: int, + lora_config: LoRAConfig, + lora_manager_cls: Type[LoRAModelManager] = LoRAModelManager, + **kwargs) -> LoRAModelManager: + """Create a LoRA adapter for a given model.""" + if not hasattr(model, "supported_lora_modules"): + raise ValueError(f"Model {type(model)} is not supported for LoRA.") + lora_manager = lora_manager_cls( + model=model, + max_num_seqs=max_num_seqs, + max_num_batched_tokens=max_num_batched_tokens, + vocab_size=vocab_size, + lora_config=lora_config, + **kwargs) + return lora_manager diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py new file mode 100644 index 0000000..c87bed5 --- /dev/null +++ b/vllm/lora/punica.py @@ -0,0 +1,213 @@ +# Based on code from https://github.com/punica-ai/punica + +from typing import Optional + +import torch + + +def _raise_import_error(e): + if torch.cuda.get_device_capability() < (8, 0): + raise ImportError( + "punica LoRA kernels require compute capability >= 8.0") from e + else: + raise ImportError( + "punica LoRA kernels could not be imported. 
def bgmv(
    y: torch.Tensor,
    x: torch.Tensor,
    w_t_all: torch.Tensor,
    indicies: torch.LongTensor,
    layer_idx: int,
    scale: float,
):
    """
    Semantics:
      y[i] += (
          x[i].unsqueeze(0)
          @ w_t_all[indices[i], layer_idx, :, :].transpose(-1, -2)
          * scale
        ).squeeze(0)

    Args:
      y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
      x: Shape: `[B, H1]`. Input vectors.
      w_t_all: Shape: `[None, L, H2, H1]`. All of the transposed weight
        matrices.
      indicies: Shape: `[B]`. Indices of the weight matrices.
      layer_idx: Layer index of the weight matrices.
      scale: Scaling factor.
    """
    # Deferred import: the punica extension is an optional build artifact
    # (see _raise_import_error's hint about VLLM_INSTALL_PUNICA_KERNELS).
    try:
        import vllm._punica_C as punica_kernels
    except ImportError as e:
        _raise_import_error(e)

    punica_kernels.dispatch_bgmv(y, x, w_t_all, indicies, layer_idx, scale)


def dispatch_bgmv_low_level(y: torch.Tensor, x: torch.Tensor,
                            w_t_all: torch.Tensor, indicies: torch.LongTensor,
                            layer_idx: int, scale: float, y_offset: int,
                            y_slice_size: int):
    """
    Same as `bgmv` but you can operate on slices of y.
    Pass whole y, define y_offset and y_slice_size.

    Semantics:
      y[i] += (
          x[i].unsqueeze(0)
          @ w_t_all[indices[i], layer_idx, :, :].transpose(-1, -2)
          * scale
        ).squeeze(0)

    Args:
      y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
      x: Shape: `[B, H1]`. Input vectors.
      w_t_all: Shape: `[None, L, y_slice_size, H1]`. Column partition of
        all of the transposed LoRA matrices.
      indicies: Shape: `[B]`. Indices of the LoRA weights.
      layer_idx: Layer index of LoRA weights.
      scale: Scaling factor.
      y_offset: Offset to apply to the starting column of y.
      y_slice_size: Size of the y column slice.
    """
    try:
        import vllm._punica_C as punica_kernels
    except ImportError as e:
        _raise_import_error(e)
    # NOTE: `indicies` spelling is kept — it mirrors the kernel's signature.
    punica_kernels.dispatch_bgmv_low_level(
        y,
        x,
        w_t_all,
        indicies,
        layer_idx,
        scale,
        x.size(1),
        y_slice_size,
        y_offset,
    )


def add_lora(y: torch.Tensor,
             x: torch.Tensor,
             wa_t_all: torch.Tensor,
             wb_t_all: torch.Tensor,
             indicies: torch.LongTensor,
             layer_idx: int,
             scale: float,
             *,
             buffer: Optional[torch.Tensor] = None):
    """
    Semantics:
      y[i] += (
          x[i].unsqueeze(0)
          @ wa_t_all[indices[i], layer_idx, :, :].transpose(-1, -2)
          @ wb_t_all[indices[i], layer_idx, :, :].transpose(-1, -2)
          * scale
        ).squeeze(0)

    Args:
      y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
      x: Shape: `[B, H1]`. Input vectors.
      wa_t_all: Shape: `[None, L, R, H1]`. All of the transposed
        LoRA A matrices.
      wb_t_all: Shape: `[None, L, H2, R]`. All of the transposed
        LoRA B matrices.
      indicies: Shape: `[B]`. Indices of the LoRA weights.
      layer_idx: Layer index of LoRA weights.
      scale: Scaling factor.
      buffer: Optional. Shape: `[B, R]`. Temporary buffer.
    """
    try:
        import vllm._punica_C as punica_kernels
    except ImportError as e:
        _raise_import_error(e)

    r = wb_t_all.size(-1)
    if buffer is None:
        # We set the buffer to be float32 by default to avoid
        # numerical inaccuracies that would otherwise happen
        # due to downcasting.
        buffer = torch.zeros((x.size(0), r),
                             dtype=torch.float32,
                             device=x.device)
    # Two-step product: buffer = x @ A^T (unscaled), y += buffer @ B^T * scale.
    punica_kernels.dispatch_bgmv(buffer, x, wa_t_all, indicies, layer_idx, 1.0)
    punica_kernels.dispatch_bgmv(y, buffer, wb_t_all, indicies, layer_idx,
                                 scale)
@dataclass
class LoRARequest:
    """A request to use a specific LoRA adapter.

    Intended for internal use: online-serving frontends should wrap this in
    their own abstraction rather than letting users construct it directly,
    so unauthorized adapters cannot be referenced.

    ``lora_int_id`` must be globally unique per adapter. vLLM does not
    currently enforce this.
    """

    lora_name: str
    lora_int_id: int
    lora_local_path: str

    def __post_init__(self):
        # Reject non-positive ids early: ids identify adapters and must be
        # strictly positive.
        if self.lora_int_id < 1:
            raise ValueError(
                f"lora_int_id must be > 0, got {self.lora_int_id}")

    def __eq__(self, value: object) -> bool:
        # A request's identity is its integer id alone; other fields are
        # ignored for equality.
        if not isinstance(value, LoRARequest):
            return False
        return self.lora_int_id == value.lora_int_id

    def __hash__(self) -> int:
        # Consistent with __eq__: hash on the unique integer id.
        return self.lora_int_id
def from_layer(layer: nn.Module,
               max_loras: int,
               lora_config: LoRAConfig,
               packed_modules_list: List,
               model_config: Optional[PretrainedConfig] = None) -> nn.Module:
    """Substitute `layer` with its LoRA-capable counterpart, if any.

    Returns the wrapped layer (with LoRA weights allocated for up to
    `max_loras` adapters), or the original layer unchanged when no
    registered LoRA class can replace it.
    """
    for candidate_cls in _all_lora_classes:
        # Keyword arguments so the can_replace_layer decorators can
        # inspect them by name.
        if candidate_cls.can_replace_layer(
                source_layer=layer,
                lora_config=lora_config,
                packed_modules_list=packed_modules_list,
                model_config=model_config):
            wrapped = candidate_cls(layer)
            wrapped.create_lora_weights(max_loras, lora_config, model_config)
            return wrapped
    # No LoRA wrapper matched; keep the layer untouched.
    return layer


def from_layer_logits_processor(
    layer: LogitsProcessor,
    lm_head: ParallelLMHead,
    max_loras: int,
    lora_config: LoRAConfig,
    model_config: Optional[PretrainedConfig] = None,
) -> LogitsProcessorWithLoRA:
    """Wrap a LogitsProcessor with LoRA support for the LM head."""
    wrapped = LogitsProcessorWithLoRA(layer, lm_head.embedding_dim,
                                      lm_head.weight.dtype,
                                      lm_head.weight.device)
    wrapped.create_lora_weights(max_loras, lora_config, model_config)
    return wrapped


def replace_submodule(model: nn.Module, module_name: str,
                      new_module: nn.Module) -> nn.Module:
    """Replace a submodule in a model with a new module."""
    parent_name, _, leaf_name = module_name.rpartition(".")
    parent = model.get_submodule(parent_name)
    setattr(parent, leaf_name, new_module)
    return new_module


def parse_fine_tuned_lora_name(name: str) -> Tuple[str, bool]:
    """Parse the name of a fine-tuned LoRA weight.

    Args:
        name: e.g. ``base_model.model.dense1.lora_A.weight``

    Returns:
        Tuple(module_name, is_lora_a): the dotted module path (e.g.
        ``model.dense1``) and whether the tensor is lora_A (True) or
        lora_B (False).

    Raises:
        ValueError: when the suffix matches neither the
            ``lora_{A,B}.weight`` nor ``lora_embedding_{A,B}`` pattern.
    """
    parts = name.split(".")
    assert parts[0] == "base_model"
    assert parts[1] == "model"

    if parts[-1] == "weight":
        suffix = parts[-2]
        assert suffix in ("lora_A", "lora_B")
        return ".".join(parts[2:-2]), suffix == "lora_A"

    if parts[-1] in ("lora_embedding_A", "lora_embedding_B"):
        return ".".join(parts[2:-1]), parts[-1] == "lora_embedding_A"

    raise ValueError(f"{name} is unsupported format")
class AbstractWorkerLoRAManager(ABC):
    """Abstract class for managing LoRA models on the worker side."""

    def __init__(self, max_num_seqs: int, max_num_batched_tokens: int,
                 vocab_size: int, lora_config: LoRAConfig,
                 device: torch.device):
        self.max_num_seqs = max_num_seqs
        self.max_num_batched_tokens = max_num_batched_tokens
        self.vocab_size = vocab_size
        self.device = device
        self.lora_config = lora_config

    # NOTE: abstractproperty is deprecated in favor of
    # @property + @abstractmethod; kept for consistency with the file.
    @abstractproperty
    def is_enabled(self) -> bool:
        ...

    @abstractmethod
    def create_lora_manager(
        self,
        model: torch.nn.Module,
    ) -> Any:
        ...

    @abstractmethod
    def set_active_loras(self, lora_requests: Set[LoRARequest],
                         lora_mapping: LoRAMapping) -> None:
        ...

    @abstractmethod
    def add_lora(self, lora_request: LoRARequest) -> bool:
        ...

    @abstractmethod
    def add_dummy_lora(self, lora_request: LoRARequest, rank: int) -> bool:
        ...

    @abstractmethod
    def remove_lora(self, lora_id: int) -> bool:
        ...

    @abstractmethod
    def remove_all_loras(self):
        ...

    @abstractmethod
    def list_loras(self) -> Set[int]:
        ...


class WorkerLoRAManager(AbstractWorkerLoRAManager):
    """WorkerLoRAManager that manages LoRA models on the worker side.

    Every request, the requested LoRAs will be loaded (unless they are already
    loaded), and every other LoRA will be unloaded."""

    _lora_manager_cls: Type[LoRAModelManager] = LoRAModelManager

    def __init__(
        self,
        max_num_seqs: int,
        max_num_batched_tokens: int,
        vocab_size: int,
        lora_config: LoRAConfig,
        device: torch.device,
        embedding_modules: Dict[str, str],
        embedding_padding_modules: List[str],
        lora_model_cls: Type[LoRAModel] = LoRAModel,
    ):
        self._lora_model_cls = lora_model_cls
        self.embedding_modules = embedding_modules
        self.embedding_padding_modules = embedding_padding_modules
        # Lazily initialized by create_lora_manager.
        self._lora_manager: LoRAModelManager
        super().__init__(max_num_seqs, max_num_batched_tokens, vocab_size,
                         lora_config, device)

    @property
    def is_enabled(self) -> bool:
        return True

    def create_lora_manager(
        self,
        model: torch.nn.Module,
    ) -> Any:
        # Returns the (possibly wrapped) model; the manager itself is
        # kept on self._lora_manager.
        lora_manager = create_lora_manager(
            model,
            max_num_seqs=self.max_num_seqs,
            max_num_batched_tokens=self.max_num_batched_tokens,
            vocab_size=self.vocab_size,
            lora_config=self.lora_config,
            lora_manager_cls=self._lora_manager_cls,
        )
        self._lora_manager = lora_manager
        return lora_manager.model

    def set_active_loras(self, lora_requests: Set[LoRARequest],
                         lora_mapping: LoRAMapping) -> None:
        self._apply_loras(lora_requests)
        self._lora_manager.set_lora_mapping(lora_mapping)

    def _apply_loras(self, lora_requests: Set[LoRARequest]) -> None:
        # Diff the requested set against the loaded set: unload what is
        # no longer requested, then load what is newly requested.
        loras_that_exist = self.list_loras()
        loras_map = {
            lora_request.lora_int_id: lora_request
            for lora_request in lora_requests if lora_request
        }
        if len(loras_map) > self._lora_manager.lora_slots:
            raise RuntimeError(
                f"Number of requested LoRAs ({len(loras_map)}) is greater "
                "than the number of GPU LoRA slots "
                f"({self._lora_manager.lora_slots}).")

        new_loras = set(loras_map)
        loras_to_add = new_loras - loras_that_exist
        loras_to_remove = loras_that_exist - new_loras

        for lora_id in loras_to_remove:
            self.remove_lora(lora_id)

        for lora_id in loras_to_add:
            self.add_lora(loras_map[lora_id])

    def _load_lora(self, lora_request: LoRARequest) -> LoRAModel:
        """Load a LoRA checkpoint from disk (to CPU) and validate it
        against the configured rank / extra-vocab limits."""
        try:
            model = self._lora_manager.model
            supported_lora_modules = model.supported_lora_modules
            packed_modules_mapping = model.packed_modules_mapping
            # Expand packed modules (e.g. qkv_proj) into the individual
            # module names expected inside the checkpoint.
            expected_lora_modules = []
            for module in supported_lora_modules:
                if module in packed_modules_mapping:
                    expected_lora_modules.extend(
                        packed_modules_mapping[module])
                else:
                    expected_lora_modules.append(module)
            lora = self._lora_model_cls.from_local_checkpoint(
                lora_request.lora_local_path,
                expected_lora_modules,
                lora_model_id=lora_request.lora_int_id,
                device="cpu",
                dtype=self.lora_config.lora_dtype,
                target_embedding_padding=self.vocab_size +
                self.lora_config.lora_extra_vocab_size,
                embedding_modules=self.embedding_modules,
                embedding_padding_modules=self.embedding_padding_modules,
            )
        except Exception as e:
            raise RuntimeError(
                f"Loading lora {lora_request.lora_local_path} failed") from e
        if lora.rank > self.lora_config.max_lora_rank:
            raise ValueError(
                f"LoRA rank {lora.rank} is greater than max_lora_rank "
                f"{self.lora_config.max_lora_rank}.")
        if lora.extra_vocab_size > self.lora_config.lora_extra_vocab_size:
            raise ValueError(f"LoRA added vocab size {lora.extra_vocab_size} "
                             f"is greater than lora_extra_vocab_size "
                             f"{self.lora_config.lora_extra_vocab_size}.")
        return lora

    def add_dummy_lora(self, lora_request: LoRARequest, rank: int) -> bool:
        # Used for profiling/warmup; returns False if already present.
        if lora_request.lora_int_id in self.list_loras():
            return False
        return self._lora_manager.add_lora(
            self._lora_manager.create_dummy_lora(lora_request.lora_int_id,
                                                 rank,
                                                 self.embedding_modules))

    def add_lora(self, lora_request: LoRARequest) -> bool:
        if lora_request.lora_int_id in self.list_loras():
            return False
        lora = self._load_lora(lora_request)
        loaded = self._lora_manager.add_lora(lora)
        self._lora_manager.activate_lora(lora.id)
        return loaded

    def remove_lora(self, lora_id: int) -> bool:
        return self._lora_manager.remove_lora(lora_id)

    def remove_all_loras(self):
        self._lora_manager.remove_all_loras()

    def list_loras(self) -> Set[int]:
        return set(self._lora_manager.list_loras())


class LRUCacheWorkerLoRAManager(WorkerLoRAManager):
    """WorkerLoRAManager that manages LoRA models on the worker side.

    Uses an LRU Cache. Every request, the requested LoRAs will be loaded
    (unless they are already loaded) and least recently used LoRAs will
    be unloaded if the cache is above capacity."""

    _lora_manager_cls: Type[
        LRUCacheLoRAModelManager] = LRUCacheLoRAModelManager

    def create_lora_manager(
        self,
        model: torch.nn.Module,
    ) -> Any:
        lora_manager = create_lora_manager(
            model,
            lora_manager_cls=self._lora_manager_cls,
            max_num_seqs=self.max_num_seqs,
            vocab_size=self.vocab_size,
            lora_config=self.lora_config,
            max_num_batched_tokens=self.max_num_batched_tokens,
        )
        self._lora_manager = lora_manager
        return lora_manager.model

    def _apply_loras(self, lora_requests: Set[LoRARequest]) -> None:
        # Unlike the base class, nothing is explicitly unloaded here;
        # eviction is delegated to the LRU cache in add_lora().
        loras_map = {
            lora_request.lora_int_id: lora_request
            for lora_request in lora_requests if lora_request
        }
        if len(loras_map) > self._lora_manager.lora_slots:
            raise RuntimeError(
                f"Number of requested LoRAs ({len(loras_map)}) is greater "
                "than the number of GPU LoRA slots "
                f"({self._lora_manager.lora_slots}).")
        for lora in loras_map.values():
            self.add_lora(lora)

    def add_lora(self, lora_request: LoRARequest) -> bool:
        if lora_request.lora_int_id not in self.list_loras():
            # Remove before we load the new lora to save memory
            if len(self._lora_manager) + 1 > self._lora_manager.capacity:
                assert isinstance(self._lora_manager, LRUCacheLoRAModelManager)
                self._lora_manager.remove_oldest_lora()
            lora = self._load_lora(lora_request)
            loaded = self._lora_manager.add_lora(lora)
        else:
            # If the lora is already loaded, just touch it to
            # update its position in the caches
            loaded = self._lora_manager.get_lora(
                lora_request.lora_int_id) is not None
        self._lora_manager.activate_lora(lora_request.lora_int_id)
        return loaded


# --- vllm/model_executor/__init__.py ---
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.utils import set_random_seed

__all__ = [
    "SamplingMetadata",
    "set_random_seed",
]
ValueError( + f"Unknown guided decoding backend '{guided_decoding_backend}'. " + "Must be one of 'outlines, 'lm-format-enforcer'") diff --git a/vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py b/vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py new file mode 100644 index 0000000..d0a5ca5 --- /dev/null +++ b/vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py @@ -0,0 +1,70 @@ +from functools import lru_cache +from json import loads as json_loads +from typing import Optional, Union + +from lmformatenforcer import (CharacterLevelParser, JsonSchemaParser, + RegexParser, StringParser, + TokenEnforcerTokenizerData, UnionParser) +from lmformatenforcer.integrations.vllm import ( + build_vllm_logits_processor, build_vllm_token_enforcer_tokenizer_data) +from pydantic import BaseModel +from transformers import PreTrainedTokenizerBase + +from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + CompletionRequest) +from vllm.model_executor.guided_decoding.outlines_decoding import ( + get_outlines_guided_decoding_logits_processor) +from vllm.sampling_params import LogitsProcessor + + +async def get_lm_format_enforcer_guided_decoding_logits_processor( + request: Union[CompletionRequest, ChatCompletionRequest], + tokenizer) -> Optional[LogitsProcessor]: + """ + Given an OpenAI-compatible request, check for guided decoding parameters + and get the necessary logits processor for the given guide. + We cache logit processors by (guide, tokenizer), and on cache hit + we make a shallow copy to reuse the same underlying FSM. 
+ """ + + tokenizer_data = _cached_build_vllm_token_enforcer_tokenizer_data( + tokenizer) + character_level_parser: CharacterLevelParser + if request.guided_json: + schema = _normalize_json_schema_object(request.guided_json) + character_level_parser = JsonSchemaParser(schema) + elif request.guided_choice: + character_level_parser = UnionParser( + [StringParser(choice) for choice in request.guided_choice]) + elif request.guided_regex: + character_level_parser = RegexParser(request.guided_regex) + elif request.guided_grammar: + # CFG grammar not supported by LMFE, revert to outlines + return await get_outlines_guided_decoding_logits_processor( + request, tokenizer) + elif (request.response_format is not None + and request.response_format.type == "json_object"): + character_level_parser = JsonSchemaParser( + None) # None means any json object + else: + return None + + logits_processor = build_vllm_logits_processor(tokenizer_data, + character_level_parser) + return logits_processor + + +def _normalize_json_schema_object(schema: Union[str, dict, BaseModel]) -> dict: + if isinstance(schema, str): + return json_loads(schema) + if isinstance(schema, dict): + return schema + if isinstance(schema, BaseModel): + return schema.model_json_schema() + raise AssertionError(f"Unsupported schema type {schema}") + + +@lru_cache +def _cached_build_vllm_token_enforcer_tokenizer_data( + tokenizer: PreTrainedTokenizerBase) -> TokenEnforcerTokenizerData: + return build_vllm_token_enforcer_tokenizer_data(tokenizer) diff --git a/vllm/model_executor/guided_decoding/outlines_decoding.py b/vllm/model_executor/guided_decoding/outlines_decoding.py new file mode 100644 index 0000000..8403604 --- /dev/null +++ b/vllm/model_executor/guided_decoding/outlines_decoding.py @@ -0,0 +1,130 @@ +import asyncio +import concurrent.futures +from copy import copy +from enum import Enum +from functools import lru_cache +from json import dumps as json_dumps +from re import escape as regex_escape +from typing 
import Tuple, Union + +from pydantic import BaseModel +from transformers import PreTrainedTokenizerBase + +from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + CompletionRequest) +from vllm.model_executor.guided_decoding.outlines_logits_processors import ( + CFGLogitsProcessor, JSONLogitsProcessor, RegexLogitsProcessor) + + +class GuidedDecodingMode(Enum): + JSON = "json" + REGEX = "regex" + CHOICE = "choice" + GRAMMAR = "grammar" + + +# https://github.com/outlines-dev/outlines/blob/main/outlines/grammars/json.lark +# the main difference is that we changed the start: value to +# start: object | array, so we are denying scalar values as the root of the +# JSON. Starting with scalars as the root seems to cause llama to generate +# without stop. +JSON_GRAMMAR = r""" +?start: object | array + +?value: object +| array +| UNESCAPED_STRING +| SIGNED_NUMBER -> number +| "true" -> true +| "false" -> false +| "null" -> null + +array : "[" [value ("," value)*] "]" +object : "{" [pair ("," pair)*] "}" +pair : UNESCAPED_STRING ":" value + +%import common.UNESCAPED_STRING +%import common.SIGNED_NUMBER +%import common.WS + +%ignore WS +""" + +global_thread_pool = None # used for generating logits processor fsm + + +async def get_outlines_guided_decoding_logits_processor( + request: Union[CompletionRequest, ChatCompletionRequest], + tokenizer) -> Union[JSONLogitsProcessor, RegexLogitsProcessor, None]: + """ + Given an OpenAI-compatible request, check for guided decoding parameters + and get the necessary logits processor for the given guide. + We cache logit processors by (guide, tokenizer), and on cache hit + we make a shallow copy to reuse the same underlying FSM. 
+ """ + global global_thread_pool + guide, mode = _get_guide_and_mode(request) + if not guide: + return None + + if global_thread_pool is None: + global_thread_pool = concurrent.futures.ThreadPoolExecutor( + max_workers=2) + loop = asyncio.get_running_loop() + + result = await loop.run_in_executor(global_thread_pool, + _get_cached_logits_processor, guide, + tokenizer, mode, + request.guided_whitespace_pattern) + + logits_processor = copy(result) + # reset logits processor's internal state + logits_processor.init_state() + return logits_processor + + +def _get_guide_and_mode( + request: Union[CompletionRequest, ChatCompletionRequest] +) -> Union[Tuple[str, GuidedDecodingMode], Tuple[None, None]]: + + if request.guided_json: + json = request.guided_json + if isinstance(json, dict): + # turn dict into hashable string + json = json_dumps(json) + elif isinstance(json, BaseModel): + # use pydantic signature so that different model classes + # with the same fields will get hashed the same + json = str(json.__signature__) + return json, GuidedDecodingMode.JSON + elif request.guided_regex: + return request.guided_regex, GuidedDecodingMode.REGEX + elif request.guided_choice: + # choice just uses regex + choices = [ + regex_escape(str(choice)) for choice in request.guided_choice + ] + choices_regex = "(" + "|".join(choices) + ")" + return choices_regex, GuidedDecodingMode.CHOICE + elif request.guided_grammar: + return request.guided_grammar, GuidedDecodingMode.GRAMMAR + elif (request.response_format is not None + and request.response_format.type == "json_object"): + return JSON_GRAMMAR, GuidedDecodingMode.GRAMMAR + else: + return None, None + + +@lru_cache(maxsize=32) +def _get_cached_logits_processor(guide: str, + tokenizer: PreTrainedTokenizerBase, + mode: GuidedDecodingMode, + whitespace_pattern: Union[str, None]): + if mode == GuidedDecodingMode.JSON: + return JSONLogitsProcessor(guide, tokenizer, whitespace_pattern) + elif mode == GuidedDecodingMode.REGEX or mode == 
GuidedDecodingMode.CHOICE: + return RegexLogitsProcessor(guide, tokenizer) + elif mode == GuidedDecodingMode.GRAMMAR: + return CFGLogitsProcessor(guide, tokenizer) + else: + raise ValueError(f"Unknown guided decoding mode {mode}") diff --git a/vllm/model_executor/guided_decoding/outlines_logits_processors.py b/vllm/model_executor/guided_decoding/outlines_logits_processors.py new file mode 100644 index 0000000..a131c6a --- /dev/null +++ b/vllm/model_executor/guided_decoding/outlines_logits_processors.py @@ -0,0 +1,184 @@ +# Copyright 2024- the Outlines developers +# This file is adapted from +# https://github.com/outlines-dev/outlines/blob/main/outlines/serve/vllm.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import json +import math +from collections import defaultdict +from functools import lru_cache +from typing import Callable, DefaultDict, Dict, List, Union + +import torch +from outlines.fsm.fsm import CFGFSM, FSM, RegexFSM +from outlines.fsm.json_schema import build_regex_from_schema +from pydantic import BaseModel +from transformers import PreTrainedTokenizerBase + + +class BaseLogitsProcessor: + + def __init__(self): + # Child class should use initialize in their init. 
+ self.fsm: FSM + + def init_state(self): + """Initialize the FSM states.""" + self.fsm_state: DefaultDict[int, int] = defaultdict(int) + + def __call__(self, input_ids: List[int], + scores: torch.Tensor) -> torch.Tensor: + """Use the FSM to bias the logits before sampling the next token.""" + seq_id = hash(tuple(input_ids)) + + if len(input_ids) == 0: + self.init_state() + else: + last_token = input_ids[-1] + last_seq_id = hash(tuple(input_ids[:-1])) + self.fsm_state[seq_id] = self.fsm.next_state( + self.fsm_state[last_seq_id], last_token) + + allowed_tokens = self.fsm.allowed_token_ids(self.fsm_state[seq_id]) + + mask = torch.full((scores.shape[-1], ), + -math.inf, + device=scores.device) + mask[allowed_tokens] = 0 + scores.add_(mask) + return scores + + +class RegexLogitsProcessor(BaseLogitsProcessor): + + def __init__(self, regex_string: str, tokenizer: PreTrainedTokenizerBase): + """Compile the FSM that drives the regex-structured generation. + + Parameters + ---------- + regex_string + A string that represents a regular expression + tokenizer + The model's tokenizer + + """ + tokenizer = _adapt_tokenizer(tokenizer) + fsm = RegexFSM(regex_string, tokenizer) + self.fsm = fsm + + +class JSONLogitsProcessor(RegexLogitsProcessor): + + def __init__(self, schema: Union[str, Dict, BaseModel], + tokenizer: PreTrainedTokenizerBase, + whitespace_pattern: Union[str, None]): + """Compile the FSM that drives the JSON-guided generation. 
+ + Parameters + ---------- + schema + A JSON schema that encodes the structure we want the model to + generate + tokenizer + The model's tokenizer + whitespace_pattern + Pattern to use for JSON syntactic whitespace (doesn't impact + string literals) + Example: allow only a single space or newline with + `whitespace_pattern=r"[\n ]?"` + """ + if isinstance(schema, type(BaseModel)): + schema_str = json.dumps(schema.model_json_schema()) + elif isinstance(schema, Dict): + schema_str = json.dumps(schema) + elif isinstance(schema, str): + schema_str = schema + else: + raise ValueError( + f"Cannot parse schema {schema}. The schema must be either " + f"a Pydantic object, a dictionary or a string that contains " + f"the JSON Schema specification") + regex_string = build_regex_from_schema(schema_str, whitespace_pattern) + super().__init__(regex_string, tokenizer) + + +class CFGLogitsProcessor(BaseLogitsProcessor): + + def __init__(self, cfg: str, tokenizer: PreTrainedTokenizerBase): + """Compile the FSM that drives the context free grammar generation. + + Parameters + ---------- + cfg + A string that represents a context-free grammar + tokenizer + The model's tokenizer + + """ + tokenizer = _adapt_tokenizer(tokenizer) + fsm = CFGFSM(cfg, tokenizer) + self.fsm = fsm + + def init_state(self): + """Initialize state with a CFGFSM copy.""" + super().init_state() + self.fsm = self.fsm.copy() + + +@lru_cache +def _adapt_tokenizer(tokenizer: PreTrainedTokenizerBase): + """Adapt vLLM's tokenizer to use to compile the FSM. + + The API of Outlines tokenizers is slightly different to that of + `transformers`. The decoder of outlines, returns a list whereas + the decode of vLLM returns an str. To sync the vLLM decoder with + outlines internal api, the decoder should be adapted. In addition + we need to handle the missing spaces to Llama's tokenizer to be + able to compile FSMs for this model. 
+ + """ + if getattr(tokenizer, "_outlines_adapted", False): + return tokenizer + + tokenizer = copy.deepcopy(tokenizer) + + tokenizer.vocabulary = tokenizer.get_vocab() + tokenizer.special_tokens = set(tokenizer.all_special_tokens) + + def convert_token_to_string(token: str) -> str: + from transformers.file_utils import SPIECE_UNDERLINE + + string = tokenizer.convert_tokens_to_string([token]) + + # A hack to handle missing spaces to HF's Llama tokenizers + if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": + return " " + string + + return string + + def change_decoder( + decoder: Callable[[List[int]], + str]) -> Callable[[List[int]], List[str]]: + """Sync vLLM's decoder with the outlines by returning list.""" + + def new_decoder(inp_tokens: List[int]) -> List[str]: + return [decoder(inp_tokens)] + + return new_decoder + + tokenizer.convert_token_to_string = convert_token_to_string + tokenizer.decode = change_decoder(tokenizer.decode) + setattr(tokenizer, "_outlines_adapted", True) # noqa: B010 + + return tokenizer diff --git a/vllm/model_executor/layers/__init__.py b/vllm/model_executor/layers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/model_executor/layers/activation.py b/vllm/model_executor/layers/activation.py new file mode 100644 index 0000000..d101aa3 --- /dev/null +++ b/vllm/model_executor/layers/activation.py @@ -0,0 +1,173 @@ +"""Custom activation functions.""" +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from vllm import _custom_ops as ops +from vllm.distributed import (divide, get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.utils import set_weight_attrs + + +class SiluAndMul(nn.Module): + """An activation function for SwiGLU. + + The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
class SiluAndMul(nn.Module):
    """SwiGLU activation: silu(x[..., :d]) * x[..., d:], d = last_dim // 2.

    Shapes:
        x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
        return: (num_tokens, d) or (batch_size, seq_len, d)
    """

    def _forward(self, x: torch.Tensor) -> torch.Tensor:
        """PyTorch-native reference implementation of forward()."""
        half = x.shape[-1] // 2
        return F.silu(x[..., :half]) * x[..., half:]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        half = x.shape[-1] // 2
        result = torch.empty(x.shape[:-1] + (half, ),
                             dtype=x.dtype,
                             device=x.device)
        ops.silu_and_mul(result, x)
        return result


class GeluAndMul(nn.Module):
    """GeGLU activation: GELU(x[..., :d]) * x[..., d:], d = last_dim // 2.

    Shapes:
        x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
        return: (batch_size, seq_len, d) or (num_tokens, d)
    """

    def __init__(self, approximate: str = "none"):
        super().__init__()
        self.approximate = approximate
        if approximate not in ("none", "tanh"):
            raise ValueError(f"Unknown approximate mode: {approximate}")

    def _forward(self, x: torch.Tensor) -> torch.Tensor:
        """PyTorch-native reference implementation of forward()."""
        half = x.shape[-1] // 2
        gate, up = x[..., :half], x[..., half:]
        return F.gelu(gate, approximate=self.approximate) * up

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        half = x.shape[-1] // 2
        result = torch.empty(x.shape[:-1] + (half, ),
                             dtype=x.dtype,
                             device=x.device)
        if self.approximate == "none":
            ops.gelu_and_mul(result, x)
        elif self.approximate == "tanh":
            ops.gelu_tanh_and_mul(result, x)
        return result

    def extra_repr(self) -> str:
        return f'approximate={repr(self.approximate)}'


class NewGELU(nn.Module):
    """The GPT-2 "new" tanh-approximated GELU."""

    def _forward(self, x: torch.Tensor) -> torch.Tensor:
        """PyTorch-native reference implementation of forward()."""
        coeff = math.sqrt(2.0 / math.pi)
        inner = coeff * (x + 0.044715 * torch.pow(x, 3.0))
        return 0.5 * x * (1.0 + torch.tanh(inner))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(x)
        ops.gelu_new(result, x)
        return result


class FastGELU(nn.Module):
    """Tanh-approximated GELU with a hard-coded sqrt(2/pi) constant."""

    def _forward(self, x: torch.Tensor) -> torch.Tensor:
        """PyTorch-native reference implementation of forward()."""
        inner = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
        return 0.5 * x * (1.0 + torch.tanh(inner))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(x)
        ops.gelu_fast(result, x)
        return result


class ScaledActivation(nn.Module):
    """An activation followed by division by learned per-channel scales.

    Used by quantization methods (e.g. AWQ) that fold activation scaling
    into a trainable parameter.
    """

    def __init__(
        self,
        act_module: nn.Module,
        intermediate_size: int,
        input_is_parallel: bool = True,
        params_dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()
        self.act = act_module
        self.input_is_parallel = input_is_parallel
        if input_is_parallel:
            tp_size = get_tensor_model_parallel_world_size()
            intermediate_size_per_partition = divide(intermediate_size,
                                                     tp_size)
        else:
            intermediate_size_per_partition = intermediate_size
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.scales = nn.Parameter(
            torch.empty(intermediate_size_per_partition, dtype=params_dtype))
        set_weight_attrs(self.scales, {"weight_loader": self.weight_loader})

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(x) / self.scales

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor):
        # With tensor parallelism each rank loads only its shard of the
        # scale vector.
        param_data = param.data
        if self.input_is_parallel:
            tp_rank = get_tensor_model_parallel_rank()
            shard_size = param_data.shape[0]
            loaded_weight = loaded_weight.narrow(0, tp_rank * shard_size,
                                                 shard_size)
        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)


# Canonical activation instances, keyed by lower-case name.
_ACTIVATION_REGISTRY = {
    "gelu": nn.GELU(),
    "gelu_fast": FastGELU(),
    "gelu_new": NewGELU(),
    "gelu_pytorch_tanh": nn.GELU(approximate="tanh"),
    "relu": nn.ReLU(),
}


def get_act_fn(
    act_fn_name: str,
    quant_config: Optional[QuantizationConfig] = None,
    intermediate_size: Optional[int] = None,
    input_is_parallel: bool = True,
    params_dtype: Optional[torch.dtype] = None,
) -> nn.Module:
    """Get an activation function by name."""
    key = act_fn_name.lower()
    act_fn = _ACTIVATION_REGISTRY.get(key)
    if act_fn is None:
        raise ValueError(
            f"Activation function {key!r} is not supported.")

    needs_scaling = (quant_config is not None
                     and key in quant_config.get_scaled_act_names())
    if not needs_scaling:
        return act_fn
    if intermediate_size is None:
        raise ValueError("intermediate_size must be specified for scaled "
                         "activation functions.")
    return ScaledActivation(act_fn, intermediate_size, input_is_parallel,
                            params_dtype)
"BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + 
"BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..d251f9b --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + 
"num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..0ecf814 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + 
"BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + } +} diff --git 
a/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..039a10e --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + 
"GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..3793fca --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + 
}, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json 
b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json new file mode 100644 index 0000000..f4c0f84 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + 
"BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..5c8185c --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + 
"num_warps": 8, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..97c9f44 --- /dev/null 
+++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + 
"BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..0bb423b --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + 
"num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..26bcbf2 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + 
"BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + 
"BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json new file mode 100644 index 0000000..b41f9d4 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + 
"num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..edf2a38 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + 
"BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + 
"BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json new file mode 100644 index 0000000..9287808 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json @@ -0,0 +1,140 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1 + }, + "8": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 5 + }, + "16": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 5 + }, + "24": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 5 + }, + "32": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 3 + }, + "64": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 64, + 
"BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 2 + }, + "128": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 3 + }, + "256": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 5 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 2 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..b2100ce --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + 
"num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + 
"num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..dbc6247 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 
128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..32c0c9d --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + 
"num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + 
"num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000..f578c8d --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + 
"BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json new file mode 100644 index 0000000..2ad07bf --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + 
"GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + } +} diff --git 
a/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000..e341a67 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "48": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "64": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "128": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 4 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + 
"GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 4 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/README b/vllm/model_executor/layers/fused_moe/configs/README new file mode 100644 index 0000000..45d40cb --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/README @@ -0,0 +1,10 @@ +This directory contains tuned configurations for different settings of the fused_moe kernel. +For different settings of +- E (number of experts) +- N (intermediate size) +- device_name (torch.cuda.get_device_name()) +the JSON file contains a mapping from M (batch size) to the chosen configuration. + +The example configurations provided are for the Mixtral model for TP2 on H100 +and TP4 on A100. Mixtral has intermediate size N = 14336, i.e. for TP2 we have +N = 7168 and for TP4 we have N = 3584. 
diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py new file mode 100644 index 0000000..27fa24a --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/fused_moe.py @@ -0,0 +1,479 @@ +"""Fused MoE kernel.""" +import functools +import json +import os +from typing import Any, Dict, Optional, Tuple + +import torch +import triton +import triton.language as tl + +from vllm import _custom_ops as ops +from vllm.logger import init_logger +from vllm.utils import is_hip + +logger = init_logger(__name__) + + +@triton.jit +def fused_moe_kernel( + # Pointers to matrices + a_ptr, + b_ptr, + c_ptr, + a_scale_ptr, + b_scale_ptr, + topk_weights_ptr, + sorted_token_ids_ptr, + expert_ids_ptr, + num_tokens_post_padded_ptr, + # Matrix dimensions + N, + K, + EM, + num_valid_tokens, + # The stride variables represent how much to increase the ptr by when + # moving by 1 element in a particular dimension. E.g. `stride_am` is + # how much to increase `a_ptr` by to get the element one row down + # (A has M rows). + stride_am, + stride_ak, + stride_be, + stride_bk, + stride_bn, + stride_cm, + stride_cn, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + MUL_ROUTED_WEIGHT: tl.constexpr, + top_k: tl.constexpr, + compute_type: tl.constexpr, + use_fp8: tl.constexpr, +): + """ + Implements the fused computation for a Mixture of Experts (MOE) using + token and expert matrices. + + Key Parameters: + - A: The input tensor representing tokens with shape (*, K), where '*' can + be any shape representing batches and K is the feature dimension of + each token. + - B: The stacked MOE weight tensor with shape (E, N, K), where E is + the number of experts, K is the input feature dimension, and N is + the output feature dimension. 
+ - C: The output cache tensor with shape (M, topk, N), where M is the + total number of tokens post padding, topk is the number of times + each token is repeated, and N is the output feature dimension. + - sorted_token_ids: A tensor containing the sorted indices of tokens, + repeated topk times and arranged by the expert index they are + assigned to. + - expert_ids: A tensor containing the indices of the expert for each + block. It determines which expert matrix from B should be used for + each block in A. + This kernel performs the multiplication of a token by its corresponding + expert matrix as determined by `expert_ids`. The sorting of + `sorted_token_ids` by expert index and padding ensures divisibility by + BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix + multiplication across different blocks processed by the same expert. + """ + # ----------------------------------------------------------- + # Map program ids `pid` to the block of C it should compute. + # This is done in a grouped ordering to promote L2 data reuse. + pid = tl.program_id(axis=0) + num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) + num_pid_in_group = GROUP_SIZE_M * num_pid_n + group_id = pid // num_pid_in_group + first_pid_m = group_id * GROUP_SIZE_M + group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) + pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) + pid_n = (pid % num_pid_in_group) // group_size_m + + # ---------------------------------------------------------- + # Create pointers for the first blocks of A and B. 
+ # We will advance this pointer as we move in the K direction + # and accumulate + # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers + # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers + num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) + if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: + return + offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) + token_mask = offs_token < num_valid_tokens + + offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + + offs_k[None, :] * stride_ak) + + off_experts = tl.load(expert_ids_ptr + pid_m) + b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + + offs_bn[None, :] * stride_bn) + + if use_fp8: + a_scale = tl.load(a_scale_ptr) + b_scale = tl.load(b_scale_ptr + off_experts) + + # ----------------------------------------------------------- + # Iterate to compute a block of the C matrix. + # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block + # of fp32 values for higher accuracy. + # `accumulator` will be converted back to fp16 after the loop. + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): + # Load the next block of A and B, generate a mask by checking the + # K dimension. + a = tl.load(a_ptrs, + mask=token_mask[:, None] & + (offs_k[None, :] < K - k * BLOCK_SIZE_K), + other=0.0) + b = tl.load(b_ptrs, + mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, + other=0.0) + # We accumulate along the K dimension. + if use_fp8: + accumulator = tl.dot(a, b, acc=accumulator) + else: + accumulator += tl.dot(a, b) + # Advance the ptrs to the next K block. 
+ a_ptrs += BLOCK_SIZE_K * stride_ak + b_ptrs += BLOCK_SIZE_K * stride_bk + + if MUL_ROUTED_WEIGHT: + moe_weight = tl.load(topk_weights_ptr + offs_token, + mask=token_mask, + other=0) + accumulator = accumulator * moe_weight[:, None] + + if use_fp8: + accumulator = (accumulator * a_scale * b_scale).to(compute_type) + else: + accumulator = accumulator.to(compute_type) + # ----------------------------------------------------------- + # Write back the block of the output + offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[ + None, :] + c_mask = token_mask[:, None] & (offs_cn[None, :] < N) + tl.store(c_ptrs, accumulator, mask=c_mask) + + +def moe_align_block_size( + topk_ids: torch.Tensor, block_size: int, + num_experts: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Aligns the token distribution across experts to be compatible with block + size for matrix multiplication. + + Parameters: + - topk_ids: A tensor of shape [total_tokens, top_k] representing the + top-k expert indices for each token. + - block_size: The block size used in block matrix multiplication. + - num_experts: The total number of experts. + + Returns: + - sorted_token_ids: A tensor containing the sorted token indices according + to their allocated expert. + - expert_ids: A tensor indicating the assigned expert index for each block. + - num_tokens_post_padded: The total number of tokens after padding, + ensuring divisibility by block_size. + + This function pads the number of tokens that each expert needs to process + so that it is divisible by block_size. + Padding ensures that during block matrix multiplication, the dimensions + align correctly. + + Example: + Given topk_ids = [[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]], + block_size = 4, and num_experts = 4: + - We initially have 12 tokens (after repeating 'top_k' times) and 4 experts, + with each expert needing to process 3 tokens. 
+ - As block_size is 4, we pad 1 token for each expert. + - First, flatten topk_ids to [2, 3, 4, 1, 2, 4, 1, 3, 4, 1, 2, 3]. + - Then append padding tokens [12, 12, 12, 12] for each block. + - After sorting by expert index, we obtain token_ids + [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]. + Tokens 12 are non-existent (padding) and are ignored in + the subsequent matrix multiplication. + - The padding ensures that the total number of tokens is now divisible + by block_size for proper block matrix operations. + """ + max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) + sorted_ids = torch.empty((max_num_tokens_padded, ), + dtype=torch.int32, + device=topk_ids.device) + sorted_ids.fill_(topk_ids.numel()) + max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size) + expert_ids = torch.empty((max_num_m_blocks, ), + dtype=torch.int32, + device=topk_ids.device) + num_tokens_post_pad = torch.empty((1), + dtype=torch.int32, + device=topk_ids.device) + ops.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, + expert_ids, num_tokens_post_pad) + return sorted_ids, expert_ids, num_tokens_post_pad + + +def invoke_fused_moe_kernel(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, + A_scale: Optional[torch.Tensor], + B_scale: Optional[torch.Tensor], + topk_weights: torch.Tensor, topk_ids: torch.Tensor, + sorted_token_ids: torch.Tensor, + expert_ids: torch.Tensor, + num_tokens_post_padded: torch.Tensor, + mul_routed_weight: bool, top_k: int, + config: Dict[str, Any], compute_type: tl.dtype, + use_fp8: bool) -> None: + assert topk_weights.stride(1) == 1 + assert sorted_token_ids.stride(0) == 1 + + if not use_fp8: + assert A_scale is None + assert B_scale is None + else: + A, A_scale = ops.scaled_fp8_quant(A, A_scale) + assert B_scale is not None + + grid = lambda META: (triton.cdiv(sorted_token_ids.shape[0], META[ + 'BLOCK_SIZE_M']) * triton.cdiv(B.shape[1], META['BLOCK_SIZE_N']), ) + + fused_moe_kernel[grid]( + A, + B, + C, 
# NOTE(review): this chunk opens mid-way through a Triton kernel launch inside
# invoke_fused_moe_kernel; the arguments below belong to a call whose opening
# bracket is above this chunk.
        A_scale,
        B_scale,
        topk_weights,
        sorted_token_ids,
        expert_ids,
        num_tokens_post_padded,
        B.shape[1],
        B.shape[2],
        sorted_token_ids.shape[0],
        topk_ids.numel(),
        A.stride(0),
        A.stride(1),
        B.stride(0),
        B.stride(2),
        B.stride(1),
        C.stride(1),
        C.stride(2),
        MUL_ROUTED_WEIGHT=mul_routed_weight,
        top_k=top_k,
        compute_type=compute_type,
        use_fp8=use_fp8,
        **config,
    )


def get_config_file_name(E: int, N: int, dtype: Optional[str]) -> str:
    """Build the JSON file name of a fused-MoE tuning table.

    The name encodes the expert count E, the intermediate size N, the
    current MUSA device name (spaces replaced by underscores) and, when
    ``dtype`` is given, a dtype selector suffix.
    """
    device_name = torch.musa.get_device_name().replace(" ", "_")
    dtype_selector = "" if not dtype else f",dtype={dtype}"
    return f"E={E},N={N},device_name={device_name}{dtype_selector}.json"


@functools.lru_cache
def get_moe_configs(E: int, N: int,
                    dtype: Optional[str]) -> Optional[Dict[int, Any]]:
    """
    Return optimized configurations for the fused MoE kernel.

    The return value will be a dictionary that maps an irregular grid of
    batch sizes to configurations of the fused_moe kernel. To evaluate the
    kernel on a given batch size bs, the closest batch size in the grid should
    be picked and the associated configuration chosen to invoke the kernel.
    """

    # First look up if an optimized configuration is available in the configs
    # directory
    json_file_name = get_config_file_name(E, N, dtype)

    config_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "configs", json_file_name)
    if os.path.exists(config_file_path):
        with open(config_file_path) as f:
            logger.info("Using configuration from %s for MoE layer.",
                        config_file_path)
            # If a configuration has been found, return it keyed by int batch
            # size (JSON object keys are strings).
            return {int(key): val for key, val in json.load(f).items()}

    # If no optimized configuration is available, we will use the default
    # configuration
    return None


def fused_moe(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    inplace: bool = False,
    override_config: Optional[Dict[str, Any]] = None,
    use_fp8: bool = False,
    w1_scale: Optional[torch.Tensor] = None,
    w2_scale: Optional[torch.Tensor] = None,
    a1_scale: Optional[torch.Tensor] = None,
    a2_scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    This function computes a Mixture of Experts (MoE) layer using two sets of
    weights, w1 and w2, and top-k gating mechanism.

    Parameters:
    - hidden_states (torch.Tensor): The input tensor to the MoE layer.
    - w1 (torch.Tensor): The first set of expert weights.
    - w2 (torch.Tensor): The second set of expert weights.
    - gating_output (torch.Tensor): The output of the gating operation
        (before softmax).
    - topk (int): The number of top-k experts to select.
    - renormalize (bool): If True, renormalize the top-k weights to sum to 1.
    - inplace (bool): If True, perform the operation in-place.
        Defaults to False.
    - override_config (Optional[Dict[str, Any]]): Optional override
        for the kernel configuration.
    - use_fp8 (bool): If True, use fp8 arithmetic to compute the inner
        products for w1 and w2. Defaults to False.
    - w1_scale (Optional[torch.Tensor]): Optional scale to be used for
        w1.
    - w2_scale (Optional[torch.Tensor]): Optional scale to be used for
        w2.

    Returns:
    - torch.Tensor: The output tensor after applying the MoE layer.
    """
    # Check constraints.
    assert hidden_states.shape[0] == gating_output.shape[0], (
        "Number of tokens mismatch")
    assert hidden_states.shape[1] == w1.shape[2], "Hidden size mismatch"
    assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch"
    assert hidden_states.is_contiguous(), "Hidden_states must be contiguous"
    assert w1.is_contiguous(), "Expert weights1 must be contiguous"
    assert w2.is_contiguous(), "Expert weights2 must be contiguous"
    assert hidden_states.dtype in [
        torch.float32, torch.float16, torch.bfloat16
    ]
    # M = number of tokens; E = number of experts; N = intermediate size of w1.
    M, _ = hidden_states.shape
    E, N, _ = w1.shape

    if is_hip():
        # The MoE kernels are not yet supported on ROCm.
        routing_weights = torch.softmax(gating_output,
                                        dim=-1,
                                        dtype=torch.float32)
        topk_weights, topk_ids = torch.topk(routing_weights, topk, dim=-1)
    else:
        import vllm._moe_C as moe_kernels

        topk_weights = torch.empty(M,
                                   topk,
                                   dtype=torch.float32,
                                   device=hidden_states.device)
        topk_ids = torch.empty(M,
                               topk,
                               dtype=torch.int32,
                               device=hidden_states.device)
        token_expert_indicies = torch.empty(M,
                                            topk,
                                            dtype=torch.int32,
                                            device=hidden_states.device)
        # Custom fused softmax+top-k kernel; fills the three output buffers.
        moe_kernels.topk_softmax(
            topk_weights,
            topk_ids,
            token_expert_indicies,
            gating_output.float(),  # TODO(woosuk): Optimize this.
        )
        del token_expert_indicies  # Not used. Will be used in the future.
    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)

    if override_config:
        config = override_config
    else:
        # First try to load optimal config from the file
        configs = get_moe_configs(E, w2.shape[2],
                                  "float8" if use_fp8 else None)

        if configs:
            # If an optimal configuration map has been found, look up the
            # optimal config
            config = configs[min(configs.keys(), key=lambda x: abs(x - M))]
        else:
            # Else use the default config
            config = {
                'BLOCK_SIZE_M': 64,
                'BLOCK_SIZE_N': 64,
                'BLOCK_SIZE_K': 32,
                'GROUP_SIZE_M': 8
            }

            # Small-batch fallback: fewer tokens than experts.
            if M <= E:
                config = {
                    'BLOCK_SIZE_M': 16,
                    'BLOCK_SIZE_N': 32,
                    'BLOCK_SIZE_K': 64,
                    'GROUP_SIZE_M': 1
                }

    intermediate_cache1 = torch.empty((M, topk_ids.shape[1], N),
                                      device=hidden_states.device,
                                      dtype=hidden_states.dtype)
    intermediate_cache2 = torch.empty((M * topk_ids.shape[1], N // 2),
                                      device=hidden_states.device,
                                      dtype=hidden_states.dtype)
    intermediate_cache3 = torch.empty((M, topk_ids.shape[1], w2.shape[1]),
                                      device=hidden_states.device,
                                      dtype=hidden_states.dtype)

    sorted_token_ids, expert_ids, num_tokens_post_padded = moe_align_block_size(
        topk_ids, config['BLOCK_SIZE_M'], E)
    compute_type = (tl.bfloat16
                    if hidden_states.dtype == torch.bfloat16 else tl.float16)

    invoke_fused_moe_kernel(hidden_states,
                            w1,
                            intermediate_cache1,
                            a1_scale,
                            w1_scale,
                            topk_weights,
                            topk_ids,
                            sorted_token_ids,
                            expert_ids,
                            num_tokens_post_padded,
                            False,
                            topk_ids.shape[1],
                            config,
                            compute_type=compute_type,
                            use_fp8=use_fp8)

    # SiLU(gate) * up over the first GEMM's output (N is split in half).
    ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, N))

    invoke_fused_moe_kernel(intermediate_cache2,
                            w2,
                            intermediate_cache3,
                            a2_scale,
                            w2_scale,
                            topk_weights,
                            topk_ids,
                            sorted_token_ids,
                            expert_ids,
                            num_tokens_post_padded,
                            True,
                            1,
                            config,
                            compute_type=compute_type,
                            use_fp8=use_fp8)

    # NOTE(review): .view(*intermediate_cache3.shape) is a no-op view of the
    # tensor's own shape in both branches below; it could be dropped.
    if inplace:
        return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape),
                         dim=1,
                         out=hidden_states)
    return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape),
                     dim=1)
diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py
new file mode 100644
index 0000000..8de0794
--- /dev/null
+++ b/vllm/model_executor/layers/layernorm.py
@@ -0,0 +1,71 @@
"""Custom normalization layers."""
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from vllm import _custom_ops as ops


class RMSNorm(nn.Module):
    """Root mean square normalization.

    Computes x -> w * x / sqrt(E[x^2] + eps) where w is the learned weight.
    Refer to https://arxiv.org/abs/1910.07467
    """

    def __init__(
        self,
        hidden_size: int,
        eps: float = 1e-6,
    ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def _forward(
        self,
        x: torch.Tensor,
        residual: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """PyTorch-native implementation equivalent to forward()."""
        orig_dtype = x.dtype
        # Accumulate in fp32 for numerical stability, then cast back.
        x = x.to(torch.float32)
        if residual is not None:
            x = x + residual.to(torch.float32)
            residual = x.to(orig_dtype)

        variance = x.pow(2).mean(dim=-1, keepdim=True)
        x = x * torch.rsqrt(variance + self.variance_epsilon)
        x = x.to(orig_dtype) * self.weight
        if residual is None:
            return x
        else:
            return x, residual

    def forward(
        self,
        x: torch.Tensor,
        residual: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Apply RMSNorm via the custom fused kernels.

        NOTE(review): ops.fused_add_rms_norm appears to update x and
        residual in place (its return value is unused) — confirm against
        the kernel's contract.
        """
        if residual is not None:
            ops.fused_add_rms_norm(
                x,
                residual,
                self.weight.data,
                self.variance_epsilon,
            )
            return x, residual
        out = torch.empty_like(x)
        ops.rms_norm(
            out,
            x,
            self.weight.data,
            self.variance_epsilon,
        )
        return out

    def extra_repr(self) -> str:
        s = f"hidden_size={self.weight.data.size(0)}"
        s += f", eps={self.variance_epsilon}"
        return s
diff --git
a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py
new file mode 100644
index 0000000..4d0aab5
--- /dev/null
+++ b/vllm/model_executor/layers/linear.py
@@ -0,0 +1,709 @@
from abc import abstractmethod
from typing import List, Optional

import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter

from vllm.distributed import (divide, get_tensor_model_parallel_rank,
                              get_tensor_model_parallel_world_size,
                              split_tensor_along_last_dim,
                              tensor_model_parallel_all_gather,
                              tensor_model_parallel_all_reduce)
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.utils import set_weight_attrs

logger = init_logger(__name__)


def adjust_marlin_shard(param, shard_size, shard_offset):
    """Scale shard size/offset by the Marlin tile size when the param is a
    Marlin-format weight; otherwise return them unchanged."""
    marlin_tile_size = getattr(param, "marlin_tile_size", None)
    if marlin_tile_size is None:
        return shard_size, shard_offset

    return shard_size * marlin_tile_size, shard_offset * marlin_tile_size


class LinearMethodBase(QuantizeMethodBase):
    """Base class for different (maybe quantized) linear methods."""

    @abstractmethod
    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """Create weights for a linear layer.
           The weights will be set as attributes of the layer.

        Args:
            layer: The layer that is using the LinearMethodBase factory.
            input_size_per_partition: Size of the weight input dim on rank X.
            output_partition_sizes: Sizes of the output dim of each logical
                weight on rank X. E.g., output_partition_sizes for QKVLinear
                is a list contains the width of Wq, Wk, Wv on rank X.
            input_size: Size of the input dim of the weight across all ranks.
            output_size: Size of the output dim of the weight across all ranks.
            params_dtype: Datatype of the parameters.
        """
        raise NotImplementedError

    @abstractmethod
    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Apply the weights in layer to the input tensor.

        Expects create_weights to have been called before on the layer."""
        raise NotImplementedError


class UnquantizedLinearMethod(LinearMethodBase):
    """Linear method without quantization.

    Args:
        separate_bias_add: If true, add bias separately after matrix
                           multiplication.
    """

    def __init__(self, separate_bias_add: bool = False):
        self.separate_bias_add = separate_bias_add

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        # The full (possibly merged) output width on this rank.
        output_size_per_partition = sum(output_partition_sizes)
        weight = Parameter(torch.empty(output_size_per_partition,
                                       input_size_per_partition,
                                       dtype=params_dtype),
                           requires_grad=False)
        set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, extra_weight_attrs)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        weight = layer.weight
        if self.separate_bias_add:
            if bias is not None:
                return F.linear(x, weight) + bias
            return F.linear(x, weight)
        # NOTE(review): x is moved to the weight's device here — presumably a
        # MUSA-port accommodation; confirm callers rely on this.
        return F.linear(x.to(weight.device), weight, bias)


class LinearBase(torch.nn.Module):
    """Base linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()

        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.skip_bias_add = skip_bias_add
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype
        if quant_config is None:
            self.quant_method: Optional[
                QuantizeMethodBase] = UnquantizedLinearMethod()
        else:
            self.quant_method = quant_config.get_quant_method(self)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError


class ReplicatedLinear(LinearBase):
    """Replicated linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__(input_size, output_size, skip_bias_add, params_dtype,
                         quant_config)

        # All the linear layer supports quant method.
        assert self.quant_method is not None
        self.quant_method.create_weights(self, self.input_size,
                                         [self.output_size], self.input_size,
                                         self.output_size, self.params_dtype)

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=self.params_dtype))
            set_weight_attrs(self.bias, {"output_dim": 0})
        else:
            self.register_parameter("bias", None)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Returns (output, output_bias); output_bias is non-None only when
        # skip_bias_add is set (the caller then fuses the bias add).
        bias = self.bias if not self.skip_bias_add else None
        assert self.quant_method is not None
        output = self.quant_method.apply(self, x, bias)
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        return s


class ColumnParallelLinear(LinearBase):
    """Linear layer with column parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].

    Args:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs, otherwise, every GPU will have its output
                       which is Y_i = XA_i
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. we
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
        output_sizes: list of output sizes packed into one output, like for QKV
                      the list would be size 3.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        gather_output: bool = False,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        output_sizes: Optional[List[int]] = None,
    ):
        super().__init__(input_size, output_size, skip_bias_add, params_dtype,
                         quant_config)

        self.gather_output = gather_output

        # Divide the weight matrix along the last dimension.
        tp_size = get_tensor_model_parallel_world_size()
        self.output_size_per_partition = divide(output_size, tp_size)
        if output_sizes is None:
            output_sizes = [output_size]
        # All the linear layer supports quant method.
        assert self.quant_method is not None
        self.quant_method.create_weights(self,
                                         self.input_size,
                                         [x // tp_size for x in output_sizes],
                                         self.input_size,
                                         self.output_size,
                                         self.params_dtype,
                                         weight_loader=self.weight_loader)
        if bias:
            # NOTE(review): uses the local params_dtype argument (may be
            # None -> default dtype), not self.params_dtype.
            self.bias = Parameter(
                torch.empty(self.output_size_per_partition,
                            dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        """Copy this rank's output-dim shard of a full checkpoint weight
        into param."""
        # Special case for Fp8 scales.
        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
                                           None)

        tp_rank = get_tensor_model_parallel_rank()
        output_dim = getattr(param, "output_dim", None)
        param_data = param.data
        if output_dim is not None:
            shard_size = param_data.shape[output_dim]
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
        # Special case for Fp8 scales.
        elif fp8_scales_shard_indexer is not None:
            param_data, loaded_weight = fp8_scales_shard_indexer(param_data,
                                                                 loaded_weight,
                                                                 shard_id=0)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def forward(self, input_):
        bias = self.bias if not self.skip_bias_add else None

        # Matrix multiply.
        assert self.quant_method is not None
        output_parallel = self.quant_method.apply(self, input_, bias)
        if self.gather_output:
            # All-gather across the partitions.
            output = tensor_model_parallel_all_gather(output_parallel)
        else:
            output = output_parallel
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size_per_partition}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={get_tensor_model_parallel_world_size()}"
        s += f", gather_output={self.gather_output}"
        return s


class MergedColumnParallelLinear(ColumnParallelLinear):
    """Packed linear layers with column parallelism.

    Similar to ColumnParallelLinear, but the weight matrix is concatenated
    along the output dimension. When the weight matrix is loaded, the
    different partitions are sharded separately.

    Args:
        input_size: input dimension of the linear layer.
        output_sizes: list of output dimensions of the linear layer.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make the output
                       available to all GPUs, otherwise, every GPU will have
                       its own output.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. we
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
    """

    def __init__(
        self,
        input_size: int,
        output_sizes: List[int],
        bias: bool = True,
        gather_output: bool = False,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        self.output_sizes = output_sizes
        tp_size = get_tensor_model_parallel_world_size()
        assert all(output_size % tp_size == 0 for output_size in output_sizes)
        super().__init__(input_size, sum(output_sizes), bias, gather_output,
                         skip_bias_add, params_dtype, quant_config,
                         self.output_sizes)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[int] = None):
        """Load one logical sub-weight (or a pre-packed full weight when
        loaded_shard_id is None, which recurses per shard)."""

        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)
        # Special case for Fp8 scales.
        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
                                           None)

        if loaded_shard_id is None:
            # Loaded weight is already packed.
            if output_dim is None:
                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            current_shard_offset = 0
            shard_offsets = []
            for i, output_size in enumerate(self.output_sizes):
                shard_offsets.append((i, current_shard_offset, output_size))
                current_shard_offset += output_size
            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantization.
                # If quantized, we need to adjust the offset and size to account
                # for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor
                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        assert loaded_shard_id < len(self.output_sizes)
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        if output_dim is not None:
            shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
            shard_size = self.output_sizes[loaded_shard_id] // tp_size
            # Special case for quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor
                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_offset = loaded_shard_id * shard_size
            param_data = param_data.narrow(0, shard_offset, shard_size)
        # Special case for Fp8 scales.
        elif fp8_scales_shard_indexer is not None:
            param_data, loaded_weight = fp8_scales_shard_indexer(
                param_data, loaded_weight, loaded_shard_id)

        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "MergedColumnParallelLinear, assume the weight is "
                    "the same for all partitions.")
        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)


class QKVParallelLinear(ColumnParallelLinear):
    """Linear layers for the attention's QKV transformation.

    Linear layers for the linear transformation of the query, key, and value
    vectors in the attention layer. The weight matrix is concatenated along
    the output dimension. The layer is parallelized along the head dimension.
    When the number of key/value heads is smaller than the number of query
    heads (e.g., multi-query/grouped-query attention), the key/value head may
    be replicated while the query heads are partitioned.

    Args:
        hidden_size: input hidden state size of the transformer.
        head_size: size of each attention head.
        total_num_heads: total number of attention query heads.
        total_num_kv_heads: total number of attention key/value heads. If
                            None, assume total_num_kv_heads = total_num_heads.
        bias: If true, add bias.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. we
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
    """

    def __init__(
        self,
        hidden_size: int,
        head_size: int,
        total_num_heads: int,
        total_num_kv_heads: Optional[int] = None,
        bias: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        self.hidden_size = hidden_size
        self.head_size = head_size
        self.total_num_heads = total_num_heads
        if total_num_kv_heads is None:
            total_num_kv_heads = total_num_heads
        self.total_num_kv_heads = total_num_kv_heads
        # Divide the weight matrix along the last dimension.
        tp_size = get_tensor_model_parallel_world_size()
        self.num_heads = divide(self.total_num_heads, tp_size)
        if tp_size >= self.total_num_kv_heads:
            # More ranks than kv heads: each rank holds one kv head,
            # replicated across num_kv_head_replicas ranks.
            self.num_kv_heads = 1
            self.num_kv_head_replicas = divide(tp_size,
                                               self.total_num_kv_heads)
        else:
            self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
            self.num_kv_head_replicas = 1
        input_size = self.hidden_size
        output_size = (self.num_heads +
                       2 * self.num_kv_heads) * tp_size * self.head_size
        output_sizes = [
            self.num_heads * tp_size * self.head_size,
            self.num_kv_heads * tp_size * self.head_size,
            self.num_kv_heads * tp_size * self.head_size
        ]

        super().__init__(input_size, output_size, bias, False, skip_bias_add,
                         params_dtype, quant_config, output_sizes)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[str] = None):
        """Load one of the "q"/"k"/"v" sub-weights (or a pre-packed full
        QKV weight when loaded_shard_id is None, which recurses per shard)."""
        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)
        # Special case for Fp8 scales.
        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
                                           None)

        if loaded_shard_id is None:
            # Loaded weight is already packed.
            if output_dim is None:
                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            shard_offsets = [
                # (shard_id, shard_offset, shard_size)
                ("q", 0, self.total_num_heads * self.head_size),
                ("k", self.total_num_heads * self.head_size,
                 self.total_num_kv_heads * self.head_size),
                ("v", (self.total_num_heads + self.total_num_kv_heads) *
                 self.head_size, self.total_num_kv_heads * self.head_size),
            ]
            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantized Weights.
                # If quantized, we need to adjust the offset and size to account
                # for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor

                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        tp_rank = get_tensor_model_parallel_rank()
        assert loaded_shard_id in ["q", "k", "v"]
        if output_dim is not None:
            if loaded_shard_id == "q":
                shard_offset = 0
                shard_size = self.num_heads * self.head_size
            elif loaded_shard_id == "k":
                shard_offset = self.num_heads * self.head_size
                shard_size = self.num_kv_heads * self.head_size
            elif loaded_shard_id == "v":
                shard_offset = (self.num_heads +
                                self.num_kv_heads) * self.head_size
                shard_size = self.num_kv_heads * self.head_size
            # Special case for Quantized Weights.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor

                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            if loaded_shard_id == "q":
                shard_id = tp_rank
            else:
                # kv heads may be replicated across ranks.
                shard_id = tp_rank // self.num_kv_head_replicas
            start_idx = shard_id * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_index = ["q", "k", "v"].index(loaded_shard_id)
            param_data = param_data.narrow(0, shard_index * shard_size,
                                           shard_size)
        # Special case for Fp8 scales.
        elif fp8_scales_shard_indexer is not None:
            param_data, loaded_weight = fp8_scales_shard_indexer(
                param_data, loaded_weight, loaded_shard_id)
        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "QKVParallelLinear, assume the weight is the same "
                    "for all partitions.")
        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)


class RowParallelLinear(LinearBase):
    """Linear layer with row parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias. Note that bias is not parallelized.
        input_is_parallel: If true, we assume that the input is already
                           split across the GPUs and we do not split
                           again.
        skip_bias_add: This was added to enable performance optimization where
                       bias can be fused with other element-wise operations.
                       We skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        input_is_parallel: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        reduce_results: bool = True,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__(input_size, output_size, skip_bias_add, params_dtype,
                         quant_config)

        self.input_is_parallel = input_is_parallel
        self.reduce_results = reduce_results

        # Divide the weight matrix along the last dimension.
        self.tp_size = get_tensor_model_parallel_world_size()
        self.input_size_per_partition = divide(input_size, self.tp_size)
        # All the linear layer supports quant method.
        assert self.quant_method is not None
        self.quant_method.create_weights(self,
                                         self.input_size_per_partition,
                                         [self.output_size],
                                         self.input_size,
                                         self.output_size,
                                         self.params_dtype,
                                         weight_loader=self.weight_loader)

        if not reduce_results and (bias and not skip_bias_add):
            # Without the all-reduce, each rank would add the full bias to a
            # partial sum.
            raise ValueError("When not reduce the results, adding bias to the "
                             "results can lead to incorrect results")

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        """Copy this rank's input-dim shard of a full checkpoint weight
        into param."""
        # Special case for Fp8 scales.
        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
                                           None)

        tp_rank = get_tensor_model_parallel_rank()
        input_dim = getattr(param, "input_dim", None)
        param_data = param.data
        if input_dim is not None:
            shard_size = param_data.shape[input_dim]
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(input_dim, start_idx,
                                                 shard_size)
        # Special case for Fp8 scales.
        elif fp8_scales_shard_indexer is not None:
            param_data, loaded_weight = fp8_scales_shard_indexer(param_data,
                                                                 loaded_weight,
                                                                 shard_id=0)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def forward(self, input_):
        # Set up backprop all-reduce.
        if self.input_is_parallel:
            input_parallel = input_
        else:
            tp_rank = get_tensor_model_parallel_rank()
            splitted_input = split_tensor_along_last_dim(
                input_, num_partitions=self.tp_size)
            input_parallel = splitted_input[tp_rank].contiguous()

        # Matrix multiply.
        assert self.quant_method is not None
        output_parallel = self.quant_method.apply(self, input_parallel)
        if self.reduce_results and self.tp_size > 1:
            output_ = tensor_model_parallel_all_reduce(output_parallel)
        else:
            output_ = output_parallel

        if not self.skip_bias_add:
            output = output_ + self.bias if self.bias is not None else output_
            output_bias = None
        else:
            output = output_
            output_bias = self.bias
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"input_features={self.input_size_per_partition}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={self.tp_size}"
        s += f", reduce_results={self.reduce_results}"
        return s
diff --git a/vllm/model_executor/layers/logits_processor.py b/vllm/model_executor/layers/logits_processor.py
new file mode 100644
index 0000000..91eb969
--- /dev/null
+++ b/vllm/model_executor/layers/logits_processor.py
@@ -0,0 +1,115 @@
"""A layer that compute logits from hidden_stats."""
from typing import Optional

import torch
import torch.nn as nn

from vllm.distributed import tensor_model_parallel_gather
from vllm.model_executor.sampling_metadata import SamplingMetadata


class LogitsProcessor(nn.Module):
    """Process logits and apply logits processors from sampling metadata.

    This layer does the following:
    1. Gather logits from model hidden_states.
    2. Scale logits if needed.
    3. Apply logits processors (if any).
    """

    def __init__(self,
                 vocab_size: int,
                 org_vocab_size: Optional[int] = None,
                 scale: Optional[float] = 1.0,
                 logits_as_input: bool = False) -> None:
        """
        Args:
            scale: A scaling factor to apply to the logits.
        """
        super().__init__()
        self.scale = scale
        self.vocab_size = vocab_size
        # Whether the input is logits (default is hidden states).
        self.logits_as_input = logits_as_input
        # original vocabulary size (without LoRA).
        self.org_vocab_size = org_vocab_size or vocab_size

    def forward(
        self,
        embedding: torch.Tensor,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        embedding_bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if self.logits_as_input:
            logits = hidden_states
        else:
            hidden_states = _prune_hidden_states(hidden_states,
                                                 sampling_metadata)

            # Get the logits for the next tokens.
            logits = self._get_logits(hidden_states, embedding, embedding_bias)

        # logits is None on non-zero TP ranks after the gather in _get_logits.
        if logits is not None:
            logits *= self.scale

            # Apply logits processors (if any).
            logits = _apply_logits_processors(logits, sampling_metadata)

        return logits

    def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor,
                    embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
        # Get the logits for the next tokens.
        logits = torch.matmul(hidden_states, embedding.t())
        if embedding_bias is not None:
            logits += embedding_bias
        logits = tensor_model_parallel_gather(logits)
        # Remove paddings in vocab (if any).
        if logits is not None:
            logits = logits[:, :self.org_vocab_size]
        return logits

    def extra_repr(self) -> str:
        s = f"vocab_size={self.vocab_size}"
        # NOTE(review): "forg_vocab_size" below looks like a typo for
        # "org_vocab_size"; left as-is since it is a runtime string.
        s += f", forg_vocab_size={self.org_vocab_size}"
        s += f", scale={self.scale}, logits_as_input={self.logits_as_input}"
        return s


def _prune_hidden_states(
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    """Keep only the rows whose next token will actually be sampled."""
    return hidden_states.index_select(0,
                                      sampling_metadata.selected_token_indices)


def _apply_logits_processors(
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    """Run each sequence's user-supplied logits processors on its row."""
    found_logits_processors = False
    logits_processed = 0
    for seq_group in sampling_metadata.seq_groups:
        seq_ids = seq_group.seq_ids
        sampling_params = seq_group.sampling_params
        logits_processors = sampling_params.logits_processors

        if logits_processors:
            found_logits_processors = True
            for seq_id, logits_row_idx in zip(seq_ids,
                                              seq_group.sample_indices):
                logits_row = logits[logits_row_idx]
                token_ids = seq_group.seq_data[seq_id].output_token_ids
                for logits_processor in logits_processors:
                    logits_row = logits_processor(token_ids, logits_row)
                logits[logits_row_idx] = logits_row

        logits_processed += len(seq_group.sample_indices) + len(
            seq_group.prompt_logprob_indices)

    if found_logits_processors:
        # verifies that no rows in logits were missed unexpectedly
        assert logits_processed == logits.shape[0]
    return logits
diff --git a/vllm/model_executor/layers/ops/__init__.py b/vllm/model_executor/layers/ops/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vllm/model_executor/layers/ops/rand.py b/vllm/model_executor/layers/ops/rand.py
new file mode 100644
index 0000000..4a429e3
--- /dev/null
+++ b/vllm/model_executor/layers/ops/rand.py
@@ -0,0 +1,157 @@
from typing import Optional, Union

import torch
import triton
import triton.language as tl


def seeded_uniform(
    *size,
    seeds: torch.Tensor,
    out: 
Optional[torch.Tensor] = None, + dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str]] = None, + pin_memory: Optional[bool] = False, +) -> torch.Tensor: + """Similar to torch.rand, but allows for seeds to be set per row. + + seeds must be a 1d tensor. The output tensor may be 1d, 2d, or 3d. + If it is 3d, the additional seeds needed will be derived automatically + in a deterministic fashion: + [ + row 0: [columns_with_seed_0], [columns_with_seed0^1], ... + ] + """ + n_dims = len(size) + + if n_dims > 3: + raise ValueError("seeded_uniform only supports up to 3D tensors") + + if out is None: + out = torch.empty(*size, + dtype=dtype, + device=device, + pin_memory=pin_memory) + elif out.shape != size: + raise ValueError("shape of out and size must be the same") + + if n_dims == 3: + n_rows, n_3d, n_cols = out.shape + stride_row = out.stride(0) + stride_3d = out.stride(1) + elif n_dims == 2: + n_rows, n_cols = out.shape + n_3d = 1 + stride_row = out.stride(0) + stride_3d = 1 + else: + n_cols = out.shape[0] + n_rows = 1 + n_3d = 1 + stride_row = 1 + stride_3d = 1 + + if seeds.ndim != 1: + raise ValueError("seeds must be a 1D tensor") + + if seeds.numel() != n_rows: + raise ValueError( + "seeds must have the same number of elements as out has rows") + + # The philox PRNG Triton uses generates 4 random numbers at once. + # Therefore, the most efficient use of it is to divide the + # block size by 4, and then save the generated random numbers to + # each of the 4 slices of the tensor. + full_block_size = triton.next_power_of_2(n_cols) + philox_block_size = max(full_block_size // 4, 1) + n_slices = full_block_size // philox_block_size + num_warps = 4 + # Manual tuning. This seems to give best performance on A100 for + # simple kernels like this. 
+ if philox_block_size >= 8192: + num_warps = 32 + elif philox_block_size >= 4096: + num_warps = 16 + elif philox_block_size >= 2048: + num_warps = 8 + + _seeded_uniform_triton[(n_rows, n_3d)]( + out, + seeds, + stride_row, + stride_3d, + seeds.stride(0), + n_rows, + n_3d, + n_cols, + n_slices=n_slices, + num_warps=num_warps, + block_size=philox_block_size, + ) + return out + + +@triton.jit +def _seeded_uniform_triton( + out_ptr: torch.Tensor, + seed_ptr: torch.Tensor, + out_row_stride: int, + out_3d_stride: int, + seed_row_stride: int, + n_rows: int, + n_3d: int, + n_cols: int, + n_slices: tl.constexpr, + block_size: tl.constexpr, +): + """ + Generate a random float32 number in [0, 1) for each element in the output + tensor. The random numbers in a row generated using the seed for that row. + + Args: + out_ptr: The output tensor. + seed_ptr: The per-row seeds to use for random number generation. + out_row_stride: The stride between rows of the output tensor. + out_3d_stride: The stride between 3D slices of the output tensor. + seed_row_stride: The stride between rows of the seed tensor. + n_rows: The number of rows in the output tensor. + n_3d: The size of second dimension of the output tensor, + if output tensor is 3D. + n_cols: The number of columns in the output tensor. + n_slices: The number of philox outputs to use. + """ + tl.static_assert(n_slices > 0 and n_slices <= 4, "0 < n_slices <= 4") + + # Get the row index. + row_idx = tl.program_id(axis=0) + three_d_idx = tl.program_id(axis=1) + + philox_offsets = tl.arange(0, block_size) + # Get the seed for the current element. + seed = tl.load(seed_ptr + row_idx * seed_row_stride) + if three_d_idx > 0: + seed ^= three_d_idx + # Generate random numbers in [0, 1). 
# Lower clamp used to avoid log(0) when converting uniform noise.
_EPS = 1e-6

# This is a hardcoded limit in Triton (max block size).
MAX_TRITON_N_COLS = 131072


def get_num_triton_sampler_splits(n_cols: int) -> int:
    """Get the number of splits to use for Triton sampling.

    Triton has a limit on the number of columns it can handle, so we need to
    split the tensor and call the kernel multiple times if it's too large.
    """
    # Ceiling division without floats: -(-a // b) == ceil(a / b).
    return -(-n_cols // MAX_TRITON_N_COLS)
def sample(
    probs: torch.Tensor,
    seeds: torch.Tensor,
    *,
    max_best_of: int = 1,
    sample_indices: Optional[torch.Tensor] = None,
    logprobs: Optional[torch.Tensor] = None,
    modify_greedy_probs: bool = False,
    save_logprobs: bool = False,
    _save_modified_probs: bool = False,  # pylint: disable=invalid-name
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
    """Sample tokens from probs. with per-sequence seeds.

    Can sample from a subset of sequences through sample_indices.

    Args:
        probs: Probabilities to sample from.
            shape = [batch_size, vocab_size]
        seeds: Per-sequence seed values.
            shape = [n, math.ceil(vocab_size / MAX_TRITON_N_COLS)]
        max_best_of: Number of samples to generate per sequence.
            Sequence seed will be incremented by 1 each time.
        sample_indices: Indices of sequences to sample from.
            If not provided, will sample from all sequences.
            shape = [n]
        logprobs: Log-probabilities of the sampled tokens.
            Only used for saving the logprobs if save_logprobs is True.
            shape = [batch_size, vocab_size]
        modify_greedy_probs: Whether to modify the greedy probabilities
            for speculative sampling (sampled token = 1.0,
            everything else = 0.0).
        save_logprobs: Whether to save the log-probabilities of the
            sampled tokens to a tensor.
        _save_modified_probs: Whether to save the modified probabilities
            (including gumbel noise) of the sampled tokens to a tensor.
            DOES NOT include the modification done by modify_greedy_probs
            (because we want to use the unmodified probs to pick the best
            split in case of multi-split sampling).
            This is exposed only for testing.

    Returns:
        sampled_tokens: shape = [n, max_best_of]
        sampled_logprobs: shape = [n, max_best_of] if save_logprobs else None
        sampled_modified_probs: shape = [n, max_best_of]
            if save_modified_probs else None

    Raises:
        ValueError: if save_logprobs is True but no logprobs tensor is given.
    """
    # Default: sample every row of probs.
    if sample_indices is None:
        sample_indices = torch.arange(0, probs.shape[0], device=probs.device)

    sampled_tokens_size = (sample_indices.size(0), max_best_of)
    if save_logprobs:
        if logprobs is None:
            raise ValueError(
                "logprobs tensor must be provided if save_logprobs is True")
        sampled_logprobs_size = sampled_tokens_size
    else:
        # Empty tensors to invoke the kernel
        sampled_logprobs_size = (0, 0)
        # Reuse probs as a dummy so the kernel always has a valid pointer.
        logprobs = probs

    assert logprobs is not None
    if _save_modified_probs:
        sampled_modified_probs_size = sampled_tokens_size
    else:
        # Empty tensors to invoke the kernel
        sampled_modified_probs_size = (0, 0)

    # If the number of columns in probs is too large for Triton to handle,
    # we split the tensor and sample from each split separately, and then
    # do an argmax+gather to combine the results.
    n_splits = get_num_triton_sampler_splits(probs.shape[1])
    if n_splits > 1:
        (sampled_tokens, sampled_logprobs,
         sampled_modified_probs) = _multi_split_sample(
             probs,
             seeds,
             n_splits,
             sampled_tokens_size,
             sampled_logprobs_size,
             sample_indices,
             logprobs=logprobs,
             modify_greedy_probs=modify_greedy_probs,
             save_logprobs=save_logprobs)
    else:
        # Single-split fast path: allocate outputs and launch one kernel.
        sampled_tokens = torch.empty(sampled_tokens_size,
                                     dtype=torch.long,
                                     device=probs.device)
        sampled_logprobs = torch.empty(sampled_logprobs_size,
                                       dtype=probs.dtype,
                                       device=probs.device)
        sampled_modified_probs = torch.empty(sampled_modified_probs_size,
                                             dtype=probs.dtype,
                                             device=probs.device)
        n_samples = sample_indices.shape[0]
        n_cols = probs.shape[1]
        # Per-(sample, best_of, column) uniform noise; the kernel converts it
        # to exponential gumbel noise (see _sample docstring).
        uniform_noise = seeded_uniform(n_samples,
                                       max_best_of,
                                       n_cols,
                                       seeds=seeds.flatten(),
                                       device=probs.device,
                                       dtype=probs.dtype)

        _sample(
            probs,
            logprobs,
            sample_indices,
            sampled_tokens,
            sampled_logprobs,
            sampled_modified_probs,
            seeds,
            uniform_noise,
            modify_greedy_probs=modify_greedy_probs,
            save_logprobs=save_logprobs,
            save_modified_probs=_save_modified_probs,
        )
    return (sampled_tokens, sampled_logprobs if save_logprobs else None,
            sampled_modified_probs if _save_modified_probs else None)
+ output_logprobs [n, n_best]: Output tensor to store logprobs in. + output_modified_probs [n, n_best]: Output tensor to store + probs of chosen tokens in (modified with noise). + seeds [n]: Seeds to use for sampling. If the seed is 0, we use + greedy sampling. Note this is ONLY used for determining + whether to use random sampling or not. The actual random + noise should be passed as uniform_noise. + uniform_noise [batch_size, n_best, vocab_size]: Uniform + noise to use for random sampling (will be converted + to exponential gumbel noise by the kernel). + modify_greedy_probs: If True, we modify the probs tensor in-place + to encode the sampling method used for each row. This is used + in speculative decoding. Only applies in greedy decoding. + save_logprobs: If True, we save the logprobs of the sampled tokens + in the output_logprobs tensor. + save_modified_probs: If True, we save the modified probs (with noise) + of the sampled tokens in the output_modified_probs tensor. + DOES NOT include the modification done by modify_greedy_probs + (because we want to use the unmodified probs to pick the best + split in case of multi-split sampling). + """ + n_samples = sample_indices.shape[0] + n_cols = probs.shape[1] + n_best = output_samples.shape[1] if len(output_samples.shape) > 1 else 1 + + # The block size is the smallest power of two greater than the number of + # columns in probs + block_size = triton.next_power_of_2(n_cols) + num_warps = 4 + # Manual tuning. This seems to give best performance on A100 for + # simple kernels like this. + if block_size >= 8192: + num_warps = 32 + elif block_size >= 4096: + num_warps = 16 + elif block_size >= 2048: + num_warps = 8 + + # Enqueue kernel. 
@triton.jit
def _uniform_to_exponential(uniform_noise):
    """Convert uniform samples to exponential samples.

    Uses the inverse-CDF (inversion) method: if U ~ Uniform(0, 1),
    then -log(U) is exponentially distributed.
    """
    # tl.rand returns values in [0, 1), so we clamp lower bound
    # to _EPS to avoid log(0) and thus division by 0 later
    lb = tl.full(uniform_noise.shape, _EPS, uniform_noise.dtype)
    uniform_noise = tl.maximum(uniform_noise, lb)
    # Use the inversion method to turn uniform samples
    # into exponential samples
    exponential_noise = -tl.log(uniform_noise)
    return exponential_noise
uses_random_sampling = seed != 0 + + # The stride represents how much we need to increase the + # pointer to advance 1 row + row_start_ptr = probs_ptr + row_idx * probs_row_stride + + # The block size is the next power of two greater than n_cols, + # so we can fit each row in a single block + col_offsets = tl.arange(0, block_size) + + # Load the row into SRAM, using a mask since block_size may be > than n_cols + row = tl.load(row_start_ptr + col_offsets, + mask=col_offsets < n_cols, + other=float("-inf")) + + if uses_random_sampling: + uniform_noise_start_ptr = (uniform_noise_ptr + + sample_idx * uniform_noise_row_stride + + best_idx * uniform_noise_best_stride) + uniform_noise = tl.load(uniform_noise_start_ptr + col_offsets, + mask=col_offsets < n_cols, + other=0.5) + exponential_noise = _uniform_to_exponential(uniform_noise) + row /= exponential_noise + + sampled_value, sampled_token = tl.max(row, axis=0, return_indices=True) + # clamp sampled token to n_cols - 1 + # this should not be necessary, but we do it + # just in case + if sampled_token >= n_cols: + sampled_token = n_cols - 1 + # Write back output to DRAM + output_row_start_ptr = (output_ptr + sample_idx * output_row_stride + + best_idx) + tl.store(output_row_start_ptr, sampled_token) + + if modify_greedy_probs: # noqa + if not uses_random_sampling: + # Set the probability of the sampled token to 1, all other + # tokens to zero. This is used in speculative decoding where + # the sampling method must be encoded within the sampled + # probability distributions. 
+ row = tl.where(col_offsets == sampled_token, 1.0, 0.0) + tl.store(row_start_ptr + col_offsets, + row, + mask=col_offsets < n_cols) + + if save_modified_probs: + output_row_start_ptr = (output_modified_probs_ptr + + sample_idx * output_row_stride + best_idx) + tl.store(output_row_start_ptr, sampled_value) + + if save_logprobs: + # Load the row into SRAM, using a mask since block_size + # may be > than n_cols + sampled_logprob = tl.load(logprobs_ptr + row_idx * probs_row_stride + + sampled_token) + # Write back output to DRAM + output_row_start_ptr = (output_logprobs_ptr + + sample_idx * output_row_stride + best_idx) + tl.store(output_row_start_ptr, sampled_logprob) diff --git a/vllm/model_executor/layers/quantization/__init__.py b/vllm/model_executor/layers/quantization/__init__.py new file mode 100644 index 0000000..1c652e3 --- /dev/null +++ b/vllm/model_executor/layers/quantization/__init__.py @@ -0,0 +1,35 @@ +from typing import Dict, Type + +from vllm.model_executor.layers.quantization.aqlm import AQLMConfig +from vllm.model_executor.layers.quantization.awq import AWQConfig +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.quantization.fp8 import Fp8Config +from vllm.model_executor.layers.quantization.gptq import GPTQConfig +from vllm.model_executor.layers.quantization.gptq_marlin import ( + GPTQMarlinConfig) +from vllm.model_executor.layers.quantization.marlin import MarlinConfig +from vllm.model_executor.layers.quantization.squeezellm import SqueezeLLMConfig + +QUANTIZATION_METHODS: Dict[str, Type[QuantizationConfig]] = { + "aqlm": AQLMConfig, + "awq": AWQConfig, + "fp8": Fp8Config, + "gptq": GPTQConfig, + "squeezellm": SqueezeLLMConfig, + "gptq_marlin": GPTQMarlinConfig, + "marlin": MarlinConfig, +} + + +def get_quantization_config(quantization: str) -> Type[QuantizationConfig]: + if quantization not in QUANTIZATION_METHODS: + raise ValueError(f"Invalid quantization method: 
def get_int_dtype(nbits: int) -> torch.dtype:
    """Return the smallest signed torch integer dtype holding nbits bits."""
    for limit, dtype in ((8, torch.int8), (16, torch.int16),
                         (32, torch.int32), (64, torch.int64)):
        if nbits <= limit:
            return dtype
    raise ValueError(f"No dtype available for {nbits}-bit codebooks")


@torch.inference_mode()
def unpack_int_data(data: torch.IntTensor, nbits: int) -> torch.IntTensor:
    """Reinterpret stored codes as unsigned nbits-bit integers (int64)."""
    # Masking the low nbits of the two's-complement value is equivalent to
    # taking the value modulo 2**nbits.
    return data.to(torch.int64) & ((1 << nbits) - 1)
def dequantize_gemm(
    input: torch.Tensor,  # [..., in_features]
    codes: torch.IntTensor,  # [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  # [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    """Dequantize AQLM codes to a full weight matrix, then apply F.linear."""
    # codebook_size is a power of two, so bit_length() - 1 recovers the
    # number of bits per code.
    nbits = codebooks.shape[1].bit_length() - 1
    unpacked_codes = unpack_int_data(codes, nbits)
    weight = dequantize_weight(unpacked_codes, codebooks, scales)
    return F.linear(input, weight, bias)
# Optimized dequantize/decompression kernels, supports 1x16 and 2x8
# at 6 and 9 times faster than the generic version above, respectively.
def optimized_dequantize_gemm(
    input: torch.Tensor,  # [..., in_features]
    codes: torch.IntTensor,  # [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  # [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  # [num_out_groups, 1, 1, 1]
    output_partition_sizes: torch.IntTensor,
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    """Dequantize via the dedicated AQLM kernel, then run F.linear.

    NOTE(review): relies on ops.aqlm_dequant returning an unscaled
    [out_features, in_features] weight matrix — confirm against the kernel.
    """
    weights = ops.aqlm_dequant(codes, codebooks, output_partition_sizes)

    if bias is None:
        # scaling the output is fastest, so we do that when possible.
        output = F.linear(input, weights, bias)
        orig_shape = output.shape
        flattened_output = output.view(-1, output.size(-1))
        f_scales = scales.view(-1, scales.shape[0])
        # Broadcast one scale per output feature across all rows.
        b_scales = f_scales.expand(flattened_output.shape[0], -1)
        flattened_output *= b_scales
        return output.view(orig_shape)
    else:
        # With a bias we must scale the weights *before* the linear, so the
        # bias is not scaled along with the matmul result.
        b_scales = scales.view(scales.shape[:-3] + (-1, )).expand(
            -1, weights.shape[1])
        weights *= b_scales
        return F.linear(input, weights, bias)
+ + Reference: https://github.com/Vahe1994/AQLM + """ + + def __init__( + self, + in_group_size: int, + nbits_per_codebook: int, + num_codebooks: int, + out_group_size: int, + ) -> None: + self.in_group_size = in_group_size + self.nbits_per_codebook = nbits_per_codebook + self.num_codebooks = num_codebooks + self.out_group_size = out_group_size + + # out_group_size > 1 is untested, and probably won't work as-is. + assert (self.out_group_size == 1) + self.pack_factor = (self.in_group_size * self.out_group_size) + + def __repr__(self) -> str: + return (f"AQLMConfig(in_group_size={self.in_group_size}, " + f"nbits_per_codebook={self.nbits_per_codebook}, " + f"num_codebooks={self.num_codebooks}, " + f"out_group_size={self.out_group_size})") + + @classmethod + def get_name(cls) -> str: + return "aqlm" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half] + + @classmethod + def get_min_capability(cls) -> int: + return 70 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return [] # no extra configs. + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "AQLMConfig": + in_group_size = cls.get_from_keys(config, ["in_group_size"]) + nbits_per_codebook = cls.get_from_keys(config, ["nbits_per_codebook"]) + num_code_books = cls.get_from_keys(config, ["num_codebooks"]) + out_group_size = cls.get_from_keys(config, ["out_group_size"]) + return cls(in_group_size, nbits_per_codebook, num_code_books, + out_group_size) + + def get_quant_method( + self, layer: torch.nn.Module) -> Optional["AQLMLinearMethod"]: + if isinstance(layer, LinearBase): + return AQLMLinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return [] + + +class AQLMLinearMethod(LinearMethodBase): + """Linear method for AQLM. + + Args: + quant_config: The AQLM quantization config. 
    def __init__(self, quant_config: AQLMConfig):
        # AQLM hyper-parameters (group sizes, codebook count and width).
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """Allocate AQLM parameters (codes, codebooks, scales) on `layer`.

        Args:
            layer: Module to register the quantized parameters on.
            input_size_per_partition: This rank's share of the input dim.
            output_partition_sizes: Output sizes of the fused sub-layers
                (e.g. Q/K/V), concatenated along the output dimension.
            input_size / output_size: Full (unpartitioned) sizes; unused.
            params_dtype: dtype for codebooks and scales (must be half).

        Raises:
            ValueError: on unsupported dtype or misaligned partition sizes.
        """
        del output_size  # Unused.
        del input_size  # Unused.

        if params_dtype != torch.half:
            raise ValueError("Only half is currently supported by aqlm")
        if input_size_per_partition % self.quant_config.in_group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.out_group_size != 0:
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        # Integer codes selecting codebook entries per (output, input) group.
        codes = Parameter(
            torch.empty(
                # There could actually be two pack factors, one along input and
                # one along output, but we don't currently support
                # out_group_size, and only the one along output needs to be
                # marked with "packed_dim" in order for QKVLinear to work.
                output_size_per_partition,
                input_size_per_partition // self.quant_config.pack_factor,
                self.quant_config.num_codebooks,
                dtype=get_int_dtype(self.quant_config.nbits_per_codebook),
            ),
            requires_grad=False,
        )

        set_weight_attrs(
            codes,
            {
                "input_dim": 1,
                "output_dim": 0,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            },
        )

        # One codebook stack per fused sub-layer, concatenated along dim 0.
        codebooks = Parameter(
            torch.empty(
                self.quant_config.num_codebooks * len(output_partition_sizes),
                2**self.quant_config.nbits_per_codebook,
                self.quant_config.out_group_size,
                self.quant_config.in_group_size,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            codebooks,
            {
                # metadata indicates fixed size concatenated along dim 0
                "is_metadata":
                True,
                # Kept on CPU so weight loaders can split the codebooks.
                "output_partition_sizes":
                torch.tensor(output_partition_sizes, device='cpu'),
            },
        )

        # Per-output-group multiplicative scales.
        scales = Parameter(
            torch.empty(
                (
                    output_size_per_partition //
                    self.quant_config.out_group_size,
                    1,
                    1,
                    1,
                ),
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            scales,
            {
                "output_dim": 0,
                "packed_dim": 0,
                "pack_factor": self.quant_config.out_group_size
            },
        )

        layer.register_parameter("codes", codes)
        set_weight_attrs(codes, extra_weight_attrs)
        layer.register_parameter("codebooks", codebooks)
        set_weight_attrs(codebooks, extra_weight_attrs)
        layer.register_parameter("scales", scales)
        set_weight_attrs(scales, extra_weight_attrs)
+ if ingroups == 8 and outgroups == 1 and ( + (bits == 256 and nbooks == 2) or (bits == 65536 and nbooks == 1)): + + # thresholds determined by timings on an A6000, one GPU + use_gemv = math.prod(x.shape[:-1]) <= 6 + + return ops.aqlm_gemm( + x, + codes, + codebooks, + scales, + output_partition_sizes, + bias, + ) if use_gemv else optimized_dequantize_gemm( + x, + codes, + codebooks, + scales, + output_partition_sizes, + bias, + ) + + # fall back all unoptimized formats + return generic_dequantize_gemm( + x, + codes, + codebooks, + scales, + output_partition_sizes, + bias, + ) diff --git a/vllm/model_executor/layers/quantization/awq.py b/vllm/model_executor/layers/quantization/awq.py new file mode 100644 index 0000000..f4fc7ce --- /dev/null +++ b/vllm/model_executor/layers/quantization/awq.py @@ -0,0 +1,175 @@ +from typing import Any, Dict, List, Optional + +import torch +from torch.nn.parameter import Parameter + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.utils import set_weight_attrs + + +class AWQConfig(QuantizationConfig): + """Config class for AWQ. 
+ + Reference: https://arxiv.org/abs/2306.00978 + """ + + def __init__( + self, + weight_bits: int, + group_size: int, + zero_point: bool, + ) -> None: + self.weight_bits = weight_bits + self.group_size = group_size + self.zero_point = zero_point + + if self.weight_bits != 4: + raise ValueError( + "Currently, only 4-bit weight quantization is supported for " + f"AWQ, but got {self.weight_bits} bits.") + self.pack_factor = 32 // self.weight_bits + + def __repr__(self) -> str: + return (f"AWQConfig(weight_bits={self.weight_bits}, " + f"group_size={self.group_size}, " + f"zero_point={self.zero_point})") + + def get_name(self) -> str: + return "awq" + + def get_supported_act_dtypes(self) -> List[torch.dtype]: + return [torch.half] + + def get_min_capability(self) -> int: + # The AWQ kernel only supports Turing or newer GPUs. + return 75 + + @staticmethod + def get_config_filenames() -> List[str]: + return [ + "quant_config.json", # E.g., casperhansen/vicuna-7b-v1.5-awq + # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq + "quantize_config.json", + ] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "AWQConfig": + weight_bits = cls.get_from_keys(config, ["w_bit", "bits"]) + group_size = cls.get_from_keys(config, ["q_group_size", "group_size"]) + zero_point = cls.get_from_keys(config, ["zero_point"]) + return cls(weight_bits, group_size, zero_point) + + def get_quant_method( + self, layer: torch.nn.Module) -> Optional["AWQLinearMethod"]: + if isinstance(layer, LinearBase): + return AWQLinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return ["gelu", "gelu_fast", "gelu_new", "gelu_pytorch_tanh"] + + +class AWQLinearMethod(LinearMethodBase): + """Linear method for AWQ. + + Args: + quant_config: The AWQ quantization config. 
+ """ + + def __init__(self, quant_config: AWQConfig): + self.quant_config = quant_config + + def create_weights(self, layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], input_size: int, + output_size: int, params_dtype: torch.dtype, + **extra_weight_attrs): + if input_size_per_partition % self.quant_config.group_size != 0: + raise ValueError( + "The input size is not aligned with the quantized " + "weight shape. This can be caused by too large " + "tensor parallel size.") + + output_size_per_partition = sum(output_partition_sizes) + if output_size_per_partition % self.quant_config.pack_factor != 0: + raise ValueError( + "The output size is not aligned with the quantized " + "weight shape. This can be caused by too large " + "tensor parallel size.") + + qweight = Parameter( + torch.empty( + input_size_per_partition, + output_size_per_partition // self.quant_config.pack_factor, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qweight, { + "input_dim": 0, + "output_dim": 1, + "packed_dim": 1, + "pack_factor": self.quant_config.pack_factor, + }) + qzeros = Parameter( + torch.empty( + input_size_per_partition // self.quant_config.group_size, + output_size_per_partition // self.quant_config.pack_factor, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qzeros, { + "input_dim": 0, + "output_dim": 1, + "packed_dim": 1, + "pack_factor": self.quant_config.pack_factor, + }) + scales = Parameter( + torch.empty( + input_size_per_partition // self.quant_config.group_size, + output_size_per_partition, + dtype=params_dtype, + ), + requires_grad=False, + ) + set_weight_attrs(scales, { + "input_dim": 0, + "output_dim": 1, + }) + + layer.register_parameter("qweight", qweight) + set_weight_attrs(qweight, extra_weight_attrs) + layer.register_parameter("qzeros", qzeros) + set_weight_attrs(qzeros, extra_weight_attrs) + layer.register_parameter("scales", scales) + set_weight_attrs(scales, 
extra_weight_attrs) + + def apply(self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + qweight = layer.qweight + scales = layer.scales + qzeros = layer.qzeros + pack_factor = self.quant_config.pack_factor + out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, )) + reshaped_x = x.reshape(-1, x.shape[-1]) + + # num_tokens >= threshold + FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 256 + + if FP16_MATMUL_HEURISTIC_CONDITION: + out = ops.awq_dequantize(qweight, scales, qzeros, 0, 0, 0) + out = torch.matmul(reshaped_x, out) + else: + out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros, + pack_factor) + if bias is not None: + out.add_(bias) + return out.reshape(out_shape) diff --git a/vllm/model_executor/layers/quantization/base_config.py b/vllm/model_executor/layers/quantization/base_config.py new file mode 100644 index 0000000..ff5cf0b --- /dev/null +++ b/vllm/model_executor/layers/quantization/base_config.py @@ -0,0 +1,97 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional + +import torch +from torch import nn + + +class QuantizeMethodBase(ABC): + """Base class for different quantized methods.""" + + @abstractmethod + def create_weights(self, layer: torch.nn.Module, *weight_args, + **extra_weight_attrs): + """Create weights for a layer. + + The weights will be set as attributes of the layer.""" + raise NotImplementedError + + @abstractmethod + def apply(self, layer: torch.nn.Module, *args, **kwargs) -> torch.Tensor: + """Apply the weights in layer to the input tensor. + + Expects create_weights to have been called before on the layer.""" + raise NotImplementedError + + def process_weights_after_loading(self, layer: nn.Module) -> None: + """Process the weight after loading. + + This can be used for example, to transpose weights for computation. 
+ """ + return + + +class QuantizationConfig(ABC): + """Base class for quantization configs.""" + + @abstractmethod + def get_name(self) -> str: + """Name of the quantization method.""" + raise NotImplementedError + + @abstractmethod + def get_supported_act_dtypes(self) -> List[torch.dtype]: + """List of supported activation dtypes.""" + raise NotImplementedError + + @abstractmethod + def get_min_capability(self) -> int: + """Minimum GPU capability to support the quantization method. + + E.g., 70 for Volta, 75 for Turing, 80 for Ampere. + This requirement is due to the custom CUDA kernels used by the + quantization method. + """ + raise NotImplementedError + + @staticmethod + @abstractmethod + def get_config_filenames() -> List[str]: + """List of filenames to search for in the model directory.""" + raise NotImplementedError + + @classmethod + @abstractmethod + def from_config(cls, config: Dict[str, Any]) -> "QuantizationConfig": + """Create a config class from the model's quantization config.""" + raise NotImplementedError + + @staticmethod + def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any: + """Get a value from the model's quantization config.""" + for key in keys: + if key in config: + return config[key] + raise ValueError(f"Cannot find any of {keys} in the model's " + "quantization config.") + + @abstractmethod + def get_quant_method( + self, layer: torch.nn.Module) -> Optional[QuantizeMethodBase]: + """Get the quantize method to use for the quantized layer. + + Args: + layer: The layer for the quant method. + Returns: + The quantize method. None if the given layer doesn't support quant + method. + """ + raise NotImplementedError + + @abstractmethod + def get_scaled_act_names(self) -> List[str]: + """Returns the activation function names that should be post-scaled. + + For now, this is only used by AWQ. 
+ """ + raise NotImplementedError diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py new file mode 100644 index 0000000..b57e1dd --- /dev/null +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -0,0 +1,265 @@ +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch.nn import Module +from torch.nn.parameter import Parameter + +from vllm import _custom_ops as ops +from vllm.logger import init_logger +from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.utils import set_weight_attrs + +ACTIVATION_SCHEMES = ["static", "dynamic"] + +logger = init_logger(__name__) + + +class Fp8Config(QuantizationConfig): + """Config class for FP8.""" + + def __init__( + self, + is_checkpoint_fp8_serialized: bool = False, + activation_scheme: str = "dynamic", + ) -> None: + self.is_checkpoint_fp8_serialized = is_checkpoint_fp8_serialized + if is_checkpoint_fp8_serialized: + logger.warning("Detected fp8 checkpoint. 
Please note that the " + "format is experimental and subject to change.") + if activation_scheme not in ACTIVATION_SCHEMES: + raise ValueError( + f"Unsupported activation scheme {activation_scheme}") + self.activation_scheme = activation_scheme + + @classmethod + def get_name(cls) -> str: + return "fp8" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.bfloat16, torch.half] + + @classmethod + def get_min_capability(cls) -> int: + return 89 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return [] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "Fp8Config": + quant_method = cls.get_from_keys(config, ["quant_method"]) + is_checkpoint_fp8_serialized = ("fp8" in quant_method) + activation_scheme = cls.get_from_keys(config, ["activation_scheme"]) + return cls(is_checkpoint_fp8_serialized=is_checkpoint_fp8_serialized, + activation_scheme=activation_scheme) + + def get_quant_method( + self, layer: torch.nn.Module) -> Optional["Fp8LinearMethod"]: + if isinstance(layer, LinearBase): + return Fp8LinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return [] + + +class Fp8LinearMethod(LinearMethodBase): + """Linear method for FP8. + Supports loading FP8 checkpoints with static weight scale and + dynamic/static activation scale. + + Also supports loading quantized FP16/BF16 model checkpoints with dynamic + activation scaling. The weight scaling factor will be initialized after + the model weights are loaded. + + Limitations: + 1. Only support per-tensor quantization due to torch._scaled_mm support. + 2. Only support float8_e4m3fn data type due to the limitation of + torch._scaled_mm (https://github.com/pytorch/pytorch/blob/2e48b39603411a41c5025efbe52f89560b827825/aten/src/ATen/native/cuda/Blas.cpp#L854-L856) + + Args: + quant_config: The quantization config. 
+ """ + + def __init__(self, quant_config: Fp8Config): + self.quant_config = quant_config + + def _create_scale_param( + self, + scale_name: str, + layer: torch.nn.Module, + output_partition_sizes: List[int], + **extra_weight_attrs, + ) -> None: + scale = Parameter(torch.empty(len(output_partition_sizes), + dtype=torch.float32), + requires_grad=False) + layer.register_parameter(scale_name, scale) + set_weight_attrs( + scale, { + **extra_weight_attrs, + "fp8_scales_shard_indexer": + self.scales_shard_indexer, + }) + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ): + del input_size, output_size + output_size_per_partition = sum(output_partition_sizes) + + layer.process_after_load = True + layer.logical_widths = output_partition_sizes + + # WEIGHT + weight_dtype = (torch.float8_e4m3fn + if self.quant_config.is_checkpoint_fp8_serialized else + params_dtype) + weight = Parameter(torch.empty(output_size_per_partition, + input_size_per_partition, + dtype=weight_dtype), + requires_grad=False) + layer.register_parameter("weight", weight) + set_weight_attrs(weight, { + **extra_weight_attrs, + "input_dim": 1, + "output_dim": 0, + }) + + # If checkpoint is serialized fp8, load them. + # Otherwise, wait until process_weights_after_loading. 
+ if self.quant_config.is_checkpoint_fp8_serialized: + # WEIGHT SCALE + self._create_scale_param( + scale_name="weight_scale", + layer=layer, + output_partition_sizes=output_partition_sizes, + **extra_weight_attrs) + + # ACTIVATION SCALE + if self.quant_config.activation_scheme == "static": + self._create_scale_param( + scale_name="act_scale", + layer=layer, + output_partition_sizes=output_partition_sizes, + **extra_weight_attrs) + + def scales_shard_indexer( + self, param: torch.Tensor, loaded_weight: torch.Tensor, + shard_id: Union[str, int]) -> Tuple[torch.Tensor, torch.Tensor]: + qkv_idxs = {"q": 0, "k": 1, "v": 2} + + if isinstance(shard_id, int): + pass + elif isinstance(shard_id, str): + if shard_id not in qkv_idxs: + raise ValueError(f"Unknown shard_id: {shard_id}") + shard_id = qkv_idxs[shard_id] + else: + ValueError(f"Shard id must be int or str but got {type(shard_id)}") + + return param[shard_id], loaded_weight + + def process_weights_after_loading(self, layer: Module) -> None: + if (not hasattr(layer, "process_after_load") + or not layer.process_after_load): + return + + # If checkpoint is fp/bf16 (not serialized fp8), quantize the weights. + if not self.quant_config.is_checkpoint_fp8_serialized: + qweight, weight_scale = ops.scaled_fp8_quant(layer.weight, + scale=None) + layer.weight = Parameter(qweight.t(), requires_grad=False) + layer.weight_scale = Parameter(weight_scale, requires_grad=False) + layer.logical_widths = None + layer.act_scale = None + return + + # If checkpoint is fp8, requantize the separately quantized logical + # weights into a single fp8 weight with a single weight scale. + else: + # WEIGHT_SCALE / WEIGHT + # Loop over logical weights, requantizing with single scale. 
+ max_w_scale = layer.weight_scale.max() + start = 0 + for idx, logical_width in enumerate(layer.logical_widths): + end = start + logical_width + weight_dq = per_tensor_dequantize(layer.weight[start:end, :], + layer.weight_scale[idx]) + + layer.weight[start:end, :] = per_tensor_quantize( + weight_dq, layer.weight_scale.max()) + start = end + layer.weight_scale = Parameter(max_w_scale, requires_grad=False) + + # WEIGHT + # Transpose weight for passing to torch._scaled_mm + weight = layer.weight + layer.weight = Parameter(weight.t(), requires_grad=False) + + # ACT_SCALE + # Dynamic: set to None (required input to ops.scaled_fp8_quant). + # Static: set to max of the act_scales (since they are equal). + if self.quant_config.activation_scheme == "dynamic": + layer.act_scale = None + elif self.quant_config.activation_scheme == "static": + if not all_close_1d(layer.act_scale): + raise ValueError( + "All the act_scales for the logical weights of a layer " + f"must be equal. But got {layer.act_scale}") + layer.act_scale = Parameter(layer.act_scale.max(), + requires_grad=False) + else: + raise ValueError( + f"Unknown scheme {self.quant_config.activation_scheme}") + + def apply(self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + # ops.scaled_fp8_quant supports both dynamic and static quant. + # If dynamic, layer.act_scale is None and x_scale computed from x. + # If static, layer.act_scale is scalar and x_scale set to act_scale. 
+ qinput, x_scale = ops.scaled_fp8_quant(x, layer.act_scale) + + # Fused GEMM_DQ + output, _ = torch._scaled_mm( + qinput, + layer.weight, + out_dtype=x.dtype, + scale_a=x_scale, + scale_b=layer.weight_scale, + bias=bias, + ) + + return output + + +def all_close_1d(x: torch.Tensor) -> bool: + assert len(x.shape) == 1 + return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0])) + + +def per_tensor_quantize(tensor: torch.Tensor, + inv_scale: float) -> torch.Tensor: + finfo = torch.finfo(torch.float8_e4m3fn) + qweight = (tensor / inv_scale).clamp(min=finfo.min, max=finfo.max) + return qweight.to(torch.float8_e4m3fn) + + +def per_tensor_dequantize(tensor: torch.Tensor, + inv_scale: float) -> torch.Tensor: + fake_qweight = tensor.to(torch.float16) + dq_weight = fake_qweight * inv_scale + return dq_weight diff --git a/vllm/model_executor/layers/quantization/gptq.py b/vllm/model_executor/layers/quantization/gptq.py new file mode 100644 index 0000000..ae9f701 --- /dev/null +++ b/vllm/model_executor/layers/quantization/gptq.py @@ -0,0 +1,224 @@ +import enum +from enum import Enum +from fractions import Fraction +from typing import Any, Dict, List, Optional + +import torch +from torch.nn.parameter import Parameter + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.utils import set_weight_attrs + + +class GPTQConfig(QuantizationConfig): + """Config class for GPTQ. 
+ + Reference: https://arxiv.org/abs/2210.17323 + """ + + def __init__( + self, + weight_bits: int, + group_size: int, + desc_act: bool, + ) -> None: + self.weight_bits = weight_bits + self.group_size = group_size + self.desc_act = desc_act + self.pack_factor = Fraction(32, self.weight_bits) + if self.weight_bits not in [2, 3, 4, 8]: + raise ValueError( + "Currently, only 2/3/4/8-bit weight quantization is " + f"supported for GPTQ, but got {self.weight_bits} bits.") + + def __repr__(self) -> str: + return (f"GPTQConfig(weight_bits={self.weight_bits}, " + f"group_size={self.group_size}, " + f"desc_act={self.desc_act})") + + @classmethod + def get_name(cls) -> str: + return "gptq" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half] + + @classmethod + # Need to figure it out + def get_min_capability(cls) -> int: + return 60 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return ["quantize_config.json"] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "GPTQConfig": + weight_bits = cls.get_from_keys(config, ["bits"]) + group_size = cls.get_from_keys(config, ["group_size"]) + desc_act = cls.get_from_keys(config, ["desc_act"]) + return cls(weight_bits, group_size, desc_act) + + def get_quant_method( + self, layer: torch.nn.Module) -> Optional["GPTQLinearMethod"]: + if isinstance(layer, LinearBase): + return GPTQLinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return [] + + +class ExllamaState(Enum): + + UNUSED = enum.auto() + UNINITIALIZED = enum.auto() + READY = enum.auto() + + +class GPTQLinearMethod(LinearMethodBase): + """Linear method for GPTQ. + + Args: + quant_config: The GPTQ quantization config. 
+ """ + + def __init__(self, quant_config: GPTQConfig): + self.quant_config = quant_config + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ): + del output_size # Unused. + if input_size_per_partition % self.quant_config.group_size != 0: + raise ValueError( + "The input size is not aligned with the quantized " + "weight shape. This can be caused by too large " + "tensor parallel size.") + output_size_per_partition = sum(output_partition_sizes) + if (output_size_per_partition % self.quant_config.pack_factor.numerator + != 0): + raise ValueError( + "The output size is not aligned with the quantized " + "weight shape. This can be caused by too large " + "tensor parallel size.") + + if self.quant_config.group_size != -1: + group_size = self.quant_config.group_size + else: + group_size = input_size + exllama_state = ExllamaState.UNINITIALIZED + scale_and_zero_size = input_size // group_size + scale_and_zero_input_dim = None + if (input_size != input_size_per_partition + and self.quant_config.group_size != -1): + # For act-order models, we cannot use Exllama for row parallel layer + if self.quant_config.desc_act: + exllama_state = ExllamaState.UNUSED + else: + # we need to partition qzeros and scales for exllama kernel + scale_and_zero_size = input_size_per_partition // group_size + scale_and_zero_input_dim = 0 + + qweight = Parameter( + torch.empty( + input_size_per_partition // self.quant_config.pack_factor, + output_size_per_partition, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qweight, { + "input_dim": 0, + "output_dim": 1, + "packed_dim": 0, + "pack_factor": self.quant_config.pack_factor, + }) + g_idx = Parameter( + torch.tensor( + [ + i // self.quant_config.group_size + for i in range(input_size_per_partition) + ], + dtype=torch.int32, + ), + requires_grad=False, 
+ ) + # Ignore warning from fused linear layers such as QKVParallelLinear. + set_weight_attrs(g_idx, {"input_dim": 0, "ignore_warning": True}) + qzeros = Parameter( + torch.empty( + scale_and_zero_size, + output_size_per_partition // self.quant_config.pack_factor, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qzeros, { + "input_dim": scale_and_zero_input_dim, + "output_dim": 1, + "packed_dim": 1, + "pack_factor": self.quant_config.pack_factor, + }) + scales = Parameter( + torch.empty( + scale_and_zero_size, + output_size_per_partition, + dtype=params_dtype, + ), + requires_grad=False, + ) + set_weight_attrs(scales, { + "input_dim": scale_and_zero_input_dim, + "output_dim": 1, + }) + + layer.register_parameter("qweight", qweight) + set_weight_attrs(qweight, extra_weight_attrs) + layer.register_parameter("g_idx", g_idx) + set_weight_attrs(g_idx, extra_weight_attrs) + layer.register_parameter("qzeros", qzeros) + set_weight_attrs(qzeros, extra_weight_attrs) + layer.register_parameter("scales", scales) + set_weight_attrs(scales, extra_weight_attrs) + + layer.exllama_state = exllama_state + + def apply(self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + qweight = layer.qweight + out_shape = x.shape[:-1] + (qweight.shape[-1], ) + reshaped_x = x.reshape(-1, x.shape[-1]) + # exllama needs to shuffle the weight after the weight is loaded + # here we do the shuffle on first forward pass + if layer.exllama_state == ExllamaState.UNINITIALIZED: + if self.quant_config.desc_act: + layer.g_idx.data = torch.argsort(layer.g_idx).to(torch.int) + else: + layer.g_idx.data = torch.empty((0, ), + device=layer.g_idx.device) + layer.exllama_state = ExllamaState.READY + ops.gptq_shuffle(layer.qweight, layer.g_idx, + self.quant_config.weight_bits) + output = ops.gptq_gemm(reshaped_x, layer.qweight, layer.qzeros, + layer.scales, layer.g_idx, + layer.exllama_state == ExllamaState.READY, + 
self.quant_config.weight_bits) + if bias is not None: + output.add_(bias) + return output.reshape(out_shape) diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py new file mode 100644 index 0000000..e246400 --- /dev/null +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -0,0 +1,438 @@ +import enum +from enum import Enum +from typing import Any, Dict, List, Optional + +import torch +from torch.nn.parameter import Parameter + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + set_weight_attrs) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) + +GPTQ_MARLIN_TILE = 16 +GPTQ_MARLIN_MIN_THREAD_N = 64 +GPTQ_MARLIN_MIN_THREAD_K = 128 +GPTQ_MARLIN_MAX_PARALLEL = 16 + +GPTQ_MARLIN_SUPPORTED_NUM_BITS = [4, 8] +GPTQ_MARLIN_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128] +GPTQ_MARLIN_SUPPORTED_SYM = [True] + + +# Permutations for Marlin scale shuffling +def get_scale_perms(num_bits): + scale_perm = [] + for i in range(8): + scale_perm.extend([i + 8 * j for j in range(8)]) + scale_perm_single = [] + for i in range(4): + scale_perm_single.extend( + [2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]]) + return scale_perm, scale_perm_single + + +def get_pack_factor(num_bits): + assert (num_bits in GPTQ_MARLIN_SUPPORTED_NUM_BITS + ), f"Unsupported num_bits = {num_bits}" + return 32 // num_bits + + +def marlin_permute_scales(s, size_k, size_n, group_size, num_bits): + scale_perm, scale_perm_single = get_scale_perms(num_bits) + if group_size < size_k and group_size != -1: + s = s.reshape((-1, len(scale_perm)))[:, scale_perm] + else: + s = s.reshape((-1, len(scale_perm_single)))[:, scale_perm_single] + s = s.reshape((-1, size_n)).contiguous() + + return s + + +class GPTQMarlinConfig(QuantizationConfig): + """Config class for GPTQ Marlin""" + + def __init__(self, weight_bits: int, group_size: int, desc_act: 
bool, + is_sym: bool) -> None: + if desc_act and group_size == -1: + # In this case, act_order == True is the same as act_order == False + # (since we have only one group per output channel) + desc_act = False + + self.weight_bits = weight_bits + self.group_size = group_size + self.desc_act = desc_act + self.is_sym = is_sym + + # Verify + if self.weight_bits not in GPTQ_MARLIN_SUPPORTED_NUM_BITS: + raise ValueError( + f"Marlin does not support weight_bits = {self.weight_bits}. " + f"Only weight_bits = {GPTQ_MARLIN_SUPPORTED_NUM_BITS} " + "are supported.") + if self.group_size not in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES: + raise ValueError( + f"Marlin does not support group_size = {self.group_size}. " + f"Only group_sizes = {GPTQ_MARLIN_SUPPORTED_GROUP_SIZES} " + "are supported.") + if self.is_sym not in GPTQ_MARLIN_SUPPORTED_SYM: + raise ValueError( + f"Marlin does not support is_sym = {self.is_sym}. " + f"Only sym = {GPTQ_MARLIN_SUPPORTED_SYM} are supported.") + + # Init + self.pack_factor = get_pack_factor(weight_bits) + self.tile_size = GPTQ_MARLIN_TILE + self.min_thread_n = GPTQ_MARLIN_MIN_THREAD_N + self.min_thread_k = GPTQ_MARLIN_MIN_THREAD_K + self.max_parallel = GPTQ_MARLIN_MAX_PARALLEL + + def __repr__(self) -> str: + return (f"GPTQMarlinConfig(weight_bits={self.weight_bits}, " + f"group_size={self.group_size}, " + f"desc_act={self.desc_act})") + + @classmethod + def get_name(cls) -> str: + return "gptq_marlin" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half] + + @classmethod + def get_min_capability(cls) -> int: + return 80 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return ["quantize_config.json"] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "GPTQMarlinConfig": + weight_bits = cls.get_from_keys(config, ["bits"]) + group_size = cls.get_from_keys(config, ["group_size"]) + desc_act = cls.get_from_keys(config, ["desc_act"]) + is_sym = cls.get_from_keys(config, ["sym"]) + 
return cls(weight_bits, group_size, desc_act, is_sym) + + def get_quant_method( + self, + layer: torch.nn.Module) -> Optional["GPTQMarlinLinearMethod"]: + if isinstance(layer, LinearBase): + return GPTQMarlinLinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return [] + + @classmethod + def is_marlin_compatible(cls, quant_config: Dict[str, Any]): + # Extract data from quant config. + num_bits = quant_config.get("bits", None) + group_size = quant_config.get("group_size", None) + sym = quant_config.get("sym", None) + desc_act = quant_config.get("desc_act", None) + + # If we cannot find the info needed in the config, cannot convert. + if (num_bits is None or group_size is None or sym is None + or desc_act is None): + return False + + # If the capability of the device is too low, cannot convert. + major, minor = torch.cuda.get_device_capability() + device_capability = major * 10 + minor + if device_capability < cls.get_min_capability(): + return False + + # Otherwise, can convert if model satisfies marlin constraints. + return (num_bits in GPTQ_MARLIN_SUPPORTED_NUM_BITS + and group_size in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES + and sym in GPTQ_MARLIN_SUPPORTED_SYM) + + +class GPTQMarlinState(Enum): + REPACK = enum.auto() + READY = enum.auto() + + +class GPTQMarlinLinearMethod(LinearMethodBase): + """Linear method for GPTQ Marlin. + + Args: + quant_config: The GPTQ Marlin quantization config. 
+ """ + + def __init__(self, quant_config: GPTQMarlinConfig) -> None: + self.quant_config = quant_config + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ) -> None: + del output_size + + # Normalize group_size + if self.quant_config.group_size != -1: + group_size = self.quant_config.group_size + else: + group_size = input_size + + # Validate dtype + if params_dtype != torch.float16: + raise ValueError( + f"The params dtype must be float16, but got {params_dtype}") + + # Validate output_size_per_partition + output_size_per_partition = sum(output_partition_sizes) + if output_size_per_partition % self.quant_config.min_thread_n != 0: + raise ValueError( + f"Weight output_size_per_partition = " + f"{output_size_per_partition} is not divisible by " + f" min_thread_n = {self.quant_config.min_thread_n}.") + + # Validate input_size_per_partition + if input_size_per_partition % self.quant_config.min_thread_k != 0: + raise ValueError( + f"Weight input_size_per_partition = " + f"{input_size_per_partition} is not divisible " + f"by min_thread_k = {self.quant_config.min_thread_k}.") + + if (group_size < input_size + and input_size_per_partition % group_size != 0): + raise ValueError( + f"Weight input_size_per_partition = {input_size_per_partition}" + f" is not divisible by group_size = {group_size}.") + + # Detect sharding of scales/zp + + # By default, no sharding over "input dim" + scales_and_zp_size = input_size // group_size + scales_and_zp_input_dim = None + + if self.quant_config.desc_act: + # Act-order case + assert self.quant_config.group_size != -1 + + is_k_full = input_size_per_partition == input_size + + else: + # No act-order case + + # K is always full due to full alignment with + # group-size and shard of scales/zp + is_k_full = True + + # If this is a row-parallel case, then shard scales/zp + 
if (input_size != input_size_per_partition + and self.quant_config.group_size != -1): + scales_and_zp_size = input_size_per_partition // group_size + scales_and_zp_input_dim = 0 + + # Init buffers + + # Quantized weights + qweight = Parameter( + torch.empty( + input_size_per_partition // self.quant_config.pack_factor, + output_size_per_partition, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qweight, + { + **extra_weight_attrs, + "input_dim": 0, + "output_dim": 1, + "packed_dim": 0, + "pack_factor": self.quant_config.pack_factor, + }, + ) + + # Activation order + g_idx = Parameter( + torch.empty( + input_size_per_partition, + dtype=torch.int32, + ), + requires_grad=False, + ) + # Ignore warning from fused linear layers such as QKVParallelLinear. + set_weight_attrs( + g_idx, + { + **extra_weight_attrs, "input_dim": 0, + "ignore_warning": True + }, + ) + + g_idx_sort_indices = Parameter( + torch.empty( + g_idx.shape, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs(g_idx_sort_indices, extra_weight_attrs) + + # Scales + scales = Parameter( + torch.empty( + scales_and_zp_size, + output_size_per_partition, + dtype=params_dtype, + ), + requires_grad=False, + ) + set_weight_attrs( + scales, + { + **extra_weight_attrs, + "input_dim": scales_and_zp_input_dim, + "output_dim": 1, + }, + ) + + # Quantized zero-points + qzeros = Parameter( + torch.empty( + scales_and_zp_size, + output_size_per_partition // self.quant_config.pack_factor, + dtype=torch.int32, + device="meta", + ), + requires_grad=False, + ) + set_weight_attrs( + qzeros, + { + **extra_weight_attrs, + "input_dim": scales_and_zp_input_dim, + "output_dim": 1, + "packed_dim": 1, + "pack_factor": self.quant_config.pack_factor, + }, + ) + + # Allocate marlin workspace + max_workspace_size = ( + output_size_per_partition // + self.quant_config.min_thread_n) * self.quant_config.max_parallel + workspace = torch.zeros(max_workspace_size, + dtype=torch.int, + 
requires_grad=False) + + layer.register_parameter("qweight", qweight) + layer.register_parameter("g_idx", g_idx) + layer.register_parameter("g_idx_sort_indices", g_idx_sort_indices) + layer.register_parameter("scales", scales) + layer.register_parameter("qzeros", qzeros) + layer.workspace = workspace + layer.input_size_per_partition = input_size_per_partition + layer.output_size_per_partition = output_size_per_partition + layer.input_size = input_size + layer.is_k_full = is_k_full + layer.marlin_state = GPTQMarlinState.REPACK + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + reshaped_x = x.reshape(-1, x.shape[-1]) + + size_m = reshaped_x.shape[0] + part_size_n = layer.output_size_per_partition + part_size_k = layer.input_size_per_partition + full_size_k = layer.input_size + + out_shape = x.shape[:-1] + (part_size_n, ) + + if layer.marlin_state == GPTQMarlinState.REPACK: + layer.marlin_state = GPTQMarlinState.READY + + # Newly generated tensors need to replace existing tensors that are + # already registered as parameters by vLLM (and won't be freed) + def replace_tensor(name, new_t): + # It is important to use resize_() here since it ensures + # the same buffer is reused + getattr(layer, name).resize_(new_t.shape) + getattr(layer, name).copy_(new_t) + del new_t + + cur_device = layer.qweight.device + + # Process act_order + if self.quant_config.desc_act: + # Get sorting based on g_idx + g_idx_sort_indices = torch.argsort(layer.g_idx).to(torch.int) + + sorted_g_idx = layer.g_idx[g_idx_sort_indices] + + replace_tensor("g_idx", sorted_g_idx) + replace_tensor("g_idx_sort_indices", g_idx_sort_indices) + + else: + # Reset g_idx related tensors + layer.g_idx = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + layer.g_idx_sort_indices = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + + # Repack weights + 
            marlin_qweight = ops.gptq_marlin_repack(
                layer.qweight,
                layer.g_idx_sort_indices,
                part_size_k,
                part_size_n,
                self.quant_config.weight_bits,
            )
            replace_tensor("qweight", marlin_qweight)

            # Permute scales
            scales_size_k = part_size_k
            scales_size_n = part_size_n
            if self.quant_config.desc_act:
                # With act-order, scales cover the full (unsharded) K dim.
                scales_size_k = full_size_k

            marlin_scales = marlin_permute_scales(
                layer.scales,
                scales_size_k,
                scales_size_n,
                self.quant_config.group_size,
                self.quant_config.weight_bits,
            )
            replace_tensor("scales", marlin_scales)

        output = ops.gptq_marlin_gemm(
            reshaped_x,
            layer.qweight,
            layer.scales,
            layer.g_idx,
            layer.g_idx_sort_indices,
            layer.workspace,
            self.quant_config.weight_bits,
            size_m,
            part_size_n,
            part_size_k,
            layer.is_k_full,
        )

        if bias is not None:
            output.add_(bias)  # In-place add

        return output.reshape(out_shape)
diff --git a/vllm/model_executor/layers/quantization/marlin.py b/vllm/model_executor/layers/quantization/marlin.py
new file mode 100644
index 0000000..94aba62
--- /dev/null
+++ b/vllm/model_executor/layers/quantization/marlin.py
@@ -0,0 +1,227 @@
from typing import Any, Dict, List, Optional

import torch
from torch.nn.parameter import Parameter

from vllm import _custom_ops as ops
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.utils import set_weight_attrs


class MarlinConfig(QuantizationConfig):
    """Config class for Marlin.

    Reference: https://github.com/IST-DASLab/marlin/tree/master
    """

    def __init__(
        self,
        group_size: int,
    ) -> None:
        # Group size for the quantization.
        self.group_size = group_size
        if self.group_size != 128 and self.group_size != -1:
            raise ValueError(
                "Currently, only group size 128 and -1 (channelwise) "
                "is supported for Marlin, but got group_size of "
                f"{self.group_size}")

        # 4 Bits packed into 32 bit datatype.
        self.pack_factor = 32 // 4

        # Tile size used by marlin kernels.
        self.tile_size = 16

        # Min out_features dim
        self.min_n_threads = 64

        # Min in_features dim
        self.min_k_threads = 128

        # Max parallel problems to solve at once (improves large
        # batch performance)
        self.max_parallel = 16

        # Permutation length used by the marlin kernels.
        self.perm_len = 1024

    def __repr__(self) -> str:
        return f"MarlinConfig(group_size={self.group_size})"

    @classmethod
    def get_name(cls) -> str:
        return "marlin"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.half]

    @classmethod
    # Need to figure it out
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "MarlinConfig":
        # Only "group_size" is read from quantize_config.json here.
        group_size = cls.get_from_keys(config, ["group_size"])
        return cls(group_size)

    def get_quant_method(
            self, layer: torch.nn.Module) -> Optional["MarlinLinearMethod"]:
        if isinstance(layer, LinearBase):
            return MarlinLinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []


class MarlinLinearMethod(LinearMethodBase):
    """Linear method for Marlin.

    Args:
        quant_config: The Marlin quantization config.
    """

    def __init__(self, quant_config: MarlinConfig):
        self.quant_config = quant_config

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        """Allocate Marlin-format weight buffers ("B", "s") and the kernel
        workspace on `layer`.

        Raises:
            ValueError: On a non-fp16 dtype or a shard dimension
                incompatible with the Marlin tile/thread constraints.
        """
        del output_size  # Unused.

        if params_dtype != torch.float16:
            raise ValueError(
                f"The params dtype must be float16, but got {params_dtype}")

        # Validate output_size_per_partition
        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.min_n_threads != 0:
            raise ValueError(
                f"Weight output_size_per_partition = "
                f"{output_size_per_partition} is not divisible by "
                f"min_n_threads = {self.quant_config.min_n_threads}.")
        if output_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                f"Weight output_size_per_partition = "
                f"{output_size_per_partition} is not divisible by "
                f"pack_factor = {self.quant_config.pack_factor}.")

        # Validate input_size_per_partition
        if input_size_per_partition % self.quant_config.min_k_threads != 0:
            raise ValueError(
                f"Weight input_size_per_partition = "
                f"{input_size_per_partition} is not divisible by "
                f"min_k_threads = {self.quant_config.min_k_threads}.")
        if (self.quant_config.group_size != -1 and
                input_size_per_partition % self.quant_config.group_size != 0):
            raise ValueError(f"Weight input_size_per_partition = "
                             f"{input_size_per_partition} is not divisible by "
                             f"group_size = {self.quant_config.group_size}.")

        # Check that we have at least 4 tiles horizontally in the shard
        num_tiles_per_perm = self.quant_config.perm_len // (
            self.quant_config.tile_size**2)
        if output_size_per_partition % num_tiles_per_perm != 0:
            raise ValueError(
                "Each permutation group must reside on the same gpu")

        # Quantized 4Bit weights packed into Int32.
        qweight = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.tile_size,
                output_size_per_partition * self.quant_config.tile_size //
                self.quant_config.pack_factor,
                device="cuda",
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight,
            {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
                "marlin_tile_size": self.quant_config.tile_size,
            },
        )

        # Determine if channelwise or not
        input_groups = (1 if self.quant_config.group_size == -1 else
                        input_size_per_partition //
                        self.quant_config.group_size)

        scales = Parameter(
            torch.empty(
                input_groups,
                output_size_per_partition,
                device="cuda",
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            scales,
            {
                "input_dim": None if input_groups == 1 else 0,
                "output_dim": 1,
            },
        )

        # Allocate workspace (Used for internal locking mechanism)
        max_workspace_size = (
            output_size_per_partition //
            self.quant_config.min_n_threads) * self.quant_config.max_parallel
        workspace = Parameter(torch.zeros(max_workspace_size,
                                          device="cuda",
                                          dtype=torch.int),
                              requires_grad=False)

        # NOTE(review): parameter names "B" and "s" match the upstream
        # Marlin checkpoint format, not vLLM's usual qweight/scales naming.
        layer.register_parameter("B", qweight)
        set_weight_attrs(qweight, extra_weight_attrs)
        layer.register_parameter("s", scales)
        set_weight_attrs(scales, extra_weight_attrs)
        layer.register_parameter("workspace", workspace)
        set_weight_attrs(workspace, extra_weight_attrs)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute `x @ B (+ bias)` with the Marlin kernel."""
        qweight = layer.B
        scales = layer.s
        workspace = layer.workspace

        # Flatten leading dims into a 2D [M, K] matmul.
        x_2d = x.view(-1, x.shape[-1])

        size_m = x_2d.shape[0]
        size_k = x_2d.shape[1]
        size_n = scales.shape[1]

        output_2d = ops.marlin_gemm(x_2d, qweight, scales, workspace, size_m,
                                    size_n, size_k)

        output = output_2d.view(x.shape[:-1] + (output_2d.shape[1], ))

        if bias is not None:
            output.add_(bias)  # In-place add
        return output
diff --git a/vllm/model_executor/layers/quantization/schema.py b/vllm/model_executor/layers/quantization/schema.py
new file mode 100644
index 0000000..a26c524
--- /dev/null
+++ b/vllm/model_executor/layers/quantization/schema.py
@@ -0,0 +1,84 @@
"""
This file contains the Pydantic schemas for various quantization-related
parameters. When a relevant quantization technique is specified, these
parameters are loaded in the form of a JSON alongside the model weights
and augment the model with additional information needed for use of that
technique. The format of this JSON should be specified by one or more
schemas contained here.

For example, when the KV cache is quantized to FP8-E4M3 (currently only
possible on ROCm), the model can be optionally augmented with KV cache
scaling factors.
"""

from typing import Dict, Optional

from pydantic import BaseModel, ConfigDict, ValidationInfo, model_validator


class KVCacheQuantSchema(BaseModel):
    # Target KV cache dtype string; only "float8_e4m3fn" is accepted.
    dtype: str
    # Each key is a TP rank. Each value is a dictionary mapping a TP rank's
    # layer indices to their per-tensor KV cache scaling factor.
    # TODO: Consider pulling this and its validation methods out into its
    # own schema class (tricky as its members are variable)
    scaling_factor: Dict[int, Dict[int, float]]

    @model_validator(mode="after")
    def check_is_fp8(self) -> "KVCacheQuantSchema":
        """Reject scaling factors produced for any dtype other than FP8."""
        assert self.dtype == "float8_e4m3fn", (
            "Loaded scaling factors intended for KV cache dtype = "
            f"{self.dtype} rather than float8_e4m3fn!")
        return self

    @model_validator(mode="after")
    def check_tp_ranks(self, info: ValidationInfo) -> "KVCacheQuantSchema":
        """Validate the TP-rank/layer structure against the engine config.

        Runs only when a validation `context` (tp_size,
        num_hidden_layers) is supplied by the caller.
        """
        context = info.context
        if context:
            tp_size = context["tp_size"]
            num_hidden_layers = context["num_hidden_layers"]
            assert len(self.scaling_factor) == tp_size, (
                f"Loaded dictionary has TP size {len(self.scaling_factor)} "
                f"but LLM engine is currently running with TP size {tp_size}.")
            for tp_rank, layer_maps in self.scaling_factor.items():
                assert len(layer_maps) == num_hidden_layers, (
                    f"KV cache scales map for TP rank {tp_rank} is malformed. "
                    f"Expected {num_hidden_layers} layers, got "
                    f"{len(layer_maps)}.")
            for i in range(tp_size):
                assert i in self.scaling_factor, (
                    f"KV cache scales map for TP rank {i} not found.")
        return self

    @model_validator(mode="after")
    def check_current_rank(self, info: ValidationInfo) -> "KVCacheQuantSchema":
        """Ensure every layer of the *current* TP rank has a scale entry."""
        context = info.context
        if context:
            tp_rank = context["tp_rank"]
            num_hidden_layers = context["num_hidden_layers"]
            layer_scales_map = self.scaling_factor[tp_rank]
            for i in range(num_hidden_layers):
                assert i in layer_scales_map, (
                    f"Could not find KV cache scales for layer {i} in "
                    f"TP rank {tp_rank}.")
        return self


class QuantParamSchema(BaseModel):
    # TODO: Generalize and extend with more fields
    # (e.g. weights/activations params) once functionality is enabled
    model_config = ConfigDict(protected_namespaces=())
    model_type: Optional[str]
    kv_cache: KVCacheQuantSchema

    @model_validator(mode="after")
    def check_model_type(self, info: ValidationInfo) -> "QuantParamSchema":
        """Reject scaling factors generated for a different model type."""
        context = info.context
        if context:
            model_type = context.get("model_type", None)
            if model_type is not None:
                assert model_type == self.model_type, (
                    f"Model type is {model_type} but loaded "
                    f"scaling factors belonging to different "
                    f"model type {self.model_type}!")
        return self
diff --git a/vllm/model_executor/layers/quantization/squeezellm.py b/vllm/model_executor/layers/quantization/squeezellm.py
new file mode 100644
index 0000000..207dbce
--- /dev/null
+++ b/vllm/model_executor/layers/quantization/squeezellm.py
@@ -0,0 +1,137 @@
from typing import Any, Dict, List, Optional

import torch
from torch.nn.parameter import Parameter

from vllm import _custom_ops as ops
from vllm.model_executor.layers.linear import LinearBase
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.utils import set_weight_attrs
from vllm.utils import is_hip


class SqueezeLLMConfig(QuantizationConfig):
    """Config class for SqueezeLLM.

    Reference: https://arxiv.org/pdf/2306.07629
    """

    def __init__(
        self,
        weight_bits: int,
    ) -> None:
        self.weight_bits = weight_bits

        if self.weight_bits != 4:
            raise ValueError(
                "Currently, only 4-bit weight quantization is supported for "
                f"SqueezeLLM, but got {self.weight_bits} bits.")

        # Number of 4-bit codes packed into one int32.
        self.pack_factor = 32 // self.weight_bits

    def __repr__(self) -> str:
        return f"SqueezeLLMConfig(weight_bits={self.weight_bits})"

    # NOTE(review): unlike sibling configs (e.g. MarlinConfig), these
    # accessors are instance methods, not @classmethods — confirm whether
    # that asymmetry is intentional before relying on class-level calls.
    def get_name(self) -> str:
        return "squeezellm"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half]

    def get_min_capability(self) -> int:
        return 70

    @staticmethod
    def get_config_filenames() -> List[str]:
        return ["quant_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "SqueezeLLMConfig":
        weight_bits = cls.get_from_keys(config, ["wbits"])
        return cls(weight_bits)

    def get_quant_method(
            self, layer: torch.nn.Module) -> Optional[QuantizeMethodBase]:
        if isinstance(layer, LinearBase):
            return SqueezeLLMLinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []


class SqueezeLLMLinearMethod(QuantizeMethodBase):
    """Linear method for SqueezeLLM.

    Args:
        quant_config: The SqueezeLLM quantization config.
    """

    def __init__(self, quant_config: SqueezeLLMConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """Allocate the packed weight and dequantization lookup table.

        Raises:
            ValueError: If the K shard is not a multiple of pack_factor.
        """
        if input_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        qweight = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.pack_factor,
                output_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 0,
                "pack_factor": self.quant_config.pack_factor,
            })
        # Per-output-channel dequantization table.
        # NOTE(review): `weight_bits**2` (= 16) equals the intended
        # `2**weight_bits` only because weight_bits is pinned to 4 above;
        # the expression is semantically 2**bits codebook entries.
        # Also note rows use the full `output_size`, not the per-partition
        # size — confirm against the kernel's expectations.
        lookup_table = Parameter(
            torch.empty(
                output_size,
                self.quant_config.weight_bits**2,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(lookup_table, {
            "output_dim": 0,
        })

        layer.register_parameter("qweight", qweight)
        set_weight_attrs(qweight, extra_weight_attrs)
        layer.register_parameter("lookup_table", lookup_table)
        set_weight_attrs(lookup_table, extra_weight_attrs)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run the SqueezeLLM dequant-matmul kernel on a 2D-flattened x."""
        qweight = layer.qweight
        lookup_table = layer.lookup_table
        out_shape = x.shape[:-1] + (qweight.shape[-1], )
        reshaped_x = x.reshape(-1, x.shape[-1])
        if is_hip():
            # ROCm kernel accumulates in fp32, then cast down to fp16.
            out_f = torch.zeros(out_shape, dtype=torch.float)
            ops.squeezellm_gemm(reshaped_x, qweight, out_f, lookup_table)
            out = out_f.to(dtype=torch.float16)
        else:
            # NOTE: The output tensor should be zero-initialized.
            out = torch.zeros(out_shape, dtype=torch.float16)
            ops.squeezellm_gemm(reshaped_x, qweight, out, lookup_table)

        if bias is not None:
            out.add_(bias)
        return out.reshape(out_shape)
diff --git a/vllm/model_executor/layers/rejection_sampler.py b/vllm/model_executor/layers/rejection_sampler.py
new file mode 100644
index 0000000..5edbbf2
--- /dev/null
+++ b/vllm/model_executor/layers/rejection_sampler.py
@@ -0,0 +1,405 @@
from functools import cached_property
from typing import Optional, Tuple

import torch
import torch.jit
import torch.nn as nn


class RejectionSampler(nn.Module):
    """Apply modified rejection sampling as described in "Accelerating Large
    Language Model Decoding with Speculative Sampling"
    https://arxiv.org/pdf/2302.01318.pdf.
    """

    def __init__(self, strict_mode: bool = False):
        """Create a rejection sampler.

        Args:
            strict_mode: Whether or not to perform shape/device/dtype checks
            during sampling. This catches correctness issues but adds
            nontrivial latency.
        """
        super().__init__()
        self._strict_mode = strict_mode

        # NOTE: A "bonus token" is accepted iff all proposal tokens are
        # accepted. There is always only one possible bonus token. We store this
        # value in a variable for readability.
        self._num_bonus_tokens = 1

        # Acceptance counters live on GPU; created lazily in
        # init_gpu_tensors() once the device rank is known.
        self.num_accepted_tokens: Optional[torch.Tensor] = None
        self.num_emitted_tokens: Optional[torch.Tensor] = None
        self.num_draft_tokens: int = 0

    def init_gpu_tensors(self, rank: int) -> None:
        """Allocate the per-device acceptance counters; call exactly once."""
        assert self.num_accepted_tokens is None
        device = f"cuda:{rank}"
        self.num_accepted_tokens = torch.tensor(0,
                                                dtype=torch.long,
                                                device=device)
        self.num_emitted_tokens = torch.tensor(0,
                                               dtype=torch.long,
                                               device=device)

    @property
    def probs_dtype(self):
        return torch.float32

    @property
    def token_id_dtype(self):
        return torch.int64

    def forward(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> torch.Tensor:
        """Sample token ids using rejection sampling. This accepts or rejects
        tokens proposed by the draft model using the probability of each token
        according to the draft and target models.

        In the worst case where all draft tokens are rejected, it is guaranteed
        one correct token will be emitted.

        In the case where all draft tokens are accepted, a bonus token will be
        accepted as its cheap to have the target model score this speculative
        sequence.

        Args:
            target_probs: The probability distribution over token ids given
                context according to the target model.
            shape = [batch_size, num_speculative_tokens, vocab_size]

            bonus_token_ids: The "bonus" token ids that are accepted iff all
                speculative tokens in a sequence are accepted.
            shape = [batch_size, num_bonus_tokens]

            draft_probs: The probability distribution over token ids given
                context according to the draft model.
            shape = [batch_size, num_speculative_tokens, vocab_size]

            draft_token_ids: The token ids that were sampled from the draft
                probabilities.
            shape = [batch_size, num_speculative_tokens]

        Returns:
            output_token_ids: The token ids sampled via rejection sampling,
                or -1 if unable to sample a token because the previous token
                was rejected.
            shape = [batch_size, num_speculative_tokens + num_bonus_tokens]
        """
        # Only perform shape/dtype/device checking in strict mode, as it adds
        # overhead.
        if self._strict_mode:
            self._raise_if_incorrect_shape(target_probs, bonus_token_ids,
                                           draft_probs, draft_token_ids)
            self._raise_if_incorrect_dtype(target_probs, bonus_token_ids,
                                           draft_probs, draft_token_ids)
            self._raise_if_inconsistent_device(target_probs, bonus_token_ids,
                                               draft_probs, draft_token_ids)
            self._raise_if_out_of_bounds_vocab(target_probs.shape[-1],
                                               bonus_token_ids,
                                               draft_token_ids)

        accepted, recovered_token_ids = self._batch_modified_rejection_sampling(
            target_probs,
            draft_probs,
            draft_token_ids,
        )

        output_token_ids = self._create_output(
            accepted,
            recovered_token_ids,
            draft_token_ids,
            bonus_token_ids,
        )
        return output_token_ids

    def _batch_modified_rejection_sampling(
            self,
            target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
            draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
            draft_token_ids: torch.Tensor,  # [batch_size, k]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform modified rejection sampling on each sequence.

        Returns:
            A tuple of two tensors:
            0: A bool tensor of which tokens in each sequence is accepted.
                shape = [batch_size, k]
            1: Token ids sampled from a recovered distribution, to be used
                when a token is rejected.
                shape = [batch_size, k]
        """

        batch_size, k, vocab_size = draft_probs.shape

        # shape [batch_size, k]
        accepted = self._get_accepted(target_probs, draft_probs,
                                      draft_token_ids)

        recovered_probs = self._get_recovered_probs(
            target_probs, draft_probs).reshape(batch_size * k, vocab_size)

        # NOTE: the recovered_probs are overwritten by this method.
        recovered_token_ids = _multinomial(recovered_probs,
                                           num_samples=1).reshape(
                                               batch_size, k)
        return accepted, recovered_token_ids

    def _get_accepted(
            self,
            target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
            draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
            draft_token_ids: torch.Tensor,  # [batch_size, k]
    ) -> torch.Tensor:
        r"""Create bool matrix over the proposed draft tokens. If
        True, then a token can be accepted, else it should be
        rejected.

        Given :math:`q(\hat{x}_{n+1}|x_1, \dots, x_n)`, the probability of
        :math:`\hat{x}_{n+1}` given context :math:`x_1, \dots, x_n` according
        to the target model, and :math:`p(\hat{x}_{n+1}|x_1, \dots, x_n)`, the
        same conditional probability according to the draft model, the token
        is accepted with probability:

        .. math::
            \min\left(1, \frac{q(\hat{x}_{n+1}|x_1, \dots, x_n)}
                        {p(\hat{x}_{n+1}|x_1, \dots, x_n)}\right)

        This implementation does not apply causality. When using the output,
        if a token is rejected, subsequent tokens should not be used.

        Returns a bool tensor of shape [batch_size, k] specifying which tokens
        are accepted.
        """
        batch_size, k, _ = draft_probs.shape
        batch_indices = torch.arange(batch_size,
                                     device=target_probs.device)[:, None]
        probs_indicies = torch.arange(k, device=target_probs.device)

        # shape [batch_size, k]
        selected_draft_probs = draft_probs[batch_indices, probs_indicies,
                                           draft_token_ids]

        # shape [batch_size, k]
        selected_target_probs = target_probs[batch_indices, probs_indicies,
                                             draft_token_ids]

        uniform_rand = torch.rand(batch_size,
                                  k,
                                  dtype=self.probs_dtype,
                                  device=target_probs.device)
        # min(1, q/p): cap the acceptance ratio at 1 before comparing.
        capped_ratio = torch.minimum(
            selected_target_probs / selected_draft_probs,
            torch.full((1, ), 1, device=target_probs.device))
        accepted = uniform_rand < capped_ratio

        return accepted

    def _get_recovered_probs(
            self,
            target_probs: torch.Tensor,  # [k, vocab_size]
            draft_probs: torch.Tensor,  # [k, vocab_size]
    ) -> torch.Tensor:
        r"""Create a probability distribution for each proposed token which can
        be sampled if the proposed token is rejected.

        When this routine is applied sequentially, the true distribution of the
        target model is recovered (within hardware numerics).

        The probability distribution used in this rejection case is constructed
        as follows. Given :math:`q(x|x_1, \dots, x_n)`, the probability of
        :math:`x` given context :math:`x_1, \dots, x_n` according to the target
        model and :math:`p(x|x_1, \dots, x_n)`, the same conditional probability
        according to the draft model:

        .. math::
            x_{n+1} \sim (q(x|x_1, \dots, x_n) - p(x|x_1, \dots, x_n))_+

        where :math:`(f(x))_+` is defined as:

        .. math::
            (f(x))_+ = \frac{\max(0, f(x))}{\sum_x \max(0, f(x))}

        See https://github.com/vllm-project/vllm/pull/2336 for a visualization
        of the draft, target, and recovered probability distributions.

        Returns a tensor of shape [batch_size, k, vocab_size].

        Note: This batches operations on GPU and thus constructs the recovered
        distribution for all tokens, even if they are accepted. This causes
        division-by-zero errors, so we use self._smallest_positive_value to
        avoid that. This introduces some drift to the distribution.
        """
        _, k, _ = draft_probs.shape

        # shape [batch_size, k, vocab_size]
        difference = target_probs - draft_probs

        # TODO(cade): Can we use logprobs instead of probs, and avoid the
        # division-by-zero errors without introducing distribution drift?

        # shape [batch_size, k, vocab_size]
        f = torch.clamp(difference, min=self._smallest_positive_value)

        # shape [batch_size, k, vocab_size]
        recovered_probs = f / torch.sum(f, dim=-1).reshape(-1, k, 1)

        return recovered_probs

    @cached_property
    def _smallest_positive_value(self) -> float:
        """Return the smallest positive value representable by the probs dtype.
        This value is used when constructing a distribution from which to sample
        recovered tokens in the first rejection case.

        See _get_recovered_probs for more details

        Note that this isn't actually the smallest positive value representable
        by float32, but the smallest positive normal value.
        See https://en.wikipedia.org/wiki/Subnormal_number for more information.
        """
        return torch.finfo(self.probs_dtype).tiny

    def _create_output(
            self,
            accepted: torch.Tensor,  # [batch_size, k]
            recovered_token_ids: torch.Tensor,  # [batch_size, k]
            draft_token_ids: torch.Tensor,  # [batch_size, k]
            bonus_token_ids: torch.Tensor,  # [batch_size]
    ) -> torch.Tensor:
        """Format output. Returns a matrix of token ids. When
        a token is rejected via rejection sampling, all subsequent
        token ids are set to -1 for the sequence.

        shape = [batch_size, k + num_bonus_tokens]
        """
        # NOTE(review): squeeze() with no dim — presumably collapses the
        # trailing num_bonus_tokens==1 dim; verify behavior for batch_size==1.
        bonus_token_ids = bonus_token_ids.squeeze()
        batch_size, k = recovered_token_ids.shape

        # Determine the index of the first False value for each row.
        limits = (accepted == 0).max(1).indices
        limits[~(accepted == 0).any(1)] = k

        # Create masks using the indices.
        indices = torch.arange(k, device=accepted.device).unsqueeze(0)
        accepted_mask = indices < limits.unsqueeze(1)
        after_false_mask = indices == limits.unsqueeze(1)

        # Create an extended output tensor
        output_with_bonus_tokens = -torch.ones(
            (batch_size, k + self._num_bonus_tokens),
            dtype=self.token_id_dtype,
            device=accepted.device)
        # `output` is a view: writes below also update the extended tensor.
        output = output_with_bonus_tokens[:, :k]

        # Fill in the first k columns of the output tensor using masks and data
        # tensors.
        output[:, :k] = torch.where(accepted_mask, draft_token_ids,
                                    -torch.ones_like(draft_token_ids))

        # Fill the last column.
        # We check output directly as accepted may have True values inconsistent
        # with causal acceptance.
        output_with_bonus_tokens[:, -1] = torch.where(output[:, -1] != -1,
                                                      bonus_token_ids, -1)

        # We disable bonus tokens because it causes corrupt KV cache for
        # proposal methods that require KV cache. We can fix it by "prefilling"
        # the bonus token in the proposer. The following issue tracks the fix.
        # https://github.com/vllm-project/vllm/issues/4212
        output_with_bonus_tokens[:, -1] = -1

        # Fill the recovered token ids.
        output.mul_(~after_false_mask).add_(
            recovered_token_ids.mul(after_false_mask))

        self.num_accepted_tokens += accepted.sum()
        self.num_emitted_tokens += (output_with_bonus_tokens != -1).sum()
        self.num_draft_tokens += batch_size * k

        return output_with_bonus_tokens

    def _raise_if_incorrect_shape(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        (target_batch_size, num_target_probs,
         target_vocab_size) = target_probs.shape
        bonus_batch_size, num_bonus_tokens = bonus_token_ids.shape
        draft_batch_size, num_draft_probs, draft_vocab_size = draft_probs.shape
        draft_token_ids_batch_size, num_draft_token_ids = draft_token_ids.shape

        assert draft_batch_size == target_batch_size
        assert num_draft_probs == num_target_probs
        assert (draft_vocab_size == target_vocab_size
                ), f"{draft_vocab_size=} {target_vocab_size=}"

        assert draft_token_ids_batch_size == draft_batch_size
        assert num_draft_token_ids == num_draft_probs

        assert bonus_batch_size == target_batch_size
        assert num_bonus_tokens == self._num_bonus_tokens

    def _raise_if_incorrect_dtype(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        assert all(probs.dtype == self.probs_dtype
                   for probs in [target_probs, draft_probs])
        assert all(token_ids.dtype == self.token_id_dtype
                   for token_ids in [bonus_token_ids, draft_token_ids])

    def _raise_if_inconsistent_device(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        devices = [
            t.device for t in
            [target_probs, bonus_token_ids, draft_probs, draft_token_ids]
        ]
        assert all([devices[0] == device for device in devices])

    def _raise_if_out_of_bounds_vocab(
        self,
        vocab_size: int,
        bonus_token_ids: torch.Tensor,
        draft_token_ids:
torch.Tensor, + ) -> None: + assert torch.all(bonus_token_ids < vocab_size) + assert torch.all(bonus_token_ids >= 0) + assert torch.all(draft_token_ids < vocab_size) + assert torch.all(draft_token_ids >= 0) + + +# torch.multinomial forces a GPU<->CPU sync. +# Therefore, we use an optimized implementation instead that skips the sync. +# Note that we always sample with replacement. +# probs will be modified in place, but this is fine, as we pass +# in a copy already. +@torch.jit.script +def _multinomial( + probs: torch.Tensor, + num_samples: int, +) -> torch.Tensor: + if num_samples > 1: + # This is equivalent to torch.repeat_interleaved (which also + # forces a GPU<->CPU sync). + probs = probs[:, None, :].expand(probs.shape[0], num_samples, + probs.shape[1]).contiguous().view( + -1, probs.shape[1]) + q = torch.empty_like(probs).exponential_(1.0) + return probs.div_(q).argmax(dim=1).view(-1, num_samples) diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py new file mode 100644 index 0000000..b102e1a --- /dev/null +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -0,0 +1,531 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
def _rotate_neox(x: torch.Tensor) -> torch.Tensor:
    # GPT-NeoX style rotation: the last dimension is split into two
    # contiguous halves [x1, x2], which are rotated to [-x2, x1].
    x1 = x[..., :x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)


def _rotate_gptj(x: torch.Tensor) -> torch.Tensor:
    # GPT-J style rotation: even/odd interleaved pairs (x1, x2) of the last
    # dimension are rotated to (-x2, x1), keeping the interleaved layout.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)


class RotaryEmbedding(nn.Module):
    """Original rotary positional embedding."""

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: int,
        is_neox_style: bool,
    ) -> None:
        super().__init__()
        self.head_size = head_size
        self.rotary_dim = rotary_dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.is_neox_style = is_neox_style

        # Precompute the cos/sin table once; it is registered as a
        # non-persistent buffer so it follows the module across devices
        # but is not saved in checkpoints.
        cache = self._compute_cos_sin_cache()
        cache = cache.to(torch.get_default_dtype())
        self.register_buffer("cos_sin_cache", cache, persistent=False)

    def _compute_inv_freq(self, base: Union[int, float]) -> torch.Tensor:
        """Compute the inverse frequency."""
        # NOTE(woosuk): The HF implementation uses `torch.arange(...).float()`.
        # However, we use `torch.arange(..., dtype=torch.float)` instead to
        # avoid numerical issues with large base values (e.g., 10000000).
        # This may cause a slight numerical difference between the HF
        # implementation and ours.
        # NOTE(woosuk): To exactly match the HF implementation, we need to
        # use CPU to compute the cache and then move it to GPU. However, we
        # create the cache on GPU for faster initialization. This may cause
        # a slight numerical difference between the HF implementation and ours.

        # torch_musa did not support pow_scalar_out
        # inv_freq = 1.0 / (base**(torch.arange(
        #     0, self.rotary_dim, 2, dtype=torch.float) / self.rotary_dim))

        # Work around the missing MUSA kernel: compute the scalar power on
        # CPU, then move the result back to the original device.
        exp = torch.arange(0, self.rotary_dim, 2, dtype=torch.float)
        device = exp.device
        inv_freq = 1.0 / (base**(exp.cpu() / self.rotary_dim))
        return inv_freq.to(device)

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        """Compute the cos and sin cache."""
        inv_freq = self._compute_inv_freq(self.base)
        t = torch.arange(self.max_position_embeddings, dtype=torch.float)

        # Outer product of positions and inverse frequencies:
        # shape [max_position_embeddings, rotary_dim // 2].
        freqs = torch.einsum("i,j -> ij", t, inv_freq)
        cos = freqs.cos()
        sin = freqs.sin()
        # Layout: cos values in the first half, sin values in the second.
        cache = torch.cat((cos, sin), dim=-1)
        return cache

    def _forward(
        self,
        positions: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        offsets: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """PyTorch-native implementation equivalent to forward()."""
        query = query.view(*query.shape[:-1], -1, self.head_size)
        key = key.view(*key.shape[:-1], -1, self.head_size)

        # Only the first rotary_dim channels of each head are rotated; the
        # rest (if any) pass through unchanged.
        query_rot = query[..., :self.rotary_dim]
        key_rot = key[..., :self.rotary_dim]
        if self.rotary_dim < self.head_size:
            query_pass = query[..., self.rotary_dim:]
            key_pass = key[..., self.rotary_dim:]

        self.cos_sin_cache: torch.Tensor = self.cos_sin_cache.to(
            positions.device)
        cos_sin = self.cos_sin_cache[torch.add(positions, offsets)
                                     if offsets is not None else positions]
        cos, sin = cos_sin.chunk(2, dim=-1)
        if self.is_neox_style:
            # NOTE(woosuk): Here we assume that the positions tensor has the
            # shape [batch_size, seq_len].
            cos = cos.repeat(1, 1, 2).unsqueeze(-2)
            sin = sin.repeat(1, 1, 2).unsqueeze(-2)
        else:
            cos = cos.repeat_interleave(2, dim=-1).unsqueeze(-2)
            sin = sin.repeat_interleave(2, dim=-1).unsqueeze(-2)

        rotate_fn = _rotate_neox if self.is_neox_style else _rotate_gptj
        query_rot = query_rot * cos + rotate_fn(query_rot) * sin
        key_rot = key_rot * cos + rotate_fn(key_rot) * sin

        if self.rotary_dim < self.head_size:
            query = torch.cat((query_rot, query_pass), dim=-1)
            key = torch.cat((key_rot, key_pass), dim=-1)
        else:
            query = query_rot
            key = key_rot
        query = query.flatten(-2)
        key = key.flatten(-2)
        return query, key

    def forward(
        self,
        positions: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        offsets: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply rotary embeddings to query/key via the custom kernels."""
        self.cos_sin_cache = self.cos_sin_cache.to(positions.device)
        # ops.rotary_embedding()/batched_rotary_embedding()
        # are in-place operations that update the query and key tensors.
        if offsets is not None:
            ops.batched_rotary_embedding(positions, query, key, self.head_size,
                                         self.cos_sin_cache,
                                         self.is_neox_style, self.rotary_dim,
                                         offsets)
        else:
            ops.rotary_embedding(positions, query, key, self.head_size,
                                 self.cos_sin_cache, self.is_neox_style)
        return query, key

    def extra_repr(self) -> str:
        # Shown in the module's repr() for debugging.
        s = f"head_size={self.head_size}, rotary_dim={self.rotary_dim}"
        s += f", max_position_embeddings={self.max_position_embeddings}"
        s += f", base={self.base}, is_neox_style={self.is_neox_style}"
        return s
class LinearScalingRotaryEmbedding(RotaryEmbedding):
    """RotaryEmbedding extended with linear scaling.

    Credits to the Reddit user /u/kaiokendev
    """

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: int,
        is_neox_style: bool,
        scaling_factors: Union[List[float], float],
    ) -> None:
        # A single float is normalized to a one-element list so the cache
        # computation below can always iterate.
        if isinstance(scaling_factors, float):
            scaling_factors = [scaling_factors]
        self.scaling_factors = scaling_factors
        super().__init__(head_size, rotary_dim, max_position_embeddings, base,
                         is_neox_style)

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        # One cache segment per scaling factor, concatenated along dim 0.
        inv_freq = self._compute_inv_freq(self.base)
        cache_list = []
        for scaling_factor in self.scaling_factors:
            # NOTE(woosuk): self.max_position_embeddings is the original
            # maximum length before applying the rope scaling.
            # Thus, the maximum length after applying the rope scaling is
            # self.max_position_embeddings * self.scaling_factor.
            max_len = self.max_position_embeddings * scaling_factor
            t = torch.arange(max_len, dtype=torch.float)
            # Linear interpolation: positions are compressed by the factor.
            t = t / scaling_factor

            freqs = torch.einsum("i,j -> ij", t, inv_freq)
            cos = freqs.cos()
            sin = freqs.sin()
            cache = torch.cat((cos, sin), dim=-1)
            cache_list.append(cache)
        return torch.cat(cache_list, dim=0)


class DynamicNTKScalingRotaryEmbedding(RotaryEmbedding):
    """RotaryEmbedding extended with Dynamic NTK scaling.

    Credits to the Reddit users /u/bloc97 and /u/emozilla
    """

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: int,
        is_neox_style: bool,
        scaling_factor: float,
    ) -> None:
        self.scaling_factor = scaling_factor
        super().__init__(head_size, rotary_dim, max_position_embeddings, base,
                         is_neox_style)

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        # NOTE(woosuk): self.max_position_embeddings is the original
        # maximum length before applying the rope scaling.
        # Thus, the maximum length after applying the rope scaling is
        # self.max_position_embeddings * self.scaling_factor.
        max_len = self.max_position_embeddings * self.scaling_factor
        # NTK-aware scaling: instead of compressing positions, the base is
        # grown as a function of the scaling factor.
        base = self.base * (
            (self.scaling_factor * max_len / self.max_position_embeddings) -
            (self.scaling_factor - 1))**(self.rotary_dim /
                                         (self.rotary_dim - 2))
        inv_freq = self._compute_inv_freq(base)
        t = torch.arange(max_len, dtype=torch.float)

        freqs = torch.einsum("i,j -> ij", t, inv_freq)
        cos = freqs.cos()
        sin = freqs.sin()
        cache = torch.cat((cos, sin), dim=-1)
        return cache
+ max_len = self.max_position_embeddings * self.scaling_factor + base = self.base * ( + (self.scaling_factor * max_len / self.max_position_embeddings) - + (self.scaling_factor - 1))**(self.rotary_dim / + (self.rotary_dim - 2)) + inv_freq = self._compute_inv_freq(base) + t = torch.arange(max_len, dtype=torch.float) + + freqs = torch.einsum("i,j -> ij", t, inv_freq) + cos = freqs.cos() + sin = freqs.sin() + cache = torch.cat((cos, sin), dim=-1) + return cache + + +# Inverse dim formula to find dim based on number of rotations +def _yarn_find_correction_dim(num_rotations: int, + dim: int, + base: float = 10000, + max_position_embeddings: int = 2048) -> float: + return (dim * math.log(max_position_embeddings / + (num_rotations * 2 * math.pi))) / (2 * + math.log(base)) + + +# Find dim range bounds based on rotations +def _yarn_find_correction_range( + low_rot: int, + high_rot: int, + dim: int, + base: float = 10000, + max_position_embeddings: int = 2048) -> Tuple[int, int]: + low = math.floor( + _yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings)) + high = math.ceil( + _yarn_find_correction_dim(high_rot, dim, base, + max_position_embeddings)) + return max(low, 0), min(high, dim - 1) # Clamp values just in case + + +def _yarn_linear_ramp_mask(low: float, high: float, dim: int, + dtype: torch.dtype) -> torch.Tensor: + if low == high: + high += 0.001 # Prevent singularity + + linear_func = (torch.arange(dim, dtype=dtype) - low) / (high - low) + ramp_func = torch.clamp(linear_func, 0, 1) + return ramp_func + + +def _yarn_get_mscale(scale: float = 1) -> float: + if scale <= 1: + return 1.0 + return 0.1 * math.log(scale) + 1.0 + + +class YaRNScalingRotaryEmbedding(RotaryEmbedding): + """RotaryEmbedding extended with YaRN method. + + Credits to Peng et al. 
class YaRNScalingRotaryEmbedding(RotaryEmbedding):
    """RotaryEmbedding extended with YaRN method.

    Credits to Peng et al. github.com/jquesnelle/yarn
    """

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: int,
        is_neox_style: bool,
        scaling_factor: float,
        *,
        extrapolation_factor: float = 1,
        attn_factor: float = 1,
        beta_fast: int = 32,
        beta_slow: int = 1,
    ) -> None:
        self.scaling_factor = scaling_factor
        self.extrapolation_factor = extrapolation_factor
        self.attn_factor = attn_factor
        self.beta_fast = beta_fast
        self.beta_slow = beta_slow
        # Get n-d magnitude scaling corrected for interpolation
        self.mscale = float(
            _yarn_get_mscale(self.scaling_factor) * attn_factor)
        super().__init__(head_size, rotary_dim, max_position_embeddings, base,
                         is_neox_style)

    def _compute_inv_freq(self, scaling_factor: float) -> torch.Tensor:
        # NOTE: unlike the base class, the argument here is the scaling
        # factor, not the base.
        pos_freqs = self.base**(
            torch.arange(0, self.rotary_dim, 2, dtype=torch.float) /
            self.rotary_dim)
        inv_freq_extrapolation = 1.0 / pos_freqs
        inv_freq_interpolation = 1.0 / (scaling_factor * pos_freqs)

        low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow,
                                                self.rotary_dim, self.base,
                                                self.max_position_embeddings)
        # Get n-d rotational scaling corrected for extrapolation
        inv_freq_mask = (1 - _yarn_linear_ramp_mask(
            low, high, self.rotary_dim // 2,
            dtype=torch.float)) * self.extrapolation_factor
        # Blend interpolated and extrapolated frequencies per dimension.
        inv_freq = inv_freq_interpolation * (
            1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask
        return inv_freq

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        inv_freq = self._compute_inv_freq(self.scaling_factor)
        # The cache covers the scaled (extended) context length.
        t = torch.arange(self.max_position_embeddings * self.scaling_factor,
                         dtype=torch.float32)
        freqs = torch.einsum("i,j -> ij", t, inv_freq)
        # Apply the YaRN magnitude correction to both components.
        cos = (freqs.cos() * self.mscale)
        sin = (freqs.sin() * self.mscale)
        cache = torch.cat((cos, sin), dim=-1)
        return cache
+ """ + + def __init__( + self, + head_size: int, + rotary_dim: int, + max_position_embeddings: int, + original_max_position_embeddings: int, + base: int, + is_neox_style: bool, + short_factor: List[float], + long_factor: List[float], + short_mscale: float = 1.1, + long_mscale: float = 1.225, + ): + super().__init__() + + if rotary_dim != head_size: + raise ValueError( + f"`Phi3SuScaledRotaryEmbedding` does not support rotary_dim != \ + head_size ({rotary_dim}!={head_size}).") + if is_neox_style is False: + raise ValueError( + "`Phi3SuScaledRotaryEmbedding` only supports neox_style.") + + self.head_size = head_size + self.max_position_embeddings = max_position_embeddings + self.original_max_position_embeddings = original_max_position_embeddings + self.base = base + self.short_factor = short_factor + self.long_factor = long_factor + self.short_mscale = short_mscale + self.long_mscale = long_mscale + + short_cache = self._compute_cos_sin_cache( + original_max_position_embeddings, short_factor, short_mscale) + short_cache = short_cache.to(torch.get_default_dtype()) + self.register_buffer("short_cos_sin_cache", + short_cache, + persistent=False) + + long_cache = self._compute_cos_sin_cache(max_position_embeddings, + long_factor, long_mscale) + long_cache = long_cache.to(torch.get_default_dtype()) + self.register_buffer("long_cos_sin_cache", + long_cache, + persistent=False) + + long_short_cache = torch.cat( + [self.short_cos_sin_cache, self.long_cos_sin_cache], dim=0) + self.register_buffer("long_short_cos_sin_cache", + long_short_cache, + persistent=False) + + def _compute_inv_freq(self, rescale_factors: List[float]) -> torch.Tensor: + rescale_factors = torch.tensor(rescale_factors, dtype=torch.float32) + inv_freq = 1.0 / (rescale_factors * (self.base**(torch.arange( + 0, self.head_size, 2, dtype=torch.float) / self.head_size))) + return inv_freq + + def _compute_cos_sin_cache( + self, + max_position_embeddings: int, + rescale_factors: List[float], + mscale: float, + 
) -> torch.Tensor: + inv_freq = self._compute_inv_freq(rescale_factors) + t = torch.arange(max_position_embeddings, dtype=torch.float) + freqs = torch.einsum("i,j -> ij", t, inv_freq) + cos = freqs.cos() * mscale + sin = freqs.sin() * mscale + cache = torch.cat((cos, sin), dim=-1) + return cache + + def forward( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor, + offsets: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + query = query.view(*query.shape[:-1], -1, self.head_size) + key = key.view(*key.shape[:-1], -1, self.head_size) + + k = self.original_max_position_embeddings + long_prompt_offset = (torch.any(positions > k).float() * + torch.full_like(positions, k)).long() + idx = (torch.add(positions, long_prompt_offset) + if long_prompt_offset is not None else positions) + self.long_short_cos_sin_cache: torch.Tensor = ( + self.long_short_cos_sin_cache.to(idx.device)) + idx = torch.add(idx, offsets) if offsets is not None else idx + cos_sin = torch.index_select(self.long_short_cos_sin_cache, 0, idx) + + cos, sin = cos_sin.chunk(2, dim=-1) + cos = cos.repeat(1, 2).unsqueeze(-2) + sin = sin.repeat(1, 2).unsqueeze(-2) + + query = query * cos + _rotate_neox(query) * sin + key = key * cos + _rotate_neox(key) * sin + + return query.flatten(-2), key.flatten(-2) + + +_ROPE_DICT: Dict[Tuple, RotaryEmbedding] = {} + + +def get_rope( + head_size: int, + rotary_dim: int, + max_position: int, + base: int, + is_neox_style: bool = True, + rope_scaling: Optional[Dict[str, Any]] = None, +) -> RotaryEmbedding: + if rope_scaling is not None: + # Transforms every value that is a list into a tuple for caching calls + rope_scaling_tuple = { + k: tuple(v) if isinstance(v, list) else v + for k, v in rope_scaling.items() + } + rope_scaling_args = tuple(rope_scaling_tuple.items()) + else: + rope_scaling_args = None + key = (head_size, rotary_dim, max_position, base, is_neox_style, + rope_scaling_args) + if key in _ROPE_DICT: + return 
def get_rope(
    head_size: int,
    rotary_dim: int,
    max_position: int,
    base: int,
    is_neox_style: bool = True,
    rope_scaling: Optional[Dict[str, Any]] = None,
) -> RotaryEmbedding:
    """Build (or fetch from the module-level cache) a RoPE module.

    `rope_scaling` selects a scaled variant via its "type" key ("linear",
    "dynamic", "yarn", or "su"); None yields the plain RotaryEmbedding.
    """
    if rope_scaling is None:
        rope_scaling_args = None
    else:
        # Lists inside the config are not hashable; convert them to tuples
        # so the whole config can participate in the cache key.
        hashable_scaling = {
            name: tuple(value) if isinstance(value, list) else value
            for name, value in rope_scaling.items()
        }
        rope_scaling_args = tuple(hashable_scaling.items())
    cache_key = (head_size, rotary_dim, max_position, base, is_neox_style,
                 rope_scaling_args)
    cached = _ROPE_DICT.get(cache_key)
    if cached is not None:
        return cached

    if rope_scaling is None:
        rotary_emb = RotaryEmbedding(head_size, rotary_dim, max_position,
                                     base, is_neox_style)
    else:
        scaling_type = rope_scaling["type"]
        # Every scheme except "su" is parameterized by a single factor.
        if scaling_type != "su":
            scaling_factor = rope_scaling["factor"]
        if scaling_type == "linear":
            rotary_emb = LinearScalingRotaryEmbedding(head_size, rotary_dim,
                                                      max_position, base,
                                                      is_neox_style,
                                                      scaling_factor)
        elif scaling_type == "dynamic":
            rotary_emb = DynamicNTKScalingRotaryEmbedding(
                head_size, rotary_dim, max_position, base, is_neox_style,
                scaling_factor)
        elif scaling_type == "yarn":
            original_max_position = rope_scaling[
                "original_max_position_embeddings"]
            yarn_kwargs = {
                name: value
                for name, value in rope_scaling.items()
                if name in ("extrapolation_factor", "attn_factor",
                            "beta_fast", "beta_slow")
            }
            rotary_emb = YaRNScalingRotaryEmbedding(head_size, rotary_dim,
                                                    original_max_position,
                                                    base, is_neox_style,
                                                    scaling_factor,
                                                    **yarn_kwargs)
        elif scaling_type == "su":
            short_factor = rope_scaling["short_factor"]
            long_factor = rope_scaling["long_factor"]
            original_max_position = rope_scaling[
                "original_max_position_embeddings"]
            su_kwargs = {
                name: value
                for name, value in rope_scaling.items()
                if name in ("short_mscale", "long_mscale")
            }
            rotary_emb = Phi3SuScaledRotaryEmbedding(
                head_size, rotary_dim, max_position, original_max_position,
                base, is_neox_style, short_factor, long_factor, **su_kwargs)
        else:
            raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
    _ROPE_DICT[cache_key] = rotary_emb
    return rotary_emb
class Sampler(nn.Module):
    """Samples the next tokens from the model's outputs.

    This layer does the following:
    1. Discard the hidden states that are not used for sampling (i.e., all
        tokens except the final one in each prompt).
    2. Compute the logits for the next tokens.
    3. Apply presence, frequency and repetition penalties.
    4. Apply temperature scaling.
    5. Apply top-p and top-k truncation.
    6. Sample the next tokens.
    Here, each sequence group within the batch can have different sampling
    parameters (e.g., sampling method, temperature, top-p, top-k, etc.).

    The structure of the logits tensor is coupled with the seq_groups in
    sampling_metadata. Typically, each sequence in each seq_group has one row in
    logits for the next token to be sampled; however, for a seq_group with a
    prompt request with the prompt_logprobs sampling parameter, there are rows
    in logits for each token in the input prompt.
    """

    def __init__(self):
        super().__init__()

        # Whether or not the SamplerOutput should have on-device tensors
        # containing the sampled token ids and probabilities. This is used by
        # speculative decoding.
        self.include_gpu_probs_tensor = False

    def forward(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """
        Args:
            logits: (num_tokens, vocab_size).
            sampling_metadata: Metadata for sampling.

        Returns:
            The sampler output built from the per-group sample results and
            their logprobs.
        """
        assert logits is not None
        _, vocab_size = logits.shape

        logits = _apply_min_tokens_penalty(logits, sampling_metadata)

        # Prepare sampling tensors with pinned memory to avoid blocking.
        (sampling_tensors, do_penalties, do_top_p_top_k,
         do_min_p) = SamplingTensors.from_sampling_metadata(
             sampling_metadata, vocab_size, logits.device, logits.dtype)

        # Apply presence and frequency penalties.
        if do_penalties:
            logits = _apply_penalties(logits, sampling_tensors.prompt_tokens,
                                      sampling_tensors.output_tokens,
                                      sampling_tensors.presence_penalties,
                                      sampling_tensors.frequency_penalties,
                                      sampling_tensors.repetition_penalties)

        # Apply temperature scaling.
        # Use in-place division to avoid creating a new tensor.
        logits.div_(sampling_tensors.temperatures.unsqueeze_(dim=1))

        if do_top_p_top_k:
            logits = _apply_top_k_top_p(logits, sampling_tensors.top_ps,
                                        sampling_tensors.top_ks)

        if do_min_p:
            logits = _apply_min_p(logits, sampling_tensors.min_ps)

        # We use float32 for probabilities and log probabilities.
        # Compute the probabilities.
        probs = torch.softmax(logits, dim=-1, dtype=torch.float)
        # Compute the log probabilities.
        logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float)

        # Sample the next tokens.
        sample_results, maybe_sampled_tokens_tensor = _sample(
            probs,
            logprobs,
            sampling_metadata,
            sampling_tensors,
            include_gpu_probs_tensor=self.include_gpu_probs_tensor,
            modify_greedy_probs=self._should_modify_greedy_probs_inplace,
        )

        if self.include_gpu_probs_tensor:
            # Speculative decoding consumes the raw on-device tensors.
            assert maybe_sampled_tokens_tensor is not None
            on_device_tensors = (probs, logprobs, maybe_sampled_tokens_tensor)
        else:
            on_device_tensors = None

        # Get the logprobs query results.
        prompt_logprobs, sample_logprobs = _get_logprobs(
            logprobs, sampling_metadata, sample_results)
        return _build_sampler_output(sample_results,
                                     sampling_metadata,
                                     prompt_logprobs,
                                     sample_logprobs,
                                     on_device_tensors=on_device_tensors)

    @property
    def _should_modify_greedy_probs_inplace(self) -> bool:
        """Whether or not the sampler should modify the probability distribution
        of greedily-sampled tokens such that multinomial sampling would sample
        the greedily-sampled token.

        In other words, if True then we set the probability of the greedily-
        sampled token to 1.

        This is used by speculative decoding, which requires that the sampling
        method be encoded into the probability distribution.
        """
        # Modify greedy probs if include_gpu_probs_tensor is set.
        return self.include_gpu_probs_tensor
def _get_bin_counts_and_mask(
    tokens: torch.Tensor,
    vocab_size: int,
    num_seqs: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Per-sequence token occurrence counts and a boolean occurrence mask."""
    # Compute the bin counts for the tokens.
    # vocab_size + 1 for padding.
    bin_counts = torch.zeros((num_seqs, vocab_size + 1),
                             dtype=torch.long,
                             device=tokens.device)
    bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
    # Drop the padding column before returning.
    bin_counts = bin_counts[:, :vocab_size]
    mask = bin_counts > 0

    return bin_counts, mask


def _apply_min_tokens_penalty(
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    """Apply min_tokens penalty which sets stop tokens to -inf if min_tokens
    have not been generated yet
    """
    # list of indices in logits that will be set to -inf
    logits_to_penalize: List[Tuple[int, int]] = []
    logits_applied = 0
    for seq_group in sampling_metadata.seq_groups:
        seq_ids = seq_group.seq_ids
        sampling_params = seq_group.sampling_params

        sample_indices = seq_group.sample_indices
        logits_applied += len(sample_indices) + len(
            seq_group.prompt_logprob_indices)
        if not seq_group.do_sample:
            continue

        start_idx = sample_indices[0]
        min_tokens = sampling_params.min_tokens
        token_ids_to_penalize = sampling_params.all_stop_token_ids
        if min_tokens > 0 and token_ids_to_penalize:
            seqs_to_penalize = []
            for j, seq_id in enumerate(seq_ids):
                seq_data = seq_group.seq_data[seq_id]
                if len(seq_data.output_token_ids) < min_tokens:
                    seqs_to_penalize.append(j)

            if seqs_to_penalize:
                # convert to the index into logits
                seqs_to_penalize = [start_idx + j for j in seqs_to_penalize]
                # itertools.product pairs each seq index with every token id
                logits_to_penalize.extend(
                    itertools.product(seqs_to_penalize, token_ids_to_penalize))

    if logits_to_penalize:
        # use zip and * to group indices along each dimension
        # eg. [ (1,2), (1,3), (5,6) ] -> ( (1,1,5), (2,3,6) )
        logits[tuple(zip(*logits_to_penalize))] = -float("inf")

    # verifies that no rows in logits were missed unexpectedly
    assert logits_applied == logits.shape[0]
    return logits


def _apply_penalties(logits: torch.Tensor, prompt_tokens_tensor: torch.Tensor,
                     output_tokens_tensor: torch.Tensor,
                     presence_penalties: torch.Tensor,
                     frequency_penalties: torch.Tensor,
                     repetition_penalties: torch.Tensor) -> torch.Tensor:
    """Apply repetition, frequency and presence penalties to the logits."""
    num_seqs, vocab_size = logits.shape
    _, prompt_mask = _get_bin_counts_and_mask(prompt_tokens_tensor, vocab_size,
                                              num_seqs)
    output_bin_counts, output_mask = _get_bin_counts_and_mask(
        output_tokens_tensor, vocab_size, num_seqs)

    repetition_penalties = repetition_penalties[:, None].repeat(1, vocab_size)
    # Only tokens that appeared in the prompt or output are penalized.
    repetition_penalties[~(prompt_mask | output_mask)] = 1.0
    logits = torch.where(logits > 0, logits / repetition_penalties,
                         logits * repetition_penalties)

    # We follow the definition in OpenAI API.
    # Refer to https://platform.openai.com/docs/api-reference/parameter-details
    logits -= frequency_penalties.unsqueeze_(dim=1) * output_bin_counts
    logits -= presence_penalties.unsqueeze_(dim=1) * output_mask
    return logits


def _apply_top_k_top_p(
    logits: torch.Tensor,
    p: torch.Tensor,
    k: torch.Tensor,
) -> torch.Tensor:
    """Mask logits outside the per-row top-k / top-p (nucleus) sets."""
    # Sort ascending: the k largest entries sit at the END of each row.
    logits_sort, logits_idx = logits.sort(dim=-1, descending=False)

    # Apply top-k.
    top_k_mask = logits_sort.size(1) - k.to(torch.long)
    # Get all the top_k values.
    top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1))
    top_k_mask = logits_sort < top_k_mask
    logits_sort.masked_fill_(top_k_mask, -float("inf"))

    # Apply top-p.
    probs_sort = logits_sort.softmax(dim=-1)
    probs_sum = probs_sort.cumsum(dim=-1)
    top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1)
    # at least one
    top_p_mask[:, -1] = False
    logits_sort.masked_fill_(top_p_mask, -float("inf"))

    # Re-sort the probabilities.
    src = torch.arange(logits_idx.shape[-1],
                       device=logits_idx.device).expand_as(logits_idx)
    logits_idx_inv = torch.empty_like(logits_idx).scatter_(dim=-1,
                                                           index=logits_idx,
                                                           src=src)
    logits = torch.gather(logits_sort, dim=-1, index=logits_idx_inv)
    return logits
def _apply_min_p(
    logits: torch.Tensor,
    min_p: torch.Tensor,
) -> torch.Tensor:
    """
    Adapted from
    https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17
    """
    probs = torch.softmax(logits, dim=-1)
    top_probs, _ = probs.max(dim=-1, keepdim=True)
    # min_p is interpreted relative to each row's most likely token.
    scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs
    tokens_to_remove = probs < scaled_min_p
    logits = logits.masked_fill_(tokens_to_remove, -float("inf"))

    return logits


def _greedy_sample(
    selected_seq_groups: List[SequenceGroupToSample],
    samples: torch.Tensor,
) -> SampleResultType:
    """Run greedy sampling on a given samples.

    Args:
        selected_seq_groups: A list of sequence groups batched.
        samples: (num_selected_samples,) A tensor of samples. The length of
            samples could be smaller than selected_seq_groups if
            seq_group.do_sample is False.
    Returns:
        Tuple of (next_token_ids, parent_ids). The length of returned list is
        same as the length of selected_seq_groups. If the corresponding
        seq_group has do_sample=False, tuple contains ([], [])
    """
    # GPU<->CPU sync: bring the sampled ids to the host before unpacking.
    samples = samples.tolist()
    sample_idx = 0
    results: SampleResultType = []
    for seq_group in selected_seq_groups:
        if not seq_group.do_sample:
            results.append(([], []))
            continue

        seq_ids = seq_group.seq_ids
        num_parent_seqs = len(seq_ids)
        assert num_parent_seqs == 1, (
            "Greedy sampling should have only one seq.")
        parent_ids = list(range(num_parent_seqs))
        next_token_ids = [samples[sample_idx]]
        results.append((next_token_ids, parent_ids))
        sample_idx += num_parent_seqs
    return results
def _random_sample(
    selected_seq_groups: List[SequenceGroupToSample],
    random_samples: torch.Tensor,
) -> SampleResultType:
    """Run random sampling on a given samples.

    Args:
        selected_seq_groups: A list of sequence groups batched.
        random_samples: (num_selected_samples,) A tensor of samples. The
            length of samples could be smaller than selected_seq_groups if
            seq_group.do_sample is False.
    Returns:
        Tuple of (next_token_ids, parent_ids). The length of returned list is
        same as the length of selected_seq_groups. If the corresponding
        seq_group has do_sample=False, tuple contains ([], [])
    """
    # GPU<->CPU sync: bring the sampled ids to the host before unpacking.
    random_samples = random_samples.cpu()
    sample_idx = 0
    results: SampleResultType = []
    for seq_group in selected_seq_groups:
        if not seq_group.do_sample:
            results.append(([], []))
            continue

        seq_ids = seq_group.seq_ids
        sampling_params = seq_group.sampling_params
        is_prompt = seq_group.is_prompt
        num_parent_seqs = len(seq_ids)
        if is_prompt:
            # Prompt phase: a single parent fans out into best_of samples.
            parent_ids = [0] * sampling_params.best_of
            next_token_ids = random_samples[
                sample_idx, :sampling_params.best_of].tolist()
        else:
            # Generation phase: one sample (column 0) per parent sequence.
            parent_ids = list(range(num_parent_seqs))
            next_token_ids = random_samples[sample_idx:sample_idx +
                                            num_parent_seqs, 0].tolist()
        results.append((next_token_ids, parent_ids))
        sample_idx += num_parent_seqs
    return results
def _beam_search_sample(
    selected_seq_groups: List[SequenceGroupToSample],
    logprobs: torch.Tensor,
) -> SampleResultType:
    """Run beam sampling on a given samples.

    Args:
        selected_seq_groups: A list of sequence groups batched.
        logprobs: (num_selected_samples, vocab_size,) A tensor of logprob
            on selected sample indices.
    Returns:
        Tuple of (next_token_ids, parent_ids). The length of returned list is
        same as the length of selected_seq_groups. If the corresponding
        seq_group has do_sample=False, tuple contains ([], [])
    """
    # We sample 2 * beam_width candidates to make sure that with high
    # probability we can get `beam_width` candidates in addition to
    # the finished sequences for the next iteration. See
    # https://github.com/tensorflow/tensor2tensor/blob/bafdc1b67730430d38d6ab802cbd51f9d053ba2e/tensor2tensor/utils/beam_search.py#L557-L563
    # for details. See also HF reference:
    # https://github.com/huggingface/transformers/blob/a4dd53d88e4852f023332d284ff07a01afcd5681/src/transformers/generation/utils.py#L3063-L3065
    #
    # NOTE: Beam search is not vectorized, so its speed can be slower than
    # other sampling methods.
    sample_idx = 0
    results: SampleResultType = []
    for seq_group in selected_seq_groups:
        if not seq_group.do_sample:
            results.append(([], []))
            continue

        is_prompt = seq_group.is_prompt
        seq_ids, sampling_params = seq_group.seq_ids, seq_group.sampling_params
        num_parent_seqs = len(seq_ids)
        beam_width = sampling_params.best_of
        seq_group_logprobs = logprobs[sample_idx:sample_idx + num_parent_seqs]
        if is_prompt:
            # Prompt phase.
            assert num_parent_seqs == 1, (
                "Prompt input should have only one seq.")
            parent_ids = [0] * (2 * beam_width)
            _, next_token_ids = torch.topk(seq_group_logprobs[0],
                                           2 * beam_width)
            next_token_ids = next_token_ids.tolist()
        else:
            # Generation phase: rank candidates by accumulated sequence
            # logprob, then recover (parent, token) from the flat index.
            cumulative_logprobs: List[float] = [
                seq_group.seq_data[seq_id].cumulative_logprob
                for seq_id in seq_ids
            ]
            cumulative_logprobs_tensor = torch.tensor(
                cumulative_logprobs,
                dtype=torch.float,
                device=seq_group_logprobs.device)
            seq_group_logprobs = (seq_group_logprobs +
                                  cumulative_logprobs_tensor.unsqueeze(dim=1))
            _, topk_ids = torch.topk(seq_group_logprobs.flatten(),
                                     2 * beam_width)
            topk_ids = topk_ids.tolist()
            vocab_size = seq_group_logprobs.size(-1)
            parent_ids = [i // vocab_size for i in topk_ids]
            next_token_ids = [i % vocab_size for i in topk_ids]
        results.append((next_token_ids, parent_ids))
        sample_idx += num_parent_seqs
    assert sample_idx == logprobs.size(0)
    return results
# torch.multinomial forces a GPU<->CPU sync.
# Therefore, we use an optimized implementation instead.
# Note that we always sample with replacement.
# probs will be modified in place, but this is fine, as we pass
# in a copy already.
def _multinomial(
    probs: torch.Tensor,
    num_samples: int,
    seq_groups: Optional[List[SequenceGroupToSample]] = None,
) -> torch.Tensor:
    """Categorical sampling via the exponential-race trick.

    When seq_groups is provided, each group's rows draw their exponential
    noise from that group's own generator, making seeded requests
    reproducible.
    """
    if num_samples > 1:
        # This is equivalent to torch.repeat_interleaved (which also
        # forces a GPU<->CPU sync).
        # This allows us to do sampling with replacement by creating
        # num_samples copies of each row in the tensor, and then
        # batch sampling the resulting tensor.
        probs = probs[:, None, :].expand(probs.shape[0], num_samples,
                                         probs.shape[1]).contiguous().view(
                                             -1, probs.shape[1])
    q = torch.empty_like(probs)
    if seq_groups is None:
        q.exponential_()
    else:
        # Fill each group's slice of rows with its own seeded noise.
        sample_idx = 0
        for seq_group in seq_groups:
            seq_ids = seq_group.seq_ids
            next_sample_idx = sample_idx + len(seq_ids) * num_samples
            q[sample_idx:next_sample_idx].exponential_(
                generator=seq_group.generator)
            sample_idx = next_sample_idx
    return probs.div_(q).argmax(dim=1).view(-1, num_samples)
+ for sampling_type in SamplingType: + sample_indices = categorized_sample_indices[sampling_type][:, 0] + num_tokens = len(sample_indices) + if num_tokens == 0: + continue + + seq_group_id = categorized_seq_group_ids[sampling_type] + seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_id] + sample_metadata[sampling_type] = (seq_group_id, seq_groups) + long_sample_indices = sample_indices.long() + if sampling_type == SamplingType.GREEDY: + greedy_samples = torch.argmax(logprobs[long_sample_indices], + dim=-1) + + if include_gpu_probs_tensor: + # Store sampled tokens in output tensor. + sampled_token_ids_tensor[ + long_sample_indices] = greedy_samples.unsqueeze(-1) + + if modify_greedy_probs: + # If required, modify the probabilities such that sampling from + # the modified distribution would always sample the argmax + # token id. + _modify_greedy_probs_inplace(logprobs, probs, + long_sample_indices, + greedy_samples) + + elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED): + max_best_of_in_batch = 1 + for seq_group in seq_groups: + if seq_group.is_prompt: + sampling_params = seq_group.sampling_params + max_best_of_in_batch = max(max_best_of_in_batch, + sampling_params.best_of) + seeded_args = {} if sampling_type == SamplingType.RANDOM else { + "seq_groups": seq_groups, + } + + multinomial_samples[sampling_type] = _multinomial( + probs[long_sample_indices], max_best_of_in_batch, + **seeded_args) + + if include_gpu_probs_tensor: + # Store sampled tokens in output tensor. + sampled_token_ids_tensor[ + long_sample_indices] = multinomial_samples[sampling_type] + + elif sampling_type == SamplingType.BEAM: + beam_search_logprobs = logprobs[sample_indices] + else: + raise ValueError(f"Unsupported sampling type: {sampling_type}") + + # GPU<->CPU sync happens in the loop below. + # This also converts the sample output to Python objects. 
def _sample_with_triton_kernel(
    probs: torch.Tensor,
    logprobs: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    sampling_tensors: SamplingTensors,
) -> SampleResultType:
    """Sample next tokens for every sequence group via the Triton kernel.

    Mirrors `_sample_with_torch` but delegates the actual sampling to
    `sample_triton`. Currently unused (see `_sample`).
    """
    # Bucket sequence-group indices by their sampling type.
    categorized_seq_group_ids: Dict[SamplingType,
                                    List[int]] = {t: []
                                                  for t in SamplingType}
    categorized_sample_indices = sampling_metadata.categorized_sample_indices
    for i, seq_group in enumerate(sampling_metadata.seq_groups):
        sampling_params = seq_group.sampling_params
        sampling_type = sampling_params.sampling_type
        categorized_seq_group_ids[sampling_type].append(i)

    sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {}
    sample_metadata = {}
    max_best_of_in_batch = 1

    # Counterintuitively, having two loops here is actually faster.
    # The first loop can run without waiting on GPU<->CPU sync.
    for sampling_type in SamplingType:
        # Column 0: index into probs/logprobs; column 1: index into the
        # kernel's output token tensor.
        sample_indices = categorized_sample_indices[sampling_type][:, 0]
        sampled_token_indices = categorized_sample_indices[sampling_type][:, 1]
        num_tokens = len(sample_indices)
        if num_tokens == 0:
            continue
        seq_group_id = categorized_seq_group_ids[sampling_type]
        seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_id]
        sample_metadata[sampling_type] = (seq_group_id, seq_groups,
                                          sample_indices,
                                          sampled_token_indices)
        if sampling_type in (SamplingType.GREEDY, SamplingType.RANDOM,
                             SamplingType.RANDOM_SEED):
            # best_of only matters for prompts; track the batch-wide maximum
            # so the kernel allocates enough sample slots.
            for seq_group in seq_groups:
                if seq_group.is_prompt:
                    sampling_params = seq_group.sampling_params
                    max_best_of_in_batch = max(max_best_of_in_batch,
                                               sampling_params.best_of)
        elif sampling_type == SamplingType.BEAM:
            beam_search_logprobs = logprobs[sample_indices]
        else:
            raise ValueError(f"Unsupported sampling type: {sampling_type}")

    sampled_tokens, _, _ = sample_triton(
        probs=probs,
        seeds=sampling_tensors.sampling_seeds,
        max_best_of=max_best_of_in_batch,
        sample_indices=sampling_tensors.sample_indices,
        logprobs=logprobs,
        # don't save logprobs because we have logic for that below
        # TODO: use this instead of the CPU-based logic below
        save_logprobs=False,
    )

    # GPU<->CPU sync happens in the loop below.
    for sampling_type in SamplingType:
        if sampling_type not in sample_metadata:
            continue
        (seq_group_id, seq_groups, sample_indices,
         sampled_token_indices) = sample_metadata[sampling_type]
        if sampling_type == SamplingType.GREEDY:
            sample_results = _greedy_sample(
                seq_groups, sampled_tokens[sampled_token_indices][:, 0])
        elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
            sample_results = _random_sample(
                seq_groups, sampled_tokens[sampled_token_indices])
        elif sampling_type == SamplingType.BEAM:
            sample_results = _beam_search_sample(seq_groups,
                                                beam_search_logprobs)
        sample_results_dict.update(zip(seq_group_id, sample_results))

    # Seq groups that were not sampled this step get an empty result.
    sample_results = [
        sample_results_dict.get(i, ([], []))
        for i in range(len(sampling_metadata.seq_groups))
    ]
    return sample_results
+ + for sampling_type in SamplingType: + if sampling_type not in sample_metadata: + continue + (seq_group_id, seq_groups, sample_indices, + sampled_token_indices) = sample_metadata[sampling_type] + if sampling_type == SamplingType.GREEDY: + sample_results = _greedy_sample( + seq_groups, sampled_tokens[sampled_token_indices][:, 0]) + elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED): + sample_results = _random_sample( + seq_groups, sampled_tokens[sampled_token_indices]) + elif sampling_type == SamplingType.BEAM: + sample_results = _beam_search_sample(seq_groups, + beam_search_logprobs) + sample_results_dict.update(zip(seq_group_id, sample_results)) + + sample_results = [ + sample_results_dict.get(i, ([], [])) + for i in range(len(sampling_metadata.seq_groups)) + ] + return sample_results + + +def _sample( + probs: torch.Tensor, logprobs: torch.Tensor, + sampling_metadata: SamplingMetadata, sampling_tensors: SamplingTensors, + include_gpu_probs_tensor: bool, modify_greedy_probs: bool +) -> Tuple[SampleResultType, Optional[torch.Tensor]]: + """ + Args: + probs: (num_query_tokens_in_batch, num_vocab) + logprobs: (num_query_tokens_in_batch, num_vocab) + sampling_metadata: The metadata for a batch for sampling. + sampling_tensors: Tensors that include sampling related metadata. + + Returns: + (next_token_ids, parent_seq_ids) for each seq group in a batch. + If sampling is skipped, it returns ([], []) + sampled_token_ids_tensor: A tensor of sampled token ids. + """ + return _sample_with_torch( + probs, + logprobs, + sampling_metadata, + include_gpu_probs_tensor=include_gpu_probs_tensor, + modify_greedy_probs=modify_greedy_probs, + ) + + # TODO: Enable once Triton kernel & associated code is faster. + # return _sample_with_triton_kernel(probs, logprobs, sampling_metadata, + # sampling_tensors) + + +def _get_ranks(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: + """ + This function calculates the ranks of the chosen tokens in a logprob tensor. 
def _get_logprobs(
    logprobs: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    sample_results: SampleResultType,
) -> Tuple[List[Optional[PromptLogprobs]], List[SampleLogprobs]]:
    """Return sample logprobs and prompt logprobs.

    The logic consists of 3 parts.
    - Select indices to compute logprob from, ranks of token ids, and
        the top k token ids from logprobs.
    - Compute prompt logprobs if required.
    - Compute sample logprobs if required.

    Args:
        logprobs: (num_query_tokens_across_batch, num_vocab). Each query token's
            logprob per vocab. Sequence groups' query tokens are batched in a
            single flattened tensor. For example, assuming there are N
            seq groups, it is sorted by prefill tokens for seq_group_1 (if
            prompt logprob is enabled), decode tokens for seq_group_1 (if
            sampling is required), prefill tokens for seq_group_2, ...
        sampling_metadata: The sampling metadata.
        sample_results: (num_seq_groups) The tuple of (next_token_ids,
            parent_ids) for each sequence group. When beam search is enabled,
            sample_results can contain different number of seq_ids from
            sampling_metadata.seq_groups. It is because beam search creates
            2 * BEAM_WIDTH number of samples (whereas there are only up to
            BEAM_WIDTH number of seq_ids).

    Returns:
        A tuple of prompt and sample logprobs per sequence group in a batch.
    """
    # The index of query token to calculate logprobs. It includes both
    # prompt and sample logprob indices.
    query_indices: List[int] = []
    # The next token ids to get the logprob value from.
    next_token_ids: List[int] = []
    # The largest requested number of logprobs. We find logprobs as many as the
    # largest num logprobs in this API.
    largest_num_logprobs = 1

    # Select indices to compute logprob from, ranks of token ids, and the top
    # k token ids from logprobs.
    for (seq_group, sample_result) in zip(sampling_metadata.seq_groups,
                                          sample_results):
        sampling_params = seq_group.sampling_params

        # Update indices and tokens for prompt logprobs.
        if (seq_group.is_prompt
                and sampling_params.prompt_logprobs is not None):
            largest_num_logprobs = max(largest_num_logprobs,
                                       sampling_params.prompt_logprobs)
            next_prompt_tokens = _get_next_prompt_tokens(seq_group)
            query_indices.extend(seq_group.prompt_logprob_indices)
            next_token_ids.extend(next_prompt_tokens)

        # Update indices and next tokens for sample logprob.
        if seq_group.do_sample:
            token_ids, parent_seq_ids = sample_result
            # NOTE: We cannot directly use sample_indices because
            # sample_indices only contain parent seq_ids of a previous step.
            # The current step may have different number of seq_ids, and
            # we can obtain it from `sample_result[1]`.
            query_idx = seq_group.sample_indices[0]
            query_indices.extend(
                [query_idx + parent_id for parent_id in parent_seq_ids])
            next_token_ids.extend(token_ids)

            if sampling_params.logprobs is not None:
                largest_num_logprobs = max(largest_num_logprobs,
                                           sampling_params.logprobs)

    assert len(next_token_ids) == len(query_indices)

    if len(query_indices) == 0:
        # NOTE(review): this returns single-element lists regardless of the
        # number of seq groups — presumably callers only hit this path when
        # no logprobs are needed at all; confirm against callers.
        empty_sampled_logprob: SampleLogprobs = []
        empty_prompt_logprob: Optional[PromptLogprobs] = None
        return [empty_prompt_logprob], [empty_sampled_logprob]

    query_indices_gpu = torch.tensor(query_indices, device=logprobs.device)
    next_token_ids_gpu = torch.tensor(next_token_ids, device=logprobs.device)

    # (num_selected_query_tokens, num_logprobs). Note that query_indices can
    # contain duplicates if beam search is enabled.
    selected_logprobs = logprobs[[
        query_indices_gpu,
        next_token_ids_gpu,
    ]]
    ranks = _get_ranks(
        logprobs[query_indices_gpu],
        next_token_ids_gpu,
    )
    assert selected_logprobs.shape[0] == ranks.shape[0]

    # Logprobs of topk tokens for a batch of sequence groups.
    # (num_query_tokens_across_batch).
    if largest_num_logprobs > 0:
        top_logprobs, top_token_ids = torch.topk(logprobs,
                                                 largest_num_logprobs,
                                                 dim=-1)
        # Move to CPU once up front; the per-group helpers below read these
        # with .item()/.tolist().
        top_logprobs = top_logprobs.cpu()
        top_token_ids = top_token_ids.cpu()
    else:
        top_logprobs, top_token_ids = None, None

    selected_logprobs = selected_logprobs.cpu()
    ranks = ranks.cpu()

    # Find prompt/sample logprobs.
    prompt_logprobs_per_seq_group: List[Optional[PromptLogprobs]] = []
    sample_logprobs_per_seq_group: List[SampleLogprobs] = []
    # Cursors into the flattened top-k rows and the selected-logprob rows;
    # each helper consumes its rows and returns the advanced cursors.
    top_logprob_idx = 0
    selected_logprobs_idx = 0

    for seq_group, sample_result in zip(sampling_metadata.seq_groups,
                                        sample_results):
        (prompt_logprobs, top_logprob_idx,
         selected_logprobs_idx) = _get_prompt_logprob_if_needed(
             seq_group, selected_logprobs, ranks, top_token_ids, top_logprobs,
             selected_logprobs_idx, top_logprob_idx)
        prompt_logprobs_per_seq_group.append(prompt_logprobs)

        (sampled_logprobs, top_logprob_idx,
         selected_logprobs_idx) = _get_sampled_logprob_if_needed(
             seq_group, sample_result, selected_logprobs, ranks, top_token_ids,
             top_logprobs, selected_logprobs_idx, top_logprob_idx)
        sample_logprobs_per_seq_group.append(sampled_logprobs)

    return prompt_logprobs_per_seq_group, sample_logprobs_per_seq_group
def _get_prompt_logprob_if_needed(
    seq_group: SequenceGroupToSample,
    selected_logprobs: torch.Tensor,
    ranks: torch.Tensor,
    top_token_ids: torch.Tensor,
    top_logprobs: torch.Tensor,
    selected_logprobs_idx: int,
    top_logprob_idx: int,
):
    """Compute the prompt logprob from a sequence group if needed.

    Consumes one row of `selected_logprobs`/`ranks` and one row of the
    top-k tensors per prompt token, and returns the (possibly None) prompt
    logprobs together with the advanced cursors
    (top_logprob_idx, selected_logprobs_idx).
    """
    sampling_params = seq_group.sampling_params
    is_prompt = seq_group.is_prompt

    # Find prompt logprobs
    prompt_logprobs: Optional[PromptLogprobs] = None
    if (is_prompt and sampling_params.prompt_logprobs is not None):
        prompt_logprobs = []
        num_logprobs = sampling_params.prompt_logprobs
        next_prompt_tokens = _get_next_prompt_tokens(seq_group)
        for token_id in next_prompt_tokens:
            # Calculate the prompt logprob of the real prompt tokens.
            # Use tuple here for performance (to use to_list()).
            # {token_id: (logprob, rank_from_vocab)}
            prompt_logprobs_dict: Dict[int, Tuple[float, int]] = {
                token_id: (selected_logprobs[selected_logprobs_idx].item(),
                           ranks[selected_logprobs_idx].item())
            }

            # Add top K prompt logprobs along with its rank.
            if num_logprobs > 0:
                prompt_logprobs_dict.update(
                    zip(
                        top_token_ids[top_logprob_idx, :num_logprobs].tolist(),
                        zip(
                            top_logprobs[
                                top_logprob_idx, :num_logprobs].tolist(),
                            # This is ranks. Since top_logprob is sorted,
                            # we can just use a range here.
                            range(1, num_logprobs + 1))))
            prompt_logprobs.append({
                token_id: Logprob(*logprob_and_rank)
                for token_id, logprob_and_rank in prompt_logprobs_dict.items()
            })
            # + 1 to go to the next prompt token.
            top_logprob_idx += 1
            selected_logprobs_idx += 1
    return prompt_logprobs, top_logprob_idx, selected_logprobs_idx
+ top_logprob_idx += 1 + selected_logprobs_idx += 1 + return prompt_logprobs, top_logprob_idx, selected_logprobs_idx + + +def _get_sampled_logprob_if_needed( + seq_group: SequenceGroupToSample, + sample_result: Tuple[List[int], List[int]], + selected_logprobs: torch.Tensor, + ranks: torch.Tensor, + top_token_ids: torch.Tensor, + top_logprobs: torch.Tensor, + selected_logprobs_idx: int, + top_logprob_idx: int, +): + """Compute the sample logprob if needed.""" + seq_ids = seq_group.seq_ids + num_logprobs = seq_group.sampling_params.logprobs + if num_logprobs is None: + num_logprobs = 0 + sampled_logprobs: SampleLogprobs = [] + next_token_ids, parent_seq_ids = sample_result + + if seq_group.do_sample: + assert len(next_token_ids) > 0 + for (next_token_id, parent_id) in zip(next_token_ids, parent_seq_ids): + # Calculate the sample logprob of the real sampled tokens. + # Use tuple here for performance (to use to_list()). + # token_id: (logprob, rank_from_vocab) + sampled_logprobs_dict: Dict[int, Tuple[float, int]] = { + next_token_id: + (selected_logprobs[selected_logprobs_idx].item(), + ranks[selected_logprobs_idx].item()) + } + # +1 to go to the next sampled token. Note that + # selected_logprobs can contain duplicates unlike top_logprobs + # when beam search is enabled. + selected_logprobs_idx += 1 + + # Second, add top K logprobs along with its rank. + if num_logprobs >= 0: + sampled_logprobs_dict.update( + zip( + top_token_ids[top_logprob_idx + + parent_id, :num_logprobs].tolist(), + zip( + top_logprobs[top_logprob_idx + + parent_id, :num_logprobs].tolist(), + # This is rank. Since top_logprob is sorted, we + # can just use a range here. + range(1, num_logprobs + 1)))) + sampled_logprobs.append({ + token_id: Logprob(*logprob_and_rank) + for token_id, logprob_and_rank in + sampled_logprobs_dict.items() + }) + # There are len(seq_ids) number of sampled tokens for the current + # sequence group in top_logprobs. Jump to the next seq_group. 
+ top_logprob_idx += len(seq_ids) + return sampled_logprobs, top_logprob_idx, selected_logprobs_idx + + +def _modify_greedy_probs_inplace(logprobs: torch.Tensor, probs: torch.Tensor, + sample_indices: torch.Tensor, + greedy_samples: torch.Tensor) -> None: + """Modify the probability distributions of the greedily-sampled tokens such + that each sampled token has a "probability" of 1.0. This is required by + speculative decoding, which depends on the sampling method being encoded + within the probability distribution for correctness. + + # Why do we only need to do this for greedy sampling? + + vLLM's sampler performs the following steps for greedy or multinomial + (random) sampling: + 1. Get logits from model. + 2. Modify logits according to per-sequence sampling parameters. + - Multiply by temperature, top-k and top-p masking, penalize tokens + according to their frequency, etc. + 3. Sample a token. + - Random sampling simply samples from the modified probability + distribution. + - Greedy sampling performs `argmax` to obtain the token with the + highest likelihood. + + Ignoring greedy sampling for a moment, we find that the computed probability + distribution has the following property: we can sample from it independently + and find that the token sampled by the Sampler has a frequency corresponding + to how often we see it in our sampling. In other words, for tokens sampled + with vLLM's random SamplingType, the computed probability distribution + encodes the sampling methodology completely. + + Greedy sampling does not normally have this property. vLLM modifies logits + according to sampling params, then performs `argmax`, then returns the + sampled token and the computed probability distribution. If we sample from + the distribution, we'll find the likelihood of the greedily-sampled token + is not always 1.0. 
+ + Since lossless speculative decoding requires that the sampling methodology + be encoded within the probability distribution, we are motivated to modify + the probability distribution such that the sampled token has probability 1 + when speculative decoding is used. + + NOTE: Alternatively, we could use an extremely low temperature to achieve + greedy sampling using multinomial computation and unite the codepaths. This + has implications on the overall design of the sampler, e.g. how to record + accurate logprobs for the user, so this improvement is deferred to later. + """ + # NOTE: logprobs are not modified so they can be returned to the user. + probs[sample_indices, :] = 0 + probs[sample_indices, greedy_samples] = 1.0 + + +def _build_sampler_output( + sample_results: SampleResultType, + sampling_metadata: SamplingMetadata, + prompt_logprobs: List[Optional[PromptLogprobs]], + sample_logprobs: List[SampleLogprobs], + on_device_tensors: Optional[Tuple[torch.Tensor, torch.Tensor, + torch.Tensor]], +) -> SamplerOutput: + """Construct Python objects with the output of sampling. + + Args: + on_device_tensors: Tuple containing on-device tensors with the + probabilities used in sampling and the sampled token ids. This + allows post-processing without copies to CPU/serialization, e.g. in + speculative decoding rejection sampling. + """ + + sampler_output = [] + for (seq_group, sample_result, group_prompt_logprobs, + group_sample_logprobs) in zip(sampling_metadata.seq_groups, + sample_results, prompt_logprobs, + sample_logprobs): + seq_ids = seq_group.seq_ids + next_token_ids, parent_ids = sample_result + seq_outputs = [] + for parent_id, next_token_id, logprobs in zip(parent_ids, + next_token_ids, + group_sample_logprobs): + seq_outputs.append( + SequenceOutput(seq_ids[parent_id], next_token_id, logprobs)) + sampler_output.append( + SequenceGroupOutput(seq_outputs, group_prompt_logprobs)) + + # If not specified, store None values in SamplerOutput. 
+ if on_device_tensors is not None: + (sampled_token_probs, logprobs_tensor, + sampled_token_ids) = on_device_tensors + else: + sampled_token_probs, logprobs_tensor, sampled_token_ids = (None, None, + None) + + return SamplerOutput( + outputs=sampler_output, + sampled_token_probs=sampled_token_probs, + sampled_token_ids=sampled_token_ids, + logprobs=logprobs_tensor, + ) + + +def _get_next_prompt_tokens(seq_group: SequenceGroupToSample) -> List[int]: + """Get a list of next prompt tokens to compute logprob from a + given sequence group. + + It is used to compute prompt logprob. Imagine you have logprob for each + query token. Query token needs to know the next prompt token id to compute + prompt logprob. This is a helper to obtain next prompt token ids. + + This API has to be used only when the caller knows seq_group is in prefill + stage. + + Returns: + A list of next prompt tokens to compute logprob. + """ + assert seq_group.is_prompt, ( + "Caller should ensure the sequence group is in a prefill stage.") + seq_ids = seq_group.seq_ids + query_len = seq_group.query_len + assert query_len is not None + # prompt has only 1 seq id. + assert len(seq_ids) == 1 + seq_data = seq_group.seq_data[seq_ids[0]] + computed_len = seq_data.get_num_computed_tokens() + prompt_tokens = seq_data.prompt_token_ids + # +1 because we are looking for a next prompt token. 
+ next_token_index_start = computed_len + 1 + next_token_index_end = min(computed_len + query_len + 1, + len(prompt_tokens)) + next_prompt_tokens = prompt_tokens[ + next_token_index_start:next_token_index_end] + return next_prompt_tokens diff --git a/vllm/model_executor/layers/vocab_parallel_embedding.py b/vllm/model_executor/layers/vocab_parallel_embedding.py new file mode 100644 index 0000000..4585b16 --- /dev/null +++ b/vllm/model_executor/layers/vocab_parallel_embedding.py @@ -0,0 +1,155 @@ +from typing import Optional, Sequence + +import torch +import torch.nn.functional as F +from torch.nn.parameter import Parameter + +from vllm.distributed import (divide, get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.utils import set_weight_attrs + +DEFAULT_VOCAB_PADDING_SIZE = 64 + + +def pad_vocab_size(vocab_size: int, + pad_to: int = DEFAULT_VOCAB_PADDING_SIZE) -> int: + """Pad the vocab size to the given value.""" + return ((vocab_size + pad_to - 1) // pad_to) * pad_to + + +def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size: int, + rank: int) -> Sequence[int]: + index_f = rank * per_partition_vocab_size + index_l = index_f + per_partition_vocab_size + return index_f, index_l + + +def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, + world_size: int) -> Sequence[int]: + per_partition_vocab_size = divide(global_vocab_size, world_size) + return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, + rank) + + +class VocabParallelEmbedding(torch.nn.Module): + """Embedding parallelized in the vocabulary dimension. + + Adapted from torch.nn.Embedding, note that we pad the vocabulary size to + make sure it is divisible by the number of model parallel GPUs. + + Args: + num_embeddings: vocabulary size. + embedding_dim: size of hidden state. + params_dtype: type of the parameters. 
+ org_num_embeddings: original vocabulary size (without LoRA). + padding_size: padding size for the vocabulary. + """ + + def __init__(self, + num_embeddings: int, + embedding_dim: int, + params_dtype: Optional[torch.dtype] = None, + org_num_embeddings: Optional[int] = None, + padding_size: int = DEFAULT_VOCAB_PADDING_SIZE): + super().__init__() + + # Keep the input dimensions. + self.num_embeddings = num_embeddings + self.org_vocab_size = org_num_embeddings or num_embeddings + self.num_embeddings_padded = pad_vocab_size(num_embeddings, + padding_size) + self.embedding_dim = embedding_dim + if params_dtype is None: + params_dtype = torch.get_default_dtype() + self.tp_size = get_tensor_model_parallel_world_size() + # Divide the weight matrix along the vocaburaly dimension. + self.vocab_start_index, self.vocab_end_index = ( + vocab_range_from_global_vocab_size( + self.num_embeddings_padded, get_tensor_model_parallel_rank(), + self.tp_size)) + self.num_embeddings_per_partition = (self.vocab_end_index - + self.vocab_start_index) + self.weight = Parameter( + torch.empty(self.num_embeddings_per_partition, + self.embedding_dim, + dtype=params_dtype)) + set_weight_attrs(self.weight, { + "parallel_dim": 0, + "weight_loader": self.weight_loader + }) + + def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor): + parallel_dim = param.parallel_dim + assert loaded_weight.shape[parallel_dim] == self.org_vocab_size + loaded_weight = loaded_weight[self.vocab_start_index:self. + vocab_end_index] + param[:loaded_weight.shape[0]].data.copy_(loaded_weight) + + def forward(self, input_): + if self.tp_size > 1: + # Build the mask. + input_mask = ((input_ < self.vocab_start_index) | + (input_ >= self.vocab_end_index)) + # Mask the input. + masked_input = input_.clone() - self.vocab_start_index + masked_input[input_mask] = 0 + else: + masked_input = input_ + # Get the embeddings. + output_parallel = F.embedding(masked_input, self.weight) + # Mask the output embedding. 
+ if self.tp_size > 1: + output_parallel[input_mask, :] = 0.0 + # Reduce across all the model parallel GPUs. + output = tensor_model_parallel_all_reduce(output_parallel) + return output + + def extra_repr(self) -> str: + s = f"num_embeddings={self.num_embeddings_per_partition}" + s += f", embedding_dim={self.embedding_dim}" + s += f", org_vocab_size={self.org_vocab_size}" + s += f', num_embeddings_padded={self.num_embeddings_padded}' + s += f', tp_size={self.tp_size}' + return s + + +class ParallelLMHead(VocabParallelEmbedding): + """Parallelized LM head. + + Output logits weight matrices used in the Sampler. The weight and bias + tensors are padded to make sure they are divisible by the number of + model parallel GPUs. + + Args: + num_embeddings: vocabulary size. + embedding_dim: size of hidden state. + bias: whether to use bias. + params_dtype: type of the parameters. + org_num_embeddings: original vocabulary size (without LoRA). + padding_size: padding size for the vocabulary. + """ + + def __init__(self, + num_embeddings: int, + embedding_dim: int, + bias: bool = False, + params_dtype: Optional[torch.dtype] = None, + org_num_embeddings: Optional[int] = None, + padding_size: int = DEFAULT_VOCAB_PADDING_SIZE): + super().__init__(num_embeddings, embedding_dim, params_dtype, + org_num_embeddings, padding_size) + if bias: + self.bias = Parameter( + torch.empty(self.num_embeddings_per_partition, + dtype=params_dtype)) + set_weight_attrs(self.bias, { + "parallel_dim": 0, + "weight_loader": self.weight_loader + }) + else: + self.register_parameter("bias", None) + + def forward(self, input_): + del input_ + raise RuntimeError("LMHead's weights should be used in the sampler.") diff --git a/vllm/model_executor/model_loader/__init__.py b/vllm/model_executor/model_loader/__init__.py new file mode 100644 index 0000000..6f90e49 --- /dev/null +++ b/vllm/model_executor/model_loader/__init__.py @@ -0,0 +1,30 @@ +from typing import Optional + +from torch import nn + +from 
vllm.config import (DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, + ParallelConfig, SchedulerConfig, VisionLanguageConfig) +from vllm.model_executor.model_loader.loader import (BaseModelLoader, + get_model_loader) +from vllm.model_executor.model_loader.utils import ( + get_architecture_class_name, get_model_architecture) + + +def get_model( + *, model_config: ModelConfig, load_config: LoadConfig, + device_config: DeviceConfig, parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, lora_config: Optional[LoRAConfig], + vision_language_config: Optional[VisionLanguageConfig]) -> nn.Module: + loader = get_model_loader(load_config) + return loader.load_model(model_config=model_config, + device_config=device_config, + lora_config=lora_config, + vision_language_config=vision_language_config, + parallel_config=parallel_config, + scheduler_config=scheduler_config) + + +__all__ = [ + "get_model", "get_model_loader", "BaseModelLoader", + "get_architecture_class_name", "get_model_architecture" +] diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py new file mode 100644 index 0000000..bafa2de --- /dev/null +++ b/vllm/model_executor/model_loader/loader.py @@ -0,0 +1,362 @@ +# ruff: noqa: SIM117 +import copy +import glob +import os +from abc import ABC, abstractmethod +from typing import Any, Dict, Generator, List, Optional, Tuple, Type + +import huggingface_hub +import torch +from torch import nn + +from vllm.config import (DeviceConfig, LoadConfig, LoadFormat, LoRAConfig, + ModelConfig, ParallelConfig, SchedulerConfig, + VisionLanguageConfig) +from vllm.envs import VLLM_USE_MODELSCOPE +from vllm.logger import init_logger +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.model_loader.tensorizer import ( + TensorizerConfig, is_vllm_serialized_tensorizer, load_with_tensorizer, + tensorizer_weights_iterator) +from vllm.model_executor.model_loader.utils 
# Model classes that additionally require a VisionLanguageConfig to be
# passed to their constructor.
_VISION_MODEL_CLASSES = [
    LlavaForConditionalGeneration,
]

logger = init_logger(__name__)


def _get_quantization_config(
        model_config: ModelConfig,
        load_config: LoadConfig) -> Optional[QuantizationConfig]:
    """Get the quantization config, or None if the model is not quantized.

    Raises:
        ValueError: if the current GPU's compute capability is below the
            minimum required by the quantization method, or if the model
            dtype is not among the method's supported activation dtypes.
    """
    if model_config.quantization is not None:
        quant_config = get_quant_config(model_config, load_config)
        capability = torch.cuda.get_device_capability()
        # Encode (major, minor) as a single integer, e.g. (8, 0) -> 80.
        capability = capability[0] * 10 + capability[1]
        if capability < quant_config.get_min_capability():
            raise ValueError(
                f"The quantization method {model_config.quantization} is not "
                "supported for the current GPU. "
                f"Minimum capability: {quant_config.get_min_capability()}. "
                f"Current capability: {capability}.")
        supported_dtypes = quant_config.get_supported_act_dtypes()
        if model_config.dtype not in supported_dtypes:
            raise ValueError(
                f"{model_config.dtype} is not supported for quantization "
                f"method {model_config.quantization}. Supported dtypes: "
                f"{supported_dtypes}")
        return quant_config
    return None


def _get_model_initialization_kwargs(
        model_class: Type[nn.Module], lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig]
) -> Dict[str, Any]:
    """Get extra kwargs for model initialization.

    Raises:
        ValueError: if LoRA is enabled but the model class does not
            declare ``supported_lora_modules``.
    """
    extra_kwargs = {}
    if hasattr(model_class, "supported_lora_modules"):
        extra_kwargs["lora_config"] = lora_config
    elif lora_config:
        raise ValueError(
            f"Model {model_class.__name__} does not support LoRA, "
            "but LoRA is enabled. Support for this model may "
            "be added in the future. If this is important to you, "
            "please open an issue on github.")
    # BUGFIX: this used to be `elif`, so a vision model that also supports
    # LoRA would never have received its VisionLanguageConfig. The vision
    # check is independent of the LoRA checks above.
    if model_class in _VISION_MODEL_CLASSES:
        extra_kwargs["vision_language_config"] = vision_language_config
    return extra_kwargs


def _initialize_model(
        model_config: ModelConfig, load_config: LoadConfig,
        lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig]) -> nn.Module:
    """Initialize a model (uninitialized weights) with the given configs."""
    model_class = get_model_architecture(model_config)[0]
    quant_config = _get_quantization_config(model_config, load_config)

    return model_class(config=model_config.hf_config,
                       quant_config=quant_config,
                       **_get_model_initialization_kwargs(
                           model_class, lora_config, vision_language_config))


class BaseModelLoader(ABC):
    """Base class for model loaders."""

    def __init__(self, load_config: LoadConfig):
        self.load_config = load_config

    @abstractmethod
    def load_model(self, *, model_config: ModelConfig,
                   device_config: DeviceConfig,
                   lora_config: Optional[LoRAConfig],
                   vision_language_config: Optional[VisionLanguageConfig],
                   parallel_config: ParallelConfig,
                   scheduler_config: SchedulerConfig) -> nn.Module:
        """Load a model with the given configurations."""
        ...


class DefaultModelLoader(BaseModelLoader):
    """Model loader that can load different file types from disk."""

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        if load_config.model_loader_extra_config:
            raise ValueError(f"Model loader extra config is not supported for "
                             f"load format {load_config.load_format}")

    def _maybe_download_from_modelscope(
            self, model: str, revision: Optional[str]) -> Optional[str]:
        """Download model from ModelScope hub if VLLM_USE_MODELSCOPE is True.

        Returns the path to the downloaded model, or None if the model is not
        downloaded from ModelScope."""
        if VLLM_USE_MODELSCOPE:
            # download model from ModelScope hub,
            # lazy import so that modelscope is not required for normal use.
            # pylint: disable=C.
            from modelscope.hub.snapshot_download import snapshot_download

            if not os.path.exists(model):
                model_path = snapshot_download(
                    model_id=model,
                    cache_dir=self.load_config.download_dir,
                    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
                    revision=revision,
                )
            else:
                model_path = model
            return model_path
        return None

    def _prepare_weights(
            self, model_name_or_path: str, revision: Optional[str],
            fall_back_to_pt: bool) -> Tuple[str, List[str], bool]:
        """Prepare weights for the model.

        If the model is not local, it will be downloaded.

        Returns:
            (local weights folder, list of weight file paths,
            whether the files are safetensors).
        """
        model_name_or_path = self._maybe_download_from_modelscope(
            model_name_or_path, revision) or model_name_or_path

        is_local = os.path.isdir(model_name_or_path)
        load_format = self.load_config.load_format
        use_safetensors = False
        # Some quantized models use .pt files for storing the weights.
        if load_format == LoadFormat.AUTO:
            allow_patterns = ["*.safetensors", "*.bin"]
        elif load_format == LoadFormat.SAFETENSORS:
            use_safetensors = True
            allow_patterns = ["*.safetensors"]
        elif load_format == LoadFormat.PT:
            allow_patterns = ["*.pt"]
        elif load_format == LoadFormat.NPCACHE:
            allow_patterns = ["*.bin"]
        else:
            raise ValueError(f"Unknown load_format: {load_format}")

        if fall_back_to_pt:
            allow_patterns += ["*.pt"]

        if not is_local:
            hf_folder = download_weights_from_hf(model_name_or_path,
                                                 self.load_config.download_dir,
                                                 allow_patterns, revision)
        else:
            hf_folder = model_name_or_path

        # Patterns are ordered by preference; the first one that matches
        # any file wins.
        hf_weights_files: List[str] = []
        for pattern in allow_patterns:
            hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
            if len(hf_weights_files) > 0:
                if pattern == "*.safetensors":
                    use_safetensors = True
                break

        if not use_safetensors:
            hf_weights_files = filter_files_not_needed_for_inference(
                hf_weights_files)

        if len(hf_weights_files) == 0:
            raise RuntimeError(
                f"Cannot find any model weights with `{model_name_or_path}`")

        return hf_folder, hf_weights_files, use_safetensors

    def _get_weights_iterator(
        self, model_name_or_path: str, revision: Optional[str],
        fall_back_to_pt: bool
    ) -> Generator[Tuple[str, torch.Tensor], None, None]:
        """Get an iterator for the model weights based on the load format."""
        hf_folder, hf_weights_files, use_safetensors = self._prepare_weights(
            model_name_or_path, revision, fall_back_to_pt)
        if self.load_config.load_format == LoadFormat.NPCACHE:
            # Currently np_cache only support *.bin checkpoints
            assert use_safetensors is False
            return np_cache_weights_iterator(model_name_or_path,
                                             self.load_config.download_dir,
                                             hf_folder, hf_weights_files)
        if use_safetensors:
            return safetensors_weights_iterator(hf_weights_files)
        return pt_weights_iterator(hf_weights_files)

    def load_model(self, *, model_config: ModelConfig,
                   device_config: DeviceConfig,
                   lora_config: Optional[LoRAConfig],
                   vision_language_config: Optional[VisionLanguageConfig],
                   parallel_config: ParallelConfig,
                   scheduler_config: SchedulerConfig) -> nn.Module:
        """Build the model on the target device and stream in its weights."""
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model = _initialize_model(model_config, self.load_config,
                                          lora_config, vision_language_config)
                model.load_weights(
                    self._get_weights_iterator(model_config.model,
                                               model_config.revision,
                                               fall_back_to_pt=getattr(
                                                   model,
                                                   "fall_back_to_pt_during_load",
                                                   True)))
                for _, module in model.named_modules():
                    quant_method = getattr(module, "quant_method", None)
                    if quant_method is not None:
                        quant_method.process_weights_after_loading(module)
                    # FIXME: Remove this after Mixtral is updated
                    # to use quant_method.
                    if hasattr(module, "process_weights_after_loading"):
                        module.process_weights_after_loading()
        return model.eval()


class DummyModelLoader(BaseModelLoader):
    """Model loader that will set model weights to random values."""

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        if load_config.model_loader_extra_config:
            raise ValueError(f"Model loader extra config is not supported for "
                             f"load format {load_config.load_format}")

    def load_model(self, *, model_config: ModelConfig,
                   device_config: DeviceConfig,
                   lora_config: Optional[LoRAConfig],
                   vision_language_config: Optional[VisionLanguageConfig],
                   parallel_config: ParallelConfig,
                   scheduler_config: SchedulerConfig) -> nn.Module:
        """Build the model without reading any checkpoint from disk."""
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model = _initialize_model(model_config, self.load_config,
                                          lora_config, vision_language_config)
                # NOTE(woosuk): For accurate performance evaluation, we
                # assign random values to the weights.
                initialize_dummy_weights(model)
        return model.eval()


class TensorizerLoader(BaseModelLoader):
    """Model loader using CoreWeave's tensorizer library."""

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        if isinstance(load_config.model_loader_extra_config, TensorizerConfig):
            self.tensorizer_config = load_config.model_loader_extra_config
        else:
            self.tensorizer_config = TensorizerConfig(
                **load_config.model_loader_extra_config)

    def _verify_config(self, model_config: ModelConfig,
                       parallel_config: ParallelConfig):
        self.tensorizer_config.verify_with_model_config(model_config)
        self.tensorizer_config.verify_with_parallel_config(parallel_config)

    def _get_weights_iterator(
            self) -> Generator[Tuple[str, torch.Tensor], None, None]:
        tensorizer_args = self.tensorizer_config._construct_tensorizer_args()
        return tensorizer_weights_iterator(tensorizer_args)

    def _load_model_unserialized(
        self, model_config: ModelConfig, device_config: DeviceConfig,
        lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig]
    ) -> nn.Module:
        """Load an unserialized model with tensorizer.

        Unserialized here means "not serialized with tensorizer". This
        should still be faster than default HuggingFace loading, but will
        be slower than loading a tensorizer-serialized model.
        """
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model = _initialize_model(model_config, self.load_config,
                                          lora_config, vision_language_config)

                model.load_weights(self._get_weights_iterator())
        return model.eval()

    def _load_model_serialized(
        self, model_config: ModelConfig, device_config: DeviceConfig,
        lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig]
    ) -> nn.Module:
        """Load a serialized model with tensorizer.

        See the examples/tensorize_vllm_model.py example script for
        serializing vLLM models."""
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model_class = get_model_architecture(model_config)[0]
                quant_config = _get_quantization_config(
                    model_config, self.load_config)
                extra_kwargs = _get_model_initialization_kwargs(
                    model_class, lora_config, vision_language_config)
                extra_kwargs["quant_config"] = quant_config

                # Hand the agent a copy so the loader's own config is not
                # mutated with per-model fields.
                tensorizer_config = copy.copy(self.tensorizer_config)
                tensorizer_config.model_class = model_class
                tensorizer_config.hf_config = model_config.hf_config
                tensorizer_config.dtype = model_config.dtype

                model = load_with_tensorizer(tensorizer_config, **extra_kwargs)
        return model.eval()

    def load_model(self, *, model_config: ModelConfig,
                   device_config: DeviceConfig,
                   lora_config: Optional[LoRAConfig],
                   vision_language_config: Optional[VisionLanguageConfig],
                   parallel_config: ParallelConfig,
                   scheduler_config: SchedulerConfig) -> nn.Module:
        """Dispatch to serialized/unserialized loading based on the config."""
        self._verify_config(model_config, parallel_config)

        if is_vllm_serialized_tensorizer(self.tensorizer_config):
            return self._load_model_serialized(model_config, device_config,
                                               lora_config,
                                               vision_language_config)
        return self._load_model_unserialized(model_config, device_config,
                                             lora_config,
                                             vision_language_config)


def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
    """Get a model loader based on the load format."""

    if isinstance(load_config.load_format, type):
        # A custom BaseModelLoader subclass was supplied directly.
        return load_config.load_format(load_config)

    if load_config.load_format == LoadFormat.DUMMY:
        return DummyModelLoader(load_config)

    if load_config.load_format == LoadFormat.TENSORIZER:
        return TensorizerLoader(load_config)

    return DefaultModelLoader(load_config)
# Maps vLLM dtype names / torch dtypes to the precision strings that are
# passed as the `amp` argument when loading a transformers_neuronx model.
TORCH_DTYPE_TO_NEURON_AMP = {
    "auto": "f32",
    "half": "f16",
    "float16": "f16",
    "bfloat16": "bf16",
    "float": "f32",
    "float32": "f32",
    torch.float16: "f16",
    torch.bfloat16: "bf16",
    torch.float32: "f32",
}

# Models supported by Neuron.
# Maps HF architecture name -> (transformers_neuronx module path,
# neuronx sampling class name, HF model class name).
_NEURON_SUPPORTED_MODELS: Dict[str, Tuple[str, str, str]] = {
    "LlamaForCausalLM": ("transformers_neuronx.llama.model",
                         "LlamaForSampling", "LlamaForCausalLM"),
    "MistralForCausalLM": ("transformers_neuronx.mistral.model",
                           "MistralForSampling", "MistralForCausalLM")
}


class NeuronCasualLM(nn.Module):
    """nn.Module wrapper around a transformers_neuronx sampling model.

    NOTE(review): the class name keeps the existing "Casual" spelling
    (presumably "Causal" was intended); renaming would break callers.
    """

    def __init__(
        self,
        config: PretrainedConfig,
    ) -> None:
        super().__init__()
        self.config = config
        # logits_as_input=True: forward() already returns logits, so the
        # processor is not given an LM head (see compute_logits below).
        self.logits_processor = LogitsProcessor(config.vocab_size,
                                                logits_as_input=True)
        self.sampler = Sampler()

        # Lazy initialized by load_weights().
        self.model: nn.Module

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        input_block_ids: torch.Tensor,
    ) -> torch.Tensor:
        # The neuron model returns logits directly; positions are used as
        # cache ids and block ids as start ids.
        logits = self.model(input_ids,
                            cache_ids=positions,
                            start_ids=input_block_ids)
        return logits

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        # hidden_states are already logits (see forward), hence lm_head=None.
        logits = self.logits_processor(None, hidden_states, sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, model_name_or_path: str, **kwargs):
        """Build the transformers_neuronx model and compile it for Neuron.

        If no "<model>-split" directory exists yet, the HF checkpoint is
        downloaded and saved in split form first.
        """
        arch = _get_model_architecture(self.config)
        neuronx_module_path, neuronx_model_cls_name, hf_model_cls_name = (
            _NEURON_SUPPORTED_MODELS[arch])
        neuronx_module = importlib.import_module(neuronx_module_path)
        neuronx_model_cls = getattr(neuronx_module, neuronx_model_cls_name)

        split_model_dir = f"{model_name_or_path}-split"
        # NOTE(review): os.path.isdir on a path ending in
        # "pytorch_model.bin" looks like it should be os.path.isfile or
        # os.path.exists — confirm against transformers_neuronx layout.
        if os.path.isdir(os.path.join(model_name_or_path,
                                      "pytorch_model.bin")):
            split_model_dir = model_name_or_path
        elif not os.path.exists(f"{model_name_or_path}-split"):
            hf_model_cls = getattr(transformers, hf_model_cls_name)
            from transformers_neuronx.module import save_pretrained_split

            hf_model = hf_model_cls.from_pretrained(model_name_or_path,
                                                    low_cpu_mem_usage=True)
            save_pretrained_split(hf_model, f"{model_name_or_path}-split")

        self.model = neuronx_model_cls.from_pretrained(split_model_dir,
                                                       **kwargs)
        # Compile/move the model onto the Neuron device.
        self.model.to_neuron()


def _get_model_architecture(config: PretrainedConfig) -> str:
    """Return the first architecture in `config` supported on Neuron.

    Raises:
        ValueError: if none of the listed architectures is supported.
    """
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in _NEURON_SUPPORTED_MODELS:
            return arch
    raise ValueError(
        f"Model architectures {architectures} are not supported on Neuron "
        f"for now. Supported architectures: "
        f"{list(_NEURON_SUPPORTED_MODELS.keys())}")


def get_neuron_model(model_config: ModelConfig,
                     parallel_config: ParallelConfig,
                     scheduler_config: SchedulerConfig) -> nn.Module:
    """Create and load a Neuron-compiled model from the given configs."""
    from transformers_neuronx.config import (ContinuousBatchingConfig,
                                             NeuronConfig)

    # Create a model instance.
    model = NeuronCasualLM(model_config.hf_config)

    # Shared-cache continuous batching sized to the scheduler's max seqs.
    continuous_batching_config = ContinuousBatchingConfig(
        batch_size_for_shared_caches=scheduler_config.max_num_seqs)
    neuron_config = NeuronConfig(
        continuous_batching=continuous_batching_config)

    # Load the weights from the cached or downloaded files.
    model.load_weights(
        model_config.model,
        tp_degree=parallel_config.tensor_parallel_size,
        amp=TORCH_DTYPE_TO_NEURON_AMP[model_config.dtype],
        neuron_config=neuron_config,
        context_length_estimate=[scheduler_config.max_model_len],
        n_positions=[scheduler_config.max_model_len],
        batch_size=scheduler_config.max_num_seqs)

    return model.eval()
@dataclass
class TensorizerConfig:
    """User-facing configuration for tensorizer-based loading.

    Converted into a TensorizerArgs (see _construct_tensorizer_args)
    before the streams/deserializer are created.
    """
    # Source of the serialized tensors: local path, URI, fd, or open stream.
    tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, typing.BinaryIO,
                          str, bytes, os.PathLike, int]
    vllm_tensorized: bool
    verify_hash: Optional[bool] = False
    num_readers: Optional[int] = None
    encryption_keyfile: Optional[str] = None
    s3_access_key_id: Optional[str] = None
    s3_secret_access_key: Optional[str] = None
    s3_endpoint: Optional[str] = None
    # The three fields below are filled in by the loader
    # (TensorizerLoader._load_model_serialized), not by the user.
    model_class: Optional[Type[torch.nn.Module]] = None
    hf_config: Optional[PretrainedConfig] = None
    dtype: Optional[Union[str, torch.dtype]] = None

    def _construct_tensorizer_args(self) -> "TensorizerArgs":
        """Build the TensorizerArgs holding stream/deserializer params."""
        tensorizer_args = {
            "tensorizer_uri": self.tensorizer_uri,
            "vllm_tensorized": self.vllm_tensorized,
            "verify_hash": self.verify_hash,
            "num_readers": self.num_readers,
            "encryption_keyfile": self.encryption_keyfile,
            "s3_access_key_id": self.s3_access_key_id,
            "s3_secret_access_key": self.s3_secret_access_key,
            "s3_endpoint": self.s3_endpoint,
        }
        return TensorizerArgs(**tensorizer_args)  # type: ignore

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        # vLLM-serialized checkpoints cannot be sharded across GPUs.
        if (parallel_config.tensor_parallel_size > 1
                and self.tensorizer_uri is not None):
            raise ValueError(
                "Loading to multiple GPUs is not currently supported with "
                "vLLM-serialized models. Please set tensor_parallel_size=1."
                " or use a non-vLLM-serialized model, such as a "
                "serialized Hugging Face `PretrainedModel`.")

    def verify_with_model_config(self, model_config: "ModelConfig") -> None:
        # Warn (do not fail) when combining tensorizer with quantization.
        if (model_config.quantization is not None
                and self.tensorizer_uri is not None):
            logger.warning(
                "Loading a model using Tensorizer with quantization on vLLM"
                " is unstable and may lead to errors.")


def load_with_tensorizer(tensorizer_config: TensorizerConfig,
                         **extra_kwargs) -> nn.Module:
    """Deserialize a vLLM-serialized model via a TensorizerAgent."""
    tensorizer = TensorizerAgent(tensorizer_config, **extra_kwargs)
    return tensorizer.deserialize()


def is_vllm_serialized_tensorizer(tensorizer_config: TensorizerConfig) -> bool:
    """Return True if the config describes a vLLM-serialized checkpoint."""
    if tensorizer_config is None:
        return False
    return tensorizer_config.vllm_tensorized


@dataclass
class TensorizerArgs:
    tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, typing.BinaryIO,
                          str, bytes, os.PathLike, int]
    vllm_tensorized: bool
    verify_hash: Optional[bool] = False
    num_readers: Optional[int] = None
    encryption_keyfile: Optional[str] = None
    s3_access_key_id: Optional[str] = None
    s3_secret_access_key: Optional[str] = None
    s3_endpoint: Optional[str] = None
    """
    Args for the TensorizerAgent class. These are used to configure the behavior
    of the TensorDeserializer when loading tensors from a serialized model.

    Args:
        tensorizer_uri: Path to serialized model tensors. Can be a local file
            path or a S3 URI.
        vllm_tensorized: If True, indicates that the serialized model is a
            vLLM model. This is used to determine the behavior of the
            TensorDeserializer when loading tensors from a serialized model.
            It is far faster to deserialize a vLLM model as it utilizes
            tensorizer's optimized GPU loading.
        verify_hash: If True, the hashes of each tensor will be verified against
            the hashes stored in the metadata. A `HashMismatchError` will be
            raised if any of the hashes do not match.
        num_readers: Controls how many threads are allowed to read concurrently
            from the source file. Default is `None`, which will dynamically set
            the number of readers based on the number of available
            resources and model size. This greatly increases performance.
        encryption_keyfile: File path to a binary file containing a
            binary key to use for decryption. `None` (the default) means
            no decryption. See the example script in
            examples/tensorize_vllm_model.py.
        s3_access_key_id: The access key for the S3 bucket. Can also be set via
            the S3_ACCESS_KEY_ID environment variable.
        s3_secret_access_key: The secret access key for the S3 bucket. Can also
            be set via the S3_SECRET_ACCESS_KEY environment variable.
        s3_endpoint: The endpoint for the S3 bucket. Can also be set via the
            S3_ENDPOINT_URL environment variable.
    """

    def __post_init__(self):
        self.file_obj = self.tensorizer_uri
        # Fall back to environment-provided S3 credentials when not given.
        self.s3_access_key_id = self.s3_access_key_id or envs.S3_ACCESS_KEY_ID
        self.s3_secret_access_key = (self.s3_secret_access_key
                                     or envs.S3_SECRET_ACCESS_KEY)
        self.s3_endpoint = self.s3_endpoint or envs.S3_ENDPOINT_URL
        # Kwargs forwarded to tensorizer's open_stream().
        self.stream_params = {
            "s3_access_key_id": self.s3_access_key_id,
            "s3_secret_access_key": self.s3_secret_access_key,
            "s3_endpoint": self.s3_endpoint,
        }

        # Kwargs forwarded to TensorDeserializer().
        self.deserializer_params = {
            "verify_hash": self.verify_hash,
            "encryption": self.encryption_keyfile,
            "num_readers": self.num_readers
        }
        if self.encryption_keyfile:
            # Read the key material and replace the keyfile path with
            # concrete DecryptionParams.
            with open_stream(
                    self.encryption_keyfile,
                    **self.stream_params,
            ) as stream:
                key = stream.read()
                decryption_params = DecryptionParams.from_key(key)
                self.deserializer_params['encryption'] = decryption_params

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Tensorizer CLI arguments"""

        # Tensorizer options arg group
        group = parser.add_argument_group(
            'tensorizer options',
            description=('Options for configuring the behavior of the'
                         ' tensorizer deserializer when '
                         '--load-format=tensorizer'))

        group.add_argument(
            "--tensorizer-uri",
            help="Path to serialized model tensors. Can be a local file path,"
            " or an HTTP(S) or S3 URI.",
        )
        group.add_argument(
            "--verify-hash",
            action="store_true",
            help="If enabled, the hashes of each tensor will be verified"
            " against the hashes stored in the file metadata. An exception"
            " will be raised if any of the hashes do not match.",
        )
        group.add_argument(
            "--encryption-keyfile",
            default=None,
            help="The file path to a binary file containing a binary key to "
            "use for decryption. Can be a file path or S3 network URI.")
        group.add_argument(
            "--num-readers",
            default=None,
            type=int,
            help="Controls how many threads are allowed to read concurrently "
            "from the source file. Default is `None`, which will dynamically "
            "set the number of readers based on the available resources "
            "and model size. This greatly increases performance.")
        group.add_argument(
            "--s3-access-key-id",
            default=None,
            help="The access key for the S3 bucket. Can also be set via the "
            "S3_ACCESS_KEY_ID environment variable.",
        )
        group.add_argument(
            "--s3-secret-access-key",
            default=None,
            help="The secret access key for the S3 bucket. Can also be set via "
            "the S3_SECRET_ACCESS_KEY environment variable.",
        )
        group.add_argument(
            "--s3-endpoint",
            default=None,
            help="The endpoint for the S3 bucket. Can also be set via the "
            "S3_ENDPOINT_URL environment variable.",
        )
        group.add_argument(
            "--vllm-tensorized",
            action="store_true",
            help="If enabled, indicates that the serialized model is a vLLM "
            "model. This is used to determine the behavior of the "
            "TensorDeserializer when loading tensors from a "
            "serialized model.")

        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "TensorizerArgs":
        """Build TensorizerArgs from parsed CLI args, ignoring unrelated
        attributes on the namespace."""
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        tensorizer_args = cls(**{
            attr: getattr(args, attr)
            for attr in attrs if hasattr(args, attr)
        })
        return tensorizer_args


class TensorizerAgent:
    """
    A class for performing tensorizer deserializations specifically for
    vLLM models using plaid_mode. Uses TensorizerArgs to configure the
    behavior of the TensorDeserializer when loading tensors from a serialized
    model. For deserializations of HuggingFace models, TensorDeserializer is
    instead used as an iterator directly in the func hf_model_weights_iterator
    in vllm/model_executor/model_loader/weight_utils.py
    """

    def __init__(self, tensorizer_config: TensorizerConfig,
                 quant_config: QuantizationConfig, **extra_kwargs):
        if tensorizer_load_fail is not None:
            raise ImportError(
                "Tensorizer is not installed. Please install tensorizer "
                "to use this feature with `pip install vllm[tensorizer]`."
            ) from tensorizer_load_fail

        self.tensorizer_config = tensorizer_config
        self.tensorizer_args = (
            self.tensorizer_config._construct_tensorizer_args())
        self.extra_kwargs = extra_kwargs
        # A quant_config passed via extra_kwargs wins over the positional one.
        if extra_kwargs.get("quant_config", None) is not None:
            self.quant_config = extra_kwargs["quant_config"]
        else:
            self.quant_config = quant_config
        self.model = self._init_model()

    def _init_model(self):
        """Instantiate the model class without allocating real weight
        tensors; the deserializer fills them in later."""
        assert self.tensorizer_config.hf_config is not None
        model_args = self.tensorizer_config.hf_config
        model_args.torch_dtype = self.tensorizer_config.dtype
        assert self.tensorizer_config.model_class is not None
        with no_init_or_tensor():
            return self.tensorizer_config.model_class(
                config=model_args,
                quant_config=self.quant_config,
                **self.extra_kwargs)

    def _resize_lora_embeddings(self):
        """Modify LoRA embedding layers to use bigger tensors
        to allow for adapter added tokens."""
        for child in self.model.modules():
            if (isinstance(child, VocabParallelEmbedding)
                    and child.weight.shape[0] <
                    child.num_embeddings_per_partition):
                # Grow the weight to the full partition size; new rows are
                # zero-filled.
                new_weight = torch.empty(child.num_embeddings_per_partition,
                                         child.embedding_dim,
                                         dtype=child.weight.dtype,
                                         device=child.weight.device)
                new_weight[:child.weight.shape[0]].copy_(child.weight.data)
                new_weight[child.weight.shape[0]:].fill_(0)
                child.weight.data = new_weight

    def _check_tensors_on_meta_device(self):
        # Any tensor left on the meta device was not filled in by the
        # deserializer, which indicates a model/checkpoint mismatch.
        for tensor in self.model.state_dict().values():
            if tensor.device.type == 'meta':
                raise ValueError(
                    "The serialized model contains tensors on the meta device,"
                    " indicating that some tensors were not loaded properly."
                    " Please check that the parameters of the model being"
                    " specified match that of the serialized model, such as"
                    " its quantization.")

    def deserialize(self):
        """
        Deserialize the model using the TensorDeserializer. This method is
        specifically for vLLM models using tensorizer's plaid_mode.

        The deserializer makes use of tensorizer_args.stream_params
        to configure the behavior of the stream when loading tensors from a
        serialized model. The deserializer_params are used to configure the
        behavior of the TensorDeserializer when loading tensors themselves.
        Documentation on these params can be found in TensorizerArgs

        Returns:
            nn.Module: The deserialized model.
        """
        before_mem = get_mem_usage()
        start = time.perf_counter()
        with open_stream(
                self.tensorizer_args.tensorizer_uri,
                mode="rb",
                **self.tensorizer_args.stream_params,
        ) as stream, TensorDeserializer(
                stream,
                dtype=self.tensorizer_config.dtype,
                **self.tensorizer_args.deserializer_params) as deserializer:
            deserializer.load_into_module(self.model)
            end = time.perf_counter()

        total_bytes_str = convert_bytes(deserializer.total_tensor_bytes)
        duration = end - start
        per_second = convert_bytes(deserializer.total_tensor_bytes / duration)
        after_mem = get_mem_usage()
        deserializer.close()
        logger.info("Deserialized %s in %0.2fs, %s/s", total_bytes_str,
                    end - start, per_second)
        logger.info("Memory usage before: %s", before_mem)
        logger.info("Memory usage after: %s", after_mem)

        self._check_tensors_on_meta_device()
        self._resize_lora_embeddings()
        return self.model.eval()


def tensorizer_weights_iterator(
    tensorizer_args: "TensorizerArgs"
) -> Generator[Tuple[str, torch.Tensor], None, None]:
    """Yield (name, tensor) pairs from a tensorizer stream, on the CPU."""
    logger.warning(
        "Deserializing HuggingFace models is not optimized for "
        "loading on vLLM, as tensorizer is forced to load to CPU. "
        "Consider deserializing a vLLM model instead for faster "
        "load times. See the examples/tensorize_vllm_model.py example "
        "script for serializing vLLM models.")

    deserializer_args = tensorizer_args.deserializer_params
    stream_params = tensorizer_args.stream_params
    stream = open_stream(tensorizer_args.tensorizer_uri, **stream_params)
    with TensorDeserializer(stream, **deserializer_args,
                            device="cpu") as state:
        for name, param in state.items():
            yield name, param
    del state


# --- vllm/model_executor/model_loader/utils.py ---


@contextlib.contextmanager
def set_default_torch_dtype(dtype: torch.dtype):
    """Sets the default torch dtype to the given dtype."""
    # NOTE(review): the old dtype is not restored if the body raises;
    # consider wrapping the yield in try/finally.
    old_dtype = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    yield
    torch.set_default_dtype(old_dtype)


def get_model_architecture(
        model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
    """Resolve the model class and architecture name for a model config.

    Raises:
        ValueError: if no listed architecture is in the ModelRegistry.
    """
    architectures = getattr(model_config.hf_config, "architectures", [])
    # Special handling for quantized Mixtral.
    # FIXME(woosuk): This is a temporary hack.
    if (model_config.quantization is not None
            and model_config.quantization != "fp8"
            and "MixtralForCausalLM" in architectures):
        architectures = ["QuantMixtralForCausalLM"]

    for arch in architectures:
        model_cls = ModelRegistry.load_model_cls(arch)
        if model_cls is not None:
            return (model_cls, arch)
    raise ValueError(
        f"Model architectures {architectures} are not supported for now. "
        f"Supported architectures: {ModelRegistry.get_supported_archs()}")


def get_architecture_class_name(model_config: ModelConfig) -> str:
    """Return only the architecture name for a model config."""
    return get_model_architecture(model_config)[1]
def get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):
    """Return a cross-process file lock scoped to *model_name_or_path*.

    Args:
        model_name_or_path: Model identifier; sanitized and embedded in the
            lock-file name.
        cache_dir: Directory in which to place the lock file. Defaults to
            the system temp directory so multiple users can share the lock.

    Returns:
        A ``filelock.FileLock`` over a file inside the lock directory.
    """
    lock_dir = cache_dir or temp_dir
    # BUG FIX: the original called os.makedirs(os.path.dirname(lock_dir)),
    # which creates the *parent* of the lock directory and can leave
    # lock_dir itself missing. Create the lock directory itself.
    os.makedirs(lock_dir, exist_ok=True)
    model_name = model_name_or_path.replace("/", "-")
    hash_name = hashlib.sha256(model_name.encode()).hexdigest()
    # Prefix with a hash to avoid conflict with old users' lock files.
    lock_file_name = hash_name + model_name + ".lock"
    # mode 0o666 is required for the filelock to be shared across users.
    lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name),
                             mode=0o666)
    return lock
# TODO(woosuk): Move this to other place.
def get_quant_config(model_config: ModelConfig,
                     load_config: LoadConfig) -> QuantizationConfig:
    """Build the QuantizationConfig for *model_config*.

    Resolution order:
      1. ``quantization_config`` embedded in the HF model config, if any.
      2. A method-specific JSON config file next to the model weights
         (downloaded from the Hub when the model is not a local directory).

    Raises:
        ValueError: if no config file, or more than one, matches the
            filenames the quantization method declares.
    """
    quant_cls = get_quantization_config(model_config.quantization)
    # Read the quantization config from the HF model config, if available.
    hf_quant_config = getattr(model_config.hf_config, "quantization_config",
                              None)
    if hf_quant_config is not None:
        return quant_cls.from_config(hf_quant_config)
    model_name_or_path = model_config.model
    is_local = os.path.isdir(model_name_or_path)
    if not is_local:
        # Download the config files under a file lock so concurrent
        # processes do not race on the same cache entry.
        with get_lock(model_name_or_path, load_config.download_dir):
            hf_folder = snapshot_download(
                model_name_or_path,
                revision=model_config.revision,
                allow_patterns="*.json",
                cache_dir=load_config.download_dir,
                local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
                tqdm_class=DisabledTqdm,
            )
    else:
        hf_folder = model_name_or_path

    possible_config_filenames = quant_cls.get_config_filenames()

    # If the quantization method declares no config file, fall back to the
    # method's default configuration.
    if not possible_config_filenames:
        return quant_cls()

    config_files = glob.glob(os.path.join(hf_folder, "*.json"))

    # Keep only the JSON files whose names the quantization method expects.
    quant_config_files = [
        f for f in config_files if any(
            f.endswith(x) for x in possible_config_filenames)
    ]
    if len(quant_config_files) == 0:
        raise ValueError(
            f"Cannot find the config file for {model_config.quantization}")
    if len(quant_config_files) > 1:
        raise ValueError(
            f"Found multiple config files for {model_config.quantization}: "
            f"{quant_config_files}")

    quant_config_file = quant_config_files[0]
    with open(quant_config_file, "r") as f:
        config = json.load(f)
    return quant_cls.from_config(config)
def filter_files_not_needed_for_inference(
        hf_weights_files: List[str]) -> List[str]:
    """Drop training-only checkpoint files (optimizer/scheduler/scaler state).

    See https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233
    """
    # str.endswith accepts a tuple of suffixes, so one call covers the
    # whole blacklist.
    blacklist = (
        "training_args.bin",
        "optimizer.bin",
        "optimizer.pt",
        "scheduler.pt",
        "scaler.pt",
    )
    kept = []
    for path in hf_weights_files:
        if not path.endswith(blacklist):
            kept.append(path)
    return kept
def pt_weights_iterator(
    hf_weights_files: List[str]
) -> Generator[Tuple[str, torch.Tensor], None, None]:
    """Iterate over the weights in the model bin/pt files.

    Yields (parameter name, tensor) pairs, loading one checkpoint shard at
    a time on CPU so only a single shard is resident at once.
    """
    for bin_file in hf_weights_files:
        state = torch.load(bin_file, map_location="cpu")
        for name, param in state.items():
            yield name, param
        # Drop the shard's state dict before loading the next one to bound
        # peak host memory.
        del state
        # NOTE(review): torch.musa is the Moore Threads MUSA backend; this
        # empties its device cache after each shard. Upstream vLLM has no
        # empty_cache here — confirm torch.musa is always importable in
        # this build and that the call is actually needed on the CPU path.
        torch.musa.empty_cache()
def kv_cache_scales_loader(
        filename: str, tp_rank: int, tp_size: int, num_hidden_layers: int,
        model_type: Optional[str]) -> Iterable[Tuple[int, float]]:
    """
    A simple utility to read in KV cache scaling factors that have been
    previously serialized to disk. Used by the model to populate the appropriate
    KV cache scaling factors. The serialization should represent a dictionary
    whose keys are the TP ranks and values are another dictionary mapping layers
    to their KV cache scaling factors.
    Keep this function in sync with the output of examples/fp8/extract_scales.py
    """
    try:
        with open(filename) as f:
            # The context dict lets the schema validator cross-check the
            # file against the current model / parallel configuration.
            context = {
                "model_type": model_type,
                "num_hidden_layers": num_hidden_layers,
                "tp_rank": tp_rank,
                "tp_size": tp_size,
            }
            schema_dct = json.load(f)
            schema = QuantParamSchema.model_validate(schema_dct,
                                                     context=context)
            # Only this rank's {layer index: scale} mapping is returned.
            layer_scales_map = schema.kv_cache.scaling_factor[tp_rank]
            return layer_scales_map.items()

    except FileNotFoundError:
        logger.error("File or directory '%s' not found.", filename)
    except json.JSONDecodeError:
        logger.error("Error decoding JSON in file '%s'.", filename)
    except Exception as e:
        logger.error("An error occurred while reading '%s': %s", filename, e)
    # This section is reached if and only if any of the excepts are hit
    # Return an empty iterable (list) => no KV cache scales are loaded
    # which ultimately defaults to 1.0 scales
    logger.warning(
        "Defaulting to KV cache scaling factors = 1.0 for all "
        "layers in TP rank %d as an error occurred during loading.", tp_rank)
    return []
def initialize_dummy_weights(
    model: torch.nn.Module,
    low: float = -1e-3,
    high: float = 1e-3,
) -> None:
    """Fill every floating-point parameter of *model* with uniform noise.

    Random (rather than zero/constant) weights keep performance
    measurements realistic without producing NaNs in the forward pass;
    values in [-1e-3, 1e-3] were empirically found to work well for most
    models.
    """
    for tensor in model.state_dict().values():
        if not torch.is_floating_point(tensor):
            # Integer buffers (e.g. position ids) are left untouched.
            continue
        tensor.data.uniform_(low, high)
+_MODELS = { + "AquilaModel": ("llama", "LlamaForCausalLM"), + "AquilaForCausalLM": ("llama", "LlamaForCausalLM"), # AquilaChat2 + "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"), # baichuan-7b + "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"), # baichuan-13b + "BloomForCausalLM": ("bloom", "BloomForCausalLM"), + "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"), + "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"), + "CohereForCausalLM": ("commandr", "CohereForCausalLM"), + "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"), + "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), + "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"), + "FalconForCausalLM": ("falcon", "FalconForCausalLM"), + "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"), + "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"), + "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"), + "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"), + "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"), + "InternLMForCausalLM": ("llama", "LlamaForCausalLM"), + "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"), + "JAISLMHeadModel": ("jais", "JAISLMHeadModel"), + "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), + "LlavaForConditionalGeneration": + ("llava", "LlavaForConditionalGeneration"), + # For decapoda-research/llama-* + "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"), + "MistralForCausalLM": ("llama", "LlamaForCausalLM"), + "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"), + "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"), + # transformers's mpt class has lower case + "MptForCausalLM": ("mpt", "MPTForCausalLM"), + "MPTForCausalLM": ("mpt", "MPTForCausalLM"), + "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"), + "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"), + "OPTForCausalLM": ("opt", "OPTForCausalLM"), + "OrionForCausalLM": ("orion", "OrionForCausalLM"), + "PhiForCausalLM": ("phi", "PhiForCausalLM"), + 
class ModelRegistry:
    """Maps architecture names to model classes (built-in and out-of-tree)."""

    @staticmethod
    def load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        """Return the model class for *model_arch*, or None if unknown."""
        # Out-of-tree registrations take precedence over built-ins.
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        entry = _MODELS.get(model_arch)
        if entry is None:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            reason = _ROCM_PARTIALLY_SUPPORTED_MODELS.get(model_arch)
            if reason is not None:
                logger.warning(
                    "Model architecture %s is partially supported by ROCm: %s",
                    model_arch, reason)

        module_name, model_cls_name = entry
        module = importlib.import_module(
            f"vllm.model_executor.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def get_supported_archs() -> List[str]:
        """Names of all built-in architectures."""
        return list(_MODELS)

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        """Register an out-of-tree model class under *model_arch*."""
        if model_arch in _MODELS:
            logger.warning(
                "Model architecture %s is already registered, and will be "
                "overwritten by the new model class %s.", model_arch,
                model_cls.__name__)
        # Mutating the module-level dict needs no `global` declaration.
        _OOT_MODELS[model_arch] = model_cls
+"""Inference-only BaiChuan model compatible with HuggingFace weights.""" +import math +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import LoRAConfig +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor: + closest_power_of_2 = 2**math.floor(math.log2(total_num_heads)) + base = torch.tensor( + 2**(-(2**-(math.log2(closest_power_of_2) - 3))), + dtype=torch.float32, + ) + powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) + slopes = torch.pow(base, powers) + + if closest_power_of_2 != total_num_heads: + extra_base = torch.tensor( + 2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))), + dtype=torch.float32, + ) + num_remaining_heads = min(closest_power_of_2, + total_num_heads - closest_power_of_2) + extra_powers = torch.arange(start=1, + end=1 + 2 * num_remaining_heads, + step=2, + dtype=torch.int32) + slopes = torch.cat( + [slopes, torch.pow(extra_base, extra_powers)], 
class BaiChuanMLP(nn.Module):
    """Gated feed-forward block: down_proj(SiLU(gate) * up)."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        # gate_proj and up_proj are fused into one column-parallel matmul;
        # its output is 2 * intermediate_size wide.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        # SiluAndMul consumes the fused gate/up output in one call.
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x
class BaiChuanAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        position_embedding: str,
        rope_theta: float = 10000,
        max_position_embeddings: int = 8192,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tensor_model_parallel_world_size = get_tensor_model_parallel_world_size(
        )
        self.total_num_heads = num_heads
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)
        self.head_dim = hidden_size // self.total_num_heads
        # NOTE: the misspelled attribute name "postion_embedding" is kept
        # deliberately; forward() and the branch below read it as-is.
        self.postion_embedding = position_embedding
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        # pylint: disable=invalid-name
        # Fused QKV projection; Baichuan checkpoints store it as "W_pack".
        self.W_pack = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )
        # Create the alibi slopes and slice them.
        if self.postion_embedding == "ALIBI":
            # ALiBi: each TP rank keeps only the slopes for its own heads.
            tp_rank = get_tensor_model_parallel_rank()
            head_start = tp_rank * self.num_heads
            head_end = (tp_rank + 1) * self.num_heads
            alibi_slopes = _get_alibi_slopes(self.total_num_heads)
            alibi_slopes = alibi_slopes[head_start:head_end].tolist()

            scaling = self.head_dim**-0.5
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  scaling,
                                  alibi_slopes=alibi_slopes)
        else:
            # Rotary position embeddings; no alibi slopes needed.
            self.rotary_emb = get_rope(
                self.head_dim,
                rotary_dim=self.head_dim,
                max_position=self.max_position_embeddings,
                base=self.rope_theta,
            )
            self.scaling = self.head_dim**-0.5
            self.attn = Attention(self.num_heads, self.head_dim, self.scaling)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.W_pack(hidden_states)
        # The fused projection emits [q | k | v] along the last dim.
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        if self.postion_embedding != "ALIBI":
            q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output
class BaiChuanModel(nn.Module):
    """Token embeddings + a stack of BaiChuan decoder layers + final norm."""

    def __init__(self,
                 config: PretrainedConfig,
                 position_embedding: str,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            BaiChuanDecoderLayer(config, position_embedding, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        # Each layer reads/writes the KV cache slot matching its index.
        for layer_idx, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[layer_idx],
                attn_metadata,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
class BaiChuanBaseForCausalLM(nn.Module):
    """Shared causal-LM wrapper for Baichuan / Baichuan2 variants.

    Subclasses pick the position-embedding flavor ("ROPE" or "ALIBI").
    """

    # Maps a fused parameter to the checkpoint shards packed into it.
    packed_modules_mapping = {
        "W_pack": ["W_pack"],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }
    # LoRA specific attributes
    supported_lora_modules = [
        "W_pack",
        "o_proj",
        "gate_up_proj",
        "down_proj",
    ]
    embedding_modules = {}
    embedding_padding_modules = []

    def __init__(
        self,
        config,
        position_embedding: str,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ):
        super().__init__()
        # NOTE(review): lora_config is accepted for interface compatibility
        # but is not stored or used in this class — confirm intended.
        self.config = config
        self.quant_config = quant_config
        self.model = BaiChuanModel(config, position_embedding, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        # Returns the final hidden states; logits are computed separately.
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head.weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy checkpoint tensors into this model's parameters.

        Handles the gate/up fusion remapping and the Baichuan2 lm_head
        normalization quirk.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            # Rotary inv_freq buffers are recomputed, not loaded.
            if "rotary_emb.inv_freq" in name:
                continue
            if name == "lm_head.weight":
                # Unlike Baichuan, Baichuan2 normalizes the head weights.
                # Refer to:
                # https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/84603cde5ebffb6084e476cfaeceaf0b8b91fe54/modeling_baichuan.py#L508
                # Distinguish between Baichuan and Baichuan2 by checking the
                # vocab size. This is suggested by
                # https://github.com/vllm-project/vllm/pull/1022#discussion_r1325652704
                is_baichuan2 = self.config.vocab_size == 125696
                if is_baichuan2:
                    loaded_weight = torch.nn.functional.normalize(
                        loaded_weight)

            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # No fused mapping matched: load the tensor directly.
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
b/vllm/model_executor/models/bloom.py @@ -0,0 +1,327 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/bloom/modeling_bloom.py +# Copyright 2023 The vLLM team. +# Copyright 2022 HuggingFace Inc. team and BigScience workshop. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only BLOOM model compatible with HuggingFace weights.""" +import math +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import BloomConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor: + closest_power_of_2 = 
2**math.floor(math.log2(total_num_heads)) + base = torch.tensor( + 2**(-(2**-(math.log2(closest_power_of_2) - 3))), + dtype=torch.float32, + ) + powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) + slopes = torch.pow(base, powers) + + if closest_power_of_2 != total_num_heads: + extra_base = torch.tensor( + 2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))), + dtype=torch.float32, + ) + num_remaining_heads = min(closest_power_of_2, + total_num_heads - closest_power_of_2) + extra_powers = torch.arange(start=1, + end=1 + 2 * num_remaining_heads, + step=2, + dtype=torch.int32) + slopes = torch.cat( + [slopes, torch.pow(extra_base, extra_powers)], dim=0) + return slopes + + +class BloomAttention(nn.Module): + + def __init__( + self, + config: BloomConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.hidden_size = config.hidden_size + self.total_num_heads = config.n_head + self.head_dim = self.hidden_size // self.total_num_heads + assert self.head_dim * self.total_num_heads == self.hidden_size + + tp_world_size = get_tensor_model_parallel_world_size() + assert self.total_num_heads % tp_world_size == 0 + self.num_heads = self.total_num_heads // tp_world_size + + self.query_key_value = QKVParallelLinear( + self.hidden_size, + self.head_dim, + self.total_num_heads, + bias=True, + quant_config=quant_config, + ) + self.dense = RowParallelLinear( + self.hidden_size, + self.hidden_size, + bias=True, + quant_config=quant_config, + ) + + # Create the alibi slopes and slice them. 
+ tp_rank = get_tensor_model_parallel_rank() + head_start = tp_rank * self.num_heads + head_end = (tp_rank + 1) * self.num_heads + alibi_slopes = _get_alibi_slopes(self.total_num_heads) + alibi_slopes = alibi_slopes[head_start:head_end].tolist() + + scaling = self.head_dim**-0.5 + self.attn = Attention(self.num_heads, + self.head_dim, + scaling, + alibi_slopes=alibi_slopes) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + del position_ids # Unused. + qkv, _ = self.query_key_value(hidden_states) + q, k, v = qkv.chunk(chunks=3, dim=-1) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.dense(attn_output) + return output + + +class BloomMLP(nn.Module): + + def __init__( + self, + config: BloomConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + self.dense_h_to_4h = ColumnParallelLinear( + hidden_size, + 4 * hidden_size, + quant_config=quant_config, + ) + self.gelu_impl = get_act_fn("gelu", quant_config, 4 * hidden_size) + self.dense_4h_to_h = RowParallelLinear( + 4 * hidden_size, + hidden_size, + quant_config=quant_config, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x, _ = self.dense_h_to_4h(x) + x = self.gelu_impl(x) + x, _ = self.dense_4h_to_h(x) + return x + + +class BloomBlock(nn.Module): + + def __init__( + self, + config: BloomConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + + self.input_layernorm = nn.LayerNorm(hidden_size, + eps=config.layer_norm_epsilon) + self.self_attention = BloomAttention(config, quant_config) + self.post_attention_layernorm = nn.LayerNorm( + hidden_size, eps=config.layer_norm_epsilon) + self.mlp = BloomMLP(config, quant_config) + self.apply_residual_connection_post_layernorm = ( + config.apply_residual_connection_post_layernorm) 
+ + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + + # Layer norm post the self attention. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + # Self attention. + attention_output = self.self_attention( + position_ids=position_ids, + hidden_states=layernorm_output, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + attention_output = attention_output + residual + layernorm_output = self.post_attention_layernorm(attention_output) + + # Get residual + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = attention_output + + # MLP. + output = self.mlp(layernorm_output) + residual + return output + + +class BloomModel(nn.Module): + + def __init__( + self, + config: BloomConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.embed_dim = config.hidden_size + + # Embedding + LN Embedding + self.word_embeddings = VocabParallelEmbedding( + config.vocab_size, + self.embed_dim, + ) + self.word_embeddings_layernorm = nn.LayerNorm( + self.embed_dim, eps=config.layer_norm_epsilon) + + # Transformer blocks + self.h = nn.ModuleList([ + BloomBlock(config, quant_config) + for _ in range(config.num_hidden_layers) + ]) + + # Final Layer Norm + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.word_embeddings(input_ids) + hidden_states = self.word_embeddings_layernorm(hidden_states) + for i in range(len(self.h)): + layer = self.h[i] + hidden_states = layer( + position_ids, + 
class BloomForCausalLM(nn.Module):
    """BLOOM with a causal-LM head.

    The LM head is tied to the input word embeddings, so no separate
    lm_head parameter is created (checkpoint lm_head.weight is skipped
    in load_weights).
    """

    def __init__(
        self,
        config: BloomConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = BloomModel(config, quant_config)
        # Tie the output projection to the input embedding matrix.
        self.lm_head_weight = self.transformer.word_embeddings.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        # Returns final hidden states; logits are computed separately in
        # compute_logits().
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        # Project hidden states onto the (tied) embedding matrix.
        logits = self.logits_processor(self.lm_head_weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load HuggingFace BLOOM checkpoint tensors into this model."""
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            if name == "lm_head.weight":
                # lm_head is tied to word_embeddings; skip the duplicate.
                continue
            if not name.startswith("transformer."):
                # HF checkpoints may omit the top-level module prefix.
                name = "transformer." + name
            param = params_dict[name]

            if "query_key_value" in name:
                # NOTE: BLOOM's fused QKV's output_dim has the shape of
                # (num_heads * 3 * head_size), while the
                # required shape is (3 * num_heads * head_size).
                # Thus, we need weight conversion.
                output_dim = getattr(param, "output_dim", None)
                num_heads = self.config.num_attention_heads
                if output_dim is not None:
                    loaded_weight_shape = loaded_weight.shape
                    # Unfold the fused axis to (num_heads, 3, head_size),
                    # swap the head and qkv axes, then flatten back.
                    loaded_weight = loaded_weight.view(
                        loaded_weight_shape[:output_dim] + (num_heads, 3, -1) +
                        loaded_weight_shape[output_dim + 1:])
                    loaded_weight = loaded_weight.transpose(
                        output_dim, output_dim + 1)
                    loaded_weight = loaded_weight.reshape(loaded_weight_shape)

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
class GLMAttention(nn.Module):
    """Self-attention for ChatGLM with optional multi-query KV heads.

    QKV and output projections are tensor-parallel; rotary position
    embeddings are applied to half of each head dimension in the
    interleaved (non-NeoX) layout.
    """

    def __init__(
        self,
        config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.multi_query_attention = config.multi_query_attention
        # With multi-query attention the KV head count comes from
        # multi_query_group_num; otherwise KV heads == query heads.
        self.total_num_kv_heads = (config.multi_query_group_num
                                   if config.multi_query_attention else
                                   config.num_attention_heads)
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = config.hidden_size // self.total_num_heads
        # Per-rank widths of the fused QKV output, used to split in forward.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.query_key_value = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.add_bias_linear or config.add_qkv_bias,
            quant_config=quant_config,
        )
        self.dense = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=config.add_bias_linear,
            quant_config=quant_config,
        )

        # https://huggingface.co/THUDM/chatglm3-6b-32k/blob/e210410255278dd9d74463cf396ba559c0ef801c/modeling_chatglm.py#L141
        rope_ratio = getattr(config, "rope_ratio", 1.0)
        max_positions = getattr(config, "seq_length", 8192)
        self.rotary_emb = get_rope(
            self.head_dim,
            # ChatGLM applies RoPE to only half of each head dimension.
            rotary_dim=self.head_dim // 2,
            max_position=max_positions,
            base=10000 * rope_ratio,
            is_neox_style=False,
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.query_key_value(hidden_states)
        # Split the fused projection into this rank's Q, K and V slices.
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(position_ids, q, k)
        context_layer = self.attn(
            q,
            k,
            v,
            kv_cache,
            attn_metadata,
        )
        attn_output, _ = self.dense(context_layer)
        return attn_output
+ """ + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.apply_residual_connection_post_layernorm = ( + config.apply_residual_connection_post_layernorm) + + self.fp32_residual_connection = config.fp32_residual_connection + + layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm + # Layernorm on the input data. + self.input_layernorm = layer_norm_func(config.hidden_size, + eps=config.layernorm_epsilon) + + # Self attention. + self.self_attention = GLMAttention(config, quant_config) + self.hidden_dropout = config.hidden_dropout + + # Layernorm on the attention output + self.post_attention_layernorm = layer_norm_func( + config.hidden_size, eps=config.layernorm_epsilon) + + # MLP + self.mlp = GLMMLP(config, quant_config) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + # hidden_states: [num_tokens, h] + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Self attention. + attention_output = self.self_attention( + hidden_states=layernorm_output, + position_ids=position_ids, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + layernorm_input = residual + attention_output + + # Layer norm post the self attention. + layernorm_output = self.post_attention_layernorm(layernorm_input) + + # Second residual connection. 
+ if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + output = self.mlp(layernorm_output) + residual + + return output + + +class GLMTransformer(nn.Module): + """Transformer class.""" + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.post_layer_norm = config.post_layer_norm + + # Number of layers. + self.num_layers = config.num_layers + + # Transformer layers. + self.layers = nn.ModuleList( + [GLMBlock(config, quant_config) for i in range(self.num_layers)]) + + if self.post_layer_norm: + layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm + # Final layer norm before output. + self.final_layernorm = layer_norm_func( + config.hidden_size, eps=config.layernorm_epsilon) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + for i in range(self.num_layers): + layer = self.layers[i] + hidden_states = layer( + hidden_states=hidden_states, + position_ids=position_ids, + kv_cache=kv_caches[i], + attn_metadata=attn_metadata, + ) + # Final layer norm. 
+ if self.post_layer_norm: + hidden_states = self.final_layernorm(hidden_states) + + return hidden_states + + +class ChatGLMModel(nn.Module): + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + + self.embedding = VocabParallelEmbedding(config.padded_vocab_size, + config.hidden_size) + + self.num_layers = config.num_layers + self.multi_query_group_num = config.multi_query_group_num + self.kv_channels = config.kv_channels + self.encoder = GLMTransformer(config, quant_config) + + self.output_layer = ParallelLMHead(config.padded_vocab_size, + config.hidden_size) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + inputs_embeds = self.embedding(input_ids) + + # Run encoder. + hidden_states = self.encoder( + hidden_states=inputs_embeds, + position_ids=position_ids, + kv_caches=kv_caches, + attn_metadata=attn_metadata, + ) + return hidden_states + + +class ChatGLMForCausalLM(nn.Module): + packed_modules_mapping = { + "query_key_value": ["query_key_value"], + "dense_h_to_4h": ["dense_h_to_4h"] + } + # LoRA specific attributes + supported_lora_modules = [ + "query_key_value", + "dense", + "dense_h_to_4h", + "dense_4h_to_h", + ] + embedding_modules = {} + embedding_padding_modules = [] + + def __init__( + self, + config: ChatGLMConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ): + super().__init__() + self.config: ChatGLMConfig = config + self.quant_config = quant_config + self.transformer = ChatGLMModel(config, quant_config) + self.lm_head_weight = self.transformer.output_layer.weight + self.logits_processor = LogitsProcessor(config.padded_vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> 
torch.Tensor: + hidden_states = self.transformer(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head_weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + if "rotary_pos_emb.inv_freq" in name: + continue + if "word_embeddings" in name: + name = name.replace(".word_embeddings", "") + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py new file mode 100644 index 0000000..17c2f12 --- /dev/null +++ b/vllm/model_executor/models/commandr.py @@ -0,0 +1,373 @@ +# coding=utf-8 +# Copyright 2024 Cohere and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is based on the LLama model definition file in transformers +"""PyTorch Cohere model.""" +from typing import Iterable, List, Optional, Tuple + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn.parameter import Parameter +from transformers import CohereConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.utils import set_weight_attrs +from vllm.sequence import SamplerOutput + + +@torch.compile +def layer_norm_func(hidden_states, weight, variance_epsilon): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + mean = hidden_states.mean(-1, keepdim=True) + variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) + hidden_states = 
(hidden_states - mean) * torch.rsqrt(variance + + variance_epsilon) + hidden_states = weight.to(torch.float32) * hidden_states + return hidden_states.to(input_dtype) + + +class LayerNorm(nn.Module): + + def __init__(self, param_shape=None, eps=1e-5): + super().__init__() + self.weight = nn.Parameter(torch.ones(param_shape)) + self.variance_epsilon = eps + set_weight_attrs(self.weight, {"weight_loader": self.weight_loader}) + + def forward(self, hidden_states, residuals=None): + hidden_states = layer_norm_func(hidden_states, self.weight, + self.variance_epsilon) + return hidden_states, residuals + + def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor): + tp_rank = get_tensor_model_parallel_rank() + shard_dim = 0 if param.dim() != 1 else None + param_data = param.data + if shard_dim is not None: + shard_size = param_data.shape[shard_dim] + start_idx = tp_rank * shard_size + loaded_weight = loaded_weight.narrow(shard_dim, start_idx, + shard_size) + assert param_data.shape == loaded_weight.shape + param_data.copy_(loaded_weight) + + +# Copied from transformers.models.llama.modeling_llama.LlamaMLP Llama->Cohere +class CohereMLP(nn.Module): + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_up_proj = MergedColumnParallelLinear( + self.hidden_size, + [self.intermediate_size] * 2, + bias=False, + quant_config=quant_config, + ) + self.down_proj = RowParallelLinear( + self.intermediate_size, + self.hidden_size, + bias=False, + quant_config=quant_config, + ) + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class CohereAttention(nn.Module): + + def __init__( + self, + config: CohereConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + 
super().__init__() + tp_size = get_tensor_model_parallel_world_size() + self.config = config + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.total_num_heads = config.num_attention_heads + self.num_heads = self.total_num_heads // tp_size + self.head_dim = self.hidden_size // self.total_num_heads + self.total_num_kv_heads = config.num_key_value_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.max_position_embeddings = getattr( + config, "model_max_length", None) or getattr( + config, "max_position_embeddings", 8192) + self.rope_theta = config.rope_theta + self.rope_scaling = getattr(config, "rope_scaling", None) + self.use_qk_norm = getattr(config, "use_qk_norm", False) + self.qkv_proj = QKVParallelLinear( + self.hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + self.hidden_size, + bias=False, + quant_config=quant_config, + ) + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=self.max_position_embeddings, + base=self.rope_theta, + rope_scaling=self.rope_scaling, + is_neox_style=False, + ) + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + ) + if self.use_qk_norm: + self.q_norm = LayerNorm(param_shape=(self.num_heads, + 
self.head_dim), + eps=config.layer_norm_eps) + self.k_norm = LayerNorm(param_shape=(self.num_kv_heads, + self.head_dim), + eps=config.layer_norm_eps) + + def _apply_qk_norm(self, q, k): + q = q.view(*q.shape[:-1], -1, self.head_dim) + k = k.view(*k.shape[:-1], -1, self.head_dim) + q, _ = self.q_norm(q) + k, _ = self.k_norm(k) + q = q.view(*q.shape[:-2], -1) + k = k.view(*k.shape[:-2], -1) + return q, k + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + if self.use_qk_norm: + q, k = self._apply_qk_norm(q, k) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class CohereDecoderLayer(nn.Module): + + def __init__(self, + config: CohereConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = CohereAttention(config, quant_config=quant_config) + + self.mlp = CohereMLP(config, quant_config=quant_config) + self.input_layernorm = LayerNorm(param_shape=(config.hidden_size), + eps=config.layer_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Self Attention + residual = hidden_states + hidden_states, residual = self.input_layernorm(hidden_states, residual) + hidden_states_attention = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states_mlp = self.mlp(hidden_states) + # Add everything together + hidden_states = residual + hidden_states_attention + hidden_states_mlp + + return 
hidden_states, residual + + +class CohereModel(nn.Module): + + def __init__( + self, + config: CohereConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.vocab_size = config.vocab_size + self.embed_tokens = VocabParallelEmbedding(config.vocab_size, + config.hidden_size) + self.layers = nn.ModuleList([ + CohereDecoderLayer(config, quant_config=quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.norm = LayerNorm(param_shape=(config.hidden_size), + eps=config.layer_norm_eps) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + residual = None + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states, residual = layer( + positions, + hidden_states, + kv_caches[i], + attn_metadata, + residual, + ) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + +class CohereForCausalLM(nn.Module): + + def __init__( + self, + config: CohereConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.quant_config = quant_config + self.logits_processor = LogitsProcessor(config.vocab_size, + scale=config.logit_scale) + self.model = CohereModel(config, quant_config) + self.sampler = Sampler() + + @torch.no_grad() + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.model.embed_tokens.weight, + hidden_states, sampling_metadata) + return logits + + def sample( + self, + 
logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + loaded_params = set() + for name, loaded_weight in weights: + for param_name, shard_name, shard_id in stacked_params_mapping: + if shard_name not in name: + continue + name = name.replace(shard_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # lm_head is not used in vllm as it is tied with embed_token. + # To prevent errors, skip loading lm_head.weight. + if "lm_head.weight" in name: + continue + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py new file mode 100644 index 0000000..a4a0ae5 --- /dev/null +++ b/vllm/model_executor/models/dbrx.py @@ -0,0 +1,413 @@ +# coding=utf-8 +from typing import Iterable, List, Optional, Tuple + +import torch +import torch.nn as nn + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.fused_moe import fused_moe +from vllm.model_executor.layers.linear import (QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.model_executor.utils import set_weight_attrs +from vllm.sequence import SamplerOutput +from vllm.transformers_utils.configs.dbrx import DbrxConfig + + +class DbrxRouter(nn.Module): + """A Router implementation for DBRX that returns logits for each expert + per token. 
+ """ + + def __init__( + self, + config: DbrxConfig, + params_dtype: Optional[torch.dtype] = None, + ): + super().__init__() + self.tp_size = get_tensor_model_parallel_world_size() + self.num_total_experts = config.ffn_config.moe_num_experts + self.d_model = config.d_model + self.layer = ReplicatedLinear( + self.d_model, + self.num_total_experts, + bias=False, + params_dtype=params_dtype, + quant_config=None, + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + router_logits, _ = self.layer(hidden_states) + return router_logits + + +class DbrxExperts(nn.Module): + """A tensor-parallel MoE implementation for DBRX. + + Each expert's weights are sharded across all ranks and a fused MoE + kernel is used for the forward pass, and finally we reduce the outputs + across ranks. + """ + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + params_dtype: Optional[torch.dtype] = None, + ): + super().__init__() + self.tp_size = get_tensor_model_parallel_world_size() + self.num_total_experts = config.ffn_config.moe_num_experts + self.top_k = config.ffn_config.moe_top_k + self.d_model = config.d_model + self.intermediate_size = (config.ffn_config.ffn_hidden_size // + self.tp_size) + + if params_dtype is None: + params_dtype = torch.get_default_dtype() + self.params_dtype = params_dtype + + self.router = DbrxRouter(config, self.params_dtype) + self.ws = nn.Parameter( + torch.empty( + self.num_total_experts, + 2 * self.intermediate_size, + self.d_model, + device="cuda", + dtype=self.params_dtype, + )) + self.w2s = nn.Parameter( + torch.empty( + self.num_total_experts, + self.d_model, + self.intermediate_size, + device="cuda", + dtype=self.params_dtype, + )) + + set_weight_attrs( + self.ws, + { + "weight_loader": self.weight_loader, + }, + ) + set_weight_attrs( + self.w2s, + { + "weight_loader": self.weight_loader, + }, + ) + + def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor, + 
weight_name: str): + tp_rank = get_tensor_model_parallel_rank() + param_data = param.data + shard_size = self.intermediate_size + shard = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size) + # DBRX uses GLU for each experts. + # GLU has 3 linear layers: w1, v1 and w2. + if weight_name.endswith("w1"): + loaded_weight = torch.reshape( + loaded_weight, + [-1, self.intermediate_size * self.tp_size, self.d_model], + ) + param_data[:, 0:shard_size, :] = loaded_weight[:, shard, :] + if weight_name.endswith("v1"): + loaded_weight = torch.reshape( + loaded_weight, + [-1, self.intermediate_size * self.tp_size, self.d_model], + ) + param_data[:, + shard_size:2 * shard_size, :] = loaded_weight[:, + shard, :] + if weight_name.endswith("w2"): + loaded_weight = torch.reshape( + loaded_weight, + [-1, self.intermediate_size * self.tp_size, self.d_model], + ).transpose(1, 2) + param_data[:] = loaded_weight[:, :, shard] + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + num_tokens, hidden_size = hidden_states.shape + hidden_states = hidden_states.view(-1, self.d_model) + # router_logits: (num_tokens, n_experts) + router_logits = self.router(hidden_states) + final_hidden_states = fused_moe( + hidden_states, + self.ws, + self.w2s, + router_logits, + self.top_k, + renormalize=True, + inplace=True, + ) + + if self.tp_size > 1: + final_hidden_states = tensor_model_parallel_all_reduce( + final_hidden_states) + + return final_hidden_states.view(num_tokens, hidden_size) + + +class DbrxAttention(nn.Module): + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.d_model = config.d_model + self.total_num_heads = config.n_heads + self.head_dim = self.d_model // self.total_num_heads + self.total_num_kv_heads = config.attn_config.kv_n_heads + self.clip_qkv = config.attn_config.clip_qkv + self.rope_theta = config.attn_config.rope_theta + self.max_position = config.max_seq_len + + # pylint: 
disable=invalid-name + self.Wqkv = QKVParallelLinear( + self.d_model, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + self.out_proj = RowParallelLinear( + self.d_model, + self.d_model, + bias=False, + quant_config=quant_config, + ) + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=self.max_position, + base=int(self.rope_theta), + is_neox_style=True, + ) + + tp_world_size = get_tensor_model_parallel_world_size() + self.tp_size = tp_world_size + assert self.total_num_heads % tp_world_size == 0 + self.num_heads = self.total_num_heads // tp_world_size + if self.total_num_kv_heads >= tp_world_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_world_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_world_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_world_size) + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + ) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.Wqkv(hidden_states) + if self.clip_qkv is not None: + qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(position_ids, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + hidden_states, _ = self.out_proj(attn_output) + return hidden_states + + +class DbrxFusedNormAttention(nn.Module): + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.d_model = config.d_model + self.attn = DbrxAttention(config, quant_config) + self.norm_1 = nn.LayerNorm(self.d_model) + self.norm_2 = nn.LayerNorm(self.d_model) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = self.norm_1(hidden_states) + x = self.attn( + position_ids=position_ids, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states = residual + x + residual = hidden_states + hidden_states = self.norm_2(hidden_states) + return hidden_states, residual + + +class DbrxBlock(nn.Module): + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.norm_attn_norm = DbrxFusedNormAttention(config, quant_config) + 
self.ffn = DbrxExperts(config, quant_config) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states, residual = self.norm_attn_norm( + position_ids=position_ids, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states = self.ffn(hidden_states) + hidden_states = hidden_states + residual + return hidden_states + + +class DbrxModel(nn.Module): + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.wte = VocabParallelEmbedding( + config.vocab_size, + config.d_model, + ) + self.blocks = nn.ModuleList( + [DbrxBlock(config, quant_config) for _ in range(config.n_layers)]) + self.norm_f = nn.LayerNorm(config.d_model, eps=1e-5) + for module in self.modules(): + if hasattr(module, "bias") and isinstance(module.bias, + nn.Parameter): + # Remove the bias term in Linear and LayerNorm. 
+ module.register_parameter("bias", None) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.wte(input_ids) + for i in range(len(self.blocks)): + block = self.blocks[i] + hidden_states = block( + position_ids, + hidden_states, + kv_caches[i], + attn_metadata, + ) + hidden_states = self.norm_f(hidden_states) + return hidden_states + + +class DbrxForCausalLM(nn.Module): + + def __init__( + self, + config: DbrxConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.quant_config = quant_config + self.unpadded_vocab_size = config.vocab_size + self.transformer = DbrxModel(config, quant_config) + self.lm_head = ParallelLMHead( + config.vocab_size, + config.d_model, + org_num_embeddings=config.vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE, + ) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.transformer(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: Optional[torch.Tensor], + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + expert_params_mapping = [( + "ws" if weight_name in ["w1", "v1"] else "w2s", + f"experts.mlp.{weight_name}", + ) for weight_name in ["w1", "v1", 
"w2"]] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + for param_name, weight_name in expert_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, weight_name) + break + else: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/decilm.py b/vllm/model_executor/models/decilm.py new file mode 100644 index 0000000..be9a6b6 --- /dev/null +++ b/vllm/model_executor/models/decilm.py @@ -0,0 +1,122 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 DeciAI Research Team. All rights reserved. +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on MistralAI GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only DeciLM model compatible with HuggingFace weights.""" + +from typing import Iterable, Optional, Tuple + +import torch +from transformers import PretrainedConfig + +from vllm.config import LoRAConfig +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.llama import LlamaForCausalLM + + +class DeciLMForCausalLM(LlamaForCausalLM): + """ + Implementation for https://huggingface.co/Deci/DeciLM-7b-instruct. + Based on the llama executor. + + The main difference is that DeciLM uses Variable Grouped Query Attention. + The constant number of GQA heads in the decoder is overridden with a value + per layer. + + Usually, in the HuggingFace implementation, instead of + "config.num_key_value_heads", we use + "config.num_key_value_heads_per_layer[i]" which varies. + + Currently, PagedAttention does not work well with variable GQA, so we + normalize the weights upon loading, and use uniform GQA with the max value + instead. 
+ """ + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: + config.num_key_value_heads = max(config.num_key_value_heads_per_layer) + delattr(config, "num_key_value_heads_per_layer") + super().__init__(config=config, + quant_config=quant_config, + lora_config=lora_config) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + + if "k_proj" in name or "v_proj" in name: + loaded_weight = self._degroup_weight(loaded_weight) + + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + def _degroup_weight(self, loaded_weight: torch.Tensor) -> torch.Tensor: + hidden_size = self.config.hidden_size + head_size = self.config.hidden_size // self.config.num_attention_heads + target_num_kv_heads = self.config.num_key_value_heads + num_kv_heads = loaded_weight.shape[0] // head_size + n_repeats = target_num_kv_heads / num_kv_heads + assert n_repeats == int(n_repeats) + + n_repeats = int(n_repeats) + loaded_weight = loaded_weight.view(num_kv_heads, head_size, + hidden_size) + loaded_weight = torch.repeat_interleave(loaded_weight, + repeats=n_repeats, + dim=0) + loaded_weight = loaded_weight.reshape(target_num_kv_heads * head_size, + hidden_size) + + return loaded_weight diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py new file mode 100644 index 0000000..e5f7ba0 --- /dev/null +++ b/vllm/model_executor/models/deepseek.py @@ -0,0 +1,438 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 The vLLM team. +# Copyright 2023 DeepSeek-AI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Deepseek model.""" +from typing import Any, Dict, Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.fused_moe import fused_moe +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class DeepseekMLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + 
hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config) + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results) + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class DeepseekMoE(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.rank = get_tensor_model_parallel_rank() + self.tp_size = get_tensor_model_parallel_world_size() + self.n_routed_experts = config.n_routed_experts + self.top_k = config.num_experts_per_tok + if self.tp_size > self.n_routed_experts: + raise ValueError( + f"Tensor parallel size {self.tp_size} is greater than " + f"the number of experts {self.n_routed_experts}.") + + self.experts = nn.ModuleList([ + DeepseekMLP(hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=False) + for idx in range(self.n_routed_experts) + ]) + self.pack_params() + + self.gate = ReplicatedLinear(config.hidden_size, + self.n_routed_experts, + bias=False, + quant_config=None) + + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + self.shared_experts = DeepseekMLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=False, + ) + + def pack_params(self): + w1 = [] + w2 = [] + for expert in self.experts: + w1.append(expert.gate_up_proj.weight) + w2.append(expert.down_proj.weight) + self.w1 = torch._utils._flatten_dense_tensors(w1) + 
w1s = torch._utils._unflatten_dense_tensors(self.w1, w1) + for data, param in zip(w1s, w1): + param.data = data + self.w1 = self.w1.view(len(w1), *w1s[0].shape) + + self.w2 = torch._utils._flatten_dense_tensors(w2) + w2s = torch._utils._unflatten_dense_tensors(self.w2, w2) + for data, param in zip(w2s, w2): + param.data = data + + self.w2 = self.w2.view(len(w2), *w2s[0].shape) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + num_tokens, hidden_dim = hidden_states.shape + hidden_states = hidden_states.view(-1, hidden_dim) + if self.config.n_shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + # router_logits: (num_tokens, n_experts) + router_logits, _ = self.gate(hidden_states) + final_hidden_states = fused_moe(hidden_states, + self.w1, + self.w2, + router_logits, + self.top_k, + renormalize=self.config.norm_topk_prob, + inplace=True) + + if self.config.n_shared_experts is not None: + final_hidden_states = final_hidden_states + shared_output + final_hidden_states = tensor_model_parallel_all_reduce( + final_hidden_states) + + return final_hidden_states.view(num_tokens, hidden_dim) + + +class DeepseekAttention(nn.Module): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[Dict[str, Any]] = None, + max_position_embeddings: int = 8192, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. 
+ assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + ) + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class DeepseekDecoderLayer(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + layer_idx: int, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, 
"max_position_embeddings", + 8192) + self.self_attn = DeepseekAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + quant_config=quant_config, + ) + if (config.n_routed_experts is not None + and layer_idx >= config.first_k_dense_replace + and layer_idx % config.moe_layer_freq == 0): + self.mlp = DeepseekMoE(config=config, quant_config=quant_config) + else: + self.mlp = DeepseekMLP( + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> torch.Tensor: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states) + return hidden_states, residual + + +class DeepseekModel(nn.Module): + + fall_back_to_pt_during_load = False + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + 
config.hidden_size, + ) + self.layers = nn.ModuleList([ + DeepseekDecoderLayer(config, layer_idx, quant_config=quant_config) + for layer_idx in range(config.num_hidden_layers) + ]) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + residual = None + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states, residual = layer(positions, hidden_states, + kv_caches[i], attn_metadata, + residual) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + +class DeepseekForCausalLM(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.quant_config = quant_config + self.model = DeepseekModel(config, quant_config) + self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: Optional[torch.Tensor], + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, 
shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Skip experts that are not assigned to this worker. + if (("mlp.experts." in name or "mlp.shared_experts." in name) + and name not in params_dict): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Skip experts that are not assigned to this worker. + if (("mlp.experts." in name or "mlp.shared_experts." in name) + and name not in params_dict): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py new file mode 100644 index 0000000..08dd699 --- /dev/null +++ b/vllm/model_executor/models/falcon.py @@ -0,0 +1,444 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/a5cc30d72ae2dc19af534e4b35c986cc28db1275/src/transformers/models/falcon/modeling_falcon.py +# Copyright 2023 The vLLM team. +# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights +# reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch Falcon model.""" + +import math +from typing import Iterable, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import LayerNorm +from transformers import FalconConfig as HF_FalconConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput +from vllm.transformers_utils.configs import RWConfig + +FalconConfig = Union[HF_FalconConfig, RWConfig] + + +def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor: + closest_power_of_2 = 2**math.floor(math.log2(total_num_heads)) + base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))), + dtype=torch.float32) + powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) + slopes = 
class FalconAttention(nn.Module):
    """Falcon multi-head attention with tensor-parallel projections.

    The layer runs in exactly one of three positional-encoding modes,
    chosen from the config (asserted mutually exclusive below): rotary
    embeddings, ALiBi slopes, or none.  The KV-head count also depends on
    the config: ``config.num_kv_heads`` (new decoder architecture), 1
    (multi-query), or one KV head per query head (classic multi-head).
    """

    def __init__(
        self,
        config: FalconConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()

        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()

        # Query heads are always evenly partitioned across TP ranks.
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.head_dim = self.hidden_size // self.total_num_heads
        assert self.head_dim * self.total_num_heads == self.hidden_size

        self.new_decoder_architecture = config.new_decoder_architecture
        self.multi_query = config.multi_query

        # Select the global KV-head count from the checkpoint layout.
        if self.new_decoder_architecture:
            self.total_num_kv_heads = config.num_kv_heads
        elif self.multi_query:
            self.total_num_kv_heads = 1
        else:
            self.total_num_kv_heads = self.total_num_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is at least the TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)

        # Fused QKV projection.  skip_bias_add=True returns the bias
        # separately so forward() can apply it explicitly.
        self.query_key_value = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.bias,
            skip_bias_add=True,
            quant_config=quant_config,
        )
        # Per-rank slice widths used to split the fused QKV output.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim

        # Layer-wise attention scaling
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
        # With parallel attention or the new decoder architecture, the
        # all-reduce of this projection is deferred so that attention and
        # MLP outputs can share a single reduction in the decoder layer.
        self.reduce_row_parallel_results = not (config.new_decoder_architecture
                                                or config.parallel_attn)
        self.dense = RowParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=config.bias,
            skip_bias_add=True,
            quant_config=quant_config,
            reduce_results=self.reduce_row_parallel_results)

        self.use_rotary = config.rotary
        self.use_alibi = config.alibi
        assert not (self.use_rotary and self.use_alibi), (
            "Rotary and alibi are mutually exclusive.")

        if self.use_rotary:
            rope_theta = getattr(config, "rope_theta", 10000)
            max_position_embeddings = getattr(config,
                                              "max_position_embeddings", 8192)
            self.rotary_emb = get_rope(
                self.head_dim,
                rotary_dim=self.head_dim,
                max_position=max_position_embeddings,
                base=rope_theta,
            )
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  self.inv_norm_factor,
                                  num_kv_heads=self.num_kv_heads)
        elif self.use_alibi:
            # Each TP rank only keeps the ALiBi slopes for its own
            # contiguous slice of query heads.
            tp_rank = get_tensor_model_parallel_rank()
            head_start = tp_rank * self.num_heads
            head_end = (tp_rank + 1) * self.num_heads
            alibi_slopes = (_get_alibi_slopes(self.total_num_heads) *
                            self.inv_norm_factor)
            alibi_slopes = alibi_slopes[head_start:head_end].tolist()
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  self.inv_norm_factor,
                                  num_kv_heads=self.num_kv_heads,
                                  alibi_slopes=alibi_slopes)
        else:
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  scale=self.inv_norm_factor,
                                  num_kv_heads=self.num_kv_heads)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run attention.

        Returns a ``(output, bias)`` pair: because ``self.dense`` is built
        with ``skip_bias_add=True``, its bias (or None) is handed back to
        the caller, which decides when to add it (see FalconDecoderLayer).
        """
        qkv, bias = self.query_key_value(hidden_states)
        if bias is not None:
            qkv += bias
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        if self.use_rotary:
            q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        attn_output, bias = self.dense(attn_output)
        return attn_output, bias
class FalconDecoderLayer(nn.Module):
    """One Falcon transformer layer.

    Three residual wirings are supported, selected by the config:
    - new decoder architecture: separate pre-norms for attention and MLP,
      both fed from the same input; single deferred all-reduce;
    - parallel attention: one pre-norm shared by attention and MLP;
      single deferred all-reduce;
    - sequential (classic): attention then MLP, each with its own norm and
      its own (immediate) all-reduce.
    """

    def __init__(
        self,
        config: FalconConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.self_attention = FalconAttention(config, quant_config)
        self.mlp = FalconMLP(config, quant_config)
        self.config = config

        if config.new_decoder_architecture:
            # The layer norm before self-attention
            self.ln_attn = LayerNorm(hidden_size,
                                     eps=config.layer_norm_epsilon)
            # The layer norm before the MLP
            self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        else:
            self.input_layernorm = LayerNorm(hidden_size,
                                             eps=config.layer_norm_epsilon)
            if not config.parallel_attn:
                self.post_attention_layernorm = LayerNorm(
                    hidden_size, eps=config.layer_norm_epsilon)

        # False when attention/MLP outputs are reduced together below.
        self.reduce_row_parallel_results = not (config.new_decoder_architecture
                                                or config.parallel_attn)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        residual = hidden_states

        if self.config.new_decoder_architecture:
            attention_layernorm_out = self.ln_attn(hidden_states)
            mlp_layernorm_out = self.ln_mlp(hidden_states)
        else:
            attention_layernorm_out = self.input_layernorm(hidden_states)

        # Self attention.
        attention_output, attention_bias = self.self_attention(
            positions=positions,
            hidden_states=attention_layernorm_out,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        # In the sequential path the attention output is already reduced,
        # so its deferred bias is applied here.
        if self.reduce_row_parallel_results and attention_bias is not None:
            attention_output += attention_bias

        if not self.config.new_decoder_architecture:
            if self.config.parallel_attn:
                mlp_layernorm_out = attention_layernorm_out
            else:
                residual += attention_output
                mlp_layernorm_out = self.post_attention_layernorm(residual)

        # MLP.
        mlp_output, mlp_bias = self.mlp(mlp_layernorm_out)
        if self.reduce_row_parallel_results and mlp_bias is not None:
            mlp_output += mlp_bias

        if not self.reduce_row_parallel_results:
            # When MLP and Attention layers are parallel, we can use
            # only one all-reduce operator to reduce the results from
            # both MLP and Attention layers.
            mlp_output += attention_output
            mlp_output = tensor_model_parallel_all_reduce(mlp_output)
            # Biases are added after the all-reduce: they are replicated
            # on every rank, so adding before would multiply them by the
            # TP world size.
            if attention_bias is not None:
                mlp_output += attention_bias
            if mlp_bias is not None:
                mlp_output += mlp_bias

        output = mlp_output + residual
        return output
class FalconForCausalLM(nn.Module):
    """Falcon causal-LM: FalconModel backbone plus logits/sampling heads.

    The LM head is tied to the input embedding table, so logits are
    computed against ``transformer.word_embeddings.weight``.
    """

    def __init__(
        self,
        config: FalconConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = FalconModel(config, quant_config)
        # Tied embedding weight doubles as the output projection.
        self.lm_head_weight = self.transformer.word_embeddings.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.LongTensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.transformer(
            input_ids,
            positions,
            kv_caches,
            attn_metadata,
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head_weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load HF checkpoint weights into this model.

        HF Falcon stores the fused QKV weight with Q/K/V interleaved per
        KV group; vLLM's QKVParallelLinear expects all Q rows, then all K
        rows, then all V rows, so ``query_key_value`` weights are
        de-interleaved below before being handed to the weight loader.
        """
        total_num_heads = self.config.num_attention_heads
        if self.config.new_decoder_architecture:
            total_num_kv_heads = self.config.num_kv_heads
        elif self.config.multi_query:
            total_num_kv_heads = 1
        else:
            total_num_kv_heads = total_num_heads
        num_query_heads_per_kv_head = total_num_heads // total_num_kv_heads
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            if name == "lm_head.weight":
                # Falcon uses tied embeddings.
                continue
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue
            param = params_dict[name]
            if "query_key_value" in name:
                output_dim = getattr(param, "output_dim", None)
                loaded_weight_shape = loaded_weight.shape
                if output_dim is not None:
                    # View the fused dim as (kv_heads, q_per_kv + 2, head),
                    # i.e. each KV group holds its Q heads followed by one
                    # K and one V head.
                    loaded_weight = loaded_weight.view(
                        loaded_weight_shape[:output_dim] +
                        (total_num_kv_heads, num_query_heads_per_kv_head + 2,
                         -1) + loaded_weight_shape[output_dim + 1:])
                    # Slice out Q, K, V along the group axis and flatten
                    # each back to the original fused-dim layout.
                    wq = loaded_weight.narrow(
                        output_dim + 1, 0,
                        num_query_heads_per_kv_head).reshape(
                            *loaded_weight_shape[:output_dim], -1,
                            *loaded_weight_shape[output_dim + 1:])
                    wk = loaded_weight.narrow(
                        output_dim + 1, num_query_heads_per_kv_head,
                        1).reshape(*loaded_weight_shape[:output_dim], -1,
                                   *loaded_weight_shape[output_dim + 1:])
                    wv = loaded_weight.narrow(
                        output_dim + 1, num_query_heads_per_kv_head + 1,
                        1).reshape(*loaded_weight_shape[:output_dim], -1,
                                   *loaded_weight_shape[output_dim + 1:])
                    # Recombine as [all Q | all K | all V].
                    loaded_weight = torch.cat([wq, wk, wv], dim=output_dim)

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Gemma model compatible with HuggingFace weights.""" +from functools import lru_cache +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import GemmaConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import LoRAConfig +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.logger import init_logger +from vllm.model_executor.layers.activation import GeluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + +logger = init_logger(__name__) + + +@lru_cache(maxsize=None) +def _get_gemma_act_fn( + hidden_act: Optional[str], + hidden_activation: Optional[str], +) -> nn.Module: + if hidden_activation is None: + if hidden_act is not None: + logger.warning( + "Gemma's activation function was incorrectly set to exact GeLU " + "in the config JSON file when it was initially released. " + "Changing the activation function to approximate GeLU " + "(`gelu_pytorch_tanh`). If you want to use the legacy " + "`%s`, edit the config JSON to set " + "`hidden_activation=%s` instead of `hidden_act`. 
" + "See https://github.com/huggingface/transformers/pull/29402 " + "for more details.", hidden_act, hidden_act) + return GeluAndMul(approximate="tanh") + elif hidden_activation == "gelu_pytorch_tanh": + return GeluAndMul(approximate="tanh") + elif hidden_activation == "gelu": + return GeluAndMul(approximate="none") + else: + raise ValueError(f"Activation function {hidden_act} is not " + "supported for Gemma models.") + + +class GemmaMLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: Optional[str] = None, + hidden_activation: Optional[str] = None, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config) + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config) + self.act_fn = _get_gemma_act_fn(hidden_act, hidden_activation) + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class GemmaAttention(nn.Module): + + def __init__(self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + head_dim: int, + max_position_embeddings: int = 8192, + rope_theta: float = 10000, + quant_config: Optional[QuantizationConfig] = None) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. 
class GemmaDecoderLayer(nn.Module):
    """A single Gemma transformer layer: self-attention then a gated MLP,
    each preceded by an RMSNorm that fuses the residual addition."""

    def __init__(
        self,
        config: GemmaConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        # Pre-norms; this RMSNorm variant also accepts a residual tensor
        # and returns (normed, updated_residual).
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)
        self.self_attn = GemmaAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            max_position_embeddings=config.max_position_embeddings,
            rope_theta=config.rope_theta,
            quant_config=quant_config,
        )
        self.mlp = GemmaMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            # Older configs may lack this field entirely.
            hidden_activation=getattr(config, "hidden_activation", None),
            quant_config=quant_config,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Returns (mlp_output, residual) for the next layer to consume."""
        # For the first layer there is no residual yet; afterwards the
        # residual add is fused into the RMSNorm call.
        if residual is not None:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        else:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)

        attn_out = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # MLP with fused residual-add pre-norm.
        normed, residual = self.post_attention_layernorm(attn_out, residual)
        return self.mlp(normed), residual
class GemmaForCausalLM(nn.Module):
    """Gemma causal-LM: GemmaModel backbone plus logits/sampling heads.

    The LM head is tied to the input embedding, so logits are computed
    against ``model.embed_tokens.weight`` and no separate ``lm_head``
    parameter exists.
    """

    # Maps fused vLLM parameters to the HF shards they pack.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "gate_up_proj",
        "down_proj",
    ]
    # Gemma does not apply LoRA to the embedding layer.
    embedding_modules = {}
    embedding_padding_modules = []

    def __init__(
        self,
        config: GemmaConfig,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        del lora_config  # Unused.
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = GemmaModel(config, quant_config)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    @torch.no_grad()
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        # Tied embedding weight serves as the output projection.
        logits = self.logits_processor(self.model.embed_tokens.weight,
                                       hidden_states, sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load checkpoint weights, mapping HF shard names onto fused
        vLLM parameters, and verify every parameter got initialized.

        Raises:
            RuntimeError: if any model parameter was not covered by the
                checkpoint.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params = set()
        for name, loaded_weight in weights:
            for (param_name, shard_name, shard_id) in stacked_params_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # lm_head is not used in vllm as it is tied with embed_token.
                # To prevent errors, skip loading lm_head.weight.
                if "lm_head.weight" in name:
                    continue
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # GemmaRMSNorm is different from Llama's in that it multiplies
                # (1 + weight) to the output, instead of just weight.
                # BUG FIX: fold the +1 out-of-place.  The previous in-place
                # `loaded_weight += 1.0` mutated the checkpoint tensor
                # itself, which may be memory-mapped or cached and reused
                # by another loader.
                if "norm.weight" in name:
                    loaded_weight = loaded_weight + 1.0
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        unloaded_params = params_dict.keys() - loaded_params
        if unloaded_params:
            raise RuntimeError(
                "Some weights are not initialized from checkpoints: "
                f"{unloaded_params}")
+"""Inference-only GPT-2 model compatible with HuggingFace weights.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import GPT2Config + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class GPT2Attention(nn.Module): + + def __init__( + self, + config: GPT2Config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.hidden_size = config.hidden_size + total_num_heads = config.num_attention_heads + tensor_model_parallel_world_size = ( + get_tensor_model_parallel_world_size()) + assert total_num_heads % tensor_model_parallel_world_size == 0 + self.num_heads = total_num_heads // tensor_model_parallel_world_size + self.head_dim = self.hidden_size // total_num_heads + self.scale = self.head_dim**-0.5 + + self.c_attn = QKVParallelLinear( + self.hidden_size, + self.head_dim, + total_num_heads, + bias=True, + quant_config=quant_config, + ) + self.c_proj = RowParallelLinear( + self.hidden_size, + self.hidden_size, + bias=True, + quant_config=quant_config, + ) + self.attn = Attention(self.num_heads, self.head_dim, scale=self.scale) + + def forward( + self, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: 
AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.c_attn(hidden_states) + q, k, v = qkv.chunk(chunks=3, dim=-1) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + attn_output, _ = self.c_proj(attn_output) + return attn_output + + +class GPT2MLP(nn.Module): + + def __init__( + self, + intermediate_size: int, + config: GPT2Config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + self.c_fc = ColumnParallelLinear( + hidden_size, + intermediate_size, + bias=True, + quant_config=quant_config, + ) + self.c_proj = RowParallelLinear( + intermediate_size, + hidden_size, + bias=True, + quant_config=quant_config, + ) + self.act = get_act_fn(config.activation_function, quant_config, + intermediate_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states, _ = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states, _ = self.c_proj(hidden_states) + return hidden_states + + +class GPT2Block(nn.Module): + + def __init__( + self, + config: GPT2Config, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + inner_dim = (config.n_inner if config.n_inner is not None else 4 * + hidden_size) + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = GPT2Attention(config, quant_config) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.mlp = GPT2MLP(inner_dim, config, quant_config) + + def forward( + self, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_output = self.attn( + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + # residual connection + hidden_states = attn_output + residual + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + 
class GPT2LMHeadModel(nn.Module):
    """GPT-2 causal-LM: GPT2Model backbone plus logits/sampling heads.

    The LM head is tied to the token embedding (``transformer.wte``).
    """

    def __init__(
        self,
        config: GPT2Config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = GPT2Model(config, quant_config)
        # Tied embedding weight doubles as the output projection.
        self.lm_head_weight = self.transformer.wte.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head_weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load HF GPT-2 checkpoint weights.

        HF GPT-2 stores linear layers as Conv1D (weights transposed
        relative to nn.Linear), so ``c_attn``/``c_proj``/``c_fc`` weight
        tensors are transposed before loading.
        """
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            if "lm_head.weight" in name:
                # GPT-2 ties the weights of the embedding layer and the final
                # linear layer.
                continue
            if ".attn.bias" in name or ".attn.masked_bias" in name:
                # Skip attention mask.
                # NOTE: "c_attn.bias" should not be skipped.
                continue
            # HF checkpoint names lack the "transformer." prefix vLLM uses.
            if not name.startswith("transformer."):
                name = "transformer." + name
            param = params_dict[name]
            # The HF's GPT-2 implementation uses Conv1D instead of Linear.
            # Because of this, we need to transpose the weights.
            # Note(zhuohan): the logic below might break quantized models.
            for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
                if conv1d_weight_name not in name:
                    continue
                if not name.endswith(".weight"):
                    continue
                loaded_weight = loaded_weight.t()
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
class GPTBigCodeAttention(nn.Module):
    """GPTBigCode self-attention.

    Supports both classic multi-head attention and multi-query attention
    (``config.multi_query``), where a single K/V head is shared by all
    query heads and replicated on every tensor-parallel rank.
    """

    def __init__(
        self,
        config: GPTBigCodeConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        total_num_heads = config.num_attention_heads
        self.tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        assert total_num_heads % self.tensor_model_parallel_world_size == 0
        self.num_heads = (total_num_heads //
                          self.tensor_model_parallel_world_size)
        self.head_dim = self.hidden_size // total_num_heads
        self.scale = self.head_dim**-0.5

        self.multi_query = config.multi_query
        if self.multi_query:
            # One shared KV head, replicated per rank.
            total_num_kv_heads = 1
            self.num_kv_heads = 1
        else:
            total_num_kv_heads = total_num_heads
            self.num_kv_heads = self.num_heads
        # Width of the per-rank K (or V) slice in the fused QKV output.
        self.kv_dim = self.head_dim * self.num_kv_heads
        self.c_attn = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            total_num_heads,
            total_num_kv_heads,
            bias=True,
            quant_config=quant_config,
        )

        self.c_proj = RowParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=True,
            quant_config=quant_config,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              scale=self.scale,
                              num_kv_heads=self.num_kv_heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.c_attn(hidden_states)
        # Per-rank query width is hidden_size / tp_size; K and V each take
        # kv_dim (1 head under MQA, num_heads otherwise).
        q, k, v = qkv.split(
            [
                self.hidden_size // self.tensor_model_parallel_world_size,
                self.kv_dim, self.kv_dim
            ],
            dim=-1,
        )
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        attn_output, _ = self.c_proj(attn_output)
        return attn_output
class GPTBigCodeBlock(nn.Module):
    """One GPT-BigCode transformer layer.

    Pre-norm design: LayerNorm -> attention -> residual add, then
    LayerNorm -> MLP -> residual add.
    """

    def __init__(
        self,
        config: GPTBigCodeConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        # Fall back to the conventional 4x expansion when n_inner is unset.
        if config.n_inner is not None:
            inner_dim = config.n_inner
        else:
            inner_dim = 4 * hidden_size

        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = GPTBigCodeAttention(config, quant_config)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = GPTBigMLP(inner_dim, config, quant_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        # Attention sub-layer with residual connection.
        attn_out = self.attn(
            hidden_states=self.ln_1(hidden_states),
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        hidden_states = hidden_states + attn_out

        # Feed-forward sub-layer with residual connection.
        mlp_out = self.mlp(self.ln_2(hidden_states))
        return hidden_states + mlp_out
class GPTBigCodeForCausalLM(nn.Module):
    """GPT-BigCode with a causal LM head.

    The LM head is weight-tied to the input embedding (`wte`), so no
    separate lm_head parameter is ever loaded.
    """

    def __init__(
        self,
        config: GPTBigCodeConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = GPTBigCodeModel(config, quant_config)
        # Tie the output projection to the input embedding table.
        self.lm_head_weight = self.transformer.wte.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Return the final hidden states of the transformer stack."""
        return self.transformer(input_ids, positions, kv_caches,
                                attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states to vocab logits via the tied embedding."""
        return self.logits_processor(self.lm_head_weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy checkpoint tensors into this module's parameters."""
        params = dict(self.named_parameters(remove_duplicate=False))
        for ckpt_name, ckpt_tensor in weights:
            # lm_head is tied to wte; never loaded as a separate tensor.
            if "lm_head.weight" in ckpt_name:
                continue
            if ".attn.bias" in ckpt_name:
                # Causal-mask buffer, not a parameter.
                # NOTE: "c_attn.bias" does not match this pattern and
                # must still be loaded.
                continue
            target = params[ckpt_name]
            loader = getattr(target, "weight_loader", default_weight_loader)
            loader(target, ckpt_tensor)
+"""Inference-only GPT-J model compatible with HuggingFace weights.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import GPTJConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class GPTJAttention(nn.Module): + + def __init__( + self, + config: GPTJConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.total_num_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.total_num_heads + + self.qkv_proj = QKVParallelLinear( + config.hidden_size, + self.head_size, + self.total_num_heads, + bias=False, + quant_config=quant_config, + ) + self.out_proj = RowParallelLinear( + config.hidden_size, + config.hidden_size, + bias=False, + quant_config=quant_config, + ) + + tp_world_size = get_tensor_model_parallel_world_size() + assert self.total_num_heads % tp_world_size == 0 + self.num_heads = self.total_num_heads // tp_world_size + + scaling = self.head_size**-0.5 + assert getattr(config, "rotary", True) + assert config.rotary_dim % 2 == 0 + rope_theta = getattr(config, "rope_theta", 
class GPTJMLP(nn.Module):
    """GPT-J feed-forward block: fc_in -> activation -> fc_out."""

    def __init__(
        self,
        intermediate_size: int,
        config: GPTJConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        embed_dim = config.n_embd
        # Column-parallel expansion followed by row-parallel contraction.
        self.fc_in = ColumnParallelLinear(
            embed_dim,
            intermediate_size,
            quant_config=quant_config,
        )
        self.fc_out = RowParallelLinear(
            intermediate_size,
            embed_dim,
            quant_config=quant_config,
        )
        self.act = get_act_fn(config.activation_function, quant_config,
                              intermediate_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        up, _ = self.fc_in(hidden_states)
        down, _ = self.fc_out(self.act(up))
        return down
class GPTJModel(nn.Module):
    """GPT-J transformer backbone: token embedding -> n_layer blocks -> final
    LayerNorm.

    Position information enters through rotary embeddings inside each block,
    so there is no learned position-embedding table here.
    """

    def __init__(
        self,
        config: GPTJConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.embed_dim = config.n_embd
        # Vocab-parallel embedding: each TP rank holds a shard of the table.
        self.wte = VocabParallelEmbedding(
            config.vocab_size,
            self.embed_dim,
        )
        self.h = nn.ModuleList(
            [GPTJBlock(config, quant_config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Run the stack; ``kv_caches[i]`` is the cache for layer ``i``."""
        hidden_states = self.wte(input_ids)
        # Iterate layers and their KV caches in lockstep (idiomatic zip
        # instead of range(len(...)) with double indexing).
        for layer, kv_cache in zip(self.h, kv_caches):
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_cache,
                attn_metadata,
            )
        return self.ln_f(hidden_states)
positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata, self.lm_head.bias) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "attn.bias" in name or "attn.masked_bias" in name: + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py new file mode 100644 index 0000000..bab563b --- /dev/null +++ b/vllm/model_executor/models/gpt_neox.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt_neox/modeling_gpt_neox.py +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only GPT-NeoX model compatible with HuggingFace weights.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import GPTNeoXConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class GPTNeoXAttention(nn.Module): + + def __init__( + self, + config: GPTNeoXConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.total_num_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.total_num_heads + self.bias = getattr(config, "attention_bias", True) + + tensor_model_parallel_world_size = ( + get_tensor_model_parallel_world_size()) + assert self.total_num_heads % tensor_model_parallel_world_size == 0 + self.num_heads = (self.total_num_heads // + tensor_model_parallel_world_size) + + self.query_key_value = QKVParallelLinear( + config.hidden_size, + self.head_size, + self.total_num_heads, + bias=self.bias, + quant_config=quant_config, + ) + self.dense = RowParallelLinear( + config.hidden_size, + config.hidden_size, + bias=self.bias, + quant_config=quant_config, + ) + scaling = 
class GPTNeoXMLP(nn.Module):
    """GPT-NeoX feed-forward block (hidden -> intermediate -> hidden)."""

    def __init__(
        self,
        config: GPTNeoXConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        # Column-parallel expansion followed by row-parallel contraction.
        self.dense_h_to_4h = ColumnParallelLinear(
            config.hidden_size,
            config.intermediate_size,
            quant_config=quant_config,
        )
        self.dense_4h_to_h = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            quant_config=quant_config,
        )
        self.act = get_act_fn(config.hidden_act, quant_config,
                              config.intermediate_size)

    def forward(self, hidden_states):
        expanded, _ = self.dense_h_to_4h(hidden_states)
        contracted, _ = self.dense_4h_to_h(self.act(expanded))
        return contracted
class GPTNeoXModel(nn.Module):
    """GPT-NeoX transformer backbone: embedding -> layers -> final LayerNorm."""

    def __init__(
        self,
        config: GPTNeoXConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config

        # Vocab-parallel embedding: each TP rank holds a shard of the table.
        self.embed_in = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            GPTNeoXLayer(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size,
                                             eps=config.layer_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Run the stack; ``kv_caches[i]`` is the cache for layer ``i``."""
        hidden_states = self.embed_in(input_ids)
        # Iterate layers and their KV caches in lockstep (idiomatic zip
        # instead of range(len(...)) with double indexing).
        for layer, kv_cache in zip(self.layers, kv_caches):
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_cache,
                attn_metadata,
            )
        return self.final_layer_norm(hidden_states)
Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.quant_config = quant_config + self.gpt_neox = GPTNeoXModel(config, quant_config) + self.embed_out = ParallelLMHead( + config.vocab_size, + config.hidden_size, + ) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.gpt_neox(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.embed_out.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if ("attention.bias" in name or "attention.masked_bias" in name + or "rotary_emb.inv_freq" in name): + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using OpenRLHF may include + # these tensors in the checkpoint. Skip them. + continue + param = params_dict[name] + + if "query_key_value" in name: + # NOTE: GPT-NeoX's fused QKV's output_dim has the shape of + # (num_heads * 3 * head_size), while the + # required shape is (3 * num_heads * head_size). + # Thus, we need weight conversion. 
class InternLM2MLP(nn.Module):
    """InternLM2 gated MLP (SwiGLU): w1/w3 are fused into ``gate_up_proj``
    and ``w2`` projects back down to the hidden size."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        # w1 (gate) and w3 (up) fused into a single column-parallel matmul.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.w2 = RowParallelLinear(intermediate_size,
                                    hidden_size,
                                    bias=False,
                                    quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        # SiluAndMul computes silu(gate) * up on the fused projection.
        self.act_fn = SiluAndMul()

    def forward(self, x):
        fused, _ = self.gate_up_proj(x)
        down, _ = self.w2(self.act_fn(fused))
        return down
class InternLMDecoderLayer(nn.Module):
    """One InternLM2 decoder layer: RMSNorm -> attention -> RMSNorm -> gated
    MLP, using the fused-residual RMSNorm interface (norm(x, residual)
    returns (normed, new_residual))."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        # RoPE settings, with the usual defaults when absent from the config.
        rope_theta = getattr(config, "rope_theta", 10000)
        rope_scaling = getattr(config, "rope_scaling", None)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.attention = InternLM2Attention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
        )
        self.feed_forward = InternLM2MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
        )
        self.attention_norm = RMSNorm(config.hidden_size,
                                      eps=config.rms_norm_eps)
        self.ffn_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # The first layer of the stack is called with residual=None.
        if residual is None:
            residual = hidden_states
            normed = self.attention_norm(hidden_states)
        else:
            normed, residual = self.attention_norm(hidden_states, residual)
        attn_out = self.attention(
            positions=positions,
            hidden_states=normed,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Feed-forward sub-layer with fused residual handling.
        normed, residual = self.ffn_norm(attn_out, residual)
        return self.feed_forward(normed), residual
class InternLM2ForCausalLM(nn.Module):
    """InternLM2 with a causal LM head (``output``)."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = InternLM2Model(config, quant_config)
        self.output = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Return the final hidden states of the transformer stack."""
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states to vocab logits via the LM head."""
        return self.logits_processor(self.output.weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load checkpoint weights, fusing w1/w3 into gate_up_proj and
        splitting the packed ``wqkv`` layout into q/k/v shards."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("gate_up_proj", "w1", 0),
            ("gate_up_proj", "w3", 1),
        ]
        params = dict(self.named_parameters())
        for ckpt_name, ckpt_tensor in weights:
            # inv_freq is recomputed at init time, never loaded.
            if "rotary_emb.inv_freq" in ckpt_name:
                continue
            for fused_name, shard_name, shard_id in stacked_params_mapping:
                if shard_name not in ckpt_name:
                    continue
                mapped = ckpt_name.replace(shard_name, fused_name)
                # Skip loading extra bias for GPTQ models.
                if mapped.endswith(".bias") and mapped not in params:
                    continue
                target = params[mapped]
                target.weight_loader(target, ckpt_tensor, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if ckpt_name.endswith(".bias") and ckpt_name not in params:
                    continue
                target = params[ckpt_name]
                if "wqkv" in ckpt_name:
                    # wqkv packs, per KV group, kv_groups query rows followed
                    # by one key and one value row; re-split along that axis.
                    cfg = self.config
                    kv_groups = (cfg.num_attention_heads //
                                 cfg.num_key_value_heads)
                    head_dim = cfg.hidden_size // cfg.num_attention_heads
                    packed = ckpt_tensor.view(-1, 2 + kv_groups, head_dim,
                                              ckpt_tensor.shape[-1])
                    wq, wk, wv = torch.split(packed, [kv_groups, 1, 1], dim=1)
                    loader = target.weight_loader
                    loader(target, wq.reshape(-1, wq.shape[-1]), 'q')
                    loader(target, wk.reshape(-1, wk.shape[-1]), 'k')
                    loader(target, wv.reshape(-1, wv.shape[-1]), 'v')
                else:
                    loader = getattr(target, "weight_loader",
                                     default_weight_loader)
                    loader(target, ckpt_tensor)
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Jais model compatible with HuggingFace weights.""" + +import math +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput +from vllm.transformers_utils.configs import JAISConfig + + +class SwiGLUActivation(nn.Module): + + def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor: + return x1 * nn.functional.silu(x2) + + +def _get_alibi_slopes(n): + + def get_slopes_power_of_2(n): + start = 2**(-(2**-(math.log2(n) - 3))) + ratio = start + return [start * ratio**i for i in range(n)] + + if math.log2(n).is_integer(): + return get_slopes_power_of_2(n) + else: + closest_power_of_2 = 2**math.floor(math.log2(n)) + return (get_slopes_power_of_2(closest_power_of_2) + _get_alibi_slopes( + 2 * closest_power_of_2)[0::2][:n - closest_power_of_2]) + + +class JAISAttention(nn.Module): + + def __init__( + self, + config: JAISConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.hidden_size = config.hidden_size + total_num_heads = config.num_attention_heads + tensor_model_parallel_world_size = ( + 
get_tensor_model_parallel_world_size()) + assert total_num_heads % tensor_model_parallel_world_size == 0 + self.num_heads = total_num_heads // tensor_model_parallel_world_size + self.head_dim = self.hidden_size // total_num_heads + if hasattr(config, "scale_qk_dot_by_d"): + config.mup_scale_qk_dot_by_d = config.scale_qk_dot_by_d + self.attn_scale_power = 1.0 if config.mup_scale_qk_dot_by_d else 0.5 + self.scale = self.head_dim**-self.attn_scale_power + + self.c_attn = QKVParallelLinear( + self.hidden_size, + self.head_dim, + total_num_heads, + bias=True, + quant_config=quant_config, + ) + self.c_proj = RowParallelLinear( + self.hidden_size, + self.hidden_size, + bias=True, + quant_config=quant_config, + ) + + tp_rank = get_tensor_model_parallel_rank() + head_start = tp_rank * self.num_heads + head_end = (tp_rank + 1) * self.num_heads + alibi_slopes = _get_alibi_slopes(total_num_heads) + alibi_slopes = alibi_slopes[head_start:head_end] + self.attn = Attention( + self.num_heads, + self.head_dim, + scale=self.scale, + alibi_slopes=alibi_slopes, + ) + + def forward( + self, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.c_attn(hidden_states) + q, k, v = qkv.chunk(chunks=3, dim=-1) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + attn_output, _ = self.c_proj(attn_output) + return attn_output + + +class JAISMLP(nn.Module): + + def __init__( + self, + intermediate_size: int, + config: JAISConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + self.swiglu = config.activation_function == "swiglu" + self.c_fc = ColumnParallelLinear( + hidden_size, + intermediate_size, + bias=True, + quant_config=quant_config, + ) + self.c_fc2 = (ColumnParallelLinear( + hidden_size, + intermediate_size, + bias=True, + quant_config=quant_config, + ) if self.swiglu else None) + self.c_proj = RowParallelLinear( + intermediate_size, + 
hidden_size, + bias=True, + quant_config=quant_config, + ) + + self.act = SwiGLUActivation() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + if self.swiglu: + hidden_states2, _ = self.c_fc2(hidden_states) + hidden_states, _ = self.c_fc(hidden_states) + hidden_states = (self.act(hidden_states, hidden_states2) + if self.swiglu else self.act(hidden_states)) + hidden_states, _ = self.c_proj(hidden_states) + return hidden_states + + +class JAISBlock(nn.Module): + + def __init__( + self, + config: JAISConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.hidden_size + inner_dim = (config.n_inner if config.n_inner is not None else 4 * + hidden_size) + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = JAISAttention(config, quant_config) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.mlp = JAISMLP(inner_dim, config, quant_config) + + def forward( + self, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_output = self.attn( + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + # residual connection + hidden_states = attn_output + residual + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + return hidden_states + + +class JAISModel(nn.Module): + + def __init__( + self, + config: JAISConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + assert not config.add_cross_attention + assert not config.scale_attn_by_inverse_layer_idx + assert not config.reorder_and_upcast_attn + self.embed_dim = config.hidden_size + self.wte = 
VocabParallelEmbedding(config.vocab_size, self.embed_dim) + self.wpe = (nn.Embedding(config.max_position_embeddings, + self.embed_dim) + if config.position_embedding_type != "alibi" else None) + if hasattr(config, "embeddings_scale"): + self.embeddings_scale = config.embeddings_scale + else: + self.embeddings_scale = config.mup_embeddings_scale + self.h = nn.ModuleList([ + JAISBlock(config, quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + inputs_embeds = self.wte(input_ids) + if self.wpe is not None: + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + else: + hidden_states = inputs_embeds + hidden_states *= torch.tensor(float(self.embeddings_scale), + dtype=hidden_states.dtype) + + for i in range(len(self.h)): + layer = self.h[i] + hidden_states = layer(hidden_states, kv_caches[i], attn_metadata) + + hidden_states = self.ln_f(hidden_states) + return hidden_states + + +class JAISLMHeadModel(nn.Module): + + def __init__( + self, + config: JAISConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.quant_config = quant_config + self.transformer = JAISModel(config, quant_config) + self.lm_head_weight = self.transformer.wte.weight + if hasattr(config, "width_scale"): + self.output_logits_scale = config.width_scale + else: + self.output_logits_scale = (config.mup_output_alpha * + config.mup_width_scale) + self.logits_processor = LogitsProcessor(vocab_size=config.vocab_size, + scale=self.output_logits_scale) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + 
hidden_states = self.transformer(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head_weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + if "lm_head.weight" in name: + # GPT-2 ties the weights of the embedding layer and the final + # linear layer. + continue + if ".attn.bias" in name or ".attn.masked_bias" in name: + # Skip attention mask. + # NOTE: "c_attn.bias" should not be skipped. + continue + if "relative_pe" in name: + continue + if not name.startswith("transformer."): + name = "transformer." + name + param = params_dict[name] + # The HF's GPT-2 implementation uses Conv1D instead of Linear. + # Because of this, we need to transpose the weights. + # Note(zhuohan): the logic below might break quantized models. + for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]: + if conv1d_weight_name not in name: + continue + if not name.endswith(".weight"): + continue + loaded_weight = loaded_weight.t() + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py new file mode 100644 index 0000000..f6d7fc8 --- /dev/null +++ b/vllm/model_executor/models/llama.py @@ -0,0 +1,442 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 The vLLM team. 
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only LLaMA model compatible with HuggingFace weights.""" +from typing import Any, Dict, Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import LlamaConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import LoRAConfig +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) 
+from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, kv_cache_scales_loader) +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput +from vllm.utils import is_hip + + +class LlamaMLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QKVParallelLinear] = None, + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config) + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config) + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class LlamaAttention(nn.Module): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[Dict[str, Any]] = None, + max_position_embeddings: int = 8192, + quant_config: Optional[QuantizationConfig] = None, + bias: bool = False, + sliding_window: Optional[int] = None, + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + # This will be overwritten by model initialization if we are using it. + # N.B. currently we only support per tensor scalar scaling factors + # & only applicable to ROCm (AMD GPU). + # The scaling factor convention we are assuming is + # quantized_value * scaling_factor ~= true_value + # which is consistent with the practice of setting + # scaling_factor = tensor_amax / FPtype_max + self.kv_scale = 1.0 + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=bias, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=bias, + quant_config=quant_config, + ) + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + sliding_window=sliding_window) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata, + self.kv_scale) + output, _ = self.o_proj(attn_output) + return output + + +class LlamaDecoderLayer(nn.Module): + + def __init__( + self, + config: LlamaConfig, + quant_config: 
Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + if rope_scaling is not None and getattr( + config, "original_max_position_embeddings", None): + rope_scaling["original_max_position_embeddings"] = ( + config.original_max_position_embeddings) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) + sliding_window = getattr(config, "sliding_window", None) + # Support abacusai/Smaug-72B-v0.1 with attention_bias + # Support internlm/internlm-7b with bias + attention_bias = getattr(config, "attention_bias", False) or getattr( + config, "bias", False) + self.self_attn = LlamaAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=getattr(config, "num_key_value_heads", + config.num_attention_heads), + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + quant_config=quant_config, + bias=attention_bias, + sliding_window=sliding_window, + ) + self.mlp = LlamaMLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + 
kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states) + return hidden_states, residual + + +class LlamaModel(nn.Module): + + def __init__( + self, + config: LlamaConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.padding_idx = config.pad_token_id + lora_vocab = (lora_config.lora_extra_vocab_size * + (lora_config.max_loras or 1)) if lora_config else 0 + self.vocab_size = config.vocab_size + lora_vocab + self.org_vocab_size = config.vocab_size + self.embed_tokens = VocabParallelEmbedding( + self.vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + ) + self.layers = nn.ModuleList([ + LlamaDecoderLayer(config, quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + inputs_embeds: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states, residual = layer( + positions, + hidden_states, + kv_caches[i], + attn_metadata, + residual, + ) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + +class LlamaForCausalLM(nn.Module): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + + # LoRA specific attributes + 
supported_lora_modules = [ + "qkv_proj", + "o_proj", + "gate_up_proj", + "down_proj", + "embed_tokens", + "lm_head", + ] + embedding_modules = { + "embed_tokens": "input_embeddings", + "lm_head": "output_embeddings", + } + embedding_padding_modules = ["lm_head"] + + def __init__( + self, + config: LlamaConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.model = LlamaModel(config, quant_config, lora_config=lora_config) + self.unpadded_vocab_size = config.vocab_size + if lora_config: + self.unpadded_vocab_size += lora_config.lora_extra_vocab_size + self.lm_head = ParallelLMHead( + self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE + # We need bigger padding if using lora for kernel + # compatibility + if not lora_config else lora_config.lora_vocab_padding_size, + ) + + logit_scale = getattr(config, "logit_scale", 1.0) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size, logit_scale) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv_proj", 
".q_proj", "q"), + (".qkv_proj", ".k_proj", "k"), + (".qkv_proj", ".v_proj", "v"), + (".gate_up_proj", ".gate_proj", 0), + (".gate_up_proj", ".up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + # If this function is called, it should always initialize KV cache scale + # factors (or else raise an exception). 
Thus, handled exceptions should + # make sure to leave KV cache scale factors in a known good (dummy) state + def load_kv_cache_scales(self, quantization_param_path: str) -> None: + tp_size = get_tensor_model_parallel_world_size() + tp_rank = get_tensor_model_parallel_rank() + for layer_idx, scaling_factor in kv_cache_scales_loader( + quantization_param_path, tp_rank, tp_size, + self.config.num_hidden_layers, + self.config.__class__.model_type): + layer_self_attn = self.model.layers[layer_idx].self_attn + + if is_hip(): + # The scaling factor convention we are assuming is + # quantized_value * scaling_factor ~= true_value + # which is consistent with the practice of setting + # scaling_factor = tensor_amax / FPtype_max + scaling_factor *= 2 + if hasattr(layer_self_attn, "kv_scale"): + layer_self_attn.kv_scale = scaling_factor + else: + raise RuntimeError("Self attention has no KV cache scaling " + "factor attribute!") diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py new file mode 100644 index 0000000..dcde4df --- /dev/null +++ b/vllm/model_executor/models/llava.py @@ -0,0 +1,239 @@ +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +# TODO(xwjiang): We should port CLIPVisionModel's code over to not depend on +# transformers' impl. 
+from transformers import CLIPVisionModel, LlavaConfig + +from vllm.attention import AttentionMetadata +from vllm.config import VisionLanguageConfig +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.llama import LlamaModel +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + +_KEYS_TO_MODIFY_MAPPING = { + "language_model.lm_head": "lm_head", + "language_model.model": "language_model", +} + + +# TODO(xwjiang): Run benchmark and decide if TP. +class LlavaMultiModalProjector(nn.Module): + + def __init__(self, vision_hidden_size: int, text_hidden_size: int, + projector_hidden_act: str): + super().__init__() + + self.linear_1 = nn.Linear(vision_hidden_size, + text_hidden_size, + bias=True) + self.act = get_act_fn(projector_hidden_act) + self.linear_2 = nn.Linear(text_hidden_size, + text_hidden_size, + bias=True) + + def forward(self, image_features): + hidden_states = self.linear_1(image_features) + hidden_states = self.act(hidden_states) + hidden_states = self.linear_2(hidden_states) + return hidden_states + + +def _merge_vision_embeddings(input_ids: torch.Tensor, + inputs_embeds: torch.Tensor, + vision_embeddings: torch.Tensor, + image_token_id: int): + """In place merges in vision_embeddings with inputs_embeds.""" + mask = (input_ids == image_token_id) + inputs_embeds[mask] = vision_embeddings.view(-1, + vision_embeddings.shape[-1]) + + +class LlavaForConditionalGeneration(nn.Module): + + def __init__(self, + config: "LlavaConfig", + vision_language_config: VisionLanguageConfig, + quant_config: 
Optional["QuantizationConfig"] = None) -> None: + super().__init__() + self.config = config + + self.vision_language_config = vision_language_config + + assert self.vision_language_config, ( + "Provide `image_input_type` and other vision " + "related configurations through LLM entrypoint " + "or engine arguments.") + + if self.vision_language_config.image_input_type == ( + VisionLanguageConfig.ImageInputType.PIXEL_VALUES): + self.vision_tower = CLIPVisionModel(config.vision_config) + else: + self.vision_tower = None + + self.multi_modal_projector = LlavaMultiModalProjector( + vision_hidden_size=config.vision_config.hidden_size, + text_hidden_size=config.text_config.hidden_size, + projector_hidden_act=config.projector_hidden_act) + + self.quant_config = quant_config + self.language_model = LlamaModel(config.text_config, quant_config) + self.unpadded_vocab_size = config.text_config.vocab_size + self.lm_head = ParallelLMHead( + self.unpadded_vocab_size, + config.text_config.hidden_size, + org_num_embeddings=self.language_model.org_vocab_size) + logit_scale = getattr(config, "logit_scale", 1.0) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size, logit_scale) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + image_input: Optional[torch.Tensor] = None + ) -> SamplerOutput: # noqa: E501 + """Run forward pass for Llava 1.5. + + One key thing to understand is the `input_ids` already accounts for the + positions of the to-be-inserted image embeddings. + Concretely, consider a text prompt: + "\nUSER: What's the content of the image?\nASSISTANT:". + Tokenizer outputs: + [1, 32000, 29871, 13, 11889, 29901, 1724, 29915, 29879, 278, + 2793, 310, 278, 1967, 29973, 13, 22933, 9047, 13566, 29901]. + The to-be-inserted image has a size of 576 (24 * 24) along the context + length dimension. 
+ `input_ids` is thus [1, 32000, ..., 32000, 29871, 13, 11889, 29901, + 1724, 29915, 29879, 278, 2793, 310, 278, 1967, 29973, 13, 22933, + 9047, 13566, 29901]. + There will be 576 `32000` in the `input_ids`. + (32000 is the token id for ``.) + + This way, the `positions` and `attn_metadata` are consistent + with the `input_ids`. + + The model takes two types of image inputs: + PIXEL_VALUES and IMAGE_FEATURES. + The following shows how each maps to huggingface implementation. + PIXEL_VALUES: + - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L353 + IMAGE_FEATURES: + - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L430 + before going through the multi modal projector. + + Args: + input_ids: Flattened (concatenated) input_ids corresponding to a + batch. + image_input: A batch of image inputs. + For PIXEL_VALUES, expecting [1, 3, 336, 336]. + For IMAGE_FEATURES, expecting [1, 576, 1024]. + """ + if image_input is not None: + if list(image_input.shape[1:]) != list( + self.vision_language_config.image_input_shape[1:]): + raise ValueError( + f"The expected image tensor shape is batch dimension " + f"plus " + f"{self.vision_language_config.image_input_shape[1:]}." + f" You supplied {image_input.shape}. " + f"If you are using vLLM's entrypoint, make sure your " + f"supplied image input is consistent with " + f"image_input_shape in engine args.") + if self.vision_tower is not None: + # TODO(xwjiang): Maybe port minimal CLIPVisionModel over. 
+ image_outputs = self.vision_tower(image_input, + output_hidden_states=True) + image_features = image_outputs.hidden_states[ + self.config.vision_feature_layer] + # Copied from https://github.com/huggingface/transformers/blob/39c3c0a72af6fbda5614dde02ff236069bb79827/src/transformers/models/llava/modeling_llava.py#L421 # noqa + if self.config.vision_feature_select_strategy == "default": + image_features = image_features[:, 1:] + elif self.config.vision_feature_select_strategy == "full": + image_features = image_features + else: + raise ValueError( + f"Unexpected select feature strategy: " + f"{self.config.vision_feature_select_strategy}") + else: + image_features = image_input + vision_embeddings = self.multi_modal_projector(image_features) + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + _merge_vision_embeddings( + input_ids, inputs_embeds, vision_embeddings, + self.vision_language_config.image_token_id) + input_ids = None + else: + inputs_embeds = None + hidden_states = self.language_model(input_ids, + positions, + kv_caches, + attn_metadata, + inputs_embeds=inputs_embeds) + + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + # only doing this for language model part for now. 
+ stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): + if key_to_modify in name: + name = name.replace(key_to_modify, new_key) + use_default_weight_loading = False + if "vision" in name: + if self.vision_tower is not None: + # We only do sharding for language model and + # not vision model for now. + use_default_weight_loading = True + else: + for (param_name, weight_name, + shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + param = params_dict[name.replace(weight_name, param_name)] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + use_default_weight_loading = True + if use_default_weight_loading: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py new file mode 100644 index 0000000..c90bcfb --- /dev/null +++ b/vllm/model_executor/models/minicpm.py @@ -0,0 +1,531 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. 
class MiniCPMMoE(nn.Module):
    """Tensor-parallel mixture-of-experts layer.

    Every expert's FFN weights are split across all TP ranks; a fused MoE
    kernel computes the expert outputs, which are then all-reduced.
    """

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        params_dtype: Optional[torch.dtype] = None,
        tp_size: Optional[int] = None,
    ):
        super().__init__()
        self.tp_size = tp_size or get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        # Each rank only holds a 1/tp_size slice of every expert's FFN.
        self.intermediate_size = intermediate_size // self.tp_size
        self.params_dtype = (params_dtype if params_dtype is not None else
                             torch.get_default_dtype())

        # The router gate is replicated (not sharded) on every rank.
        self.gate = ReplicatedLinear(self.hidden_size,
                                     self.num_total_experts,
                                     bias=False,
                                     params_dtype=self.params_dtype,
                                     quant_config=None)

        # w1 and w3 are stacked along dim 1 in "ws"; w2 lives in "w2s".
        # NOTE(review): parameters are allocated directly on CUDA here,
        # unlike the Mixtral MoE which omits `device` — confirm intended.
        self.ws = nn.Parameter(
            torch.empty(self.num_total_experts,
                        2 * self.intermediate_size,
                        self.hidden_size,
                        device="cuda",
                        dtype=self.params_dtype))
        self.w2s = nn.Parameter(
            torch.empty(self.num_total_experts,
                        self.hidden_size,
                        self.intermediate_size,
                        device="cuda",
                        dtype=self.params_dtype))

        for expert_param in (self.ws, self.w2s):
            set_weight_attrs(expert_param,
                             {"weight_loader": self.weight_loader})

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor,
                      weight_name: str, expert_id: int):
        """Copy this rank's slice of one expert checkpoint weight."""
        rank = get_tensor_model_parallel_rank()
        width = self.intermediate_size
        my_slice = slice(rank * width, (rank + 1) * width)
        data = param.data
        if weight_name.endswith("w1.weight"):
            data[expert_id, 0:width, :] = loaded_weight[my_slice, :]
        if weight_name.endswith("w3.weight"):
            data[expert_id, width:2 * width, :] = loaded_weight[my_slice, :]
        if weight_name.endswith("w2.weight"):
            data[expert_id, :, :] = loaded_weight[:, my_slice]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_size = hidden_states.shape
        flat = hidden_states.view(-1, self.hidden_size)
        # (num_tokens, n_experts) routing scores.
        router_logits, _ = self.gate(flat)
        out = fused_moe(flat,
                        self.ws,
                        self.w2s,
                        router_logits,
                        self.top_k,
                        renormalize=True,
                        inplace=True)
        if self.tp_size > 1:
            out = tensor_model_parallel_all_reduce(out)
        return out.view(num_tokens, hidden_size)
" + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class MiniCPMAttention(nn.Module): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[Dict[str, Any]] = None, + max_position_embeddings: int = 8192, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + ) + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + # set rope as fp32 instead of bf16 + self.rotary_emb.cos_sin_cache = self.rotary_emb._compute_cos_sin_cache( + ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + orig_dtype = q.dtype + q, k = q.float(), k.float() + q, k = self.rotary_emb(positions, q, k) + q, k = q.to(orig_dtype), k.to(orig_dtype) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class MiniCPMDecoderLayer(nn.Module): + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, 
"max_position_embeddings", + 8192) + self.self_attn = MiniCPMAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + quant_config=quant_config, + ) + self.num_experts = getattr(self.config, "num_experts", 0) + if self.num_experts == 0: + self.mlp = MiniCPMMLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + ) + else: + self.mlp = MiniCPMMoE(num_experts=config.num_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Self Attention + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states = residual + hidden_states * \ + (self.config.scale_depth / math.sqrt(self.config.num_hidden_layers)) + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states * \ + (self.config.scale_depth / math.sqrt(self.config.num_hidden_layers)) + + return hidden_states, None + + +class MiniCPMModel(nn.Module): + + def __init__( + self, + config, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: 
class MiniCPMModel(nn.Module):
    """MiniCPM backbone: scaled embeddings, decoder stack, final norm."""

    def __init__(
        self,
        config,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        # Extra vocab rows reserved for LoRA-added tokens, if any.
        lora_vocab = (lora_config.lora_extra_vocab_size *
                      (lora_config.max_loras or 1)) if lora_config else 0
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
        )
        self.layers = nn.ModuleList([
            MiniCPMDecoderLayer(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids, applying MiniCPM's embedding scale."""
        return self.embed_tokens(input_ids) * self.config.scale_emb

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if inputs_embeds is None:
            hidden_states = self.get_input_embeddings(input_ids)
        else:
            # Caller already embedded the input (e.g. multimodal path).
            hidden_states = inputs_embeds
        residual = None
        for layer, kv_cache in zip(self.layers, kv_caches):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_cache,
                attn_metadata,
                residual,
            )
        return self.norm(hidden_states)
class MiniCPMForCausalLM(nn.Module):
    """MiniCPM causal-LM head on top of MiniCPMModel."""

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "gate_up_proj",
        "down_proj",
        "embed_tokens",
        "lm_head",
    ]
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(
        self,
        config,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.num_experts = getattr(self.config, "num_experts", 0)
        self.quant_config = quant_config
        self.model = MiniCPMModel(config,
                                  quant_config,
                                  lora_config=lora_config)
        unpadded_vocab_size = config.vocab_size
        if lora_config:
            unpadded_vocab_size += lora_config.lora_extra_vocab_size
        # Tied checkpoints reuse the input embedding as the LM head, so the
        # separate head is only built in the untied case.
        if not self.config.tie_word_embeddings:
            self.lm_head = ParallelLMHead(
                unpadded_vocab_size,
                config.hidden_size,
                org_num_embeddings=config.vocab_size,
                # LoRA needs larger padding for kernel compatibility.
                padding_size=DEFAULT_VOCAB_PADDING_SIZE
                if not lora_config else lora_config.lora_vocab_padding_size,
            )
        # MiniCPM divides hidden states by this width factor before logits.
        self.scale_width = self.config.hidden_size / self.config.dim_model_base

        self.logits_processor = LogitsProcessor(unpadded_vocab_size,
                                                config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Scale hidden states and project them to vocabulary logits."""
        hidden_states = hidden_states / self.scale_width
        if self.config.tie_word_embeddings:
            lm_head_weight = self.model.embed_tokens.weight
        else:
            lm_head_weight = self.lm_head.weight
        return self.logits_processor(lm_head_weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load checkpoint tensors into fused / expert-sharded params."""
        fused_mapping = [
            # (fused param name, checkpoint shard name, shard id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        expert_mapping = [
            # (param name, checkpoint weight name, expert id)
            ("ws" if weight_name in ["w1", "w3"] else "w2s",
             f"experts.{expert_id}.{weight_name}.weight", expert_id)
            for expert_id in range(self.num_experts)
            for weight_name in ["w1", "w2", "w3"]
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if ("rotary_emb.cos_cached" in name
                    or "rotary_emb.sin_cached" in name):
                # ColossalAI-trained checkpoints may carry these caches.
                continue

            for fused_name, shard_name, shard_id in fused_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, fused_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                param.weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for fused_name, ckpt_name, expert_id in expert_mapping:
                    if ckpt_name not in name:
                        continue
                    param = params_dict[name.replace(ckpt_name, fused_name)]
                    param.weight_loader(param,
                                        loaded_weight,
                                        ckpt_name,
                                        expert_id=expert_id)
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(param, "weight_loader",
                                            default_weight_loader)
                    weight_loader(param, loaded_weight)
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Mixtral model.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import MixtralConfig + +from vllm import _custom_ops as ops +from vllm.attention import Attention, AttentionMetadata +from vllm.config import LoRAConfig +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.fused_moe import fused_moe +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.quantization.fp8 import Fp8Config +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from 
class MixtralMoE(nn.Module):
    """A tensor-parallel MoE implementation for Mixtral that shards each expert
    across all ranks.

    Each expert's weights are sharded across all ranks and a fused MoE
    kernel is used for the forward pass, and finally we reduce the outputs
    across ranks.
    """

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        params_dtype: Optional[torch.dtype] = None,
        tp_size: Optional[int] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.tp_size = tp_size or get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        # Each rank holds a 1/tp_size slice of every expert's FFN.
        self.intermediate_size = intermediate_size // self.tp_size
        self.quant_config = quant_config

        # FIXME(pcmoritz): Make this more general to support different
        # quantization schemes
        self.use_fp8 = isinstance(quant_config, Fp8Config)

        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype

        # Gate always runs at half / full precision for now.
        self.gate = ReplicatedLinear(self.hidden_size,
                                     self.num_total_experts,
                                     bias=False,
                                     params_dtype=self.params_dtype,
                                     quant_config=None)

        # Expert weights themselves are stored in fp8 when fp8 is enabled.
        if self.use_fp8:
            params_dtype = torch.float8_e4m3fn

        # w1 and w3 are stacked along dim 1 in w13_weight; w2 is separate.
        self.w13_weight = nn.Parameter(
            torch.empty(self.num_total_experts,
                        2 * self.intermediate_size,
                        self.hidden_size,
                        dtype=params_dtype))
        self.w2_weight = nn.Parameter(
            torch.empty(self.num_total_experts,
                        self.hidden_size,
                        self.intermediate_size,
                        dtype=params_dtype))

        set_weight_attrs(self.w13_weight, {
            "weight_loader": self.weight_loader,
        })
        set_weight_attrs(self.w2_weight, {
            "weight_loader": self.weight_loader,
        })

        # Used for fp8.
        self.w13_scale = None
        self.w2_scale = None
        self.a13_scale = None
        self.a2_scale = None

        if self.use_fp8:
            # WEIGHT_SCALE (for fp8): one per-expert scalar scale.
            self.w13_scale = nn.Parameter(torch.ones(self.num_total_experts,
                                                     dtype=torch.float32),
                                          requires_grad=False)
            self.w2_scale = nn.Parameter(torch.ones(self.num_total_experts,
                                                    dtype=torch.float32),
                                         requires_grad=False)

            # If loading fp8 checkpoint, pass the weight loaders.
            # If loading an fp16 checkpoint, do not (we will quantize in
            # process_weights_after_loading()).
            if quant_config.is_checkpoint_fp8_serialized:
                set_weight_attrs(self.w13_scale, {
                    "weight_loader": self.weight_loader,
                })
                set_weight_attrs(self.w2_scale, {
                    "weight_loader": self.weight_loader,
                })

            # ACT_SCALE (for fp8): only valid for fp8-serialized checkpoints.
            if quant_config.activation_scheme == "static":
                if not quant_config.is_checkpoint_fp8_serialized:
                    raise ValueError(
                        "Found static activation scheme for checkpoint that "
                        "was not serialized fp8.")
                self.a13_scale = nn.Parameter(torch.zeros(
                    self.num_total_experts, dtype=torch.float32),
                                              requires_grad=False)
                self.a2_scale = nn.Parameter(torch.zeros(
                    self.num_total_experts, dtype=torch.float32),
                                             requires_grad=False)

                set_weight_attrs(self.a13_scale, {
                    "weight_loader": self.weight_loader,
                })
                set_weight_attrs(self.a2_scale, {
                    "weight_loader": self.weight_loader,
                })

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor,
                      weight_name: str, expert_id: int):
        """Copy this rank's slice of one expert's checkpoint tensor.

        w1/w3 slices go into the stacked halves of w13_weight; w2 is
        sliced along its input dimension. Scale tensors are per-expert
        scalars and are stored whole.
        """
        tp_rank = get_tensor_model_parallel_rank()
        param_data = param.data
        shard_size = self.intermediate_size
        shard = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size)
        if weight_name.endswith("w1.weight"):
            param_data[expert_id, 0:shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w3.weight"):
            param_data[expert_id,
                       shard_size:2 * shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w2.weight"):
            param_data[expert_id, :, :] = loaded_weight[:, shard]
        if "act_scale" in weight_name or "weight_scale" in weight_name:
            param_data[expert_id] = loaded_weight

    def process_weights_after_loading(self):
        # Fp8 is the only case where we need to process after loading.
        if not self.use_fp8:
            return

        # If checkpoint is fp16, quantize here (per-expert dynamic scales).
        if not self.quant_config.is_checkpoint_fp8_serialized:
            w13_weight = torch.empty_like(self.w13_weight.data,
                                          dtype=torch.float8_e4m3fn)
            w2_weight = torch.empty_like(self.w2_weight.data,
                                         dtype=torch.float8_e4m3fn)
            for expert in range(self.num_total_experts):
                w13_weight[expert, :, :], self.w13_scale[
                    expert] = ops.scaled_fp8_quant(
                        self.w13_weight.data[expert, :, :])
                w2_weight[expert, :, :], self.w2_scale[
                    expert] = ops.scaled_fp8_quant(
                        self.w2_weight.data[expert, :, :])
            self.w13_weight = nn.Parameter(w13_weight, requires_grad=False)
            self.w2_weight = nn.Parameter(w2_weight, requires_grad=False)

        # If checkpoint is fp8 + static, cleanup act_scales.
        # Since state_dict has an act_scale per expert but our kernels
        # are passed one act_scale shared across all experts.
        elif self.quant_config.activation_scheme == "static":
            if self.a13_scale is None or self.a2_scale is None:
                raise ValueError(
                    "QuantConfig has static quantization, but found "
                    "activation scales are None.")

            if (not all_close_1d(self.a13_scale)
                    or not all_close_1d(self.a2_scale)):
                print_warning_once(
                    "Found act_scales that are not equal for fp8 MoE layer. "
                    "Using the maximum across experts for each layer. ")

            # Collapse the per-expert scales into a single shared scalar.
            self.a13_scale = nn.Parameter(self.a13_scale.max(),
                                          requires_grad=False)
            self.a2_scale = nn.Parameter(self.a2_scale.max(),
                                         requires_grad=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route each token to top-k experts and combine their outputs."""
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, self.hidden_size)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = fused_moe(hidden_states,
                                        self.w13_weight,
                                        self.w2_weight,
                                        router_logits,
                                        self.top_k,
                                        renormalize=True,
                                        inplace=True,
                                        use_fp8=self.use_fp8,
                                        w1_scale=self.w13_scale,
                                        w2_scale=self.w2_scale,
                                        a1_scale=self.a13_scale,
                                        a2_scale=self.a2_scale)

        if self.tp_size > 1:
            final_hidden_states = tensor_model_parallel_all_reduce(
                final_hidden_states)

        return final_hidden_states.view(num_tokens, hidden_size)
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.sliding_window = sliding_window + + if isinstance( + quant_config, + Fp8Config) and not quant_config.is_checkpoint_fp8_serialized: + print_warning_once( + "For Mixtral FP8 quantization, we currently do not quantize " + "the attention layers until their FP8 performance is improved." + ) + quant_config = None + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + ) + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position, + base=int(self.rope_theta), + is_neox_style=True, + ) + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + sliding_window=self.sliding_window, + ) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class MixtralDecoderLayer(nn.Module): + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + # Requires transformers > 4.32.0 + rope_theta = getattr(config, 
"rope_theta", 10000) + self.self_attn = MixtralAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + max_position=config.max_position_embeddings, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + sliding_window=config.sliding_window, + quant_config=quant_config) + self.block_sparse_moe = MixtralMoE( + num_experts=config.num_local_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + quant_config=quant_config) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> torch.Tensor: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.block_sparse_moe(hidden_states) + return hidden_states, residual + + +class MixtralModel(nn.Module): + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: + super().__init__() + self.padding_idx = config.pad_token_id + lora_vocab = (lora_config.lora_extra_vocab_size * + (lora_config.max_loras or 1)) if lora_config else 0 + self.vocab_size = config.vocab_size + lora_vocab + self.org_vocab_size = config.vocab_size + + self.embed_tokens = VocabParallelEmbedding( + self.vocab_size, + 
class MixtralForCausalLM(nn.Module):
    """Mixtral causal-LM: decoder backbone + vocab-parallel LM head."""

    fall_back_to_pt_during_load = False

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "embed_tokens",
        "lm_head",
    ]
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(
        self,
        config: MixtralConfig,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.model = MixtralModel(config,
                                  quant_config,
                                  lora_config=lora_config)
        self.unpadded_vocab_size = config.vocab_size
        if lora_config:
            self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
        self.lm_head = ParallelLMHead(
            self.unpadded_vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            # LoRA needs larger padding for kernel compatibility.
            padding_size=DEFAULT_VOCAB_PADDING_SIZE
            if not lora_config else lora_config.lora_vocab_padding_size,
        )
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        return self.logits_processor(self.lm_head.weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Map checkpoint tensors onto fused QKV and expert parameters."""
        fused_mapping = [
            # (fused param name, checkpoint shard name, shard id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]

        # Order matters below: ".weight_scale" entries must come before
        # ".weight" entries, since the latter is a substring of the former
        # and matching is substring-based.
        expert_mapping = []
        for ckpt_suffix, w13_name, w2_name in (
                ("weight_scale", "w13_scale", "w2_scale"),
                ("weight", "w13_weight", "w2_weight"),
                ("act_scale", "a13_scale", "a2_scale"),
        ):
            for expert_id in range(self.config.num_local_experts):
                for weight_name in ["w1", "w2", "w3"]:
                    fused = (w13_name
                             if weight_name in ["w1", "w3"] else w2_name)
                    expert_mapping.append(
                        (fused,
                         f"experts.{expert_id}.{weight_name}.{ckpt_suffix}",
                         expert_id))

        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue

            for fused_name, shard_name, shard_id in fused_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, fused_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                param.weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for fused_name, ckpt_name, expert_id in expert_mapping:
                    if ckpt_name not in name:
                        continue
                    param = params_dict[name.replace(ckpt_name, fused_name)]
                    param.weight_loader(param,
                                        loaded_weight,
                                        ckpt_name,
                                        expert_id=expert_id)
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(param, "weight_loader",
                                            default_weight_loader)
                    weight_loader(param, loaded_weight)
in weights: + if "rotary_emb.inv_freq" in name: + continue + + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + for param_name, weight_name, expert_id in expert_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, + loaded_weight, + weight_name, + expert_id=expert_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + +def all_close_1d(x: torch.Tensor) -> bool: + assert len(x.shape) == 1 + return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0])) diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py new file mode 100644 index 0000000..38c62af --- /dev/null +++ b/vllm/model_executor/models/mixtral_quant.py @@ -0,0 +1,404 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Mixtral model.""" +from typing import Iterable, List, Optional, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from transformers import MixtralConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class MixtralMLP(nn.Module): + + def __init__( + self, + num_experts: int, + hidden_size: int, + intermediate_size: int, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.num_experts = num_experts + self.ffn_dim = 
intermediate_size + self.hidden_dim = hidden_size + + self.w1 = ReplicatedLinear(self.hidden_dim, + self.ffn_dim, + bias=False, + quant_config=quant_config) + self.w2 = ReplicatedLinear(self.ffn_dim, + self.hidden_dim, + bias=False, + quant_config=quant_config) + self.w3 = ReplicatedLinear(self.hidden_dim, + self.ffn_dim, + bias=False, + quant_config=quant_config) + + # TODO: Use vllm's SiluAndMul + self.act_fn = nn.SiLU() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + w1_out, _ = self.w1(hidden_states) + w1_out = self.act_fn(w1_out) + w3_out, _ = self.w3(hidden_states) + current_hidden_states = w1_out * w3_out + current_hidden_states, _ = self.w2(current_hidden_states) + return current_hidden_states + + +class MixtralMoE(nn.Module): + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.rank = get_tensor_model_parallel_rank() + self.tp_size = get_tensor_model_parallel_world_size() + self.num_total_experts = config.num_local_experts + self.top_k = config.num_experts_per_tok + if self.tp_size > self.num_total_experts: + raise ValueError( + f"Tensor parallel size {self.tp_size} is greater than " + f"the number of experts {self.num_total_experts}.") + # Split experts equally between ranks + self.expert_indicies = np.array_split(range( + self.num_total_experts), self.tp_size)[self.rank].tolist() + if not self.expert_indicies: + raise ValueError( + f"Rank {self.rank} has no experts assigned to it.") + + self.experts = nn.ModuleList([ + MixtralMLP(self.num_total_experts, + config.hidden_size, + config.intermediate_size, + quant_config=quant_config) + if idx in self.expert_indicies else None + for idx in range(self.num_total_experts) + ]) + self.gate = ReplicatedLinear(config.hidden_size, + self.num_total_experts, + bias=False, + quant_config=None) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + num_tokens, hidden_dim = 
hidden_states.shape + hidden_states = hidden_states.view(-1, hidden_dim) + # router_logits: (num_tokens, n_experts) + router_logits, _ = self.gate(hidden_states) + + routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) + routing_weights, selected_experts = torch.topk(routing_weights, + self.top_k, + dim=-1) + routing_weights /= routing_weights.sum(dim=-1, keepdim=True) + + final_hidden_states = None + for expert_idx in self.expert_indicies: + expert_layer = self.experts[expert_idx] + expert_mask = (selected_experts == expert_idx) + expert_weights = (routing_weights * expert_mask).sum(dim=-1, + keepdim=True) + + current_hidden_states = expert_layer(hidden_states).mul_( + expert_weights) + if final_hidden_states is None: + final_hidden_states = current_hidden_states + else: + final_hidden_states.add_(current_hidden_states) + + return tensor_model_parallel_all_reduce(final_hidden_states).view( + num_tokens, hidden_dim) + + +class MixtralAttention(nn.Module): + + def __init__(self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + max_position: int = 4096 * 32, + rope_theta: float = 10000, + quant_config: Optional[QuantizationConfig] = None, + sliding_window: Optional[int] = None) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.sliding_window = sliding_window + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + ) + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position, + base=int(self.rope_theta), + is_neox_style=True, + ) + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + sliding_window=self.sliding_window, + ) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class MixtralDecoderLayer(nn.Module): + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + # Requires transformers > 4.32.0 + rope_theta = getattr(config, "rope_theta", 10000) + self.self_attn = MixtralAttention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + max_position=config.max_position_embeddings, + num_kv_heads=config.num_key_value_heads, + rope_theta=rope_theta, + sliding_window=config.sliding_window, + 
quant_config=quant_config) + self.block_sparse_moe = MixtralMoE(config=config, + quant_config=quant_config) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> torch.Tensor: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.block_sparse_moe(hidden_states) + return hidden_states, residual + + +class MixtralModel(nn.Module): + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + ) + self.layers = nn.ModuleList([ + MixtralDecoderLayer(config, quant_config=quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + residual = None + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states, residual = layer(positions, hidden_states, + kv_caches[i], attn_metadata, + residual) + hidden_states, _ = 
self.norm(hidden_states, residual) + return hidden_states + + +class MixtralForCausalLM(nn.Module): + fall_back_to_pt_during_load = False + + def __init__( + self, + config: MixtralConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.quant_config = quant_config + self.model = MixtralModel(config, quant_config) + self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: Optional[torch.Tensor], + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ] + + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Skip experts that are not assigned to this worker. + if ("block_sparse_moe.experts." in name + and name not in params_dict): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py new file mode 100644 index 0000000..6fa5c5b --- /dev/null +++ b/vllm/model_executor/models/mpt.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# Adapted from https://huggingface.co/mosaicml/mpt-7b/tree/main +import math +from typing import Iterable, List, Optional, Tuple + +import torch +import torch.nn as nn + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import (get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size) +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput +from vllm.transformers_utils.configs.mpt import MPTConfig + + +def _get_alibi_slopes( + total_num_heads: int, + alibi_bias_max: int, +) -> torch.Tensor: + next_power_of_2 = 
2**math.ceil(math.log2(total_num_heads)) + m = torch.arange(1, next_power_of_2 + 1, dtype=torch.float32) + m = m.mul(alibi_bias_max / next_power_of_2) + slopes = 1.0 / torch.pow(2, m) + if next_power_of_2 != total_num_heads: + slopes = torch.concat([slopes[1::2], slopes[::2]])[:total_num_heads] + return slopes + + +class MPTAttention(nn.Module): + + def __init__( + self, + config: MPTConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.d_model = config.d_model + self.total_num_heads = config.n_heads + self.head_dim = self.d_model // self.total_num_heads + self.clip_qkv = config.attn_config["clip_qkv"] + self.qk_ln = config.attn_config["qk_ln"] + self.alibi_bias_max = config.attn_config["alibi_bias_max"] + if "kv_n_heads" in config.attn_config: + self.total_num_kv_heads = config.attn_config['kv_n_heads'] + else: + self.total_num_kv_heads = self.total_num_heads + assert not config.attn_config["prefix_lm"] + assert config.attn_config["alibi"] + + # pylint: disable=invalid-name + self.Wqkv = QKVParallelLinear( + self.d_model, + self.d_model // self.total_num_heads, + self.total_num_heads, + self.total_num_kv_heads, + bias=not config.no_bias, + quant_config=quant_config, + ) + if self.qk_ln: + self.q_ln = nn.LayerNorm(self.d_model) + self.k_ln = nn.LayerNorm(self.d_model) + self.out_proj = RowParallelLinear( + self.d_model, + self.d_model, + bias=not config.no_bias, + quant_config=quant_config, + ) + + tp_world_size = get_tensor_model_parallel_world_size() + assert self.total_num_heads % tp_world_size == 0 + self.num_heads = self.total_num_heads // tp_world_size + + if self.total_num_kv_heads >= tp_world_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_world_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_world_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_world_size) + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + # Create the alibi slopes and slice them. + tp_rank = get_tensor_model_parallel_rank() + head_start = tp_rank * self.num_heads + head_end = (tp_rank + 1) * self.num_heads + alibi_slopes = _get_alibi_slopes(self.total_num_heads, + self.alibi_bias_max) + alibi_slopes = alibi_slopes[head_start:head_end].tolist() + + self.head_dim = self.d_model // self.total_num_heads + scaling = self.head_dim**-0.5 + self.attn = Attention(self.num_heads, + self.head_dim, + scaling, + alibi_slopes=alibi_slopes, + num_kv_heads=self.num_kv_heads) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + del position_ids # unused. + qkv, _ = self.Wqkv(hidden_states) + if self.clip_qkv is not None: + qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + if self.qk_ln: + q = self.q_ln(q) + k = self.k_ln(k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.out_proj(attn_output) + return output + + +class MPTMLP(nn.Module): + + def __init__( + self, + config: MPTConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.d_model + expansion_ratio = config.expansion_ratio + intermediate_size = expansion_ratio * hidden_size + self.up_proj = ColumnParallelLinear( + hidden_size, + intermediate_size, + bias=not config.no_bias, + quant_config=quant_config, + ) + self.act = get_act_fn("gelu", quant_config, intermediate_size) + self.down_proj = RowParallelLinear( + intermediate_size, + hidden_size, + bias=not config.no_bias, + quant_config=quant_config, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x, _ = 
self.up_proj(x) + x = self.act(x) + x, _ = self.down_proj(x) + return x + + +class MPTBlock(nn.Module): + + def __init__( + self, + config: MPTConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + hidden_size = config.d_model + self.norm_1 = nn.LayerNorm(hidden_size) + self.attn = MPTAttention(config, quant_config) + self.norm_2 = nn.LayerNorm(hidden_size) + self.ffn = MPTMLP(config, quant_config) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + x = self.norm_1(hidden_states) + x = self.attn( + position_ids=position_ids, + hidden_states=x, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states = hidden_states + x + x = self.norm_2(hidden_states) + x = self.ffn(x) + hidden_states = hidden_states + x + return hidden_states + + +class MPTModel(nn.Module): + + def __init__( + self, + config: MPTConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + assert config.embedding_fraction == 1.0 + assert config.norm_type == "low_precision_layernorm" + + self.wte = VocabParallelEmbedding( + config.vocab_size, + config.d_model, + ) + self.blocks = nn.ModuleList( + [MPTBlock(config, quant_config) for _ in range(config.n_layers)]) + self.norm_f = nn.LayerNorm(config.d_model) + if config.no_bias: + for module in self.modules(): + if hasattr(module, "bias") and isinstance( + module.bias, nn.Parameter): + # Remove the bias term in Linear and LayerNorm. 
+ module.register_parameter("bias", None) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.wte(input_ids) + for i in range(len(self.blocks)): + block = self.blocks[i] + hidden_states = block( + position_ids, + hidden_states, + kv_caches[i], + attn_metadata, + ) + hidden_states = self.norm_f(hidden_states) + return hidden_states + + +class MPTForCausalLM(nn.Module): + + def __init__( + self, + config: MPTConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + assert config.tie_word_embeddings + self.quant_config = quant_config + + self.transformer = MPTModel(config, quant_config) + self.lm_head_weight = self.transformer.wte.weight + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.transformer(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head_weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py new file mode 100644 index 0000000..f212ea2 --- /dev/null +++ b/vllm/model_executor/models/olmo.py @@ -0,0 +1,356 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.40.1/src/transformers/models/olmo/modeling_olmo.py +# Copyright 2024 The vLLM team. +# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only OLMo model compatible with HuggingFace weights.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import OlmoConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class OlmoAttention(nn.Module): + """ + This is the attention block where the output is computed as + ``Attention(LN(x))`` in ``MLP(LN(x + Attention(LN(x))))`` + (plus another skip connection). 
+ """ + + def __init__( + self, + config: OlmoConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + tensor_model_parallel_world_size = ( + get_tensor_model_parallel_world_size()) + self.total_num_heads = config.num_attention_heads + + assert self.hidden_size % self.total_num_heads == 0 + assert self.total_num_heads % tensor_model_parallel_world_size == 0 + + self.num_heads = (self.total_num_heads // + tensor_model_parallel_world_size) + self.head_dim = self.hidden_size // self.total_num_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.clip_qkv = config.clip_qkv + + # Attention input projection. Projects x -> (q, k, v) + self.qkv_proj = QKVParallelLinear( + self.hidden_size, + self.head_dim, + self.total_num_heads, + bias=config.attention_bias, + quant_config=quant_config, + ) + + # Rotary embeddings. + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=self.max_position_embeddings, + base=self.rope_theta, + ) + self.scaling = self.head_dim**-0.5 + self.attn = Attention(self.num_heads, + self.head_dim, + scale=self.scaling) + + # Attention output projection. 
+ self.o_proj = RowParallelLinear( + self.hidden_size, + self.hidden_size, + bias=config.attention_bias, + quant_config=quant_config, + ) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + if self.clip_qkv is not None: + qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) + q, k, v = qkv.chunk(chunks=3, dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class OlmoMLP(nn.Module): + """ + This is the MLP block where the output is computed as + ``MLP(LN(x))`` in ``MLP(LN(x + Attention(LN(x))))`` + (plus another skip connection). + """ + + def __init__( + self, + config: OlmoConfig, + quant_config: Optional[QuantizationConfig] = None, + ): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + + # Feed-forward input projection. + self.gate_up_proj = MergedColumnParallelLinear( + self.hidden_size, + [self.intermediate_size] * 2, + bias=False, + quant_config=quant_config, + ) + + # Activation function. + self.act_fn = SiluAndMul() + + # Feed-forward output projection. + self.down_proj = RowParallelLinear( + self.intermediate_size, + self.hidden_size, + bias=False, + quant_config=quant_config, + ) + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class OlmoDecoderLayer(nn.Module): + """ + This is a typical transformer block where the output is + computed as ``MLP(LN(x + Attention(LN(x))))`` + (plus another skip connection). + """ + + def __init__(self, + config: OlmoConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + # Attention block. 
+ self.self_attn = OlmoAttention(config, quant_config) + + # MLP block. + self.mlp = OlmoMLP(config, quant_config) + + # LayerNorm + self.input_layernorm = nn.LayerNorm(config.hidden_size, + elementwise_affine=False, + bias=False) + self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, + elementwise_affine=False, + bias=False) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: + # Attention block. + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + hidden_states = self.self_attn(positions, hidden_states, kv_cache, + attn_metadata) + hidden_states = hidden_states + residual + + # MLP block. + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + return hidden_states + + +class OlmoModel(nn.Module): + + def __init__(self, + config: OlmoConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + + self.embed_tokens = VocabParallelEmbedding(config.vocab_size, + config.hidden_size) + self.layers = nn.ModuleList([ + OlmoDecoderLayer(config, quant_config) + for layer_idx in range(config.num_hidden_layers) + ]) + self.norm = nn.LayerNorm(config.hidden_size, + elementwise_affine=False, + bias=False) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + """ + :param input_ids: A tensor of shape `(batch_size, seq_len)`. + """ + # Get embeddings of input. + # shape: (batch_size, seq_len, d_model) + inputs_embeds = self.embed_tokens(input_ids) + + # embed positions + hidden_states = inputs_embeds + + # Apply blocks one-by-one. 
+ for layer_idx, decoder_layer in enumerate(self.layers): + # shape: (batch_size, seq_len, d_model) + hidden_states = decoder_layer( + positions, + hidden_states, + kv_caches[layer_idx], + attn_metadata, + ) + + # Apply final layer norm. + # shape: (batch_size, seq_len or 1, d_model) + hidden_states = self.norm(hidden_states) + return hidden_states + + +class OlmoForCausalLM(nn.Module): + """ + Extremely barebones HF model wrapper. + """ + + def __init__(self, + config: OlmoConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + self.model = OlmoModel(config, quant_config) + if config.tie_word_embeddings: + self.lm_head_weight = self.model.embed_tokens.weight + else: + self.unpadded_vocab_size = config.vocab_size + self.lm_head = ParallelLMHead( + self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + ) + self.lm_head_weight = self.lm_head.weight + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model( + input_ids=input_ids, + positions=positions, + kv_caches=kv_caches, + attn_metadata=attn_metadata, + ) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head_weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", 
"v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py new file mode 100644 index 0000000..336f765 --- /dev/null +++ b/vllm/model_executor/models/opt.py @@ -0,0 +1,349 @@ +# coding=utf-8 +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/opt/modeling_opt.py +# Copyright 2023 The vLLM team. +# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights +# reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only OPT model compatible with HuggingFace weights."""
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import OPTConfig

from vllm.attention import Attention, AttentionMetadata
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                               QKVParallelLinear,
                                               ReplicatedLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.sequence import SamplerOutput


class OPTLearnedPositionalEmbedding(nn.Embedding):
    """Learned positional embedding with OPT's historical offset-by-2 hack."""

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # OPT is set up so that if padding_idx is specified then the
        # embedding ids are offset by 2 and num_embeddings is adjusted
        # accordingly. Other models don't have this hack.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, positions: torch.Tensor):
        return super().forward(positions + self.offset)


class OPTAttention(nn.Module):
    """Multi-head self-attention with tensor-parallel fused-QKV and output
    projections."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        bias: bool = True,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        tp_size = get_tensor_model_parallel_world_size()
        total_num_heads = num_heads
        assert num_heads % tp_size == 0
        self.num_heads = total_num_heads // tp_size
        self.head_dim = embed_dim // total_num_heads
        self.scaling = self.head_dim**-0.5

        self.qkv_proj = QKVParallelLinear(
            embed_dim,
            self.head_dim,
            total_num_heads,
            bias=bias,
            quant_config=quant_config,
        )
        self.out_proj = RowParallelLinear(
            embed_dim,
            embed_dim,
            bias=bias,
            quant_config=quant_config,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              scale=self.scaling)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        attn_out = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.out_proj(attn_out)
        return output


class OPTDecoderLayer(nn.Module):
    """One OPT decoder block; layer-norm placement follows
    `config.do_layer_norm_before` (pre-norm for 125m..175B, post-norm for
    the 350m checkpoint)."""

    def __init__(
        self,
        config: OPTConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.self_attn = OPTAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            bias=config.enable_bias,
            quant_config=quant_config,
        )
        self.do_layer_norm_before = config.do_layer_norm_before

        self.self_attn_layer_norm = nn.LayerNorm(
            self.embed_dim,
            elementwise_affine=config.layer_norm_elementwise_affine)
        self.fc1 = ColumnParallelLinear(
            self.embed_dim,
            config.ffn_dim,
            bias=config.enable_bias,
            quant_config=quant_config,
        )
        self.activation_fn = get_act_fn(config.activation_function,
                                        quant_config, config.ffn_dim)
        self.fc2 = RowParallelLinear(
            config.ffn_dim,
            self.embed_dim,
            bias=config.enable_bias,
            quant_config=quant_config,
        )
        self.final_layer_norm = nn.LayerNorm(
            self.embed_dim,
            elementwise_affine=config.layer_norm_elementwise_affine)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        # --- Self attention (125m..175B norm BEFORE, 350m norm AFTER) ---
        shortcut = hidden_states
        if self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states = self.self_attn(hidden_states=hidden_states,
                                       kv_cache=kv_cache,
                                       attn_metadata=attn_metadata)
        hidden_states = shortcut + hidden_states
        if not self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # --- Feed-forward (same norm-placement rule) ---
        shortcut = hidden_states
        if self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)
        hidden_states, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states, _ = self.fc2(hidden_states)
        hidden_states = shortcut + hidden_states
        if not self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states


class OPTDecoder(nn.Module):
    """Token/position embedding front end, the decoder-layer stack, and the
    optional final layer norm / embedding-width projections of OPT."""

    def __init__(
        self,
        config: OPTConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.word_embed_proj_dim,
        )
        # Positional embeddings are replicated (not sharded).
        self.embed_positions = OPTLearnedPositionalEmbedding(
            config.max_position_embeddings, config.hidden_size)

        # project_in/project_out exist only when the word-embedding width
        # differs from the hidden size; both are replicated if present.
        if config.word_embed_proj_dim != config.hidden_size:
            self.project_out = ReplicatedLinear(config.hidden_size,
                                                config.word_embed_proj_dim,
                                                bias=False,
                                                quant_config=quant_config)
            self.project_in = ReplicatedLinear(config.word_embed_proj_dim,
                                               config.hidden_size,
                                               bias=False,
                                               quant_config=quant_config)
        else:
            self.project_out = None
            self.project_in = None

        # The only purpose of `config._remove_final_layer_norm` is backward
        # compatibility with checkpoints fine-tuned before transformers
        # v4.20.1; see https://github.com/facebookresearch/metaseq/pull/164
        if config.do_layer_norm_before and not config._remove_final_layer_norm:
            self.final_layer_norm = nn.LayerNorm(
                config.hidden_size,
                elementwise_affine=config.layer_norm_elementwise_affine)
        else:
            self.final_layer_norm = None

        self.layers = nn.ModuleList([
            OPTDecoderLayer(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        inputs_embeds = self.embed_tokens(input_ids)
        pos_embeds = self.embed_positions(positions)
        if self.project_in is not None:
            inputs_embeds, _ = self.project_in(inputs_embeds)
        hidden_states = inputs_embeds + pos_embeds

        # One KV cache per layer.
        for layer, cache in zip(self.layers, kv_caches):
            hidden_states = layer(hidden_states, cache, attn_metadata)

        if self.final_layer_norm is not None:
            hidden_states = self.final_layer_norm(hidden_states)
        if self.project_out is not None:
            hidden_states, _ = self.project_out(hidden_states)
        return hidden_states


class OPTModel(nn.Module):
    """Wrapper preserving HF's `model.decoder` module hierarchy."""

    def __init__(
        self,
        config: OPTConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.decoder = OPTDecoder(config, quant_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        return self.decoder(input_ids, positions, kv_caches, attn_metadata)


class OPTForCausalLM(nn.Module):
    """OPT with a tied LM head plus vLLM logits processing and sampling."""

    def __init__(
        self,
        config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = OPTModel(config, quant_config)
        # The LM head is tied to the input embeddings.
        self.lm_head_weight = self.model.decoder.embed_tokens.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        return self.logits_processor(self.lm_head_weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load checkpoint tensors, fusing q/k/v shards into the stacked
        qkv_proj parameter and skipping the tied lm_head weight."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            # lm_head is tied to the embeddings; nothing to load.
            if "lm_head.weight" in name:
                continue
            if name.startswith("decoder."):
                name = "model." + name

            for stacked_name, shard_name, shard_id in stacked_params_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, stacked_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                param.weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                loader = getattr(param, "weight_loader",
                                 default_weight_loader)
                loader(param, loaded_weight)
# ---- next file in the patch: vllm/model_executor/models/orion.py ----
# coding=utf-8
# Adapted from
# https://huggingface.co/OrionStarAI/Orion-14B-Base/blob/main/modeling_orion.py
# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved.
# Copyright (c) OrionStar Inc.
# LICENSE: https://huggingface.co/OrionStarAI/Orion-14B-Base/blob/main/LICENSE
"""Inference-only Orion-14B model compatible with HuggingFace weights."""
from typing import Any, Dict, Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import PretrainedConfig

from vllm.attention import Attention, AttentionMetadata
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.linear import (MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    ParallelLMHead, VocabParallelEmbedding)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.sequence import SamplerOutput


class OrionMLP(nn.Module):
    """SwiGLU feed-forward block: fused gate/up projection followed by a
    down projection.

    Raises:
        ValueError: if `hidden_act` is anything other than "silu".
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        activated = self.act_fn(gate_up)
        out, _ = self.down_proj(activated)
        return out


class OrionAttention(nn.Module):
    """GQA self-attention with rotary position embedding; KV heads are
    partitioned (or replicated) across the tensor-parallel group."""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        max_position_embeddings: int = 8192,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_out = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_out)
        return output


class OrionDecoderLayer(nn.Module):
    """Pre-norm decoder block: LayerNorm -> attention -> residual,
    LayerNorm -> MLP -> residual."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        rope_scaling = getattr(config, "rope_scaling", None)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.self_attn = OrionAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
        )
        self.mlp = OrionMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
        )

        # NOTE(review): Orion uses plain LayerNorm even though the config
        # field is named `rms_norm_eps` — this mirrors the reference
        # implementation; confirm against upstream if in doubt.
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.rms_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run one decoder block.

        The incoming `residual` is ignored and the second return value is
        always None: this layer applies its residual additions internally,
        keeping the signature compatible with OrionModel's calling loop.

        NOTE(fix): the return annotation previously claimed
        ``Tuple[torch.Tensor, torch.Tensor]`` although the second element
        is always None; it is now ``Optional[torch.Tensor]``.
        """
        # Self attention.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        hidden_states = residual + hidden_states

        # Fully connected.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states, None


class OrionModel(nn.Module):
    """Token embedding, decoder-layer stack and final LayerNorm."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            OrionDecoderLayer(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        # `residual` stays None throughout: OrionDecoderLayer handles its
        # residual connections internally and always returns None for it.
        residual = None
        for layer, cache in zip(self.layers, kv_caches):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                cache,
                attn_metadata,
                residual,
            )
        return self.norm(hidden_states)


class OrionForCausalLM(nn.Module):
    """Orion model with an (untied) LM head plus vLLM logits processing and
    sampling."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = OrionModel(config, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        return self.logits_processor(self.lm_head.weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Load checkpoint tensors, fusing q/k/v and gate/up shards into
        their stacked parameters."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if ("rotary_emb.cos_cached" in name
                    or "rotary_emb.sin_cached" in name):
                # Models trained using ColossalAI may include these tensors
                # in the checkpoint. Skip them.
                continue
            for stacked_name, shard_name, shard_id in stacked_params_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, stacked_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                param.weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                loader = getattr(param, "weight_loader",
                                 default_weight_loader)
                loader(param, loaded_weight)
# ---- next file in the patch: vllm/model_executor/models/phi.py ----
# coding=utf-8
# Adapted from
# https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_phi.py
# Copyright 2023 The vLLM team.
# Copyright (c) Microsoft Corporation.
# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved.
# Licensed under the MIT license.
#
# BSD 3-Clause License
#
# Copyright (c) 2022, Tri Dao, trid@cs.stanford.edu.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Inference-only Phi-1.5 model compatible with HuggingFace weights.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class PhiAttention(nn.Module): + + def __init__(self, + config: PretrainedConfig, + quant_config: 
Optional[QuantizationConfig] = None): + super().__init__() + self.total_num_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.total_num_heads + + tensor_model_parallel_world_size = ( + get_tensor_model_parallel_world_size()) + assert self.total_num_heads % tensor_model_parallel_world_size == 0 + self.num_heads = (self.total_num_heads // + tensor_model_parallel_world_size) + + # pylint: disable=C0103 + self.qkv_proj = QKVParallelLinear( + self.hidden_size, + self.head_size, + self.total_num_heads, + bias=True, + quant_config=quant_config, + ) + self.dense = RowParallelLinear( + self.hidden_size, + self.hidden_size, + quant_config=quant_config, + ) + + scaling = self.head_size**-0.5 + rotary_dim = int(config.partial_rotary_factor * + (config.hidden_size // config.num_attention_heads)) + assert rotary_dim % 2 == 0 + + # pylint: disable=C0301 + # Refer to: + # https://huggingface.co/microsoft/phi-1_5/blob/d212a789620c380ff32ca1d1ee9943a777360987/modeling_phi.py#L518 + rope_theta = 10000 + max_position_embeddings = getattr(config, "n_positions", 2048) + self.rotary_emb = get_rope( + self.head_size, + rotary_dim=rotary_dim, + max_position=max_position_embeddings, + base=rope_theta, + ) + self.attn = Attention(self.num_heads, self.head_size, scaling) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.chunk(chunks=3, dim=-1) + q, k = self.rotary_emb(position_ids, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.dense(attn_output) + return output + + +class PhiMLP(nn.Module): + + def __init__(self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + + n_inner = getattr(config, "n_inner", None) + n_inner = n_inner if n_inner is not None else 4 * 
config.hidden_size + + self.fc1 = ColumnParallelLinear( + config.hidden_size, + n_inner, + quant_config=quant_config, + ) + self.fc2 = RowParallelLinear( + n_inner, + config.hidden_size, + quant_config=quant_config, + ) + self.act = get_act_fn(config.hidden_act, quant_config, n_inner) + + def forward(self, hidden_states): + hidden_states, _ = self.fc1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states, _ = self.fc2(hidden_states) + return hidden_states + + +class PhiLayer(nn.Module): + + def __init__(self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.input_layernorm = nn.LayerNorm(config.hidden_size, + eps=config.layer_norm_eps) + self.self_attn = PhiAttention(config, quant_config) + self.mlp = PhiMLP(config, quant_config) + + def forward( + self, + position_ids: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + attn_outputs = self.self_attn( + position_ids=position_ids, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + feed_forward_hidden_states = self.mlp(hidden_states) + hidden_states = attn_outputs + feed_forward_hidden_states + residual + return hidden_states + + +class PhiModel(nn.Module): + + def __init__(self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + self.quant_config = quant_config + self.embed_tokens = VocabParallelEmbedding(config.vocab_size, + config.hidden_size) + self.layers = nn.ModuleList([ + PhiLayer(config, quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.final_layernorm = nn.LayerNorm(config.hidden_size, + eps=config.layer_norm_eps) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: 
AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + for i in range(self.config.num_hidden_layers): + layer = self.layers[i] + hidden_states = layer( + positions, + hidden_states, + kv_caches[i], + attn_metadata, + ) + + hidden_states = self.final_layernorm(hidden_states) + + return hidden_states + + +class PhiForCausalLM(nn.Module): + + def __init__(self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + self.quant_config = quant_config + + self.model = PhiModel(config, quant_config) + + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + bias=True) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata, self.lm_head.bias) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v") + ] + params_dict = dict(self.named_parameters()) + + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = 
name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # pylint: disable=E1136 + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py new file mode 100644 index 0000000..cba9ce7 --- /dev/null +++ b/vllm/model_executor/models/qwen.py @@ -0,0 +1,285 @@ +# coding=utf-8 +# Adapted from +# https://huggingface.co/Qwen/Qwen-7B/blob/main/modeling_qwen.py +# Copyright (c) Alibaba Cloud. +# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. +# LICENSE: https://huggingface.co/Qwen/Qwen-7B/blob/main/LICENSE +"""Inference-only QWen model compatible with HuggingFace weights.""" +from typing import Any, Dict, Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + 
class QWenMLP(nn.Module):
    """QWen feed-forward block: fused gate/up projection, SiLU-and-mul
    activation, then the ``c_proj`` down projection."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str = "silu",
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        # w1 and w2 are merged into one column-parallel GEMM.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.c_proj = RowParallelLinear(intermediate_size,
                                        hidden_size,
                                        bias=False,
                                        quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        fused, _ = self.gate_up_proj(x)
        activated = self.act_fn(fused)
        out, _ = self.c_proj(activated)
        return out


class QWenAttention(nn.Module):
    """Multi-head self-attention with rotary embeddings.

    QWen-1 has no grouped-query attention: q, k and v all use the full
    head count, fused into the single biased ``c_attn`` projection.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        max_position_embeddings: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        # Heads must divide evenly across tensor-parallel ranks.
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.head_dim = hidden_size // self.total_num_heads
        self.c_attn = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            bias=True,
            quant_config=quant_config,
        )
        self.c_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )
        self.scaling = self.head_dim**-0.5

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
        )
        self.attn = Attention(self.num_heads, self.head_dim, self.scaling)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.c_attn(hidden_states)
        # Equal thirds: q, k and v share the same width (no GQA).
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        context = self.attn(q, k, v, kv_cache, attn_metadata)
        out, _ = self.c_proj(context)
        return out


class QWenBlock(nn.Module):
    """One QWen transformer layer: pre-norm attention and pre-norm MLP,
    with the residual stream threaded through fused RMSNorm calls."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.ln_1 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)

        self.attn = QWenAttention(
            config.hidden_size,
            config.num_attention_heads,
            config.max_position_embeddings,
            rope_theta=getattr(config, "rope_theta", 10000),
            rope_scaling=getattr(config, "rope_scaling", None),
            quant_config=quant_config)

        self.ln_2 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)

        # NOTE(review): intermediate_size is halved here — presumably the HF
        # config reports the combined gate+up width; confirm against the
        # checkpoint before changing.
        self.mlp = QWenMLP(config.hidden_size,
                           config.intermediate_size // 2,
                           quant_config=quant_config)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if residual is None:
            # First layer: seed the residual stream.
            residual = hidden_states
            hidden_states = self.ln_1(hidden_states)
        else:
            hidden_states, residual = self.ln_1(hidden_states, residual)
        hidden_states = self.attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        hidden_states, residual = self.ln_2(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class QWenModel(nn.Module):
    """QWen backbone: token embedding, stacked QWenBlocks, final RMSNorm."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.vocab_size = config.vocab_size

        self.wte = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.h = nn.ModuleList([
            QWenBlock(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.wte(input_ids)
        residual = None
        for idx, block in enumerate(self.h):
            hidden_states, residual = block(
                positions,
                hidden_states,
                kv_caches[idx],
                attn_metadata,
                residual,
            )
        hidden_states, _ = self.ln_f(hidden_states, residual)
        return hidden_states


class QWenLMHeadModel(nn.Module):
    """QWen causal LM: QWenModel backbone plus a parallel LM head."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = QWenModel(config, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Run the backbone and return the final hidden states."""
        return self.transformer(input_ids, positions, kv_caches,
                                attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head.weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy HuggingFace checkpoint tensors into this module.

        The checkpoint's w1/w2 MLP shards are fused into ``gate_up_proj``;
        everything else loads one-to-one.
        """
        fused_mapping = [
            # (fused param name, checkpoint shard name, shard id)
            ("gate_up_proj", "w2", 0),
            ("gate_up_proj", "w1", 1),
        ]
        params_dict = dict(self.named_parameters())
        for ckpt_name, tensor in weights:
            # Rotary inverse-frequency caches are rebuilt at runtime.
            if "rotary_emb.inv_freq" in ckpt_name:
                continue
            for fused_name, shard_name, shard_id in fused_mapping:
                if shard_name not in ckpt_name:
                    continue
                target = ckpt_name.replace(shard_name, fused_name)
                # GPTQ checkpoints may carry biases this model does not keep.
                if target.endswith(".bias") and target not in params_dict:
                    continue
                param = params_dict[target]
                param.weight_loader(param, tensor, shard_id)
                break
            else:
                # GPTQ checkpoints may carry biases this model does not keep.
                if ckpt_name.endswith(".bias") and ckpt_name not in params_dict:
                    continue
                param = params_dict[ckpt_name]
                loader = getattr(param, "weight_loader", default_weight_loader)
                loader(param, tensor)
class Qwen2MLP(nn.Module):
    """Qwen2 feed-forward block.

    The gate and up projections are fused into one column-parallel GEMM;
    the activation consumes the fused (2 * intermediate) output and feeds
    an intermediate-sized tensor into ``down_proj``.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class Qwen2Attention(nn.Module):
    """Qwen2 self-attention with rotary embeddings, GQA support and an
    optional per-layer sliding window."""

    def __init__(self,
                 hidden_size: int,
                 num_heads: int,
                 num_kv_heads: int,
                 max_position: int = 4096 * 32,
                 rope_theta: float = 10000,
                 use_sliding_window: bool = False,
                 quant_config: Optional[QuantizationConfig] = None,
                 sliding_window: Optional[int] = None) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        # The window only applies on layers that opted in via
        # use_sliding_window; otherwise attention is dense.
        self.sliding_window = sliding_window if use_sliding_window else None

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=True,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position,
            base=self.rope_theta,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads,
                              sliding_window=self.sliding_window)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        # Unequal split: q uses num_heads, k/v use num_kv_heads (GQA).
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class Qwen2DecoderLayer(nn.Module):
    """One Qwen2 transformer layer: pre-norm attention + pre-norm MLP with
    the residual stream threaded through fused RMSNorm calls."""

    def __init__(
        self,
        config: Qwen2Config,
        layer_idx: int,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        # Requires transformers > 4.32.0
        rope_theta = getattr(config, "rope_theta", 1000000)
        # Guard the sliding-window fields with getattr: older Qwen2 configs
        # may not define them, in which case the window is disabled.
        use_sliding_window = (getattr(config, "use_sliding_window", False)
                              and layer_idx < getattr(
                                  config, "max_window_layers", 0))
        self.self_attn = Qwen2Attention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            use_sliding_window=use_sliding_window,
            quant_config=quant_config,
            sliding_window=getattr(config, "sliding_window", None))
        self.mlp = Qwen2MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
        )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        if residual is None:
            # First layer: seed the residual stream.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class Qwen2Model(nn.Module):
    """Qwen2 backbone: token embedding, stacked decoder layers, final norm."""

    def __init__(
        self,
        config: Qwen2Config,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            Qwen2DecoderLayer(config, layer_idx, quant_config)
            for layer_idx in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for idx, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[idx],
                attn_metadata,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class Qwen2ForCausalLM(nn.Module):
    """Qwen2 causal LM with LoRA metadata and optional tied embeddings."""

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "gate_up_proj",
        "down_proj",
    ]
    embedding_modules = {}
    embedding_padding_modules = []

    def __init__(
        self,
        config: Qwen2Config,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        # lora_config is accepted for loader compatibility but unused here.
        del lora_config
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = Qwen2Model(config, quant_config)

        if config.tie_word_embeddings:
            # Share the input embedding matrix as the output projection.
            self.lm_head_weight = self.model.embed_tokens.weight
        else:
            self.lm_head = ParallelLMHead(config.vocab_size,
                                          config.hidden_size)
            self.lm_head_weight = self.lm_head.weight

        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Run the backbone and return the final hidden states."""
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head_weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy HuggingFace checkpoint tensors into this module.

        q/k/v shards are fused into ``qkv_proj`` and gate/up shards into
        ``gate_up_proj``. ``lm_head.weight`` is skipped when the embeddings
        are tied.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        # Keep duplicates so the tied lm_head alias is visible.
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            # Rotary inverse-frequency caches are rebuilt at runtime.
            if "rotary_emb.inv_freq" in name:
                continue
            if self.config.tie_word_embeddings and "lm_head.weight" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
class Qwen2MoeMLP(nn.Module):
    """Dense SiLU MLP used both as a routed expert and as the shared expert.

    ``reduce_results=False`` lets the MoE block defer the tensor-parallel
    all-reduce until expert outputs have been combined.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
        reduce_results: bool = True,
    ) -> None:
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config,
                                           reduce_results=reduce_results)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class Qwen2MoeSparseMoeBlock(nn.Module):
    """Qwen2-MoE layer: top-k routed experts plus an always-on shared
    expert, combined and then all-reduced across tensor-parallel ranks."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.rank = get_tensor_model_parallel_rank()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.n_routed_experts = config.num_experts
        self.top_k = config.num_experts_per_tok
        if self.tp_size > self.n_routed_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {self.n_routed_experts}.")

        self.experts = nn.ModuleList([
            Qwen2MoeMLP(hidden_size=config.hidden_size,
                        intermediate_size=config.moe_intermediate_size,
                        hidden_act=config.hidden_act,
                        quant_config=quant_config,
                        reduce_results=False)
            for idx in range(self.n_routed_experts)
        ])
        # Flatten expert weights into the contiguous buffers fused_moe needs.
        self.pack_params()

        self.gate = ReplicatedLinear(config.hidden_size,
                                     self.n_routed_experts,
                                     bias=False,
                                     quant_config=None)
        if config.shared_expert_intermediate_size > 0:
            self.shared_expert = Qwen2MoeMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.shared_expert_intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                reduce_results=False,
            )
        else:
            self.shared_expert = None
        # Created unconditionally: forward() guards with `is not None`, and
        # the gate must exist whenever shared_expert does.
        self.shared_expert_gate = torch.nn.Linear(config.hidden_size,
                                                  1,
                                                  bias=False)

    def pack_params(self):
        """Flatten all expert weights into two contiguous buffers.

        After packing, each expert's parameter ``.data`` is re-pointed into
        the flat buffer so experts and the fused views share storage:
        ``w1`` has shape (num_experts, 2*intermediate, hidden) and ``w2``
        has shape (num_experts, hidden, intermediate).
        """
        w1 = []
        w2 = []
        for expert in self.experts:
            w1.append(expert.gate_up_proj.weight)
            w2.append(expert.down_proj.weight)
        self.w1 = torch._utils._flatten_dense_tensors(w1)
        w1s = torch._utils._unflatten_dense_tensors(self.w1, w1)
        for data, param in zip(w1s, w1):
            param.data = data
        self.w1 = self.w1.view(len(w1), *w1s[0].shape)

        self.w2 = torch._utils._flatten_dense_tensors(w2)
        w2s = torch._utils._unflatten_dense_tensors(self.w2, w2)
        for data, param in zip(w2s, w2):
            param.data = data

        self.w2 = self.w2.view(len(w2), *w2s[0].shape)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        shared_output = None
        if self.shared_expert is not None:
            shared_output = self.shared_expert(hidden_states)
            if self.shared_expert_gate is not None:
                # torch.sigmoid: F.sigmoid is deprecated.
                shared_output = torch.sigmoid(
                    self.shared_expert_gate(hidden_states)) * shared_output

        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = fused_moe(hidden_states,
                                        self.w1,
                                        self.w2,
                                        router_logits,
                                        self.top_k,
                                        renormalize=self.config.norm_topk_prob,
                                        inplace=True)

        if shared_output is not None:
            final_hidden_states = final_hidden_states + shared_output
        # Expert outputs were kept partial (reduce_results=False); do the
        # single all-reduce here.
        final_hidden_states = tensor_model_parallel_all_reduce(
            final_hidden_states)

        return final_hidden_states.view(num_tokens, hidden_dim)


class Qwen2MoeAttention(nn.Module):
    """Qwen2-MoE self-attention with rotary embeddings and GQA support."""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        max_position_embeddings: int = 8192,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=True,
            quant_config=quant_config,
        )

        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        # Unequal split: q uses num_heads, k/v use num_kv_heads (GQA).
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class Qwen2MoeDecoderLayer(nn.Module):
    """One Qwen2-MoE layer: attention plus either a sparse MoE block or a
    dense MLP, selected by ``decoder_sparse_step``."""

    def __init__(
        self,
        config: PretrainedConfig,
        layer_idx: int,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        rope_scaling = getattr(config, "rope_scaling", None)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.self_attn = Qwen2MoeAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
        )
        # Every decoder_sparse_step-th layer (1-based) is a sparse MoE layer.
        if (config.num_experts is not None
                and (layer_idx + 1) % config.decoder_sparse_step == 0):
            self.mlp = Qwen2MoeSparseMoeBlock(config=config,
                                              quant_config=quant_config)
        else:
            self.mlp = Qwen2MoeMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
            )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
        # Annotation fixed: this returns (hidden_states, residual),
        # not a single tensor.
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        if residual is None:
            # First layer: seed the residual stream.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class Qwen2MoeModel(nn.Module):
    """Qwen2-MoE backbone: embedding, stacked decoder layers, final norm."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            Qwen2MoeDecoderLayer(config, layer_idx, quant_config=quant_config)
            for layer_idx in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for idx, layer in enumerate(self.layers):
            hidden_states, residual = layer(positions, hidden_states,
                                            kv_caches[idx], attn_metadata,
                                            residual)
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class Qwen2MoeForCausalLM(nn.Module):
    """Qwen2-MoE causal LM: backbone plus a parallel LM head."""

    fall_back_to_pt_during_load = False

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = Qwen2MoeModel(config, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        """Run the backbone and return the final hidden states."""
        return self.model(input_ids, positions, kv_caches, attn_metadata)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head.weight, hidden_states,
                                     sampling_metadata)

    def sample(
        self,
        logits: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Sample next tokens from the logits."""
        return self.sampler(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy HuggingFace checkpoint tensors into this module.

        q/k/v and gate/up shards are fused; expert weights not present in
        this worker's parameter dict are skipped.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]

        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            # Rotary inverse-frequency caches are rebuilt at runtime.
            if "rotary_emb.inv_freq" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip experts that are not assigned to this worker.
                if (("mlp.experts." in name or "mlp.shared_expert." in name)
                        and name not in params_dict):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip experts that are not assigned to this worker.
                if (("mlp.experts." in name or "mlp.shared_expert." in name)
                        and name not in params_dict):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
# coding=utf-8
# Copyright 2023 Stability AI, EleutherAI, and The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is based off the following work:
# https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/modeling_stablelm_epoch.py
# https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/config.json
"""Inference-only StableLM (https://github.com/Stability-AI/StableLM)
model compatible with HuggingFace weights."""
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import PretrainedConfig

from vllm.attention import Attention, AttentionMetadata
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.linear import (MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    ParallelLMHead, VocabParallelEmbedding)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.sequence import SamplerOutput


class StablelmMLP(nn.Module):
    """SwiGLU feed-forward block: fused gate/up projection -> SiLU -> down."""

    def __init__(self,
                 config: PretrainedConfig,
                 quant_config: Optional[QuantizationConfig] = None) -> None:
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_up_proj = MergedColumnParallelLinear(
            config.hidden_size, [config.intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        # BUGFIX: quant_config was previously not forwarded here, so the
        # down projection of quantized checkpoints silently loaded
        # unquantized while every other linear layer was quantized.
        self.down_proj = RowParallelLinear(config.intermediate_size,
                                           config.hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        self.act_fn = SiluAndMul()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class StablelmAttention(nn.Module):
    """Multi-head attention with partial rotary embeddings (StableLM style)."""

    def __init__(self,
                 config: PretrainedConfig,
                 quant_config: Optional[QuantizationConfig] = None) -> None:
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        self.num_heads = self.total_num_heads // tp_size

        self.total_num_key_value_heads = config.num_key_value_heads
        if self.total_num_key_value_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_key_value_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_key_value_heads == 0
        self.num_key_value_heads = max(
            1, self.total_num_key_value_heads // tp_size)
        self.head_dim = self.hidden_size // self.total_num_heads
        self.max_position_embeddings = config.max_position_embeddings
        # Fraction of each head that receives rotary embeddings; config key
        # differs between StableLM checkpoint generations.
        rope_pct = getattr(config, "rope_pct",
                           getattr(config, "partial_rotary_factor", 1))
        self.rotary_ndims = int(self.head_dim * rope_pct)
        self.scaling = self.head_dim**-0.5
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_key_value_heads * self.head_dim
        self.qkv_bias = getattr(config, "use_qkv_bias", False)
        if (self.head_dim * self.num_heads * tp_size) != self.hidden_size:
            raise ValueError(f"hidden_size must be divisible by num_heads "
                             f"(got `hidden_size`: {self.hidden_size}"
                             f" and `num_heads`: {self.num_heads}).")

        self.qkv_proj = QKVParallelLinear(self.hidden_size,
                                          self.head_dim,
                                          self.total_num_heads,
                                          self.total_num_key_value_heads,
                                          self.qkv_bias,
                                          quant_config=quant_config)
        self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                        self.hidden_size,
                                        bias=False,
                                        quant_config=quant_config)
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.rotary_ndims,
            max_position=self.config.max_position_embeddings,
            base=self.config.rope_theta,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_key_value_heads)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class StablelmDecoderLayer(nn.Module):
    """One decoder block: pre-LN attention and pre-LN MLP with residuals."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        # BUGFIX: quant_config was previously dropped here
        # (StablelmAttention(config)), leaving attention projections of
        # quantized models unquantized.
        self.self_attn = StablelmAttention(config, quant_config)
        self.mlp = StablelmMLP(config, quant_config)
        norm_eps = getattr(config, "norm_eps",
                           getattr(config, "layer_norm_eps", 1e-05))
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states, residual


class StableLMEpochModel(nn.Module):
    """Embedding layer + stack of decoder layers + final LayerNorm."""

    def __init__(self,
                 config: PretrainedConfig,
                 quant_config: Optional[QuantizationConfig] = None) -> None:
        super().__init__()
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            StablelmDecoderLayer(config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        norm_eps = getattr(config, "norm_eps",
                           getattr(config, "layer_norm_eps", 1e-05))
        self.norm = nn.LayerNorm(config.hidden_size, eps=norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        for i in range(len(self.layers)):
            layer = self.layers[i]
            # The layer returns (hidden_states, residual); the residual is
            # already folded into hidden_states, so it is discarded here.
            hidden_states, _ = layer(
                positions,
                hidden_states,
                kv_caches[i],
                attn_metadata,
            )
        hidden_states = self.norm(hidden_states)
        return hidden_states


class StablelmForCausalLM(nn.Module):
    """StableLM decoder with a language-modeling head for vLLM inference."""

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = StableLMEpochModel(config, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        """Project hidden states onto the vocabulary via the LM head."""
        logits = self.logits_processor(self.lm_head.weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        """Draw next tokens from the logits using the configured sampler."""
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        """Copy checkpoint tensors into this model's parameters.

        Fused parameters (``qkv_proj`` / ``gate_up_proj``) are assembled
        from their per-projection checkpoint shards.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if ("rotary_emb.cos_cached" in name
                    or "rotary_emb.sin_cached" in name):
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
+""" PyTorch Starcoder2 model.""" +from typing import Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import Starcoder2Config + +from vllm.attention import Attention, AttentionMetadata +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class Starcoder2Attention(nn.Module): + + def __init__(self, + config: Starcoder2Config, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + + self.hidden_size = config.hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = config.num_attention_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = config.num_key_value_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. 
+ assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = self.hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = config.rope_theta + self.max_position_embeddings = config.max_position_embeddings + self.use_bias = config.use_bias + self.sliding_window = config.sliding_window + + self.qkv_proj = QKVParallelLinear( + self.hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=self.use_bias, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + self.hidden_size, + bias=self.use_bias, + quant_config=quant_config, + ) + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=self.max_position_embeddings, + base=int(self.rope_theta), + is_neox_style=True, + ) + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + sliding_window=self.sliding_window, + ) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class Starcoder2MLP(nn.Module): + + def __init__(self, + config: Starcoder2Config, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.c_fc = ColumnParallelLinear( + config.hidden_size, + config.intermediate_size, + bias=config.use_bias, + quant_config=quant_config, + ) + self.c_proj = RowParallelLinear( + config.intermediate_size, + config.hidden_size, + bias=config.use_bias, 
+ quant_config=quant_config, + ) + self.act = get_act_fn(config.hidden_act, quant_config, + config.intermediate_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states, _ = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states, _ = self.c_proj(hidden_states) + return hidden_states + + +class Starcoder2DecoderLayer(nn.Module): + + def __init__(self, + config: Starcoder2Config, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = Starcoder2Attention(config, quant_config=quant_config) + self.mlp = Starcoder2MLP(config, quant_config=quant_config) + self.input_layernorm = nn.LayerNorm(config.hidden_size, + eps=config.norm_epsilon) + self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, + eps=config.norm_epsilon) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + # Self Attention + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + return hidden_states + + +class Starcoder2Model(nn.Module): + + def __init__(self, + config: Starcoder2Config, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + # TODO: consider padding_idx (currently removed) + self.embed_tokens = VocabParallelEmbedding(config.vocab_size, + config.hidden_size) + self.layers = nn.ModuleList([ + 
Starcoder2DecoderLayer(config, quant_config=quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states = layer(positions, hidden_states, kv_caches[i], + attn_metadata) + hidden_states = self.norm(hidden_states) + return hidden_states + + +class Starcoder2ForCausalLM(nn.Module): + + def __init__(self, + config: Starcoder2Config, + quant_config: Optional[QuantizationConfig] = None): + super().__init__() + self.config = config + self.model = Starcoder2Model(config, quant_config=quant_config) + self.vocab_size = config.vocab_size + self.unpadded_vocab_size = config.vocab_size + if config.tie_word_embeddings: + self.lm_head_weight = self.model.embed_tokens.weight + else: + self.unpadded_vocab_size = config.vocab_size + self.lm_head = ParallelLMHead( + self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE, + ) + self.lm_head_weight = self.lm_head.weight + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head_weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: Optional[torch.Tensor], + sampling_metadata: 
SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ] + + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if self.config.tie_word_embeddings and "lm_head.weight" in name: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py new file mode 100644 index 0000000..0fb2662 --- /dev/null +++ b/vllm/model_executor/models/xverse.py @@ -0,0 +1,366 @@ +# coding=utf-8 +# Adapted from +# https://huggingface.co/xverse/XVERSE-7B/blob/main/modeling_xverse.py +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only Xverse model compatible with HuggingFace weights.""" +from typing import Any, Dict, Iterable, List, Optional, Tuple + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import LoRAConfig +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import SamplerOutput + + +class XverseMLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config) + self.down_proj = 
RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config) + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate, _ = self.gate_up_proj(x) + x = self.act_fn(gate) + x, _ = self.down_proj(x) + return x + + +class XverseAttention(nn.Module): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + rope_theta: float = 10000, + rope_scaling: Optional[Dict[str, Any]] = None, + max_position_embeddings: int = 8192, + quant_config: Optional[QuantizationConfig] = None, + bias: bool = False, + sliding_window: Optional[int] = None, + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + # partition the KV heads across multiple tensor parallel GPUs. 
+ assert self.total_num_kv_heads % tp_size == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=bias, + quant_config=quant_config, + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=bias, + quant_config=quant_config, + ) + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + self.attn = Attention(self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + sliding_window=sliding_window) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class XverseDecoderLayer(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) + sliding_window = getattr(config, "sliding_window", None) + self.self_attn = XverseAttention( + hidden_size=self.hidden_size, + 
num_heads=config.num_attention_heads, + num_kv_heads=getattr(config, "num_key_value_heads", + config.num_attention_heads), + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + quant_config=quant_config, + bias=getattr(config, "bias", False), + sliding_window=sliding_window, + ) + self.mlp = XverseMLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + residual: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Self Attention + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn( + positions=positions, + hidden_states=hidden_states, + kv_cache=kv_cache, + attn_metadata=attn_metadata, + ) + + # Fully Connected + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states) + return hidden_states, residual + + +class XverseModel(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + ) -> None: + super().__init__() + self.config = config + self.padding_idx = config.pad_token_id + lora_vocab = (lora_config.lora_extra_vocab_size * + (lora_config.max_loras or 1)) if lora_config else 0 + self.vocab_size = config.vocab_size + lora_vocab + self.org_vocab_size = config.vocab_size + self.embed_tokens = VocabParallelEmbedding( + self.vocab_size, + config.hidden_size, + 
org_num_embeddings=config.vocab_size, + ) + self.layers = nn.ModuleList([ + XverseDecoderLayer(config, quant_config) + for _ in range(config.num_hidden_layers) + ]) + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + residual = None + for i in range(len(self.layers)): + layer = self.layers[i] + hidden_states, residual = layer( + positions, + hidden_states, + kv_caches[i], + attn_metadata, + residual, + ) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + +class XverseForCausalLM(nn.Module): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + + # LoRA specific attributes + supported_lora_modules = [ + "qkv_proj", + "o_proj", + "gate_up_proj", + "down_proj", + "embed_tokens", + "lm_head", + ] + embedding_modules = { + "embed_tokens": "input_embeddings", + "lm_head": "output_embeddings", + } + embedding_padding_modules = ["lm_head"] + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + lora_config=None, + ) -> None: + super().__init__() + self.config = config + self.quant_config = quant_config + self.model = XverseModel(config, quant_config) + self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + hidden_states = self.model(input_ids, positions, kv_caches, + attn_metadata) + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> 
torch.Tensor: + logits = self.logits_processor(self.lm_head.weight, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + for name, loaded_weight in weights: + if ("rotary_emb.inv_freq" in name + or "rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/sampling_metadata.py b/vllm/model_executor/sampling_metadata.py new file mode 100644 index 0000000..9969c45 --- /dev/null +++ b/vllm/model_executor/sampling_metadata.py @@ -0,0 +1,588 @@ +import random +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch + +from vllm.model_executor.layers.ops.sample import get_num_triton_sampler_splits +from vllm.sampling_params import SamplingParams, SamplingType +from vllm.sequence import SequenceData, SequenceGroupMetadata +from vllm.utils import (async_tensor_h2d, is_pin_memory_available, + maybe_expand_dim) + +_SAMPLING_EPS = 1e-5 +_SEED_0_REPLACEMENT = 3403598558 + + +@dataclass +class SequenceGroupToSample: + # |---------- N-1 iteration --------| + # |---------------- N iteration ---------------------| + # |- tokenA -|......................|-- newTokens ---| + # |---------- context_len ----------| + # |-------------------- seq_len ----------------------| + # |-- query_len ---| + + # Sequence ids for the sequence group in a previous step. + seq_ids: List[int] + sampling_params: SamplingParams + # seq_id -> sequence data. + seq_data: Dict[int, SequenceData] + # The length of the sequence (all tokens seen in the past + new token to + # compute attention) of the sequence group. None if it is in a decode + # stage. + seq_len: Optional[int] + # The length of new query tokens to compute in the current step. None if it + # is in a decode stage. The length of query_len <= seq_len if chunked + # prefill is enabled. + query_len: Optional[int] + # A random number generator for sampling. + generator: Optional[torch.Generator] + # True if the sequence group is in prefill stage. False if it is in a + # decode stage. 
+ is_prompt: bool + # Query token indices from logits. to compute prompt logprob. Empty if + # prompt logprob is not required. + prompt_logprob_indices: List[int] + # Sample token indices from logits. Empty if sampling is not required. + sample_indices: List[int] + + @property + def do_sample(self): + return len(self.sample_indices) > 0 + + def __post_init__(self): + if len(self.prompt_logprob_indices) > 0: + assert self.sampling_params.prompt_logprobs is not None + if self.is_prompt: + assert self.seq_len is not None + assert self.query_len is not None + + +class SamplingMetadata: + """Metadata for input sequences. Used in sampler. + + The usage is as follow; + ``` + hidden_states = execute_model(...) + logits = hidden_states[sampling_metadata.selected_token_indices] + sample(logits) + + def sample(logits): + # Use categorized_sample_indices for sampling.... + ``` + + Args: + seq_groups: List of batched sequence groups. + selected_token_indices: (num_query_tokens_to_logprob). Indices to find + logits from the initial model output hidden states. + categorized_sample_indices: SamplingType -> token indices to sample. + Each token indices is 2D tensor of (num_indices, num_indices) where + the first item means the sample index within the returned logit + (before pruning padding), and the second item means the sample + index after pruning using selected_token_indices. + For example, if the returned logit is [1, 2, 3], and we select + [1, 2] for sampling, the pruned logit will be [2, 3]. In this case, + The first tuple is [1, 2] (sampled index within original logit), + and the second tuple is [0, 1] (sampled index within pruned logit). + num_prompts: Number of prompt sequence groups in seq_groups. 
+ """ + + def __init__( + self, + seq_groups: List[SequenceGroupToSample], + selected_token_indices: torch.Tensor, + categorized_sample_indices: Dict[SamplingType, torch.Tensor], + num_prompts: int, + ) -> None: + self.seq_groups = seq_groups + self.selected_token_indices = selected_token_indices + self.categorized_sample_indices = categorized_sample_indices + self.num_prompts = num_prompts + + @staticmethod + def prepare( + seq_group_metadata_list: List[SequenceGroupMetadata], + seq_lens: List[int], + query_lens: Optional[List[int]], + device: str, + pin_memory: bool, + ) -> "SamplingMetadata": + ( + seq_groups, + selected_token_indices, + categorized_sample_indices, + num_prompts, + ) = _prepare_seq_groups(seq_group_metadata_list, seq_lens, query_lens, + device) + selected_token_indices = async_tensor_h2d(selected_token_indices, + dtype=torch.long, + target_device=device, + pin_memory=pin_memory) + categorized_sample_indices = { + t: maybe_expand_dim( + async_tensor_h2d(seq_ids, + dtype=torch.int, + target_device=device, + pin_memory=pin_memory), 2, 2) + for t, seq_ids in categorized_sample_indices.items() + } + + sampling_metadata = SamplingMetadata( + seq_groups=seq_groups, + selected_token_indices=selected_token_indices, + categorized_sample_indices=categorized_sample_indices, + num_prompts=num_prompts, + ) + return sampling_metadata + + def __repr__(self) -> str: + return ( + "SamplingMetadata(" + f"seq_groups={self.seq_groups}, " + f"selected_token_indices={self.selected_token_indices}, " + f"categorized_sample_indices={self.categorized_sample_indices}), ") + + +def _prepare_seq_groups( + seq_group_metadata_list: List[SequenceGroupMetadata], + seq_lens: List[int], + query_lens: Optional[List[int]], + device: str, +) -> Tuple[List[SequenceGroupToSample], List[int], Dict[ + SamplingType, List[Tuple[int, int]]], int]: + """Prepare sequence groups and indices for sampling. + + Args: + seq_group_metadata_list: A list of sequence group to batch. 
+ seq_lens: A list of sequence lens per sequence group. + Index of prompt len should match with seq_group_metadata_list. + query_lens: A list of query lengths. Prompt lens include the length + of entire prompt tokens, and it could be shorter. + device: A device to use for random number generator, + `SequenceGroupToSample.generator`. + + Returns: + seq_groups: A list of sequence group to sample. + selected_token_indices: See the definition from `SamplingMetadata`. + categorized_sample_indices: See the definition from `SamplingMetadata`. + num_prompts: Total number of prompts from `seq_group_metadata_list`. + """ + # Batched sequence groups for the current model forward step. + seq_groups: List[SequenceGroupToSample] = [] + # A list of token indices to sample/compute logprob. It is used to + # prune the outcome logits from the model for the performance. + selected_token_indices: List[int] = [] + # Used for selected_token_indices. + model_output_idx = 0 + + # Sampling type -> ( + # indices to sample/prompt logprob within pruned output logits, + # indices to sample within pruned logits) + categorized_sample_indices: Dict[SamplingType, List[Tuple[int, int]]] = { + t: [] + for t in SamplingType + } + # Index of logits to compute logprob. Logits include both prompt logprob + # and sample logprob indices. + logit_idx = 0 + # Index to sample from a sample tensor. It is used by triton sample kernel. + # See `_sample_with_triton_kernel` for more details. + sample_idx = 0 + # Total number of prompts from given sequence groups. + num_prompts = 0 + + for i, seq_group_metadata in enumerate(seq_group_metadata_list): + seq_ids = list(seq_group_metadata.seq_data.keys()) + sampling_params = seq_group_metadata.sampling_params + is_prompt = seq_group_metadata.is_prompt + generator: Optional[torch.Generator] = None + # If the current seq group is in decode stage, it is None. 
+ seq_len: Optional[int] = None + query_len: Optional[int] = None + prompt_logprob_indices: List[int] = [] + sample_indices: List[int] = [] + do_sample = seq_group_metadata.do_sample + + if seq_group_metadata.is_prompt: + if sampling_params.seed is not None: + seq_group_metadata.state.generator = torch.Generator( + device=device).manual_seed(sampling_params.seed) + + num_prompts += 1 + num_prefill_sample = len(seq_ids) + assert num_prefill_sample == 1 + assert query_lens is not None and seq_lens is not None + query_len, seq_len = query_lens[i], seq_lens[i] + # If we need sampling, exclude num_prefill_sample tokens from + # prompt logprob. + prompt_logprob_len = (query_len - num_prefill_sample + if do_sample else query_len) + sample_len = num_prefill_sample if do_sample else 0 + else: + # Decode + prompt_logprob_len = 0 + sample_len = len(seq_ids) if do_sample else 0 + + # Update indices to select from the model output. + """ + This block computes selected_token_indices which is used in the + following way. + + hidden_states = model(...) + logits = hidden_states[selected_token_indices] + """ + + if sampling_params.prompt_logprobs is not None: + selected_token_indices.extend( + range(model_output_idx, model_output_idx + prompt_logprob_len)) + model_output_idx += prompt_logprob_len + if do_sample: + selected_token_indices.extend( + range(model_output_idx, model_output_idx + sample_len)) + model_output_idx += sample_len + + # We now find indices for logprob computation and sampling. + """ + This block computes categorized_sample_indices which is used in the + following way. + + hidden_states = model(...) + logits = hidden_states[selected_token_indices] + def sample(logits): + # Use categorized_sample_indices for sampling. + # prompt_logprob_indices to find prompt logprob indices. + # sample_indices to find sample indices. 
+ """ + + if sampling_params.prompt_logprobs is not None: + prompt_logprob_indices.extend( + range(logit_idx, logit_idx + prompt_logprob_len)) + logit_idx += prompt_logprob_len + if do_sample: + sample_indices.extend(range(logit_idx, logit_idx + sample_len)) + categorized_sample_indices[sampling_params.sampling_type].extend( + list( + zip(range(logit_idx, logit_idx + sample_len), + range(sample_idx, sample_idx + sample_len)))) + logit_idx += sample_len + sample_idx += sample_len + + if sampling_params.seed is not None: + generator = seq_group_metadata.state.generator + + seq_groups.append( + SequenceGroupToSample( + seq_ids=seq_ids, + sampling_params=sampling_params, + seq_data=seq_group_metadata.seq_data, + seq_len=seq_len, + query_len=query_len, + generator=generator, + is_prompt=is_prompt, + prompt_logprob_indices=list(prompt_logprob_indices), + sample_indices=list(sample_indices))) + return (seq_groups, selected_token_indices, categorized_sample_indices, + num_prompts) + + +@dataclass +class SamplingTensors: + """Tensors for sampling.""" + + temperatures: torch.Tensor + top_ps: torch.Tensor + top_ks: torch.Tensor + min_ps: torch.Tensor + presence_penalties: torch.Tensor + frequency_penalties: torch.Tensor + repetition_penalties: torch.Tensor + sampling_seeds: torch.Tensor + sample_indices: torch.Tensor + extra_seeds: Optional[torch.Tensor] + prompt_tokens: torch.Tensor + output_tokens: torch.Tensor + + @classmethod + def from_sampling_metadata( + cls, + sampling_metadata: "SamplingMetadata", + vocab_size: int, + device: torch.device, + dtype: torch.dtype, + *, + extra_seeds_to_generate: int = 0, + extra_entropy: Optional[Tuple[int, ...]] = None + ) -> Tuple["SamplingTensors", bool, bool, bool]: + """ + extra_seeds_to_generate: extra seeds to generate using the + user-defined seed for each sequence. + extra_entropy: extra entropy to use when generating seeds. 
+ """ + prompt_tokens: List[List[int]] = [] + output_tokens: List[List[int]] = [] + top_ks: List[int] = [] + temperatures: List[float] = [] + top_ps: List[float] = [] + min_ps: List[float] = [] + presence_penalties: List[float] = [] + frequency_penalties: List[float] = [] + repetition_penalties: List[float] = [] + sampling_seeds: List[int] = [] + sample_indices: List[int] = [] + prompt_best_of: List[int] = [] + do_penalties = False + do_top_p_top_k = False + do_min_p = False + + # We need one base seed per Triton slice. + seeds_to_generate = (extra_seeds_to_generate + + get_num_triton_sampler_splits(vocab_size)) + + assert sampling_metadata.seq_groups is not None + for seq_group in sampling_metadata.seq_groups: + seq_ids = seq_group.seq_ids + sampling_params = seq_group.sampling_params + temperature = sampling_params.temperature + p = sampling_params.presence_penalty + f = sampling_params.frequency_penalty + r = sampling_params.repetition_penalty + top_p = sampling_params.top_p + min_p = sampling_params.min_p + seed = sampling_params.seed + + is_greedy = sampling_params.sampling_type == SamplingType.GREEDY + + # k should not be greater than the vocab size. + top_k = min(sampling_params.top_k, vocab_size) + top_k = vocab_size if top_k == -1 else top_k + if temperature < _SAMPLING_EPS: + # NOTE: Zero temperature means deterministic sampling + # (i.e., greedy sampling or beam search). + # Set the temperature to 1 to avoid division by zero. 
+ temperature = 1.0 + if not do_top_p_top_k and (top_p < 1.0 - _SAMPLING_EPS + or top_k != vocab_size): + do_top_p_top_k = True + if not do_min_p and min_p > _SAMPLING_EPS: + do_min_p = True + if not do_penalties and (abs(p) >= _SAMPLING_EPS + or abs(f) >= _SAMPLING_EPS + or abs(r - 1.0) >= _SAMPLING_EPS): + do_penalties = True + + is_prompt = seq_group.is_prompt + if (seq_group.is_prompt + and sampling_params.prompt_logprobs is not None): + # For tokens in the prompt that we only need to get + # their logprobs + query_len = seq_group.query_len + assert query_len is not None + prefill_len = len(seq_group.prompt_logprob_indices) + temperatures += [temperature] * prefill_len + top_ps += [top_p] * prefill_len + top_ks += [top_k] * prefill_len + min_ps += [min_p] * prefill_len + presence_penalties += [0] * prefill_len + frequency_penalties += [0] * prefill_len + repetition_penalties += [1] * prefill_len + prompt_tokens.extend([] for _ in range(prefill_len)) + output_tokens.extend([] for _ in range(prefill_len)) + + if seq_group.do_sample: + sample_lens = len(seq_group.sample_indices) + assert sample_lens == len(seq_ids) + for seq_id in seq_ids: + seq_data = seq_group.seq_data[seq_id] + prompt_tokens.append(seq_data.prompt_token_ids) + output_tokens.append(seq_data.output_token_ids) + temperatures += [temperature] * len(seq_ids) + top_ps += [top_p] * len(seq_ids) + top_ks += [top_k] * len(seq_ids) + min_ps += [min_p] * len(seq_ids) + presence_penalties += [p] * len(seq_ids) + frequency_penalties += [f] * len(seq_ids) + repetition_penalties += [r] * len(seq_ids) + + if is_prompt: + prompt_best_of.append(sampling_params.best_of) + query_len = seq_group.query_len + assert query_len is not None + + for seq_id in seq_ids: + seq_data = seq_group.seq_data[seq_id] + extra_entropy = extra_entropy or () + seq_seeds = cls._get_sequence_seeds( + seed, + seq_data.get_len(), + *extra_entropy, + seq_id, + seeds_to_generate=seeds_to_generate, + is_greedy=is_greedy) + 
sampling_seeds.append(seq_seeds) + sample_indices.extend(seq_group.sample_indices) + + sampling_tensors = SamplingTensors.from_lists( + temperatures, top_ps, top_ks, min_ps, presence_penalties, + frequency_penalties, repetition_penalties, sampling_seeds, + sample_indices, prompt_tokens, output_tokens, vocab_size, + extra_seeds_to_generate, device, dtype) + return (sampling_tensors, do_penalties, do_top_p_top_k, do_min_p) + + @classmethod + def from_lists(cls, temperatures: List[float], top_ps: List[float], + top_ks: List[int], min_ps: List[float], + presence_penalties: List[float], + frequency_penalties: List[float], + repetition_penalties: List[float], + sampling_seeds: List[int], sample_indices: List[int], + prompt_tokens: List[List[int]], + output_tokens: List[List[int]], vocab_size: int, + extra_seeds_to_generate: int, device: torch.device, + dtype: torch.dtype) -> "SamplingTensors": + # Note that the performance will be very bad without + # pinned memory. + pin_memory = is_pin_memory_available() + prompt_max_len = max([len(tokens) for tokens in prompt_tokens], + default=0) + prompt_padded_tokens = [ + tokens + [vocab_size] * (prompt_max_len - len(tokens)) + for tokens in prompt_tokens + ] + output_max_len = max([len(tokens) for tokens in output_tokens], + default=0) + output_padded_tokens = [ + tokens + [vocab_size] * (output_max_len - len(tokens)) + for tokens in output_tokens + ] + + temperatures_t = torch.tensor( + temperatures, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + top_ps_t = torch.tensor( + top_ps, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + min_ps_t = torch.tensor( + min_ps, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + presence_penalties_t = torch.tensor( + presence_penalties, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + frequency_penalties_t = torch.tensor( + frequency_penalties, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + repetition_penalties_t = torch.tensor( 
+ repetition_penalties, + device="cpu", + dtype=dtype, + pin_memory=pin_memory, + ) + top_ks_t = torch.tensor( + top_ks, + device="cpu", + dtype=torch.int, + pin_memory=pin_memory, + ) + sample_indices_t = torch.tensor( + sample_indices, + device="cpu", + dtype=torch.long, + pin_memory=pin_memory, + ) + prompt_tensor = torch.tensor( + prompt_padded_tokens, + device="cpu", + dtype=torch.long, + pin_memory=pin_memory, + ) + output_tensor = torch.tensor( + output_padded_tokens, + device="cpu", + dtype=torch.long, + pin_memory=pin_memory, + ) + # need to transpose and make contiguous to + # copy the tensor correctly. + # [batch_size, n_seeds] -> [n_seeds, batch_size] + sampling_seeds_t = torch.tensor( + sampling_seeds, + device="cpu", + dtype=torch.long, + pin_memory=pin_memory, + ).T.contiguous() + + # Because the memory is pinned, we can do non-blocking + # transfer to device. + + # How many seeds the sample operation itself will need. + num_base_seeds = sampling_seeds_t.shape[0] - extra_seeds_to_generate + sampling_seeds_gpu = sampling_seeds_t.to(device=device, + non_blocking=True) + extra_seeds_gpu = sampling_seeds_gpu[num_base_seeds:] + if not extra_seeds_gpu.numel(): + extra_seeds_gpu = None + sampling_seeds_gpu = sampling_seeds_gpu[:num_base_seeds] + + return cls( + temperatures=temperatures_t.to(device=device, non_blocking=True), + top_ps=top_ps_t.to(device=device, non_blocking=True), + top_ks=top_ks_t.to(device=device, non_blocking=True), + min_ps=min_ps_t.to(device=device, non_blocking=True), + presence_penalties=presence_penalties_t.to(device=device, + non_blocking=True), + frequency_penalties=frequency_penalties_t.to(device=device, + non_blocking=True), + repetition_penalties=repetition_penalties_t.to(device=device, + non_blocking=True), + prompt_tokens=prompt_tensor.to(device=device, non_blocking=True), + output_tokens=output_tensor.to(device=device, non_blocking=True), + sampling_seeds=sampling_seeds_gpu, + 
sample_indices=sample_indices_t.to(device=device, + non_blocking=True), + extra_seeds=extra_seeds_gpu, + ) + + @staticmethod + def _get_sequence_seeds( + seed: int, + *extra_entropy: int, + seeds_to_generate: int, + is_greedy: bool, + ): + """Get `seeds_to_generate` child seeds from `seed` and extra entropy.""" + if not is_greedy: + if seed is None: + randint_fn = random.randint + else: + generator = random.Random(str((seed, ) + extra_entropy)) + randint_fn = generator.randint + lo, hi = torch.iinfo(torch.long).min, torch.iinfo(torch.long).max + # If the user/random sets seed = 0 but request should + # have sampling, we need to change it to something + # else. We use a constant in that case. + # This way we don't need to create and load a bool + # matrix in the sampling kernel, which reduces CPU + # overhead and latency. + seq_seeds = [ + randint_fn(lo, hi) or _SEED_0_REPLACEMENT + for _ in range(seeds_to_generate) + ] + else: + # For the kernel, seed == 0 means greedy decoding. + seq_seeds = [0] * seeds_to_generate + return seq_seeds diff --git a/vllm/model_executor/utils.py b/vllm/model_executor/utils.py new file mode 100644 index 0000000..aef308a --- /dev/null +++ b/vllm/model_executor/utils.py @@ -0,0 +1,37 @@ +"""Utils for model executor.""" +import random +from typing import Any, Dict, Optional + +import numpy as np +import torch + + +def set_random_seed(seed: int) -> None: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + elif torch.musa.is_available(): + torch.musa.manual_seed_all(seed) + + +def set_weight_attrs( + weight: torch.Tensor, + weight_attrs: Optional[Dict[str, Any]], +): + """Set attributes on a weight tensor. + + This method is used to set attributes on a weight tensor. This method + will not overwrite existing attributes. + + Args: + weight: The weight tensor. + weight_attrs: A dictionary of attributes to set on the weight tensor. 
+ """ + if weight_attrs is None: + return + for key, value in weight_attrs.items(): + assert not hasattr( + weight, key), (f"Overwriting existing tensor attribute: {key}") + setattr(weight, key, value) diff --git a/vllm/outputs.py b/vllm/outputs.py new file mode 100644 index 0000000..d01be0e --- /dev/null +++ b/vllm/outputs.py @@ -0,0 +1,150 @@ +import time +from typing import List, Optional, Union + +from vllm.lora.request import LoRARequest +from vllm.sequence import (PromptLogprobs, RequestMetrics, SampleLogprobs, + SequenceGroup, SequenceStatus) + + +class CompletionOutput: + """The output data of one completion output of a request. + + Args: + index: The index of the output in the request. + text: The generated output text. + token_ids: The token IDs of the generated output text. + cumulative_logprob: The cumulative log probability of the generated + output text. + logprobs: The log probabilities of the top probability words at each + position if the logprobs are requested. + finish_reason: The reason why the sequence is finished. + stop_reason: The stop string or token id that caused the completion + to stop, None if the completion finished for some other reason + including encountering the EOS token. + lora_request: The LoRA request that was used to generate the output. 
+ """ + + def __init__( + self, + index: int, + text: str, + token_ids: List[int], + cumulative_logprob: float, + logprobs: Optional[SampleLogprobs], + finish_reason: Optional[str] = None, + stop_reason: Union[int, str, None] = None, + lora_request: Optional[LoRARequest] = None, + ) -> None: + self.index = index + self.text = text + self.token_ids = token_ids + self.cumulative_logprob = cumulative_logprob + self.logprobs = logprobs + self.finish_reason = finish_reason + self.stop_reason = stop_reason + self.lora_request = lora_request + + def finished(self) -> bool: + return self.finish_reason is not None + + def __repr__(self) -> str: + return (f"CompletionOutput(index={self.index}, " + f"text={self.text!r}, " + f"token_ids={self.token_ids}, " + f"cumulative_logprob={self.cumulative_logprob}, " + f"logprobs={self.logprobs}, " + f"finish_reason={self.finish_reason}, " + f"stop_reason={self.stop_reason})") + + +class RequestOutput: + """The output data of a request to the LLM. + + Args: + request_id: The unique ID of the request. + prompt: The prompt string of the request. + prompt_token_ids: The token IDs of the prompt. + prompt_logprobs: The log probabilities to return per prompt token. + outputs: The output sequences of the request. + finished: Whether the whole request is finished. + metrics: Metrics associated with the request. + lora_request: The LoRA request that was used to generate the output. 
+ """ + + def __init__( + self, + request_id: str, + prompt: str, + prompt_token_ids: List[int], + prompt_logprobs: Optional[PromptLogprobs], + outputs: List[CompletionOutput], + finished: bool, + metrics: Optional[RequestMetrics] = None, + lora_request: Optional[LoRARequest] = None, + ) -> None: + self.request_id = request_id + self.prompt = prompt + self.prompt_token_ids = prompt_token_ids + self.prompt_logprobs = prompt_logprobs + self.outputs = outputs + self.finished = finished + self.metrics = metrics + self.lora_request = lora_request + + @classmethod + def from_seq_group(cls, seq_group: SequenceGroup) -> "RequestOutput": + seqs = seq_group.get_seqs() + if len(seqs) == 1: + top_n_seqs = seqs + else: + # Get the top-n sequences. + n = seq_group.sampling_params.n + if seq_group.sampling_params.use_beam_search: + sorting_key = lambda seq: seq.get_beam_search_score( + seq_group.sampling_params.length_penalty) + else: + sorting_key = lambda seq: seq.get_cumulative_logprob() + sorted_seqs = sorted(seqs, key=sorting_key, reverse=True) + top_n_seqs = sorted_seqs[:n] + + # Create the outputs. + # NOTE: We need omit logprobs here explicitly because the sequence + # always has the logprobs of the sampled tokens even if the + # logprobs are not requested. + include_logprobs = seq_group.sampling_params.logprobs is not None + text_buffer_length = seq_group.sampling_params.output_text_buffer_length + outputs = [ + CompletionOutput(seqs.index(seq), + seq.get_output_text_to_return(text_buffer_length), + seq.get_output_token_ids(), + seq.get_cumulative_logprob(), + seq.output_logprobs if include_logprobs else None, + SequenceStatus.get_finished_reason(seq.status), + seq.stop_reason) for seq in top_n_seqs + ] + + # Every sequence in the sequence group should have the same prompt. 
+ prompt = seq_group.prompt + prompt_token_ids = seq_group.prompt_token_ids + prompt_logprobs = seq_group.prompt_logprobs + finished = seq_group.is_finished() + finished_time = time.time() if finished else None + seq_group.set_finished_time(finished_time) + return cls(seq_group.request_id, + prompt, + prompt_token_ids, + prompt_logprobs, + outputs, + finished, + seq_group.metrics, + lora_request=seq_group.lora_request) + + def __repr__(self) -> str: + return (f"RequestOutput(request_id={self.request_id}, " + f"prompt={self.prompt!r}, " + f"prompt_token_ids={self.prompt_token_ids}, " + f"prompt_logprobs={self.prompt_logprobs}, " + f"outputs={self.outputs}, " + f"finished={self.finished}, " + f"metrics={self.metrics}, " + f"lora_request={self.lora_request})") diff --git a/vllm/py.typed b/vllm/py.typed new file mode 100644 index 0000000..33b3ad7 --- /dev/null +++ b/vllm/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The vllm package uses inline types. diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py new file mode 100644 index 0000000..5fa94eb --- /dev/null +++ b/vllm/sampling_params.py @@ -0,0 +1,340 @@ +"""Sampling parameters for text generation.""" +import copy +from enum import IntEnum +from functools import cached_property +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from pydantic import Field +from typing_extensions import Annotated + +_SAMPLING_EPS = 1e-5 + + +class SamplingType(IntEnum): + GREEDY = 0 + RANDOM = 1 + RANDOM_SEED = 2 + BEAM = 3 + + +LogitsProcessor = Callable[[List[int], torch.Tensor], torch.Tensor] +"""LogitsProcessor is a function that takes a list of previously generated +tokens and a tensor of the logits for the next token, and returns a modified +tensor of logits to sample from.""" + + +class SamplingParams: + """Sampling parameters for text generation. 
+ + Overall, we follow the sampling parameters from the OpenAI text completion + API (https://platform.openai.com/docs/api-reference/completions/create). + In addition, we support beam search, which is not supported by OpenAI. + + Args: + n: Number of output sequences to return for the given prompt. + best_of: Number of output sequences that are generated from the prompt. + From these `best_of` sequences, the top `n` sequences are returned. + `best_of` must be greater than or equal to `n`. This is treated as + the beam width when `use_beam_search` is True. By default, `best_of` + is set to `n`. + presence_penalty: Float that penalizes new tokens based on whether they + appear in the generated text so far. Values > 0 encourage the model + to use new tokens, while values < 0 encourage the model to repeat + tokens. + frequency_penalty: Float that penalizes new tokens based on their + frequency in the generated text so far. Values > 0 encourage the + model to use new tokens, while values < 0 encourage the model to + repeat tokens. + repetition_penalty: Float that penalizes new tokens based on whether + they appear in the prompt and the generated text so far. Values > 1 + encourage the model to use new tokens, while values < 1 encourage + the model to repeat tokens. + temperature: Float that controls the randomness of the sampling. Lower + values make the model more deterministic, while higher values make + the model more random. Zero means greedy sampling. + top_p: Float that controls the cumulative probability of the top tokens + to consider. Must be in (0, 1]. Set to 1 to consider all tokens. + top_k: Integer that controls the number of top tokens to consider. Set + to -1 to consider all tokens. + min_p: Float that represents the minimum probability for a token to be + considered, relative to the probability of the most likely token. + Must be in [0, 1]. Set to 0 to disable this. + seed: Random seed to use for the generation. 
+ use_beam_search: Whether to use beam search instead of sampling. + length_penalty: Float that penalizes sequences based on their length. + Used in beam search. + early_stopping: Controls the stopping condition for beam search. It + accepts the following values: `True`, where the generation stops as + soon as there are `best_of` complete candidates; `False`, where an + heuristic is applied and the generation stops when is it very + unlikely to find better candidates; `"never"`, where the beam search + procedure only stops when there cannot be better candidates + (canonical beam search algorithm). + stop: List of strings that stop the generation when they are generated. + The returned output will not contain the stop strings. + stop_token_ids: List of tokens that stop the generation when they are + generated. The returned output will contain the stop tokens unless + the stop tokens are special tokens. + include_stop_str_in_output: Whether to include the stop strings in + output text. Defaults to False. + ignore_eos: Whether to ignore the EOS token and continue generating + tokens after the EOS token is generated. + max_tokens: Maximum number of tokens to generate per output sequence. + min_tokens: Minimum number of tokens to generate per output sequence + before EOS or stop_token_ids can be generated + logprobs: Number of log probabilities to return per output token. + Note that the implementation follows the OpenAI API: The return + result includes the log probabilities on the `logprobs` most likely + tokens, as well the chosen tokens. The API will always return the + log probability of the sampled token, so there may be up to + `logprobs+1` elements in the response. + prompt_logprobs: Number of log probabilities to return per prompt token. + detokenize: Whether to detokenize the output. Defaults to True. + skip_special_tokens: Whether to skip special tokens in the output. 
def __init__(
    self,
    n: int = 1,
    best_of: Optional[int] = None,
    presence_penalty: float = 0.0,
    frequency_penalty: float = 0.0,
    repetition_penalty: float = 1.0,
    temperature: float = 1.0,
    top_p: float = 1.0,
    top_k: int = -1,
    min_p: float = 0.0,
    seed: Optional[int] = None,
    use_beam_search: bool = False,
    length_penalty: float = 1.0,
    early_stopping: Union[bool, str] = False,
    stop: Optional[Union[str, List[str]]] = None,
    stop_token_ids: Optional[List[int]] = None,
    include_stop_str_in_output: bool = False,
    ignore_eos: bool = False,
    max_tokens: Optional[int] = 16,
    min_tokens: int = 0,
    logprobs: Optional[int] = None,
    prompt_logprobs: Optional[int] = None,
    detokenize: bool = True,
    skip_special_tokens: bool = True,
    spaces_between_special_tokens: bool = True,
    logits_processors: Optional[List[LogitsProcessor]] = None,
    truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
) -> None:
    """Normalize and validate the sampling parameters.

    ``best_of`` falls back to ``n``, ``seed == -1`` means "unseeded",
    ``stop``/``stop_token_ids`` are coerced to lists, and the resulting
    state is cross-checked by the ``_verify_*`` helpers.
    """
    self.n = n
    # best_of defaults to n when not supplied.
    self.best_of = n if best_of is None else best_of
    self.presence_penalty = presence_penalty
    self.frequency_penalty = frequency_penalty
    self.repetition_penalty = repetition_penalty
    self.temperature = temperature
    self.top_p = top_p
    self.top_k = top_k
    self.min_p = min_p
    # -1 is accepted as an alias for "no seed".
    self.seed = None if seed == -1 else seed
    self.use_beam_search = use_beam_search
    self.length_penalty = length_penalty
    self.early_stopping = early_stopping
    # Normalize `stop` to a list of strings.
    if stop is None:
        self.stop = []
    elif isinstance(stop, str):
        self.stop = [stop]
    else:
        self.stop = list(stop)
    self.stop_token_ids = ([] if stop_token_ids is None else
                           list(stop_token_ids))
    self.ignore_eos = ignore_eos
    self.max_tokens = max_tokens
    self.min_tokens = min_tokens
    self.logprobs = logprobs
    self.prompt_logprobs = prompt_logprobs
    # NOTE: This parameter is only exposed at the engine level for now.
    # It is not exposed in the OpenAI API server, as the OpenAI API does
    # not support returning only a list of token IDs.
    self.detokenize = detokenize
    self.skip_special_tokens = skip_special_tokens
    self.spaces_between_special_tokens = spaces_between_special_tokens
    self.logits_processors = logits_processors
    self.include_stop_str_in_output = include_stop_str_in_output
    self.truncate_prompt_tokens = truncate_prompt_tokens
    # Number of characters to hold back for stop string evaluation
    # until sequence is finished.
    if self.stop and not include_stop_str_in_output:
        self.output_text_buffer_length = max(
            len(stop_str) for stop_str in self.stop) - 1
    else:
        self.output_text_buffer_length = 0

    self._verify_args()
    if self.use_beam_search:
        self._verify_beam_search()
    else:
        self._verify_non_beam_search()
    if self.temperature < _SAMPLING_EPS:
        # Zero temperature means greedy sampling: force the other
        # stochastic knobs to their no-op values before validating.
        self.top_p = 1.0
        self.top_k = -1
        self.min_p = 0.0
        self._verify_greedy_sampling()
    # eos_token_id is added to this by the engine
    self.all_stop_token_ids = set(self.stop_token_ids)
def _verify_beam_search(self) -> None:
    """Check constraints that only make sense with ``use_beam_search``.

    Beam search needs more than one candidate (``best_of > 1``) and
    requires the stochastic sampling knobs to be at their neutral
    values (temperature 0, top_p 1, top_k disabled).
    """
    if self.best_of == 1:
        raise ValueError("best_of must be greater than 1 when using beam "
                         f"search. Got {self.best_of}.")
    # _SAMPLING_EPS is the float tolerance used for "equals 0"/"equals 1".
    if self.temperature > _SAMPLING_EPS:
        raise ValueError("temperature must be 0 when using beam search.")
    if self.top_p < 1.0 - _SAMPLING_EPS:
        raise ValueError("top_p must be 1 when using beam search.")
    if self.top_k != -1:
        raise ValueError("top_k must be -1 when using beam search.")
    # early_stopping may also be the literal string "never" (HF semantics).
    if self.early_stopping not in [True, False, "never"]:
        raise ValueError(
            f"early_stopping must be True, False, or 'never', "
            f"got {self.early_stopping}.")

def _verify_non_beam_search(self) -> None:
    """Check that beam-search-only knobs are left at their defaults."""
    if self.early_stopping is not False:
        raise ValueError("early_stopping is not effective and must be "
                         "False when not using beam search.")
    # length_penalty must equal 1.0 up to float tolerance.
    if (self.length_penalty < 1.0 - _SAMPLING_EPS
            or self.length_penalty > 1.0 + _SAMPLING_EPS):
        raise ValueError(
            "length_penalty is not effective and must be the "
            "default value of 1.0 when not using beam search.")
def update_from_generation_config(
        self, generation_config: Dict[str, Any]) -> None:
    """Update if there are non-default values from generation_config.

    Merges the model's ``eos_token_id`` (either an int or a list of
    ints) into ``stop_token_ids`` unless ``ignore_eos`` is set.
    """
    if self.ignore_eos:
        return
    eos_ids = generation_config.get("eos_token_id")
    # NOTE: compare against None rather than truthiness so a model whose
    # EOS token id is 0 is still honored (0 is falsy and was previously
    # skipped).
    if eos_ids is None:
        return
    # It can be either int or list of int.
    if isinstance(eos_ids, int):
        eos_ids = [eos_ids]
    merged_stop_token_ids = set(self.stop_token_ids)
    merged_stop_token_ids.update(eos_ids)
    self.stop_token_ids = list(merged_stop_token_ids)

@cached_property
def sampling_type(self) -> SamplingType:
    """Classify these parameters into a ``SamplingType`` bucket."""
    if self.use_beam_search:
        return SamplingType.BEAM
    if self.temperature < _SAMPLING_EPS:
        # Zero temperature means greedy sampling.
        return SamplingType.GREEDY
    if self.seed is not None:
        return SamplingType.RANDOM_SEED
    return SamplingType.RANDOM
# Method of class `SamplingParams` (class header outside this hunk).

def __repr__(self) -> str:
    # Mirrors the constructor arguments; `detokenize` and
    # `logits_processors` are deliberately not shown here.
    return (
        f"SamplingParams(n={self.n}, "
        f"best_of={self.best_of}, "
        f"presence_penalty={self.presence_penalty}, "
        f"frequency_penalty={self.frequency_penalty}, "
        f"repetition_penalty={self.repetition_penalty}, "
        f"temperature={self.temperature}, "
        f"top_p={self.top_p}, "
        f"top_k={self.top_k}, "
        f"min_p={self.min_p}, "
        f"seed={self.seed}, "
        f"use_beam_search={self.use_beam_search}, "
        f"length_penalty={self.length_penalty}, "
        f"early_stopping={self.early_stopping}, "
        f"stop={self.stop}, "
        f"stop_token_ids={self.stop_token_ids}, "
        f"include_stop_str_in_output={self.include_stop_str_in_output}, "
        f"ignore_eos={self.ignore_eos}, "
        f"max_tokens={self.max_tokens}, "
        f"min_tokens={self.min_tokens}, "
        f"logprobs={self.logprobs}, "
        f"prompt_logprobs={self.prompt_logprobs}, "
        f"skip_special_tokens={self.skip_special_tokens}, "
        "spaces_between_special_tokens="
        f"{self.spaces_between_special_tokens}, "
        f"truncate_prompt_tokens={self.truncate_prompt_tokens})")
class SequenceStatus(enum.Enum):
    """Lifecycle state of a sequence."""
    WAITING = enum.auto()
    RUNNING = enum.auto()
    SWAPPED = enum.auto()
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    @staticmethod
    def is_finished(status: "SequenceStatus") -> bool:
        """True for any of the FINISHED_* states."""
        return status in (
            SequenceStatus.FINISHED_STOPPED,
            SequenceStatus.FINISHED_LENGTH_CAPPED,
            SequenceStatus.FINISHED_ABORTED,
            SequenceStatus.FINISHED_IGNORED,
        )

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Union[str, None]:
        """Map a finished status to its OpenAI-style finish reason.

        Ignored sequences (prompt longer than the model's length cap)
        report "length", matching the OpenAI API. Unfinished statuses
        map to None.
        """
        reason_by_status = {
            SequenceStatus.FINISHED_STOPPED: "stop",
            SequenceStatus.FINISHED_LENGTH_CAPPED: "length",
            SequenceStatus.FINISHED_ABORTED: "abort",
            SequenceStatus.FINISHED_IGNORED: "length",
        }
        return reason_by_status.get(status)


class SequenceStage(enum.Enum):
    """Whether a sequence is still prefilling or already decoding."""
    PREFILL = enum.auto()
    DECODE = enum.auto()


@dataclass
class RequestMetrics:
    """Timing metrics associated with a request.

    Attributes:
        arrival_time: The time when the request arrived.
        last_token_time: The time when the latest token was generated.
        first_scheduled_time: The time when the request was first scheduled.
        first_token_time: The time when the first token was generated.
        time_in_queue: The time the request spent in the queue.
        finished_time: The time when the request was finished.
    """
    arrival_time: float
    last_token_time: float
    first_scheduled_time: Optional[float]
    first_token_time: Optional[float]
    time_in_queue: Optional[float]
    finished_time: Optional[float] = None


class SequenceData:
    """Token-level data associated with a sequence.

    Args:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs of the output. Set to an empty
            list if None.

    Attributes:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs of the output.
        cumulative_logprob: The cumulative log probability of the output.
    """

    def __init__(
        self,
        prompt_token_ids: List[int],
        output_token_ids: Optional[List[int]] = None,
    ) -> None:
        self.prompt_token_ids = prompt_token_ids
        # When a list is supplied the caller's object is kept (no copy).
        self.output_token_ids = ([] if output_token_ids is None else
                                 output_token_ids)
        self.cumulative_logprob = 0.0
        # Tokens already run against the model (prefill progress).
        self._num_computed_tokens = 0
        self._stage: SequenceStage = SequenceStage.PREFILL

    def append_token_id(self, token_id: int, logprob: float) -> None:
        """Record one generated token and fold in its logprob."""
        self.output_token_ids.append(token_id)
        self.cumulative_logprob += logprob

    def get_len(self) -> int:
        return len(self.prompt_token_ids) + len(self.output_token_ids)

    def get_prompt_len(self) -> int:
        return len(self.prompt_token_ids)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)

    def get_token_ids(self) -> List[int]:
        # Prompt tokens followed by output tokens, as a new list.
        return self.prompt_token_ids + self.output_token_ids

    def get_num_computed_tokens(self) -> int:
        """Return the number of prefill tokens that are already computed."""
        return self._num_computed_tokens

    def update_num_computed_tokens(self, num_new_computed_tokens: int):
        """Update number of tokens computed so far."""
        self._num_computed_tokens += num_new_computed_tokens
        assert self._num_computed_tokens <= self.get_len(), (
            self._num_computed_tokens, self.get_len())
        # Once nothing is left to compute, the sequence is decoding.
        if self.get_num_uncomputed_tokens() == 0:
            self._stage = SequenceStage.DECODE

    def reset_state_for_recompute(self) -> None:
        """Rewind to an un-computed PREFILL state.

        Called when a sequence must be started from the beginning again
        (e.g. after preemption).
        """
        self._num_computed_tokens = 0
        self._stage = SequenceStage.PREFILL

    def get_num_uncomputed_tokens(self) -> int:
        """Return the number of prefill tokens that are not computed.

        Based on the full length (prompt + output) rather than the
        prompt alone, because a recompute must prefill the previously
        generated output tokens as well.
        """
        return self.get_len() - self.get_num_computed_tokens()

    def get_last_token_id(self) -> int:
        # Fall back to the last prompt token before any output exists.
        if self.output_token_ids:
            return self.output_token_ids[-1]
        return self.prompt_token_ids[-1]

    def get_prompt_token_ids(self) -> List[int]:
        return self.prompt_token_ids

    def get_output_token_ids(self) -> List[int]:
        return self.output_token_ids

    @property
    def stage(self) -> SequenceStage:
        return self._stage

    def __repr__(self) -> str:
        return (f"SequenceData("
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"output_token_ids={self.output_token_ids}, "
                f"cumulative_logprob={self.cumulative_logprob})")
# Methods of class `Sequence` (class header outside this hunk).

@property
def lora_int_id(self) -> int:
    # 0 means "no LoRA adapter attached".
    return self.lora_request.lora_int_id if self.lora_request else 0

def get_output_text_to_return(self, buffer_length: int):
    """Return the output text, holding back `buffer_length` trailing
    characters while the sequence is unfinished (so a partially matched
    stop string is not surfaced to the caller)."""
    # We return the full output text if the sequence is finished.
    truncate = buffer_length and not self.is_finished()
    return self.output_text[:-buffer_length] if truncate else (
        self.output_text)

def hash_of_block(self, logical_idx: int) -> int:
    """Hash of all token ids covered up to logical block `logical_idx`,
    combined with the LoRA id (used as a prefix-caching key)."""
    # TODO This can produce incorrect hash when block size > prompt size

    # Compute the number of tokens in the sequence
    # TODO: The current hashing function is O(L^2). We should optimize
    # this in the future.
    num_tokens = self.num_hashed_tokens_of_block(logical_idx)
    return hash(
        (tuple(self.data.get_token_ids()[0:num_tokens]), self.lora_int_id))

def num_hashed_tokens_of_block(self, logical_idx: int):
    # Token count up to and including block `logical_idx`.
    return logical_idx * self.block_size + self.block_size

def reset_state_for_recompute(self):
    """Reset the sequence states for recomputation."""
    self.data.reset_state_for_recompute()

def _append_logical_block(self) -> None:
    # Grow the logical block table by one empty block.
    block = LogicalTokenBlock(
        block_number=len(self.logical_token_blocks),
        block_size=self.block_size,
    )
    self.logical_token_blocks.append(block)

def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
    """Spread `token_ids` across the logical blocks, allocating new
    blocks as existing ones fill up."""
    cursor = 0
    while cursor < len(token_ids):
        if not self.logical_token_blocks:
            self._append_logical_block()

        last_block = self.logical_token_blocks[-1]
        if last_block.is_full():
            self._append_logical_block()
            last_block = self.logical_token_blocks[-1]

        num_empty_slots = last_block.get_num_empty_slots()
        last_block.append_tokens(token_ids[cursor:cursor +
                                           num_empty_slots])
        cursor += num_empty_slots

def append_token_id(
    self,
    token_id: int,
    logprobs: Dict[int, Logprob],
) -> None:
    # The sampled token must be among the returned logprobs.
    assert token_id in logprobs
    self._append_tokens_to_blocks([token_id])
    self.output_logprobs.append(logprobs)
    self.data.append_token_id(token_id, logprobs[token_id].logprob)

def get_len(self) -> int:
    return self.data.get_len()

def get_prompt_len(self) -> int:
    return self.data.get_prompt_len()

def get_output_len(self) -> int:
    return self.data.get_output_len()

def get_token_ids(self) -> List[int]:
    return self.data.get_token_ids()

def get_prompt_token_ids(self) -> List[int]:
    return self.data.get_prompt_token_ids()

def get_last_token_id(self) -> int:
    return self.data.get_last_token_id()

def get_output_token_ids(self) -> List[int]:
    return self.data.output_token_ids

def get_cumulative_logprob(self) -> float:
    return self.data.cumulative_logprob

def get_beam_search_score(self,
                          length_penalty: float = 1.0,
                          seq_len: Optional[int] = None,
                          eos_token_id: Optional[int] = None) -> float:
    """Calculate the beam search score with length penalty.

    Adapted from

    https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
    """
    if seq_len is None:
        seq_len = self.get_len()
        # NOTE: HF implementation does not count the EOS token
        # towards the length, we align with that here for testing.
        if (eos_token_id is not None
                and self.get_last_token_id() == eos_token_id):
            seq_len -= 1
    return self.get_cumulative_logprob() / (seq_len**length_penalty)

def is_finished(self) -> bool:
    return SequenceStatus.is_finished(self.status)

def fork(self, new_seq_id: int) -> "Sequence":
    # Deep copy so the fork's blocks/logprobs evolve independently.
    new_seq = copy.deepcopy(self)
    new_seq.seq_id = new_seq_id
    return new_seq

def get_num_new_tokens(self) -> int:
    """Get the number of new tokens to be computed.

    Returns:
        The new number of tokens to be computed. I.e., 1 for decode, or
        the remaining prompt size for prefill.
    """
    if self.data.stage == SequenceStage.DECODE:
        return 1
    return self.data.get_num_uncomputed_tokens()

def is_prefill(self) -> bool:
    return self.data.stage == SequenceStage.PREFILL

def __repr__(self) -> str:
    return (f"Sequence(seq_id={self.seq_id}, "
            f"status={self.status.name}, "
            f"num_blocks={len(self.logical_token_blocks)})")


@dataclass
class SequenceGroupState:
    """Mutable state tied to a specific sequence group"""

    # torch.Generator used in seeded sampling
    generator: Optional = None  # type: ignore


class MultiModalData:
    """Multi modal request.

    Args:
        type: The data type.
        data: The actual data.
        The required shape and semantic meaning of it depends on the vision
        language config of the hosted model.
        See `VisionLanguageConfig` in `config.py`.
    """

    class Type(enum.Enum):
        IMAGE = enum.auto()

    def __init__(self, type: Type, data: "torch.Tensor"):
        self.type = type
        self.data = data
+ """ + + def __init__( + self, + request_id: str, + seqs: List[Sequence], + sampling_params: SamplingParams, + arrival_time: float, + lora_request: Optional[LoRARequest] = None, + multi_modal_data: Optional[MultiModalData] = None, + ) -> None: + self.request_id = request_id + self.seqs_dict = {seq.seq_id: seq for seq in seqs} + self.sampling_params = sampling_params + self.metrics = RequestMetrics(arrival_time=arrival_time, + last_token_time=arrival_time, + first_scheduled_time=None, + first_token_time=None, + time_in_queue=None) + self.lora_request = lora_request + self.prompt_logprobs: Optional[PromptLogprobs] = None + self.state = SequenceGroupState() + self.multi_modal_data = multi_modal_data + + @property + def prompt(self) -> str: + # All sequences in the group should have the same prompt. + # We use the prompt of an arbitrary sequence. + return next(iter(self.seqs_dict.values())).prompt + + @property + def prompt_token_ids(self) -> List[int]: + # All sequences in the group should have the same prompt. + # We use the prompt of an arbitrary sequence. + return next(iter(self.seqs_dict.values())).data.prompt_token_ids + + @property + def lora_int_id(self) -> int: + return self.lora_request.lora_int_id if self.lora_request else 0 + + def get_last_latency(self, now: float) -> Optional[float]: + """Sets the last token time for Request level timings.""" + # If still in prefill phase, raise Error. + if self.is_prefill(): + raise ValueError( + "seq_group.get_last_latency() should not be called " + "if the seq_group is in prefill phase.") + + # Otherwise return token latency. 
+ latency = now - self.metrics.last_token_time + self.metrics.last_token_time = now + return latency + + def maybe_set_first_token_time(self, time: float) -> None: + """Sets the first token time for Request level timings.""" + # Note: in a case where a sequence_group is swapped and + # recomputed, the time between iterations is counted + # in TPOT, rather than recalculating TTFT (since from the ) + # POV of the user, there is simply a long generation delay. + if (self.metrics.first_token_time is None + and self.get_seqs()[0].get_output_len() == 1): + self.metrics.first_token_time = time + + def maybe_set_first_scheduled_time(self, time: float) -> None: + """Sets the first scheduled time and time in queue for Request + level timings.""" + if self.metrics.first_scheduled_time is None: + self.metrics.first_scheduled_time = time + self.metrics.time_in_queue = time - self.metrics.arrival_time + + def set_finished_time(self, time: Optional[float]) -> None: + """Sets the finished time for Request level timings.""" + self.metrics.finished_time = time + + def get_max_num_running_seqs(self) -> int: + """The maximum number of sequences running in parallel in the remaining + lifetime of the request.""" + if self.sampling_params.use_beam_search: + # For beam search, maximally there will always be `best_of` beam + # candidates running in the future. + return self.sampling_params.best_of + else: + if self.sampling_params.best_of > self.num_seqs(): + # At prompt stage, the sequence group is not yet filled up + # and only have one sequence running. However, in the + # generation stage, we will have `best_of` sequences running. + return self.sampling_params.best_of + # At sampling stages, return the number of actual sequences + # that are not finished yet. 
# Methods of class `SequenceGroup` (class header outside this hunk).

def get_seqs(
    self,
    status: Optional[SequenceStatus] = None,
) -> List[Sequence]:
    """All sequences in the group, optionally filtered by status."""
    if status is None:
        return list(self.seqs_dict.values())
    return [seq for seq in self.seqs_dict.values() if seq.status == status]

def get_unfinished_seqs(self) -> List[Sequence]:
    return [seq for seq in self.seqs_dict.values() if not seq.is_finished()]

def get_finished_seqs(self) -> List[Sequence]:
    return [seq for seq in self.seqs_dict.values() if seq.is_finished()]

def update_num_computed_tokens(self, num_new_computed_tokens: int):
    """Update number of tokens computed so far."""
    # Finished sequences no longer take part in computation.
    for seq in self.seqs_dict.values():
        if seq.is_finished():
            continue
        seq.data.update_num_computed_tokens(num_new_computed_tokens)

def get_num_uncomputed_tokens(self) -> int:
    """Total uncomputed (prefill) tokens across unfinished sequences."""
    return sum(seq.data.get_num_uncomputed_tokens()
               for seq in self.get_seqs() if not seq.is_finished())
# Methods of class `SequenceGroup` (class header outside this hunk).

def num_unfinished_seqs(self) -> int:
    return len(self.get_unfinished_seqs())

def num_finished_seqs(self) -> int:
    return len(self.get_finished_seqs())

def find(self, seq_id: int) -> Sequence:
    """Look up a sequence by id; raise ValueError when absent."""
    if seq_id in self.seqs_dict:
        return self.seqs_dict[seq_id]
    raise ValueError(f"Sequence {seq_id} not found.")

def add(self, seq: Sequence) -> None:
    """Add a sequence to the group; raise ValueError on a duplicate id."""
    if seq.seq_id in self.seqs_dict:
        raise ValueError(f"Sequence {seq.seq_id} already exists.")
    self.seqs_dict[seq.seq_id] = seq

def remove(self, seq_id: int) -> None:
    """Remove a sequence by id; raise ValueError when absent."""
    if seq_id not in self.seqs_dict:
        raise ValueError(f"Sequence {seq_id} not found.")
    del self.seqs_dict[seq_id]

def is_finished(self) -> bool:
    """True once every sequence in the group has finished."""
    return all(seq.is_finished() for seq in self.get_seqs())

def is_prefill(self) -> bool:
    # Every sequences should be in the same stage.
    return self.get_seqs()[0].is_prefill()

def __repr__(self) -> str:
    return (f"SequenceGroup(request_id={self.request_id}, "
            f"sampling_params={self.sampling_params}, "
            f"num_seqs={len(self.seqs_dict)})")
# Methods of class `SequenceGroupMetadata` (class header outside this hunk).

def __init__(
    self,
    request_id: str,
    is_prompt: bool,
    seq_data: Dict[int, SequenceData],
    sampling_params: SamplingParams,
    block_tables: Dict[int, List[int]],
    do_sample: bool = True,
    token_chunk_size: Optional[int] = None,
    lora_request: Optional[LoRARequest] = None,
    computed_block_nums: Optional[List[int]] = None,
    state: Optional[SequenceGroupState] = None,
    multi_modal_data: Optional[MultiModalData] = None,
) -> None:
    self.request_id = request_id
    self.is_prompt = is_prompt
    self.seq_data = seq_data
    self.sampling_params = sampling_params
    self.block_tables = block_tables
    self.lora_request = lora_request
    self.computed_block_nums = computed_block_nums
    self.multi_modal_data = multi_modal_data
    # A fresh mutable state object is created when none is shared in.
    self.state = SequenceGroupState() if state is None else state
    self._token_chunk_size = token_chunk_size
    self.do_sample = do_sample

    # Default chunk size: the whole sequence at prompt (prefill) stage,
    # a single token at decode stage.
    if self._token_chunk_size is None:
        if is_prompt:
            self._token_chunk_size = list(seq_data.values())[0].get_len()
        else:
            self._token_chunk_size = 1

@property
def lora_int_id(self) -> int:
    # 0 means "no LoRA adapter attached".
    return self.lora_request.lora_int_id if self.lora_request else 0

@property
def token_chunk_size(self) -> Optional[int]:
    """Return the number of tokens to be processed (chunk size)."""
    return self._token_chunk_size
class SequenceGroupOutput:
    """The model output associated with a sequence group.

    Args:
        samples: One `SequenceOutput` candidate per next-token sample.
        prompt_logprobs: Prompt logprob for each prompt query token, or
            None when prompt logprobs were not requested.
    """

    def __init__(
        self,
        samples: List[SequenceOutput],
        prompt_logprobs: Optional[PromptLogprobs],
    ) -> None:
        self.samples = samples
        # Prompt logprob for each prompt query token.
        self.prompt_logprobs = prompt_logprobs

    def __repr__(self) -> str:
        return (f"SequenceGroupOutput(samples={self.samples}, "
                f"prompt_logprobs={self.prompt_logprobs})")

    def __eq__(self, other: object) -> bool:
        # Follow the data-model convention: return NotImplemented for
        # unrelated types instead of raising, so such comparisons fall
        # back to identity rather than crashing. This also matches
        # SamplerOutput.__eq__ in this module.
        if not isinstance(other, SequenceGroupOutput):
            return NotImplemented
        return (self.samples == other.samples
                and self.prompt_logprobs == other.prompt_logprobs)
# Tail of dataclass `SamplerOutput` (class header outside this hunk).

# On-device tensor containing the logprobs of each token.
logprobs: Optional["torch.Tensor"] = None

# On-device tensor containing the sampled token ids.
sampled_token_ids: Optional["torch.Tensor"] = None

# Spec decode metrics populated by workers.
spec_decode_worker_metrics: Optional["SpecDecodeWorkerMetrics"] = None

def __getitem__(self, idx: int):
    # List-style access delegates to the per-group outputs.
    return self.outputs[idx]

def __setitem__(self, idx: int, value):
    self.outputs[idx] = value

def __len__(self):
    return len(self.outputs)

def __eq__(self, other: object):
    # Equality compares only `outputs`; the device tensors and metrics
    # are deliberately left out of the comparison.
    return isinstance(other,
                      self.__class__) and self.outputs == other.outputs

def __repr__(self) -> str:
    """Show the shape of a tensor instead of its values to reduce noise.
    """
    sampled_token_probs_repr = ("None" if self.sampled_token_probs is None
                                else self.sampled_token_probs.shape)
    sampled_token_ids_repr = ("None" if self.sampled_token_ids is None else
                              self.sampled_token_ids.shape)
    return (
        f"SamplerOutput(outputs={self.outputs}, "
        f"sampled_token_probs={sampled_token_probs_repr}, "
        f"sampled_token_ids={sampled_token_ids_repr}, "
        f"spec_decode_worker_metrics={self.spec_decode_worker_metrics})")
# Tail of dataclass `ExecuteModelRequest` (class header outside this hunk).

# The number of requests in the running queue.
running_queue_size: int = 0

def clone(
    self, seq_group_metadata_list: List[SequenceGroupMetadata]
) -> "ExecuteModelRequest":
    """Clone the request with a new sequence group metadata list.

    The block maps are shallow-copied (`dict.copy`) so the clone's maps
    can be mutated independently; the scalar fields are carried over
    unchanged.
    """
    return ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        blocks_to_swap_in=self.blocks_to_swap_in.copy(),
        blocks_to_swap_out=self.blocks_to_swap_out.copy(),
        blocks_to_copy=self.blocks_to_copy.copy(),
        num_lookahead_slots=self.num_lookahead_slots,
        running_queue_size=self.running_queue_size,
    )
+ """ + + def __init__(self, scorer_worker: WorkerBase, device: str, + vocab_size: int): + self._scorer_worker = scorer_worker + self._device = device + self._vocab_size = vocab_size + + @nvtx_range("BatchExpansionTop1Scorer.score_proposals") + def score_proposals( + self, + execute_model_req: ExecuteModelRequest, + proposals: SpeculativeProposals, + ) -> SpeculativeScores: + """Score the proposed tokens via the scorer model. + + This converts each input sequence to a set of k+1 target sequences. The + target sequences have the unique continuations to be scored and a + unique sequence ID that is different from all input sequence ids. + + If a speculative sequence length would exceed the max model length, then + no speculation is produced for that sequence. + + Args: + execute_model_req: The execution request. + proposals: The speculative proposals to score. + Returns: + SpeculativeScores: The scores of each speculative token, along with + which sequences were ignored during scoring. + """ + + # TODO(cade) perform this on GPU to remove blocking call. + proposal_lens_list = proposals.proposal_lens.tolist() + proposal_token_ids_list = proposals.proposal_token_ids.tolist() + + # Filter the list to ignore -1 proposals. 
+ proposal_token_ids_list_without_skips = [ + proposals for proposals in proposal_token_ids_list + if -1 not in proposals + ] + + (spec_indices, non_spec_indices, target_seq_group_metadata_list, + num_scoring_tokens) = self._expand_batch( + seq_group_metadata_list=execute_model_req.seq_group_metadata_list, + proposal_token_ids_list=proposal_token_ids_list_without_skips, + proposal_lens_list=proposal_lens_list, + ) + + target_sampler_output = self._scorer_worker.execute_model( + execute_model_req=execute_model_req.clone( + seq_group_metadata_list=target_seq_group_metadata_list, )) + assert len(target_sampler_output) == 1, "expected single-step output" + target_sampler_output = target_sampler_output[0] + + all_tokens, all_probs, spec_logprobs = self._contract_batch( + contracted_bs=len(execute_model_req.seq_group_metadata_list), + target_sampler_output=target_sampler_output, + proposals=proposals, + num_scoring_tokens=num_scoring_tokens, + non_spec_indices=non_spec_indices, + spec_indices=spec_indices, + k=execute_model_req.num_lookahead_slots, + ) + + return SpeculativeScores( + probs=all_probs, + token_ids=all_tokens, + logprobs=spec_logprobs, + ) + + def _expand_batch( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + proposal_token_ids_list: List[List[TokenId]], + proposal_lens_list: List[int], + ) -> Tuple[List[int], List[int], List[SequenceGroupMetadata], int]: + """Given the input sequences and potentially multiple corresponding + proposal tokens, create a new batch where each sequence has a single + query token. + """ + + # vLLM currently only supports proposal lens equal to zero or the batch + # proposal len. This adds some complexity (splitting the batch into spec + # and non spec sequences) and should be removed in the future. It can be + # done by supporting per-sequence proposal lens. 
+ spec_seqs, spec_indices = split_batch_by_proposal_len( + seq_group_metadata_list, + proposal_lens_list, + select_proposal_len_zero=False) + non_spec_seqs, non_spec_indices = split_batch_by_proposal_len( + seq_group_metadata_list, + proposal_lens_list, + select_proposal_len_zero=True) + + target_seq_group_metadata_list = self._create_scoring_model_input( + seq_group_metadata_list=spec_seqs, + proposal_token_ids=proposal_token_ids_list, + # NOTE: We determine the seq ids in the expanded batch using the + # full seq_group_metadata_list, instead of only spec_seqs. + target_seq_ids_iter=self._create_target_seq_id_iterator( + seq_ids=get_all_seq_ids(seq_group_metadata_list)), + ) + + num_scoring_tokens = len(target_seq_group_metadata_list) + target_seq_group_metadata_list.extend(non_spec_seqs) + + return (spec_indices, non_spec_indices, target_seq_group_metadata_list, + num_scoring_tokens) + + def _contract_batch( + self, contracted_bs: int, + target_sampler_output: List[SamplerOutput], + proposals: SpeculativeProposals, num_scoring_tokens: int, + non_spec_indices: List[int], spec_indices: List[int], + k: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Contract the expanded batch back into its original size. + This maps the scores of speculative tokens back to their original + sequences. + + contracted_bs is the original batch size, and the batch size that the + target_sampler_output will be contracted to. + """ + (target_token_ids, target_probs, target_logprobs, + non_spec_target_token_ids, non_spec_target_probs, + non_spec_target_logprobs) = self._split_scoring_output( + target_sampler_output, num_scoring_tokens) + + # Map distinct sequences used to score each token + # of shape [batch_size * k + 1] back to [batch_size, k + 1]. 
+ expanded_batch_size, k = proposals.proposal_token_ids.shape + + # The number of tokens in the expanded batch used for speculation is + # equal to the total expanded batch size minus the number of samples for + # non-speculative sequences. + non_spec_expanded_bs, _ = non_spec_target_token_ids.shape + spec_expanded_bs = expanded_batch_size - non_spec_expanded_bs + + target_token_ids = target_token_ids.squeeze().reshape( + spec_expanded_bs, k + 1) + target_probs = target_probs.squeeze().reshape(spec_expanded_bs, k + 1, + self._vocab_size) + target_logprobs = target_logprobs.squeeze().reshape( + spec_expanded_bs, k + 1, self._vocab_size) + + all_tokens = torch.full(size=(contracted_bs, k + 1), + fill_value=-1, + device=self._device, + dtype=torch.long) + all_probs = torch.zeros(contracted_bs, + k + 1, + self._vocab_size, + device=self._device, + dtype=torch.float32) + all_logprobs = torch.full(size=( + contracted_bs, + k + 1, + self._vocab_size, + ), + fill_value=-float("inf"), + device=self._device, + dtype=torch.float32) + + if non_spec_indices: + all_tokens[non_spec_indices, :1] = non_spec_target_token_ids + all_probs[non_spec_indices, :1, :] = non_spec_target_probs + all_logprobs[non_spec_indices, :1, :] = non_spec_target_logprobs + + if spec_indices: + all_tokens[spec_indices] = target_token_ids + all_probs[spec_indices] = target_probs + all_logprobs[spec_indices] = target_logprobs + + return all_tokens, all_probs, all_logprobs + + def _create_scoring_model_input( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + proposal_token_ids: List[List[TokenId]], # shape: [batch_size, k] + target_seq_ids_iter: Iterator[TargetSeqId], + ) -> List[SequenceGroupMetadata]: + """Given the original input sequences and proposed tokens from the draft + model, create a list of target sequences that can be used for scoring. 
+ + target_seq_ids_iter provides sequence ids for the expanded batch, + fulfilling the requirement that no seq id in the expanded batch is equal + to the seq id in the original batch. + """ + + if not seq_group_metadata_list: + return [] + + target_seq_group_metadata = list( + chain.from_iterable( + self._create_target_seq_group_metadata( + seq_group_metadata, + proposal_token_ids, + i, + target_seq_ids_iter, + ) for i, seq_group_metadata in enumerate( + seq_group_metadata_list))) + + return target_seq_group_metadata + + def _create_target_seq_group_metadata( + self, + input_seq_group_metadata: SequenceGroupMetadata, + proposal_token_ids: List[List[TokenId]], # shape: [batch_size, k] + batch_index: int, + target_seq_ids_iter: Iterator[TargetSeqId], + ) -> List[SequenceGroupMetadata]: + """Given an input sequence group metadata and a list of draft tokens, + create a list of target SequenceGroupMetadata, one for each + token id that needs to be scored. + + Naive speculative decoding requires K target model scores, one for each + draft model token. However one can add a bonus token such that if each + token is accepted, then a final token may be sampled from the model. + This function creates K+1 target SequenceGroupMetadata to take + advantage of the bonus token. 
+ """ + assert not input_seq_group_metadata.is_prompt, ( + "Speculating on " + "prompts not yet supported") + assert len(input_seq_group_metadata.seq_data) == 1, ( + "Beam search " + "not supported in speculative decoding") + input_seq_id = next(iter(input_seq_group_metadata.seq_data.keys())) + + token_ids_to_score = self._get_token_ids_to_score( + proposal_token_ids[batch_index]) + + target_seq_group_metadata_list: List[SequenceGroupMetadata] = [] + for token_ids in token_ids_to_score: + target_seq_group_metadata_list.append( + self._create_single_target_seq_group_metadata( + input_seq_group_metadata, + input_seq_id, + next(target_seq_ids_iter), + token_ids, + )) + + return target_seq_group_metadata_list + + def _create_single_target_seq_group_metadata( + self, + seq_group_metadata: SequenceGroupMetadata, + seq_id: SeqId, + target_seq_id: TargetSeqId, + token_ids: List[TokenId], + ) -> SequenceGroupMetadata: + """Create a single target SequenceGroupMetadata. + + Args: + seq_group_metadata: The metadata for the input sequence. + seq_id: The input sequence ID. + target_seq_id: The corresponding target sequence ID. + token_ids: The list of token ids that are to be appended to the + input sequence. 
+ """ + seq_data = seq_group_metadata.seq_data[seq_id] + prompt_token_ids = seq_data.get_prompt_token_ids() + new_output_token_ids = [*seq_data.get_output_token_ids(), *token_ids] + + return SequenceGroupMetadata( + request_id=seq_group_metadata.request_id, + is_prompt=seq_group_metadata.is_prompt, + seq_data={ + target_seq_id: + SequenceData( + prompt_token_ids=prompt_token_ids, + output_token_ids=new_output_token_ids, + ), + }, + sampling_params=seq_group_metadata.sampling_params, + block_tables={ + target_seq_id: seq_group_metadata.block_tables[seq_id], + }, + lora_request=None, + ) + + def _split_scoring_output( + self, sampler_output: SamplerOutput, num_scoring_tokens: int + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, + torch.Tensor, torch.Tensor]: + """Split the target model output into speculative and non-speculative + output. + """ + + # vLLM currently only supports proposal lens equal to zero or the batch + # proposal len. This adds some complexity (splitting the batch into spec + # and non spec sequences) and should be removed in the future. It can be + # done by supporting per-sequence proposal lens. + # + # First samples are from speculative scoring, latter samples are non- + # speculative samples. + split_sizes = [ + num_scoring_tokens, + sampler_output.sampled_token_ids.numel() - num_scoring_tokens + ] + (spec_probs, non_spec_probs + ) = sampler_output.sampled_token_probs.split(split_sizes) + (spec_sampled_tokens, non_spec_sampled_tokens + ) = sampler_output.sampled_token_ids.flatten().split(split_sizes) + ( + spec_logprobs, + non_spec_logprobs, + ) = sampler_output.logprobs.split(split_sizes) + + # Convert scores to tensors. + sampler_output.sampled_token_probs = spec_probs + sampler_output.sampled_token_ids = spec_sampled_tokens + sampler_output.logprobs = spec_logprobs + (target_token_ids, target_probs, + target_logprobs) = sampler_output_to_torch([sampler_output], True) + + # Convert non-speculative output tokens to tensors. 
+ sampler_output.sampled_token_probs = non_spec_probs + sampler_output.sampled_token_ids = non_spec_sampled_tokens + sampler_output.logprobs = non_spec_logprobs + (non_spec_target_token_ids, non_spec_target_probs, + non_spec_target_logprobs) = sampler_output_to_torch([sampler_output], + True) + + return (target_token_ids, target_probs, target_logprobs, + non_spec_target_token_ids, non_spec_target_probs, + non_spec_target_logprobs) + + def _create_target_seq_id_iterator( + self, seq_ids: List[SeqId]) -> Iterator[TargetSeqId]: + """Create an iterator for creating target sequence ids. + Target sequence ids are distinct from sequence ids because we create a + distinct target sequence id for each proposal token to be scored. + + This implementation increments a counter starting at 1 + max of all + provided input sequence ids. + """ + return count(start=max(seq_ids) + 1) + + def _get_token_ids_to_score( + self, + full_spec_token_ids: List[TokenId] # shape: [k] + ) -> List[List[TokenId]]: + """Given an int tensor of proposal token ids, return a list of + token ids that should be scored. + + Returns k+1 output lists. The additional one is used for generating the + bonus token. + + Example: + Input: [0, 1, 2, 3] (k=4) + Output: (k+1 lists) + [] + [0] + [0, 1] + [0, 1, 2] + [0, 1, 2, 3] + """ + empty_token_ids: List[TokenId] = [] + + token_ids_to_score = [empty_token_ids] + token_ids_to_score.extend([ + full_spec_token_ids[:i + 1] + for i in range(len(full_spec_token_ids)) + ]) + return token_ids_to_score diff --git a/vllm/spec_decode/interfaces.py b/vllm/spec_decode/interfaces.py new file mode 100644 index 0000000..d311bfe --- /dev/null +++ b/vllm/spec_decode/interfaces.py @@ -0,0 +1,73 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass + +import torch + +from vllm.sequence import ExecuteModelRequest + + +@dataclass +class SpeculativeProposals: + """Datastructure used to represent proposal tokens from some proposer. 
It
+    also tracks how many speculative tokens each sequence has.
+    """
+
+    # Speculative proposal tokens.
+    proposal_token_ids: torch.Tensor
+
+    # Probabilities of the proposal tokens according to the proposer.
+    proposal_probs: torch.Tensor
+
+    # The valid length of each proposal; can be zero.
+    proposal_lens: torch.Tensor
+
+    def __repr__(self):
+        return (f"SpeculativeProposals("
+                f"proposal_token_ids={self.proposal_token_ids}, "
+                f"proposal_probs={self.proposal_probs.shape}, "
+                f"proposal_lens={self.proposal_lens})")
+
+
+@dataclass
+class SpeculativeScores:
+    """Data structure used to represent the scores of speculative tokens
+    according to the scoring model.
+    """
+
+    # Probabilities of the speculative tokens according to the scoring model.
+    probs: torch.Tensor
+
+    # Log-probabilities of the speculative tokens according to the scoring
+    # model. These values can be used to generate Logprob objects that are
+    # returned to the user.
+    logprobs: torch.Tensor
+
+    # Token ids sampled from the scoring model. Used for speculative bonus
+    # tokens and also non-speculative normal decoding.
+ token_ids: torch.Tensor + + def __repr__(self): + return (f"SpeculativeScores(" + f"probs={self.probs.shape}, " + f"token_ids={self.token_ids.shape})") + + +class SpeculativeProposer(ABC): + + @abstractmethod + def get_proposals( + self, + execute_model_req: ExecuteModelRequest, + ) -> SpeculativeProposals: + raise NotImplementedError + + +class SpeculativeScorer(ABC): + + @abstractmethod + def score_proposals( + self, + execute_model_req: ExecuteModelRequest, + proposals: SpeculativeProposals, + ) -> SpeculativeScores: + raise NotImplementedError diff --git a/vllm/spec_decode/metrics.py b/vllm/spec_decode/metrics.py new file mode 100644 index 0000000..b2112fa --- /dev/null +++ b/vllm/spec_decode/metrics.py @@ -0,0 +1,191 @@ +import time +from dataclasses import dataclass +from typing import Callable, Optional + +import torch + +from vllm.model_executor.layers.rejection_sampler import RejectionSampler +from vllm.utils import is_pin_memory_available + + +@dataclass +class SpecDecodeWorkerMetrics: + """Dataclass holding metrics emitted from the spec decode worker. + """ + + # The empirical acceptance rate of the proposal method on a per-token basis. + # This is useful for evaluating how well the proposal method aligns with the + # scoring method. + draft_acceptance_rate: float + + # The empirical efficiency, measured as the number of tokens emitted by the + # system divided by the number of tokens that could be emitted by the system + # if the proposal method were perfect. + system_efficiency: float + + # The number of speculative tokens produced by the proposal method. + draft_tokens: int + + # The number of tokens emitted by the entire system. + emitted_tokens: int + + # The number of tokens accepted by the scoring model and verification + # routine, e.g. Llama2-70B and lossless rejection sampling. + # + # NOTE: Any token accepted by the verification routine is considered + # accepted (regardless of if the speculative prefix is also accepted). 
The + # user will usually see less accepted tokens. This metric is helpful when + # evaluating alignment of the proposal method with the scoring model. + accepted_tokens: int + + # The number of speculative tokens per sequence. + num_spec_tokens: int + + +Timer = Callable[[], float] + + +class AsyncMetricsCollector: + """Class which copies rejection sampler metrics from the device to CPU on a + non-default Torch stream. + """ + + def __init__(self, + rejection_sampler: RejectionSampler, + timer: Optional[Timer] = None, + collect_interval_s: float = 5.0): + self._rejection_sampler = rejection_sampler + self._timer = time.time if timer is None else timer + + self._rank: Optional[int] = None + + # We don't have a device set yet. + self._copy_stream: Optional[torch.cuda.Stream] = None + + self._in_flight_copy: Optional[torch.cuda.Event] = None + + pin_memory = is_pin_memory_available() + self._aggregate_num_accepted_tokens = torch.tensor( + 0, dtype=torch.long, device="cpu", pin_memory=pin_memory) + self._aggregate_num_emitted_tokens = torch.tensor( + 0, dtype=torch.long, device="cpu", pin_memory=pin_memory) + self._aggregate_num_draft_tokens = 0 + + self._rejsample_metrics_collect_interval_s = collect_interval_s + self._last_metrics_collect_time = self._timer() + + def init_gpu_tensors(self, rank: int) -> None: + self._rank = rank + self._copy_stream = torch.musa.Stream() + + def maybe_collect_rejsample_metrics( + self, k: int) -> Optional[SpecDecodeWorkerMetrics]: + + # If a copy was initiated in the previous call, collect and return. + if self._in_flight_copy is not None: + ready_event = self._in_flight_copy + self._in_flight_copy = None + return self._collect_rejsample_metrics(k, ready_event) + + # Otherwise, check if we should start a new copy. 
+ if self._should_collect_rejsample_metrics(self._timer()): + assert self._in_flight_copy is None + self._in_flight_copy = self._copy_rejsample_metrics_async() + + return None + + def _should_collect_rejsample_metrics(self, now: float) -> bool: + """Return whether or not this iteration should print rejection sampling + metrics. + """ + if self._rank != 0: + return False + + if (now - self._last_metrics_collect_time < + self._rejsample_metrics_collect_interval_s): + return False + return True + + def _copy_rejsample_metrics_async(self) -> torch.cuda.Event: + """Copy rejection sampling metrics (number of accepted tokens, etc) to + CPU asynchronously. + + Returns a CUDA event recording when the copy is complete. + """ + assert self._copy_stream is not None + self._copy_stream.wait_stream(torch.musa.current_stream()) + + with torch.musa.stream(self._copy_stream): + self._aggregate_num_accepted_tokens.copy_( + self._rejection_sampler.num_accepted_tokens, non_blocking=True) + self._aggregate_num_emitted_tokens.copy_( + self._rejection_sampler.num_emitted_tokens, non_blocking=True) + # Number of draft tokens is calculated on CPU, so no copy is + # required. + self._aggregate_num_draft_tokens = ( + self._rejection_sampler.num_draft_tokens) + + aggregate_metrics_ready = torch.musa.Event() + aggregate_metrics_ready.record(self._copy_stream) + + return aggregate_metrics_ready + + def _collect_rejsample_metrics( + self, k: int, + ready_event: torch.cuda.Event) -> SpecDecodeWorkerMetrics: + """Create metrics object from statistics copied asynchronously. + + Args: + k: int. The number of speculative tokens; used to determine system + efficiency. + ready_event: torch.cuda.Event. The CUDA event recording when the + async GPU->CPU copy is complete. 
+ """ + + ready_event.synchronize() + accepted_tokens = self._aggregate_num_accepted_tokens.item() + emitted_tokens = self._aggregate_num_emitted_tokens.item() + draft_tokens = self._aggregate_num_draft_tokens + + max_num_emitted_tokens = self.get_max_num_emitted_tokens( + draft_tokens, k) + + if draft_tokens > 0: + draft_acceptance_rate = accepted_tokens / draft_tokens + else: + draft_acceptance_rate = float("nan") + + if max_num_emitted_tokens > 0: + system_efficiency = emitted_tokens / max_num_emitted_tokens + else: + system_efficiency = float("nan") + + return SpecDecodeWorkerMetrics( + num_spec_tokens=k, + draft_acceptance_rate=draft_acceptance_rate, + system_efficiency=system_efficiency, + accepted_tokens=accepted_tokens, + draft_tokens=draft_tokens, + emitted_tokens=emitted_tokens, + ) + + @staticmethod + def get_max_num_emitted_tokens(draft_tokens: int, k: int) -> int: + """Calculate the number of emitted tokens, assuming all tokens are + accepted. + + This is equal to the number of sequences that have been speculated on, + times (speculation len + 1). The +1 comes from the bonus token. + """ + # Determine the number of sequences that have been speculated on. Since + # the batch size can be variable, we divide by k. + assert draft_tokens % k == 0 + total_num_spec_seqs = draft_tokens // k + + # A single sequence may emit k accepted tokens and one bonus token in + # the best case. + num_emitted_per_seq_if_all_accepted = k + 1 + + # The max num of emitted tokens is the number of speculated sequences + # times the max emitted per seq. 
+ return total_num_spec_seqs * num_emitted_per_seq_if_all_accepted diff --git a/vllm/spec_decode/multi_step_worker.py b/vllm/spec_decode/multi_step_worker.py new file mode 100644 index 0000000..5044cc1 --- /dev/null +++ b/vllm/spec_decode/multi_step_worker.py @@ -0,0 +1,203 @@ +import copy +from typing import List, Tuple + +import torch + +from vllm.sequence import (ExecuteModelRequest, SamplerOutput, + SequenceGroupMetadata) +from vllm.spec_decode.interfaces import SpeculativeProposals +from vllm.spec_decode.top1_proposer import Top1Proposer +from vllm.worker.worker import Worker + + +class MultiStepWorker(Worker): + """The MultiStepWorker is equivalent to a Worker except that it allows + multiple forward passes in a single call, assuming the scheduler has + allocated enough space to store the additional KV. This reduces overhead + by invoking the scheduler less. + + The MultiStepWorker does not support cache swap operations, or beam search. + Cache swap operations do not require large modifications. On the other hand, + beam search requires memory allocations during sequence forks and thus + requires more thought for MultiStepWorker support. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Lazy initialization list. + self._proposer: Top1Proposer + + def init_device(self): + super().init_device() + + self._proposer = Top1Proposer( + self, + self.device, + self.vocab_size, + max_proposal_len=self.max_model_len, + ) + + def set_include_gpu_probs_tensor(self): + # Need include_gpu_probs_tensor for multi_step_worker + self.model_runner.model.sampler.include_gpu_probs_tensor = True + + @torch.inference_mode() + def sampler_output( + self, + execute_model_req: ExecuteModelRequest, + sample_len: int, + ) -> Tuple[List[SamplerOutput], bool]: + """Run the model forward pass sample_len times. 
Returns the list of + sampler output, one per model forward pass, along with indicator of + whether torch tensor in sampler output need to be transposed in latter + sampler_output_to_torch logic. + + For multi step worker, this indicator shall be True. + """ + self._raise_if_unsupported(execute_model_req) + + # Shallow copy input data so modifications (such as appending tokens) + # do not cause side-effects. + copied_seq_group_metadata_list = self._shallow_copy_inputs( + execute_model_req.seq_group_metadata_list) + copied_execute_model_req = execute_model_req.clone( + copied_seq_group_metadata_list) + + # Assert enough KV space for sample_len tokens per sequence. + self._assert_enough_kv_space(execute_model_req.seq_group_metadata_list, + sample_len) + + # Run model sample_len times. + model_outputs = [] + for _ in range(sample_len): + model_output = super().execute_model( + execute_model_req=copied_execute_model_req) + assert (len(model_output) == 1 + ), "composing multistep workers not supported" + model_output = model_output[0] + + self._append_new_tokens(model_output, + copied_seq_group_metadata_list) + model_outputs.append(model_output) + + return model_outputs, True + + def get_spec_proposals( + self, + execute_model_req: ExecuteModelRequest, + ) -> SpeculativeProposals: + """Produce speculations given an input batch of sequences. The number of + speculative tokens per sequence is determined by max_proposal_len. + """ + + return self._proposer.get_proposals(execute_model_req) + + def _append_new_tokens( + self, model_output: SamplerOutput, + seq_group_metadata_list: SequenceGroupMetadata) -> None: + """Given model output from a single run, append the tokens to the + sequences. This is normally done outside of the worker, but it is + required if the worker is to perform multiple forward passes. 
+ """ + for seq_group_metadata, sequence_group_outputs in zip( + seq_group_metadata_list, model_output): + seq_group_metadata.is_prompt = False + + for seq_output in sequence_group_outputs.samples: + # NOTE: Beam search is not supported, so we can assume that + # parent_seq_id == seq_id. + seq = seq_group_metadata.seq_data[seq_output.parent_seq_id] + + token_id = seq_output.output_token + token_logprob = seq_output.logprobs[token_id] + + seq.append_token_id(token_id, token_logprob.logprob) + + def _shallow_copy_inputs( + self, seq_group_metadata_list: List[SequenceGroupMetadata] + ) -> List[SequenceGroupMetadata]: + """Copy input data structures to remove side-effects when input data + structures are shared with other modules. + + Helpful when the vLLM scheduler runs in the same process as the worker. + The alternative is deep-copying (or other form of deep copy); this has + performance downsides. + """ + + # Shallow-copy the list of SequenceGroupMetadata. This allows us to + # append tokens and change is_prompt without external side-effects. + new_seq_group_metadata_list = [] + + for old_seq_group_metadata in seq_group_metadata_list: + # We must shallow-copy seq_group_metadata as is_prompt could change. + seq_group_metadata = copy.copy(old_seq_group_metadata) + new_seq_group_metadata_list.append(seq_group_metadata) + + # We must shallow-copy seq_data as we will append token ids + new_seq_data = {} + for seq_id, old_seq_data in seq_group_metadata.seq_data.items(): + new_seq_data[seq_id] = copy.copy(old_seq_data) + new_seq_data[ + seq_id].output_token_ids = old_seq_data.output_token_ids[:] + + seq_group_metadata.seq_data = new_seq_data + + return new_seq_group_metadata_list + + def _assert_enough_kv_space( + self, seq_group_metadata_list: List[SequenceGroupMetadata], + num_steps: int) -> None: + """Assert there are enough physical blocks per sequence to store the + current KV plus additional KV from num_steps tokens. 
+ """ + assert self.model_runner.block_size is not None + for seq_group_metadata in seq_group_metadata_list: + # Only one seq_id is guaranteed because there is no beam search. + seq_id = list(seq_group_metadata.seq_data.keys())[0] + seq = seq_group_metadata.seq_data[seq_id] + + # After num_steps, the seq len will be the current seq len + # plus one token per step. + final_seq_len = seq.get_len() + num_steps + + # We will have final_seq_len - 1 KV because vLLM saves KV for a + # token in the iteration after the token was generated. + required_num_kv_slots = final_seq_len - 1 + + # The allocated number of kv slots is the number of allocated blocks + # times the number of slots of block. + number_physical_blocks = len( + seq_group_metadata.block_tables[seq_id]) + allocated_kv_slots = (number_physical_blocks * + self.model_runner.block_size) + + if required_num_kv_slots > allocated_kv_slots: + request_id = seq_group_metadata.request_id + raise ValueError( + "The worker attempted to run " + f"{num_steps} times but found insufficient KV space for " + f"{request_id=} {seq_id=}. ({allocated_kv_slots=} " + f"{required_num_kv_slots=}).") + + def _raise_if_unsupported( + self, + execute_model_req: ExecuteModelRequest, + ) -> None: + """MultiStepWorker does not yet implement support for cache swap + operations or beam search. 
+ """ + if any([ + execute_model_req.blocks_to_swap_in, + execute_model_req.blocks_to_swap_out, + execute_model_req.blocks_to_copy + ]): + raise NotImplementedError( + "MultiStepWorker does not support cache operations") + + if any( + len(seq_group_metadata.seq_data.keys()) != 1 + for seq_group_metadata in + execute_model_req.seq_group_metadata_list): + raise NotImplementedError( + "MultiStepWorker does not support beam search.") diff --git a/vllm/spec_decode/ngram_worker.py b/vllm/spec_decode/ngram_worker.py new file mode 100644 index 0000000..fed8be4 --- /dev/null +++ b/vllm/spec_decode/ngram_worker.py @@ -0,0 +1,176 @@ +from typing import List, Optional, Tuple + +import torch + +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.spec_decode.interfaces import SpeculativeProposals +from vllm.spec_decode.top1_proposer import Top1Proposer +from vllm.worker.worker_base import LoraNotSupportedWorkerBase + + +class NGramWorker(LoraNotSupportedWorkerBase): + """NGramWorker provides a light drafter without need for model. + + Current NGramWorker only implement prompt lookup decoding, + and in future we may also do RAG type drafter and other scenerios + which don't rely on LLM model to give proposals. + """ + + def __init__(self, *args, **kwargs): + # Get local_rank/vocab_size from kwargs attribute + self.local_rank = kwargs["local_rank"] + self.vocab_size = kwargs["model_config"].get_vocab_size() + + # Lazy initialization list. 
+ self._proposer: Top1Proposer + + def set_ngram_window_size(self, ngram_prompt_lookup_min: int, + ngram_prompt_lookup_max: int): + # Search valid candidate window between + # ngram_prompt_lookup_min/ngram_prompt_lookup_max + self.ngram_prompt_lookup_max = ngram_prompt_lookup_max + self.ngram_prompt_lookup_min = ngram_prompt_lookup_min + + def init_device(self): + self.device = torch.device(f"cuda:{self.local_rank}") + self.load_model = lambda *args, **kwargs: None + + # Current only support Top1Proposer + self._proposer = Top1Proposer( + self, + device=self.device, + vocab_size=self.vocab_size, + ) + + def set_include_gpu_probs_tensor(self): + # NGram don't need gpu sampler + pass + + def execute_model(self, execute_model_req: ExecuteModelRequest) -> None: + """NGram doesn't depend on model execution, just pass this function""" + pass + + def determine_num_available_blocks(self) -> None: + """NGram doesn't depend on model execution, no need to check blocks""" + pass + + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """As there is no cache need to handle, just pass this function""" + pass + + def get_cache_block_size_bytes(self): + """Return the size of a cache block in bytes.""" + return 0 + + def sampler_output( + self, + execute_model_req: ExecuteModelRequest, + sample_len: int, + ) -> Tuple[Optional[List[SamplerOutput]], bool]: + """NGram match algo to pick proposal candidate. Returns the list of + sampler output, one per SequenceGroupMetadata. + + For ngram worker, we already done needed transposed internal, so the + indicator pass to sampler_output_to_torch shall be False. 
+ """ + self._raise_if_unsupported(execute_model_req) + + arr = [] + has_spec_out = False + for seq_group_metadata in execute_model_req.seq_group_metadata_list: + seq_data = next(iter(seq_group_metadata.seq_data.values())) + + input_ids = torch.as_tensor(seq_data.get_token_ids(), + dtype=torch.long, + device=self.device) + input_length = seq_data.get_len() + + for ngram_size in range( + min(self.ngram_prompt_lookup_max, input_length - 1), + self.ngram_prompt_lookup_min, + -1, + ): + ngram_tensor = input_ids[-1 * ngram_size:] + windows = input_ids.unfold(dimension=0, + size=ngram_size, + step=1) + matches = (windows == ngram_tensor).all(dim=1) + match_indices = matches.nonzero(as_tuple=True)[0] + if match_indices.size()[0] > 1: + has_spec_out = True + res = seq_data.get_token_ids() + res = res[match_indices[0] + ngram_size:match_indices[0] + + ngram_size + sample_len] + res_len = len(res) + # pad 0 towards output as sample_len tokens required + res += [0] * (sample_len - res_len) + + break + else: + # if no candidate found, fill with 0 + res = [0] * sample_len + + arr.append(res) + + if not has_spec_out: + return None, False + + outputs = [] + token_ids = torch.as_tensor(arr, dtype=torch.long, device=self.device) + indices = token_ids.unsqueeze(2) + + token_probs = torch.zeros( + (len(execute_model_req.seq_group_metadata_list), sample_len, + self.vocab_size), + dtype=torch.float32, + device=self.device, + ) + token_probs.scatter_(2, indices, 1) + token_logprobs = torch.zeros( + (len(execute_model_req.seq_group_metadata_list), sample_len, + self.vocab_size), + dtype=torch.float32, + device=self.device, + ) + for i in range(len(execute_model_req.seq_group_metadata_list)): + outputs.append( + SamplerOutput( + outputs=None, + sampled_token_probs=token_probs[i], + logprobs=token_logprobs, + sampled_token_ids=token_ids[i], + )) + return outputs, False + + def get_spec_proposals( + self, + execute_model_req: ExecuteModelRequest, + ) -> SpeculativeProposals: + """Produce 
speculations given an input batch of sequences. The number of + speculative tokens per sequence is determined by max_proposal_len. + """ + + return self._proposer.get_proposals(execute_model_req) + + def _raise_if_unsupported( + self, + execute_model_req: ExecuteModelRequest, + ) -> None: + """NGramWorker does not yet implement support for cache swap + operations or beam search. + """ + if any([ + execute_model_req.blocks_to_swap_in, + execute_model_req.blocks_to_swap_out, + execute_model_req.blocks_to_copy + ]): + raise NotImplementedError( + "NGramWorker does not support cache operations") + + if any( + len(seq_group_metadata.seq_data.keys()) != 1 + for seq_group_metadata in + execute_model_req.seq_group_metadata_list): + raise NotImplementedError( + "NGramWorker does not support beam search.") diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py new file mode 100644 index 0000000..c2b119f --- /dev/null +++ b/vllm/spec_decode/spec_decode_worker.py @@ -0,0 +1,472 @@ +from functools import cached_property +from typing import List, Optional, Tuple + +import torch + +from vllm.logger import init_logger +from vllm.model_executor.layers.rejection_sampler import RejectionSampler +from vllm.sequence import (ExecuteModelRequest, SamplerOutput, + SequenceGroupMetadata) +from vllm.spec_decode.batch_expansion import BatchExpansionTop1Scorer +from vllm.spec_decode.interfaces import (SpeculativeProposals, + SpeculativeScorer, SpeculativeScores) +from vllm.spec_decode.metrics import AsyncMetricsCollector +from vllm.spec_decode.multi_step_worker import MultiStepWorker +from vllm.spec_decode.ngram_worker import NGramWorker +from vllm.spec_decode.util import (create_sequence_group_output, + get_all_num_logprobs, get_all_seq_ids, + get_sampled_token_logprobs, nvtx_range, + split_batch_by_proposal_len) +from vllm.worker.worker_base import LoraNotSupportedWorkerBase, WorkerBase + +logger = init_logger(__name__) + + +class 
SpecDecodeWorker(LoraNotSupportedWorkerBase): + """Worker which implements speculative decoding. + + Speculative decoding reduces decoding per-token latency by using a proposal + method, such as a small draft model, to speculate ahead of a larger LLM. The + probabilities of the speculative tokens are then determined by the larger + LLM, after which some verification routine determines which (if any) of the + speculative tokens are accepted by the larger LLM. + + See https://github.com/vllm-project/vllm/pull/2188 and + https://github.com/vllm-project/vllm/pull/3103 for more info. + + The current implementation has the following limitations: + * Only draft-model proposal is implemented (contributions for more forms are + welcome!). + * Only top-1 proposal and scoring are implemented. Tree-attention is left as + future work. + * Only lossless rejection sampling is supported. Contributions adding lossy + verification routines are welcome (e.g. Medusa's typical acceptance). + * All sequences in a batch must have the same proposal length, or zero. This + can be improved by having per-sequence speculation in the future. + * The scoring forward pass is done without an MQA kernel, which is + suboptimal especially as the batch size, proposal length, and sequence + lengths grow. Contributions to add a MQA scoring are welcome once + correctness tests pass. + More info here https://docs.google.com/document/d/1T-JaS2T1NRfdP51qzqpyakoCXxSXTtORppiwaj5asxA/edit. 
+ """ + + @classmethod + def create_worker( + cls, + scorer_worker: WorkerBase, + draft_worker_kwargs, + ) -> "SpecDecodeWorker": + + if "ngram_prompt_lookup_max" in draft_worker_kwargs: + ngram_prompt_lookup_max = ( + draft_worker_kwargs.pop("ngram_prompt_lookup_max")) + ngram_prompt_lookup_min = ( + draft_worker_kwargs.pop("ngram_prompt_lookup_min")) + else: + ngram_prompt_lookup_max = 0 + + if ngram_prompt_lookup_max > 0: + proposer_worker = NGramWorker(**draft_worker_kwargs) + proposer_worker.set_ngram_window_size(ngram_prompt_lookup_min, + ngram_prompt_lookup_max) + else: + proposer_worker = MultiStepWorker(**draft_worker_kwargs) + + return SpecDecodeWorker( + proposer_worker, + scorer_worker, + # TODO(cade) disable strict mode for speedup. + rejection_sampler=RejectionSampler(strict_mode=True), + ) + + def __init__( + self, + proposer_worker: WorkerBase, + scorer_worker: WorkerBase, + rejection_sampler: RejectionSampler, + metrics_collector: Optional[AsyncMetricsCollector] = None, + ): + """ + Create a SpecDecodeWorker. + + Args: + proposer_worker: A worker that can produce speculative tokens for + sequences. + scorer_worker: A worker that produces probabilities of speculative + tokens according to some base model. Typically a vanilla vLLM + Worker. + rejection_sampler: A Torch module used to perform modified rejection + sampling for speculative decoding. + metrics_collector: Helper class for collecting metrics; can be set + for testing purposes. + """ + self.proposer_worker = proposer_worker + self.scorer_worker = scorer_worker + self.rejection_sampler = rejection_sampler + + self._metrics = AsyncMetricsCollector( + rejection_sampler + ) if metrics_collector is None else metrics_collector + + self.probs_dtype = self.rejection_sampler.probs_dtype + self.token_id_dtype = self.rejection_sampler.token_id_dtype + + # Lazy initiazliation. + self.scorer: SpeculativeScorer + + def init_device(self) -> None: + """Initialize both scorer and proposer models. 
+ """ + # The scorer worker model is initialized first in case the proposer + # model has a smaller TP degree than the target worker. + self.scorer_worker.init_device() + self.proposer_worker.init_device() + + # NOTE(cade): load_model is not part of the WorkerBase interface. + self.scorer_worker.load_model() + self.proposer_worker.load_model() + + self._metrics.init_gpu_tensors(self.rank) + self.rejection_sampler.init_gpu_tensors(self.rank) + self.scorer = BatchExpansionTop1Scorer( + scorer_worker=self.scorer_worker, + device=self.device, + vocab_size=self._vocab_size) + + self._configure_model_sampler_for_spec_decode() + + def _configure_model_sampler_for_spec_decode(self): + """Configure model sampler to emit GPU tensors. This allows spec decode + to keep data on device without transferring to CPU and serializing, + which significantly reduces overhead of rejection sampling. + + NOTE(cade): This breaks abstraction boundaries pretty badly. The better + design is to have the "move to CPU and serialize" sampling decision be + done outside of the model/sampler; this way the "last-mile" worker + object which interfaces with the scheduler can serialize and incur the + performance hit as necessary. This allows us to run the worker several + iterations in a row without incurring the "move to CPU and serialize" + performance penalty. + + Since this requires a large change to vLLM, we defer it to later and + temporarily accept this broken abstraction boundary. + + NOTE(cade): This will require a special check if the proposer worker + does not have a sampler (e.g. ngram speculation). + """ + (self.scorer_worker.model_runner.model.sampler.include_gpu_probs_tensor + ) = True + self.proposer_worker.set_include_gpu_probs_tensor() + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of cache blocks to use. + + This is done by profiling the scorer model (which is typically the + larger of the two). 
Then the total memory which would be used by the + scorer cache is divided evenly between the proposer and scorer model KV, + such that the number of blocks is equal in both KV caches. + """ + num_gpu_blocks, num_cpu_blocks = ( + self.scorer_worker.determine_num_available_blocks()) + + scorer_cache_block_size_bytes = ( + self.scorer_worker.get_cache_block_size_bytes()) + proposer_cache_block_size_bytes = ( + self.proposer_worker.get_cache_block_size_bytes()) + + new_num_gpu_blocks = split_num_cache_blocks_evenly( + scorer_cache_block_size_bytes, proposer_cache_block_size_bytes, + num_gpu_blocks) + return new_num_gpu_blocks, num_cpu_blocks + + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """Initialize the cache engine of the scorer and proposer workers. + """ + self.scorer_worker.initialize_cache(num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks) + self.proposer_worker.initialize_cache(num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks) + + @torch.inference_mode() + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + """Perform speculative decoding on the input batch. + """ + + assert execute_model_req.seq_group_metadata_list is not None, ( + "speculative decoding " + "requires non-None seq_group_metadata_list") + + # If no spec tokens, call the proposer and scorer workers normally. + # Used for prefill. + if execute_model_req.num_lookahead_slots == 0 or len( + execute_model_req.seq_group_metadata_list) == 0: + return self._run_no_spec(execute_model_req) + + return self._run_speculative_decoding_step(execute_model_req) + + @nvtx_range("spec_decode_worker._run_no_spec") + def _run_no_spec( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + """Run a prefill step, without any speculation. The input is sent to the + proposer and scorer model so that the KV cache is consistent between the + two. 
+ """ + #logger.info("run proposer worker no spec") + + self.proposer_worker.execute_model(execute_model_req) + + #logger.info("run target worker no spec") + sampler_output = self.scorer_worker.execute_model(execute_model_req) + assert len(sampler_output) == 1 + sampler_output = sampler_output[0] + + # Clear device tensors from sampler output. This reduces communication + # overhead when the engine runs in a different process than the workers. + sampler_output.probs = None + sampler_output.sampled_tokens = None + sampler_output.logprobs = None + return [sampler_output] + + @nvtx_range("spec_decode_worker._run_speculative_decoding_step") + def _run_speculative_decoding_step( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + """Execute a single step of speculative decoding. + + This invokes the proposer worker to get k speculative tokens for each + sequence, then scores each speculative token using the scoring worker. + + Returns a list of SamplerOutput, each containing a single token per + sequence. + """ + + #logger.info("get spec proposals") + # Generate proposals using draft worker. 
+ proposals = self.proposer_worker.get_spec_proposals(execute_model_req) + + #logger.info("score proposals") + proposal_scores = self.scorer.score_proposals( + execute_model_req, + proposals, + ) + + #logger.info("verify proposals") + accepted_token_ids, target_logprobs = self._verify_tokens( + execute_model_req.seq_group_metadata_list, proposal_scores, + proposals, execute_model_req.num_lookahead_slots) + + #logger.info("create output list") + return self._create_output_sampler_list( + execute_model_req.seq_group_metadata_list, + accepted_token_ids, + target_logprobs=target_logprobs, + k=execute_model_req.num_lookahead_slots) + + @nvtx_range("spec_decode_worker._verify_tokens") + def _verify_tokens( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + proposal_scores: SpeculativeScores, + proposals: SpeculativeProposals, + max_proposal_len: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Determine which speculative tokens are accepted using the + probabilities of each token according to the proposer and scorer models. + + Returns a tuple of Tensors, one for the accepted token ids and one for + the logprobs according to the scoring model. + """ + proposal_lens_list = proposals.proposal_lens.tolist() + + # vLLM currently only supports proposal lens equal to zero or the batch + # proposal len. This adds some complexity (splitting the batch into spec + # and non spec sequences) and should be removed in the future. It can be + # done by supporting per-sequence proposal lens. + _, spec_indices = split_batch_by_proposal_len( + seq_group_metadata_list, + proposal_lens_list, + select_proposal_len_zero=False) + _, non_spec_indices = split_batch_by_proposal_len( + seq_group_metadata_list, + proposal_lens_list, + select_proposal_len_zero=True) + original_indices = spec_indices + non_spec_indices + + # Get probabilities of target model, excluding bonus token. 
+ proposal_verifier_probs = proposal_scores.probs[spec_indices, :-1] + + # Get non-speculative sampled tokens from target model. + non_spec_token_ids = proposal_scores.token_ids[non_spec_indices] + + # Get bonus tokens from target model. + bonus_token_ids = proposal_scores.token_ids[spec_indices, -1:] + + # Get probabilities according to proposal method. + proposal_probs = proposals.proposal_probs[spec_indices] + + # Get proposed tokens. + proposal_token_ids = proposals.proposal_token_ids[spec_indices] + + accepted_token_ids = self.rejection_sampler( + target_probs=proposal_verifier_probs, + bonus_token_ids=bonus_token_ids, + draft_probs=proposal_probs, + draft_token_ids=proposal_token_ids, + ) + + # Append output tokens from non-speculative sequences to + # the accepted token ids tensor. + non_spec_token_ids = non_spec_token_ids.expand(-1, max_proposal_len + + 1).clone() + non_spec_token_ids[:, 1:] = -1 + accepted_token_ids = torch.cat( + [accepted_token_ids, non_spec_token_ids]) + logprobs = proposal_scores.logprobs + + # Rearrange so that results are in the order of the original seq group + # metadata. + accepted_token_ids[original_indices] = accepted_token_ids.clone() + + return accepted_token_ids, logprobs + + def _create_output_sampler_list( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + accepted_token_ids: torch.Tensor, # shape: [batch_size, k+1] + target_logprobs: torch.Tensor, # shape: [batch_size, k+1, vocab_size] + k: int, + ) -> List[SamplerOutput]: + """Given the accepted token ids, create a list of SamplerOutput. + + The output is padded with -1 tokens such that each sequence has + the same number of outputs. + """ + batch_size, num_steps = accepted_token_ids.shape + + # Organize input tensors by step instead of by sequence. + target_logprobs_by_step = target_logprobs.transpose(0, 1) + accepted_token_ids_by_step = accepted_token_ids.transpose(0, 1) + + # Get the logprobs/rank of the accepted tokens. 
+ (accepted_token_id_ranks_by_step, + accepted_token_id_logprobs_by_step) = get_sampled_token_logprobs( + logprob_tensor=target_logprobs_by_step, + sampled_token_ids=accepted_token_ids_by_step, + ) + + # Get the top-k logprobs (which may or may not include the logprob of + # the accepted token). + (topk_logprobs_by_step, + topk_indices_by_step) = target_logprobs_by_step.topk( + k=self.scorer_worker.model_config.max_logprobs, + dim=-1, + ) + + # Get the sequence ids and num_logprobs (sampling parameter) in the + # batch. + seq_ids = get_all_seq_ids(seq_group_metadata_list) + num_logprobs_per_seq = get_all_num_logprobs(seq_group_metadata_list) + + # Serialize all tensors to CPU Python lists. + accepted_token_ids_by_step = accepted_token_ids_by_step.tolist() + accepted_token_id_ranks_by_step = ( + accepted_token_id_ranks_by_step.tolist()) + accepted_token_id_logprobs_by_step = ( + accepted_token_id_logprobs_by_step.tolist()) + topk_logprobs_by_step = topk_logprobs_by_step.tolist() + topk_indices_by_step = topk_indices_by_step.tolist() + + # Construct the output on a per-step, per-sequence basis. + sampler_output_list = [] + for step_index in range(num_steps): + if all(token_id == -1 + for token_id in accepted_token_ids_by_step[step_index]): + break + + step_output_token_ids = [] + for sequence_index in range(batch_size): + # Each sequence may have a different num_logprobs; retrieve it. 
+ num_logprobs = num_logprobs_per_seq[sequence_index] + + step_output_token_ids.append( + create_sequence_group_output( + token_id=accepted_token_ids_by_step[step_index] + [sequence_index], + token_id_logprob_rank=accepted_token_id_ranks_by_step[ + step_index][sequence_index], + token_id_logprob=accepted_token_id_logprobs_by_step[ + step_index][sequence_index], + seq_id=seq_ids[sequence_index], + topk_token_ids=topk_indices_by_step[step_index] + [sequence_index][:num_logprobs], + topk_logprobs=topk_logprobs_by_step[step_index] + [sequence_index][:num_logprobs], + )) + + sampler_output_list.append( + SamplerOutput(outputs=step_output_token_ids)) + + maybe_rejsample_metrics = ( + self._metrics.maybe_collect_rejsample_metrics(k)) + if maybe_rejsample_metrics is not None: + sampler_output_list[ + 0].spec_decode_worker_metrics = maybe_rejsample_metrics + + return sampler_output_list + + @cached_property + def _vocab_size(self) -> int: + """Get the vocab size of the model and make sure it's consistent between + draft and target workers. + """ + vocab_sizes = [ + worker.vocab_size + for worker in [self.proposer_worker, self.scorer_worker] + ] + assert all(vocab_sizes[0] == vocab_size for vocab_size in vocab_sizes) + return vocab_sizes[0] + + @property + def rank(self): + return self.scorer_worker.rank + + @property + def device(self): + return self.scorer_worker.device + + def get_cache_block_size_bytes(self): + """Return the size of a cache block in bytes. + + This function is only used to compose workers within a SpecDecodeWorker. + We leave composing a SpecDecodeWorker within a SpecDecodeWorker + undefined for now, although it could be implemented in the future. + See https://arxiv.org/abs/2308.04623. 
+ """ + raise NotImplementedError + + +def split_num_cache_blocks_evenly(scorer_cache_block_size_bytes: int, + proposer_cache_block_size_bytes: int, + total_num_gpu_blocks: int) -> int: + """Given total_num_gpu_blocks, the number of GPU blocks that could be + allocate to the target model, this function calculates how many blocks + should be given to the draft and target model. + + Note that usually the block size, in bytes, of each model is different, + as it's a function of number of KV/layer, number of heads, and hidden + dimension size. + + Since the target and draft models allocate the same number of blocks, we + simply calculate the number of blocks where if allocated by both models, + the total memory usage from KV cache is no larger than the number of + blocks allocatable by the target model alone. + """ + new_num_gpu_blocks = int( + total_num_gpu_blocks * scorer_cache_block_size_bytes / + (proposer_cache_block_size_bytes + scorer_cache_block_size_bytes)) + + return new_num_gpu_blocks diff --git a/vllm/spec_decode/top1_proposer.py b/vllm/spec_decode/top1_proposer.py new file mode 100644 index 0000000..eb622a0 --- /dev/null +++ b/vllm/spec_decode/top1_proposer.py @@ -0,0 +1,200 @@ +from typing import List, Optional, Tuple + +import torch + +from vllm.sequence import (ExecuteModelRequest, SamplerOutput, + SequenceGroupMetadata) +from vllm.spec_decode.interfaces import (SpeculativeProposals, + SpeculativeProposer) +from vllm.spec_decode.util import sampler_output_to_torch +from vllm.worker.worker_base import WorkerBase + + +class Top1Proposer(SpeculativeProposer): + """Helper class which separates out sequences which would exceed the max + model length when speculated upon. + + This allows combinations of models such as JackFram/llama-68m draft with + meta-llama/Llama2-13b-chat-hf, as llama-68m has max_position_embeddings of + 2048 while Llama2-13b has max_position_embeddings of 4096. 
+ + We treat the sequences which exceed the proposal draft model length as + "non-spec sequences". Essentially they skip the draft model and go through + normal decoding in the target model. + + Currently, only proposal_lens of 0 and k are supported, where k is a global + batch proposal length. In the future vLLM should support per-sequence + proposal lengths. + """ + + def __init__( + self, + worker: WorkerBase, + device: str, + vocab_size: int, + max_proposal_len: Optional[int] = None, + ): + self._worker = worker + self._device = device + self.max_proposal_len = max_proposal_len + self._vocab_size = vocab_size + + def get_proposals( + self, + execute_model_req: ExecuteModelRequest, + ) -> SpeculativeProposals: + """Get speculative proposals given the input batch. + + Sequences which would exceed the max model length are skipped during + speculation. + """ + proposal_len = execute_model_req.num_lookahead_slots + seq_group_metadata_list = execute_model_req.seq_group_metadata_list + + # Split speculative- and non-speculative- sequences. + ( + proposal_lens, + nonzero_proposal_len_seqs, + nonzero_proposal_len_indices, + ) = self._split_by_max_model_len(seq_group_metadata_list, proposal_len) + + if nonzero_proposal_len_seqs: + # Speculate tokens using the draft worker for the speculative + # sequences. + # If sampler_transposed is true, then maybe_sampler_output's + # token_ids is like [batch] format in proposal_len size list, + # while if it is false, the format would be [proposal_len] + # in batch size list + nonzero_execute_model_req = ExecuteModelRequest( + seq_group_metadata_list=nonzero_proposal_len_seqs, + num_lookahead_slots=proposal_len, + ) + maybe_sampler_output, transposed = self._worker.sampler_output( + execute_model_req=nonzero_execute_model_req, + sample_len=proposal_len, + ) + else: + # If no sequences can be speculated, set sampler output to None. 
+ maybe_sampler_output = None + transposed = False + + # Combine speculative- and non-speculative sequences into the same + # representation. + proposal_tokens, proposal_probs, proposal_lens = self._merge_outputs( + batch_size=len(seq_group_metadata_list), + proposal_len=proposal_len, + maybe_sampler_output=maybe_sampler_output, + proposal_lens=proposal_lens, + nonzero_proposal_len_indices=nonzero_proposal_len_indices, + sampler_transposed=transposed, + ) + + proposals = SpeculativeProposals( + proposal_token_ids=proposal_tokens, + proposal_probs=proposal_probs, + proposal_lens=proposal_lens, + ) + + return proposals + + def _split_by_max_model_len( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + proposal_len: int, + ) -> Tuple[List[int], List[SequenceGroupMetadata], List[int]]: + """Determine which sequences would exceed the max model length.""" + + proposal_lens: List[int] = [] + nonzero_proposal_len_seqs: List[SequenceGroupMetadata] = [] + nonzero_proposal_len_indices: List[int] = [] + for i, seq_group_metadata in enumerate(seq_group_metadata_list): + seq_data = next(iter(seq_group_metadata.seq_data.values())) + seq_len = seq_data.get_len() + + # Currently only proposal lens of 0 or the global batch proposal len + # are supported. 
+ # If max_proposal_len is defined, then we shall no exccess this + # quota for nonzero_proposal + if (self.max_proposal_len is None + or seq_len + proposal_len < self.max_proposal_len): + proposal_lens.append(proposal_len) + nonzero_proposal_len_seqs.append(seq_group_metadata) + nonzero_proposal_len_indices.append(i) + else: + proposal_lens.append(0) + + return ( + proposal_lens, + nonzero_proposal_len_seqs, + nonzero_proposal_len_indices, + ) + + def _merge_outputs( + self, + batch_size: int, + proposal_len: int, + maybe_sampler_output: Optional[SamplerOutput], + proposal_lens: List[int], + nonzero_proposal_len_indices: List[int], + sampler_transposed: bool, + ) -> Tuple[torch.Tensor, torch.tensor, torch.Tensor]: + """After speculations are produced, merge the speculation results with + the skipped sequences. + """ + if maybe_sampler_output is None: + # If no speculative tokens, the sampler output will be None. + # In this case we return empty proposals. + proposal_tokens = torch.full( + size=( + batch_size, + proposal_len, + ), + fill_value=-1, + dtype=torch.long, + device=self._device, + ) + proposal_probs = torch.zeros( + batch_size, + proposal_len, + self._vocab_size, + dtype=torch.float32, + device=self._device, + ) + proposal_lens_tensor = torch.zeros(len(proposal_lens), + dtype=torch.long, + device=self._device) + return proposal_tokens, proposal_probs, proposal_lens_tensor + + sampler_output = maybe_sampler_output + proposal_tokens, proposal_probs, _ = sampler_output_to_torch( + sampler_output, sampler_transposed) + + # Now, reformat the output GPU tensors such that each sequence has + # a proposal. the proposal can be empty, e.g. 
[-1, -1, -1] + + entire_proposal_tokens = torch.full( + size=(batch_size, *proposal_tokens.shape[1:]), + fill_value=-1, + dtype=torch.long, + device=self._device, + ) + entire_proposal_tokens[nonzero_proposal_len_indices] = proposal_tokens + entire_proposal_probs = torch.zeros( + batch_size, + *proposal_probs.shape[1:], + dtype=torch.float32, + device=self._device, + ) + entire_proposal_probs[nonzero_proposal_len_indices] = proposal_probs + + proposal_tokens, proposal_probs = ( + entire_proposal_tokens, + entire_proposal_probs, + ) + + proposal_lens_tensor = torch.zeros(batch_size, + dtype=torch.long, + device=self._device) + proposal_lens_tensor[nonzero_proposal_len_indices] = proposal_len + + return proposal_tokens, proposal_probs, proposal_lens_tensor diff --git a/vllm/spec_decode/util.py b/vllm/spec_decode/util.py new file mode 100644 index 0000000..d6f80c8 --- /dev/null +++ b/vllm/spec_decode/util.py @@ -0,0 +1,228 @@ +from contextlib import contextmanager +from itertools import chain +from typing import Dict, List, Tuple + +import torch + +from vllm.sequence import (Logprob, SamplerOutput, SequenceGroupMetadata, + SequenceGroupOutput, SequenceOutput) + +SeqId = int + + +def get_all_seq_ids( + seq_group_metadata_list: List[SequenceGroupMetadata]) -> List[SeqId]: + """Given a list of SequenceGroupMetadata, create a list of all + sequence ids. + """ + return list( + chain.from_iterable([ + seq_group_metadata.seq_data.keys() + for seq_group_metadata in seq_group_metadata_list + ])) + + +def get_all_num_logprobs( + seq_group_metadata_list: List[SequenceGroupMetadata]) -> List[int]: + """Given a list of SequenceGroupMetadata, create a list of all num_logprobs. + + If the sampling params do not call for any logprobs, return 0 for that + sequence. 
+ """ + + all_num_logprobs = [] + for seq_group_metadata in seq_group_metadata_list: + num_logprobs = seq_group_metadata.sampling_params.logprobs + if seq_group_metadata.sampling_params.logprobs is None: + num_logprobs = 0 + all_num_logprobs.append(num_logprobs) + + return all_num_logprobs + + +def get_sampled_token_logprobs( + # shape [num_steps, batch_size, vocab_size] + logprob_tensor: torch.Tensor, + sampled_token_ids: torch.Tensor, # shape [num_steps, batch_size] +) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the logprobs for the sampled tokens. Returns the ranks and logprobs. + """ + num_steps, batch_size, vocab_size = logprob_tensor.shape + + selected_logprobs = logprob_tensor[torch.arange(num_steps).unsqueeze(1), + torch.arange(batch_size), + sampled_token_ids, ] + expanded_selected_logprobs = selected_logprobs.unsqueeze(-1).expand( + -1, -1, vocab_size) + sampled_token_ids_ranks = (logprob_tensor >= + expanded_selected_logprobs).sum(-1) + + return sampled_token_ids_ranks, selected_logprobs + + +def create_sequence_group_output( + token_id: int, + token_id_logprob_rank: int, + token_id_logprob: float, + seq_id: SeqId, + topk_token_ids: List[int], + topk_logprobs: List[float], +) -> SequenceGroupOutput: + """Create a SequenceGroupOutput given the sampling results. + + Args: + token_id (int): The sampled token for the sequence. + token_id_logprob_rank (int): The logprob rank of the sampled token. + token_id_logprob (float): The logprob value of the sampled token. + seq_id (int): The sequence id. + topk_token_ids (List[int]): The list of top-k token ids. + topk_logprobs (List[float]): The list of top-k logprobs. + """ + # vLLM logprobs always include the sampled token. In addition, the user may + # request topk-logprobs (where top-k varies per user up to max_logprobs). 
+ logprobs: Dict[int, Logprob] = { + token_id: Logprob( + logprob=token_id_logprob, + rank=token_id_logprob_rank, + ), + } + logprobs.update({ + topk_token_ids[topk_logprob_index]: Logprob( + logprob=topk_logprobs[topk_logprob_index], + rank=topk_logprob_index + 1, + ) + for topk_logprob_index, _ in enumerate(topk_token_ids) + }) + + return SequenceGroupOutput( + samples=[ + SequenceOutput(parent_seq_id=seq_id, + output_token=token_id, + logprobs=logprobs) + ], + # TODO add prompt logprobs support. + prompt_logprobs=None, + ) + + +def split_batch_by_proposal_len( + seq_group_metadata_list: List[SequenceGroupMetadata], + proposal_lens: List[int], select_proposal_len_zero: bool +) -> Tuple[List[SequenceGroupMetadata], List[int]]: + """Utility function that splits a batch based on whether the proposal len is + zero or not. We should remove this once vLLM supports per-sequence proposal + lens in a batch. + """ + + if select_proposal_len_zero: + predicate = lambda proposal_len: proposal_len == 0 + else: + predicate = lambda proposal_len: proposal_len != 0 + + indices = [ + i for i, (_, proposal_len + ) in enumerate(zip(seq_group_metadata_list, proposal_lens)) + if predicate(proposal_len) + ] + seq_groups = [ + seq_group for seq_group, proposal_len in zip( + seq_group_metadata_list, proposal_lens) if predicate(proposal_len) + ] + + return seq_groups, indices + + +def sampler_output_to_torch( + sampler_output_list: List[SamplerOutput], sampler_transposed: bool +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Utility function which converts a list of SamplerOutput to tensors. + + sampler_transposed here is used as the indicator for whether + we need do additional tensor transpose logic here. 
+ + Returns: + sampled_token_ids: torch.Tensor + shape: [batch_size, len(sampler_output_list)] + + sampled_token_probs: torch.Tensor + shape: [batch_size, len(sampler_output_list), vocab_size] + """ + + # shape: [batch_size, num_sampler_output, vocab_size] + sampled_token_probs = torch.stack( + [ + sampler_output.sampled_token_probs + for sampler_output in sampler_output_list + ], + dim=0, + ) + + if sampler_transposed: + sampled_token_probs = sampled_token_probs.transpose(0, 1) + + # shape: [batch_size, num_sampler_output, vocab_size] + sampled_token_logprobs = torch.stack( + [sampler_output.logprobs for sampler_output in sampler_output_list], + dim=0, + ) + + if sampler_transposed: + sampled_token_logprobs = sampled_token_logprobs.transpose(0, 1) + + # shape: [batch_size, num_sampler_output] + sampled_token_ids = torch.stack( + [ + sampler_output.sampled_token_ids.flatten() + for sampler_output in sampler_output_list + ], + dim=0, + ) + if sampler_transposed: + sampled_token_ids = sampled_token_ids.transpose(0, 1) + + return sampled_token_ids, sampled_token_probs, sampled_token_logprobs + + +def maybe_mock_device_tensors(sampler_output: SamplerOutput, batch_size: int, + vocab_size: int, device: str) -> None: + """Helper method which mocks out the GPU tensors in SamplerOutput with dummy + values. This will be removed in PR 7/9. + https://docs.google.com/document/d/1rE4pr3IdspRw97XbImY4fS9IWYuJJ3HGtL7AdIKGrw8/edit#heading=h.qijw1sdidrer + """ + values = [ + sampler_output.sampled_token_probs, sampler_output.sampled_token_ids + ] + assert all(v is None for v in values) or not any(v is None for v in values) + if not any(v is None for v in values): + # Do nothing if the tensors are already created (usually in unit tests). + return + + # Softmax to ensure valid probs. 
+ sampler_output.sampled_token_probs = torch.nn.functional.softmax( + torch.rand(batch_size, vocab_size, dtype=torch.float32, device=device), + dim=-1) + + sampler_output.sampled_token_ids = torch.randint(low=10, + high=100, + size=(batch_size, ), + dtype=torch.long, + device=device) + + +@contextmanager +def nvtx_range(msg, *args, **kwargs): + """ + Context manager / decorator that pushes an NVTX range at the beginning + of its scope, and pops it at the end. If extra arguments are given, + they are passed as arguments to msg.format(). + + If running with cuda graphs, you must enable nsys cuda graph profiling. + + Arguments: + msg (string): message to associate with the range + """ + torch.cuda.nvtx.range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + torch.cuda.nvtx.range_pop() diff --git a/vllm/test_utils.py b/vllm/test_utils.py new file mode 100644 index 0000000..0cf23e4 --- /dev/null +++ b/vllm/test_utils.py @@ -0,0 +1,41 @@ +import ray + +from vllm.distributed import (ensure_model_parallel_initialized, + init_distributed_environment) +from vllm.utils import get_open_port + + +def init_test_distributed_environment( + pipeline_parallel_size: int, + tensor_parallel_size: int, + rank: int, + distributed_init_port: str, + local_rank: int = -1, +) -> None: + distributed_init_method = f"tcp://localhost:{distributed_init_port}" + init_distributed_environment( + world_size=pipeline_parallel_size * tensor_parallel_size, + rank=rank, + distributed_init_method=distributed_init_method, + local_rank=local_rank) + ensure_model_parallel_initialized(tensor_parallel_size, + pipeline_parallel_size) + + +def multi_process_tensor_parallel( + tensor_parallel_size: int, + test_target, +) -> None: + # Using ray helps debugging the error when it failed + # as compared to multiprocessing. 
from typing import Dict, Optional

from transformers import AutoConfig, PretrainedConfig

from vllm.transformers_utils.configs import (ChatGLMConfig, DbrxConfig,
                                             JAISConfig, MPTConfig, RWConfig)

# Model types whose configs are not handled (or not handled correctly) by
# transformers' AutoConfig and therefore need vLLM-local config classes.
_CONFIG_REGISTRY: Dict[str, PretrainedConfig] = {
    "chatglm": ChatGLMConfig,
    "dbrx": DbrxConfig,
    "mpt": MPTConfig,
    "RefinedWeb": RWConfig,  # For tiiuae/falcon-40b(-instruct)
    "RefinedWebModel": RWConfig,  # For tiiuae/falcon-7b(-instruct)
    "jais": JAISConfig,
}


def get_config(model: str,
               trust_remote_code: bool,
               revision: Optional[str] = None,
               code_revision: Optional[str] = None) -> PretrainedConfig:
    """Load the HF config for `model`.

    Falls back to a vLLM-local config class (see `_CONFIG_REGISTRY`) when
    the model type needs one.

    Args:
        model: model name or local path passed to `AutoConfig`.
        trust_remote_code: whether to allow custom config code from the hub.
        revision: optional model revision.
        code_revision: optional revision for remote code.

    Raises:
        RuntimeError: when loading fails because `trust_remote_code` is
            required but not set.
    """
    try:
        config = AutoConfig.from_pretrained(
            model,
            trust_remote_code=trust_remote_code,
            revision=revision,
            code_revision=code_revision)
    except ValueError as e:
        if (not trust_remote_code and
                "requires you to execute the configuration file" in str(e)):
            err_msg = (
                "Failed to load the model config. If the model is a custom "
                "model not yet available in the HuggingFace transformers "
                "library, consider setting `trust_remote_code=True` in LLM "
                "or using the `--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            # Bare `raise` re-raises with the original traceback intact
            # (`raise e` would add a redundant frame).
            raise
    if config.model_type in _CONFIG_REGISTRY:
        # Re-load through the vLLM-local config class for this model type.
        config_class = _CONFIG_REGISTRY[config.model_type]
        config = config_class.from_pretrained(model,
                                              revision=revision,
                                              code_revision=code_revision)
    return config


def get_hf_text_config(config: PretrainedConfig):
    """Get the "sub" config relevant to llm for multi modal models.
    No op for pure text models.
    """
    if hasattr(config, "text_config"):
        # The code operates under the assumption that text_config should have
        # `num_attention_heads` (among others). Assert here to fail early
        # if transformers config doesn't align with this assumption.
        assert hasattr(config.text_config, "num_attention_heads")
        return config.text_config
    else:
        return config
# Adapted from https://github.com/THUDM/ChatGLM2-6B
class ChatGLMConfig(PretrainedConfig):
    """Configuration for ChatGLM-family models."""

    model_type = "chatglm"
    # Map standard HF attribute names onto ChatGLM's native field names.
    attribute_map = {
        "num_hidden_layers": "num_layers",
        "n_head_kv": "multi_query_group_num",
    }

    def __init__(self,
                 num_layers=28,
                 padded_vocab_size=65024,
                 hidden_size=4096,
                 ffn_hidden_size=13696,
                 kv_channels=128,
                 num_attention_heads=32,
                 seq_length=2048,
                 hidden_dropout=0.0,
                 attention_dropout=0.0,
                 layernorm_epsilon=1e-5,
                 rmsnorm=True,
                 apply_residual_connection_post_layernorm=False,
                 post_layer_norm=True,
                 add_bias_linear=False,
                 add_qkv_bias=False,
                 interleaved_qkv=False,
                 bias_dropout_fusion=True,
                 multi_query_attention=False,
                 multi_query_group_num=1,
                 apply_query_key_layer_scaling=True,
                 attention_softmax_in_fp32=True,
                 fp32_residual_connection=False,
                 quantization_bit=0,
                 pre_seq_len=None,
                 prefix_projection=False,
                 **kwargs):
        # Model geometry.
        self.num_layers = num_layers
        # ChatGLM checkpoints report a padded vocab; expose it under both
        # the generic name and the native one.
        self.vocab_size = padded_vocab_size
        self.padded_vocab_size = padded_vocab_size
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.kv_channels = kv_channels
        self.num_attention_heads = num_attention_heads
        self.seq_length = seq_length
        # Dropout / normalization.
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.layernorm_epsilon = layernorm_epsilon
        self.rmsnorm = rmsnorm
        self.apply_residual_connection_post_layernorm = (
            apply_residual_connection_post_layernorm)
        self.post_layer_norm = post_layer_norm
        # Bias and attention-variant flags.
        self.add_bias_linear = add_bias_linear
        self.add_qkv_bias = add_qkv_bias
        self.interleaved_qkv = interleaved_qkv
        self.bias_dropout_fusion = bias_dropout_fusion
        self.multi_query_attention = multi_query_attention
        self.multi_query_group_num = multi_query_group_num
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.fp32_residual_connection = fp32_residual_connection
        # Quantization / prefix-tuning settings.
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection
        super().__init__(**kwargs)
class DbrxAttentionConfig(PretrainedConfig):
    """Configuration for the [`DbrxAttention`] layers.

    Inherits from [`PretrainedConfig`]; see its documentation for the
    generic options.

    Args:
        attn_pdrop (`float`, *optional*, defaults to 0.0): dropout
            probability for the attention layers.
        clip_qkv (`float`, *optional*, defaults to None): if set, clip the
            queries, keys and values to this value.
        kv_n_heads (Optional[int]): number of KV heads, for
            grouped-query attention only.
        rope_theta (float): base frequency for RoPE.
    """

    def __init__(
        self,
        attn_pdrop: float = 0,
        clip_qkv: Optional[float] = None,
        kv_n_heads: int = 1,
        rope_theta: float = 10000.0,
        **kwargs: Any,
    ):
        # PretrainedConfig pops the generic keys it understands out of
        # `kwargs` in place; whatever remains is checked below.
        super().__init__(**kwargs)
        self.attn_pdrop = attn_pdrop
        self.clip_qkv = clip_qkv
        self.kv_n_heads = kv_n_heads
        self.rope_theta = rope_theta

        # `model_type` is tolerated; any other leftover key is an error.
        kwargs.pop("model_type", None)
        if kwargs:
            raise ValueError(f"Found unknown {kwargs=}")

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: str, **kwargs: Any
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(
            pretrained_model_name_or_path, **kwargs
        )

        # A full Dbrx config nests this section under `attn_config`.
        if config_dict.get("model_type") == "dbrx":
            config_dict = config_dict["attn_config"]

        if ("model_type" in config_dict and hasattr(cls, "model_type")
                and config_dict["model_type"] != cls.model_type):
            logger.warning(
                "You are using a model of type %s to instantiate a model of "
                "type %s. This is not supported for all configurations of "
                "models and can yield errors.",
                config_dict["model_type"], cls.model_type)

        return cls.from_dict(config_dict, **kwargs)
class DbrxFFNConfig(PretrainedConfig):
    """Configuration for the [`DbrxFFN`] feed-forward / MoE layers.

    Inherits from [`PretrainedConfig`]; see its documentation for the
    generic options.

    Args:
        ffn_act_fn (dict, optional): activation spec; key 'name' names the
            activation, extra keys are passed as keyword arguments.
        ffn_hidden_size (int, optional): hidden size of the FFN.
        moe_num_experts (int, optional): number of experts.
        moe_top_k (int, optional): experts used per token.
        moe_jitter_eps (float, optional): jitter epsilon for the MoE layer.
        moe_loss_weight (float, optional): MoE auxiliary loss weight.
        moe_normalize_expert_weights (float, optional): normalization factor
            for the expert weights.
        uniform_expert_assignment (bool, optional): uniform expert
            assignment; for benchmarking only.
    """

    def __init__(
        self,
        ffn_act_fn: Optional[dict] = None,
        ffn_hidden_size: int = 3584,
        moe_num_experts: int = 4,
        moe_top_k: int = 1,
        moe_jitter_eps: Optional[float] = None,
        moe_loss_weight: float = 0.01,
        moe_normalize_expert_weights: Optional[float] = 1,
        uniform_expert_assignment: bool = False,
        **kwargs: Any,
    ):
        # NOTE(review): upstream deliberately does NOT forward kwargs here.
        super().__init__()
        # Default activation is SiLU when none is supplied.
        self.ffn_act_fn = ffn_act_fn if ffn_act_fn is not None else {
            "name": "silu"
        }
        self.ffn_hidden_size = ffn_hidden_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.moe_jitter_eps = moe_jitter_eps
        self.moe_loss_weight = moe_loss_weight
        self.moe_normalize_expert_weights = moe_normalize_expert_weights
        self.uniform_expert_assignment = uniform_expert_assignment

        # `model_type` is tolerated; any other leftover key is an error.
        kwargs.pop("model_type", None)
        if kwargs:
            raise ValueError(f"Found unknown {kwargs=}")

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: str, **kwargs: Any
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(
            pretrained_model_name_or_path, **kwargs
        )

        # A full Dbrx config nests this section under `ffn_config`.
        if config_dict.get("model_type") == "dbrx":
            config_dict = config_dict["ffn_config"]

        if ("model_type" in config_dict and hasattr(cls, "model_type")
                and config_dict["model_type"] != cls.model_type):
            logger.warning(
                "You are using a model of type %s to instantiate a model of "
                "type %s. This is not supported for all configurations of "
                "models and can yield errors.",
                config_dict["model_type"], cls.model_type)

        return cls.from_dict(config_dict, **kwargs)
class DbrxConfig(PretrainedConfig):
    """Configuration for [`DbrxModel`].

    Inherits from [`PretrainedConfig`]; see its documentation for the
    generic options.

    Args:
        d_model (`int`, *optional*, defaults to 6144): embedding/hidden size.
        n_heads (`int`, *optional*, defaults to 48): attention heads per layer.
        n_layers (`int`, *optional*, defaults to 40): number of hidden layers.
        max_seq_len (`int`, *optional*, defaults to 32768): maximum sequence
            length.
        vocab_size (`int`, *optional*, defaults to 100352): vocabulary size.
        resid_pdrop (`float`, *optional*, defaults to 0.0): dropout on the
            attention output before the residual add.
        emb_pdrop (`float`, *optional*, defaults to 0.0): embedding dropout.
        attn_config (`dict`, *optional*): attention-module configuration.
        ffn_config (`dict`, *optional*): FFN-module configuration.
        use_cache (`bool`, *optional*, defaults to `False`): return KV cache.
        initializer_range (`float`, *optional*, defaults to 0.02): std of the
            truncated-normal initializer.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            return router logits (enables the auxiliary loss).
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            auxiliary loss factor.

    Example:
    ```python
    >>> from transformers import DbrxConfig, DbrxModel

    >>> configuration = DbrxConfig()
    >>> model = DbrxModel(configuration)
    >>> configuration = model.config
    ```
    """

    model_type = "dbrx"
    attribute_map = {
        "num_attention_heads": "n_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "n_layers",
        "max_position_embeddings": "max_seq_len",
    }

    def __init__(
        self,
        d_model: int = 2048,
        n_heads: int = 16,
        n_layers: int = 24,
        max_seq_len: int = 2048,
        vocab_size: int = 32000,
        resid_pdrop: float = 0.0,
        emb_pdrop: float = 0.0,
        attn_config: Optional[DbrxAttentionConfig] = None,
        ffn_config: Optional[DbrxFFNConfig] = None,
        use_cache: bool = True,
        initializer_range: float = 0.02,
        output_router_logits: bool = False,
        router_aux_loss_coef: float = 0.05,
        **kwargs: Any,
    ):
        def _coerce(sub_config, config_cls):
            # Sub-configs may arrive as None, a plain dict, or an instance.
            if sub_config is None:
                return config_cls()
            if isinstance(sub_config, dict):
                return config_cls(**sub_config)
            return sub_config

        self.attn_config = _coerce(attn_config, DbrxAttentionConfig)
        self.ffn_config = _coerce(ffn_config, DbrxFFNConfig)

        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef

        # Dbrx never ties input and output embeddings; reject any attempt.
        tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
        if tie_word_embeddings:
            raise ValueError(
                "tie_word_embeddings is not supported for Dbrx models."
            )

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class RWConfig(PretrainedConfig):
    """Config for the original RefinedWeb (Falcon 7B/40B) checkpoints.

    Newer Falcon models use the official `FalconConfig` from transformers.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
        "num_kv_heads": "n_head_kv",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        multi_query=True,
        n_head_kv=None,
        alibi=False,
        bias=False,
        parallel_attn=False,
        new_decoder_architecture=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility: legacy checkpoints pass `n_embed`
        # instead of `hidden_size`.
        legacy_n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = (legacy_n_embed
                            if legacy_n_embed is not None else hidden_size)
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.multi_query = multi_query
        # Multi-query attention uses a single KV head unless specified.
        self.n_head_kv = n_head_kv if n_head_kv is not None else 1
        self.alibi = alibi
        self.bias = bias
        self.parallel_attn = parallel_attn
        self.new_decoder_architecture = new_decoder_architecture

        if self.hidden_size == 8192:
            # Hack for falcon-40b: it predates the flag, so detect it by
            # its unique hidden size.
            self.new_decoder_architecture = True

        super().__init__(bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id,
                         **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.n_head

    @property
    def rotary(self):
        # RW models use rotary embeddings exactly when ALiBi is off.
        return not self.alibi
class JAISConfig(PretrainedConfig):
    """Configuration class for [`JAISModel`].

    Inherits from [`PretrainedConfig`]; see its documentation for the
    generic options.

    Args:
        vocab_size (`int`, *optional*, defaults to 50257): vocabulary size.
        n_positions (`int`, *optional*, defaults to 1024): maximum sequence
            length.
        n_embd (`int`, *optional*, defaults to 768): embedding/hidden size.
        n_layer (`int`, *optional*, defaults to 12): number of hidden layers.
        n_head (`int`, *optional*, defaults to 12): attention heads per layer.
        n_inner (`int`, *optional*, defaults to None): inner FFN size;
            `None` means 4 * n_embd.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            one of `["relu", "silu", "gelu", "tanh", "gelu_new", "swiglu"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1): residual dropout.
        embd_pdrop (`float`, *optional*, defaults to 0.1): embedding dropout.
        attn_pdrop (`float`, *optional*, defaults to 0.1): attention dropout.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): layernorm
            epsilon.
        initializer_range (`float`, *optional*, defaults to 0.02): std of the
            truncated-normal initializer.
        scale_attn_weights (`bool`, *optional*, defaults to `True`): scale
            attention weights by 1/sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`): return KV cache.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to
            `False`): additionally scale attention by `1 / layer_idx + 1`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            scale keys before the dot product and upcast attention to float
            when training with mixed precision.
        position_embedding_type (`str`, *optional*, defaults to `"learned"`):
            either `"alibi"` or `"learned"`.
        mup_width_scale (`float`, *optional*, defaults to 1.0): muP learning
            rate / initializer scale, `d_model,0 / d_model`.
        mup_embeddings_scale (`float`, *optional*, defaults to 1.0): muP
            scale for token and position embeddings.
        mup_output_alpha (`float`, *optional*, defaults to 1.0): muP output
            logits scale (`output_logits_scale = mup_output_alpha *
            mup_width_scale`).
        mup_scale_qk_dot_by_d (`bool`, *optional*, defaults to `False`):
            scale attention by 1/hidden_size instead of 1/sqrt(hidden_size);
            requires `scale_attn_weights=True`.
        alibi_scaling (`Dict`, *optional*): ALiBi scaling config; either
            `{"type": "linear", "factor": f}` with float `f > 1`, or
            `{"type": "linear", "train_seq_len": n}` with int `n > 1`.
        architectures (`List`, *optional*, defaults to
            ['JAISLMHeadModel']): architecture names for Jais.
    """

    model_type = "jais"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        position_embedding_type="learned",
        mup_width_scale=1.0,
        mup_embeddings_scale=1.0,
        mup_output_alpha=1.0,
        mup_scale_qk_dot_by_d=False,
        alibi_scaling=None,
        architectures=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.position_embedding_type = position_embedding_type
        self.mup_width_scale = mup_width_scale
        self.mup_embeddings_scale = mup_embeddings_scale
        self.mup_output_alpha = mup_output_alpha
        self.mup_scale_qk_dot_by_d = mup_scale_qk_dot_by_d

        self.alibi_scaling = alibi_scaling
        # Fail fast on a malformed alibi_scaling dict.
        self._alibi_scaling_validation()
        if architectures is None:
            architectures = ["JAISLMHeadModel"]

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            architectures=architectures,
            **kwargs,
        )

    def _alibi_scaling_validation(self):
        """
        Validate the `alibi_scaling` configuration.

        Raises:
            ValueError: if `alibi_scaling` is not a two-field dict of the
                expected shape, or its values are out of range.
        """
        if self.alibi_scaling is None:
            return

        # NOTE: the error messages below previously lacked spaces at the
        # string-concatenation boundaries (e.g. "aninteger"); fixed here.
        if (not isinstance(self.alibi_scaling, dict)
                or len(self.alibi_scaling) != 2):
            raise ValueError(
                "`alibi_scaling` must be a dictionary with two fields, "
                "`type` and `factor` or `type` and `train_seq_len`, "
                f"got {self.alibi_scaling}")
        alibi_scaling_type = self.alibi_scaling.get("type", None)
        alibi_scaling_factor = self.alibi_scaling.get("factor", None)
        alibi_dynamic_scaling = self.alibi_scaling.get("train_seq_len", None)
        if alibi_scaling_type is None or alibi_scaling_type != "linear":
            raise ValueError(f"`alibi_scaling`'s type field must be 'linear', "
                             f"got {alibi_scaling_type}")
        if (alibi_scaling_factor is not None
                and not isinstance(alibi_scaling_factor, float)
                or (alibi_scaling_factor is not None
                    and alibi_scaling_factor <= 1.0)):
            raise ValueError(
                f"`alibi_scaling`'s factor field must be a float > 1.0, "
                f"got {alibi_scaling_factor}")
        if (alibi_dynamic_scaling is not None
                and not isinstance(alibi_dynamic_scaling, int)
                or (alibi_dynamic_scaling is not None
                    and alibi_dynamic_scaling <= 1)):
            raise ValueError(
                f"`alibi_scaling`'s `train_seq_len` field must be an "
                f"integer > 1, got {alibi_dynamic_scaling}")
# Library defaults for the nested MPT sub-configs. NOTE(review): these
# module-level dicts are used as mutable default arguments below and are
# filled in place by `_validate_config` — upstream behavior, kept as-is.
attn_config_defaults: Dict = {
    'attn_type': 'multihead_attention',
    'attn_pdrop': 0.0,
    'attn_impl': 'triton',
    'qk_ln': False,
    'clip_qkv': None,
    'softmax_scale': None,
    'prefix_lm': False,
    'attn_uses_sequence_id': False,
    'alibi': False,
    'alibi_bias_max': 8
}
ffn_config_defaults: Dict = {'ffn_type': 'mptmlp'}
init_config_defaults: Dict = {
    'name': 'kaiming_normal_',
    'fan_mode': 'fan_in',
    'init_nonlinearity': 'relu',
    'init_div_is_residual': True,
    'emb_init_std': None,
    'emb_init_uniform_lim': None,
    'init_std': None,
    'init_gain': 0.0
}


class MPTConfig(PretrainedConfig):
    """HuggingFace-style configuration for MPT models
    (copied from mosaicml/mpt-7b)."""

    model_type = 'mpt'
    attribute_map = {
        'num_attention_heads': 'n_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'n_layers',
    }

    # pylint: disable=dangerous-default-value
    def __init__(self,
                 d_model: int = 2048,
                 n_heads: int = 16,
                 n_layers: int = 24,
                 expansion_ratio: int = 4,
                 max_seq_len: int = 2048,
                 vocab_size: int = 50368,
                 resid_pdrop: float = 0.0,
                 emb_pdrop: float = 0.0,
                 learned_pos_emb: bool = True,
                 attn_config: Dict = attn_config_defaults,
                 ffn_config: Dict = ffn_config_defaults,
                 init_device: str = 'cpu',
                 logit_scale: Optional[Union[float, str]] = None,
                 no_bias: bool = False,
                 embedding_fraction: float = 1.0,
                 norm_type: str = 'low_precision_layernorm',
                 use_cache: bool = False,
                 init_config: Dict = init_config_defaults,
                 fc_type: str = 'torch',
                 verbose: Optional[int] = None,
                 **kwargs: Any):
        # Model geometry.
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.expansion_ratio = expansion_ratio
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        # Dropout.
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.learned_pos_emb = learned_pos_emb
        # Nested sub-configs (validated / defaulted in _validate_config).
        self.attn_config = attn_config
        self.ffn_config = ffn_config
        self.init_config = init_config
        # Misc settings.
        self.init_device = init_device
        self.logit_scale = logit_scale
        self.no_bias = no_bias
        self.embedding_fraction = embedding_fraction
        self.norm_type = norm_type
        self.use_cache = use_cache
        self.fc_type = fc_type

        if verbose is not None:
            warnings.warn(DeprecationWarning(
                'verbose argument for MPTConfig is now ignored and '
                'will be removed. Use python_log_level instead.'),
                          stacklevel=2)

        # Drop legacy keys that PretrainedConfig must not see.
        kwargs.pop('name', None)
        kwargs.pop('loss_fn', None)

        if self.attn_config.get('alibi', False):
            # ALiBi replaces learned positional embeddings.
            self.learned_pos_emb = False
            warnings.warn(
                f'alibi is turned on, setting `learned_pos_emb` '
                f'to {self.learned_pos_emb}`',
                stacklevel=2)
        super().__init__(**kwargs)
        self._validate_config()

    def _set_config_defaults(
            self, config: Dict[str, Any],
            config_defaults: Dict[str, Any]) -> Dict[str, Any]:
        # Fills missing keys of `config` in place and returns it.
        for key, default in config_defaults.items():
            config.setdefault(key, default)
        return config

    def _validate_config(self) -> None:
        """Apply sub-config defaults and reject inconsistent settings."""
        self.attn_config = self._set_config_defaults(self.attn_config,
                                                     attn_config_defaults)
        self.ffn_config = self._set_config_defaults(self.ffn_config,
                                                    ffn_config_defaults)
        self.init_config = self._set_config_defaults(self.init_config,
                                                     init_config_defaults)

        if self.d_model % self.n_heads != 0:
            raise ValueError('d_model must be divisible by n_heads')

        dropout_probs = (self.attn_config['attn_pdrop'], self.resid_pdrop,
                         self.emb_pdrop)
        if any(prob < 0 or prob > 1 for prob in dropout_probs):
            raise ValueError(
                "self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are "
                "probabilities and must be between 0 and 1")

        attn_impl = self.attn_config['attn_impl']
        if attn_impl not in ['torch', 'flash', 'triton']:
            raise ValueError(f"Unknown attn_impl={attn_impl}")

        # Several attention features exist only for the torch/triton paths.
        torch_or_triton = attn_impl in ['torch', 'triton']
        if self.attn_config['prefix_lm'] and not torch_or_triton:
            raise NotImplementedError(
                'prefix_lm only implemented with torch and triton attention.')
        if self.attn_config['alibi'] and not torch_or_triton:
            raise NotImplementedError(
                'alibi only implemented with torch and triton attention.')
        if self.attn_config['attn_uses_sequence_id'] and not torch_or_triton:
            raise NotImplementedError(
                'attn_uses_sequence_id only implemented with torch '
                'and triton attention.')

        if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
            raise ValueError(
                'model.embedding_fraction must be between 0 (exclusive) '
                'and 1 (inclusive)!')
        if isinstance(self.logit_scale,
                      str) and self.logit_scale != 'inv_sqrt_d_model':
            raise ValueError(
                f"self.logit_scale={self.logit_scale!r} is not recognized as "
                "an option; use numeric value or 'inv_sqrt_d_model'.")
        if self.init_config.get('name', None) is None:
            raise ValueError(
                f"self.init_config={self.init_config!r} 'name' needs to be set."
            )
        if not self.learned_pos_emb and not self.attn_config['alibi']:
            warnings.warn(
                'Positional information not being provided to the model.',
                stacklevel=2)

        if self.fc_type == 'te' or self.ffn_config['ffn_type'] == 'te_ln_mlp':
            # TransformerEngine is an optional dependency; probe the import.
            try:
                # pylint: disable=import-outside-toplevel
                import transformer_engine.pytorch as te
                del te
            except Exception as exc:
                raise ImportError(
                    'TransformerEngine import fail. `fc_type: te` requires '
                    'TransformerEngine be installed. '
                    'The required version of transformer_engine also requires '
                    'FlashAttention v1.0.6 is installed:\n'
                    'pip install flash-attn==1.0.6 --no-build-isolation \n'
                    'pip install git+https://github.com/NVIDIA/TransformerEngine.git@144e4888b2cdd60bd52e706d5b7a79cb9c1a7156'
                ) from exc

        if self.ffn_config['ffn_type'] == 'mptmlp':
            self.ffn_config['fc_type'] = self.fc_type
        elif self.ffn_config['ffn_type'] == 'te_ln_mlp':
            self.ffn_config['bias'] = not self.no_bias
+ seq = next(iter(seq_group.seqs_dict.values())) + # Only prompt, without the generated token. + all_token_ids = seq.get_token_ids() + prompt_token_ids = all_token_ids[:-1] + tokenizer = self.get_tokenizer_for_seq(seq) + prefix_offset = 0 + read_offset = 0 + next_iter_prefix_offset = 0 + next_iter_read_offset = 0 + next_iter_tokens = [] + prev_tokens = None + + for token_position, prompt_logprobs_for_token in enumerate( + prompt_logprobs): + if not prompt_logprobs_for_token: + continue + for token_id, sample_logprob in prompt_logprobs_for_token.items(): + if (sample_logprob.decoded_token is None + and token_id != INVALID_TOKEN_ID): + prompt_token_ids_with_token = ( + prompt_token_ids[:token_position] + [token_id]) + (new_tokens, new_text, new_prefix_offset, + new_read_offset) = detokenize_incrementally( + tokenizer=tokenizer, + all_input_ids=prompt_token_ids_with_token, + prev_tokens=prev_tokens, + prefix_offset=prefix_offset, + read_offset=read_offset, + skip_special_tokens=prms.skip_special_tokens, + spaces_between_special_tokens=prms. + spaces_between_special_tokens, + ) + + sample_logprob.decoded_token = new_text + + # Use the offsets & prev tokens corresponding to + # real tokens to ensure detokenization is consistent + # actual with prompt. + if token_id == all_token_ids[token_position]: + next_iter_prefix_offset = new_prefix_offset + next_iter_read_offset = new_read_offset + next_iter_tokens = new_tokens + + # Advance to the next token position. + prefix_offset = next_iter_prefix_offset + read_offset = next_iter_read_offset + if prev_tokens is None: + prev_tokens = next_iter_tokens + else: + prev_tokens.extend(next_iter_tokens) + + def decode_sequence_inplace(self, seq: Sequence, + prms: SamplingParams) -> int: + """Decodes the new token for a sequence. In-place operation. + + Args: + seq: The sequence to decode. + prms: The sampling parameters used to generate the sequence. + + Returns: + The number of characters added to the output text. 
+ """ + all_input_ids = seq.get_token_ids() + token_id_generated_this_iteration = all_input_ids[-1] + tokenizer = self.get_tokenizer_for_seq(seq) + + # Convert prompt token IDs to tokens if necessary. + # Do it here so that we don't have to repeat this + # computation for each logprob. + if seq.tokens is None: + (seq.tokens, seq.prefix_offset, + seq.read_offset) = convert_prompt_ids_to_tokens( + tokenizer=tokenizer, + prompt_ids=all_input_ids[:-1], + skip_special_tokens=prms.skip_special_tokens, + ) + + (new_tokens, new_decoded_token_text, prefix_offset, + read_offset) = detokenize_incrementally( + tokenizer=tokenizer, + all_input_ids=all_input_ids, + prev_tokens=seq.tokens, + prefix_offset=seq.prefix_offset, + read_offset=seq.read_offset, + skip_special_tokens=prms.skip_special_tokens, + spaces_between_special_tokens=prms.spaces_between_special_tokens, + ) + + # Decode logprobs + logprobs = seq.output_logprobs[-1] + if logprobs: + previous_tokens = all_input_ids[:-1] + for token_id, sample_logprob in logprobs.items(): + # If the token was generated this iteration, + # use the provided text. + if token_id == token_id_generated_this_iteration: + sample_logprob.decoded_token = new_decoded_token_text + continue + + if (sample_logprob.decoded_token is None + and token_id != INVALID_TOKEN_ID): + all_input_ids_with_logprob = previous_tokens + [token_id] + (_, new_text, _, _) = detokenize_incrementally( + tokenizer=tokenizer, + all_input_ids=all_input_ids_with_logprob, + prev_tokens=seq.tokens, + prefix_offset=seq.prefix_offset, + read_offset=seq.read_offset, + skip_special_tokens=prms.skip_special_tokens, + spaces_between_special_tokens=prms. 
+ spaces_between_special_tokens, + ) + sample_logprob.decoded_token = new_text + + seq.tokens.extend(new_tokens) + seq.prefix_offset = prefix_offset + seq.read_offset = read_offset + seq.output_text += new_decoded_token_text + + return len(new_decoded_token_text) + + +def _convert_tokens_to_string_with_added_encoders( + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + output_tokens: List[str], + skip_special_tokens: bool, + spaces_between_special_tokens: bool, +) -> str: + # Adapted from + # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/tokenization_utils.py#L921 + # NOTE(woosuk): The following code is slow because it runs a for loop over + # the output_tokens. In Python, running a for loop over a list can be slow + # even when the loop body is very simple. + sub_texts: List[str] = [] + current_sub_text: List[str] = [] + all_special_tokens = set(tokenizer.all_special_tokens) + for token in output_tokens: + if skip_special_tokens and token in all_special_tokens: + continue + if token in tokenizer.get_added_vocab(): + if current_sub_text: + sub_text = tokenizer.convert_tokens_to_string(current_sub_text) + sub_texts.append(sub_text) + current_sub_text = [] + sub_texts.append(token) + else: + current_sub_text.append(token) + if current_sub_text: + sub_text = tokenizer.convert_tokens_to_string(current_sub_text) + sub_texts.append(sub_text) + if spaces_between_special_tokens: + return " ".join(sub_texts) + else: + return "".join(sub_texts) + + +# 5 is an arbitrary value that should work for all +# tokenizers (bigger = more conservative). +INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET = 5 + + +def convert_prompt_ids_to_tokens( + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + prompt_ids: List[int], + skip_special_tokens: bool = False, +) -> Tuple[List[str], int, int]: + """Converts the prompt ids to tokens and returns the tokens and offsets + for incremental detokenization. 
+ + Note that not all tokens are converted to strings. Only the tokens that + are necessary for incremental detokenization are converted to strings. + """ + # We do not need to convert the whole prompt to tokens. + # Offset a little more in case we have special tokens. + new_tokens = tokenizer.convert_ids_to_tokens( + prompt_ids[-INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET - 2:], + skip_special_tokens=skip_special_tokens) + read_offset = len(new_tokens) + prefix_offset = max( + read_offset - INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET, 0) + return new_tokens, prefix_offset, read_offset + + +# Based on +# https://github.com/huggingface/text-generation-inference/blob/v0.9.4/server/text_generation_server/models/model.py#L62C9-L62C15 +# under Apache 2.0 license +def detokenize_incrementally( + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + all_input_ids: List[int], + prev_tokens: Optional[List[str]], + prefix_offset: int, + read_offset: int, + skip_special_tokens: bool = False, + spaces_between_special_tokens: bool = True, +) -> Tuple[List[str], str, int, int]: + """Detokenizes the input ids incrementally and returns the new tokens + and the new text. + + If `prev_tokens` is None, this function will convert the input ids to + tokens and return the tokens and the new text. Otherwise, it will return the + new tokens and the new text. + + This function will also return the new prefix offset and the new read + offset to be used in the next iteration. + + The offsets are necessary to defeat cleanup algorithms in the decode which + decide to add a space or not depending on the surrounding ids. + + Args: + tokenizer: The tokenizer to use. + all_input_ids: The input ids. The last id is the new token id. + prev_tokens: The previous tokens. If None, this function will convert + the input ids to tokens and return the tokens and the new text. + prefix_offset: The prefix offset. + read_offset: The read offset. + skip_special_tokens: Whether to skip special tokens. 
+ spaces_between_special_tokens: Whether to add spaces between special + tokens. + """ + new_token_id = all_input_ids[-1] + # This is the first iteration for this sequence + is_first_iter = prev_tokens is None + if is_first_iter: + (prev_tokens, prefix_offset, + read_offset) = convert_prompt_ids_to_tokens( + tokenizer, + all_input_ids[:-1], + skip_special_tokens=skip_special_tokens) + assert prev_tokens is not None + + # If the new token id is out of bounds, return an empty string. + if new_token_id >= len(tokenizer): + new_tokens = [""] + else: + # Put new_token_id in a list so skip_special_tokens is respected + new_tokens = tokenizer.convert_ids_to_tokens( + [new_token_id], skip_special_tokens=skip_special_tokens) + if isinstance(new_tokens, str): + new_tokens = [new_tokens] + output_tokens = prev_tokens + new_tokens + + # If this is the first iteration, return all tokens. + if is_first_iter: + new_tokens = output_tokens + + # The prefix text is necessary only to defeat cleanup algorithms in + # the decode which decide to add a space or not depending on the + # surrounding ids. + if tokenizer.is_fast or not tokenizer.get_added_vocab(): + prefix_text = tokenizer.convert_tokens_to_string( + output_tokens[prefix_offset:read_offset]) + new_text = tokenizer.convert_tokens_to_string( + output_tokens[prefix_offset:]) + else: + prefix_text = _convert_tokens_to_string_with_added_encoders( + tokenizer, + output_tokens[prefix_offset:read_offset], + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + ) + new_text = _convert_tokens_to_string_with_added_encoders( + tokenizer, + output_tokens[prefix_offset:], + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + ) + + if len(new_text) <= len(prefix_text) or new_text.endswith("�"): + # utf-8 char at the end means it's a potential unfinished byte sequence + # from byte fallback tokenization. 
+ # If it's in the middle, it's probably a real invalid id generated + # by the model + return new_tokens, "", prefix_offset, read_offset + + new_text = new_text[len(prefix_text):] + return new_tokens, new_text, read_offset, len(output_tokens) diff --git a/vllm/transformers_utils/tokenizer.py b/vllm/transformers_utils/tokenizer.py new file mode 100644 index 0000000..f5684db --- /dev/null +++ b/vllm/transformers_utils/tokenizer.py @@ -0,0 +1,149 @@ +import os +from typing import Optional, Union + +import huggingface_hub +from transformers import (AutoTokenizer, PreTrainedTokenizer, + PreTrainedTokenizerFast) + +from vllm.envs import VLLM_USE_MODELSCOPE +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.transformers_utils.tokenizers import BaichuanTokenizer +from vllm.utils import make_async + +logger = init_logger(__name__) + + +def get_cached_tokenizer( + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast] +) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: + """Get tokenizer with cached properties. + + This will patch the tokenizer object in place. + + By default, transformers will recompute multiple tokenizer properties + each time they are called, leading to a significant slowdown. 
This + function caches these properties for faster access.""" + + tokenizer_all_special_ids = set(tokenizer.all_special_ids) + tokenizer_all_special_tokens_extended = ( + tokenizer.all_special_tokens_extended) + tokenizer_all_special_tokens = set(tokenizer.all_special_tokens) + tokenizer_len = len(tokenizer) + + class CachedTokenizer(tokenizer.__class__): # type: ignore + + @property + def all_special_ids(self): + return tokenizer_all_special_ids + + @property + def all_special_tokens(self): + return tokenizer_all_special_tokens + + @property + def all_special_tokens_extended(self): + return tokenizer_all_special_tokens_extended + + def __len__(self): + return tokenizer_len + + CachedTokenizer.__name__ = f"Cached{tokenizer.__class__.__name__}" + + tokenizer.__class__ = CachedTokenizer + return tokenizer + + +def get_tokenizer( + tokenizer_name: str, + *args, + tokenizer_mode: str = "auto", + trust_remote_code: bool = False, + revision: Optional[str] = None, + download_dir: Optional[str] = None, + **kwargs, +) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: + """Gets a tokenizer for the given model name via HuggingFace or ModelScope. + """ + if VLLM_USE_MODELSCOPE: + # download model from ModelScope hub, + # lazy import so that modelscope is not required for normal use. + # pylint: disable=C. + from modelscope.hub.snapshot_download import snapshot_download + + # Only set the tokenizer here, model will be downloaded on the workers. + if not os.path.exists(tokenizer_name): + tokenizer_path = snapshot_download( + model_id=tokenizer_name, + cache_dir=download_dir, + revision=revision, + local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE, + # Ignore weights - we only need the tokenizer. 
+ ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"]) + tokenizer_name = tokenizer_path + + if tokenizer_mode == "slow": + if kwargs.get("use_fast", False): + raise ValueError( + "Cannot use the fast tokenizer in slow tokenizer mode.") + kwargs["use_fast"] = False + + try: + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, + *args, + trust_remote_code=trust_remote_code, + revision=revision, + **kwargs) + except ValueError as e: + # If the error pertains to the tokenizer class not existing or not + # currently being imported, suggest using the --trust-remote-code flag. + if (not trust_remote_code and + ("does not exist or is not currently imported." in str(e) + or "requires you to execute the tokenizer file" in str(e))): + err_msg = ( + "Failed to load the tokenizer. If the tokenizer is a custom " + "tokenizer not yet available in the HuggingFace transformers " + "library, consider setting `trust_remote_code=True` in LLM " + "or using the `--trust-remote-code` flag in the CLI.") + raise RuntimeError(err_msg) from e + else: + raise e + except AttributeError as e: + if "BaichuanTokenizer" in str(e): + # This is for the error "'BaichuanTokenizer' object has no + # attribute 'sp_model'". + tokenizer = BaichuanTokenizer.from_pretrained( + tokenizer_name, + *args, + trust_remote_code=trust_remote_code, + revision=revision, + **kwargs) + else: + raise e + + if not isinstance(tokenizer, PreTrainedTokenizerFast): + logger.warning( + "Using a slow tokenizer. This might cause a significant " + "slowdown. 
Consider using a fast tokenizer instead.") + return get_cached_tokenizer(tokenizer) + + +def get_lora_tokenizer(lora_request: LoRARequest, *args, + **kwargs) -> Optional[PreTrainedTokenizer]: + if lora_request is None: + return None + try: + tokenizer = get_tokenizer(lora_request.lora_local_path, *args, + **kwargs) + except OSError as e: + # No tokenizer was found in the LoRA folder, + # use base model tokenizer + logger.warning( + "No tokenizer found in %s, using base model tokenizer instead. " + "(Exception: %s)", lora_request.lora_local_path, e) + tokenizer = None + return tokenizer + + +get_lora_tokenizer_async = make_async(get_lora_tokenizer) diff --git a/vllm/transformers_utils/tokenizer_group/__init__.py b/vllm/transformers_utils/tokenizer_group/__init__.py new file mode 100644 index 0000000..0195c40 --- /dev/null +++ b/vllm/transformers_utils/tokenizer_group/__init__.py @@ -0,0 +1,33 @@ +from typing import Optional + +from vllm.config import TokenizerPoolConfig +from vllm.executor.ray_utils import ray +from vllm.transformers_utils.tokenizer_group.base_tokenizer_group import ( + BaseTokenizerGroup) +from vllm.transformers_utils.tokenizer_group.tokenizer_group import ( + TokenizerGroup) + +if ray: + from vllm.transformers_utils.tokenizer_group.ray_tokenizer_group import ( + RayTokenizerGroupPool) +else: + RayTokenizerGroupPool = None # type: ignore + + +def get_tokenizer_group(tokenizer_pool_config: Optional[TokenizerPoolConfig], + **init_kwargs) -> BaseTokenizerGroup: + if tokenizer_pool_config is None: + return TokenizerGroup(**init_kwargs) + if tokenizer_pool_config.pool_type == "ray": + if RayTokenizerGroupPool is None: + raise ImportError( + "RayTokenizerGroupPool is not available. 
Please install " + "the ray package to use the Ray tokenizer group pool.") + return RayTokenizerGroupPool.from_config(tokenizer_pool_config, + **init_kwargs) + else: + raise ValueError( + f"Unknown pool type: {tokenizer_pool_config.pool_type}") + + +__all__ = ["get_tokenizer_group", "BaseTokenizerGroup"] diff --git a/vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py b/vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py new file mode 100644 index 0000000..3cce96e --- /dev/null +++ b/vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py @@ -0,0 +1,55 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from transformers import PreTrainedTokenizer + +from vllm.lora.request import LoRARequest + + +class BaseTokenizerGroup(ABC): + """A group of tokenizers that can be used for LoRA adapters.""" + + @abstractmethod + def ping(self) -> bool: + """Check if the tokenizer group is alive.""" + pass + + @abstractmethod + def get_max_input_len(self, + lora_request: Optional[LoRARequest] = None + ) -> Optional[int]: + """Get the maximum input length for the LoRA request.""" + pass + + @abstractmethod + def encode(self, + prompt: str, + request_id: Optional[str] = None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + """Encode a prompt using the tokenizer group.""" + pass + + @abstractmethod + async def encode_async( + self, + prompt: str, + request_id: Optional[str] = None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + """Encode a prompt using the tokenizer group.""" + pass + + @abstractmethod + def get_lora_tokenizer( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + """Get a tokenizer for a LoRA request.""" + pass + + @abstractmethod + async def get_lora_tokenizer_async( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + """Get a tokenizer for a LoRA request.""" + pass diff --git 
a/vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py b/vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py new file mode 100644 index 0000000..7c60541 --- /dev/null +++ b/vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py @@ -0,0 +1,169 @@ +import asyncio +import os +from typing import List, Optional + +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy +from transformers import PreTrainedTokenizer + +from vllm.config import TokenizerPoolConfig +from vllm.executor.ray_utils import ray +from vllm.lora.request import LoRARequest +from vllm.transformers_utils.tokenizer_group.base_tokenizer_group import ( + BaseTokenizerGroup) +from vllm.transformers_utils.tokenizer_group.tokenizer_group import ( + TokenizerGroup) + + +class RayTokenizerGroupPool(BaseTokenizerGroup): + """A Ray-based pool of TokenizerGroups for async tokenization.""" + + # Class to use for workers making up the pool. + _worker_cls = TokenizerGroup + + @classmethod + def from_config(cls, tokenizer_pool_config: TokenizerPoolConfig, + **init_kwargs) -> "RayTokenizerGroupPool": + ray_actor_options = (tokenizer_pool_config.extra_config or { + "num_cpus": 0 + }) + ray_actor_options.setdefault( + "scheduling_strategy", + NodeAffinitySchedulingStrategy( + node_id=ray.get_runtime_context().get_node_id(), soft=True)) + + # Carry over the env vars to the actors. + # This is necessary for API keys and such. + ray_actor_options.setdefault("runtime_env", {}) + _carry_over_env_vars_to_runtime_env(ray_actor_options["runtime_env"]) + + init_kwargs["num_actors"] = tokenizer_pool_config.pool_size + init_kwargs["ray_actor_options"] = ray_actor_options + + return cls(**init_kwargs) + + def __init__(self, tokenizer_id: str, enable_lora: bool, max_num_seqs: int, + max_input_length: Optional[int], num_actors: int, + ray_actor_options: dict, **tokenizer_config): + # Store a local copy of the TokenizerGroup for quick access + # to underlying HF tokenizers. 
+ self._local_tokenizer_group = self._worker_cls( + tokenizer_id=tokenizer_id, + enable_lora=enable_lora, + max_num_seqs=max_num_seqs, + max_input_length=max_input_length, + **tokenizer_config, + ) + + ray_tokenizer_group_cls = ray.remote( + self._worker_cls).options(**ray_actor_options) + self.tokenizer_actors = [ + ray_tokenizer_group_cls.remote(tokenizer_id, enable_lora, + max_num_seqs, max_input_length, + **tokenizer_config) + for _ in range(num_actors) + ] + self._idle_actors: Optional[asyncio.Queue] = None + + @property + def pool_size(self) -> int: + return len(self.tokenizer_actors) + + def ping(self): + return ray.get( + [actor.ping.remote() for actor in self.tokenizer_actors]) + + def _ensure_queue_initialized(self): + if self._idle_actors is None: + self._idle_actors = asyncio.Queue() + for actor in self.tokenizer_actors: + self._idle_actors.put_nowait(actor) + + def encode(self, + prompt: str, + request_id: Optional[str] = None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + """Encode a prompt using the tokenizer group. + + We pick an idle actor and use it to encode the prompt. + The actor is then put back in the queue for future use. + This is blocking. + """ + self._ensure_queue_initialized() + assert self._idle_actors is not None + + if self._idle_actors.empty(): + raise RuntimeError("No idle actors available.") + actor = self._idle_actors.get_nowait() + try: + ret = ray.get( + actor.encode.remote(request_id=request_id, + prompt=prompt, + lora_request=lora_request)) + finally: + # Put the actor back in the queue. + # This is done in a finally block to ensure that the actor is + # always put back in the queue, even if an exception/cancellation + # is raised. + self._idle_actors.put_nowait(actor) + return ret + + async def encode_async( + self, + prompt: str, + request_id: Optional[str] = None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + """Encode a prompt using the tokenizer group. 
+ + We pick an idle actor and use it to encode the prompt. + If there are no idle actors, we wait until one becomes + available. + The actor is then put back in the queue for future use. + This is non-blocking. + """ + self._ensure_queue_initialized() + assert self._idle_actors is not None + + actor = await self._idle_actors.get() + try: + ret = await actor.encode.remote(request_id=request_id, + prompt=prompt, + lora_request=lora_request) + finally: + # Put the actor back in the queue. + # This is done in a finally block to ensure that the actor is + # always put back in the queue, even if an exception/cancellation + # is raised. + self._idle_actors.put_nowait(actor) + return ret + + def get_max_input_len(self, + lora_request: Optional[LoRARequest] = None + ) -> Optional[int]: + """Get the maximum input length for the LoRA request.""" + return self._local_tokenizer_group.get_max_input_len(lora_request) + + def get_lora_tokenizer( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + return self._local_tokenizer_group.get_lora_tokenizer(lora_request) + + async def get_lora_tokenizer_async( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + return await self._local_tokenizer_group.get_lora_tokenizer_async( + lora_request) + + +def _carry_over_env_vars_to_runtime_env(runtime_env: dict) -> None: + """Copy over all current process environment variables to the runtime_env. + + The variables in runtime_env will take precedence over the current process + environment variables. 
+ + runtime_env will be modified in place.""" + env_vars = os.environ.copy() + runtime_env.setdefault("env_vars", {}) + env_vars.update(runtime_env["env_vars"]) + runtime_env["env_vars"] = env_vars diff --git a/vllm/transformers_utils/tokenizer_group/tokenizer_group.py b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py new file mode 100644 index 0000000..927cbee --- /dev/null +++ b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py @@ -0,0 +1,78 @@ +from typing import List, Optional + +from transformers import PreTrainedTokenizer + +from vllm.lora.request import LoRARequest +from vllm.transformers_utils.tokenizer import (get_lora_tokenizer, + get_lora_tokenizer_async, + get_tokenizer) +from vllm.transformers_utils.tokenizer_group.base_tokenizer_group import ( + BaseTokenizerGroup) +from vllm.utils import LRUCache + + +class TokenizerGroup(BaseTokenizerGroup): + """A group of tokenizers that can be used for LoRA adapters.""" + + def __init__(self, tokenizer_id: str, enable_lora: bool, max_num_seqs: int, + max_input_length: Optional[int], **tokenizer_config): + self.tokenizer_id = tokenizer_id + self.tokenizer_config = tokenizer_config + self.enable_lora = enable_lora + self.max_input_length = max_input_length + self.tokenizer = get_tokenizer(self.tokenizer_id, **tokenizer_config) + self.lora_tokenizers = LRUCache[PreTrainedTokenizer]( + capacity=max_num_seqs) if enable_lora else None + + def ping(self) -> bool: + """Check if the tokenizer group is alive.""" + return True + + def get_max_input_len(self, + lora_request: Optional[LoRARequest] = None + ) -> Optional[int]: + """Get the maximum input length for the LoRA request.""" + return self.max_input_length + + def encode(self, + prompt: str, + request_id: Optional[str] = None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + tokenizer = self.get_lora_tokenizer(lora_request) + return tokenizer.encode(prompt) + + async def encode_async( + self, + prompt: str, + request_id: Optional[str] 
= None, + lora_request: Optional[LoRARequest] = None) -> List[int]: + tokenizer = await self.get_lora_tokenizer_async(lora_request) + return tokenizer.encode(prompt) + + def get_lora_tokenizer( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + if not lora_request or not self.enable_lora: + return self.tokenizer + if lora_request.lora_int_id not in self.lora_tokenizers: + tokenizer = (get_lora_tokenizer( + lora_request, **self.tokenizer_config) or self.tokenizer) + self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer) + return tokenizer + else: + return self.lora_tokenizers.get(lora_request.lora_int_id) + + async def get_lora_tokenizer_async( + self, + lora_request: Optional[LoRARequest] = None + ) -> "PreTrainedTokenizer": + if not lora_request or not self.enable_lora: + return self.tokenizer + if lora_request.lora_int_id not in self.lora_tokenizers: + tokenizer = (await get_lora_tokenizer_async( + lora_request, **self.tokenizer_config) or self.tokenizer) + self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer) + return tokenizer + else: + return self.lora_tokenizers.get(lora_request.lora_int_id) diff --git a/vllm/transformers_utils/tokenizers/__init__.py b/vllm/transformers_utils/tokenizers/__init__.py new file mode 100644 index 0000000..e6b5972 --- /dev/null +++ b/vllm/transformers_utils/tokenizers/__init__.py @@ -0,0 +1,5 @@ +from vllm.transformers_utils.tokenizers.baichuan import BaichuanTokenizer + +__all__ = [ + "BaichuanTokenizer", +] diff --git a/vllm/transformers_utils/tokenizers/baichuan.py b/vllm/transformers_utils/tokenizers/baichuan.py new file mode 100644 index 0000000..86fbe48 --- /dev/null +++ b/vllm/transformers_utils/tokenizers/baichuan.py @@ -0,0 +1,256 @@ +# Adapted from +# https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/8f6e343d545c503b91429582231d1d354dac2740/tokenization_baichuan.py +# This includes a fix suggested in +# 
https://github.com/vllm-project/vllm/issues/1403#issuecomment-1767503058 +# Copyright (c) 2024 - 2024 Moore Threads Technology Co., Ltd("Moore Threads"). All rights reserved. +# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved. + +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} + +PRETRAINED_VOCAB_FILES_MAP = { # type: ignore + "vocab_file": {}, + "tokenizer_file": {}, +} +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {} # type: ignore + + +class BaichuanTokenizer(PreTrainedTokenizer): + """ + Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + unk_token="", + bos_token="", + eos_token="", + pad_token=None, + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + self.sp_model_kwargs = ({} if sp_model_kwargs is None else + sp_model_kwargs) + bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) + if isinstance(bos_token, str) else bos_token) + eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) + if isinstance(eos_token, str) else eos_token) + unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) + if isinstance(unk_token, str) else unk_token) + pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) + if isinstance(pad_token, str) else pad_token) + self.vocab_file = vocab_file + 
self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + sp_model_kwargs=self.sp_model_kwargs, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = { + self.convert_ids_to_tokens(i): i + for i in range(self.vocab_size) + } + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text): + """Returns a tokenized string.""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens: List[str]): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens: List[str] = [] + out_string = "" + prev_is_special = False + for i, token in enumerate(tokens): + # make sure that special tokens are not decoded using + # sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special and i != 0: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: 
+ current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + return out_string + + def save_vocabulary(self, + save_directory, + filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if not os.path.isdir(save_directory): + raise ValueError(f"Vocabulary path ({save_directory}) " + "should be a directory") + + out_vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + + VOCAB_FILES_NAMES["vocab_file"], + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath( + out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file, ) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + def get_special_tokens_mask( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False, + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens + added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. 
+ already_has_special_tokens (`bool`, *optional*, defaults to + `False`): + Whether or not the token list is already formatted with + special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: + 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True, + ) + + bos_token_id = [1] if self.add_bos_token else [] + eos_token_id = [1] if self.add_eos_token else [] + + if token_ids_1 is None: + return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + return (bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id) + + def create_token_type_ids_from_sequences( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None) -> List[int]: + """ + Creates a mask from the two sequences passed to be used in a + sequence-pair classification task. An ALBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + if token_ids_1 is None, only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of ids. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) + according to the given sequence(s). 
+ """ + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) + + if token_ids_1 is not None: + output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) + + return output diff --git a/vllm/usage/__init__.py b/vllm/usage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py new file mode 100644 index 0000000..9029a5b --- /dev/null +++ b/vllm/usage/usage_lib.py @@ -0,0 +1,209 @@ +import datetime +import json +import logging +import os +import platform +import time +from enum import Enum +from pathlib import Path +from threading import Thread +from typing import Any, Dict, Optional +from uuid import uuid4 + +import cpuinfo +import psutil +import requests +import torch + +import vllm.envs as envs + +_config_home = envs.VLLM_CONFIG_ROOT +_USAGE_STATS_JSON_PATH = os.path.join(_config_home, "vllm/usage_stats.json") +_USAGE_STATS_DO_NOT_TRACK_PATH = os.path.join(_config_home, + "vllm/do_not_track") +_USAGE_STATS_ENABLED = None +_USAGE_STATS_SERVER = envs.VLLM_USAGE_STATS_SERVER + + +def is_usage_stats_enabled(): + """Determine whether or not we can send usage stats to the server. + The logic is as follows: + - By default, it should be enabled. 
def is_usage_stats_enabled():
    """Determine whether or not we can send usage stats to the server.
    The logic is as follows:
    - By default, it should be enabled.
    - Three environment variables can disable it:
        - VLLM_DO_NOT_TRACK=1
        - DO_NOT_TRACK=1
        - VLLM_NO_USAGE_STATS=1
    - A file in the home directory can disable it if it exists:
        - $HOME/.config/vllm/do_not_track
    """
    global _USAGE_STATS_ENABLED
    if _USAGE_STATS_ENABLED is None:
        # Inspect the environment only once and cache the decision.
        do_not_track = envs.VLLM_DO_NOT_TRACK
        no_usage_stats = envs.VLLM_NO_USAGE_STATS
        do_not_track_file = os.path.exists(_USAGE_STATS_DO_NOT_TRACK_PATH)

        _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats
                                    or do_not_track_file)
    return _USAGE_STATS_ENABLED


def _get_current_timestamp_ns() -> int:
    """Current UTC wall-clock time in integer nanoseconds since the epoch."""
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9)


def _detect_cloud_provider() -> str:
    """Best-effort cloud-provider detection via DMI files, then env vars.

    Returns a short provider tag (e.g. "AWS") or "UNKNOWN".
    """
    # Try detecting through vendor file
    vendor_files = [
        "/sys/class/dmi/id/product_version", "/sys/class/dmi/id/bios_vendor",
        "/sys/class/dmi/id/product_name",
        "/sys/class/dmi/id/chassis_asset_tag", "/sys/class/dmi/id/sys_vendor"
    ]
    # Mapping of identifiable strings to cloud providers
    cloud_identifiers = {
        "amazon": "AWS",
        "microsoft corporation": "AZURE",
        "google": "GCP",
        "oraclecloud": "OCI",
    }

    for vendor_file in vendor_files:
        path = Path(vendor_file)
        if path.is_file():
            file_content = path.read_text().lower()
            for identifier, provider in cloud_identifiers.items():
                if identifier in file_content:
                    return provider

    # Try detecting through environment variables
    env_to_cloud_provider = {
        "RUNPOD_DC_ID": "RUNPOD",
    }
    for env_var, provider in env_to_cloud_provider.items():
        if os.environ.get(env_var):
            return provider

    return "UNKNOWN"


class UsageContext(str, Enum):
    """Where the engine was started from; reported with usage stats."""
    UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT"
    LLM_CLASS = "LLM_CLASS"
    API_SERVER = "API_SERVER"
    OPENAI_API_SERVER = "OPENAI_API_SERVER"
    ENGINE_CONTEXT = "ENGINE_CONTEXT"


class UsageMessage:
    """Collect platform information and send it to the usage stats server."""

    def __init__(self) -> None:
        # NOTE: vLLM's server _only_ support flat KV pair.
        # Do not use nested fields.

        self.uuid = str(uuid4())

        # Environment Information
        self.provider: Optional[str] = None
        self.num_cpu: Optional[int] = None
        self.cpu_type: Optional[str] = None
        self.cpu_family_model_stepping: Optional[str] = None
        self.total_memory: Optional[int] = None
        self.architecture: Optional[str] = None
        self.platform: Optional[str] = None
        self.gpu_count: Optional[int] = None
        self.gpu_type: Optional[str] = None
        self.gpu_memory_per_device: Optional[int] = None

        # vLLM Information
        self.model_architecture: Optional[str] = None
        self.vllm_version: Optional[str] = None
        self.context: Optional[str] = None

        # Metadata
        self.log_time: Optional[int] = None
        self.source: Optional[str] = None

    def report_usage(self,
                     model_architecture: str,
                     usage_context: UsageContext,
                     extra_kvs: Optional[Dict[str, Any]] = None) -> None:
        """Fire-and-forget usage reporting on a daemon thread."""
        t = Thread(target=self._report_usage_worker,
                   args=(model_architecture, usage_context, extra_kvs or {}),
                   daemon=True)
        t.start()

    def _report_usage_worker(self, model_architecture: str,
                             usage_context: UsageContext,
                             extra_kvs: Dict[str, Any]) -> None:
        self._report_usage_once(model_architecture, usage_context, extra_kvs)
        self._report_continuous_usage()

    def _report_usage_once(self, model_architecture: str,
                           usage_context: UsageContext,
                           extra_kvs: Dict[str, Any]) -> None:
        """Collect one full snapshot and persist/send it."""
        # Platform information
        if torch.cuda.is_available():
            device_property = torch.cuda.get_device_properties(0)
            self.gpu_count = torch.cuda.device_count()
            self.gpu_type = device_property.name
            self.gpu_memory_per_device = device_property.total_memory
        self.provider = _detect_cloud_provider()
        self.architecture = platform.machine()
        self.platform = platform.platform()
        self.total_memory = psutil.virtual_memory().total

        info = cpuinfo.get_cpu_info()
        self.num_cpu = info.get("count", None)
        self.cpu_type = info.get("brand_raw", "")
        self.cpu_family_model_stepping = ",".join([
            str(info.get("family", "")),
            str(info.get("model", "")),
            str(info.get("stepping", ""))
        ])

        # vLLM information
        import vllm  # delayed import to prevent circular import
        self.context = usage_context.value
        self.vllm_version = vllm.__version__
        self.model_architecture = model_architecture

        # Metadata
        self.log_time = _get_current_timestamp_ns()
        self.source = envs.VLLM_USAGE_SOURCE

        # BUGFIX: vars(self) returns the live __dict__; updating it with
        # extra_kvs would silently add attributes to this object. Copy it.
        data = vars(self).copy()
        if extra_kvs:
            data.update(extra_kvs)

        self._write_to_file(data)
        self._send_to_server(data)

    def _report_continuous_usage(self):
        """Report usage every 10 minutes.

        This helps us to collect more data points for uptime of vLLM usages.
        This function can also help send over performance metrics over time.
        """
        while True:
            time.sleep(600)
            data = {"uuid": self.uuid, "log_time": _get_current_timestamp_ns()}

            self._write_to_file(data)
            self._send_to_server(data)

    def _send_to_server(self, data):
        try:
            requests.post(_USAGE_STATS_SERVER, json=data)
        except requests.exceptions.RequestException:
            # silently ignore unless we are using debug log
            logging.debug("Failed to send usage data to server")

    def _write_to_file(self, data):
        # Append one JSON object per line (JSONL) to the local stats file.
        os.makedirs(os.path.dirname(_USAGE_STATS_JSON_PATH), exist_ok=True)
        Path(_USAGE_STATS_JSON_PATH).touch(exist_ok=True)
        with open(_USAGE_STATS_JSON_PATH, "a") as f:
            json.dump(data, f)
            f.write("\n")


# Module-level singleton used by the engine entry points.
usage_message = UsageMessage()


# === vllm/utils.py ===
import asyncio
import datetime
import enum
import gc
import glob
import os
import socket
import subprocess
import tempfile
import threading
import uuid
import warnings
from collections import defaultdict
from functools import lru_cache, partial
from platform import uname
from typing import (Any, AsyncIterator, Awaitable, Callable, Dict, Generic,
                    Hashable, List, Optional, OrderedDict, Tuple, TypeVar,
                    Union)
import psutil
import torch
from packaging.version import Version, parse

import vllm.envs as envs
from vllm.logger import enable_trace_function_call, init_logger

T = TypeVar("T")
logger = init_logger(__name__)

# Mapping from config-level dtype names to torch dtypes. "fp8" values are
# stored as raw uint8 bytes.
STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.half,
    "bfloat16": torch.bfloat16,
    "float": torch.float,
    "fp8": torch.uint8,
}


class Device(enum.Enum):
    GPU = enum.auto()
    CPU = enum.auto()


class Counter:
    """A simple monotonically increasing counter (not thread-safe)."""

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        self.counter = 0


class LRUCache(Generic[T]):
    """A least-recently-used cache with a fixed capacity.

    Backed by an OrderedDict; the oldest entry is evicted once the
    capacity is exceeded. Subclasses may override `_on_remove` to get a
    callback on eviction/removal.
    """

    def __init__(self, capacity: int):
        self.cache: OrderedDict[Hashable, T] = OrderedDict()
        self.capacity = capacity

    def __contains__(self, key: Hashable) -> bool:
        return key in self.cache

    def __len__(self) -> int:
        return len(self.cache)

    def __getitem__(self, key: Hashable) -> Optional[T]:
        # NOTE: returns None (not KeyError) on a miss, mirroring get().
        return self.get(key)

    def __setitem__(self, key: Hashable, value: T) -> None:
        self.put(key, value)

    def __delitem__(self, key: Hashable) -> None:
        self.pop(key)

    def touch(self, key: Hashable) -> None:
        """Mark `key` as most recently used without reading it."""
        self.cache.move_to_end(key)

    def get(self,
            key: Hashable,
            default_value: Optional[T] = None) -> Optional[T]:
        if key in self.cache:
            value: Optional[T] = self.cache[key]
            self.cache.move_to_end(key)
        else:
            value = default_value
        return value

    def put(self, key: Hashable, value: T) -> None:
        self.cache[key] = value
        self.cache.move_to_end(key)
        self._remove_old_if_needed()

    def _on_remove(self, key: Hashable, value: Optional[T]):
        # Hook for subclasses; default is a no-op.
        pass

    def remove_oldest(self):
        if not self.cache:
            return
        key, value = self.cache.popitem(last=False)
        self._on_remove(key, value)

    def _remove_old_if_needed(self) -> None:
        while len(self.cache) > self.capacity:
            self.remove_oldest()

    def pop(self,
            key: Hashable,
            default_value: Optional[T] = None) -> Optional[T]:
        run_on_remove = key in self.cache
        value: Optional[T] = self.cache.pop(key, default_value)
        if run_on_remove:
            self._on_remove(key, value)
        return value

    def clear(self):
        while len(self.cache) > 0:
            self.remove_oldest()
        self.cache.clear()


def is_hip() -> bool:
    """True when running on a ROCm/HIP build of torch."""
    return torch.version.hip is not None


def is_musa() -> bool:
    """True when running on a MUSA build of torch."""
    return torch.version.musa is not None


@lru_cache(maxsize=None)
def is_cpu() -> bool:
    """True when this is the CPU-only vllm package build."""
    from importlib.metadata import PackageNotFoundError, version
    try:
        return "cpu" in version("vllm")
    except PackageNotFoundError:
        return False


@lru_cache(maxsize=None)
def is_neuron() -> bool:
    """True when the AWS Neuron backend package is importable."""
    try:
        import transformers_neuronx
    except ImportError:
        transformers_neuronx = None
    return transformers_neuronx is not None


@lru_cache(maxsize=None)
def get_max_shared_memory_bytes(gpu: int = 0) -> int:
    """Returns the maximum shared memory per thread block in bytes."""
    # NOTE: This import statement should be executed lazily since
    # the Neuron-X backend does not have the `musa_utils` module.
    from vllm_C import musa_utils

    max_shared_mem = (
        musa_utils.get_max_shared_memory_per_block_device_attribute(gpu))
    # value 0 will cause MAX_SEQ_LEN become negative and test_attention.py
    # will fail
    assert max_shared_mem > 0, "max_shared_mem can not be zero"
    return int(max_shared_mem)


def get_cpu_memory() -> int:
    """Returns the total CPU memory of the node in bytes."""
    return psutil.virtual_memory().total


def random_uuid() -> str:
    return str(uuid.uuid4().hex)


@lru_cache(maxsize=None)
def get_vllm_instance_id():
    """
    If the environment variable VLLM_INSTANCE_ID is set, return it.
    Otherwise, return a random UUID.
    Instance id represents an instance of the VLLM. All processes in the same
    instance should have the same instance id.
    """
    return envs.VLLM_INSTANCE_ID or f"vllm-instance-{random_uuid()}"


@lru_cache(maxsize=None)
def in_wsl() -> bool:
    # Reference: https://github.com/microsoft/WSL/issues/4071
    return "microsoft" in " ".join(uname()).lower()


def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Take a blocking function, and run it on in an executor thread.

    This function prevents the blocking function from blocking the
    asyncio event loop.
    The code in this function needs to be thread safe.
    """

    def _async_wrapper(*args, **kwargs) -> asyncio.Future:
        loop = asyncio.get_event_loop()
        p_func = partial(func, *args, **kwargs)
        return loop.run_in_executor(executor=None, func=p_func)

    return _async_wrapper


def merge_async_iterators(
        *iterators: AsyncIterator[T]) -> AsyncIterator[Tuple[int, T]]:
    """Merge multiple asynchronous iterators into a single iterator.

    This method handle the case where some iterators finish before others.
    When it yields, it yields a tuple (i, item) where i is the index of the
    iterator that yields the item.
    """
    queue: asyncio.Queue[Union[Tuple[int, T], Exception]] = asyncio.Queue()

    finished = [False] * len(iterators)

    async def producer(i: int, iterator: AsyncIterator[T]):
        try:
            async for item in iterator:
                await queue.put((i, item))
        except Exception as e:
            # Forward the exception to the consumer through the queue.
            await queue.put(e)
        finished[i] = True

    _tasks = [
        asyncio.create_task(producer(i, iterator))
        for i, iterator in enumerate(iterators)
    ]

    async def consumer():
        try:
            while not all(finished) or not queue.empty():
                item = await queue.get()
                if isinstance(item, Exception):
                    raise item
                yield item
        except (Exception, asyncio.CancelledError) as e:
            for task in _tasks:
                # NOTE: Pass the error msg in cancel()
                # when only Python 3.9+ is supported.
                task.cancel()
            raise e
        await asyncio.gather(*_tasks)

    return consumer()


def get_ip() -> str:
    """Best-effort detection of this host's outward-facing IP address."""
    host_ip = envs.VLLM_HOST_IP
    if host_ip:
        return host_ip

    # IP is not set, try to get it from the network interface.
    # Connecting a UDP socket sends no packets; it only asks the kernel
    # which local address would be used for that destination.
    # BUGFIX: the sockets were previously never closed (fd leak); the
    # `with` blocks guarantee cleanup on both success and failure.

    # try ipv4
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
            return s.getsockname()[0]
    except Exception:
        pass

    # try ipv6
    try:
        with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:
            # Google's public DNS server, see
            # https://developers.google.com/speed/public-dns/docs/using#addresses
            s.connect(
                ("2001:4860:4860::8888", 80))  # Doesn't need to be reachable
            return s.getsockname()[0]
    except Exception:
        pass

    warnings.warn(
        "Failed to get the IP address, using 0.0.0.0 by default."
        "The value can be set by the environment variable"
        " VLLM_HOST_IP or HOST_IP.",
        stacklevel=2)
    return "0.0.0.0"


def get_distributed_init_method(ip: str, port: int) -> str:
    # Brackets are not permitted in ipv4 addresses,
    # see https://github.com/python/cpython/issues/103848
    return f"tcp://[{ip}]:{port}" if ":" in ip else f"tcp://{ip}:{port}"


def get_open_port() -> int:
    """Ask the OS for a free TCP port (IPv4 first, then IPv6)."""
    # try ipv4
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]
    except OSError:
        # try ipv6
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]


def update_environment_variables(envs: Dict[str, str]):
    # NOTE: the parameter name shadows the module-level `vllm.envs` alias
    # inside this function; kept for backward compatibility with callers
    # that pass it by keyword.
    for k, v in envs.items():
        if k in os.environ and os.environ[k] != v:
            logger.warning(
                "Overwriting environment variable %s "
                "from '%s' to '%s'", k, os.environ[k], v)
        os.environ[k] = v


def chunk_list(lst, chunk_size):
    """Yield successive chunk_size chunks from lst."""
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def cdiv(a: int, b: int) -> int:
    """Ceiling division."""
    return -(a // -b)
@lru_cache(maxsize=None)
def get_mcc_musa_version() -> "Optional[Version]":
    """Return the MUSA toolkit version reported by ``mcc -V``, or None.

    Falls back to /usr/local/musa when MUSA_HOME is unset. The result is
    cached for the process lifetime. (The return annotation is a string
    so the module can be imported without evaluating the packaging type.)
    """
    musa_home = envs.MUSA_HOME
    if not musa_home:
        musa_home = '/usr/local/musa'
        # BUGFIX: probe for the compiler we actually invoke below (mcc);
        # the original checked for nvcc, which never ships with MUSA.
        if os.path.isfile(musa_home + '/bin/mcc'):
            logger.info(
                'MUSA_HOME is not found in the environment. '
                'Using %s as MUSA_HOME.', musa_home)
        else:
            logger.warning('Not found mcc in %s. Skip musa version check!',
                           musa_home)
            return None
    mcc_output = subprocess.check_output([musa_home + "/bin/mcc", "-V"],
                                         universal_newlines=True)
    output = mcc_output.split()
    release_idx = output.index("release") + 1
    mcc_musa_version = parse(output[release_idx].split(",")[0])
    return mcc_musa_version


def _generate_random_fp8(
    tensor: torch.tensor,
    low: float,
    high: float,
) -> None:
    """Fill `tensor` (uint8-backed fp8 storage) with random finite values.

    Samples in float16 and converts, rather than using torch.randint
    directly on the byte representation.
    """
    # NOTE(zhaoyang): Due to NaN and Inf representation for fp8 data type,
    # it may occur Inf or NaN if we directly use torch.randint
    # to generate random data for fp8 data.
    # For example, s.11111.00 in fp8e5m2 format represents Inf.
    #     | E4M3        | E5M2
    #-----|-------------|-------------------
    # Inf | N/A         | s.11111.00
    # NaN | s.1111.111  | s.11111.{01,10,11}
    from vllm import _custom_ops as ops
    tensor_tmp = torch.empty_like(tensor, dtype=torch.float16)
    tensor_tmp.uniform_(low, high)
    ops.convert_fp8(tensor_tmp, tensor)
    del tensor_tmp


def get_kv_cache_torch_dtype(
        cache_dtype: Optional[Union[str, torch.dtype]],
        model_dtype: Optional[Union[str, torch.dtype]] = None) -> torch.dtype:
    """Resolve the torch dtype used to store the KV cache.

    "auto" defers to `model_dtype`; "fp8" is stored as uint8 bytes.
    Raises ValueError for anything unrecognized.
    """
    if isinstance(cache_dtype, str):
        if cache_dtype == "auto":
            if isinstance(model_dtype, str):
                torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
            elif isinstance(model_dtype, torch.dtype):
                torch_dtype = model_dtype
            else:
                raise ValueError(f"Invalid model dtype: {model_dtype}")
        elif cache_dtype in ["half", "bfloat16", "float"]:
            torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
        elif cache_dtype == "fp8":
            torch_dtype = torch.uint8
        else:
            raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
    elif isinstance(cache_dtype, torch.dtype):
        torch_dtype = cache_dtype
    else:
        raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
    return torch_dtype


def create_kv_caches_with_random_flash(
    num_blocks: int,
    block_size: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    cache_dtype: Optional[Union[str, torch.dtype]],
    model_dtype: Optional[Union[str, torch.dtype]] = None,
    seed: int = 0,
    device: Optional[str] = "musa",
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """Allocate per-layer random KV caches laid out for the flash backend.

    Returns (key_caches, value_caches); fp8 is not supported here.
    """
    assert cache_dtype != "fp8"
    torch.random.manual_seed(seed)
    # BUGFIX: the original had a duplicated, unreachable
    # `elif torch.musa.is_available()` branch; a single check suffices.
    if torch.musa.is_available():
        torch.musa.manual_seed(seed)

    torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
    key_value_cache_shape = (num_blocks, 2, block_size, num_heads, head_size)
    scale = head_size**-0.5
    key_caches, value_caches = [], []
    for _ in range(num_layers):
        key_value_cache = torch.empty(size=key_value_cache_shape,
                                      dtype=torch_dtype,
                                      device=device)
        key_value_cache.uniform_(-scale, scale)
        key_caches.append(key_value_cache[:, 0])
        value_caches.append(key_value_cache[:, 1])
    return key_caches, value_caches


def create_kv_caches_with_random(
    num_blocks: int,
    block_size: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    cache_dtype: Optional[Union[str, torch.dtype]],
    model_dtype: Optional[Union[str, torch.dtype]] = None,
    seed: int = 0,
    device: Optional[str] = "musa",
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """Allocate per-layer random KV caches in the paged-attention layout."""
    torch.random.manual_seed(seed)
    if torch.musa.is_available():
        torch.musa.manual_seed(seed)

    torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)

    scale = head_size**-0.5
    # Pack 16 bytes of head data per innermost slot.
    x = 16 // torch.tensor([], dtype=torch_dtype).element_size()
    key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
    key_caches = []
    for _ in range(num_layers):
        key_cache = torch.empty(size=key_cache_shape,
                                dtype=torch_dtype,
                                device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            key_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(key_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support key cache of type {cache_dtype}")
        key_caches.append(key_cache)

    value_cache_shape = (num_blocks, num_heads, head_size, block_size)
    value_caches = []
    for _ in range(num_layers):
        value_cache = torch.empty(size=value_cache_shape,
                                  dtype=torch_dtype,
                                  device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            value_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(value_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support value cache of type {cache_dtype}")
        value_caches.append(value_cache)
    return key_caches, value_caches


@lru_cache
def print_warning_once(msg: str) -> None:
    # lru_cache ensures each distinct message is logged only once.
    logger.warning(msg)


@lru_cache(maxsize=None)
def is_pin_memory_available() -> bool:
    """Whether pinned host memory can be used on this platform."""
    if in_wsl():
        # Pinning memory in WSL is not supported.
        # https://docs.nvidia.com/musa/wsl-user-guide/index.html#known-limitations-for-linux-musa-applications
        print_warning_once("Using 'pin_memory=False' as WSL is detected. "
                           "This may slow down the performance.")
        return False
    elif is_neuron():
        print_warning_once("Pin memory is not supported on Neuron.")
        return False
    elif is_cpu():
        return False
    return True


class CudaMemoryProfiler:
    """Context manager measuring device memory consumed inside the block.

    After exit, `consumed_memory` holds the delta (bytes) between the
    peak-reset usage at entry and at exit.
    """

    def __init__(self, device=None):
        self.device = device

    def current_memory_usage(self) -> float:
        # Return the memory usage in bytes.
        torch.musa.reset_peak_memory_stats(self.device)
        mem = torch.musa.max_memory_allocated(self.device)
        return mem

    def __enter__(self):
        self.initial_memory = self.current_memory_usage()
        # This allows us to call methods of the context manager if needed
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.final_memory = self.current_memory_usage()
        self.consumed_memory = self.final_memory - self.initial_memory

        # Force garbage collection
        gc.collect()


def str_to_int_tuple(s: str) -> Tuple[int, ...]:
    """Convert a string to a tuple of integers."""
    try:
        return tuple(map(int, s.split(",")))
    except ValueError as e:
        raise ValueError(
            "String must be a series of integers separated by commas "
            f"(e.g., 1, 2, 3). Given input: {s}") from e


def pad_to_max_length(x: List[int], max_len: int, pad: int) -> List[int]:
    """Right-pad `x` with `pad` up to `max_len` (x must not exceed it)."""
    assert len(x) <= max_len
    return x + [pad] * (max_len - len(x))


def make_tensor_with_pad(
    x: List[List[int]],
    max_len: int,
    pad: int,
    dtype: torch.dtype,
    device: Optional[Union[str, torch.device]],
) -> torch.Tensor:
    """Make a padded tensor of a 2D inputs.

    The padding is applied to the end of each inner list until it reaches
    `max_len`.
    """
    padded_x = [pad_to_max_length(x_i, max_len, pad) for x_i in x]
    return torch.tensor(padded_x, dtype=dtype, device=device)
+ """ + padded_x = [pad_to_max_length(x_i, max_len, pad) for x_i in x] + return torch.tensor(padded_x, dtype=dtype, device=device) + + +def async_tensor_h2d( + data: list, + dtype: torch.dtype, + target_device: Union[str, torch.device], + pin_memory: bool, +) -> torch.Tensor: + """Asynchronously create a tensor and copy it from host to device.""" + t = torch.tensor(data, dtype=dtype, pin_memory=pin_memory, device="cpu") + return t.to(device=target_device, non_blocking=True) + + +def maybe_expand_dim(tensor: torch.Tensor, + target_dims: int, + size: int = 1) -> torch.Tensor: + """Expand the tensor to the target_dims.""" + if tensor.ndim < target_dims: + tensor = tensor.view(-1, *([size] * (target_dims - tensor.ndim))) + return tensor + + +def merge_dicts(dict1: Dict[Any, List[Any]], + dict2: Dict[Any, List[Any]]) -> Dict[Any, List[Any]]: + """Merge 2 dicts that have key -> List of items. + + When a key conflicts, the values in dict1 is prioritized. + """ + merged_dict = defaultdict(list) + + for key, value in dict1.items(): + merged_dict[key].extend(value) + + for key, value in dict2.items(): + merged_dict[key].extend(value) + + return dict(merged_dict) + + +def init_cached_hf_modules(): + """ + Lazy initialization of the Hugging Face modules. + """ + from transformers.dynamic_module_utils import init_hf_modules + init_hf_modules() + + +def nccl_integrity_check(filepath): + """ + when the library is corrupted, we cannot catch + the exception in python. it will crash the process. + instead, we use the exit code of `ldd` to check + if the library is corrupted. if not, we will return + the version of the library. 
+ """ + exit_code = os.system(f"ldd {filepath} 2>&1 > /dev/null") + if exit_code != 0: + raise RuntimeError(f"Failed to load NCCL library from {filepath} .") + import ctypes + + nccl = ctypes.CDLL(filepath) + version = ctypes.c_int() + nccl.ncclGetVersion.restype = ctypes.c_int + nccl.ncclGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int)] + result = nccl.ncclGetVersion(ctypes.byref(version)) + assert result == 0 + return version.value + +def mccl_integrity_check(filepath): + """ + when the library is corrupted, we cannot catch + the exception in python. it will crash the process. + instead, we use the exit code of `ldd` to check + if the library is corrupted. if not, we will return + the version of the library. + """ + exit_code = os.system(f"ldd {filepath} 2>&1 > /dev/null") + if exit_code != 0: + raise RuntimeError(f"Failed to load MCCL library from {filepath} .") + import ctypes + + mccl = ctypes.CDLL(filepath) + version = ctypes.c_int() + mccl.mcclGetVersion.restype = ctypes.c_int + mccl.mcclGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int)] + result = mccl.mcclGetVersion(ctypes.byref(version)) + assert result == 0 + return version.value + +@lru_cache(maxsize=None) +def find_library(lib_name: str) -> str: + """ + Find the library file in the system. + `lib_name` is full filename, with both prefix and suffix. + This function resolves `lib_name` to the full path of the library. + """ + # Adapted from https://github.com/openai/triton/blob/main/third_party/nvidia/backend/driver.py#L19 # noqa + # According to https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard + # `/sbin/ldconfig` should exist in all Linux systems. 
+ # `/sbin/ldconfig` searches the library in the system + libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode() + # each line looks like the following: + # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1 + locs = [line.split()[-1] for line in libs.splitlines() if lib_name in line] + # `LD_LIBRARY_PATH` searches the library in the user-defined paths + env_ld_library_path = envs.LD_LIBRARY_PATH + if not locs and env_ld_library_path: + locs = [ + os.path.join(dir, lib_name) + for dir in env_ld_library_path.split(":") + if os.path.exists(os.path.join(dir, lib_name)) + ] + if not locs: + raise ValueError(f"Cannot find {lib_name} in the system.") + return locs[0] + + +def find_nccl_library(): + so_file = envs.VLLM_NCCL_SO_PATH + VLLM_CONFIG_ROOT = envs.VLLM_CONFIG_ROOT + + # check if we have vllm-managed nccl + vllm_nccl_path = None + if torch.version.musa is not None: + cuda_major = torch.version.musa.split(".")[0] + path = os.path.expanduser( + f"{VLLM_CONFIG_ROOT}/vllm/nccl/cu{cuda_major}/libnccl.so.*") + files = glob.glob(path) + vllm_nccl_path = files[0] if files else None + + # manually load the nccl library + if so_file: + logger.info( + "Found nccl from environment variable VLLM_NCCL_SO_PATH=%s", + so_file) + else: + if torch.version.musa is not None: + so_file = vllm_nccl_path or find_library("libnccl.so.2") + elif torch.version.hip is not None: + so_file = find_library("librccl.so.1") + else: + raise ValueError("NCCL only supports CUDA and ROCm backends.") + logger.info("Found nccl from library %s", so_file) + return so_file + +def find_mccl_library(): + so_file = envs.VLLM_NCCL_SO_PATH + VLLM_CONFIG_ROOT = envs.VLLM_CONFIG_ROOT + + # check if we have vllm-managed nccl + vllm_mccl_path = None + if torch.version.musa is not None: + path = os.path.expanduser( + f"{VLLM_CONFIG_ROOT}/vllm/nccl/libmccl.so.*") + files = glob.glob(path) + vllm_nccl_path = files[0] if files else None + + # manually load the nccl library + if so_file: + 
def enable_trace_function_call_for_thread() -> None:
    """Set up function tracing for the current thread,
    if enabled via the VLLM_TRACE_FUNCTION environment variable
    """
    if not envs.VLLM_TRACE_FUNCTION:
        return

    # One log file per (process, thread, timestamp); spaces in the
    # timestamp are replaced so the name stays shell-friendly.
    raw_name = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}"
                f"_thread_{threading.get_ident()}_"
                f"at_{datetime.datetime.now()}.log")
    trace_file = raw_name.replace(" ", "_")
    log_dir = os.path.join(tempfile.gettempdir(), "vllm",
                           get_vllm_instance_id())
    os.makedirs(log_dir, exist_ok=True)
    enable_trace_function_call(os.path.join(log_dir, trace_file))


# === vllm/worker/__init__.py (new file, intentionally empty) ===

# === vllm/worker/cache_engine.py ===
"""CacheEngine class for managing the KV cache."""
from typing import Dict, List

import torch

from vllm.attention import get_attn_backend
from vllm.config import CacheConfig, ModelConfig, ParallelConfig
from vllm.logger import init_logger
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, is_pin_memory_available

logger = init_logger(__name__)


class CacheEngine:
    """Manages the KV cache.

    This class is responsible for initializing and managing the GPU and CPU KV
    caches. It also provides methods for performing KV cache operations, such
    as swapping and copying.
    """
+ """ + + def __init__( + self, + cache_config: CacheConfig, + model_config: ModelConfig, + parallel_config: ParallelConfig, + ) -> None: + self.cache_config = cache_config + self.model_config = model_config + self.parallel_config = parallel_config + + self.head_size = model_config.get_head_size() + self.num_layers = model_config.get_num_layers(parallel_config) + self.num_heads = model_config.get_num_kv_heads(parallel_config) + + self.block_size = cache_config.block_size + self.num_gpu_blocks = cache_config.num_gpu_blocks + self.num_cpu_blocks = cache_config.num_cpu_blocks + + if cache_config.cache_dtype == "auto": + self.dtype = model_config.dtype + else: + self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype] + + # Get attention backend. + self.attn_backend = get_attn_backend(model_config.dtype) + + # Initialize the cache. + self.gpu_cache = self._allocate_kv_cache(self.num_gpu_blocks, "musa") + self.cpu_cache = self._allocate_kv_cache(self.num_cpu_blocks, "cpu") + + def _allocate_kv_cache( + self, + num_blocks: int, + device: str, + ) -> List[torch.Tensor]: + """Allocates KV cache on the specified device.""" + kv_cache_shape = self.attn_backend.get_kv_cache_shape( + num_blocks, self.block_size, self.num_heads, self.head_size) + pin_memory = is_pin_memory_available() if device == "cpu" else False + kv_cache: List[torch.Tensor] = [] + for _ in range(self.num_layers): + kv_cache.append( + torch.empty(kv_cache_shape, + dtype=self.dtype, + pin_memory=pin_memory, + device=device)) + return kv_cache + + def swap_in(self, src_to_dst: Dict[int, int]) -> None: + for i in range(self.num_layers): + self.attn_backend.swap_blocks(self.cpu_cache[i], self.gpu_cache[i], + src_to_dst) + + def swap_out(self, src_to_dst: Dict[int, int]) -> None: + for i in range(self.num_layers): + self.attn_backend.swap_blocks(self.gpu_cache[i], self.cpu_cache[i], + src_to_dst) + + def copy(self, src_to_dsts: Dict[int, List[int]]) -> None: + self.attn_backend.copy_blocks(self.gpu_cache, 
src_to_dsts) + + @staticmethod + def get_cache_block_size( + cache_config: CacheConfig, + model_config: ModelConfig, + parallel_config: ParallelConfig, + ) -> int: + head_size = model_config.get_head_size() + num_heads = model_config.get_num_kv_heads(parallel_config) + num_layers = model_config.get_num_layers(parallel_config) + + key_cache_block = cache_config.block_size * num_heads * head_size + value_cache_block = key_cache_block + total = num_layers * (key_cache_block + value_cache_block) + if cache_config.cache_dtype == "auto": + dtype = model_config.dtype + else: + dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype] + dtype_size = _get_dtype_size(dtype) + return dtype_size * total + + +def _get_dtype_size(dtype: torch.dtype) -> int: + return torch.tensor([], dtype=dtype).element_size() diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py new file mode 100644 index 0000000..193b021 --- /dev/null +++ b/vllm/worker/cpu_model_runner.py @@ -0,0 +1,346 @@ +from typing import List, Optional, Tuple + +import torch +from torch import nn + +from vllm.attention import AttentionMetadata, get_attn_backend +from vllm.config import (DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, + ParallelConfig, SchedulerConfig, VisionLanguageConfig) +from vllm.distributed import broadcast_tensor_dict +from vllm.logger import init_logger +from vllm.model_executor import SamplingMetadata +from vllm.model_executor.model_loader import get_model +from vllm.sequence import SamplerOutput, SequenceGroupMetadata +from vllm.utils import make_tensor_with_pad + +logger = init_logger(__name__) + +_PAD_SLOT_ID = -1 + + +class CPUModelRunner: + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + load_config: LoadConfig, + lora_config: Optional[LoRAConfig], + vision_language_config: Optional[VisionLanguageConfig], + kv_cache_dtype: Optional[str] = "auto", + 
class CPUModelRunner:
    """Builds model inputs from scheduler metadata and runs the model on CPU.

    Chunked prefill is not supported by this runner (asserted in __init__),
    and there is no CUDA-graph capture path.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        load_config: LoadConfig,
        lora_config: Optional[LoRAConfig],
        vision_language_config: Optional[VisionLanguageConfig],
        kv_cache_dtype: Optional[str] = "auto",
        is_driver_worker: bool = False,
        *args,
        **kwargs,
    ):
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        # Currently, CPU worker doesn't support chunked prefill.
        assert self.scheduler_config.chunked_prefill_enabled is False
        self.lora_config = lora_config
        self.vision_language_config = vision_language_config
        self.load_config = load_config
        self.is_driver_worker = is_driver_worker

        # model_config can be None in tests/samplers/test_sampler.py.
        # FIXME(woosuk): This is a hack to make the tests work. Refactor this.
        self.sliding_window = (model_config.get_sliding_window()
                               if model_config is not None else None)
        self.device_config = (device_config
                              if device_config is not None else DeviceConfig())
        self.device = self.device_config.device

        self.kv_cache_dtype = kv_cache_dtype

        self.attn_backend = get_attn_backend(
            self.model_config.dtype if model_config is not None else None)

        # Lazy initialization.
        self.model: nn.Module  # Set after load_model.
        self.block_size: int  # Set after initial profiling.

    def load_model(self) -> None:
        """Instantiate the model from the stored configs."""
        self.model = get_model(
            model_config=self.model_config,
            load_config=self.load_config,
            device_config=self.device_config,
            vision_language_config=self.vision_language_config,
            lora_config=self.lora_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config)

    def _prepare_prompt(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, List[int],
               Optional[torch.Tensor]]:
        """Flatten prompt sequence groups into model input tensors.

        Returns (input_tokens, input_positions, attn_metadata, seq_lens,
        multi_modal_input).
        """
        assert len(seq_group_metadata_list) > 0
        input_tokens: List[int] = []
        input_positions: List[int] = []
        slot_mapping: List[int] = []
        seq_lens: List[int] = []
        multi_modal_input_list: List[torch.Tensor] = []

        for seq_group_metadata in seq_group_metadata_list:
            assert seq_group_metadata.is_prompt
            seq_ids = list(seq_group_metadata.seq_data.keys())
            # Prompt groups carry exactly one sequence.
            assert len(seq_ids) == 1
            seq_id = seq_ids[0]

            seq_data = seq_group_metadata.seq_data[seq_id]
            prompt_tokens = seq_data.get_token_ids()
            computed_len = seq_data.get_num_computed_tokens()
            seq_len = len(prompt_tokens)

            seq_lens.append(seq_len)  # Prompt token num
            input_tokens.extend(prompt_tokens)  # Token ids

            # Token position ids
            # NOTE(woosuk): Here we assume that the first token in the prompt
            # is always the first token in the sequence.
            # NOTE(review): all prompt tokens are appended but positions start
            # at computed_len; the two lists only stay aligned when
            # computed_len == 0, which presumably holds since chunked prefill
            # is asserted off — confirm against the scheduler.
            input_positions.extend(list(range(computed_len, seq_len)))

            if seq_group_metadata.multi_modal_data:
                multi_modal_input_list.append(
                    seq_group_metadata.multi_modal_data.data)

            # Compute the slot mapping.
            block_table = seq_group_metadata.block_tables[seq_id]
            # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID,
            # where start_idx is max(0, seq_len - sliding_window).
            # For example, if the prompt len is 10, sliding window is 8, and
            # block size is 4, the first two tokens are masked and the slot
            # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1].
            start_idx = 0
            if self.sliding_window is not None:
                start_idx = max(0, seq_len - self.sliding_window)

            for i in range(computed_len, seq_len):
                if i < start_idx:
                    slot_mapping.append(_PAD_SLOT_ID)
                    continue

                block_number = block_table[i //
                                           self.block_size]  # type: ignore
                block_offset = i % self.block_size  # type: ignore
                slot_mapping.append(block_number * self.block_size +
                                    block_offset)

        if multi_modal_input_list:
            assert self.vision_language_config, (
                "Multi-modal inputs are only supported by "
                "vision language models.")
            multi_modal_input = torch.cat(multi_modal_input_list,
                                          dim=0).to(self.device)
        else:
            multi_modal_input = None

        num_prompt_tokens = len(input_tokens)

        input_tokens = torch.tensor(input_tokens,
                                    dtype=torch.long,
                                    device=self.device)  # type: ignore
        input_positions = torch.tensor(input_positions,
                                       dtype=torch.long,
                                       device=self.device)  # type: ignore
        slot_mapping = torch.tensor(slot_mapping,
                                    dtype=torch.long,
                                    device=self.device)  # type: ignore

        attn_metadata = self.attn_backend.make_metadata(
            is_prompt=True,
            seq_lens=seq_lens,
            seq_lens_tensor=None,
            max_seq_len=None,
            num_prefills=len(seq_lens),
            num_prefill_tokens=num_prompt_tokens,
            num_decode_tokens=0,
            prefill_metadata=None,
            decode_metadata=None,
            block_tables=torch.tensor([]),
            slot_mapping=slot_mapping,
            kv_cache_dtype=self.kv_cache_dtype,
        )
        return (input_tokens, input_positions, attn_metadata, seq_lens,
                multi_modal_input)

    def _prepare_decode(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata]:
        """Flatten decode sequence groups (one new token each) into tensors."""
        assert len(seq_group_metadata_list) > 0
        input_tokens: List[int] = []
        input_positions: List[int] = []
        slot_mapping: List[int] = []
        seq_lens: List[int] = []
        block_tables: List[List[int]] = []

        for seq_group_metadata in seq_group_metadata_list:
            assert not seq_group_metadata.is_prompt
            # Decode advances exactly one token per step on this runner.
            assert seq_group_metadata.token_chunk_size == 1

            seq_ids = list(seq_group_metadata.seq_data.keys())

            for seq_id in seq_ids:
                seq_data = seq_group_metadata.seq_data[seq_id]
                generation_token = seq_data.get_last_token_id()
                input_tokens.append(generation_token)

                seq_len = seq_data.get_len()
                position = seq_len - 1
                input_positions.append(position)

                # With a sliding window, attention only sees the last
                # `sliding_window` tokens.
                seq_len = seq_len if self.sliding_window is None else min(
                    seq_len, self.sliding_window)
                seq_lens.append(seq_len)

                block_table = seq_group_metadata.block_tables[seq_id]
                block_number = block_table[position // self.block_size]
                block_offset = position % self.block_size
                slot_mapping.append(block_number * self.block_size +
                                    block_offset)

                if self.sliding_window is not None:
                    sliding_window_blocks = (self.sliding_window //
                                             self.block_size)
                    block_table = block_table[-sliding_window_blocks:]
                block_tables.append(block_table)

        max_seq_len = max(seq_lens)

        input_tokens = torch.tensor(input_tokens,
                                    dtype=torch.long,
                                    device=self.device)
        input_positions = torch.tensor(input_positions,
                                       dtype=torch.long,
                                       device=self.device)
        slot_mapping = torch.tensor(slot_mapping,
                                    dtype=torch.long,
                                    device=self.device)
        seq_lens_tensor = torch.tensor(seq_lens,
                                       dtype=torch.int,
                                       device=self.device)

        # Pad every block table to the longest one so they stack into a
        # rectangular tensor.
        max_block_table_len = max(
            len(block_table) for block_table in block_tables)
        block_tables = make_tensor_with_pad(
            block_tables,
            max_len=max_block_table_len,
            pad=0,
            dtype=torch.int,
            device=self.device,
        )

        attn_metadata = self.attn_backend.make_metadata(
            is_prompt=False,
            slot_mapping=slot_mapping,
            seq_lens=seq_lens,
            seq_lens_tensor=seq_lens_tensor,
            max_seq_len=max_seq_len,
            num_prefill_tokens=0,
            num_decode_tokens=len(input_tokens),
            num_prefills=0,
            prefill_metadata=None,
            decode_metadata=None,
            block_tables=block_tables,
            kv_cache_dtype=self.kv_cache_dtype,
        )
        return (
            input_tokens,
            input_positions,
            attn_metadata,
        )

    def prepare_input_tensors(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, SamplingMetadata,
               Optional[torch.Tensor]]:
        """Build model inputs; the driver builds and broadcasts, others receive.

        Returns (input_tokens, input_positions, attn_metadata,
        sampling_metadata, multi_modal_input).
        """
        multi_modal_input = None
        if self.is_driver_worker:
            # NOTE: We assume that all sequences in the group are all prompts
            # or all decodes.
            is_prompt = seq_group_metadata_list[0].is_prompt
            # Prepare input tensors.
            if is_prompt:
                (input_tokens, input_positions, attn_metadata, seq_lens,
                 multi_modal_input
                 ) = self._prepare_prompt(seq_group_metadata_list)
            else:
                (input_tokens, input_positions,
                 attn_metadata) = self._prepare_decode(seq_group_metadata_list)
                seq_lens = []
            sampling_metadata = SamplingMetadata.prepare(
                seq_group_metadata_list,
                seq_lens,
                # query_lens is not needed if chunked prefill is not
                # supported. Since CPU worker doesn't support chunked prefill
                # just use seq_lens instead.
                seq_lens,
                self.device,
                pin_memory=False)
            # Broadcast the metadata to the non-driver workers.
            metadata_dict = {
                "input_tokens": input_tokens,
                "input_positions": input_positions,
                "selected_token_indices":
                sampling_metadata.selected_token_indices,
            }
            metadata_dict.update(attn_metadata.asdict_zerocopy())
            broadcast_tensor_dict(metadata_dict, src=0)
        else:
            # Non-driver workers reconstruct everything from the broadcast.
            metadata_dict = broadcast_tensor_dict(src=0)
            input_tokens = metadata_dict.pop("input_tokens")
            input_positions = metadata_dict.pop("input_positions")
            selected_token_indices = metadata_dict.pop(
                "selected_token_indices")
            attn_metadata = self.attn_backend.make_metadata(**metadata_dict)
            sampling_metadata = SamplingMetadata(
                seq_groups=None,
                seq_data=None,
                seq_lens=None,
                selected_token_indices=selected_token_indices,
                categorized_sample_indices=None,
                generators=None,
            )

        return (input_tokens, input_positions, attn_metadata,
                sampling_metadata, multi_modal_input)

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        kv_caches: List[torch.Tensor],
    ) -> Optional[SamplerOutput]:
        """Run one forward pass; sampling happens on the driver only."""
        (input_tokens, input_positions, attn_metadata, sampling_metadata,
         multi_modal_input
         ) = self.prepare_input_tensors(seq_group_metadata_list)

        model_executable = self.model
        execute_model_kwargs = {
            "input_ids": input_tokens,
            "positions": input_positions,
            "kv_caches": kv_caches,
            "attn_metadata": attn_metadata,
        }
        if self.vision_language_config:
            execute_model_kwargs.update({"image_input": multi_modal_input})

        hidden_states = model_executable(**execute_model_kwargs)

        # Compute the logits.
        logits = self.model.compute_logits(hidden_states, sampling_metadata)

        # Only perform sampling in the driver worker.
        if not self.is_driver_worker:
            return None

        # Sample the next token.
        return self.model.sample(
            logits=logits,
            sampling_metadata=sampling_metadata,
        )
class CPUCacheEngine:
    """Manages the KV cache for CPU backend.

    This class is responsible for initializing and managing CPU KV
    caches. It also provides methods for performing KV cache operations, such
    as copying. Swapping is intentionally unsupported on CPU.
    """

    def __init__(self, cache_config: CacheConfig, model_config: ModelConfig,
                 parallel_config: ParallelConfig,
                 device_config: DeviceConfig) -> None:
        assert device_config.device_type == "cpu"
        self.cache_config = cache_config
        self.model_config = model_config
        self.parallel_config = parallel_config

        # Per-layer cache geometry.
        self.head_size = model_config.get_head_size()
        self.num_layers = model_config.get_num_layers(parallel_config)
        self.num_heads = model_config.get_num_kv_heads(parallel_config)

        self.block_size = cache_config.block_size
        # Note: In CacheConfig, num_gpu_blocks actual is num_cpu_blocks
        # for CPU backend, because we want to reuse KV cache management
        # in the scheduler.
        self.num_cpu_blocks = cache_config.num_gpu_blocks

        # "auto" means the cache uses the model's own dtype.
        if cache_config.cache_dtype == "auto":
            self.dtype = model_config.dtype
        else:
            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

        # The attention backend defines the cache tensor layout.
        self.attn_backend = get_attn_backend(model_config.dtype)

        # Initialize the cache.
        self.cpu_cache = self._allocate_kv_cache(self.num_cpu_blocks)

    def _allocate_kv_cache(
        self,
        num_blocks: int,
    ) -> List[torch.Tensor]:
        """Allocates KV cache on CPU."""
        kv_cache_shape = self.attn_backend.get_kv_cache_shape(
            num_blocks, self.block_size, self.num_heads, self.head_size)
        return [
            torch.empty(kv_cache_shape, dtype=self.dtype, device="cpu")
            for _ in range(self.num_layers)
        ]

    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
        raise NotImplementedError("Swap is not supported in CPUCacheEngine.")

    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
        raise NotImplementedError("Swap is not supported in CPUCacheEngine.")

    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
        """Duplicate cache blocks in place (one source to many destinations)."""
        self.attn_backend.copy_blocks(self.cpu_cache, src_to_dsts)

    @staticmethod
    def get_cache_block_size(
        block_size: int,
        cache_dtype: str,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> int:
        """Return the size in bytes of one KV cache block across all layers."""
        head_size = model_config.get_head_size()
        num_heads = model_config.get_num_kv_heads(parallel_config)
        num_layers = model_config.get_num_layers(parallel_config)

        # Key and value blocks have identical shapes.
        key_cache_block = block_size * num_heads * head_size
        value_cache_block = key_cache_block
        total = num_layers * (key_cache_block + value_cache_block)
        if cache_dtype == "auto":
            dtype = model_config.dtype
        else:
            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
        # Probe bytes-per-element via an empty tensor of that dtype.
        dtype_size = torch.tensor([], dtype=dtype).element_size()
        return dtype_size * total
+ """ + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + cache_config: CacheConfig, + load_config: LoadConfig, + local_rank: int, + rank: int, + distributed_init_method: str, + lora_config: Optional[LoRAConfig] = None, + vision_language_config: Optional[VisionLanguageConfig] = None, + kv_cache_dtype: Optional[str] = "auto", + is_driver_worker: bool = False, + ) -> None: + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.cache_config = cache_config + self.load_config = load_config + self.local_rank = local_rank + self.rank = rank + self.distributed_init_method = distributed_init_method + self.lora_config = lora_config + self.vision_language_config = vision_language_config + self.is_driver_worker = is_driver_worker + if self.is_driver_worker: + assert self.rank == 0, "The driver worker must have rank 0." + + if self.model_config.trust_remote_code: + # note: lazy import to avoid importing torch before initializing + from vllm.utils import init_cached_hf_modules + init_cached_hf_modules() + self.model_runner = CPUModelRunner( + model_config, + parallel_config, + scheduler_config, + device_config, + load_config=self.load_config, + lora_config=self.lora_config, + vision_language_config=self.vision_language_config, + kv_cache_dtype=kv_cache_dtype, + is_driver_worker=is_driver_worker) + # Uninitialized cache engine. Will be initialized by + # initialize_cache. + self.cache_engine: CPUCacheEngine + self.cpu_cache: List[torch.Tensor] + + def init_device(self) -> None: + self.init_distributed_environment() + # Set random seed. + set_random_seed(self.model_config.seed) + + def load_model(self): + self.model_runner.load_model() + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of blocks available for the KV cache. 
+ + This determines how many KV blocks can fit into the configured CPU + KV cache space. + + Note that since vLLM assumes a block resides on GPU if it can be + modified, we return num_gpu_blocks=num_cpu_blocks and num_cpu_blocks=0. + This allows us to reuse the scheduler of vLLM without generalizing it + to different devices. + """ + # For CPU device, the block number will be calculated based on the + # cpu_kvcache_space. + cache_block_size = self.get_cache_block_size_bytes() + num_cpu_blocks = int(self.cache_config.cpu_kvcache_space_bytes // + cache_block_size) + num_cpu_blocks = max(num_cpu_blocks, 0) + + # Note: To reuse the cache management procedure, + # use cpu cache as 'gpu cache'. + num_gpu_blocks = num_cpu_blocks + num_cpu_blocks = 0 + return num_gpu_blocks, num_cpu_blocks + + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """Initialize the KV cache. Currently, swappable CPU memory is not + supported. + + Since this worker does not support GPUs, we use the num_gpu_blocks to + determine how many non-swappable CPU blocks to allocate. + """ + assert (num_cpu_blocks == 0 + ), f"{type(self)} does not support swappable cache" + + # Note: To reuse the cache management procedure, + # use cpu cache as 'gpu cache'. + num_cpu_blocks = num_gpu_blocks + + self._validate_num_cpu_blocks(num_cpu_blocks) + self.cache_config.num_gpu_blocks = num_cpu_blocks + self.cache_config.num_cpu_blocks = 0 + + # Initialize the cache. + self._init_cache_engine() + + def _validate_num_cpu_blocks(self, num_cpu_blocks: int) -> None: + """Raise errors if the num_cpu_blocks is invalid. + """ + if num_cpu_blocks <= 0: + raise ValueError("No available memory for the cache blocks. 
" + "Try increasing `VLLM_CPU_KVCACHE_SPACE` when " + "initializing the engine.") + + max_seq_len = self.cache_config.block_size * num_cpu_blocks + if self.model_config.max_model_len > max_seq_len: + raise ValueError( + f"The model's max seq len ({self.model_config.max_model_len}) " + "is larger than the maximum number of tokens that can be " + f"stored in KV cache ({max_seq_len}). Try increasing " + "`VLLM_CPU_KVCACHE_SPACE` or decreasing `max_model_len` when " + "initializing the engine.") + + def _init_cache_engine(self) -> None: + self.cache_engine = CPUCacheEngine(self.cache_config, + self.model_config, + self.parallel_config, + self.device_config) + self.cpu_cache = self.cache_engine.cpu_cache + self.model_runner.block_size = self.cache_engine.block_size + + assert self.cpu_cache is not None + + # Populate the cache to warmup the memory + for layer_cache in self.cpu_cache: + layer_cache.fill_(0) + + def cache_copy( + self, + blocks_to_copy: Dict[int, List[int]], + ) -> None: + if blocks_to_copy: + self.cache_engine.copy(blocks_to_copy) + + @torch.inference_mode() + def execute_model( + self, + execute_model_req: Optional[ExecuteModelRequest] = None, + ) -> List[SamplerOutput]: + + if execute_model_req is None: + seq_group_metadata_list = None + else: + seq_group_metadata_list = execute_model_req.seq_group_metadata_list + + if self.is_driver_worker: + assert seq_group_metadata_list is not None + num_seq_groups: int = len(seq_group_metadata_list) + assert execute_model_req is not None + blocks_to_copy = execute_model_req.blocks_to_copy + assert len(execute_model_req.blocks_to_swap_in) == 0 + assert len(execute_model_req.blocks_to_swap_out) == 0 + data: Dict[str, Any] = { + "num_seq_groups": num_seq_groups, + "blocks_to_copy": execute_model_req.blocks_to_copy, + } + broadcast_tensor_dict(data, src=0) + else: + data = broadcast_tensor_dict(src=0) + num_seq_groups = data["num_seq_groups"] + blocks_to_copy = data["blocks_to_copy"] + + 
self.cache_copy(blocks_to_copy) + + # If there is no input, we don't need to execute the model. + if num_seq_groups == 0: + return [] + + output = self.model_runner.execute_model(seq_group_metadata_list, + self.cpu_cache) + + # CPU worker only supports single-step execution. + return [output] + + def init_distributed_environment(self) -> None: + """Initialize the distributed environment.""" + + parallel_config = self.parallel_config + rank = self.rank + distributed_init_method = self.distributed_init_method + init_distributed_environment( + world_size=parallel_config.world_size, + rank=rank, + distributed_init_method=distributed_init_method, + backend="gloo", + ) + + # A small all_reduce for warmup. + torch.distributed.all_reduce(torch.zeros(1).cpu()) + + ensure_model_parallel_initialized( + parallel_config.tensor_parallel_size, + parallel_config.pipeline_parallel_size) + + def get_cache_block_size_bytes(self) -> int: + """Return the size in bytes of a single KV cache block. + """ + return CPUCacheEngine.get_cache_block_size( + self.cache_config.block_size, self.cache_config.cache_dtype, + self.model_config, self.parallel_config) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py new file mode 100644 index 0000000..3c99341 --- /dev/null +++ b/vllm/worker/model_runner.py @@ -0,0 +1,1172 @@ +import contextlib +import time +from enum import IntEnum +from typing import Dict, List, NamedTuple, Optional, Set, Tuple + +import numpy as np +import torch +import torch_musa +import torch.nn as nn + +from vllm.attention import (AttentionMetadata, AttentionMetadataPerStage, + get_attn_backend) +from vllm.attention.backends.flashinfer import FlashInferBackend +from vllm.config import (DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, + ParallelConfig, SchedulerConfig, VisionLanguageConfig) +from vllm.distributed import broadcast_tensor_dict, with_pynccl_for_all_reduce +from vllm.distributed.device_communicators import (custom_all_reduce, + pymccl_utils) 
logger = init_logger(__name__)

_PAD_SLOT_ID = -1
LORA_WARMUP_RANK = 8
_BATCH_SIZE_ALIGNMENT = 8
# Capture graphs for token size 1, 2, 4, 8, 16, 24, 32, 40, ..., 256.
# NOTE: _get_graph_batch_size needs to be updated if this list is changed.
_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [
    _BATCH_SIZE_ALIGNMENT * i for i in range(1, 33)
]


class PreparePromptMetadata(NamedTuple):
    """Flattened model inputs produced by the prefill preparation path."""

    input_tokens: List[int]
    input_positions: List[int]
    attn_metadata: Optional[AttentionMetadataPerStage]
    seq_lens: List[int]
    query_lens: List[int]
    lora_index_mapping: List[int]
    lora_prompt_mapping: List[int]
    lora_requests: Set[LoRARequest]
    multi_modal_input: Optional[torch.Tensor]
    slot_mapping: List[int]

    @classmethod
    def empty(cls):
        """Return a metadata record with every field empty/None."""
        return cls(
            input_tokens=[],
            input_positions=[],
            attn_metadata=None,
            seq_lens=[],
            query_lens=[],
            lora_index_mapping=[],
            lora_prompt_mapping=[],
            lora_requests=set(),
            multi_modal_input=None,
            slot_mapping=[],
        )


class PrepareDecodeMetadata(NamedTuple):
    """Flattened model inputs produced by the decode preparation path."""

    input_tokens: List[int]
    input_positions: List[int]
    attn_metadata: Optional[AttentionMetadata]
    lora_index_mapping: List[int]
    lora_prompt_mapping: List[int]
    lora_requests: Set[LoRARequest]
    slot_mapping: List[int]

    @classmethod
    def empty(cls):
        """Return a metadata record with every field empty/None."""
        return cls(
            input_tokens=[],
            input_positions=[],
            attn_metadata=None,
            lora_index_mapping=[],
            lora_prompt_mapping=[],
            lora_requests=set(),
            slot_mapping=[],
        )


# How batches are constructed.
class BatchType(IntEnum):
    # Every batch is prefill.
    PREFILL = 0
    # Every batch is decode.
    DECODE = 1
    # Batch is a mixture of prefill and decode.
    MIXED = 2
+ # When using CUDA graph, the input block tables must be padded to + # max_seq_len_to_capture. However, creating the block table in + # Python can be expensive. To optimize this, we cache the block table + # in numpy and only copy the actual input content at every iteration. + # The shape of the cached block table will be + # (max batch size to capture, max context len to capture / block size). + self.graph_block_tables: torch.Tensor # Set after initial profiling. + + # Set if the backend is flashinfer. + self.flashinfer_workspace_buffer: torch.Tensor + + def load_model(self) -> None: + self.model = get_model( + model_config=self.model_config, + device_config=self.device_config, + load_config=self.load_config, + lora_config=self.lora_config, + vision_language_config=self.vision_language_config, + parallel_config=self.parallel_config, + scheduler_config=self.scheduler_config, + ) + + # self.model_memory_usage = m.consumed_memory + # logger.info("Loading model weights took %.4f GB", + # self.model_memory_usage / float(2**30)) + + if self.lora_config: + assert hasattr(self.model, "supported_lora_modules" + ) and self.model.supported_lora_modules, ( + "Model does not support LoRA") + assert hasattr( + self.model, + "embedding_modules"), "Model does not have embedding_modules" + assert hasattr(self.model, "embedding_padding_modules" + ), "Model does not have embedding_padding_modules" + self.lora_manager = LRUCacheWorkerLoRAManager( + self.scheduler_config.max_num_seqs, + self.scheduler_config.max_num_batched_tokens, self.vocab_size, + self.lora_config, self.device, self.model.embedding_modules, + self.model.embedding_padding_modules) + self.model = self.lora_manager.create_lora_manager(self.model) + + if self.kv_cache_dtype == "fp8" and is_hip(): + # Currently scaled KV cache is only enabled on ROCm + if self.model_config.quantization_param_path is not None: + if callable(getattr(self.model, "load_kv_cache_scales", None)): + self.model.load_kv_cache_scales( + 
self.model_config.quantization_param_path) + else: + raise RuntimeError( + "Using FP8 KV cache and scaling factors provided but " + "model %s does not support loading scaling factors.", + self.model.__class__) + else: + logger.warning( + "Using FP8 KV cache but no scaling factors " + "provided. Defaulting to scaling factors of 1.0. " + "This may lead to less accurate results!") + elif self.model_config.quantization_param_path is not None: + logger.warning("KV cache scaling factors provided, " + "but the KV cache data type is not FP8. " + "KV cache scaling factors will not be used.") + + def set_block_size(self, block_size: int) -> None: + self.block_size = block_size + + self.graph_block_tables = np.zeros( + (max(_BATCH_SIZES_TO_CAPTURE), self.get_max_block_per_batch()), + dtype=np.int32) + + def get_max_block_per_batch(self) -> int: + block_size = self.block_size + return (self.max_seq_len_to_capture + block_size - 1) // block_size + + def _prepare_prompt( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> PreparePromptMetadata: + input_tokens: List[int] = [] + input_positions: List[int] = [] + slot_mapping: List[int] = [] + lora_index_mapping: List[int] = [] + lora_prompt_mapping: List[int] = [] + lora_requests: Set[LoRARequest] = set() + + seq_lens: List[int] = [] + context_lens: List[int] = [] + query_lens: List[int] = [] + prefix_block_tables: List[List[int]] = [] + multi_modal_input_list: List[torch.Tensor] = [] + + if len(seq_group_metadata_list) == 0: + return PreparePromptMetadata.empty() + + for seq_group_metadata in seq_group_metadata_list: + assert seq_group_metadata.is_prompt + seq_ids = list(seq_group_metadata.seq_data.keys()) + assert len(seq_ids) == 1 + seq_id = seq_ids[0] + + computed_block_nums = seq_group_metadata.computed_block_nums + if (self.scheduler_config is not None + and self.scheduler_config.chunked_prefill_enabled + and not (computed_block_nums is None + or computed_block_nums == [])): + raise RuntimeError( + 
"chunked prefill cannot be used with prefix caching " + "now.") + + token_chunk_size = seq_group_metadata.token_chunk_size + seq_data = seq_group_metadata.seq_data[seq_id] + context_len = seq_data.get_num_computed_tokens() + # We should use get_len here because in case of preemption + # it contains output tokens. + seq_len = min(seq_data.get_len(), context_len + token_chunk_size) + prompt_tokens = seq_data.get_token_ids()[context_len:seq_len] + seq_lens.append(seq_len) + + # NOTE: This only works for oooooooxxx style attention. + if computed_block_nums is not None and len( + computed_block_nums) > 0 and self.sliding_window is None: + # Prefix is not supported with sliding_window + context_len = len(computed_block_nums) * self.block_size + prompt_tokens = prompt_tokens[context_len:] + prefix_block_tables.append(computed_block_nums) + elif self.scheduler_config.chunked_prefill_enabled: + if seq_group_metadata.block_tables is not None: + # Prefill has chunked before. + block_table = seq_group_metadata.block_tables[seq_id] + prefix_block_tables.append(block_table) + else: + # The first prefill. + prefix_block_tables.append([]) + else: + prefix_block_tables.append([]) + # Right now, prefill start is always 0. However, this + # assumption can be changed once chunked prefill is introduced. + assert context_len == 0 + + # actual prompt lens + context_lens.append(context_len) + query_lens.append(seq_len - context_len) + + input_tokens.extend(prompt_tokens) + # NOTE(woosuk): Here we assume that the first token in the prompt + # is always the first token in the sequence. 
+ input_positions.extend(list(range(context_len, seq_len))) + lora_id = seq_group_metadata.lora_int_id + + if lora_id > 0: + lora_requests.add(seq_group_metadata.lora_request) + + lora_index_mapping += [lora_id] * (seq_len - context_len) + lora_prompt_mapping.extend( + [lora_id] * + (seq_len - context_len + if seq_group_metadata.sampling_params.prompt_logprobs else 1)) + + if seq_group_metadata.multi_modal_data: + multi_modal_input_list.append( + seq_group_metadata.multi_modal_data.data) + + if seq_group_metadata.block_tables is None: + # During memory profiling, the block tables are not initialized + # yet. In this case, we just use a dummy slot mapping. + slot_mapping.extend([_PAD_SLOT_ID] * seq_len) + continue + + # Compute the slot mapping. + block_table = seq_group_metadata.block_tables[seq_id] + + # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID, + # where start_idx is max(0, seq_len - sliding_window). + # For example, if the prompt len is 10, sliding window is 8, and + # block size is 4, the first two tokens are masked and the slot + # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]. 
+ start_idx = 0 + if self.sliding_window is not None: + assert context_len == 0, ( + "Prefix caching is currently not supported with " + "sliding window attention") + start_idx = max(0, seq_len - self.sliding_window) + + for i in range(context_len, seq_len): + if i < start_idx: + slot_mapping.append(_PAD_SLOT_ID) + continue + + block_number = block_table[i // self.block_size] + block_offset = i % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping.append(slot) + + max_query_len = max(query_lens) + max_seq_len = max(seq_lens) + assert max_query_len > 0 + + context_lens_tensor = torch.tensor(context_lens, + dtype=torch.int, + device=self.device) + + if multi_modal_input_list: + assert self.vision_language_config, ( + "Multi-modal inputs are only supported by " + "vision language models.") + multi_modal_input = torch.cat(multi_modal_input_list, + dim=0).to(self.device) + else: + multi_modal_input = None + + # Prepare prefix block tables + max_prompt_block_table_len = max(len(t) for t in prefix_block_tables) + block_tables = make_tensor_with_pad( + prefix_block_tables, + max_len=max_prompt_block_table_len, + pad=0, + dtype=torch.int, + device=self.device, + ) + + # Query length can be shorter than key (i.e., prompt) when prefill + # is chunked or prefix cached. 
+ query_lens_tensor = torch.tensor(query_lens, + dtype=torch.long, + device=self.device) + subquery_start_loc = torch.zeros(query_lens_tensor.shape[0] + 1, + dtype=torch.long, + device=self.device) + + seq_lens_tensor = torch.tensor(seq_lens, + dtype=torch.int, + device=self.device) + seq_start_loc = torch.zeros(seq_lens_tensor.shape[0] + 1, + dtype=torch.long, + device=self.device) + + torch.cumsum(query_lens_tensor, + dim=0, + dtype=subquery_start_loc.dtype, + out=subquery_start_loc[1:]) + + torch.cumsum(seq_lens_tensor, + dim=0, + dtype=seq_start_loc.dtype, + out=seq_start_loc[1:]) + subquery_start_loc = subquery_start_loc.int() + seq_start_loc = seq_start_loc.int() + + if self.attn_backend is FlashInferBackend: + attn_metadata = self.attn_backend.make_metadata( + is_prompt=True, + use_cuda_graph=False, + seq_start_loc=seq_start_loc, + max_seq_len=max_seq_len, + block_tables=block_tables) + else: + attn_metadata = self.attn_backend.make_metadata( + is_prompt=True, + seq_lens=seq_lens, + seq_lens_tensor=seq_lens_tensor, + max_query_len=max_query_len, + max_seq_len=max_seq_len, + subquery_start_loc=subquery_start_loc, + seq_start_loc=seq_start_loc, + context_lens_tensor=context_lens_tensor, + block_tables=block_tables, + use_cuda_graph=False, + ) + + return PreparePromptMetadata( + input_tokens=input_tokens, + input_positions=input_positions, + attn_metadata=attn_metadata, + seq_lens=seq_lens, + query_lens=query_lens, + lora_index_mapping=lora_index_mapping, + lora_prompt_mapping=lora_prompt_mapping, + lora_requests=lora_requests, + multi_modal_input=multi_modal_input, + slot_mapping=slot_mapping, + ) + + def _prepare_decode( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> PrepareDecodeMetadata: + input_tokens: List[int] = [] + input_positions: List[int] = [] + slot_mapping: List[int] = [] + seq_lens: List[int] = [] + block_tables: List[List[int]] = [] + lora_index_mapping: List[int] = [] + lora_prompt_mapping: List[int] = [] + 
lora_requests: Set[LoRARequest] = set() + + # The following fields are only for flashinfer + # Please follow https://docs.flashinfer.ai/tutorials/kv_layout.html#page-layout + # for the precise definition of the following fields. + # An example: + # request 1, page indices [0, 5, 8] + # request 2, page indices [1, 6, 7] + # request 3, page indices [3, 4] + # paged_kv_indices is a concatenation of page indices of all requests: + # [0, 5, 8, 1, 6, 7, 3, 4] + # paged_kv_indptr is used to index into paged_kv_indices: + # [0, 3, 6, 8] + paged_kv_indices: List[int] = [] + # 0 at the beginning of paged_kv_indptr indicates the start of the + # first request’s page indices in the paged_kv_indices list. + paged_kv_indptr: List[int] = [0] + # paged_kv_last_page_len is the length of the last page of each request + paged_kv_last_page_len: List[int] = [] + + if len(seq_group_metadata_list) == 0: + return PrepareDecodeMetadata.empty() + + for seq_group_metadata in seq_group_metadata_list: + assert not seq_group_metadata.is_prompt + assert seq_group_metadata.token_chunk_size == 1 + + seq_ids = list(seq_group_metadata.seq_data.keys()) + lora_id = seq_group_metadata.lora_int_id + + if lora_id > 0: + lora_requests.add(seq_group_metadata.lora_request) + + for seq_id in seq_ids: + seq_data = seq_group_metadata.seq_data[seq_id] + generation_token = seq_data.get_last_token_id() + input_tokens.append(generation_token) + + seq_len = seq_data.get_len() + position = seq_len - 1 + input_positions.append(position) + + seq_len = seq_len if self.sliding_window is None else min( + seq_len, self.sliding_window) + seq_lens.append(seq_len) + + block_table = seq_group_metadata.block_tables[seq_id] + block_number = block_table[position // self.block_size] + block_offset = position % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping.append(slot) + lora_index_mapping.append(lora_id) + lora_prompt_mapping.append(lora_id) + + if self.sliding_window is not None: + 
sliding_window_blocks = (self.sliding_window // + self.block_size) + block_table = block_table[-sliding_window_blocks:] + block_tables.append(block_table) + + paged_kv_indices.extend(block_table) + paged_kv_indptr.append(paged_kv_indptr[-1] + len(block_table)) + last_page_len = seq_data.get_len() % self.block_size + if last_page_len == 0: + last_page_len = self.block_size + paged_kv_last_page_len.append(last_page_len) + + # vLLM uses cuda graph only for decoding requests. + # See `capture_model` API for more details. + # For decoding requests, batch_size == input_tokens. + batch_size = len(input_tokens) + max_seq_len = max(seq_lens) + # use_captured_graph = (not self.model_config.enforce_eager + # and batch_size <= _BATCH_SIZES_TO_CAPTURE[-1] + # and max_seq_len <= self.max_seq_len_to_capture) + use_captured_graph = False + if use_captured_graph: + graph_batch_size = _get_graph_batch_size(batch_size) + assert graph_batch_size >= batch_size + for _ in range(graph_batch_size - batch_size): + input_tokens.append(0) + input_positions.append(0) + slot_mapping.append(_PAD_SLOT_ID) + seq_lens.append(1) + block_tables.append([]) + lora_index_mapping.append(0) + batch_size = graph_batch_size + + seq_lens_tensor = torch.tensor(seq_lens, + dtype=torch.int, + device=self.device) + + if use_captured_graph: + # When using cuda-graph all these tensors should be + # padded. + assert seq_lens_tensor.shape[0] == len(input_tokens) + assert seq_lens_tensor.shape[0] == len(input_positions) + assert seq_lens_tensor.shape[0] == len(slot_mapping) + + # The shape of graph_block_tables is + # [max batch size, max context len // block size]. 
+ input_block_tables = self.graph_block_tables[:batch_size] + for i, block_table in enumerate(block_tables): + if block_table: + input_block_tables[i, :len(block_table)] = block_table + block_tables = torch.tensor(input_block_tables, device=self.device) + else: + max_block_table_len = max( + len(block_table) for block_table in block_tables) + block_tables = make_tensor_with_pad( + block_tables, + max_len=max_block_table_len, + pad=0, + dtype=torch.int, + device=self.device, + ) + + if self.attn_backend is FlashInferBackend: + if not hasattr(self, "flashinfer_workspace_buffer"): + # Allocate 16MB workspace buffer + # Follow the example of flashinfer: https://docs.flashinfer.ai/api/python/decode.html + self.flashinfer_workspace_buffer = torch.empty( + 16 * 1024 * 1024, dtype=torch.uint8, device=self.device) + paged_kv_indptr = torch.tensor(paged_kv_indptr, + dtype=torch.int, + device=self.device) + paged_kv_indices = torch.tensor(paged_kv_indices, + dtype=torch.int, + device=self.device) + paged_kv_last_page_len = torch.tensor(paged_kv_last_page_len, + dtype=torch.int, + device=self.device) + kv_cache_dtype = get_kv_cache_torch_dtype(self.kv_cache_dtype, + self.model_config.dtype) + + attn_metadata = self.attn_backend.make_metadata( + is_prompt=False, + use_cuda_graph=False, + workspace_buffer=self.flashinfer_workspace_buffer, + paged_kv_indptr=paged_kv_indptr, + paged_kv_indices=paged_kv_indices, + paged_kv_last_page_len=paged_kv_last_page_len, + num_qo_heads=self.model_config.get_num_attention_heads( + self.parallel_config), + num_kv_heads=self.model_config.get_num_kv_heads( + self.parallel_config), + head_dim=self.model_config.get_head_size(), + page_size=self.block_size, + data_type=kv_cache_dtype) + else: + attn_metadata = self.attn_backend.make_metadata( + is_prompt=False, + seq_lens=None, + seq_lens_tensor=seq_lens_tensor, + max_seq_len=max_seq_len, + max_query_len=None, + subquery_start_loc=None, + seq_start_loc=None, + context_lens_tensor=None, + 
block_tables=block_tables, + use_cuda_graph=use_captured_graph, + ) + return PrepareDecodeMetadata( + input_tokens=input_tokens, + input_positions=input_positions, + attn_metadata=attn_metadata, + lora_index_mapping=lora_index_mapping, + lora_prompt_mapping=lora_prompt_mapping, + lora_requests=lora_requests, + slot_mapping=slot_mapping, + ) + + def prepare_input_tensors( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, SamplingMetadata, + Set[LoRARequest], LoRAMapping, torch.Tensor]: + if self.is_driver_worker: + prefill_reqs = [] + decode_reqs = [] + for seq_group_meta in seq_group_metadata_list: + if seq_group_meta.is_prompt: + prefill_reqs.append(seq_group_meta) + else: + decode_reqs.append(seq_group_meta) + + # Prepare input tensors. + ( + input_tokens, + input_positions, + prefill_attn_metadata, + seq_lens, + query_lens, + lora_index_mapping, + lora_prompt_mapping, + lora_requests, + multi_modal_input, + slot_mapping, + ) = self._prepare_prompt(prefill_reqs) + ( + decode_input_tokens, + decode_input_positions, + decode_attn_metadata, + decode_lora_index_mapping, + decode_lora_prompt_mapping, + decode_lora_requests, + decode_slot_mapping, + ) = self._prepare_decode(decode_reqs) + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, seq_lens, query_lens, self.device, + self.pin_memory) + + if not self.scheduler_config.chunked_prefill_enabled: + assert (len(prefill_reqs) and len(decode_reqs)) == 0 + + num_prefills = len(seq_lens) + num_prefill_tokens = len(input_tokens) + num_decode_tokens = len(decode_input_tokens) + + # Coalesce tensors. Note that attn_metadata is currently not + # coalesced for simplicity. 
+ input_tokens.extend(decode_input_tokens) + input_positions.extend(decode_input_positions) + slot_mapping.extend(decode_slot_mapping) + lora_index_mapping.extend(decode_lora_index_mapping) + lora_prompt_mapping.extend(decode_lora_prompt_mapping) + lora_requests.update(decode_lora_requests) + + input_tokens = torch.tensor(input_tokens, + dtype=torch.long, + device=self.device) + input_positions = torch.tensor(input_positions, + dtype=torch.long, + device=self.device) + slot_mapping = torch.tensor(slot_mapping, + dtype=torch.long, + device=self.device) + + if self.lora_config: + lora_mapping = LoRAMapping( + lora_index_mapping, + lora_prompt_mapping, + ) + else: + lora_mapping = None + + # Broadcast the metadata. + # If batch contains both prefill and decode, it sends 2 broadcasts. + # If it only contains 1 type, it triggers a single broadcast. + if (prefill_attn_metadata is not None + and decode_attn_metadata is not None): + batch_type = BatchType.MIXED + elif prefill_attn_metadata is not None: + batch_type = BatchType.PREFILL + else: + batch_type = BatchType.DECODE + + metadata_dict = { + "input_tokens": input_tokens, + "input_positions": input_positions, + "selected_token_indices": + sampling_metadata.selected_token_indices, + "lora_requests": lora_requests, + "lora_mapping": lora_mapping, + "multi_modal_input": multi_modal_input, + "num_prefill_tokens": num_prefill_tokens, + "num_decode_tokens": num_decode_tokens, + "slot_mapping": slot_mapping, + "num_prefills": num_prefills, + "batch_type": batch_type, + } + if prefill_attn_metadata is not None: + metadata_dict.update(prefill_attn_metadata.asdict_zerocopy()) + else: + assert decode_attn_metadata is not None + metadata_dict.update(decode_attn_metadata.asdict_zerocopy()) + broadcast_tensor_dict(metadata_dict, src=0) + + # Broadcast decode attn metadata for mixed batch type. + # The additional broadcast costs 300us overhead on 4 A10 GPUs. + # We can potentially reduce the overhead by coelescing tensors. 
+ if batch_type == BatchType.MIXED: + assert decode_attn_metadata is not None + metadata_dict = decode_attn_metadata.asdict_zerocopy() + broadcast_tensor_dict(metadata_dict, src=0) + else: + metadata_dict = broadcast_tensor_dict(src=0) + input_tokens = metadata_dict.pop("input_tokens") + input_positions = metadata_dict.pop("input_positions") + slot_mapping = metadata_dict.pop("slot_mapping") + num_prefills = metadata_dict.pop("num_prefills") + selected_token_indices = metadata_dict.pop( + "selected_token_indices") + lora_mapping = metadata_dict.pop("lora_mapping") + lora_requests = metadata_dict.pop("lora_requests") + multi_modal_input = metadata_dict.pop("multi_modal_input") + num_prefill_tokens = metadata_dict.pop("num_prefill_tokens") + num_decode_tokens = metadata_dict.pop("num_decode_tokens") + batch_type = metadata_dict.pop("batch_type") + + # Create an attention metadata. + prefill_attn_metadata = None + decode_attn_metadata = None + if batch_type == BatchType.PREFILL or batch_type == BatchType.MIXED: + prefill_attn_metadata = self.attn_backend.make_metadata( + **metadata_dict) + else: + decode_attn_metadata = self.attn_backend.make_metadata( + **metadata_dict) + sampling_metadata = SamplingMetadata( + seq_groups=None, + selected_token_indices=selected_token_indices, + categorized_sample_indices=None, + num_prompts=0, + ) + + # if it is a mixed batch, decode attn_metadata is broadcasted + # separately. 
+ if batch_type == BatchType.MIXED: + metadata_dict = broadcast_tensor_dict(src=0) + decode_attn_metadata = self.attn_backend.make_metadata( + **metadata_dict) + + attn_metadata = AttentionMetadata( + num_prefills=num_prefills, + slot_mapping=slot_mapping, + num_prefill_tokens=num_prefill_tokens, + num_decode_tokens=num_decode_tokens, + prefill_metadata=prefill_attn_metadata, + decode_metadata=decode_attn_metadata, + kv_cache_dtype=self.kv_cache_dtype, + ) + + return (input_tokens, input_positions, attn_metadata, + sampling_metadata, lora_requests, lora_mapping, + multi_modal_input) + + @torch.inference_mode() + def execute_model( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + kv_caches: List[torch.Tensor], + ) -> Optional[SamplerOutput]: + (input_tokens, input_positions, attn_metadata, sampling_metadata, + lora_requests, lora_mapping, multi_modal_input + ) = self.prepare_input_tensors(seq_group_metadata_list) + + if self.lora_config: + self.set_active_loras(lora_requests, lora_mapping) + + # Currently cuda graph is only supported by the decode phase. + prefill_meta = attn_metadata.prefill_metadata + decode_meta = attn_metadata.decode_metadata + if prefill_meta is None and decode_meta.use_cuda_graph: + graph_batch_size = input_tokens.shape[0] + model_executable = self.graph_runners[graph_batch_size] + else: + model_executable = self.model + execute_model_kwargs = { + "input_ids": input_tokens, + "positions": input_positions, + "kv_caches": kv_caches, + "attn_metadata": attn_metadata, + } + if self.vision_language_config: + execute_model_kwargs.update({"image_input": multi_modal_input}) + hidden_states = model_executable(**execute_model_kwargs) + + # Compute the logits. + logits = self.model.compute_logits(hidden_states, sampling_metadata) + + # Only perform sampling in the driver worker. + if not self.is_driver_worker: + return None + + # Sample the next token. 
        output = self.model.sample(
            logits=logits,
            sampling_metadata=sampling_metadata,
        )

        return output

    @torch.inference_mode()
    def profile_run(self) -> None:
        """Run the model on worst-case dummy inputs to profile memory usage.

        Builds max_num_seqs dummy prompt sequence groups whose token counts
        sum to max_num_batched_tokens, executes the model once with empty
        KV caches, and synchronizes the device so peak memory is observable.
        """
        # Enable top-k sampling to reflect the accurate memory usage.
        sampling_params = SamplingParams(top_p=0.99, top_k=self.vocab_size - 1)
        max_num_batched_tokens = self.scheduler_config.max_num_batched_tokens
        max_num_seqs = self.scheduler_config.max_num_seqs

        # max_loras represents the maximum number of different requests
        # that will have unique loras, and therefore the max amount of
        # memory consumption. Create dummy lora request copies from the
        # lora request passed in, which contains a lora from the lora
        # warmup path.
        dummy_lora_requests = []
        dummy_lora_requests_per_seq = []
        if self.lora_config:
            for idx in range(self.lora_config.max_loras):
                lora_id = idx + 1
                dummy_lora_request = LoRARequest(
                    lora_name=f"warmup_{lora_id}",
                    lora_int_id=lora_id,
                    lora_local_path="/not/a/real/path",
                )
                self.lora_manager.add_dummy_lora(dummy_lora_request,
                                                 rank=LORA_WARMUP_RANK)
                dummy_lora_requests.append(dummy_lora_request)
            dummy_lora_requests_per_seq = [
                dummy_lora_requests[idx % len(dummy_lora_requests)]
                for idx in range(max_num_seqs)
            ]

        # Profile memory usage with max_num_sequences sequences and the total
        # number of tokens equal to max_num_batched_tokens.
        seqs: List[SequenceGroupMetadata] = []
        # Additional GPU memory may be needed for vision encoding, which needs
        # to be accounted for when calculating the GPU blocks for
        # the vLLM block manager.
        # To exercise the worst scenario for GPU memory consumption,
        # the number of seqs (batch_size) is chosen to maximize the number
        # of images processed.
+ if self.vision_language_config: + max_num_seqs = min( + max_num_seqs, + int(max_num_batched_tokens / + self.vision_language_config.image_feature_size)) + for group_id in range(max_num_seqs): + seq_len = (max_num_batched_tokens // max_num_seqs + + (group_id < max_num_batched_tokens % max_num_seqs)) + seq_data, fake_multi_modal_input = _prepare_fake_inputs( + seq_len, self.vision_language_config) + seq = SequenceGroupMetadata( + request_id=str(group_id), + is_prompt=True, + seq_data={group_id: seq_data}, + sampling_params=sampling_params, + block_tables=None, + lora_request=dummy_lora_requests_per_seq[group_id] + if dummy_lora_requests_per_seq else None, + multi_modal_data=fake_multi_modal_input, + ) + seqs.append(seq) + + # Run the model with the dummy inputs. + num_layers = self.model_config.get_num_layers(self.parallel_config) + kv_caches = [None] * num_layers + self.execute_model(seqs, kv_caches) + torch.musa.synchronize() + return + + def remove_all_loras(self): + if not self.lora_manager: + raise RuntimeError("LoRA is not enabled.") + self.lora_manager.remove_all_loras() + + def set_active_loras(self, lora_requests: Set[LoRARequest], + lora_mapping: LoRAMapping) -> None: + if not self.lora_manager: + raise RuntimeError("LoRA is not enabled.") + self.lora_manager.set_active_loras(lora_requests, lora_mapping) + + def add_lora(self, lora_request: LoRARequest) -> bool: + if not self.lora_manager: + raise RuntimeError("LoRA is not enabled.") + return self.lora_manager.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + if not self.lora_manager: + raise RuntimeError("LoRA is not enabled.") + return self.lora_manager.remove_lora(lora_id) + + def list_loras(self) -> Set[int]: + if not self.lora_manager: + raise RuntimeError("LoRA is not enabled.") + return self.lora_manager.list_loras() + + @torch.inference_mode() + def capture_model(self, kv_caches: List[torch.Tensor]) -> None: + """Cuda graph capture a model. 
+ + Note that CUDA graph's performance gain is negligible if number + of batched tokens are larger than 200. And since CUDA graph + requires fixed sized tensors, supporting large/variable batch + size requires high GPU memory overhead. Thus, vLLM only captures + decoding requests. Mixed batch (chunked prefill + decoding) or + prefill requests are not captured. + + Since it is used for decoding-only, it assumes there's only 1 token + per sequence in the batch. + """ + # NOTE(woosuk): This is a hack to ensure that the NCCL backend is never + # deleted before the CUDA graphs. + self.pynccl_backend = pymccl_utils.get_nccl_backend() + + assert not self.model_config.enforce_eager + logger.info("Capturing the model for CUDA graphs. This may lead to " + "unexpected consequences if the model is not static. To " + "run the model in eager mode, set 'enforce_eager=True' or " + "use '--enforce-eager' in the CLI.") + logger.info("CUDA graphs can take additional 1~3 GiB memory per GPU. " + "If you are running out of memory, consider decreasing " + "`gpu_memory_utilization` or enforcing eager mode. " + "You can also reduce the `max_num_seqs` as needed " + "to decrease memory usage.") + start_time = time.perf_counter() + + # Prepare dummy inputs. These will be reused for all batch sizes. 
+ max_batch_size = max(_BATCH_SIZES_TO_CAPTURE) + input_tokens = torch.zeros(max_batch_size, dtype=torch.long).musa() + input_positions = torch.zeros(max_batch_size, dtype=torch.long).musa() + slot_mapping = torch.empty(max_batch_size, dtype=torch.long).musa() + slot_mapping.fill_(_PAD_SLOT_ID) + seq_lens = torch.ones(max_batch_size, dtype=torch.int32).musa() + block_tables = torch.from_numpy(self.graph_block_tables).musa() + + graph_batch_size = _get_graph_batch_size( + self.scheduler_config.max_num_seqs) + batch_size_capture_list = [ + bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size + ] + + # NOTE(woosuk): There are 3 backends for all-reduce: custom all-reduce + # kernel, pynccl, and PyTorch NCCL. When using CUDA graph, we use + # either custom all-reduce kernel or pynccl. When not using CUDA + # graph, we use either custom all-reduce kernel or PyTorch NCCL. + # We always prioritize using custom all-reduce kernel but fall back + # to PyTorch or pynccl if it is disabled or not supported. + with custom_all_reduce.capture(): + # NOTE: Capturing the largest batch size first may help reduce the + # memory usage of CUDA graph. + for batch_size in reversed(batch_size_capture_list): + # Create dummy attn_metadata. 
+ decode_metadata = self.attn_backend.make_metadata( + is_prompt=False, + seq_lens=None, + seq_lens_tensor=seq_lens[:batch_size], + max_query_len=None, + max_seq_len=self.max_seq_len_to_capture, + subquery_start_loc=None, + seq_start_loc=None, + context_lens_tensor=None, + block_tables=block_tables[:batch_size], + use_cuda_graph=False, + ) + attn_metadata = AttentionMetadata( + num_prefills=0, + num_prefill_tokens=0, + num_decode_tokens=batch_size, + slot_mapping=slot_mapping[:batch_size], + prefill_metadata=None, + decode_metadata=decode_metadata, + kv_cache_dtype=self.kv_cache_dtype, + ) + + if self.lora_config: + lora_mapping = LoRAMapping( + [0] * batch_size, + [0] * batch_size, + ) + self.set_active_loras(set(), lora_mapping) + + # graph_runner = CUDAGraphRunner(self.model) + # graph_runner.capture( + # input_tokens[:batch_size], + # input_positions[:batch_size], + # kv_caches, + # attn_metadata, + # memory_pool=self.graph_memory_pool, + # ) + # self.graph_memory_pool = graph_runner.graph.pool() + # self.graph_runners[batch_size] = graph_runner + + end_time = time.perf_counter() + elapsed_time = end_time - start_time + # This usually takes < 10 seconds. + logger.info("Graph capturing finished in %.0f secs.", elapsed_time) + + def __del__(self) -> None: + # Delete the CUDA graphs before deleting the pynccl communicator. + # NOTE(woosuk): This is necessary because otherwise deadlocks can + # happen. + # FIXME(woosuk): This is a bit hacky. Find a more robust solution. + # TODO(youkaichao): when we get enough user feedback that pynccl is + # more stable than cupy, we can remove this, e.g. in v0.4.1. 
+ self.graph_runners.clear() + self.pynccl_backend = None + + @property + def vocab_size(self) -> int: + return self.model_config.get_vocab_size() + + +class CUDAGraphRunner: + + def __init__(self, model: nn.Module): + self.model = model + self.input_buffers: Dict[str, torch.Tensor] = {} + self.output_buffers: Dict[str, torch.Tensor] = {} + + self._graph: Optional[torch.musa.MUSAGraph] = None + + @property + def graph(self): + assert self._graph is not None + return self._graph + + def capture( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + memory_pool, + **kwargs, + ) -> None: + assert self._graph is None + # Run the model once without capturing the graph. + # This is to make sure that the captured graph does not include the + # kernel launches for initial benchmarking (e.g., Triton autotune). + with _maybe_pynccl(): + self.model( + input_ids, + positions, + kv_caches, + attn_metadata, + **kwargs, + ) + torch.musa.synchronize() + + # Capture the graph. + # NOTE(woosuk): Python 3.8 does not support multi-line with statements. + # https://stackoverflow.com/questions/31039022/python-multi-line-with-statement + import pdb;pdb.set_trace() + self._graph = torch.musa.MUSAGraph() + with torch.musa.graph(self._graph, pool=memory_pool): # noqa: SIM117 + with _maybe_pynccl(): + hidden_states = self.model( + input_ids, + positions, + kv_caches, + attn_metadata, + **kwargs, + ) + torch.musa.synchronize() + + # Save the input and output buffers. 
+ self.input_buffers = { + "input_ids": input_ids, + "positions": positions, + "kv_caches": kv_caches, + "slot_mapping": attn_metadata.slot_mapping, + "seq_lens_tensor": attn_metadata.decode_metadata.seq_lens_tensor, + "block_tables": attn_metadata.decode_metadata.block_tables, + } + self.output_buffers = {"hidden_states": hidden_states} + return + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + **kwargs, + ) -> torch.Tensor: + # KV caches are fixed tensors, so we don't need to copy them. + del kv_caches + + # Copy the input tensors to the input buffers. + self.input_buffers["input_ids"].copy_(input_ids, non_blocking=True) + self.input_buffers["positions"].copy_(positions, non_blocking=True) + self.input_buffers["slot_mapping"].copy_(attn_metadata.slot_mapping, + non_blocking=True) + self.input_buffers["seq_lens_tensor"].copy_( + attn_metadata.decode_metadata.seq_lens_tensor, non_blocking=True) + self.input_buffers["block_tables"].copy_( + attn_metadata.decode_metadata.block_tables, non_blocking=True) + # Run the graph. + self.graph.replay() + + # Return the output tensor. + return self.output_buffers["hidden_states"] + + def __call__(self, *args, **kwargs): + return self.forward(*args, **kwargs) + + +@contextlib.contextmanager +def _maybe_pynccl(): + if pymccl_utils.is_initialized( + ) and not custom_all_reduce.is_initialized(): + with with_pynccl_for_all_reduce(): + yield + else: + yield + + +def _get_graph_batch_size(batch_size: int) -> int: + """Returns the padded batch size given actual batch size. + + Batch sizes are 1, 2, 4, _BATCH_SIZE_ALIGNMENT, + 2*_BATCH_SIZE_ALIGNMENT, 3*_BATCH_SIZE_ALIGNMENT... 
+ """ + if batch_size <= 2: + return batch_size + elif batch_size <= 4: + return 4 + else: + return ((batch_size + _BATCH_SIZE_ALIGNMENT - 1) // + _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT) + + +def _prepare_fake_inputs( + seq_len: int, vision_language_config: Optional[VisionLanguageConfig]): + """Prepare fake inputs for profile run.""" + if vision_language_config: + prompt_tokens = [ + vision_language_config.image_token_id + ] * vision_language_config.image_feature_size + [0] * ( + seq_len - vision_language_config.image_feature_size) + fake_image_input = MultiModalData( + type=MultiModalData.Type.IMAGE, + data=torch.zeros(vision_language_config.image_input_shape, + dtype=torch.float16)) + else: + prompt_tokens = [0] * seq_len + fake_image_input = None + return SequenceData(prompt_tokens), fake_image_input diff --git a/vllm/worker/neuron_model_runner.py b/vllm/worker/neuron_model_runner.py new file mode 100644 index 0000000..a336be0 --- /dev/null +++ b/vllm/worker/neuron_model_runner.py @@ -0,0 +1,196 @@ +from typing import List, Optional, Tuple + +import torch +from torch import nn + +from vllm.config import (DeviceConfig, ModelConfig, ParallelConfig, + SchedulerConfig) +from vllm.logger import init_logger +from vllm.model_executor import SamplingMetadata +from vllm.model_executor.model_loader.neuron import get_neuron_model +from vllm.sequence import SamplerOutput, SequenceGroupMetadata +from vllm.utils import is_pin_memory_available, make_tensor_with_pad + +logger = init_logger(__name__) + + +class NeuronModelRunner: + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + ): + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + + if model_config is not None and model_config.get_sliding_window(): + logger.warning("Sliding window is not supported on Neuron. 
" + "The model will run without sliding window.") + self.device_config = (device_config + if device_config is not None else DeviceConfig()) + self.device = self.device_config.device + self.pin_memory = is_pin_memory_available() + + # Lazy initialization. + self.model: nn.Module # initialize after load_model. + + def load_model(self) -> None: + self.model = get_neuron_model(self.model_config, + parallel_config=self.parallel_config, + scheduler_config=self.scheduler_config) + + def _prepare_prompt( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, List[int]]: + assert len(seq_group_metadata_list) > 0 + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + input_block_ids: List[int] = [] + + seq_lens: List[int] = [] + for seq_group_metadata in seq_group_metadata_list: + assert seq_group_metadata.is_prompt + seq_ids = list(seq_group_metadata.seq_data.keys()) + assert len(seq_ids) == 1 + seq_id = seq_ids[0] + + seq_data = seq_group_metadata.seq_data[seq_id] + prompt_tokens = seq_data.get_token_ids() + seq_len = len(prompt_tokens) + seq_lens.append(seq_len) + + input_tokens.append(prompt_tokens) + input_positions.append(list(range(seq_len))) + + assert seq_group_metadata.block_tables is not None + block_table = seq_group_metadata.block_tables[seq_id] + assert len(block_table) == 1 + input_block_ids.append(block_table[0]) + + max_seq_len = max(seq_lens) + assert max_seq_len > 0 + input_tokens = make_tensor_with_pad(input_tokens, + max_seq_len, + pad=0, + dtype=torch.long, + device=self.device) + input_positions = make_tensor_with_pad(input_positions, + max_seq_len, + pad=0, + dtype=torch.long, + device=self.device) + input_block_ids = torch.tensor(input_block_ids, + dtype=torch.long, + device=self.device) + + return input_tokens, input_positions, input_block_ids, seq_lens + + def _prepare_decode( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + assert len(seq_group_metadata_list) > 0 + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + input_block_ids: List[int] = [] + context_lens: List[int] = [] + + for seq_group_metadata in seq_group_metadata_list: + assert not seq_group_metadata.is_prompt + + seq_ids = list(seq_group_metadata.seq_data.keys()) + + for seq_id in seq_ids: + seq_data = seq_group_metadata.seq_data[seq_id] + generation_token = seq_data.get_last_token_id() + input_tokens.append([generation_token]) + + seq_len = seq_data.get_len() + position = seq_len - 1 + input_positions.append([position]) + context_lens.append(seq_len) + + assert seq_group_metadata.block_tables is not None + block_table = seq_group_metadata.block_tables[seq_id] + assert len(block_table) == 1 + input_block_ids.append(block_table[0]) + + input_tokens = make_tensor_with_pad(input_tokens, + max_len=1, + pad=0, + dtype=torch.long, + device=self.device) + input_positions = make_tensor_with_pad(input_positions, + max_len=1, + pad=0, + dtype=torch.long, + device=self.device) + context_lens = torch.tensor(context_lens, + dtype=torch.int, + device=self.device) + input_block_ids = torch.tensor(input_block_ids, + dtype=torch.long, + device=self.device) + + return input_tokens, input_positions, input_block_ids + + def prepare_input_tensors( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, SamplingMetadata]: + # NOTE: We assume that all sequences in the group are all prompts or + # all decodes. + is_prompt = seq_group_metadata_list[0].is_prompt + # Prepare input tensors. 
+ if is_prompt: + (input_tokens, input_positions, input_block_ids, + seq_lens) = self._prepare_prompt(seq_group_metadata_list) + else: + (input_tokens, input_positions, + input_block_ids) = self._prepare_decode(seq_group_metadata_list) + seq_lens = [] + sampling_metadata = SamplingMetadata.prepare( + seq_group_metadata_list, + seq_lens, + # query_lens is not needed if chunked prefill is not + # supported. Since neuron worker doesn't support chunked prefill + # just use seq_lens instead. + seq_lens, + self.device, + self.pin_memory) + + return (input_tokens, input_positions, input_block_ids, + sampling_metadata) + + @torch.inference_mode() + def execute_model( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> Optional[SamplerOutput]: + (input_tokens, input_positions, input_block_ids, sampling_metadata + ) = self.prepare_input_tensors(seq_group_metadata_list) + + hidden_states = self.model( + input_ids=input_tokens, + positions=input_positions, + input_block_ids=input_block_ids, + ) + + # Compute the logits. + logits = self.model.compute_logits(hidden_states, sampling_metadata) + + # Sample the next token. 
+ output = self.model.sample( + logits=logits, + sampling_metadata=sampling_metadata, + ) + return output + + @property + def vocab_size(self) -> int: + return self.model_config.get_vocab_size() diff --git a/vllm/worker/neuron_worker.py b/vllm/worker/neuron_worker.py new file mode 100644 index 0000000..d0e6aae --- /dev/null +++ b/vllm/worker/neuron_worker.py @@ -0,0 +1,98 @@ +"""A Neuron worker class.""" +from typing import List, Tuple + +import torch +import torch.distributed + +from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, + ParallelConfig, SchedulerConfig) +from vllm.model_executor import set_random_seed +from vllm.sequence import SamplerOutput, SequenceGroupMetadata +from vllm.worker.neuron_model_runner import NeuronModelRunner +from vllm.worker.worker_base import LoraNotSupportedWorkerBase + + +class NeuronWorker(LoraNotSupportedWorkerBase): + """A worker class that executes the model on a group of neuron cores. + """ + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + cache_config: CacheConfig, + ) -> None: + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.cache_config = cache_config + if self.model_config.trust_remote_code: + # note: lazy import to avoid importing torch before initializing + from vllm.utils import init_cached_hf_modules + init_cached_hf_modules() + + self.model_runner = NeuronModelRunner(model_config, parallel_config, + scheduler_config, device_config) + + def init_device(self) -> None: + # Set random seed. + set_random_seed(self.model_config.seed) + + def load_model(self): + self.model_runner.load_model() + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of available KV blocks. + + Swapping is not yet supported, so always return num_cpu_blocks=0. 
class NeuronWorker(LoraNotSupportedWorkerBase):
    """Worker that drives model execution on a group of Neuron cores."""

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        cache_config: CacheConfig,
    ) -> None:
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.cache_config = cache_config
        if self.model_config.trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()

        self.model_runner = NeuronModelRunner(model_config, parallel_config,
                                              scheduler_config, device_config)

    def init_device(self) -> None:
        """Device setup reduces to seeding RNGs on this backend."""
        set_random_seed(self.model_config.seed)

    def load_model(self):
        self.model_runner.load_model()

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Report cache capacity as (num_gpu_blocks, num_cpu_blocks).

        One "block" per schedulable sequence: setting num_gpu_blocks to
        max_num_seqs is equivalent to scheduling without PagedAttention.
        Swapping is not supported on Neuron, so the CPU side is always 0.
        """
        return self.scheduler_config.max_num_seqs, 0

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Record the KV cache sizing; other values are untested."""
        assert num_cpu_blocks == 0
        assert num_gpu_blocks == self.scheduler_config.max_num_seqs

        self.cache_config.num_gpu_blocks = num_gpu_blocks
        self.cache_config.num_cpu_blocks = num_cpu_blocks

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
    ) -> List[SamplerOutput]:
        """Run a single step; empty input short-circuits to an empty list."""
        if not seq_group_metadata_list:
            return []

        step_output = self.model_runner.execute_model(seq_group_metadata_list)

        # Only single-step execution is supported; wrap the one result in a
        # list to satisfy the worker interface.
        return [step_output]

    def get_cache_block_size_bytes(self) -> int:
        """Size of one cache block in bytes.

        Needed for speculative decoding, which is not implemented here yet.
        """
        raise NotImplementedError
+ """ + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + cache_config: CacheConfig, + load_config: LoadConfig, + local_rank: int, + rank: int, + distributed_init_method: str, + lora_config: Optional[LoRAConfig] = None, + vision_language_config: Optional[VisionLanguageConfig] = None, + is_driver_worker: bool = False, + ) -> None: + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.cache_config = cache_config + self.local_rank = local_rank + self.rank = rank + self.distributed_init_method = distributed_init_method + self.lora_config = lora_config + self.load_config = load_config + self.is_driver_worker = is_driver_worker + if self.is_driver_worker: + assert self.rank == 0, "The driver worker must have rank 0." + + if self.model_config.trust_remote_code: + # note: lazy import to avoid importing torch before initializing + from vllm.utils import init_cached_hf_modules + init_cached_hf_modules() + self.vision_language_config = vision_language_config + if self.vision_language_config: + assert not self.lora_config, ( + "To be tested: vision language model with LoRA settings.") + + self.model_runner = ModelRunner( + model_config, + parallel_config, + scheduler_config, + device_config, + load_config=load_config, + lora_config=self.lora_config, + kv_cache_dtype=self.cache_config.cache_dtype, + is_driver_worker=is_driver_worker, + vision_language_config=vision_language_config, + ) + # Uninitialized cache engine. Will be initialized by + # initialize_cache. + self.cache_engine: CacheEngine + self.gpu_cache: List[torch.Tensor] + + def init_device(self) -> None: + if self.device_config.device.type == "cuda": + # torch.distributed.all_reduce does not free the input tensor until + # the synchronization point. 
This causes the memory usage to grow + # as the number of all_reduce calls increases. This env var disables + # this behavior. + # Related issue: + # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573 + os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" + + # This env var set by Ray causes exceptions with graph building. + os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None) + self.device = torch.device(f"cuda:{self.local_rank}") + torch.cuda.set_device(self.device) + + _check_if_gpu_supports_dtype(self.model_config.dtype) + torch.cuda.empty_cache() + self.init_gpu_memory = torch.cuda.mem_get_info()[0] + elif self.device_config.device.type == "musa": + os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" + os.environ["TORCH_MCCL_AVOID_RECORD_STREAMS"] = "1" + + # This env var set by Ray causes exceptions with graph building. + os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None) + os.environ.pop("MCCL_ASYNC_ERROR_HANDLING", None) + self.device = torch.device(f"musa:{self.local_rank}") + torch.musa.set_device(self.device) + + _check_if_gpu_supports_dtype(self.model_config.dtype) + torch.musa.empty_cache() + self.init_gpu_memory = torch.musa.mem_get_info()[0] + else: + raise RuntimeError( + f"Not support device type: {self.device_config.device}") + # Initialize the distributed environment. + init_worker_distributed_environment(self.parallel_config, self.rank, + self.distributed_init_method, + self.local_rank, + backend="mccl") + # Set random seed. + set_random_seed(self.model_config.seed) + + def load_model(self): + self.model_runner.load_model() + + @torch.inference_mode() + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Profiles the peak memory usage of the model to determine how many + KV blocks may be allocated without OOMs. + + The engine will first conduct a profiling of the existing memory usage. 
+ Then, it calculate the maximum possible number of GPU and CPU blocks + that can be allocated with the remaining free memory. + + .. tip:: + You may limit the usage of GPU memory + by adjusting the `gpu_memory_utilization` parameter. + """ + # Profile the memory usage of the model and get the maximum number of + # cache blocks that can be allocated with the remaining free memory. + torch.musa.empty_cache() + + # Execute a forward pass with dummy inputs to profile the memory usage + # of the model. + self.model_runner.profile_run() + + # Calculate the number of blocks that can be allocated with the + # profiled peak memory. + torch.musa.synchronize() + free_gpu_memory, total_gpu_memory = torch.musa.mem_get_info() + # NOTE(woosuk): Here we assume that the other processes using the same + # GPU did not change their memory usage during the profiling. + peak_memory = self.init_gpu_memory - free_gpu_memory + assert peak_memory > 0, ( + "Error in memory profiling. This happens when the GPU memory was " + "not properly cleaned up before initializing the vLLM instance.") + + cache_block_size = self.get_cache_block_size_bytes() + num_gpu_blocks = int( + (total_gpu_memory * self.cache_config.gpu_memory_utilization - + peak_memory) // cache_block_size) + num_cpu_blocks = int(self.cache_config.swap_space_bytes // + cache_block_size) + num_gpu_blocks = max(num_gpu_blocks, 0) + num_cpu_blocks = max(num_cpu_blocks, 0) + if self.model_runner.lora_manager: + self.model_runner.remove_all_loras() + gc.collect() + torch.cuda.empty_cache() + return num_gpu_blocks, num_cpu_blocks + + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """Allocate GPU and CPU KV cache with the specified number of blocks. + + This also warms up the model, which may record CUDA graphs. 
+ """ + raise_if_cache_size_invalid(num_gpu_blocks, + self.cache_config.block_size, + self.model_config.max_model_len) + + self.cache_config.num_gpu_blocks = num_gpu_blocks + self.cache_config.num_cpu_blocks = num_cpu_blocks + + self._init_cache_engine() + self._warm_up_model() + + def _init_cache_engine(self): + assert self.cache_config.num_gpu_blocks is not None + self.cache_engine = CacheEngine(self.cache_config, self.model_config, + self.parallel_config) + self.gpu_cache = self.cache_engine.gpu_cache + self.model_runner.set_block_size(self.cache_engine.block_size) + + def _warm_up_model(self) -> None: + if not self.model_config.enforce_eager: + self.model_runner.capture_model(self.gpu_cache) + # Reset the seed to ensure that the random state is not affected by + # the model initialization and profiling. + set_random_seed(self.model_config.seed) + + def cache_swap( + self, + blocks_to_swap_in: Dict[int, int], + blocks_to_swap_out: Dict[int, int], + blocks_to_copy: Dict[int, List[int]], + ) -> None: + # Issue cache operations. + # TODO(woosuk): Profile swapping overhead and optimize if needed. 
+ if blocks_to_swap_in: + self.cache_engine.swap_in(blocks_to_swap_in) + if blocks_to_swap_out: + self.cache_engine.swap_out(blocks_to_swap_out) + if blocks_to_copy: + self.cache_engine.copy(blocks_to_copy) + + @torch.inference_mode() + def execute_model( + self, + execute_model_req: Optional[ExecuteModelRequest] = None + ) -> List[SamplerOutput]: + + if execute_model_req is None: + seq_group_metadata_list = None + else: + seq_group_metadata_list = execute_model_req.seq_group_metadata_list + + if self.is_driver_worker: + assert seq_group_metadata_list is not None + assert execute_model_req is not None + num_seq_groups = len(seq_group_metadata_list) + blocks_to_swap_in = execute_model_req.blocks_to_swap_in + blocks_to_swap_out = execute_model_req.blocks_to_swap_out + blocks_to_copy = execute_model_req.blocks_to_copy + data: Dict[str, Any] = { + "num_seq_groups": num_seq_groups, + "blocks_to_swap_in": blocks_to_swap_in, + "blocks_to_swap_out": blocks_to_swap_out, + "blocks_to_copy": blocks_to_copy, + } + broadcast_tensor_dict(data, src=0) + else: + data = broadcast_tensor_dict(src=0) + num_seq_groups = data["num_seq_groups"] + blocks_to_swap_in = data["blocks_to_swap_in"] + blocks_to_swap_out = data["blocks_to_swap_out"] + blocks_to_copy = data["blocks_to_copy"] + + self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy) + + # If there is no input, we don't need to execute the model. + if num_seq_groups == 0: + return [] + + output = self.model_runner.execute_model(seq_group_metadata_list, + self.gpu_cache) + + # Worker only supports single-step execution. Wrap the output in a list + # to conform to interface. 
+ return [output] + + def add_lora(self, lora_request: LoRARequest) -> bool: + return self.model_runner.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + return self.model_runner.remove_lora(lora_id) + + def list_loras(self) -> Set[int]: + return self.model_runner.list_loras() + + @property + def max_model_len(self) -> int: + return self.model_config.max_model_len + + @property + def vocab_size(self) -> int: + return self.model_runner.vocab_size + + def get_cache_block_size_bytes(self) -> int: + """Get the size of the KV cache block size in bytes. + """ + return CacheEngine.get_cache_block_size(self.cache_config, + self.model_config, + self.parallel_config) + + +def init_worker_distributed_environment( + parallel_config: ParallelConfig, + rank: int, + distributed_init_method: Optional[str] = None, + local_rank: int = -1, + backend: str = "nccl", +) -> None: + """Initialize the distributed environment.""" + init_distributed_environment(parallel_config.world_size, rank, + distributed_init_method, local_rank, backend) + + ensure_model_parallel_initialized(parallel_config.tensor_parallel_size, + parallel_config.pipeline_parallel_size) + + if pymccl_utils.is_initialized(): + pynccl_world_size = pymccl_utils.get_world_size() + if pynccl_world_size != parallel_config.world_size: + raise RuntimeError( + "pynccl is already initialized but the pynccl world " + "size does not match parallel_config.world_size " + f"({pynccl_world_size} vs. {parallel_config.world_size}).") + elif parallel_config.world_size > 1: + # NOTE(woosuk): We don't initialize pynccl process group when world size + # is 1. + # NOTE(kaichao): By default, pynccl is initialized for tp group. + pymccl_utils.init_process_group( + group=get_tensor_model_parallel_cpu_group()) + + # Initialize a custom fast all-reduce implementation. + if not parallel_config.disable_custom_all_reduce: + init_custom_ar() + + # A small all_reduce for warmup. 
+ if backend == "mccl": + torch.distributed.all_reduce(torch.zeros(1).musa()) + if pymccl_utils.is_initialized(): + pymccl_utils.all_reduce(torch.zeros(1).musa()) + else: + torch.distributed.all_reduce(torch.zeros(1).cuda()) + if pymccl_utils.is_initialized(): + pymccl_utils.all_reduce(torch.zeros(1).cuda()) + + +def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype): + # Check if the GPU supports the dtype. + if torch_dtype == torch.bfloat16: + compute_capability = torch.cuda.get_device_capability() + if compute_capability[0] < 8: + gpu_name = torch.cuda.get_device_name() + raise ValueError( + "Bfloat16 is only supported on GPUs with compute capability " + f"of at least 8.0. Your {gpu_name} GPU has compute capability " + f"{compute_capability[0]}.{compute_capability[1]}. " + "You can use float16 instead by explicitly setting the" + "`dtype` flag in CLI, for example: --dtype=half.") + + +def raise_if_cache_size_invalid(num_gpu_blocks, block_size, + max_model_len) -> None: + if num_gpu_blocks <= 0: + raise ValueError("No available memory for the cache blocks. " + "Try increasing `gpu_memory_utilization` when " + "initializing the engine.") + max_seq_len = block_size * num_gpu_blocks + if max_model_len > max_seq_len: + raise ValueError( + f"The model's max seq len ({max_model_len}) " + "is larger than the maximum number of tokens that can be " + f"stored in KV cache ({max_seq_len}). 
Try increasing " + "`gpu_memory_utilization` or decreasing `max_model_len` when " + "initializing the engine.") diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py new file mode 100644 index 0000000..fb32fea --- /dev/null +++ b/vllm/worker/worker_base.py @@ -0,0 +1,146 @@ +import importlib +import os +from abc import ABC, abstractmethod +from typing import Dict, List, Set, Tuple + +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.utils import (enable_trace_function_call_for_thread, + update_environment_variables) + +logger = init_logger(__name__) + + +class WorkerBase(ABC): + """Worker interface that allows vLLM to cleanly separate implementations for + different hardware. + """ + + @abstractmethod + def init_device(self) -> None: + """Initialize device state, such as loading the model or other on-device + memory allocations. + """ + raise NotImplementedError + + @abstractmethod + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of available blocks for the GPU KV cache and + swappable CPU KV cache. + + The implementation may run profiling or other heuristics to determine + the size of caches. + + Returns a Tuple[num_gpu_blocks, num_cpu_blocks], where num_gpu_blocks + are blocks that are "active" on the device and can be appended to. + num_cpu_blocks refers to "swapped" blocks in CPU memory and cannot be + appended to. + """ + raise NotImplementedError + + @abstractmethod + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """Initialize the KV cache with the given size in blocks. 
+ """ + raise NotImplementedError + + @abstractmethod + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + """Executes at least one model step on the given sequences, unless no + sequences are provided.""" + raise NotImplementedError + + @abstractmethod + def get_cache_block_size_bytes(self) -> int: + """Return the size of a single cache block, in bytes. Used in + speculative decoding. + """ + raise NotImplementedError + + @abstractmethod + def add_lora(self, lora_request: LoRARequest) -> bool: + raise NotImplementedError + + @abstractmethod + def remove_lora(self, lora_id: int) -> bool: + raise NotImplementedError + + @abstractmethod + def list_loras(self) -> Set[int]: + raise NotImplementedError + + +class LoraNotSupportedWorkerBase(WorkerBase): + """Partial implementation of WorkerBase that raises exceptions when LoRA + methods are invoked. + """ + + def add_lora(self, lora_request: LoRARequest) -> bool: + raise ValueError(f"{type(self)} does not support LoRA") + + def remove_lora(self, lora_id: int) -> bool: + raise ValueError(f"{type(self)} does not support LoRA") + + def list_loras(self) -> Set[int]: + raise ValueError(f"{type(self)} does not support LoRA") + + +class WorkerWrapperBase: + """ + The whole point of this class is to lazily initialize the worker. + We first instantiate the WorkerWrapper, which remembers the worker module + and class name. Then, when we call `update_environment_variables`, and the + real initialization happens in `init_worker`. 
+ """ + + def __init__(self, + worker_module_name=None, + worker_class_name=None, + trust_remote_code: bool = False) -> None: + self.worker_module_name = worker_module_name + self.worker_class_name = worker_class_name + self.worker = None + if trust_remote_code: + # note: lazy import to avoid importing torch before initializing + from vllm.utils import init_cached_hf_modules + init_cached_hf_modules() + + @staticmethod + def update_environment_variables(envs: Dict[str, str]) -> None: + key = 'CUDA_VISIBLE_DEVICES' + if key in envs and key in os.environ: + # overwriting CUDA_VISIBLE_DEVICES is desired behavior + # suppress the warning in `update_environment_variables` + del os.environ[key] + update_environment_variables(envs) + + def init_worker(self, *args, **kwargs): + """ + Actual initialization of the worker class, and set up + function tracing if required. + Arguments are passed to the worker class constructor. + """ + enable_trace_function_call_for_thread() + + mod = importlib.import_module(self.worker_module_name) + worker_class = getattr(mod, self.worker_class_name) + self.worker = worker_class(*args, **kwargs) + + def execute_method(self, method, *args, **kwargs): + try: + target = self if self.worker is None else self.worker + executor = getattr(target, method) + return executor(*args, **kwargs) + except Exception as e: + # if the driver worker also execute methods, + # exceptions in the rest worker may cause deadlock in rpc like ray + # see https://github.com/vllm-project/vllm/issues/3455 + # print the error and inform the user to solve the error + msg = (f"Error executing method {method}. " + "This might cause deadlock in distributed execution.") + logger.exception(msg) + raise e