Files
xc-llm-ascend/pytest.ini
wangxiyuan 9c7428b3d5 [CI] enable custom ops build (#466)
### What this PR does / why we need it?
This PR enables the custom ops build by default.

### Does this PR introduce _any_ user-facing change?

Yes, installing vllm-ascend from source will now trigger the custom ops
build step.

### How was this patch tested?
By image build and e2e CI

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
2025-04-12 10:24:53 +08:00

67 lines
2.6 KiB
INI

[pytest]
minversion = 6.0
norecursedirs =
vllm-empty/tests/prefix_caching
vllm-empty/tests/weight_loading
vllm-empty/tests/samplers
vllm-empty/tests/kernels
vllm-empty/tests/quantization
vllm-empty/tests/tool_use
vllm-empty/tests/runai_model_streamer_test
vllm-empty/tests/kv_transfer
vllm-empty/tests/plugins
vllm-empty/tests/plugins_tests
vllm-empty/tests/prompt_adapter
vllm-empty/tests/compile
vllm-empty/tests/lora
vllm-empty/tests/models
vllm-empty/tests/mistral_tool_use
vllm-empty/tests/standalone_tests
vllm-empty/tests/async_engine
vllm-empty/tests/mq_llm_engine
vllm-empty/tests/tokenization
vllm-empty/tests/core
vllm-empty/tests/tracing
vllm-empty/tests/engine
vllm-empty/tests/tensorizer_loader
vllm-empty/tests/entrypoints
vllm-empty/tests/model_executor
vllm-empty/tests/encoder_decoder
vllm-empty/tests/v1
vllm-empty/tests/spec_decode
vllm-empty/tests/multi_step
vllm-empty/tests/vllm_test_utils
vllm-empty/tests/tpu
vllm-empty/tests/distributed
vllm-empty/tests/basic_correctness
vllm-empty/tests/worker
vllm-empty/tests/metrics
vllm-empty/tests/neuron
; fastsafetensors does not support NPU yet
vllm-empty/tests/fastsafetensors_loader
addopts = --ignore=vllm-empty/tests/test_utils.py
--ignore=vllm-empty/tests/test_config.py
--ignore=vllm-empty/tests/test_scalartype.py
--ignore=vllm-empty/tests/test_version.py
--ignore=vllm-empty/tests/test_embedded_commit.py
--ignore=vllm-empty/tests/test_inputs.py
--ignore=vllm-empty/tests/test_sharded_state_loader.py
--ignore=vllm-empty/tests/test_logger.py
--ignore=vllm-empty/tests/test_logits_processor.py
--ignore=vllm-empty/tests/test_regression.py
--ignore=vllm-empty/tests/models/decoder_only/language/test_aqlm.py
--ignore=vllm-empty/tests/models/decoder_only/language/test_gptq_marlin.py
--ignore=vllm-empty/tests/models/decoder_only/language/test_gptq_marlin_24.py
--ignore=vllm-empty/tests/multimodal/test_processing.py
--ignore=vllm-empty/tests/multimodal/test_processor_kwargs.py
--ignore=vllm-empty/tests/multimodal/test_utils.py
; Both the V1 and V0 engines will be run in detokenizer/test_stop_reason,
; VLLM_USE_V1=1 is not supported with device type=npu.
--ignore=vllm-empty/tests/detokenizer/test_stop_reason.py
; OOM (out of memory) on llama-2-7b-hf
--ignore=vllm-empty/tests/detokenizer/test_stop_strings.py
testpaths =
vllm-empty/tests