[build-system]
# Build-time requirements. Should be mirrored in requirements.txt.
requires = [
    "attrs",
    "cmake>=3.26",
    "decorator",
    "einops",
    "googleapis-common-protos",
    "numpy<2.0.0",
    "packaging",
    "pip",
    "pybind11",
    "pyyaml",
    "scipy",
    "pandas",
    "pandas-stubs",
    "psutil",
    "setuptools>=64",
    "setuptools-scm>=8",
    "transformers<=4.57.1",
    "torch-npu==2.9.0",
    "torch==2.9.0",
    "torchvision",
    "wheel",
    "msgpack",
    "quart",
    "numba",
    "xgrammar>=0.1.30",
    "fastapi<0.124.0",
    # Required to avoid numpy version conflict with vllm
    "opencv-python-headless<=4.11.0.86",
    "compressed_tensors>=0.11.0",
    "arctic-inference==0.1.1",
    "triton-ascend==3.2.0",
]
build-backend = "setuptools.build_meta"

[tool.pymarkdown]
# Per-rule pymarkdown overrides, sorted by rule id; the original rule
# name is noted on each line.
plugins.md004.style = "sublist"              # ul-style
plugins.md007.indent = 4                     # ul-indent
plugins.md007.start_indented = true         # ul-indent
plugins.md013.enabled = false                # line-length
plugins.md024.allow_different_nesting = true # no-duplicate-headers
plugins.md029.enabled = false                # ol-prefix
plugins.md033.enabled = false                # inline-html
plugins.md041.enabled = false                # first-line-h1
plugins.md046.enabled = false                # code-block-style
[tool.ruff]
# TODO: according to PEP8, there should be 80 characters per line
line-length = 120
# Folders/files still to be converted to ruff format; entries are removed
# from this list as they are migrated (the "(N)" markers track the
# migration batches).
exclude = [
    "tests/**",
    # (3)
    "vllm_ascend/attention/*.py",
    "vllm_ascend/core/*.py",
    "vllm_ascend/distributed/device_communicators/**",
    "vllm_ascend/distributed/utils.py",
    # (5)
    "vllm_ascend/distributed/kv_transfer/kv_pool/**",
    "vllm_ascend/distributed/kv_transfer/utils/**",
    "vllm_ascend/kv_offload/**",
    "vllm_ascend/lora/**",
    # (6)
    "vllm_ascend/eplb/**",
    "vllm_ascend/model_loader/netloader/**",
    "vllm_ascend/patch/**",
    # (7)
    "vllm_ascend/quantization/**",
    "vllm_ascend/sample/*.py",
    "vllm_ascend/worker/v2/**",
    "vllm_ascend/worker/block_table.py",
    "vllm_ascend/worker/npu_input_batch.py",
    # (8)
    "vllm_ascend/ops/__init__.py",
    "vllm_ascend/ops/activation.py",
    "vllm_ascend/ops/flashcomm2_oshard_manager.py",
    "vllm_ascend/ops/layernorm.py",
    "vllm_ascend/ops/mla.py",
    "vllm_ascend/ops/mm_encoder_attention.py",
    "vllm_ascend/ops/register_custom_ops.py",
    "vllm_ascend/ops/rotary_embedding.py",
    "vllm_ascend/ops/vocab_parallel_embedding.py",
    "vllm_ascend/ops/weight_prefetch.py",
    "vllm_ascend/spec_decode/**",
    # (9)
    "vllm_ascend/worker/model_runner_v1.py",
    "vllm_ascend/worker/pcp_utils.py",
    # (10)
    "vllm_ascend/ops/*linear*.py",
    "vllm_ascend/worker/worker.py",
    "vllm_ascend/distributed/parallel_state.py",
    "vllm_ascend/xlite/*.py",
    # (11)
    "vllm_ascend/ops/fused_moe/**",
]

[tool.ruff.lint]
# Rule families to enable.
select = [
    # pycodestyle
    "E",
    # Pyflakes
    "F",
    # pyupgrade
    "UP",
    # flake8-bugbear
    "B",
    # flake8-simplify
    "SIM",
    # isort
    "I",
    # flake8-logging-format
    "G",
]
# Individual rules to suppress.
ignore = [
    # star imports
    "F405", "F403",
    # lambda expression assignment
    "E731",
    # zip without `strict=`
    "B905",
    # Loop control variable not used within loop body
    "B007",
    # f-string format
    "UP032",
    # TODO: FIX ME
    "G004",
    "B904",
    "SIM108",
    "SIM102",
]

[tool.ruff.format]
# Also reformat code examples embedded in docstrings.
docstring-code-format = true