[bugfix] restore pr-7029 and fix patch error (#7294)

### What this PR does / why we need it?
This PR restores #7029, which adds W8A8C8 support for dsv3.2/glm5 using
the `lightning_indexer_quant` ops in the pd-mix stage.

The original PR was reverted by #7288 because the patch did not work
with the recompute scheduler.

This PR also fixes the patching issue so that it works correctly with
the recompute scheduler.

### Does this PR introduce _any_ user-facing change?
Yes. To enable lightning-indexer C8 (LI C8) quantization, users need to set
the `enable_sparse_c8` option to `"true"` in `additional_config`.

- vLLM version: v0.17.0
- vLLM main:
4034c3d32e
---------
Signed-off-by: rjg-lyh <1318825571@qq.com>
This commit is contained in:
rjg-lyh
2026-03-16 15:39:42 +08:00
committed by GitHub
parent 9320365dab
commit 4d443b9228
25 changed files with 4309 additions and 78 deletions

View File

@@ -529,6 +529,44 @@ std::vector<at::Tensor> moe_grouped_matmul_meta(
return y;
}
at::Tensor npu_lightning_indexer_quant_meta(
    const at::Tensor &query, const at::Tensor &key, const at::Tensor &weights,
    const at::Tensor &query_dequant_scale, const at::Tensor &key_dequant_scale,
    const c10::optional<at::Tensor> &actual_seq_lengths_query,
    const c10::optional<at::Tensor> &actual_seq_lengths_key,
    const c10::optional<at::Tensor> &block_table, int64_t query_quant_mode, int64_t key_quant_mode,
    c10::string_view layout_query, c10::string_view layout_key, int64_t sparse_count, int64_t sparse_mode)
{
    // Meta (shape-inference) implementation of npu_lightning_indexer_quant:
    // it only derives the output tensor's shape and dtype, no NPU kernel is
    // launched. The dequant-scale, sequence-length, block-table and
    // quant/sparse-mode arguments do not influence the output shape and are
    // therefore intentionally unused here.
    const std::string query_layout_str = std::string(layout_query);
    const std::string key_layout_str = std::string(layout_key);
    constexpr int SIZE = 8;   // inline capacity of the output-shape SmallVector
    constexpr int DIM_0 = 0;
    constexpr int DIM_1 = 1;
    constexpr int DIM_2 = 2;
    at::SmallVector<int64_t, SIZE> output_size;
    // Every dimension of query and key must be strictly positive, otherwise
    // the derived output shape would be degenerate.
    for (size_t i = 0; i < query.sizes().size(); i++) {
        TORCH_CHECK(query.size(i) > 0, "All values within query's shape should be greater "
            "than 0, but shape[", i, "] is ", query.size(i));
    }
    for (size_t i = 0; i < key.sizes().size(); i++) {
        TORCH_CHECK(key.size(i) > 0, "All values within key's shape should be greater "
            "than 0, but shape[", i, "] is ", key.size(i));
    }
    TORCH_CHECK(sparse_count > 0, "sparse count should be greater than 0, but now is ", sparse_count);
    // Head-count axis of key: dim 1 for "TND" layout, dim 2 otherwise
    // (e.g. "BSND", where dims are batch/seq/heads/dim).
    const int64_t key_head_num = (key_layout_str == "TND") ? key.size(DIM_1) : key.size(DIM_2);
    if (query_layout_str == "BSND") {
        // [batch, seq, key_heads, sparse_count]
        output_size = {query.size(DIM_0), query.size(DIM_1), key_head_num, sparse_count};
    } else {
        // e.g. "TND": [tokens, key_heads, sparse_count]
        output_size = {query.size(DIM_0), key_head_num, sparse_count};
    }
    // Output dtype is int32 (at::kInt) — presumably selected indices; the
    // real kernel's output must match this for meta tracing to be valid.
    return at::empty(output_size, query.options().dtype(at::kInt));
}
} // namespace meta
} // namespace vllm_ascend
@@ -576,5 +614,7 @@ TORCH_LIBRARY_IMPL_EXPAND(CONCAT(_C, _ascend), Meta, ops) {
ops.impl("causal_conv1d_fn", &vllm_ascend::meta::causal_conv1d_fn_meta);
// moe_grouped_matmul: shape-only implementation registered for the Meta dispatch key
ops.impl("moe_grouped_matmul", &vllm_ascend::meta::moe_grouped_matmul_meta);
// Lightning indexer quant: Meta-key registration so fake-tensor tracing can infer shapes
ops.impl("npu_lightning_indexer_quant", &vllm_ascend::meta::npu_lightning_indexer_quant_meta);
}
}