[CI]Fixed the spell check function in typos.toml (#6753)

### What this PR does / why we need it?
The incorrect regular expression syntax `.*[UE4M3|ue4m3].*` is a character
class, not an alternation, so it actually ignores all identifiers containing
any of the following characters: `U, E, 4, M, 3, |, u, e, m`

```yaml
extend-ignore-identifiers-re = [".*Unc.*", ".*_thw",
    ".*UE8M0.*", ".*[UE4M3|ue4m3].*", ".*eles.*", ".*fo.*", ".*ba.*",
    ".*ot.*", ".*[Tt]h[rR].*"]
```
===fix===>
```yaml
extend-ignore-identifiers-re = [".*Unc.*", ".*_thw",
    ".*UE8M0.*", ".*(UE4M3|ue4m3).*", ".*eles.*", ".*fo.*", ".*ba.*",
    ".*ot.*", ".*[Tt]h[rR].*"]
```

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main:
9562912cea

Signed-off-by: MrZ20 <2609716663@qq.com>
This commit is contained in:
SILONG ZENG
2026-02-14 11:57:26 +08:00
committed by GitHub
parent 64aea60f2e
commit e2237819a9
31 changed files with 79 additions and 72 deletions

View File

@@ -11,7 +11,7 @@ from vllm_ascend.utils import enable_custom_op
enable_custom_op()
class TestDisptachFFNCombine:
class TestDispatchFFNCombine:
def __init__(self, rank, world_size, port):
self.rank = rank
@@ -208,7 +208,7 @@ class TestDisptachFFNCombine:
def worker(rank: int, world_size: int, port: int, q: mp.SimpleQueue):
op = TestDisptachFFNCombine(rank, world_size, port)
op = TestDispatchFFNCombine(rank, world_size, port)
op.generate_hcom()
out1 = op.run_tensor_list()
q.put(out1)

View File

@@ -11,7 +11,7 @@ from vllm_ascend.utils import enable_custom_op
enable_custom_op()
class TestDisptachFFNCombine:
class TestDispatchFFNCombine:
def __init__(self, rank, world_size, port):
self.rank = rank
@@ -208,7 +208,7 @@ class TestDisptachFFNCombine:
def worker(rank: int, world_size: int, port: int, q: mp.SimpleQueue):
op = TestDisptachFFNCombine(rank, world_size, port)
op = TestDispatchFFNCombine(rank, world_size, port)
op.generate_hcom()
out1 = op.run_tensor_list()
q.put(out1)

View File

@@ -124,10 +124,10 @@ def create_test_data(
logits = torch.randn(num_reqs, vocab_size, device=device, dtype=dtype)
repetiton_penalty = torch.ones(num_reqs, device=device, dtype=torch.float32)
repetition_penalty = torch.ones(num_reqs, device=device, dtype=torch.float32)
for i in range(num_reqs):
if torch.rand(1) > 0.3:
repetiton_penalty[i] = torch.rand(1, device=device).item() * 0.8 + 0.6
repetition_penalty[i] = torch.rand(1, device=device).item() * 0.8 + 0.6
frequency_penalty = torch.zeros(num_reqs, device=device, dtype=torch.float32)
for i in range(num_reqs):
@@ -168,7 +168,7 @@ def create_test_data(
output_bin_counts[state_idx, token] = count
sampling_metadata = SamplingMetadata(
repetition_penalty=repetiton_penalty,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
temperature=temperature,
@@ -217,4 +217,3 @@ def test_apply_penalties_and_temperature(
atol = 1e-02
rtol = 1e-02
assert torch.allclose(logits_triton, logits_pytorch_result, atol=atol, rtol=rtol)