diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml
index 1a342d2..04f589e 100644
--- a/.github/workflows/vllm_ascend_test.yaml
+++ b/.github/workflows/vllm_ascend_test.yaml
@@ -118,7 +118,7 @@ jobs:
           TORCH_DEVICE_BACKEND_AUTOLOAD: 0
         run: |
           export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
-          pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut --ignore=tests/ut/test_platform.py --ignore=tests/ut/ops/test_vocab_parallel_embedding.py
+          pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut --ignore=tests/ut/test_platform.py
 
       - name: Upload coverage to Codecov
         if: ${{ matrix.vllm_version == 'main' }}
diff --git a/tests/ut/ops/test_vocab_parallel_embedding.py b/tests/ut/ops/test_vocab_parallel_embedding.py
index 66163f5..d137985 100644
--- a/tests/ut/ops/test_vocab_parallel_embedding.py
+++ b/tests/ut/ops/test_vocab_parallel_embedding.py
@@ -18,6 +18,7 @@
 from unittest.mock import MagicMock, patch
 
 import torch
 
+from vllm_ascend.ascend_config import init_ascend_config
 from vllm_ascend.ops.vocab_parallel_embedding import (
     AscendLogitsProcessor, AscendParallelLMHead, AscendVocabParallelEmbedding)
@@ -31,6 +32,9 @@ class TestCustomVocabParallelEmbedding(unittest.TestCase):
         self.embedding_dim = 10
         self.org_num_embeddings = 40
         self.padding_size = 8
+        mock_vllm_config = MagicMock()
+        mock_vllm_config.additional_config = {}
+        init_ascend_config(mock_vllm_config)
 
     def _create_layer(self):
         # Patch methods and dependencies for VocabParallelEmbedding