[CI] Add unit test framework (#1201)
This PR adds the unit test framework that enables unit tests (ut) for vLLM Ascend. Unit tests run on CPU machines and, like the e2e tests, are triggered once the lint check passes. For unit tests, this PR creates a new folder called `ut` under the `tests` module. The layout of the test files in `ut` should mirror the code layout in `vllm-ascend`, and every file name should start with the `test_` prefix. For example, in this PR `test_ascend_config.py` is added to test `ascend_config.py`. A new file `worker/test_worker_v1.py` is also added as a placeholder; it should hold the unit tests for `vllm-ascend/worker/worker_v1.py`. In addition, a new `fake_weight` folder is added that contains the config.json of `facebook/opt-125m`, so the tests do not have to reach out to Hugging Face every time. TODO: add the remaining unit test files one by one in the future. Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
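As a rough illustration of the convention described above, a new unit test file sits under `tests/ut`, mirrors the path of the module it covers, and starts with the `test_` prefix. The sketch below is hypothetical (the module `vllm_ascend/example.py` and the class name are made up for illustration and are not part of this PR):

# tests/ut/test_example.py -- hypothetical sketch; would mirror vllm_ascend/example.py
import unittest


class TestExample(unittest.TestCase):

    def test_something(self):
        # Unit tests run on CPU machines, so keep NPU-only code paths out of here.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()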
@@ -41,9 +41,9 @@ import os

import pytest

-from tests.long_term.spec_decode.e2e.conftest import \
+from tests.e2e.long_term.spec_decode.e2e.conftest import \
    run_equality_correctness_test
-from tests.long_term.spec_decode.utils import maybe_enable_chunked_prefill
+from tests.e2e.long_term.spec_decode.utils import maybe_enable_chunked_prefill

# main model
# lmsys/vicuna-7b-v1.3 was to be used but it's causing
@@ -41,9 +41,9 @@ import pytest
from vllm.model_executor.layers.vocab_parallel_embedding import \
    pad_vocab_size  # noqa: F401

-from tests.long_term.spec_decode.e2e.conftest import \
+from tests.e2e.long_term.spec_decode.e2e.conftest import \
    run_equality_correctness_test
-from tests.long_term.spec_decode.utils import maybe_enable_chunked_prefill
+from tests.e2e.long_term.spec_decode.utils import maybe_enable_chunked_prefill

# main model
MAIN_MODEL = "JackFram/llama-160m"
@@ -44,9 +44,9 @@ for the target model outputs.

import pytest

-from tests.long_term.spec_decode.e2e.conftest import \
+from tests.e2e.long_term.spec_decode.e2e.conftest import \
    run_equality_correctness_test
-from tests.long_term.spec_decode.utils import maybe_enable_chunked_prefill
+from tests.e2e.long_term.spec_decode.utils import maybe_enable_chunked_prefill


@pytest.mark.parametrize(
@@ -27,8 +27,8 @@ from vllm.spec_decode.multi_step_worker import MultiStepWorker
from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

-from tests.long_term.spec_decode.test_utils import mock_spec_decode_sampler
-from tests.long_term.spec_decode.utils import create_batch, mock_worker
+from tests.e2e.long_term.spec_decode.test_utils import mock_spec_decode_sampler
+from tests.e2e.long_term.spec_decode.utils import create_batch, mock_worker


@pytest.mark.parametrize('queue_size', [4])
@@ -29,7 +29,7 @@ from vllm.sequence import (ExecuteModelRequest, HiddenStates, Logprob,
from vllm.spec_decode.multi_step_worker import MultiStepWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

-from tests.long_term.spec_decode.utils import (
+from tests.e2e.long_term.spec_decode.utils import (
    assert_logprobs_dict_allclose, create_batch,
    create_seq_group_metadata_from_prompts, create_worker,
    patch_execute_model_with_seeds, zero_kv_cache)
@@ -22,7 +22,7 @@ from vllm.sequence import ExecuteModelRequest
from vllm.spec_decode.ngram_worker import NGramWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

-from tests.long_term.spec_decode.utils import (
+from tests.e2e.long_term.spec_decode.utils import (
    create_seq_group_metadata_from_prompts, create_worker)
@@ -35,10 +35,10 @@ from vllm.spec_decode.multi_step_worker import MultiStepWorker
from vllm.spec_decode.spec_decode_worker import (SpecDecodeWorker,
                                                 split_num_cache_blocks_evenly)

-from tests.long_term.spec_decode.test_utils import mock_spec_decode_sampler
-from tests.long_term.spec_decode.utils import (create_batch,
-                                               create_sampler_output_list,
-                                               create_worker, mock_worker)
+from tests.e2e.long_term.spec_decode.test_utils import mock_spec_decode_sampler
+from tests.e2e.long_term.spec_decode.utils import (create_batch,
+                                                   create_sampler_output_list,
+                                                   create_worker, mock_worker)
from vllm_ascend.worker.draft_model_runner import TP1DraftModelRunner
from vllm_ascend.worker.worker import NPUWorker
@@ -1,8 +1,8 @@
import pytest

from tests.conftest import VllmRunner
-from tests.singlecard.test_ilama_lora import (EXPECTED_LORA_OUTPUT, MODEL_PATH,
-                                              do_sample)
+from tests.e2e.singlecard.test_ilama_lora import (EXPECTED_LORA_OUTPUT,
+                                                  MODEL_PATH, do_sample)


@pytest.mark.parametrize("distributed_executor_backend", ["mp"])
@@ -1,191 +0,0 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import pytest

from tests.conftest import VllmRunner
from vllm_ascend.ascend_config import (clear_ascend_config, get_ascend_config,
                                       init_ascend_config)


def _clean_up_ascend_config(func):

    def wrapper(*args, **kwargs):
        clear_ascend_config()
        func(*args, **kwargs)
        clear_ascend_config()

    return wrapper


@_clean_up_ascend_config
def test_run_without_ascend_config():
    with VllmRunner("facebook/opt-125m"):
        ascend_config = get_ascend_config()

        assert not ascend_config.torchair_graph_config.enabled
        assert not ascend_config.torchair_graph_config.use_cached_graph
        assert ascend_config.torchair_graph_config.graph_batch_sizes == []
        assert not ascend_config.torchair_graph_config.graph_batch_sizes_init
        assert not ascend_config.ascend_scheduler_config.enabled
        assert ascend_config.expert_tensor_parallel_size == 0


@_clean_up_ascend_config
def test_run_with_ascend_config():
    if os.getenv("VLLM_USE_V1") == "0":
        pytest.skip("graph only works on v1")

    input_additional_config_1 = {
        "torchair_graph_config": {
            # torchair graph only works with deepseek. The e2e test should be added
            # in multicard test with deepseek models.
            "enabled": False,
            "use_cached_graph": True,
            "graph_batch_sizes": [1, 2, 4, 8],
            "graph_batch_sizes_init": False,
            "enable_multistream_moe": True,
            "enable_multistream_mla": True,
        },
        "ascend_scheduler_config": {
            "enabled": True,
            "enable_chunked_prefill": True,
        },
        "expert_tensor_parallel_size": 1
    }

    # check passed with eager mode
    with VllmRunner("facebook/opt-125m",
                    enforce_eager=True,
                    additional_config=input_additional_config_1):
        ascend_config = get_ascend_config()

        assert not ascend_config.torchair_graph_config.enabled
        assert ascend_config.torchair_graph_config.use_cached_graph
        assert ascend_config.torchair_graph_config.graph_batch_sizes == [
            1, 2, 4, 8
        ]
        assert not ascend_config.torchair_graph_config.graph_batch_sizes_init
        assert ascend_config.torchair_graph_config.enable_multistream_mla
        assert ascend_config.torchair_graph_config.enable_multistream_moe
        assert ascend_config.ascend_scheduler_config.enabled
        assert ascend_config.ascend_scheduler_config.enable_chunked_prefill
        assert ascend_config.expert_tensor_parallel_size == 1


@_clean_up_ascend_config
def test_ascend_config_init_error():
    # ascend_config should be initialized first
    with pytest.raises(RuntimeError):
        _ = get_ascend_config()


@_clean_up_ascend_config
def test_ascend_config_load_error():
    if os.getenv("VLLM_USE_V1") == "0":
        pytest.skip("graph only works on v1")
    # graph_batch_sizes should be list.
    with pytest.raises(TypeError):
        input_additional_config_fake_1 = {
            "torchair_graph_config": {
                "graph_batch_sizes": "fake_size",
            },
        }
        with VllmRunner("facebook/opt-125m",
                        additional_config=input_additional_config_fake_1):
            pass

    # graph_batch_sizes_init should not be True when graph_batch_sizes is not empty.
    with pytest.raises(ValueError):
        input_additional_config_fake_2 = {
            "torchair_graph_config": {
                "graph_batch_sizes": [1, 2, 4, 8],
                "graph_batch_sizes_init": True,
            },
        }
        with VllmRunner("facebook/opt-125m",
                        additional_config=input_additional_config_fake_2):
            pass

    # torchair graph only works with deepseek.
    with pytest.raises(NotImplementedError):
        input_additional_config_fake_2 = {
            "torchair_graph_config": {
                "enabled": True,
            },
        }
        with VllmRunner("facebook/opt-125m",
                        enforce_eager=False,
                        additional_config=input_additional_config_fake_2):
            pass

    # torchair graph should not be enabled with eager mode
    with pytest.raises(RuntimeError):
        input_additional_config_fake_3 = {
            "torchair_graph_config": {
                "enabled": True,
            },
        }
        with VllmRunner("facebook/opt-125m",
                        enforce_eager=True,
                        additional_config=input_additional_config_fake_3):
            pass


@_clean_up_ascend_config
def test_check_ascend_config_v0():
    if os.getenv("VLLM_USE_V1") == "1":
        pytest.skip("graph only works on v1, this is the test for v0")
    with pytest.raises(NotImplementedError):
        input_additional_config_fake_1 = {
            "torchair_graph_config": {
                "enabled": True,
            },
        }
        with VllmRunner("facebook/opt-125m",
                        additional_config=input_additional_config_fake_1):
            pass


@_clean_up_ascend_config
def test_ascend_config_refresh():
    from vllm.config import get_current_vllm_config
    vllm_config = get_current_vllm_config()
    # set additional_config with none
    init_ascend_config(vllm_config)

    input_additional_config = {
        "torchair_graph_config": {
            "enabled": False,
            "use_cached_graph": True,
            "graph_batch_sizes": [1, 2, 4, 8],
            "graph_batch_sizes_init": False,
        },
        "refresh": True,
    }

    # refresh ascend config
    with VllmRunner("facebook/opt-125m",
                    additional_config=input_additional_config):
        ascend_config = get_ascend_config()

        assert not ascend_config.torchair_graph_config.enabled
        assert ascend_config.torchair_graph_config.use_cached_graph
        assert ascend_config.torchair_graph_config.graph_batch_sizes == [
            1, 2, 4, 8
        ]
        assert not ascend_config.torchair_graph_config.graph_batch_sizes_init
tests/ut/fake_weight/config.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
  "_name_or_path": "facebook/opt-125m",
  "activation_dropout": 0.0,
  "activation_function": "relu",
  "architectures": [
    "OPTForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 2,
  "do_layer_norm_before": true,
  "dropout": 0.1,
  "eos_token_id": 2,
  "ffn_dim": 3072,
  "hidden_size": 768,
  "init_std": 0.02,
  "layerdrop": 0.0,
  "max_position_embeddings": 2048,
  "model_type": "opt",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "prefix": "</s>",
  "torch_dtype": "float16",
  "transformers_version": "4.21.0.dev0",
  "use_cache": true,
  "vocab_size": 50272,
  "word_embed_proj_dim": 768
}
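The config above is a local copy of the `facebook/opt-125m` model config, so unit tests can construct a model config without contacting the Hugging Face Hub. A minimal sketch of such offline usage (assuming the `transformers` package is installed; this snippet is illustrative and not part of this PR):

# Hypothetical sketch: load the bundled fake_weight config locally, without network access.
import os

from transformers import AutoConfig

FAKE_WEIGHT_DIR = os.path.join(os.path.dirname(__file__), "fake_weight")

config = AutoConfig.from_pretrained(FAKE_WEIGHT_DIR)  # reads config.json from disk
assert config.model_type == "opt"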
tests/ut/test_ascend_config.py (new file, 244 lines)
@@ -0,0 +1,244 @@
import os
import unittest
from unittest import mock

from transformers import PretrainedConfig
from vllm.config import ModelConfig, VllmConfig

from vllm_ascend.ascend_config import (check_ascend_config,
                                       clear_ascend_config, get_ascend_config,
                                       init_ascend_config)


class TestAscendConfig(unittest.TestCase):

    @staticmethod
    def _clean_up_ascend_config(func):

        def wrapper(*args, **kwargs):
            clear_ascend_config()
            func(*args, **kwargs)
            clear_ascend_config()

        return wrapper

    @_clean_up_ascend_config
    def test_init_ascend_config_without_additional_config(self):
        test_vllm_config = VllmConfig()
        # No additional config given, check the default value here.
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(ascend_config.expert_tensor_parallel_size, 0)
        self.assertIsNone(ascend_config.expert_map_path)

        torchair_graph_config = ascend_config.torchair_graph_config
        self.assertFalse(torchair_graph_config.enabled)
        self.assertFalse(torchair_graph_config.use_cached_graph)
        self.assertEqual(torchair_graph_config.graph_batch_sizes, [])
        self.assertFalse(torchair_graph_config.graph_batch_sizes_init)
        self.assertFalse(torchair_graph_config.enable_multistream_mla)
        self.assertFalse(torchair_graph_config.enable_multistream_moe)
        self.assertTrue(torchair_graph_config.enable_view_optimize)
        self.assertFalse(torchair_graph_config.enable_kv_nz)

        ascend_scheduler_config = ascend_config.ascend_scheduler_config
        self.assertFalse(ascend_scheduler_config.enabled)

    @_clean_up_ascend_config
    def test_init_ascend_config_with_additional_config(self):
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "torchair_graph_config": {
                "enabled": True,
                "use_cached_graph": True,
                "graph_batch_sizes": [1, 2, 4],
                "graph_batch_sizes_init": False,
                "enable_multistream_mla": True,
                "enable_multistream_moe": True,
                "enable_view_optimize": True,
                "enable_kv_nz": True
            },
            "ascend_scheduler_config": {
                "enabled": True
            },
            "expert_tensor_parallel_size": 1,
            "expert_map_path": "test_expert_map_path",
            "refresh": True
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(ascend_config.expert_tensor_parallel_size, 1)
        self.assertEqual(ascend_config.expert_map_path, "test_expert_map_path")

        torchair_graph_config = ascend_config.torchair_graph_config
        self.assertTrue(torchair_graph_config.enabled)
        self.assertTrue(torchair_graph_config.use_cached_graph)
        self.assertEqual(torchair_graph_config.graph_batch_sizes, [1, 2, 4])
        self.assertFalse(torchair_graph_config.graph_batch_sizes_init)
        self.assertTrue(torchair_graph_config.enable_multistream_mla)
        self.assertTrue(torchair_graph_config.enable_multistream_moe)
        self.assertTrue(torchair_graph_config.enable_view_optimize)
        self.assertTrue(torchair_graph_config.enable_kv_nz)

        ascend_scheduler_config = ascend_config.ascend_scheduler_config
        self.assertTrue(ascend_scheduler_config.enabled)

    @_clean_up_ascend_config
    def test_init_ascend_config_with_refresh(self):
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertFalse(ascend_config.torchair_graph_config.enabled)

        test_vllm_config.additional_config = {
            "torchair_graph_config": {
                "enabled": True,
            },
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertFalse(ascend_config.torchair_graph_config.enabled)

        test_vllm_config.additional_config = {
            "torchair_graph_config": {
                "enabled": True,
            },
            "refresh": True,
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertTrue(ascend_config.torchair_graph_config.enabled)

    @_clean_up_ascend_config
    def test_init_ascend_config_with_wrong_input(self):
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "torchair_graph_config": {
                "enabled": True,
                "graph_batch_sizes": "fake_size",
            },
            "refresh": True,
        }
        with self.assertRaises(TypeError):
            init_ascend_config(test_vllm_config)

        test_vllm_config.additional_config = {
            "torchair_graph_config": {
                "enabled": False,
                "graph_batch_sizes": [1, 2, 4, 8],
                "graph_batch_sizes_init": True,
            },
            "refresh": True,
        }
        with self.assertRaises(ValueError):
            init_ascend_config(test_vllm_config)

    @_clean_up_ascend_config
    def test_get_ascend_config(self):
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)

    @_clean_up_ascend_config
    def test_get_ascend_config_without_init(self):
        with self.assertRaises(RuntimeError):
            get_ascend_config()

    @_clean_up_ascend_config
    def test_clear_ascend_config(self):
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)
        clear_ascend_config()
        with self.assertRaises(RuntimeError):
            get_ascend_config()

    @_clean_up_ascend_config
    def test_check_ascend_config_pass(self):
        test_vllm_config = VllmConfig()
        init_ascend_config(test_vllm_config)
        check_ascend_config(test_vllm_config, False)

        # For V1 engine
        with mock.patch.dict(os.environ, {"VLLM_USE_V1": "1"}):
            test_vllm_config.additional_config = {
                "torchair_graph_config": {
                    "enabled": True,
                },
                "refresh": True
            }
            init_ascend_config(test_vllm_config)
            check_ascend_config(test_vllm_config, False)

            test_vllm_config.additional_config = {
                "torchair_graph_config": {
                    "enabled": False,
                },
                "refresh": True
            }
            init_ascend_config(test_vllm_config)
            check_ascend_config(test_vllm_config, False)

    @_clean_up_ascend_config
    def test_check_ascend_config_wrong_case(self):
        test_vllm_config = VllmConfig()
        # For V0 engine
        with mock.patch.dict(os.environ, {"VLLM_USE_V1": "0"}):
            with self.assertRaises(NotImplementedError):
                test_vllm_config.additional_config = {
                    "torchair_graph_config": {
                        "enabled": True,
                    },
                    "refresh": True
                }
                init_ascend_config(test_vllm_config)
                check_ascend_config(test_vllm_config, False)
            with self.assertRaises(NotImplementedError):
                test_vllm_config.additional_config = {
                    "ascend_scheduler_config": {
                        "enabled": True,
                    },
                    "refresh": True
                }
                init_ascend_config(test_vllm_config)
                check_ascend_config(test_vllm_config, True)
        # For V1 engine
        with mock.patch.dict(os.environ, {"VLLM_USE_V1": "1"}):
            # torchair + eager mode
            with self.assertRaises(RuntimeError):
                test_vllm_config.additional_config = {
                    "torchair_graph_config": {
                        "enabled": True,
                    },
                    "refresh": True
                }
                init_ascend_config(test_vllm_config)
                enforce_eager = True
                check_ascend_config(test_vllm_config, enforce_eager)
            # torchair + non deepseek model
            with self.assertRaises(NotImplementedError):
                test_vllm_config.additional_config = {
                    "torchair_graph_config": {
                        "enabled": True,
                    },
                    "refresh": True
                }
                model_path = os.path.join(os.path.dirname(__file__),
                                          "fake_weight")
                fake_model_config = ModelConfig(model=model_path)
                fake_model_config.hf_config = PretrainedConfig()
                fake_model_config.hf_config.model_type = "llama"
                test_vllm_config.model_config = fake_model_config
                init_ascend_config(test_vllm_config)
                check_ascend_config(test_vllm_config, False)
            # aclgraph + deepseek model
            with self.assertRaises(NotImplementedError):
                test_vllm_config.additional_config = {
                    "torchair_graph_config": {
                        "enabled": False,
                    },
                    "refresh": True
                }
                model_path = os.path.join(os.path.dirname(__file__),
                                          "fake_weight")
                fake_model_config = ModelConfig(model=model_path)
                fake_model_config.hf_config = PretrainedConfig()
                fake_model_config.hf_config.model_type = "deepseek"
                test_vllm_config.model_config = fake_model_config
                init_ascend_config(test_vllm_config)
                check_ascend_config(test_vllm_config, False)
tests/ut/worker/test_worker_v1.py (new file, 1 line)
@@ -0,0 +1 @@
# placeholder