[CI] Fix ci tests (#5769)
@@ -57,6 +57,7 @@ import torch
 import torch.distributed as dist
 
 from sglang.srt.configs.model_config import ModelConfig
+from sglang.srt.distributed.parallel_state import destroy_distributed_environment
 from sglang.srt.entrypoints.engine import _set_envs_and_config
 from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.managers.schedule_batch import Req, ScheduleBatch
@@ -502,8 +503,13 @@ def latency_test(
         for result in result_list:
             fout.write(json.dumps(result) + "\n")
 
+    if server_args.tp_size > 1:
+        destroy_distributed_environment()
+
 
 def main(server_args, bench_args):
+    server_args.cuda_graph_max_bs = max(bench_args.batch_size)
+
     _set_envs_and_config(server_args)
 
     if server_args.model_path:
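Note: the benchmark now explicitly tears down the distributed state after a multi-GPU run. A minimal sketch of the idea, assuming destroy_distributed_environment ultimately wraps torch.distributed.destroy_process_group (an assumption; its body is not shown in this diff):

import torch.distributed as dist

def cleanup_after_benchmark(tp_size: int) -> None:
    # For multi-GPU (tp_size > 1) runs, tear down the default process group
    # so the benchmark process exits cleanly instead of hanging on NCCL state.
    if tp_size > 1 and dist.is_initialized():
        dist.destroy_process_group()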
@@ -8,6 +8,7 @@ from typing import Callable, Optional
 import torch
 from torch.nn import functional as F
 
+from sglang.srt.layers.activation import GeluAndMul, SiluAndMul
 from sglang.srt.layers.moe.topk import select_experts
 
 
@@ -30,7 +31,7 @@ def fused_moe_forward_native(
 ) -> torch.Tensor:
 
     if apply_router_weight_on_input:
-        raise NotImplementedError
+        raise NotImplementedError()
 
     topk_weights, topk_ids = select_experts(
         hidden_states=x,
@@ -75,9 +76,6 @@ def moe_forward_native(
     activation: str = "silu",
     routed_scaling_factor: Optional[float] = None,
 ) -> torch.Tensor:
-
-    from sglang.srt.layers.activation import GeluAndMul, SiluAndMul
-
     topk_weights, topk_ids = select_experts(
         hidden_states=x,
         router_logits=router_logits,
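Note: both native MoE paths route tokens with select_experts and then combine per-expert MLP outputs using the routing weights. A rough, self-contained sketch of that pattern is below; the names (moe_forward_sketch, w_gate_up, w_down) are illustrative and this is not sglang's API, which additionally handles grouped top-k, scaling factors, and GELU activation.

import torch
import torch.nn.functional as F

def moe_forward_sketch(x, router_logits, w_gate_up, w_down, top_k=2):
    # Pick the top-k experts per token and normalize their routing weights.
    probs = F.softmax(router_logits, dim=-1)
    topk_weights, topk_ids = torch.topk(probs, top_k, dim=-1)
    topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)

    out = torch.zeros_like(x)
    for expert_id in range(w_gate_up.shape[0]):
        token_idx, slot = (topk_ids == expert_id).nonzero(as_tuple=True)
        if token_idx.numel() == 0:
            continue
        # SwiGLU-style expert MLP: gate/up projection, SiLU, then down projection.
        gate_up = x[token_idx] @ w_gate_up[expert_id]
        gate, up = gate_up.chunk(2, dim=-1)
        h = F.silu(gate) * up
        out[token_idx] += topk_weights[token_idx, slot].unsqueeze(-1) * (h @ w_down[expert_id])
    return out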
@@ -13,7 +13,16 @@ import triton
 import triton.language as tl
 
 from sglang.srt.layers.moe.topk import select_experts
-from sglang.srt.layers.quantization.fp8_kernel import scaled_fp8_quant
+from sglang.srt.layers.quantization.fp8_kernel import (
+    per_token_group_quant_fp8,
+    scaled_fp8_quant,
+    sglang_per_token_group_quant_fp8,
+)
+from sglang.srt.layers.quantization.int8_kernel import (
+    per_token_group_quant_int8,
+    per_token_quant_int8,
+    sglang_per_token_group_quant_int8,
+)
 from sglang.srt.utils import (
     direct_register_custom_op,
     get_bool_env_var,
@@ -746,21 +755,6 @@ def invoke_fused_moe_kernel(
     block_shape: Optional[List[int]] = None,
     no_combine: bool = False,
 ) -> None:
-    from sglang.srt.layers.quantization.int8_kernel import (
-        per_token_group_quant_int8,
-        per_token_quant_int8,
-    )
-
-    if _is_cuda:
-        from sglang.srt.layers.quantization.fp8_kernel import (
-            sglang_per_token_group_quant_fp8,
-        )
-        from sglang.srt.layers.quantization.int8_kernel import (
-            sglang_per_token_group_quant_int8,
-        )
-    else:
-        from sglang.srt.layers.quantization.fp8_kernel import per_token_group_quant_fp8
-
 
     assert topk_weights.stride(1) == 1
     assert sorted_token_ids.stride(0) == 1
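Note: the second hunk removes per-call imports from invoke_fused_moe_kernel; the quantization helpers are now bound once at module import time (first hunk). A generic sketch of resolving an optional fast backend once at import time rather than inside the hot path; "triton" is only used here as a stand-in optional dependency:

import importlib.util

# Resolve the optional fast backend once, at import time, instead of inside
# every kernel launch.
if importlib.util.find_spec("triton") is not None:
    import triton

    def backend_name() -> str:
        return f"triton {triton.__version__}"

else:

    def backend_name() -> str:
        return "pure-python fallback"


def hot_path() -> str:
    # The hot path no longer does any import work per call.
    return backend_name()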
@@ -91,11 +91,14 @@ from sglang.srt.utils import (
     set_cuda_arch,
 )
 
-logger = logging.getLogger(__name__)
+# Use a small KV cache pool size for tests in CI
+SGLANG_CI_SMALL_KV_SIZE = os.getenv("SGLANG_CI_SMALL_KV_SIZE", None)
 
 # Detect stragger ranks in model loading
 UNBALANCED_MODEL_LOADING_TIMEOUT_S = 300
 
+logger = logging.getLogger(__name__)
+
 
 class ModelRunner:
     """ModelRunner runs the forward passes of the models."""
@@ -177,7 +180,7 @@ class ModelRunner:
         if _ENABLE_JIT_DEEPGEMM:
             update_deep_gemm_config(gpu_id, server_args)
 
-        # If it is a draft model tp_group can be different.
+        # If it is a draft model, tp_group can be different
         self.initialize(min_per_gpu_memory)
 
     def initialize(self, min_per_gpu_memory: float):
@@ -230,7 +233,8 @@ class ModelRunner:
 
         if server_args.attention_backend is None:
             """
-            We auto select the fastest attention backend according to the current offering
+            Auto select the fastest attention backend.
+
             1. Models with MHA Architecture (e.g: Llama, QWen)
                 1.1 We will turn on FA3 on hopper unless user use spec decode with topk > 1 or page_size > 1.
                 1.2 In other cases, we will use flashinfer if available, otherwise use triton.
@@ -240,6 +244,7 @@ class ModelRunner:
             """
 
             if not self.use_mla_backend:
+                # MHA architecture
                 if (
                     is_hopper_with_cuda_12_3()
                     and is_no_spec_infer_or_topk_one(server_args)
@@ -251,6 +256,7 @@ class ModelRunner:
                         "flashinfer" if is_flashinfer_available() else "triton"
                     )
             else:
+                # MLA architecture
                 if is_hopper_with_cuda_12_3():
                     server_args.attention_backend = "fa3"
                 else:
@@ -259,7 +265,6 @@ class ModelRunner:
                 f"Attention backend not set. Use {server_args.attention_backend} backend by default."
             )
         elif self.use_mla_backend:
-            # TODO: add MLA optimization on CPU
             if server_args.device != "cpu":
                 if server_args.attention_backend in [
                     "flashinfer",
@@ -275,7 +280,7 @@ class ModelRunner:
                     f"Invalid attention backend for MLA: {server_args.attention_backend}"
                 )
             else:
-                raise ValueError(f"MLA optimization not supported on CPU.")
+                raise ValueError("MLA optimization not supported on CPU.")
 
         if (
             server_args.attention_backend == "fa3"
@@ -310,9 +315,6 @@ class ModelRunner:
             )
             server_args.chunked_prefill_size = -1
 
-        if server_args.enable_deepep_moe:
-            logger.info(f"DeepEP is turned on. DeepEP mode: {server_args.deepep_mode}")
-
         if not self.use_mla_backend:
             server_args.disable_chunked_prefix_cache = True
         elif self.page_size > 1:
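Note: the docstring added above spells out the backend auto-selection rules. Below is a small illustrative restatement of those rules as a standalone function; it is not the actual ModelRunner code, and since the MLA fallback branch is cut off by the hunk, the final "triton" is an assumption.

def pick_attention_backend(
    use_mla: bool,
    hopper_cuda_12_3: bool,
    spec_topk_is_one: bool,
    page_size: int,
    flashinfer_available: bool,
) -> str:
    # MHA models (e.g. Llama, Qwen): FA3 on Hopper unless spec decode with
    # topk > 1 or page_size > 1; otherwise flashinfer if available, else triton.
    if not use_mla:
        if hopper_cuda_12_3 and spec_topk_is_one and page_size == 1:
            return "fa3"
        return "flashinfer" if flashinfer_available else "triton"
    # MLA models: FA3 on Hopper, otherwise (assumed) triton.
    return "fa3" if hopper_cuda_12_3 else "triton"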
@@ -260,7 +260,6 @@ class Llama4Attention(nn.Module):
         if self.rotary_emb is not None:
             q_view, k_view = qk.split([self.q_size, self.kv_size], dim=-1)
             q_out_unused, k_out_unused = self.rotary_emb(positions, q_view, k_view)
-            assert (q_out_unused is q_view) and (k_out_unused is k_view)
             del q_view, k_view, q_out_unused, k_out_unused
 
         if self.qk_norm is not None:
@@ -201,7 +201,7 @@ class ServerArgs:
         # Expert parallelism
         if self.enable_ep_moe:
             self.ep_size = self.tp_size
-            logger.info(
+            logger.warning(
                 f"EP MoE is enabled. The expert parallel size is adjusted to be the same as the tensor parallel size[{self.tp_size}]."
             )
 
@@ -243,19 +243,19 @@ class ServerArgs:
                 self.chunked_prefill_size = 2048
             else:
                 self.chunked_prefill_size = 8192
 
        assert self.chunked_prefill_size % self.page_size == 0
 
        assert self.moe_dense_tp_size in {
            1,
            None,
-        }, f"moe_dense_tp_size only support 1 and None currently"
+        }, "moe_dense_tp_size only support 1 and None currently"
 
         if self.attention_backend == "flashmla":
             logger.warning(
                 "FlashMLA only supports a page_size of 64, change page_size to 64."
             )
             self.page_size = 64
 
         # Set cuda graph max batch size
         if self.cuda_graph_max_bs is None:
             # Based on detailed statistics, when serving TP1/TP2 models on lower-end GPUs with HBM<25G, you can either disable cuda graph or set `cuda_graph_max_bs` to a very small value to reduce the memory overhead of creating cuda graphs, with almost no impact on performance. However, when serving models with TP4 or TP8, we need to enable cuda graph to maintain high performance. In this case, we can set `cuda_graph_max_bs` to 80 (half of the default value 160) to reduce the memory overhead of creating cuda graphs. Looking at the logs from TP4 serving of qwen2-72b, a value of 80 is sufficient and can reduce the memory overhead of creating cuda graphs on lower-end GPUs compared to the original 160, avoiding OOM issues.
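Note: the long comment above justifies the default cuda_graph_max_bs. A hedged sketch of that heuristic as a helper follows; the 25 GB, 80, and 160 values mirror the comment, while the very small TP1/TP2 cap (8 here) is illustrative.

def default_cuda_graph_max_bs(hbm_gb: float, tp_size: int) -> int:
    # On lower-end GPUs (HBM < ~25 GB), keep CUDA graph capture cheap:
    # a very small cap for TP1/TP2, 80 for TP4/TP8.
    # Otherwise fall back to the default of 160.
    if hbm_gb < 25:
        return 8 if tp_size < 4 else 80
    return 160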
@@ -270,6 +270,7 @@ class ServerArgs:
             self.attention_backend = "torch_native"
             self.sampling_backend = "pytorch"
 
+        # Set kernel backends
         if self.sampling_backend is None:
             self.sampling_backend = (
                 "flashinfer" if is_flashinfer_available() else "pytorch"
@@ -297,8 +298,8 @@ class ServerArgs:
                 f"DP attention is enabled. The chunked prefill size is adjusted to {self.chunked_prefill_size} to avoid MoE kernel issues. "
             )
 
-        self.enable_sp_layernorm = False
         # DeepEP MoE
+        self.enable_sp_layernorm = False
         if self.enable_deepep_moe:
             if self.deepep_mode == "auto":
                 assert (
@@ -308,7 +309,7 @@ class ServerArgs:
             self.enable_sp_layernorm = (
                 self.dp_size < self.tp_size if self.enable_dp_attention else True
             )
-            logger.info(
+            logger.warning(
                 f"DeepEP MoE is enabled. The expert parallel size is adjusted to be the same as the tensor parallel size[{self.tp_size}]."
             )
 
@@ -317,14 +318,11 @@ class ServerArgs:
             # NEXTN shares the same implementation of EAGLE
             self.speculative_algorithm = "EAGLE"
 
-        if (
-            self.speculative_algorithm == "EAGLE"
-            or self.speculative_algorithm == "EAGLE3"
-        ):
+        if self.speculative_algorithm in ("EAGLE", "EAGLE3"):
             if self.max_running_requests is None:
                 self.max_running_requests = 48
             self.disable_overlap_schedule = True
-            logger.info(
+            logger.warning(
                 "Overlap scheduler is disabled because of using "
                 "eagle speculative decoding."
             )
@@ -343,7 +341,7 @@ class ServerArgs:
 
             if self.page_size > 1 and self.speculative_eagle_topk > 1:
                 self.speculative_eagle_topk = 1
-                logger.info(
+                logger.warning(
                     "speculative_eagle_topk is adjusted to 1 when page_size > 1"
                 )
 
@@ -351,7 +349,7 @@ class ServerArgs:
                 self.speculative_eagle_topk == 1
                 and self.speculative_num_draft_tokens != self.speculative_num_steps + 1
             ):
-                logger.info(
+                logger.warning(
                     "speculative_num_draft_tokens is adjusted to speculative_num_steps + 1 when speculative_eagle_topk == 1"
                 )
                 self.speculative_num_draft_tokens = self.speculative_num_steps + 1
@@ -381,18 +379,6 @@ class ServerArgs:
             self.disable_radix_cache = True
             logger.warning("KV cache is forced as chunk cache for decode server")
 
-        if self.enable_memory_saver:
-            try:
-                import torch_memory_saver
-            except ImportError:
-                logger.warning(
-                    "enable_memory_saver is enabled, but "
-                    "torch-memory-saver is not installed. Please install it "
-                    "via `pip3 uninstall torch-memory-saver`. "
-                    "For normal operation, it will be disabled."
-                )
-                raise
-
         os.environ["SGLANG_ENABLE_TORCH_COMPILE"] = (
             "1" if self.enable_torch_compile else "0"
         )
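Note: throughout these hunks, ServerArgs mutates dependent fields after construction and now logs those silent adjustments at warning level instead of info. A tiny illustrative stand-in for that pattern (not the real ServerArgs class):

import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)

@dataclass
class TinyArgs:
    page_size: int = 1
    speculative_eagle_topk: int = 4

    def __post_init__(self):
        # Settings that are silently corrected get a warning, so users notice
        # that their requested configuration was adjusted.
        if self.page_size > 1 and self.speculative_eagle_topk > 1:
            self.speculative_eagle_topk = 1
            logger.warning("speculative_eagle_topk is adjusted to 1 when page_size > 1")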
@@ -6,7 +6,9 @@ try:
     import torch_memory_saver
 
     _primary_memory_saver = torch_memory_saver.TorchMemorySaver()
-except ImportError:
+    import_error = None
+except ImportError as e:
+    import_error = e
     pass
 
 logger = logging.getLogger(__name__)
@@ -15,6 +17,13 @@ logger = logging.getLogger(__name__)
 class TorchMemorySaverAdapter(ABC):
     @staticmethod
     def create(enable: bool):
+        if enable and import_error is not None:
+            logger.warning(
+                "enable_memory_saver is enabled, but "
+                "torch-memory-saver is not installed. Please install it "
+                "via `pip3 install torch-memory-saver`. "
+            )
+            raise import_error
         return (
             _TorchMemorySaverAdapterReal() if enable else _TorchMemorySaverAdapterNoop()
         )
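Note: the adapter now records the ImportError at import time and re-raises it only when enable_memory_saver is actually requested, together with an install hint. A generic sketch of this deferred-import-error pattern, using an arbitrary optional package as a stand-in:

import logging

logger = logging.getLogger(__name__)

try:
    import lz4  # any optional dependency works the same way

    _import_error = None
except ImportError as e:
    _import_error = e


def create_compressor(enable: bool):
    # Only fail if the optional feature is actually requested.
    if enable and _import_error is not None:
        logger.warning("compression requested but the optional package is missing")
        raise _import_error
    return lz4 if enable else None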
@@ -1944,7 +1944,7 @@ def get_local_ip_by_remote() -> str:
         s.connect(("2001:4860:4860::8888", 80))  # Doesn't need to be reachable
         return s.getsockname()[0]
     except Exception:
-        raise ValueError(f"Can not get local ip")
+        raise ValueError("Can not get local ip")
 
 
 def is_page_size_one(server_args):
@@ -33,33 +33,44 @@ from sglang.srt.utils import (
 from sglang.test.run_eval import run_eval
 from sglang.utils import get_exception_traceback
 
-DEFAULT_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
-DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST = "neuralmagic/Meta-Llama-3-8B-Instruct-FP8"
-DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
+# General test models
+DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
+DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
+DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST = "Qwen/Qwen1.5-MoE-A2.7B"
+
+# MLA test models
+DEFAULT_MLA_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
+DEFAULT_MLA_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
+DEFAULT_MODEL_NAME_FOR_TEST_MLA = "lmsys/sglang-ci-dsv3-test"
+DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = "lmsys/sglang-ci-dsv3-test-NextN"
+
+# FP8 models
+DEFAULT_MODEL_NAME_FOR_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
+DEFAULT_MODEL_NAME_FOR_ACCURACY_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
+DEFAULT_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST_FP8 = (
     "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
 )
-DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST = (
+DEFAULT_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_FP8 = (
     "nvidia/Llama-3.1-8B-Instruct-FP8"
 )
 
-DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
+# EAGLE
+DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST = "meta-llama/Llama-2-7b-chat-hf"
+DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST = "lmsys/sglang-EAGLE-llama2-chat-7B"
 DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3 = "jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B"
-DEFAULT_MODEL_NAME_FOR_TEST_MLA = "lmsys/sglang-ci-dsv3-test"
-DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = "lmsys/sglang-ci-dsv3-test-NextN"
-DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
 
+# Other use cases
 DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION = (
     "meta-llama/Llama-4-Scout-17B-16E-Instruct"
 )
-DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST = "Qwen/Qwen1.5-MoE-A2.7B"
 DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST = "Alibaba-NLP/gte-Qwen2-1.5B-instruct"
-DEFAULT_MLA_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
-DEFAULT_MLA_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
 DEFAULT_REASONING_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
 DEFAULT_AWQ_MOE_MODEL_NAME_FOR_TEST = (
     "hugging-quants/Mixtral-8x7B-Instruct-v0.1-AWQ-INT4"
 )
+DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH = 1000
 
+# Nightly tests
 DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1 = "meta-llama/Llama-3.1-8B-Instruct,mistralai/Mistral-7B-Instruct-v0.3,deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct,google/gemma-2-27b-it"
 DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2 = "meta-llama/Llama-3.1-70B-Instruct,mistralai/Mixtral-8x7B-Instruct-v0.1,Qwen/Qwen2-57B-A14B-Instruct"
 DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8,neuralmagic/Mistral-7B-Instruct-v0.3-FP8,neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8,neuralmagic/gemma-2-2b-it-FP8"
@@ -68,12 +79,11 @@ DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_QUANT_TP1 = "hugging-quants/Meta-Llama-3.1-8
 DEFAULT_SMALL_MODEL_NAME_FOR_TEST_QWEN = "Qwen/Qwen2.5-1.5B-Instruct"
 DEFAULT_SMALL_VLM_MODEL_NAME = "Qwen/Qwen2-VL-2B"
 
-DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST = "meta-llama/Llama-2-7b-chat-hf"
-DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST = "lmsys/sglang-EAGLE-llama2-chat-7B"
-
 DEFAULT_IMAGE_URL = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
 DEFAULT_VIDEO_URL = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
 
-DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH = 1000
-
 
 def is_in_ci():
     """Return whether it is in CI runner."""
@@ -499,7 +509,7 @@ def run_unittest_files(files: List[TestFile], timeout_per_file: float):
     tic = time.time()
     success = True
 
-    for file in files:
+    for i, file in enumerate(files):
         filename, estimated_time = file.name, file.estimated_time
         process = None
 
@@ -507,7 +517,10 @@ def run_unittest_files(files: List[TestFile], timeout_per_file: float):
            nonlocal process
 
            filename = os.path.join(os.getcwd(), filename)
-           print(f".\n.\nBegin:\npython3 {filename}\n.\n.\n", flush=True)
+           print(
+               f".\n.\nBegin ({i}/{len(files)}):\npython3 {filename}\n.\n.\n",
+               flush=True,
+           )
            tic = time.time()
 
            process = subprocess.Popen(
@@ -517,7 +530,7 @@ def run_unittest_files(files: List[TestFile], timeout_per_file: float):
            elapsed = time.time() - tic
 
            print(
-               f".\n.\nEnd:\n{filename=}, {elapsed=:.0f}, {estimated_time=}\n.\n.\n",
+               f".\n.\nEnd ({i}/{len(files)}):\n{filename=}, {elapsed=:.0f}, {estimated_time=}\n.\n.\n",
                flush=True,
            )
            return process.returncode
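Note: the runner now numbers each test file in its Begin/End log lines (the counter i is zero-based as written). A minimal sketch of the overall loop, using subprocess.run with a per-file timeout instead of the Popen plumbing in the real helper:

import subprocess
import sys
import time

def run_files(files, timeout_per_file):
    # One subprocess per test file, with a per-file timeout and a progress
    # counter in the log output.
    for i, filename in enumerate(files):
        print(f"Begin ({i}/{len(files)}): python3 {filename}", flush=True)
        tic = time.time()
        ret = subprocess.run([sys.executable, filename], timeout=timeout_per_file).returncode
        print(f"End ({i}/{len(files)}): {filename=}, elapsed={time.time() - tic:.0f}s", flush=True)
        if ret != 0:
            return ret
    return 0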