[Core][Misc] Clean up ProfileExecuteDuration (#6461)

### What this PR does / why we need it?
This PR removes the custom `ProfileExecuteDuration` utility and its
usages across the codebase. This utility was used for profiling
execution duration of different stages in the inference process. It is
replaced by the standard `vllm.v1.utils.record_function_or_nullcontext`,
which integrates with PyTorch's profiler.

This change simplifies the code by removing a custom implementation in
favor of an upstream utility, improving maintainability. Associated
documentation and tests for `ProfileExecuteDuration` are also removed.
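
For context, the replacement helper is used as a plain context manager. A minimal sketch, assuming the upstream semantics (a named `torch.profiler.record_function` range while a profiler trace is being captured, and a no-op context otherwise); the `run_step` wrapper here is a hypothetical stand-in, not code from this PR:

```python
# Sketch only: record_function_or_nullcontext comes from upstream vLLM.
from vllm.v1.utils import record_function_or_nullcontext


def run_step(model, inputs):
    # "forward" appears as a named range in PyTorch profiler traces
    # when profiling is active; otherwise this is a null context.
    with record_function_or_nullcontext("forward"):
        return model(inputs)
```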

### Does this PR introduce _any_ user-facing change?
Yes. The `VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE` environment variable is removed, and the per-stage execute-duration log lines it enabled are no longer emitted.

### How was this patch tested?
CI passed. The change is a cleanup that replaces a custom implementation with a standard
upstream utility, so existing tests cover the behavior. The tests dedicated to the removed
`ProfileExecuteDuration` feature are deleted along with it.

Related RFC: #5304

- vLLM version: v0.14.1
- vLLM main:
dc917cceb8

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Author: wangxiyuan
Date: 2026-02-01 20:06:01 +08:00 (committed by GitHub)
parent 775fbc4cd2 · commit b4aafd4293
10 changed files with 12 additions and 244 deletions

View File

@@ -60,11 +60,6 @@ env_variables: dict[str, Callable[[], Any]] = {
     # In this case, developers need to set this value to "0.9.0" to make sure
     # that the correct package is installed.
     "VLLM_VERSION": lambda: os.getenv("VLLM_VERSION", None),
-    # Whether to enable the model execute time observe profile. Disable it when
-    # running vllm ascend in production environment.
-    "VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE": lambda: bool(
-        int(os.getenv("VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE", "0"))
-    ),
     # Some models are optimized by vllm ascend. While in some case, e.g. rlhf
     # training, the optimized model may not be suitable. In this case, set this
     # value to False to disable the optimized model.

View File

@@ -98,10 +98,6 @@ SERVICE_PROFILING_SYMBOLS_YAML = """
       name: _prepare_inputs
     domain: ModelExecute
-  - symbol: vllm_ascend.utils:ProfileExecuteDuration.capture_async
-    min_version: "0.9.1"
-    handler: msserviceprofiler.vllm_profiler.vllm_v1.model_hookers:capture_async
   # ===== Request Lifecycle =====
   - symbol: vllm.v1.engine.async_llm:AsyncLLM.add_request
     min_version: "0.9.1"

View File

@@ -10,6 +10,7 @@ from vllm.v1.attention.backends.utils import CommonAttentionMetadata
 from vllm.v1.core.sched.output import SchedulerOutput
 from vllm.v1.sample.metadata import SamplingMetadata
 from vllm.v1.spec_decode.eagle import PADDING_SLOT_ID
+from vllm.v1.utils import record_function_or_nullcontext
 from vllm_ascend.ascend_forward_context import set_ascend_forward_context
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
@@ -17,7 +18,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import ACLGraphWrapper
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
-from vllm_ascend.utils import ProfileExecuteDuration, lmhead_tp_enable, vllm_version_is
+from vllm_ascend.utils import lmhead_tp_enable, vllm_version_is


 class MtpProposer(EagleProposer):
@@ -311,7 +312,7 @@ class MtpProposer(EagleProposer):
                 batch_descriptor=batch_descriptor,
                 num_actual_tokens=num_tokens,
                 is_draft_model=True):
-            with ProfileExecuteDuration().capture_async('mtp_forward'):
+            with record_function_or_nullcontext('mtp_forward'):
                 model_kwargs = {}
                 model_kwargs["attn_metadata"] = attn_metadata
                 input_ids = self.input_ids[:num_input_tokens]

View File

@@ -23,7 +23,7 @@ import atexit
 import functools
 import math
 import os
-from contextlib import contextmanager, nullcontext
+from contextlib import nullcontext
 from enum import Enum
 from functools import lru_cache
 from threading import Lock
@@ -32,7 +32,6 @@ from typing import TYPE_CHECKING, Any
 import torch
 import torch_npu  # noqa: F401
 from packaging.version import InvalidVersion, Version
-from torch_npu.npu.streams import Event
 from vllm.logger import logger
 from vllm.sequence import IntermediateTensors
@@ -562,53 +561,6 @@ def dispose_tensor(x: torch.Tensor):
     x.set_(torch.empty((0,), device=x.device, dtype=x.dtype))


-class ProfileExecuteDuration:
-    _instance = None
-    _observations: list[tuple[str, Event, Event]] = []
-    _lock = Lock()
-
-    def __new__(cls):
-        with cls._lock:
-            if cls._instance is None:
-                cls._instance = super().__new__(cls)
-                atexit.register(cls._instance.destroy)
-            return cls._instance
-
-    def destroy(self):
-        with self._lock:
-            self._observations.clear()
-
-    @contextmanager
-    def capture_async(self, duration_tag: str):
-        if not envs_ascend.VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE:
-            yield
-            return
-
-        observe_start = Event(enable_timing=True)
-        observe_start.record()
-        try:
-            yield
-        finally:
-            observe_end = Event(enable_timing=True)
-            observe_end.record()
-            with self._lock:
-                self._observations.append((duration_tag, observe_start, observe_end))
-
-    def pop_captured_sync(self) -> dict:
-        """Pop and synchronize all events in the observation list"""
-        durations: dict[str, float] = {}
-        if not envs_ascend.VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE:
-            return durations
-
-        while self._observations:
-            with self._lock:
-                tag, observe_start, observe_end = self._observations.pop()
-                observe_end.synchronize()
-                durations[tag] = observe_start.elapsed_time(observe_end)
-
-        return durations


 def register_ascend_customop(vllm_config: VllmConfig | None = None):
     """Register Ascend CustomOP

View File

@@ -72,6 +72,7 @@ from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
 from vllm.v1.spec_decode.ngram_proposer import NgramProposer
 from vllm.v1.spec_decode.suffix_decoding import SuffixDecodingProposer
 from vllm.v1.structured_output.utils import apply_grammar_bitmask
+from vllm.v1.utils import record_function_or_nullcontext
 from vllm.v1.worker.gpu_model_runner import (AsyncGPUModelRunnerOutput,
                                              GPUModelRunner)
 from vllm.v1.worker.kv_connector_model_runner_mixin import KVConnectorOutput
@@ -104,11 +105,11 @@ from vllm_ascend.spec_decode import get_spec_decode_method
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
 from vllm_ascend.spec_decode.medusa_proposer import MedusaProposer
 from vllm_ascend.spec_decode.mtp_proposer import MtpProposer
-from vllm_ascend.utils import (AscendDeviceType, ProfileExecuteDuration,
+from vllm_ascend.utils import (AscendDeviceType,
                                enable_sp, get_ascend_device_type,
                                is_drafter_moe_model, is_moe_model,
                                lmhead_tp_enable, maybe_trans_nz,
-                               set_weight_prefetch_method, vllm_version_is)
+                               set_weight_prefetch_method)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 from vllm_ascend.worker.pcp_utils import PCPManager
@@ -1104,7 +1105,7 @@ class NPUModelRunner(GPUModelRunner):
         ):
             scheduler_output = deepcopy(scheduler_output)

         num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
-        with ProfileExecuteDuration().capture_async("prepare input"):
+        with record_function_or_nullcontext("prepare input"):
             with self.synchronize_input_prep():
                 # Update persistent batch states.
                 self._update_states(scheduler_output)
@@ -1268,7 +1269,7 @@ class NPUModelRunner(GPUModelRunner):
         )

         # Run forward pass
-        with ProfileExecuteDuration().capture_async("forward"):
+        with record_function_or_nullcontext("forward"):
             with (
                 set_ascend_forward_context(
                     attn_metadata,
@@ -1286,7 +1287,7 @@ class NPUModelRunner(GPUModelRunner):
                 hidden_states = self._model_forward(
                     num_tokens_padded, input_ids, positions,
                     intermediate_tensors, inputs_embeds, **model_kwargs)

-        with (ProfileExecuteDuration().capture_async("post process")):
+        with record_function_or_nullcontext("post process"):
             if self.pcp_size > 1:
                 # NOTE we must `slice` hidden_states because pcp_allgather_restore_idx
                 # ignores the padding from CUDA Graph.
@@ -1408,7 +1409,7 @@ class NPUModelRunner(GPUModelRunner):
                 self.input_batch, logits)
             logits = logits.to(self.device).to(logits_dtype)

-        with ProfileExecuteDuration().capture_async("Sample"):
+        with record_function_or_nullcontext("sample_token"):
             sampler_output = self._sample(logits, spec_decode_metadata)

         def propose_draft_token_ids(sampled_token_ids):
@@ -1444,7 +1445,7 @@ class NPUModelRunner(GPUModelRunner):
                 spec_decode_metadata,
             )

-        with ProfileExecuteDuration().capture_async("Draft"):
+        with record_function_or_nullcontext("draft_token"):
             if self.speculative_config:
                 use_padded_batch_for_eagle = self.speculative_config and \
                     self.speculative_config.use_eagle() and \
@@ -1474,15 +1475,6 @@ class NPUModelRunner(GPUModelRunner):
             cudagraph_stats=cudagraph_stats,
         )

-        durations = ProfileExecuteDuration().pop_captured_sync()
-        if durations:
-            dr_str = [
-                f"[{tag}]:{duration:.2f}ms"
-                for tag, duration in durations.items()
-            ]
-            captured_name = "Decode" if self.attn_state == AscendAttentionState.DecodeOnly else "Prefill"
-            logger.info("Profile execute duration [%s]:%s", captured_name,
-                        " ".join(dr_str))
-
         if self.dynamic_eplb:
             self.eplb_updator.forward_end()
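
With the custom timer gone, per-stage timings come from PyTorch profiler traces rather than ad-hoc log lines. A hedged sketch of what that looks like, using standard `torch.profiler` APIs and the stage names from this diff; `fake_step` is a hypothetical stand-in for one model-runner step, assuming `record_function_or_nullcontext` wraps `torch.profiler.record_function` while a trace is active:

```python
import torch
from torch.profiler import ProfilerActivity, profile, record_function


def fake_step() -> torch.Tensor:
    # Hypothetical stand-in for one model-runner step; the real code wraps
    # these stages with record_function_or_nullcontext using the same tags.
    with record_function("prepare input"):
        x = torch.randn(64, 64)
    with record_function("forward"):
        y = x @ x
    with record_function("post process"):
        return y.relu()


with profile(activities=[ProfilerActivity.CPU]) as prof:
    fake_step()

# Each tagged stage shows up as a named row in the trace summary.
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
```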