# NOTE(review): removed non-code web-page boilerplate (repository-sync notice)
# that was accidentally captured into this file; it is not part of the module.
from enum import Enum
from torch_npu.utils.error_code import ErrCode, prof_error
from .analysis.prof_common_func.constant import print_warn_msg
# Public API of this module: the schedule action enum and the Schedule
# callable used to drive step-based profiling.
__all__ = [
'ProfilerAction',
'Schedule'
]
class ProfilerAction(Enum):
    """Action the profiler takes at a given step of a schedule.

    Returned by ``Schedule.__call__`` (and ``default_schedule_fn``) to tell
    the profiler what to do on the current step.
    """
    NONE = 0              # idle step: no profiling activity
    WARMUP = 1            # warmup phase preceding active recording
    RECORD = 2            # actively recording profiling data
    RECORD_AND_SAVE = 3   # recording; last active step of a cycle, data is saved
class Schedule:
    """
    Step-based profiling schedule.

    The profiler skips the first ``skip_first`` steps, then repeats cycles of
    ``wait`` idle steps, ``warmup`` warmup steps, and ``active`` recording
    steps. ``repeat`` limits the number of cycles; ``repeat == 0`` means the
    cycles continue until profiling is finished.

    Instances are callables mapping a global step index to a
    ``ProfilerAction``.
    """

    def __init__(self, wait: int, active: int, warmup: int = 0, repeat: int = 0, skip_first: int = 0) -> None:
        self.wait = wait
        self.active = active
        self.warmup = warmup
        self.repeat = repeat
        self.skip_first = skip_first
        # Invalid values are reset to safe defaults (with a warning) rather
        # than raising, so a bad schedule never aborts the training job.
        self._check_params()

    def __call__(self, step: int) -> ProfilerAction:
        """Return the profiler action for global step ``step``.

        Raises:
            ValueError: if ``step`` is negative.
        """
        if step < 0:
            raise ValueError("Invalid parameter step, which must be not less than 0." + prof_error(ErrCode.VALUE))
        if step < self.skip_first:
            return ProfilerAction.NONE
        # Work in cycle-relative coordinates from here on.
        step -= self.skip_first
        num_steps = self.wait + self.warmup + self.active
        # Integer division is exact for arbitrarily large step counts,
        # whereas float "/" loses precision beyond 2**53; for non-negative
        # ints the comparison is mathematically identical.
        if self.repeat > 0 and step // num_steps >= self.repeat:
            return ProfilerAction.NONE
        mod_step = step % num_steps
        if mod_step < self.wait:
            return ProfilerAction.NONE
        if mod_step < self.wait + self.warmup:
            return ProfilerAction.WARMUP
        # The final step of the active window also triggers a save.
        return (
            ProfilerAction.RECORD
            if mod_step < num_steps - 1
            else ProfilerAction.RECORD_AND_SAVE
        )

    def _check_params(self) -> None:
        """Sanitize schedule parameters, resetting invalid ones with a warning.

        NOTE: ``isinstance(x, int)`` accepts ``bool`` values too; kept as-is
        for backward compatibility with existing callers.
        """
        if not isinstance(self.wait, int) or self.wait < 0:
            print_warn_msg("Invalid parameter wait, reset it to 0.")
            self.wait = 0
        if not isinstance(self.warmup, int) or self.warmup < 0:
            print_warn_msg("Invalid parameter warmup, reset it to 0.")
            self.warmup = 0
        # ``active`` must be strictly positive: a cycle with no recording
        # steps would make the schedule useless (and num_steps could be 0).
        if not isinstance(self.active, int) or self.active <= 0:
            print_warn_msg("Invalid parameter active, reset it to 1.")
            self.active = 1
        if not isinstance(self.repeat, int) or self.repeat < 0:
            print_warn_msg("Invalid parameter repeat, reset it to 0.")
            self.repeat = 0
        if not isinstance(self.skip_first, int) or self.skip_first < 0:
            print_warn_msg("Invalid parameter skip_first, reset it to 0.")
            self.skip_first = 0
        if self.warmup == 0:
            print_warn_msg("Profiler won't be using warmup, this can skew profiler results")
def default_schedule_fn(_step: int) -> ProfilerAction:
    """Default profiler schedule.

    Starts recording immediately and keeps recording on every profiler
    step; the step index is ignored.
    """
    return ProfilerAction.RECORD
# NOTE(review): removed non-code web-page boilerplate (content-moderation
# notice) that was accidentally captured into this file.