from mindspeed.features_manager.feature import MindSpeedFeature


class TrainingDefaultFeature(MindSpeedFeature):
    """Default training feature: patches Megatron's core training entry points."""

    def __init__(self):
        super(TrainingDefaultFeature, self).__init__('train-default', optimization_level=0)

    def register_patches(self, patch_manager, args):
        from mindspeed_llm.training import train
        from mindspeed_llm.training.checkpointing import load_checkpoint_wrapper
        from mindspeed_llm.legacy.data import build_pretraining_data_loader
        from mindspeed_llm.tasks.posttrain.lora.utils import is_enable_qlora

        # Use the QLoRA-aware model builder when QLoRA is enabled;
        # otherwise fall back to the standard wrapped get_model.
        if is_enable_qlora(args):
            from mindspeed_llm.tasks.posttrain.lora.qlora import get_model
            patch_manager.register_patch('megatron.training.training.get_model', get_model)
        else:
            from mindspeed_llm.training import get_model_wrapper
            patch_manager.register_patch('megatron.training.training.get_model', get_model_wrapper)

        # Replace Megatron's data loader construction, training loop and checkpoint loading.
        patch_manager.register_patch('megatron.training.training.build_pretraining_data_loader',
                                     build_pretraining_data_loader)
        patch_manager.register_patch('megatron.training.training.train', train)
        patch_manager.register_patch('megatron.training.training.load_checkpoint', load_checkpoint_wrapper)
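

# ---------------------------------------------------------------------------
# Illustration only (an assumption, not MindSpeed's actual patch manager): a
# minimal sketch of the monkey-patching pattern that register_patches relies
# on. A dotted target such as 'megatron.training.training.train' is split
# into a module path and an attribute name, and that attribute is replaced
# with the wrapper callable. `SimplePatchManager` and the throwaway
# 'demo_module' below are hypothetical names introduced for this sketch so it
# runs without Megatron or mindspeed_llm installed.
# ---------------------------------------------------------------------------
import importlib
import sys
import types


class SimplePatchManager:
    """Toy stand-in for a patch manager: applies each patch immediately."""

    def register_patch(self, target, replacement):
        # Split 'pkg.module.attr' into the module path and the attribute name,
        # import the module, and overwrite the attribute with the replacement.
        module_path, attr = target.rsplit('.', 1)
        module = importlib.import_module(module_path)
        setattr(module, attr, replacement)


if __name__ == '__main__':
    # Build a throwaway module so the demo does not need Megatron installed.
    demo = types.ModuleType('demo_module')
    demo.train = lambda: 'original train'
    sys.modules['demo_module'] = demo

    SimplePatchManager().register_patch('demo_module.train', lambda: 'patched train')
    print(demo.train())  # prints 'patched train'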