diff --git a/README.zh.md b/README.zh.md
index bdcd5e5ddc6990fd7da2260d039610a2f5ad7836..0e85e667a8cf814edd3e79f3342e1f431ba09aa0 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -58,7 +58,7 @@ To uninstall Apex and Apex-patch, simply run:
 Mixed-precision training with apex.amp:
 
 ```
-model = torch.nn.Linear(D_in, D_out).cuda()
+model = torch.nn.Linear(D_in, D_out).npu()
 optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
 model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
@@ -74,7 +74,7 @@ with amp.scale_loss(loss, optimizer) as scaled_loss:
 Set the parameter combine_grad to True in amp.initialize(), e.g.:
 
 ```
-model = torch.nn.Linear(D_in, D_out).cuda()
+model = torch.nn.Linear(D_in, D_out).npu()
 optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
 model, optimizer = amp.initialize(model, optimizer, opt_level='O1', combine_grad=True)  # add the combine_grad parameter
@@ -88,7 +88,7 @@
 Replace the native torch optimizer torch.optim.xxx with apex.optimizers.xxx, where xxx is the name of a fused optimizer; see *Features* for the optimizers supported by apex-patch.
 
 ```
-model = torch.nn.Linear(D_in, D_out).cuda()
+model = torch.nn.Linear(D_in, D_out).npu()
 optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), lr=1e-3)  # use apex.optimizers.NpuFusedSGD
 model, optimizer = amp.initialize(model, optimizer, opt_level='O1', combine_grad=True)
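
For reference, here is a minimal end-to-end sketch that ties together the three patterns patched above (amp O1 mixed precision, combine_grad, and a fused optimizer). It assumes Ascend PyTorch exposes the npu device via the torch_npu package and that the NPU-adapted Apex is installed; the layer sizes, batch, and MSE loss are illustrative placeholders, not part of the README.

```
import torch
import torch_npu  # assumption: Ascend PyTorch backend that registers the "npu" device
import apex
from apex import amp

D_in, D_out = 64, 10  # illustrative sizes

model = torch.nn.Linear(D_in, D_out).npu()

# Fused optimizer (third hunk); plain torch.optim.SGD works for the first two hunks.
optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), lr=1e-3)

# combine_grad=True enables the combined-gradient path (second hunk);
# omit it for plain O1 mixed precision (first hunk).
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', combine_grad=True)

# One illustrative training step with loss scaling, matching the
# "with amp.scale_loss(...)" context lines visible in the hunk headers.
x = torch.randn(8, D_in).npu()
target = torch.randn(8, D_out).npu()
loss = torch.nn.functional.mse_loss(model(x), target)

optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()
```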