# Training entry script for the audio-to-expression model.
import time
from options.options import Options
from datasets import create_dataset
from audiodvp_utils.visualizer import Visualizer
from models import wav2delta_model, audio2expressionnet
if __name__ == '__main__':
    # NOTE(review): the original file lost all indentation (web-scrape artifact);
    # the nesting below is reconstructed to match the standard CycleGAN-style
    # training-loop skeleton this script follows.
    opt = Options().parse_args()      # get training options
    dataset = create_dataset(opt)     # create a dataset given the options
    model = audio2expressionnet.Audio2ExpressionNet(opt)
    visualizer = Visualizer(opt)      # create a visualizer that displays/saves images and plots

    total_iters = 0                   # total number of training samples seen so far

    for epoch in range(opt.num_epoch):
        epoch_start_time = time.time()   # timer for the entire epoch
        iter_data_time = time.time()     # timer for data loading per iteration
        epoch_iter = 0                   # samples processed in the current epoch

        for i, data in enumerate(dataset):  # inner loop within one epoch
            iter_start_time = time.time()   # timer for computation per iteration
            if total_iters % opt.print_freq == 0:
                # Data-loading time for the step that will be logged below.
                t_data = iter_start_time - iter_data_time

            total_iters += opt.batch_size
            epoch_iter += opt.batch_size

            model.set_input(data)          # unpack data from dataset and apply preprocessing
            model.optimize_parameters()    # compute losses, get gradients, update network weights

            if total_iters % opt.display_freq == 0:
                # Display images on visdom and save images to an HTML file.
                visualizer.display_current_results(model.get_current_visuals(), total_iters)

            if total_iters % opt.print_freq == 0:
                # Print and plot training losses; t_comp is per-sample compute time.
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                visualizer.plot_current_losses(total_iters, losses)

            iter_data_time = time.time()

        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.num_epoch, time.time() - epoch_start_time))

    # Saved once after all epochs; original indentation was lost, so confirm
    # whether per-epoch checkpointing was intended instead.
    model.save_network()
# (Removed: Gitee page-moderation boilerplate accidentally captured in the source file.)