# PyTorch demo script (notebook-style "# %%" cells): synthetic dataset, small MLP, training loop.
# %%
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import logging
import datetime
# %%
# Timestamp that names this run's log file.
formatted_date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
EPOCHS = 30
# %%
# Logging configuration: one console handler and one file handler,
# both at INFO, sharing a single formatter.
logger = logging.getLogger("Deep Learn Log")
# The logger's own level must be set too — handler levels alone are not enough.
logger.setLevel(logging.INFO)
_formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s"
)
for _handler in (
    logging.StreamHandler(),                                           # console
    logging.FileHandler(filename=f"{formatted_date}_deep_learn.log"),  # file
):
    _handler.setLevel(logging.INFO)
    _handler.setFormatter(_formatter)
    logger.addHandler(_handler)
# %%
class MyData(Dataset):
    """Synthetic dataset: `rows` random feature vectors of width `column`,
    each paired with a random binary (0/1) label."""

    def __init__(self, rows, column) -> None:
        super().__init__()
        # Features uniform in [0, 1); labels drawn from {0, 1}.
        self.data = torch.rand(rows, column)
        self.label = torch.randint(0, 2, (rows,))

    def __len__(self):
        # One sample per feature row.
        return self.data.shape[0]

    def __getitem__(self, index):
        # Return a single (features, label) pair.
        sample = self.data[index]
        target = self.label[index]
        return sample, target
# %%
class MyModel(nn.Module):
    """Two-layer MLP classifier: Linear -> BatchNorm1d -> ReLU -> Linear."""

    def __init__(self, input_dim, hidden_dim, out_dim) -> None:
        super().__init__()
        # Attribute names are part of the checkpoint (state_dict) layout.
        self.input_linear = nn.Linear(input_dim, hidden_dim)
        self.batchnorm = nn.BatchNorm1d(hidden_dim)
        self.relu = nn.ReLU()
        self.out_linear = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        # Hidden projection, normalized and rectified, then projected to logits.
        hidden = self.input_linear(x)
        hidden = self.batchnorm(hidden)
        hidden = self.relu(hidden)
        return self.out_linear(hidden)
# %%
# Synthetic training set and its loader: shuffled mini-batches of 16;
# the incomplete final batch is dropped so BatchNorm always sees a full batch.
train_data = MyData(256, 16)
train_dataload = DataLoader(
    train_data,
    batch_size=16,
    shuffle=True,
    num_workers=4,
    pin_memory=True,
    drop_last=True,
)
# Model, loss, and optimizer. Module.to() is in-place and returns the module.
net = MyModel(16, 32, 2).to(DEVICE)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.AdamW(net.parameters(), lr=1e-3)
# %%
# Training loop: per-epoch pass over the loader, logging a running
# per-sample average loss and the current batch accuracy every 5 steps.
for epoch in range(EPOCHS):
    net.train()
    loss_item = 0.0    # running sum of (batch mean loss * batch size)
    count_steps = 0    # running count of samples seen this epoch
    for ids, (datas, labels) in enumerate(train_dataload):
        optimizer.zero_grad()
        datas = datas.to(DEVICE)
        labels = labels.to(DEVICE)
        out = net(datas)
        loss = loss_fn(out, labels)  # CrossEntropyLoss default: MEAN over the batch
        loss.backward()
        optimizer.step()
        batch_size = datas.size(0)
        count_steps += batch_size
        # Weight the batch mean by batch size so loss_item / count_steps is a
        # true per-sample average. (The old code summed batch means and divided
        # by the sample count, under-reporting loss by ~batch_size; it also did
        # a redundant .to('cpu') before .item().)
        loss_item += loss.item() * batch_size
        if ids % 5 == 0:
            # .item() so the log shows a plain float, not a tensor repr.
            acc = (out.argmax(dim=1) == labels).float().mean().item()
            logger.info(f"Epoch:{epoch} Steps:{ids} AverageLoss:{loss_item/count_steps} Acc:{acc}")
# %%