Ai
1 Star 1 Fork 0

LEVSONGSW/DeepLearnLog

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
SelfMakeUse.py 2.58 KB
一键复制 编辑 原始数据 按行查看 历史
LEVSONGSW 提交于 2025-08-18 19:51 +08:00 . Logging 组件 Use
# %%
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import logging
import datetime
# %%
# Timestamp used to name the log file for this run, e.g. "2025-08-18-19-51".
current_date = datetime.datetime.now()
formatted_date = current_date.strftime("%Y-%m-%d-%H-%M")
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
EPOCHS = 30
# %%
# Logging configuration: one named logger fanning out to console and file.
logger = logging.getLogger("Deep Learn Log")
handler_KZT = logging.StreamHandler()  # console handler
handler_Files = logging.FileHandler(filename=f"{formatted_date}_deep_learn.log")  # file handler
# The logger's own level must be set; handler levels alone do not enable output.
logger.setLevel(logging.INFO)
handler_KZT.setLevel(logging.INFO)
handler_Files.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s")
handler_KZT.setFormatter(formatter)
handler_Files.setFormatter(formatter)
# Guard against duplicate handlers: in a notebook / `# %%` cell workflow this
# cell may be executed repeatedly, and each unguarded addHandler() call would
# make every log record print once more per re-run.
if not logger.handlers:
    logger.addHandler(handler_KZT)
    logger.addHandler(handler_Files)
# %%
class MyData(Dataset):
    """Synthetic dataset: `rows` random feature vectors of width `column`
    paired with random binary labels (0 or 1)."""

    def __init__(self, rows, column) -> None:
        super().__init__()
        # Features uniform in [0, 1); labels drawn from {0, 1}.
        self.data = torch.rand(rows, column)
        self.label = torch.randint(0, 2, size=(rows,))

    def __len__(self):
        # Number of samples = first dimension of the feature tensor.
        return self.data.size(0)

    def __getitem__(self, index):
        sample = self.data[index]
        target = self.label[index]
        return sample, target
# %%
class MyModel(nn.Module):
    """Two-layer MLP classifier: Linear -> BatchNorm1d -> ReLU -> Linear.

    Submodule attribute names are part of the checkpoint layout
    (state_dict keys) and are therefore kept as-is.
    """

    def __init__(self, input_dim, hidden_dim, out_dim) -> None:
        super().__init__()
        self.input_linear = nn.Linear(input_dim, hidden_dim)
        self.batchnorm = nn.BatchNorm1d(hidden_dim)
        self.relu = nn.ReLU()
        self.out_linear = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        # Hidden representation: affine map, normalized, then rectified.
        hidden = self.input_linear(x)
        hidden = self.batchnorm(hidden)
        hidden = self.relu(hidden)
        # Raw class scores (logits); no softmax here — CrossEntropyLoss expects logits.
        return self.out_linear(hidden)
# %%
# Build the synthetic dataset and its loader (256 samples, 16 features each).
train_data = MyData(256, 16)
train_dataload = DataLoader(
    train_data,
    batch_size=16,
    shuffle=True,
    drop_last=True,
    num_workers=4,
    pin_memory=True,
)
# Model moved to the selected device at construction time
# (nn.Module.to is in-place and returns the module itself).
net = MyModel(16, 32, 2).to(DEVICE)
# Cross-entropy on logits, optimized with AdamW.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.AdamW(net.parameters(), lr=1e-3)
# %%
# Training loop: every 5 steps, log the running per-sample average loss and
# the accuracy of the current batch.
for epoch in range(EPOCHS):
    net.train()
    loss_item = 0
    count_steps = 0
    for ids, (datas, labels) in enumerate(train_dataload):
        optimizer.zero_grad()
        datas = datas.to(DEVICE)
        labels = labels.to(DEVICE)
        out = net(datas)
        loss = loss_fn(out, labels)
        loss.backward()
        count_steps += len(datas)
        # BUG FIX: `loss` is the batch MEAN, but `loss_item` is later divided
        # by the total number of SAMPLES (`count_steps`). Weight each batch
        # loss by its size so the ratio is a true per-sample average.
        # (`.item()` already returns a CPU Python float; no explicit
        # `.to('cpu')` transfer is needed.)
        loss_item += loss.item() * len(datas)
        optimizer.step()
        if ids % 5 == 0:
            # `.item()` so the log shows a plain float, not a tensor repr.
            acc = ((out.argmax(dim=1) == labels).sum() / len(labels)).item()
            logger.info(f"Epoch:{epoch} Steps:{ids} AverageLoss:{loss_item/count_steps} Acc:{acc}")
# %%
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/levsongsw/deep-learn-log.git
git@gitee.com:levsongsw/deep-learn-log.git
levsongsw
deep-learn-log
DeepLearnLog
master

搜索帮助