
8. Transfer Learning and Data Augmentation

8.1 Transfer Learning

Transfer learning

  1. Use a model trained on a large-scale dataset to solve a small-dataset problem.
  2. As long as the large-scale image dataset is big enough, the pretrained model can be treated as a general-purpose, effective feature extractor.
  3. For an input image, the pretrained model extracts features effectively, and those features transfer well between different problems.

The transfer-learning workflow

  • Use the convolutional part of the pretrained model (also called the convolutional base) to extract image features from our dataset, then retrain only the final fully connected part (the classifier). A minimal sketch of these steps follows this list.
  1. Freeze the convolutional base of the pretrained model.
  2. Replace the classifier to match the problem at hand.
  3. Train the new classifier on our own dataset.
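
In PyTorch these three steps amount to only a few lines. The sketch below is illustrative (it assumes a 4-class problem and torchvision >= 0.13); the full, runnable version is developed step by step later in this notebook.

# Illustrative sketch of the three steps above (assumes 4 output classes):
import torch.nn as nn
import torchvision

base = torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.DEFAULT)
for p in base.features.parameters():   # 1. freeze the convolutional base
    p.requires_grad = False
base.classifier[-1] = nn.Linear(base.classifier[-1].in_features, 4)  # 2. new classifier head
# 3. train the model on our own data; only the classifier's weights will be updated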

The torchvision.models module provides common pretrained models

  1. These are large convolutional neural networks trained on a subset of the ImageNet dataset; common ones include VGG, ResNet, DenseNet, and Inception.
  2. This chapter uses the VGG16 architecture (a loading example with the newer weights API follows this list).
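
For reference, torchvision 0.13 and newer load these models through an explicit weights enum rather than pretrained=True; the notebook below still uses the older argument, which triggers the deprecation warning shown in its output. A hedged sketch of the newer form:

import torchvision.models as models

# Assuming torchvision >= 0.13: pass a weights enum instead of pretrained=True
vgg = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
resnet = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
densenet = models.densenet121(weights=models.DenseNet121_Weights.DEFAULT)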
'''
Below is the preparatory code: importing the required libraries and preprocessing the data.
'''
# First import the libraries we will use
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import torchvision
import glob # for collecting file paths
from torchvision import transforms # for data preprocessing
from PIL import Image # for reading images
from torch.utils import data # for building the dataset

imgs=glob.glob(r'D:/my_all_learning/dataset2/dataset2/*.jpg')
# The line above collects the image paths; /*.jpg matches every jpg file, so imgs is a list containing the path of every image.

species=['cloudy','rain','shine','sunrise'] # 4 classes
# build the mapping from class name to index
species_to_idx=dict((c,i) for i,c in enumerate(species))
# build the mapping from index to class name
idx_to_species=dict((i,c) for i,c in enumerate(species))

# Next, build the list of labels corresponding to the list of image paths
labels=[]
for img in imgs:
    for i,c in enumerate(species):
        if c in img: # check whether the image path contains the class name
            labels.append(i)

# With the image paths and their labels in hand, we can write a custom Dataset class.
# First define the transform used to preprocess each image
transform=transforms.Compose([
    transforms.Resize((96,96)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
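
The Normalize values above simply rescale the pixels to roughly [-1, 1]. Since the VGG16 weights were trained on ImageNet, a common alternative (optional here, shown only as a sketch) is to normalize with the ImageNet statistics:

# Optional alternative: normalize with the ImageNet mean/std that the pretrained
# VGG16 weights were trained with (illustrative; the notebook keeps the transform above)
imagenet_transform=transforms.Compose([
    transforms.Resize((96,96)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])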

# Then create the Dataset class
class WT_Dataset(data.Dataset):
    def __init__(self, imgs_path, labels):
        self.imgs_path = imgs_path
        self.labels = labels

    def __len__(self):
        return len(self.imgs_path)

    def __getitem__(self, index):
        img_path = self.imgs_path[index]
        label = self.labels[index]
        pil_img = Image.open(img_path)
        pil_img = pil_img.convert('RGB') # convert to RGB
        pil_img = transform(pil_img)

        return pil_img, label

# Instantiate the custom Dataset
dataset=WT_Dataset(imgs,labels)
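
A quick, optional sanity check of the custom Dataset (not part of the original pipeline): indexing one sample should return a 3x96x96 tensor and an integer label.

# Optional sanity check: one sample should be a (3, 96, 96) float tensor plus an integer label
sample_img, sample_label = dataset[0]
print(sample_img.shape, idx_to_species[sample_label])  # e.g. torch.Size([3, 96, 96]) cloudy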

# Next, split the data into training and test sets
## compute the sizes of the two splits
train_count=int(0.8*len(dataset))
test_count=len(dataset)-train_count

## split
train_dataset,test_dataset=data.random_split(dataset,[train_count,test_count])

BATCH_SIZE = 16
# Create the DataLoaders
train_dl=data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_dl=data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
device='cuda' if torch.cuda.is_available() else 'cpu' # use the GPU if one is available
print("using {} device".format(device))
using cpu device
# First load the pretrained model
## load the VGG16 architecture together with its pretrained weights
model=torchvision.models.vgg16(pretrained=True)
model
d:\anaconda\envs\d2l\lib\site-packages\torchvision\models\_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.
  warnings.warn(
d:\anaconda\envs\d2l\lib\site-packages\torchvision\models\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.
  warnings.warn(msg)
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to C:\Users\precious/.cache\torch\hub\checkpoints\vgg16-397923af.pth
100.0%
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)

To use transfer learning, we must first freeze the model's convolutional base.

for param in model.features.parameters():
    param.requires_grad = False  # freeze the parameters of the convolutional base
# Since this is a four-class problem, the final output tensor must have length four
device='cuda' if torch.cuda.is_available() else 'cpu' # use the GPU if one is available
print("using {} device".format(device))

# Replace the last classifier layer with a new 4-way Linear layer
# (assigning to .out_features alone would not resize the layer's weights)
model.classifier[-1]=nn.Linear(model.classifier[-1].in_features,4)
model=model.to(device)

optimizer=optim.Adam(model.parameters(),lr=0.0001)
loss_fn=nn.CrossEntropyLoss() # cross-entropy loss
using cpu device
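As an optional check (illustrative, not in the original run), we can verify that only the classifier will be updated by counting the parameters that still require gradients:

# Optional check: with the convolutional base frozen, only classifier parameters should be trainable
trainable=sum(p.numel() for p in model.parameters() if p.requires_grad)
total=sum(p.numel() for p in model.parameters())
print(f"trainable parameters: {trainable:,} / {total:,}")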
### Training and test code (the same as in earlier chapters)
def train(dataloader,model,loss_fn,optimizer):
    size=len(dataloader.dataset) # total number of samples in the dataset
    num_batches=len(dataloader) # total number of batches in this dataloader

    # train_loss accumulates the loss over all batches; correct accumulates the number of correct predictions
    train_loss,correct=0,0
    '''
    The line below is the addition compared with earlier chapters
    '''
    model.train() # put the model in training mode, enabling dropout and other training-only behaviour

    for X,y in dataloader:
        X,y=X.to(device),y.to(device)

        # make predictions and compute the loss for this batch
        pred=model(X)
        loss=loss_fn(pred,y)
        # optimize the model parameters via backpropagation
        optimizer.zero_grad() # clear the gradients first
        loss.backward() # backpropagate the loss to compute the parameter gradients
        optimizer.step() # update the parameters using the gradients

        with torch.no_grad():
            # correct accumulates the number of correctly predicted samples
            correct+=(pred.argmax(1)==y).type(torch.float).sum().item()
            # train_loss accumulates the loss over all batches
            train_loss+=loss.item()

    # train_loss is the sum of the batch losses, so divide by the number of batches for the average loss
    train_loss/=num_batches
    # correct is the number of correct predictions; divide by the sample count for the epoch accuracy
    correct/=size
    return train_loss,correct

### Test function
def test(dataloader,model):
    size=len(dataloader.dataset)
    num_batches=len(dataloader)

    '''
    The line below is the addition compared with earlier chapters
    '''
    model.eval() # put the model in evaluation mode, disabling dropout and other training-only behaviour

    test_loss,correct=0,0
    with torch.no_grad():
        for X,y in dataloader:
            X,y=X.to(device),y.to(device)
            pred=model(X)
            test_loss+=loss_fn(pred,y).item()
            correct+=(pred.argmax(1)==y).type(torch.float).sum().item()
    test_loss/=num_batches
    correct/=size
    return test_loss,correct
# In the training loop we then add one line, exp_lr_scheduler.step(), to update the learning rate.
# It is usually placed at the end of each epoch, so the learning rate is updated once per epoch
# (a sketch of creating such a scheduler follows the fit() definition below).
"""
Next we wrap the training loop in a fit() function
"""
def fit(epochs,model,train_dl,test_dl,loss_fn,optimizer,exp_lr_scheduler=None): 
    train_loss=[]
    train_acc=[]
    test_loss=[]
    test_acc=[]

    for epoch in range(epochs):
        # call train() to train for one epoch
        epoch_loss,epoch_acc=train(train_dl,model,loss_fn,optimizer)
        # call test() to evaluate
        epoch_test_loss,epoch_test_acc=test(test_dl,model)
        # record the training and test loss and accuracy
        train_loss.append(epoch_loss)
        train_acc.append(epoch_acc)
        test_loss.append(epoch_test_loss)
        test_acc.append(epoch_test_acc)
        # update the learning rate
        if exp_lr_scheduler is not None:
            exp_lr_scheduler.step()
        # "is not None" is the standard way to check that a scheduler was provided

        # printing template
        template=("epoch:{:2d},train_loss:{:5f},train_acc:{:.1f}%,""test_loss:{:.5f},test_acc:{:.1f}%")
        # print the training and test loss and accuracy
        print(template.format(epoch+1,epoch_loss,epoch_acc*100,epoch_test_loss,epoch_test_acc*100))
    print("Done")

    return train_loss,train_acc,test_loss,test_acc
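
fit() accepts an optional learning-rate scheduler. No scheduler is used in the run below, but as an illustration (a common choice would be torch.optim.lr_scheduler.StepLR), one could create and pass one like this:

# Illustrative only: a scheduler that could be passed to fit() as exp_lr_scheduler.
# The run below keeps exp_lr_scheduler=None, matching the recorded output.
exp_lr_scheduler=optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)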

# Now we can call fit() to train the model:
train_loss,train_acc,test_loss,test_acc=fit(30,model,train_dl,test_dl,loss_fn,optimizer,exp_lr_scheduler=None)
epoch: 1,train_loss:0.038000,train_acc:98.3%,test_loss:0.11699,test_acc:96.0%
epoch: 2,train_loss:0.028070,train_acc:99.1%,test_loss:0.11466,test_acc:97.8%
epoch: 3,train_loss:0.016539,train_acc:99.4%,test_loss:0.14261,test_acc:95.6%
epoch: 4,train_loss:0.004690,train_acc:99.9%,test_loss:0.15131,test_acc:95.6%
epoch: 5,train_loss:0.004323,train_acc:99.9%,test_loss:0.14624,test_acc:97.3%
epoch: 6,train_loss:0.002137,train_acc:100.0%,test_loss:0.20382,test_acc:95.6%
epoch: 7,train_loss:0.016545,train_acc:99.1%,test_loss:0.35990,test_acc:92.0%
epoch: 8,train_loss:0.029447,train_acc:98.9%,test_loss:0.26371,test_acc:96.4%
epoch: 9,train_loss:0.009463,train_acc:99.6%,test_loss:0.37417,test_acc:95.6%
epoch:10,train_loss:0.031786,train_acc:98.4%,test_loss:0.37854,test_acc:93.8%
epoch:11,train_loss:0.055879,train_acc:98.2%,test_loss:0.41102,test_acc:94.7%
epoch:12,train_loss:0.090425,train_acc:97.8%,test_loss:0.34830,test_acc:96.4%
epoch:13,train_loss:0.102788,train_acc:98.1%,test_loss:0.43615,test_acc:96.0%
epoch:14,train_loss:0.021062,train_acc:99.2%,test_loss:0.58441,test_acc:92.0%
epoch:15,train_loss:0.023276,train_acc:99.4%,test_loss:0.32563,test_acc:95.6%
epoch:16,train_loss:0.003991,train_acc:99.9%,test_loss:0.54046,test_acc:93.3%
epoch:17,train_loss:0.007547,train_acc:99.6%,test_loss:0.38203,test_acc:96.0%
epoch:18,train_loss:0.035656,train_acc:99.3%,test_loss:0.29390,test_acc:96.4%
epoch:19,train_loss:0.007680,train_acc:99.8%,test_loss:0.30196,test_acc:96.0%
epoch:20,train_loss:0.002423,train_acc:99.8%,test_loss:0.27838,test_acc:95.6%
epoch:21,train_loss:0.022883,train_acc:99.4%,test_loss:0.35590,test_acc:95.6%
epoch:22,train_loss:0.000219,train_acc:100.0%,test_loss:0.40401,test_acc:96.4%
epoch:23,train_loss:0.002090,train_acc:99.9%,test_loss:0.40745,test_acc:95.6%
epoch:24,train_loss:0.000515,train_acc:100.0%,test_loss:0.41049,test_acc:96.9%
epoch:25,train_loss:0.001713,train_acc:99.9%,test_loss:0.44279,test_acc:94.7%
epoch:26,train_loss:0.047797,train_acc:99.3%,test_loss:0.73068,test_acc:93.8%
epoch:27,train_loss:0.072985,train_acc:99.4%,test_loss:0.54360,test_acc:93.3%
epoch:28,train_loss:0.029188,train_acc:99.1%,test_loss:0.39561,test_acc:96.9%
epoch:29,train_loss:0.043055,train_acc:99.4%,test_loss:0.73281,test_acc:94.7%
epoch:30,train_loss:0.088539,train_acc:98.6%,test_loss:0.40013,test_acc:96.9%
Done
## Plotting
epochs=30 
plt.plot(range(1,epochs+1),train_loss,label='train_loss')
plt.plot(range(1,epochs+1),test_loss,label='test_loss')
plt.legend()
plt.show()

plt.plot(range(1,epochs+1),train_acc,label='train_acc')
plt.plot(range(1,epochs+1),test_acc,label='test_acc')
plt.legend()
plt.show()