PyTorch Basics 05: Dropout and Regularization
Suppressing Overfitting with Dropout
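Before the full training script, it helps to see what nn.Dropout actually does. PyTorch uses "inverted" dropout: in training mode each element is zeroed with probability p and the survivors are scaled by 1/(1-p); in evaluation mode the layer is an identity. A minimal standalone sketch (not part of the script below):

import torch
from torch import nn

drop = nn.Dropout(p=0.5)
x = torch.ones(1, 8)

drop.train()      # training mode: each element zeroed with probability 0.5,
print(drop(x))    # survivors scaled by 1/(1-0.5)=2, e.g. tensor([[0., 2., 2., 0., ...]])

drop.eval()       # evaluation mode: dropout is a no-op
print(drop(x))    # tensor([[1., 1., 1., 1., 1., 1., 1., 1.]])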
import numpy as np
import torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Training set
train_data = datasets.MNIST(root="./",                       # storage location
                            train=True,                      # load the training split
                            transform=transforms.ToTensor(), # convert images to tensors
                            download=True)                   # download if not present
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)

# Batch size
batch_size = 64

# Wrap the training set in a loader
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
# Wrap the test set in a loader
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)

# Inspect the shape of one batch
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    break

# Define the network structure
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # nn.Sequential is an ordered container: modules are added to the graph
        # and executed in the order they are passed to the constructor.
        # Dropout(p=0.5) zeroes 50% of the activations during training;
        # Tanh is the activation function.
        self.layer1 = nn.Sequential(nn.Linear(784, 500), nn.Dropout(p=0.5), nn.Tanh())
        self.layer2 = nn.Sequential(nn.Linear(500, 300), nn.Dropout(p=0.5), nn.Tanh())
        self.layer3 = nn.Sequential(nn.Linear(300, 10), nn.Softmax(dim=1))

    def forward(self, x):
        # torch.Size([64, 1, 28, 28]) -> (64, 784)
        x = x.view(x.size()[0], -1)  # flatten 4-D to 2-D (fully connected layers expect 2-D)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

# Define the model
model = Net()
# Define the cost function: cross-entropy.
# NB: nn.CrossEntropyLoss applies log-softmax internally, so the Softmax
# layer above is strictly redundant; passing raw logits is the usual pattern.
ce_loss = nn.CrossEntropyLoss()
# Define the optimizer: stochastic gradient descent
optimizer = optim.SGD(model.parameters(), lr=0.5)

# Define the training and test procedures
def train():
    # Put the model in training mode (Dropout active)
    model.train()
    for i, data in enumerate(train_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model's predictions, shape (64, 10)
        out = model(inputs)
        # Cross-entropy loss: out is (batch, C), labels is (batch,)
        loss = ce_loss(out, labels)
        # Zero the gradients
        optimizer.zero_grad()
        # Compute the gradients
        loss.backward()
        # Update the weights
        optimizer.step()

def test():
    # Put the model in evaluation mode (Dropout disabled)
    model.eval()
    correct = 0  # test-set accuracy
    for i, data in enumerate(test_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Get the model's predictions, shape (64, 10)
        out = model(inputs)
        # Get the max value and the position of the max (the predicted class)
        _, predicted = torch.max(out, 1)
        # Count correct predictions
        correct += (predicted == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(test_data)))

    correct = 0
    for i, data in enumerate(train_loader):  # training-set accuracy
        inputs, labels = data
        out = model(inputs)
        _, predicted = torch.max(out, 1)
        correct += (predicted == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(train_data)))

# Train
for epoch in range(11):
    print("epoch:", epoch)
    train()
    test()
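One refinement worth noting: the accuracy loops in test() run with autograd tracking enabled, which costs memory for no benefit. A sketch of the same test-set loop wrapped in torch.no_grad(), reusing the model, test_loader, and test_data defined above (identical logic, less memory):

def test():
    model.eval()  # evaluation mode: the Dropout layers become identities
    correct = 0
    with torch.no_grad():  # no gradients are needed for evaluation
        for inputs, labels in test_loader:
            out = model(inputs)
            _, predicted = torch.max(out, 1)
            correct += (predicted == labels).sum().item()
    print("Test acc:{0}".format(correct / len(test_data)))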
Regularization

import numpy as np
import torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Training set
train_data = datasets.MNIST(root="./",                       # storage location
                            train=True,                      # load the training split
                            transform=transforms.ToTensor(), # convert images to tensors
                            download=True)                   # download if not present
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)

# Batch size
batch_size = 64

# Wrap the training and test sets in loaders
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)

# Inspect the shape of one batch
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    break

# Define the network structure (Dropout disabled here: p=0)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(784, 500), nn.Dropout(p=0), nn.Tanh())
        self.layer2 = nn.Sequential(nn.Linear(500, 300), nn.Dropout(p=0), nn.Tanh())
        self.layer3 = nn.Sequential(nn.Linear(300, 10), nn.Softmax(dim=1))

    def forward(self, x):
        # torch.Size([64, 1, 28, 28]) -> (64, 784)
        x = x.view(x.size()[0], -1)  # flatten 4-D to 2-D
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

# Define the model
model = Net()
# Define the cost function: cross-entropy
ce_loss = nn.CrossEntropyLoss()
# Define the optimizer with L2 regularization (weight_decay)
optimizer = optim.SGD(model.parameters(), lr=0.5, weight_decay=0.001)

# Training and test procedures (same as above)
def train():
    model.train()
    for i, data in enumerate(train_loader):
        inputs, labels = data
        out = model(inputs)
        loss = ce_loss(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def test():
    model.eval()
    correct = 0  # test-set accuracy
    for i, data in enumerate(test_loader):
        inputs, labels = data
        out = model(inputs)
        _, predicted = torch.max(out, 1)
        correct += (predicted == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(test_data)))

    correct = 0
    for i, data in enumerate(train_loader):  # training-set accuracy
        inputs, labels = data
        out = model(inputs)
        _, predicted = torch.max(out, 1)
        correct += (predicted == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(train_data)))

# Train
for epoch in range(11):
    print("epoch:", epoch)
    train()
    test()
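The only substantive changes in this second script are Dropout(p=0), which turns dropout off, and weight_decay=0.001 in the SGD constructor. For plain SGD, weight_decay=λ adds λ·w to each parameter's gradient, which is exactly what you would get by adding the penalty (λ/2)·‖w‖² to the loss. A minimal sketch of that equivalence (the tensors here are illustrative, not taken from the MNIST model):

import torch
from torch import nn, optim

torch.manual_seed(0)
w1 = nn.Parameter(torch.randn(3))
w2 = nn.Parameter(w1.detach().clone())
x = torch.randn(3)

# Variant A: L2 regularization via the optimizer's weight_decay.
opt1 = optim.SGD([w1], lr=0.1, weight_decay=0.001)
opt1.zero_grad()
(w1 * x).sum().backward()        # plain data loss
opt1.step()                      # SGD adds 0.001 * w1 to the gradient

# Variant B: the same penalty written into the loss by hand.
opt2 = optim.SGD([w2], lr=0.1)
opt2.zero_grad()
((w2 * x).sum() + 0.001 / 2 * (w2 ** 2).sum()).backward()
opt2.step()

print(torch.allclose(w1, w2))    # True: the two updates coincide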
Summary

Both scripts attack the same problem from different angles: Dropout (p=0.5 in the first script) randomly silences units during training so the network cannot over-rely on any single feature, while L2 regularization (weight_decay=0.001 in the second) penalizes large weights. Either technique helps suppress overfitting, which shows up as a smaller gap between the training and test accuracies printed each epoch.