神经网络的搭建
模塊Module
為所有神經網絡提供基本類
繼承nn.Module類
1.初始化函數
2.前向傳播函數(x先進行一個卷積conv1,再進行relu激活函數非線性處理。再進行一個卷積一個非線性處理)
搭建神經網絡
# Building a neural network: every model subclasses nn.Module and
# overrides __init__ (layer definitions) and forward (the computation).
import torch
import torch.nn.functional as F
from torch import nn


class Tudui(nn.Module):
    """Minimal nn.Module example: forward simply adds 1 to its input."""

    def __init__(self):
        super(Tudui, self).__init__()
        # A real model would register layers here, e.g.:
        # self.conv1 = nn.Conv2d(1, 20, 5)
        # self.conv2 = nn.Conv2d(20, 20, 5)
        # and forward would chain them:
        # x = F.relu(self.conv1(x)); return F.relu(self.conv2(x))

    def forward(self, input):
        # Toy forward pass; calling tudui(x) dispatches here.
        output = input + 1
        return output


tudui = Tudui()
x = torch.tensor(1.0)  # fixed: was `torch,tensor(1.0)` — the comma made a tuple
output = tudui(x)
print(output)

# Convolution layers: nn.Conv2d is the 2-D (image) case.
# Below, the functional form F.conv2d is used directly.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

# F.conv2d requires 4-D tensors (batch_size, channels, H, W);
# the raw 5x5 / 3x3 shapes above do not satisfy that.
input = torch.reshape(input, (1, 1, 5, 5))   # batch_size=1, 1 channel, 5x5
kernel = torch.reshape(kernel, (1, 1, 3, 3))
print(input.shape)   # torch.Size([1, 1, 5, 5])
print(kernel.shape)  # torch.Size([1, 1, 3, 3])

output = F.conv2d(input, kernel, stride=1)
print(output)
# Output:
torch.Size([1, 1, 5, 5]) torch.Size([1, 1, 3, 3]) tensor([[[[10, 12, 12], [18, 16, 16],[13, 9, 3]]]]) 改變步長為2
# Change the stride to 2: the kernel now moves two pixels per step,
# so the output shrinks from 3x3 to 2x2.
output2 = F.conv2d(input, kernel, stride=2)
print(output2)
# Output: tensor([[[[10, 12],
#                   [13,  3]]]])
# padding: pads the input on all four sides (top/bottom/left/right)
# padding=1 adds one ring of zeros around the input, so with stride=1
# the output keeps the original 5x5 spatial size.
output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)
# Output:
tensor([[[[ 1, 3, 4, 10, 8],[ 5, 10, 12, 12, 6],[ 7, 18, 16, 16, 8],[11, 13, 9, 3, 4],[14, 13, 9, 7, 4]]]])建立一個簡單的神經網絡模型
卷積層:
# Build a model with a single convolution layer and visualize its effect
# on CIFAR-10 batches with TensorBoard.
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

dataset = torchvision.datasets.CIFAR10(
    root="E:\\notebookpytorch\\pyTorch學習\\NN\\data",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False)
dataloader = DataLoader(dataset, batch_size=64)  # 64 images per batch


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # 3 input channels (RGB) -> 6 output channels, 3x3 kernel, stride 1.
        self.conv1 = Conv2d(in_channels=3, out_channels=6,
                            kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()
# print(tudui) would show:
# Tudui(
#   (conv1): Conv2d(3, 6, kernel_size=(3, 3), stride=(1, 1))
# )

writer = SummaryWriter("logs")
step = 0
for data in dataloader:  # iterate over every batch in the dataloader
    imgs, targets = data
    output = tudui(imgs)
    print(imgs.shape)    # torch.Size([64, 3, 32, 32])
    print(output.shape)  # torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step)
    # add_images expects 3 channels; fold the 6 channels back into extra
    # batch entries: [64, 6, 30, 30] -> [xxx, 3, 30, 30].
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)
    step = step + 1
writer.close()  # fixed: the writer was never closed, so logs could be lost
# Then run `tensorboard --logdir=...` and open the URL in a browser to
# compare the input images against the convolved output.
conda activate pytorch tensorboard --logdir=E:\notebookpytorch\pyTorch學習\NN\logs 在瀏覽器中復制網址查看結果
注意:若已在電腦中下載數據集,使用時直接將數據存在的路徑放在root下。并且download=False不讓它重新下載即可。
若未下載數據集,則可在root下寫入想要存放數據集的位置。download=True讓它下載即可。
train=False代表使用測試集而不是訓練集。
# Reuse the locally downloaded CIFAR-10 test split (download=False).
dataset = torchvision.datasets.CIFAR10(
    root="E:\\notebookpytorch\\pyTorch學習\\NN\\data",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False)
# 池化層 (pooling layer)
#池化層:保留特征但是將數據量減小
#dilation空洞卷積
#ceil為True即會使用ceil模式而不是floor模式(上取整和下取整)
#Input和Output必須是4維的。(N, C, Hin, Win)(N, C, Hout, Wout)
#batch_size=-1表示根據其他設置,自動計算。channel,高,寬
1.簡單的input
# Max pooling keeps the strongest response in each window, shrinking the
# spatial size while preserving the dominant features.
import torch
from torch import nn
from torch.nn import MaxPool2d

# MaxPool2d needs floating-point input — without dtype=torch.float32 the
# integer tensor below raises an error.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
# print(input.shape)  # torch.Size([5, 5])

# Pooling expects (N, C, H, W); -1 lets reshape infer the batch size.
input = torch.reshape(input, (-1, 1, 5, 5))
# print(input.shape)  # torch.Size([1, 1, 5, 5])


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # ceil_mode=True keeps the partially covered windows at the border
        # (ceil instead of floor when computing the output size).
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output


tudui = Tudui()
output = tudui(input)
print(output)
# Output:
tensor([[[[2., 3.],[5., 1.]]]])數據集
# Apply the same max-pooling module to CIFAR-10 batches and log the
# before/after images to TensorBoard.
import torch
import torchvision  # fixed: torchvision was used below but never imported
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

dataset = torchvision.datasets.CIFAR10(
    root="E:\\notebookpytorch\\pyTorch學習\\NN\\data",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False)
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output


tudui = Tudui()

# Write the logs out for TensorBoard.
writer = SummaryWriter("logs_maxpool")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    # Pooling keeps the channel count, so no reshape is needed here.
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step = step + 1
writer.close()
# 池化結果: (pooling result, viewed in TensorBoard)
非線性激活函數Relu
# Non-linear activation: ReLU and Sigmoid are the commonly used choices.
import torch
from torch import nn
from torch.nn import ReLU

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))  # (N, C, H, W), N inferred
print(input.shape)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # inplace defaults to False: the original tensor is not replaced.
        self.relu1 = ReLU()

    def forward(self, input):
        output = self.relu1(input)
        return output


tudui = Tudui()
output = tudui(input)
print(output)
# ReLU keeps values greater than 0 and clamps negatives to 0:
# tensor([[[[1., 0.],
#           [0., 3.]]]])
# Output:
torch.Size([1, 1, 2, 2]) tensor([[[[1., 0.],[0., 3.]]]])激活函數Sigmoid
# The ReLU effect is hard to see on images, so switch to the Sigmoid
# activation, whose remapping is clearly visible in TensorBoard.
import torch
import torchvision
from torch import nn
from torch.nn import Sigmoid
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

dataset = torchvision.datasets.CIFAR10(
    root="E:\\notebookpytorch\\pyTorch學習\\NN\\data",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False)
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # inplace defaults to False: the original tensor is not replaced.
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        output = self.sigmoid1(input)
        return output


tudui = Tudui()
writer = SummaryWriter("logs_relu")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step += 1
writer.close()
# 結果: (see TensorBoard)
Linear layer
# Linear (fully connected) layer: flatten each batch of images into one
# long vector and project it down to 10 features.
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10(
    "E:\\notebookpytorch\\pyTorch學習\\NN\\data",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False)
# drop_last=True fixes a crash: the CIFAR-10 test set has 10000 images,
# so the final batch holds only 16; flattening it gives 49152 values,
# not the 196608 the Linear layer expects.
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # in_features = 64 * 3 * 32 * 32 = 196608, out_features = 10
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        output = self.linear1(input)
        return output


tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)  # torch.Size([64, 3, 32, 32])
    # output = torch.reshape(imgs, (1, 1, 1, -1)) would keep a 4-D shape;
    # torch.flatten collapses everything to a 1-D vector of 196608 values.
    output = torch.flatten(imgs)
    print(output.shape)  # torch.Size([196608])
    output = tudui(output)
    print(output.shape)  # torch.Size([10])
# 結果:
?
總結
- 上一篇: fastadmin btn-ajax,F
- 下一篇: 1024这天,马蜂窝程序员选出的最好语言