莫烦Pytorch神经网络第三章代码修改
生活随笔
收集整理的這篇文章主要介紹了
莫烦Pytorch神经网络第三章代码修改
小編覺得挺不錯的，現在分享給大家，幫大家做個參考。
3.1 Regression 回歸
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# --- Create data: y = x^2 plus uniform noise ---
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # shape (100, 1)
y = x.pow(2) + 0.2 * torch.rand(x.size())               # add noise

# NOTE(review): the tutorial wrapped x, y in torch.autograd.Variable;
# Variable is a no-op since PyTorch 0.4, so plain tensors are used here.

# plt.scatter(x.data.numpy(), y.data.numpy())  # preview the data
# plt.show()


class Net(torch.nn.Module):
    """One-hidden-layer MLP for 1-D regression."""

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)  # no activation: raw regression output
        return x


net = Net(1, 10, 1)
# print(net)

plt.ion()  # interactive mode so the fit can be re-drawn live
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()  # MSE is the usual loss for regression

for t in range(100):
    prediction = net(x)
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 5 == 0:
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        # loss.data[0] from the original tutorial must be loss.item() on PyTorch >= 0.4
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

# 3.2 Classification
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# --- Create two Gaussian clusters, labelled 0 and 1 ---
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)   # cluster centred near (2, 2)
y0 = torch.zeros(100)
x1 = torch.normal(-2 * n_data, 1)  # cluster centred near (-2, -2)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)  # CrossEntropyLoss needs int64 targets

# NOTE(review): torch.autograd.Variable wrappers removed — a no-op since PyTorch 0.4.

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()


class Net(torch.nn.Module):
    """One-hidden-layer MLP emitting 2 class logits."""

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)  # raw logits; softmax is applied only for display
        return x


net = Net(2, 10, 2)
# print(net)

plt.ion()  # interactive mode for live re-plotting
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy is the usual loss for classification

for t in range(100):
    out = net(x)
    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        # FIX: F.softmax requires an explicit dim on modern PyTorch.
        # Index [1] of torch.max is the argmax, i.e. the predicted class.
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = sum(pred_y == target_y) / 200  # 200 samples total
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

# 3.3 Quick network building
import torch
import torch.nn.functional as F

# NOTE(review): the tutorial also imported matplotlib and torch.autograd.Variable;
# matplotlib was only referenced from commented-out code and Variable is a no-op
# since PyTorch 0.4, so both unused imports are dropped.

# --- Create data (two labelled Gaussian clusters) ---
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2 * n_data, 1)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)


class Net(torch.nn.Module):
    """The 'ordinary' way to build a network: a hand-written Module subclass."""

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


net1 = Net(2, 10, 2)

# --- Quick build: the same architecture expressed with nn.Sequential ---
net2 = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2)
)

print(net1)
print(net2)

# 3.4 Save & restore
import torch
import matplotlib.pyplot as plt

# Fake data: y = x^2 + uniform noise.
# NOTE(review): the tutorial's Variable(..., requires_grad=False) wrappers are
# no-ops on PyTorch >= 0.4, so plain tensors are used.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())


def save():
    """Train a small net and persist it two ways: whole model and state_dict."""
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    torch.save(net1, 'net.pkl')                      # whole network (pickled module)
    torch.save(net1.state_dict(), 'net_params.pkl')  # parameters only

    # plot the fit of the freshly trained net
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


def restore_net():
    """Reload the entire pickled network from 'net.pkl'.

    NOTE(review): PyTorch >= 2.6 defaults torch.load to weights_only=True;
    loading a whole module there needs torch.load('net.pkl', weights_only=False).
    """
    net2 = torch.load('net.pkl')
    prediction = net2(x)

    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


def restore_params():
    """Rebuild an identical architecture and load only the saved parameters."""
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()


save()
restore_net()
restore_params()

# 3.5 Batch training
import torch
import torch.utils.data as Data

BATCH_SIZE = 5

# Ten points: x runs 1..10 while y runs 10..1.
x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

# Data.TensorDataset(data_tensor=..., target_tensor=...) from the original
# tutorial raises on modern PyTorch; positional arguments work everywhere.
torch_dataset = Data.TensorDataset(x, y)

loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    # num_workers=2,  # worker subprocesses; keep disabled on Windows (no fork)
)

for ep in range(3):
    for idx, (bx, by) in enumerate(loader):
        # real training would happen here
        print('Epoch:', ep, '|Step:', idx,
              '|batch x:', bx.numpy(), '|batch y:', by.numpy())

# 3.6 Optimizers
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.utils.data as Data

# NOTE(review): the tutorial imported torch.autograd.Variable, but its only
# uses were already commented out (unneeded on PyTorch >= 0.4), so the unused
# import is dropped.

# hyper-parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# y = x^2 + uniform noise
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())

# plt.scatter(x.numpy(), y.numpy())  # preview the data
# plt.show()

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)


class Net(torch.nn.Module):
    """One-hidden-layer MLP used as the common model for every optimizer."""

    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


# One identical net per optimizer so the comparison is fair.
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]  # loss history, one list per optimizer

for epoch in range(EPOCH):
    print(epoch)
    for step, (batch_x, batch_y) in enumerate(loader):
        # DataLoader already yields plain tensors; no Variable wrapping needed.
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(batch_x)
            loss = loss_func(output, batch_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())  # loss.data[0] is obsolete; use .item()

labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.show()

# Summary
以上是生活随笔為你收集整理的莫烦Pytorch神经网络第三章代码修改的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 梦到同学被杀有什么兆头
- 下一篇: 梦到自己数钱什么意思