Code Fixes for Chapter 4 of the Mofan (莫烦) PyTorch Neural Network Tutorial
This post collects the changes needed to make the Chapter 4 examples from Mofan's PyTorch neural network tutorial run on newer PyTorch/torchvision versions; it is shared here for reference.
4.1 CNN (Convolutional Neural Network)
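The main API changes in this section relative to the original tutorial: `Variable(..., volatile=True)` is no longer needed, and torchvision's MNIST dataset now exposes `data`/`targets` instead of `test_data`/`test_labels`. As a minimal sketch (my illustration, not part of the original post), the test tensors could also be built inside a `torch.no_grad()` block:

import torch
import torchvision

# sketch only: assumes MNIST is already downloaded to ./mnist
test_data = torchvision.datasets.MNIST(root='./mnist', train=False)
with torch.no_grad():                       # no gradients are needed for evaluation data
    # (2000, 28, 28) uint8 -> (2000, 1, 28, 28) float in [0, 1]
    test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
    test_y = test_data.targets[:2000]
print(test_x.shape, test_y.shape)           # torch.Size([2000, 1, 28, 28]) torch.Size([2000])

The full modified script follows.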
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = False   # False if the dataset is already on disk, True if it still needs to be downloaded

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),   # convert the data to tensor format
    download=DOWNLOAD_MNIST,
)

# # plot one example
# print(train_data.train_data.size())
# print(train_data.train_labels.size())
# plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
# plt.title('%i' % train_data.train_labels[0])
# plt.show()

train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

data = torchvision.datasets.MNIST(root='./mnist', train=False)   # train=False loads the test set instead of the training set
# test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000]/255.   # newer PyTorch versions no longer need Variable
test_x = torch.unsqueeze(data.data, dim=1).type(torch.FloatTensor)[:2000]/255.   # /255. squeezes the pixels into [0, 1]; the trailing dot makes it a float
# test_y = test_data.test_labels[:2000]   # newer versions renamed test_labels to targets
test_y = data.targets[:2000]

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(                     # input shape (1, 28, 28)
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,
            ),                             # output shape (16, 28, 28)
            nn.ReLU(),                     # output shape (16, 28, 28)
            nn.MaxPool2d(kernel_size=2),   # output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(        # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),    # output shape (32, 14, 14)
            nn.ReLU(),                     # output shape (32, 14, 14)
            nn.MaxPool2d(2),               # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32*7*7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)                  # output shape (32, 7, 7)
        x = x.view(x.size(0), -1)          # flatten (32, 7, 7) -> (32*7*7)
        output = self.out(x)
        return output

cnn = CNN()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        out_put = cnn(x)
        loss = loss_func(out_put, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            accuracy = float((pred_y == test_y).sum()) / test_y.size(0)   # cast to float so the division does not truncate to 0
            print('Epoch:', epoch, '|train loss:%.4f' % loss.item(), '|test accuracy:%.2f' % accuracy)

# print 10 predictions from test data
test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

4.2 RNN Classification (Recurrent Neural Network)
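In this section the test labels are kept as a NumPy array (`data.targets.numpy()`), so accuracy is computed with NumPy instead of tensor operations; the commented-out line in the script shows the old version, which could silently truncate to 0 under integer division. A tiny sketch of the fixed computation with made-up values (my illustration, not from the original script):

import numpy as np

pred_y = np.array([7, 2, 1, 0, 4])   # hypothetical predicted digits
test_y = np.array([7, 2, 1, 0, 9])   # hypothetical true digits
accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
print('accuracy: %.2f' % accuracy)   # 0.80

The full modified script follows.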
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

EPOCH = 1
BATCH_SIZE = 64      # mini-batch size
TIME_STEP = 28       # number of time steps (one image row is fed in per step)
INPUT_SIZE = 28      # amount of data fed in at each step (28 pixels per row)
LR = 0.01
DOWNLOAD_MNIST = False

train_data = dsets.MNIST(
    root='./mnist',
    train=True,
    transform=transforms.ToTensor(),   # convert the data to tensor format
    download=DOWNLOAD_MNIST,
)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
data = dsets.MNIST(root='./mnist', train=False, transform=transforms.ToTensor())
test_x = data.data.type(torch.FloatTensor)[:2000]/255.
test_y = data.targets.numpy()[:2000]

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=64,
            num_layers=1,        # only one hidden layer
            batch_first=True,    # put the batch dimension first in the input/output
        )
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        r_out, (h_n, h_c) = self.rnn(x, None)
        out = self.out(r_out[:, -1, :])   # take the output of the last time step
        return out

rnn = RNN()
# print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        x = x.view(-1, 28, 28)
        out_put = rnn(x)
        loss = loss_func(out_put, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            test_output = rnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            # accuracy = sum(pred_y == test_y) / test_y.size
            accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
            print('Epoch:', epoch, '|train loss:%.4f' % loss.item(), '|test accuracy:%.2f' % accuracy)

test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')

4.3 RNN Regression (Recurrent Neural Network)
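The crucial line in this section is `h_state = h_state.data`: it repacks (detaches) the hidden state after each training window, so the next `backward()` does not try to propagate through a graph that has already been freed. A minimal sketch of the same idea using `.detach()`, which is equivalent for this purpose (my illustration, not part of the original post):

import torch
import torch.nn as nn

rnn = nn.RNN(input_size=1, hidden_size=32, num_layers=1, batch_first=True)
h_state = None
for step in range(3):
    x = torch.randn(1, 10, 1)      # (batch, time_step, input_size)
    r_out, h_state = rnn(x, h_state)
    h_state = h_state.detach()     # cut the graph so backward() stays inside this window
    r_out.sum().backward()         # without the detach, the second iteration would raise an error

The full modified script follows.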
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

TIME_STEP = 10    # number of time steps per training window
INPUT_SIZE = 1    # amount of data fed in at each step
LR = 0.02

steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
# plt.plot(steps, y_np, 'r-', label='target(cos)')
# plt.plot(steps, x_np, 'b-', label='input(sin)')
# plt.legend(loc='best')
# plt.show()

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,
            num_layers=1,
            batch_first=True,
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        r_out, h_state = self.rnn(x, h_state)
        outs = []
        for time_step in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

rnn = RNN()

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()

h_state = None

plt.figure(1, figsize=(12, 5))
plt.ion()

for step in range(60):
    start, end = step*np.pi, (step + 1)*np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    prediction, h_state = rnn(x, h_state)
    h_state = h_state.data   # be sure to repack (detach) h_state here, otherwise the old graph is reused
    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)

plt.ioff()
plt.show()

4.4 AutoEncoder
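This listing stays close to the original tutorial and still reads `train_data.train_data` / `train_data.train_labels`; on recent torchvision versions these are deprecated aliases of `data` / `targets` (the same rename already applied in 4.1 and 4.2), so the script may print deprecation warnings but should still run. A small sketch of the equivalent access with the new attribute names (my illustration, assuming MNIST is already downloaded):

import torch
import torchvision

train_data = torchvision.datasets.MNIST(root='./mnist/', train=True,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=False)
# same view tensor as train_data.train_data[:5].view(-1, 28*28) / 255., without the warning
view_data = train_data.data[:5].view(-1, 28*28).type(torch.FloatTensor) / 255.
print(view_data.shape)   # torch.Size([5, 784])

The full script follows.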
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np

# torch.manual_seed(1)    # reproducible

# Hyper Parameters
EPOCH = 10
BATCH_SIZE = 64
LR = 0.005          # learning rate
DOWNLOAD_MNIST = False
N_TEST_IMG = 5

# Mnist digits dataset
train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,                                     # this is training data
    transform=torchvision.transforms.ToTensor(),    # Converts a PIL.Image or numpy.ndarray to
                                                    # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,                        # download it if you don't have it
)

# plot one example
print(train_data.train_data.size())     # (60000, 28, 28)
print(train_data.train_labels.size())   # (60000)
plt.imshow(train_data.train_data[2].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[2])
plt.show()

# Data Loader for easy mini-batch return in training, the image batch shape will be (64, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 128),
            nn.Tanh(),
            nn.Linear(128, 64),
            nn.Tanh(),
            nn.Linear(64, 12),
            nn.Tanh(),
            nn.Linear(12, 3),   # compress to 3 features which can be visualized in plt
        )
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.Tanh(),
            nn.Linear(12, 64),
            nn.Tanh(),
            nn.Linear(64, 128),
            nn.Tanh(),
            nn.Linear(128, 28*28),
            nn.Sigmoid(),       # compress to a range (0, 1)
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded

autoencoder = AutoEncoder()

optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()

# initialize figure
f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
plt.ion()   # continuously plot

# original data (first row) for viewing
view_data = train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.
for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[0][i].set_xticks(()); a[0][i].set_yticks(())

for epoch in range(EPOCH):
    for step, (x, b_label) in enumerate(train_loader):
        b_x = x.view(-1, 28*28)   # batch x, shape (batch, 28*28)
        b_y = x.view(-1, 28*28)   # batch y, shape (batch, 28*28)

        encoded, decoded = autoencoder(b_x)

        loss = loss_func(decoded, b_y)   # mean square error
        optimizer.zero_grad()            # clear gradients for this training step
        loss.backward()                  # backpropagation, compute gradients
        optimizer.step()                 # apply gradients

        if step % 100 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy())

            # plotting decoded image (second row)
            _, decoded_data = autoencoder(view_data)
            for i in range(N_TEST_IMG):
                a[1][i].clear()
                a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
                a[1][i].set_xticks(()); a[1][i].set_yticks(())
            plt.draw(); plt.pause(0.05)

plt.ioff()
plt.show()

# visualize in 3D plot
view_data = train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.
encoded_data, _ = autoencoder(view_data)
fig = plt.figure(2); ax = Axes3D(fig)
X, Y, Z = encoded_data.data[:, 0].numpy(), encoded_data.data[:, 1].numpy(), encoded_data.data[:, 2].numpy()
values = train_data.train_labels[:200].numpy()
for x, y, z, s in zip(X, Y, Z, values):
    c = cm.rainbow(int(255*s/9)); ax.text(x, y, z, s, backgroundcolor=c)
ax.set_xlim(X.min(), X.max()); ax.set_ylim(Y.min(), Y.max()); ax.set_zlim(Z.min(), Z.max())
plt.show()

4.5 DQN (Reinforcement Learning)
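The replay memory in this DQN stores each transition as one flat row of length `N_STATES*2 + 2`, which `store_transition()` writes and `learn()` slices back apart. A tiny sketch of that row layout with made-up CartPole numbers (my illustration, not part of the original script):

import numpy as np

N_STATES = 4
s  = np.array([0.01, -0.02, 0.03, 0.04])    # hypothetical current state
a, r = 1, 0.5                               # hypothetical action and shaped reward
s_ = np.array([0.02, -0.01, 0.02, 0.05])    # hypothetical next state

transition = np.hstack((s, [a, r], s_))     # row layout: [s (4), a (1), r (1), s_ (4)]
assert transition[:N_STATES].tolist() == s.tolist()                 # the b_s slice
assert transition[N_STATES] == a and transition[N_STATES + 1] == r  # the b_a, b_r slices
assert transition[-N_STATES:].tolist() == s_.tolist()               # the b_s_ slice

The full modified script follows.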
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym

# Hyper Parameters
BATCH_SIZE = 32
LR = 0.01                   # learning rate
EPSILON = 0.9               # greedy policy
GAMMA = 0.9                 # reward discount
TARGET_REPLACE_ITER = 100   # target update frequency
MEMORY_CAPACITY = 2000
env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape   # to confirm the shape

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(N_STATES, 50)
        self.fc1.weight.data.normal_(0, 0.1)   # initialize weights from a normal distribution; may give better results
        self.out = nn.Linear(50, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        actions_value = self.out(x)
        return actions_value

class DQN(object):
    def __init__(self):
        self.eval_net, self.target_net = Net(), Net()
        self.learn_step_counter = 0     # for target updating
        self.memory_counter = 0         # for storing memory
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))   # initialize memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    def choose_action(self, x):
        x = torch.unsqueeze(torch.FloatTensor(x), 0)
        # input only one sample
        if np.random.uniform() < EPSILON:   # greedy
            actions_value = self.eval_net.forward(x)
            action = torch.max(actions_value, 1)[1].data.numpy()
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)   # return the argmax index
        else:   # random
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action

    def store_transition(self, s, a, r, s_):
        transition = np.hstack((s, [a, r], s_))
        # replace the old memory with new memory
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        # target parameter update
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1

        # sample batch transitions
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        b_s = torch.FloatTensor(b_memory[:, :N_STATES])
        b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
        b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
        b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])

        # q_eval w.r.t the action in experience
        q_eval = self.eval_net(b_s).gather(1, b_a)    # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()       # detach from graph, don't backpropagate
        q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)   # shape (batch, 1)
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

dqn = DQN()

print('\nCollecting experience...')
for i_episode in range(400):
    s = env.reset()
    ep_r = 0
    while True:
        env.render()
        a = dqn.choose_action(s)

        # take action
        s_, r, done, info = env.step(a)

        # modify the reward
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        dqn.store_transition(s, a, r, s_)

        ep_r += r
        if dqn.memory_counter > MEMORY_CAPACITY:
            dqn.learn()
            if done:
                print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))

        if done:
            break
        s = s_

4.6 GAN (Generative Adversarial Network)
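Before the full script, note the loss convention it uses: G minimizes `mean(log(1 - D(G(z))))` while D minimizes `-mean(log D(real) + log(1 - D(fake)))`. A short numeric sketch (my illustration, not from the original post) of those formulas at the equilibrium point where D outputs 0.5 for everything, which is where the 0.5 accuracy and roughly -1.38 score mentioned in the plot labels come from:

import torch

prob_artist0 = torch.full((64, 1), 0.5)   # hypothetical D(real paintings)
prob_artist1 = torch.full((64, 1), 0.5)   # hypothetical D(generated paintings)

G_loss = torch.mean(torch.log(1. - prob_artist1))
D_loss = - torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))
print(G_loss.item())    # about -0.693 (= log 0.5)
print(-D_loss.item())   # about -1.386 (= 2 * log 0.5), the '-1.38 for G to converge' reference

The full modified script follows.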
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible
# np.random.seed(1)

# Hyper Parameters
BATCH_SIZE = 64
LR_G = 0.0001           # learning rate for generator
LR_D = 0.0001           # learning rate for discriminator
N_IDEAS = 5             # think of this as number of ideas for generating an art work (Generator)
ART_COMPONENTS = 15     # it could be total point G can draw in the canvas
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])

# show our beautiful painting range
# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
# plt.legend(loc='upper right')
# plt.show()

def artist_works():     # painting from the famous artist (real target)
    a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
    paintings = a * np.power(PAINT_POINTS, 2) + (a - 1)
    paintings = torch.from_numpy(paintings).float()
    return paintings

G = nn.Sequential(                      # Generator
    nn.Linear(N_IDEAS, 128),            # random ideas (could from normal distribution)
    nn.ReLU(),
    nn.Linear(128, ART_COMPONENTS),     # making a painting from these random ideas
)

D = nn.Sequential(                      # Discriminator
    nn.Linear(ART_COMPONENTS, 128),     # receive art work either from the famous artist or a newbie like G
    nn.ReLU(),
    nn.Linear(128, 1),
    nn.Sigmoid(),                       # tell the probability that the art work is made by artist
)

opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)

plt.ion()   # something about continuous plotting

for step in range(10000):
    artist_paintings = artist_works()                                # real painting from artist
    G_ideas = torch.randn(BATCH_SIZE, N_IDEAS, requires_grad=True)   # random ideas
    G_paintings = G(G_ideas)                                         # fake painting from G (random ideas)
    prob_artist1 = D(G_paintings)                                    # D try to reduce this prob
    G_loss = torch.mean(torch.log(1. - prob_artist1))
    opt_G.zero_grad()
    G_loss.backward()
    opt_G.step()

    prob_artist0 = D(artist_paintings)        # D try to increase this prob
    prob_artist1 = D(G_paintings.detach())    # D try to reduce this prob
    D_loss = - torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))
    opt_D.zero_grad()
    D_loss.backward(retain_graph=True)        # reusing computational graph
    opt_D.step()

    if step % 50 == 0:   # plotting
        plt.cla()
        plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='Generated painting',)
        plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
        plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
        plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 13})
        plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 13})
        plt.ylim((0, 3)); plt.legend(loc='upper right', fontsize=10); plt.draw(); plt.pause(0.01)

plt.ioff()
plt.show()

Summary
That covers all of the code fixes for Chapter 4 of the Mofan PyTorch neural network tutorial; hopefully it helps you resolve the problems you run into.