Andrew Ng Assignment 3: Classifying Cats with a Deep Neural Network
This assignment classifies cat images with a 4-layer neural network. Each training image has 64*64*3 = 12288 pixels and there are 209 training examples, so the input layer has 12288 units; the hidden layers have 20, 7, and 5 units in turn, and the output layer has 1 unit.
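To make the architecture concrete, here is a small sketch (not part of the assignment files) that prints the weight and bias shapes this layer layout implies; the weight matrix of layer l maps the activations of layer l-1 to layer l:

import numpy as np

# Shapes implied by the architecture above: input 12288, hidden 20/7/5, output 1
layer_dims = [12288, 20, 7, 5, 1]
for l in range(1, len(layer_dims)):
    print('W{}: {}   b{}: {}'.format(l, (layer_dims[l], layer_dims[l - 1]), l, (layer_dims[l], 1)))
# W1: (20, 12288)   b1: (20, 1)
# W2: (7, 20)       b2: (7, 1)
# W3: (5, 7)        b3: (5, 1)
# W4: (1, 5)        b4: (1, 1)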
First, the file layout. dnn_utils_v2.py provides the activation functions and their derivatives, the dataset loader, and a helper that plots the mislabeled test images:
import numpy as np
import matplotlib.pyplot as plt
import h5py

def sigmoid(Z):
    """
    Implements the sigmoid activation in numpy

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- output of sigmoid(z), same shape as Z
    cache -- returns Z as well, useful during backpropagation
    """
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return A, cache

def relu(Z):
    """
    Implement the RELU function.

    Arguments:
    Z -- Output of the linear layer, of any shape

    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- returns Z as well, stored for computing the backward pass efficiently
    """
    A = np.maximum(0, Z)
    assert (A.shape == Z.shape)
    cache = Z
    return A, cache

def relu_backward(dA, cache):
    """
    Implement the backward propagation for a single RELU unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    dZ = np.array(dA, copy=True)  # just converting dz to a correct object
    # When z <= 0, set dz to 0 as well
    dZ[Z <= 0] = 0
    assert (dZ.shape == Z.shape)
    return dZ

def sigmoid_backward(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)
    assert (dZ.shape == Z.shape)
    return dZ

# Load the dataset
def load_data():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes

def print_mislabeled_images(classes, X, y, p):
    """
    Plots images where predictions and truth were different.
    X -- dataset
    y -- true labels
    p -- predictions
    """
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots
    num_images = len(mislabeled_indices[0])
    for i in range(num_images):
        index = mislabeled_indices[1][i]
        plt.subplot(2, num_images, i + 1)
        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0, index])].decode("utf-8") + " \n Class: " + classes[y[0, index]].decode("utf-8"))

StartDeepNeural.py contains the forward- and backward-propagation code for the whole model:
import numpy as np
import dnn_utils_v2

def initialize_parameters(n_x, n_h, n_y):
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    assert (W1.shape == (n_h, n_x))
    assert (b1.shape == (n_h, 1))
    assert (W2.shape == (n_y, n_h))
    assert (b2.shape == (n_y, 1))
    parameters = {'W1': W1,
                  'b1': b1,
                  'W2': W2,
                  'b2': b2}
    return parameters

# Parameter initialization for a multi-layer network.
# Note the np.sqrt(2 / n_prev) scaling of the weights, which helps prevent
# exploding and vanishing gradients.
def initialize_parameters_deep(layer_dims):
    L = len(layer_dims)
    parameters = {}
    for i in range(1, L):
        parameters['W' + str(i)] = np.random.randn(layer_dims[i], layer_dims[i - 1]) * np.sqrt(2.0 / layer_dims[i - 1])
        parameters['b' + str(i)] = np.zeros((layer_dims[i], 1))
        assert (parameters['W' + str(i)].shape == (layer_dims[i], layer_dims[i - 1]))
        assert (parameters['b' + str(i)].shape == (layer_dims[i], 1))
    return parameters

# Linear part (no activation) of one layer's forward pass
def linear_forward(A, W, b):
    Z = np.dot(W, A) + b
    assert (Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)
    return Z, cache

# Forward pass of one layer through its activation function
def linear_activation_forward(A_prev, W, b, activation):
    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = dnn_utils_v2.sigmoid(Z)
    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = dnn_utils_v2.relu(Z)
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)  # save the ((A, W, b), Z) tuple
    return A, cache

# Forward propagation through the whole model
def L_model_forward(X, parameters):
    L = len(parameters) // 2
    A = X
    caches = []
    for i in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(i)], parameters['b' + str(i)], activation='relu')
        caches.append(cache)
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation='sigmoid')
    caches.append(cache)
    assert (AL.shape == (parameters['W' + str(L)].shape[0], X.shape[1]))
    return AL, caches

# Compute the cross-entropy cost
def compute_cost(AL, Y):
    m = Y.shape[1]
    cost = (1. / m) * (-np.dot(Y, np.log(AL).T) - np.dot(1 - Y, np.log(1 - AL).T))
    cost = np.squeeze(cost)
    assert (cost.shape == ())
    return cost

# Linear part (no activation) of one layer's backward pass
def linear_backward(dZ, cache):
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = 1 / m * np.dot(dZ, A_prev.T)
    db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
    assert (dA_prev.shape == A_prev.shape)
    return dA_prev, dW, db

# Backward pass of one layer through its activation function
def linear_activation_backward(dA, cache, activation):
    linear_cache, activation_cache = cache
    if activation == 'relu':
        dZ = dnn_utils_v2.relu_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    elif activation == 'sigmoid':
        dZ = dnn_utils_v2.sigmoid_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db

# Backward propagation through the whole model
def L_model_backward(AL, Y, caches):
    grads = {}
    L = len(caches)  # caches = [((X, W1, b1), Z1), ((A1, W2, b2), Z2), ((A2, W3, b3), Z3), ...]
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    dAL = -np.divide(Y, AL) + np.divide((1 - Y), (1 - AL))
    current_cache = caches[L - 1]
    grads['dA' + str(L)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_backward(dAL, current_cache, activation='sigmoid')
    for i in reversed(range(L - 1)):
        current_cache = caches[i]
        grads['dA' + str(i + 1)], grads['dW' + str(i + 1)], grads['db' + str(i + 1)] = linear_activation_backward(grads['dA' + str(i + 2)], current_cache, activation='relu')
    return grads

# Gradient-descent parameter update
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2
    for i in range(L):
        parameters['W' + str(i + 1)] = parameters['W' + str(i + 1)] - learning_rate * grads['dW' + str(i + 1)]
        parameters['b' + str(i + 1)] = parameters['b' + str(i + 1)] - learning_rate * grads['db' + str(i + 1)]
    return parameters
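As a quick sanity check (a minimal sketch, assuming the code above is saved as StartDeepNeural.py), one forward and backward pass on random data should produce gradients whose shapes match the parameters:

import numpy as np
import StartDeepNeural

np.random.seed(1)
X = np.random.randn(12288, 5)          # 5 random flattened "images"
Y = np.random.randint(0, 2, (1, 5))    # 5 random 0/1 labels

params = StartDeepNeural.initialize_parameters_deep([12288, 20, 7, 5, 1])
AL, caches = StartDeepNeural.L_model_forward(X, params)
print(AL.shape)                                    # (1, 5)
print(StartDeepNeural.compute_cost(AL, Y))         # a scalar cost

grads = StartDeepNeural.L_model_backward(AL, Y, caches)
for l in range(1, 5):
    assert grads['dW' + str(l)].shape == params['W' + str(l)].shape
    assert grads['db' + str(l)].shape == params['b' + str(l)].shape
print('gradient shapes match parameter shapes')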
DeepNeuralCat_noCat.py is the training script. First, inspect the dataset:
import numpy as np
import dnn_utils_v2
import matplotlib.pyplot as plt
import StartDeepNeural

train_x_orig, train_y_orig, test_x_orig, test_y_orig, classa = dnn_utils_v2.load_data()
print('train={}'.format(train_x_orig.shape))
print(train_y_orig.shape)
print(train_y_orig[:, :10])
print(test_x_orig.shape)
print(test_y_orig.shape)
print(classa)
plt.imshow(train_x_orig[0])
plt.show()
The printed output:
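The original post shows the console output as a screenshot; reconstructed from the shapes described below, it should look roughly like this (the first ten label values are omitted):

# train=(209, 64, 64, 3)
# (1, 209)
# [[...first 10 labels, each 0 or 1...]]
# (50, 64, 64, 3)
# (1, 50)
# [b'non-cat' b'cat']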
So there are 209 training examples and 50 test examples, each of shape (64, 64, 3); a label of 0 means the image is not a cat.
Next, flatten each image into a vector, giving a (209, 64*64*3) training matrix and a (50, 64*64*3) test matrix; both are then transposed and scaled so that each column is one example with pixel values in [0, 1].
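To see why the code reshapes with the number of examples as the first axis and then transposes, here is a toy sketch (a hypothetical 2-image batch, not part of the assignment) showing that each column of the result is exactly one image's pixels:

import numpy as np

toy = np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3)   # 2 tiny "images" of shape (2, 2, 3)

# reshape(m, -1) keeps each image's pixels together in its own row;
# the transpose then makes each column one example, the (n_x, m) layout used above
flat = toy.reshape(toy.shape[0], -1).T
print(flat.shape)                                    # (12, 2)
print(np.array_equal(flat[:, 0], toy[0].ravel()))    # True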
import numpy as np
import dnn_utils_v2
import matplotlib.pyplot as plt
import StartDeepNeural

train_x_orig, train_y_orig, test_x_orig, test_y_orig, classa = dnn_utils_v2.load_data()

# Flatten and scale the training set: (209, 64, 64, 3) -> (12288, 209)
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], train_x_orig.shape[1] * train_x_orig.shape[2] * 3).T
train_x = train_x_flatten / 255.

# Flatten and scale the test set: (50, 64, 64, 3) -> (12288, 50)
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], test_x_orig.shape[1] * test_x_orig.shape[2] * 3).T
test_x = test_x_flatten / 255.
Now train the model. The code below also implements a two-layer network for comparison, but it is not actually used.
import numpy as np
import dnn_utils_v2
import matplotlib.pyplot as plt
import StartDeepNeural

train_x_orig, train_y_orig, test_x_orig, test_y_orig, classa = dnn_utils_v2.load_data()

# Flatten and scale the training and test sets as described above
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], train_x_orig.shape[1] * train_x_orig.shape[2] * 3).T
train_x = train_x_flatten / 255.
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], test_x_orig.shape[1] * test_x_orig.shape[2] * 3).T
test_x = test_x_flatten / 255.

def two_layer_model(X, Y, num_iterations, learning_rate):
    n_x = 12288
    n_h = 7
    n_y = 1
    layer_dims = (n_x, n_h, n_y)
    parameters = StartDeepNeural.initialize_parameters_deep(layer_dims)
    costs = []
    for i in range(0, num_iterations):
        AL, caches = StartDeepNeural.L_model_forward(X, parameters)  # caches = [((X, W1, b1), Z1), ((A1, W2, b2), Z2)]
        cost = StartDeepNeural.compute_cost(AL, Y)
        grads = StartDeepNeural.L_model_backward(AL, Y, caches)
        parameters = StartDeepNeural.update_parameters(parameters, grads, learning_rate)
        if i % 100 == 0:
            print('iterations {}: cost {}'.format(i, cost))
            costs.append(cost)
    return costs, parameters

def predict(X, Y, parameters):
    AL, caches = StartDeepNeural.L_model_forward(X, parameters)  # AL.shape = (1, m)
    m = X.shape[1]
    p = np.zeros((1, m))
    for i in range(AL.shape[1]):
        if AL[0][i] > 0.5:
            p[0][i] = 1
        else:
            p[0][i] = 0
    # Count examples where prediction and label agree (both 1 or both 0)
    result = np.squeeze(np.dot(p, Y.T) + np.dot(1 - p, 1 - Y.T))
    accuracy = result / m
    return accuracy, p

def two_layer_model_test():
    costs, parameters = two_layer_model(train_x, train_y_orig, num_iterations=3000, learning_rate=0.0075)
    accuracy, p_train = predict(train_x, train_y_orig, parameters)
    print('train accuracy is {}'.format(accuracy))
    accuracy, p_test = predict(test_x, test_y_orig, parameters)
    print('test accuracy is {}'.format(accuracy))
    plt.plot(costs)
    plt.xlabel('iterations')
    plt.ylabel('costs')
    plt.title('learning rate is 0.0075')
    plt.show()

def L_layer_model(X, Y, layer_dims, learning_rate=0.0075, num_iterations=2000):
    parameters = StartDeepNeural.initialize_parameters_deep(layer_dims)
    costs = []
    for i in range(0, num_iterations):
        AL, caches = StartDeepNeural.L_model_forward(X, parameters)
        cost = StartDeepNeural.compute_cost(AL, Y)
        grads = StartDeepNeural.L_model_backward(AL, Y, caches)
        parameters = StartDeepNeural.update_parameters(parameters, grads, learning_rate)
        if i % 100 == 0:
            print('iterations {}: cost {}'.format(i, cost))
            costs.append(cost)
    return costs, parameters

def print_mislabel_images(classes, test_x, test_y_orig, p_test):
    a = test_y_orig + p_test
    # Entries equal to 1 mark examples where prediction and label disagree;
    # np.where returns their (row, col) indices and np.asarray stacks them into one array
    mislable_index = np.asarray(np.where(a == 1))
    plt.figure(figsize=(40, 40))  # large canvas so every mislabeled image stays visible
    num_images = len(mislable_index[0])
    for i in range(num_images):
        index = mislable_index[1][i]
        plt.subplot(2, num_images, i + 1)
        plt.imshow(test_x[:, index].reshape(64, 64, 3))
        plt.title('prediction ' + classes[int(p_test[0][index])].decode('utf-8') + ' real ' + classes[int(test_y_orig[0][index])].decode('utf-8'))
    plt.savefig('1.jpg')

def L_layer_model_test():
    layers_dims = [12288, 20, 7, 5, 1]
    costs, parameters = L_layer_model(train_x, train_y_orig, layers_dims)
    accuracy, p_train = predict(train_x, train_y_orig, parameters)
    print('train accuracy is {}'.format(accuracy))
    accuracy, p_test = predict(test_x, test_y_orig, parameters)
    print('test accuracy is {}'.format(accuracy))
    print_mislabel_images(classa, test_x, test_y_orig, p_test)
    plt.show()

if __name__ == '__main__':
    L_layer_model_test()
Running L_layer_model_test() prints the cost every 100 iterations and then the training and test accuracy, and saves the mislabeled test images to 1.jpg.
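Finally, a hedged sketch (not part of the original post) of using the trained parameters to classify a single new photo; it assumes Pillow is installed, 'my_cat.jpg' is a hypothetical file name, and parameters comes from the L_layer_model call above:

import numpy as np
from PIL import Image
import StartDeepNeural

img = Image.open('my_cat.jpg').convert('RGB').resize((64, 64))   # hypothetical input photo
x = np.array(img).reshape(64 * 64 * 3, 1) / 255.                 # same flatten + scaling as the training data

AL, _ = StartDeepNeural.L_model_forward(x, parameters)           # parameters obtained from training above
print('cat' if AL[0, 0] > 0.5 else 'non-cat')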