Convolutional Neural Networks (CNN): No Theory, Only Implementation
0. Notes:

All the code in this post can be found in DML; stars are welcome.

Note: this CNN code is very slow and has basically no practical use. I am posting it only to show that I have actually implemented the thing.
1. Introduction:

CNN is a model with quite some years behind it, but it has lately been rejuvenated by the rise of deep learning and has pushed ImageNet test accuracy up considerably: first Alex's work, and more recently Zeiler seems to have made another breakthrough. Unfortunately I have read neither paper, mainly because the ImageNet data is far too large for me to have any hope of running it, which dampens my enthusiasm for studying them. Enough talk; let's implement a most basic CNN first:
2. Implementation:

Well, I basically wrote this following the structure of DeepLearnToolbox, according to "Notes on Convolutional Neural Networks". You can first read my code walkthrough: [Code-Oriented] Learning Deep Learning (3): Convolution Neural Network (CNN).

I originally wanted to write it exactly as the Notes paper describes, but I found that once the subsampling layer gets a sigmoid, the whole network stops converging. I checked with numeric_grad_check and the gradient computation is correct, so I have no idea why. I uploaded that version too (the .old file), but the code below had to be changed to a slightly simplified version. It seems that in the usual setup the pooling (subsampling) layer of a CNN has no sigmoid anyway, so let's leave it at that. This thing is simple to understand, yet it still cost me two afternoons to write... painful...
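For reference, the kind of numerical gradient check mentioned above can be written in a few lines. This is a minimal sketch rather than the gradcheck() shipped in the repository; `loss` (a zero-argument function returning the scalar loss), `params`, and `analytic_grad` are placeholder names:

```python
import numpy as np

def numeric_grad_check(loss, params, analytic_grad, eps=1e-4, tol=1e-6):
    """Compare an analytic gradient against central finite differences."""
    it = np.nditer(params, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = params[idx]
        params[idx] = old + eps
        lp = loss()                   # loss with the parameter nudged up
        params[idx] = old - eps
        lm = loss()                   # loss with the parameter nudged down
        params[idx] = old             # restore the parameter
        num = (lp - lm) / (2 * eps)   # central-difference estimate
        assert abs(num - analytic_grad[idx]) < tol, (idx, num, analytic_grad[idx])
        it.iternext()
```

A check like this only validates that the backward pass matches the forward pass; it says nothing about whether the architecture can converge, which is exactly the situation described above.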
Code: DML/CNN/cnn.py
```python
from __future__ import division
import numpy as np
import scipy as sp
from scipy.signal import convolve as conv
from dml.tool import sigmoid, expand, showimage
from numpy import rot90
'''
This code referred to the DeepLearnToolbox
(https://github.com/rasmusbergpalm/DeepLearnToolbox) and also to:
    [1] "Notes on Convolutional Neural Networks", Jake Bouvrie, 2006 -
        How to implement CNNs
I wanted to implement it as [1] describes, where the subsampling layer has a
sigmoid function, but that version does not converge even though it passes the
gradient check!! (that version is dml/CNN/cnn.py.old; if you can figure out
what is wrong in the code, PLEASE LET ME KNOW)
In the end I changed the code back to the simple version and deleted the
sigmoid in the 's' layer.
PS: this Python code is too slow! Don't use it for anything except reading.
'''

class LayerC:
    # types: 'i' = input layer, 'c' = convolution layer, 's' = subsampling layer
    def __init__(self, types='i', out=0, scale=0, kernelsize=0):
        self.types = types
        self.a = None        # activations (one map per key)
        self.b = None        # biases
        self.d = None        # deltas
        if types == 'i':
            pass
        elif types == 'c':
            self.out = out                # number of output maps
            self.kernelsize = kernelsize
            self.k = None                 # convolution kernels
        elif types == 's':
            self.scale = scale            # pooling factor
            self.Beta = {}
            self.dBeta = {}

class CNNC:
    def __init__(self, X, y, layers, opts):
        self.X = np.array(X)
        self.y = np.array(y)
        self.layers = layers
        self.opts = opts
        inputmap = 1
        mapsize = np.array(self.X[0].shape)

        for i in range(len(self.layers)):
            if self.layers[i].types == 's':
                mapsize = mapsize / self.layers[i].scale
                assert np.sum(np.floor(mapsize) == mapsize) == mapsize.size
                self.layers[i].b = {}
                self.layers[i].db = {}
                for j in range(inputmap):
                    self.layers[i].b.setdefault(j, 0)
                    self.layers[i].db.setdefault(j, 0)
                    self.layers[i].Beta.setdefault(j, 1)
                    self.layers[i].dBeta.setdefault(j, 0.0)
            if self.layers[i].types == 'c':
                mapsize = mapsize - self.layers[i].kernelsize + 1
                fan_out = self.layers[i].out * self.layers[i].kernelsize ** 2
                self.layers[i].k = {}
                self.layers[i].dk = {}
                self.layers[i].b = {}
                self.layers[i].db = {}
                for j in range(self.layers[i].out):
                    fan_in = inputmap * self.layers[i].kernelsize ** 2
                    for t in range(inputmap):
                        # normalized (Glorot-style) random initialization
                        self.layers[i].k.setdefault(t, {})
                        self.layers[i].k[t].setdefault(j)
                        self.layers[i].k[t][j] = (np.random.rand(self.layers[i].kernelsize, self.layers[i].kernelsize) - 0.5) * 2 * np.sqrt(6 / (fan_out + fan_in))
                        self.layers[i].dk.setdefault(t, {})
                        self.layers[i].dk[t].setdefault(j)
                        self.layers[i].dk[t][j] = np.zeros(self.layers[i].k[t][j].shape)
                    self.layers[i].b.setdefault(j, 0)
                    self.layers[i].db.setdefault(j, 0)
                inputmap = self.layers[i].out
            if self.layers[i].types == 'i':
                pass
        # fully connected output layer
        fvnum = np.prod(mapsize) * inputmap
        onum = self.y.shape[0]
        self.ffb = np.zeros((onum, 1))
        self.ffW = (np.random.rand(onum, fvnum) - 0.5) * 2 * np.sqrt(6 / (onum + fvnum))

    def cnnff(self, x):
        self.layers[0].a = {}
        self.layers[0].a.setdefault(0)
        self.layers[0].a[0] = x.copy()
        inputmap = 1
        n = len(self.layers)

        for l in range(1, n):
            if self.layers[l].types == 's':
                for j in range(inputmap):
                    # mean pooling: convolve with an averaging kernel,
                    # then keep every scale-th element
                    temp = np.ones((self.layers[l].scale, self.layers[l].scale)) / (self.layers[l].scale ** 2)
                    z = conv(self.layers[l - 1].a[j], np.array([temp]), 'valid')
                    z = np.array(z)[:, ::self.layers[l].scale, ::self.layers[l].scale]
                    if self.layers[l].a is None:
                        self.layers[l].a = {}
                    self.layers[l].a.setdefault(j)
                    self.layers[l].a[j] = z

            if self.layers[l].types == 'c':
                if self.layers[l].a is None:
                    self.layers[l].a = {}
                for j in range(self.layers[l].out):    # for each output map
                    z = np.zeros(self.layers[l - 1].a[0].shape - np.array([0, self.layers[l].kernelsize - 1, self.layers[l].kernelsize - 1]))
                    for i in range(inputmap):          # accumulate over input maps
                        z += conv(self.layers[l - 1].a[i], np.array([self.layers[l].k[i][j]]), 'valid')
                    self.layers[l].a.setdefault(j)
                    self.layers[l].a[j] = sigmoid(z + self.layers[l].b[j])
                inputmap = self.layers[l].out

        # flatten the last layer's maps into one feature vector per sample
        self.fv = None
        for j in range(len(self.layers[n - 1].a)):
            sa = self.layers[n - 1].a[j].shape
            p = self.layers[n - 1].a[j].reshape(sa[0], sa[1] * sa[2]).copy()
            if self.fv is None:
                self.fv = p
            else:
                self.fv = np.concatenate((self.fv, p), axis=1)
        self.fv = self.fv.transpose()
        self.o = sigmoid(np.dot(self.ffW, self.fv) + self.ffb)

    def cnnbp(self, y):
        n = len(self.layers)
        self.e = self.o - y
        self.L = 0.5 * np.sum(self.e ** 2) / self.e.shape[1]   # squared-error loss
        self.od = self.e * (self.o * (1 - self.o))             # output-layer delta
        self.fvd = np.dot(self.ffW.transpose(), self.od)
        if self.layers[n - 1].types == 'c':
            self.fvd = self.fvd * (self.fv * (1 - self.fv))
        # reshape the flat deltas back into the last layer's maps
        sa = self.layers[n - 1].a[0].shape
        fvnum = sa[1] * sa[2]
        for j in range(len(self.layers[n - 1].a)):
            if self.layers[n - 1].d is None:
                self.layers[n - 1].d = {}
            self.layers[n - 1].d.setdefault(j)
            self.layers[n - 1].d[j] = self.fvd[(j * fvnum):((j + 1) * fvnum), :].transpose().reshape(sa[0], sa[1], sa[2])

        for l in range(n - 2, -1, -1):
            if self.layers[l].types == 'c':
                for j in range(len(self.layers[l].a)):
                    if self.layers[l].d is None:
                        self.layers[l].d = {}
                    self.layers[l].d.setdefault(j)
                    # upsample the next (pooling) layer's delta with kron
                    self.layers[l].d[j] = self.layers[l].a[j] * (1 - self.layers[l].a[j]) * np.kron(self.layers[l + 1].d[j], np.ones((self.layers[l + 1].scale, self.layers[l + 1].scale)) / (self.layers[l + 1].scale ** 2))
            elif self.layers[l].types == 's':
                for j in range(len(self.layers[l].a)):
                    if self.layers[l].d is None:
                        self.layers[l].d = {}
                    self.layers[l].d.setdefault(j)
                    z = np.zeros(self.layers[l].a[0].shape)
                    for i in range(len(self.layers[l + 1].a)):
                        # full convolution with the 180-degree-rotated kernel
                        rotated = np.array([rot90(self.layers[l + 1].k[j][i], 2)])
                        z = z + conv(self.layers[l + 1].d[i], rotated, 'full')
                    self.layers[l].d[j] = z

        # kernel and bias gradients, averaged over the batch
        for l in range(1, n):
            m = self.layers[l].d[0].shape[0]
            if self.layers[l].types == 'c':
                for j in range(len(self.layers[l].a)):
                    for i in range(len(self.layers[l - 1].a)):
                        #self.layers[l].dk[i][j]=rot90(conv(self.layers[l-1].a[i],rot90(self.layers[l].d[j],2),'valid'),2)
                        self.layers[l].dk[i][j] = self.layers[l].dk[i][j] * 0
                        for t in range(self.layers[l].d[0].shape[0]):
                            self.layers[l].dk[i][j] += rot90(conv(self.layers[l - 1].a[i][t], rot90(self.layers[l].d[j][t], 2), 'valid'), 2)
                        self.layers[l].dk[i][j] = self.layers[l].dk[i][j] / m
                    self.layers[l].db[j] = np.sum(self.layers[l].d[j]) / m
        self.dffW = np.dot(self.od, self.fv.transpose()) / self.od.shape[1]
        self.dffb = np.mean(self.od, 1).reshape(self.ffb.shape)

    def cnnapplygrads(self, alpha=0.1):
        # plain SGD step
        n = len(self.layers)
        for l in range(1, n):
            if self.layers[l].types == 'c':
                for j in range(len(self.layers[l].a)):
                    for i in range(len(self.layers[l - 1].a)):
                        self.layers[l].k[i][j] -= alpha * self.layers[l].dk[i][j]
                    self.layers[l].b[j] -= alpha * self.layers[l].db[j]
        self.ffW -= alpha * self.dffW
        self.ffb -= alpha * self.dffb

    def train(self):
        m = self.X.shape[0]
        batchsize = self.opts['batchsize']
        numbatches = int(m / batchsize)   # int() so range() gets an integer
        print numbatches
        self.rL = []
        for i in range(self.opts['numepochs']):
            print 'the %d -th epoch is running' % (i + 1)
            kk = np.random.permutation(m)
            for j in range(numbatches):
                print 'the %d -th batch is running , totally %d batchs' % ((j + 1), numbatches)
                batch_x = self.X[kk[j * batchsize:(j + 1) * batchsize], :, :].copy()
                batch_y = self.y[:, kk[j * batchsize:(j + 1) * batchsize]].copy()
                self.cnnff(batch_x)
                self.cnnbp(batch_y)
                self.cnnapplygrads(alpha=self.opts['alpha'])
                # smoothed running loss
                if len(self.rL) == 0:
                    self.rL.append(self.L)
                else:
                    p = self.rL[len(self.rL) - 1]
                    self.rL.append(p * 0.99 + 0.1 * self.L)
                print self.L

    def gradcheck(self, test_x, test_y):
        # this part of the code is available on GitHub
        pass

    def test(self, test_x, test_y):
        self.cnnff(np.array(test_x))
        p = self.o.argmax(axis=0)
        bad = np.sum(p != np.array(test_y).argmax(axis=0))
        print p, np.array(test_y).argmax(axis=0)
        print bad
        print np.array(test_y).shape[1]
        er = bad / np.array(test_y).shape[1]
        print er

    def pred(self, test_x):
        self.cnnff(np.array(test_x))
        p = self.o.argmax(axis=0)
        return p
```
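One detail worth pointing out: the 's' layer implements mean pooling with the same convolve call used everywhere else, by convolving with a scale x scale averaging kernel and then keeping every scale-th element, while cnnbp pushes the deltas back through that layer by upsampling with np.kron against the same averaging factor. A standalone illustration of the forward trick (made-up data, not part of the repository):

```python
import numpy as np
from scipy.signal import convolve as conv

a = np.arange(16, dtype=float).reshape(1, 4, 4)   # a "batch" of one 4x4 feature map
temp = np.ones((2, 2)) / 4                        # 2x2 averaging kernel
z = conv(a, np.array([temp]), 'valid')[:, ::2, ::2]
print(z)   # each entry is the mean of one non-overlapping 2x2 block
```

Since the averaging kernel is symmetric, the kernel flip that convolution performs makes no difference here.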
3. Testing

Because Python runs this code painfully slowly (the main reason, I think, is that the convolution function, scipy.signal.convolve in my case, is far slower than MATLAB's), one epoch of SGD on MNIST with batches of 50 takes twenty to thirty minutes. So I suggest you do not use this code; DeepLearnToolbox is faster than this...
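If you want to confirm where the time goes, a throwaway benchmark along these lines isolates the convolve call (an illustrative sketch, not from the original post; the sizes are made up and the timing is machine-dependent):

```python
import timeit
import numpy as np
from scipy.signal import convolve

x = np.random.rand(50, 24, 24)    # a batch of 50 feature maps
k = np.random.rand(1, 5, 5)       # one 5x5 kernel shared across the batch
t = timeit.timeit(lambda: convolve(x, k, 'valid'), number=10) / 10
print('%.4f s per batched valid convolution' % t)
```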
Test it with: test/CNN_test/test_cnn.py

```python
layers = [LayerC('i'),
          LayerC('c', out=6, kernelsize=5),
          LayerC('s', scale=2),
          LayerC('c', out=12, kernelsize=5),
          LayerC('s', scale=2)]
opts = {}
opts['batchsize'] = 40
opts['numepochs'] = 1
opts['alpha'] = 1

a = CNNC(X, groundTruth, layers, opts)
#a.gradcheck(test_x[1:3,:,:], test_groundTruth[:,1:3])
a.train()
a.test(test_x, test_groundTruth)
```
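With the standard 28x28 MNIST images, this configuration gives map sizes of 28 - 5 + 1 = 24 after the first convolution, 24 / 2 = 12 after the first pooling, 12 - 5 + 1 = 8 after the second convolution, and 8 / 2 = 4 after the second pooling, so the flattened feature vector fed into the output layer has 4 x 4 x 12 = 192 entries.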
Here is the result after one epoch: 89.99% accuracy, which should be about right.
From: http://arc9.riaos.com/?p=6350
總結(jié)
以上是生活随笔為你收集整理的卷积神经网络CNN(Convolutional Neural Networks)没有原理只有实现的全部?jī)?nèi)容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Hadoop MapReduce的模式、
- 下一篇: 深度学习课程Deep Learning