生活随笔
收集整理的这篇文章主要介绍了
深度学习(三)theano学习笔记(2)基础函数-未完待续
小编觉得挺不错的,现在分享给大家,帮大家做个参考。
theano学习笔记(2)基础函数
1、随机函数库的调用
2、卷积神经网络
[python] view plaincopy
import theano
import theano.tensor as T
# fix: `import theano.tensor as T` does NOT import the `signal` subpackage,
# so the original `T.signal.downsample.max_pool_2d` call raised AttributeError.
# `pool.pool_2d` is also the supported replacement for the deprecated
# `downsample.max_pool_2d`.
from theano.tensor.signal import pool
import numpy as np
import matplotlib.pyplot as plt
from loaddata import loadmnist


class softmax:
    """Softmax (multinomial logistic regression) output layer.

    Builds symbolic expressions only; nothing is evaluated here.

    Parameters:
        hiddata: symbolic matrix of layer inputs (one row per example).
        outdata: symbolic int vector of target class indices.
        nin:     input dimensionality.
        nout:    number of classes.
    """

    def __init__(self, hiddata, outdata, nin, nout):
        # Zero init is fine here: the softmax loss is convex in (w, b).
        self.w = theano.shared(value=np.zeros((nin, nout), dtype=theano.config.floatX), name='w')
        self.b = theano.shared(value=np.zeros((nout,), dtype=theano.config.floatX), name='b')
        prey = T.nnet.softmax(T.dot(hiddata, self.w) + self.b)
        # Mean negative log-likelihood of the correct class.
        self.loss = -T.mean(T.log(prey)[T.arange(outdata.shape[0]), outdata])
        self.para = [self.w, self.b]
        self.predict = T.argmax(prey, axis=1)
        # Misclassification rate over the batch.
        self.error = T.mean(T.neq(T.argmax(prey, axis=1), outdata))


class HiddenLayer:
    """Fully connected tanh layer with Glorot-style uniform init."""

    def __init__(self, inputx, nin, nout):
        # Uniform(-a, a) with a = sqrt(6 / (fan_in + fan_out)) — the standard
        # initialization for tanh units.
        a = np.sqrt(6. / (nin + nout))
        ranmatrix = np.random.uniform(-a, a, (nin, nout))
        self.w = theano.shared(value=np.asarray(ranmatrix, dtype=theano.config.floatX), name='w')
        self.b = theano.shared(value=np.zeros((nout,), dtype=theano.config.floatX), name='b')
        self.out = T.tanh(T.dot(inputx, self.w) + self.b)
        self.para = [self.w, self.b]


class mlp:
    """One-hidden-layer MLP trained with plain SGD (fixed 0.1 learning rate)."""

    def __init__(self, nin, nhid, nout):
        x = T.fmatrix('x')
        y = T.ivector('y')
        hlayer = HiddenLayer(x, nin, nhid)
        olayer = softmax(hlayer.out, y, nhid, nout)
        paras = hlayer.para + olayer.para
        dparas = T.grad(olayer.loss, paras)
        updates = [(para, para - 0.1 * dpara) for para, dpara in zip(paras, dparas)]
        self.trainfunction = theano.function(inputs=[x, y], outputs=olayer.loss, updates=updates)

    def train(self, trainx, trainy):
        """Run one SGD step on a batch and return the batch loss."""
        return self.trainfunction(trainx, trainy)


class LeNetConvPoolLayer:
    """Convolution + max-pooling + tanh layer (LeNet style).

    Parameters:
        inputx:       symbolic 4D tensor (batch, channels, rows, cols).
        img_shape:    expected input shape, used only for the sanity assert.
        filter_shape: (n_filters, n_channels, filter_rows, filter_cols).
        poolsize:     max-pool window, default (2, 2).
    """

    def __init__(self, inputx, img_shape, filter_shape, poolsize=(2, 2)):
        # Input channel count must match the filters' channel count.
        assert img_shape[1] == filter_shape[1]
        # NOTE(review): this uses filter_shape[0]+filter_shape[1] as the fan
        # sum, not the usual prod-of-receptive-field fan-in/out; kept as-is to
        # preserve the original initialization scale.
        a = np.sqrt(6. / (filter_shape[0] + filter_shape[1]))
        v = np.random.uniform(low=-a, high=a, size=filter_shape)
        wvalue = np.asarray(v, dtype=theano.config.floatX)
        self.w = theano.shared(value=wvalue, name='w')
        bvalue = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=bvalue, name='b')
        # Valid-mode convolution: output spatial size shrinks by filter-1.
        covout = T.nnet.conv2d(inputx, self.w)
        # fix: was T.signal.downsample.max_pool_2d (unimportable / deprecated).
        # ignore_border=True is identical here since all pooled dims are even.
        covpool = pool.pool_2d(covout, poolsize, ignore_border=True)
        # One bias per filter, broadcast over batch and spatial dims.
        self.out = T.tanh(covpool + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.para = [self.w, self.b]
# ---- LeNet-style CNN training script on MNIST ----
trainx, trainy = loadmnist()
trainx = trainx.reshape(-1, 1, 28, 28)
batch_size = 30
m = trainx.shape[0]
# fix: `//` — plain `/` yields a float under true division, breaking range(ne).
ne = m // batch_size
batchx = T.tensor4(name='batchx', dtype=theano.config.floatX)
batchy = T.ivector('batchy')
# 28x28 --conv 5x5--> 24x24 --pool 2x2--> 12x12
cov1_layer = LeNetConvPoolLayer(inputx=batchx, img_shape=(batch_size, 1, 28, 28), filter_shape=(20, 1, 5, 5))
# 12x12 --conv 5x5--> 8x8 --pool 2x2--> 4x4
cov2_layer = LeNetConvPoolLayer(inputx=cov1_layer.out, img_shape=(batch_size, 20, 12, 12), filter_shape=(50, 20, 5, 5))
# Flatten feature maps to (batch, 50*4*4) for the fully connected layers.
cov2out = cov2_layer.out.flatten(2)
hlayer = HiddenLayer(cov2out, 4 * 4 * 50, 500)
olayer = softmax(hlayer.out, batchy, 500, 10)
paras = cov1_layer.para + cov2_layer.para + hlayer.para + olayer.para
dparas = T.grad(olayer.loss, paras)
# Plain SGD, fixed learning rate 0.1.
updates = [(para, para - 0.1 * dpara) for para, dpara in zip(paras, dparas)]
train_function = theano.function(inputs=[batchx, batchy], outputs=olayer.loss, updates=updates)
test_function = theano.function(inputs=[batchx, batchy], outputs=[olayer.error, olayer.predict])
testx, testy = loadmnist(True)
testx = testx.reshape(-1, 1, 28, 28)
train_history = []
test_history = []
for it in range(20):
    # fix: renamed from `sum`, which shadowed the builtin.
    loss_sum = 0.
    for i in range(ne):
        lo = i * batch_size
        hi = lo + batch_size
        loss_train = train_function(trainx[lo:hi], trainy[lo:hi])
        loss_sum += loss_train
    loss_sum /= ne
    # fix: print() calls — the Python 2 print statement is a syntax error on Python 3.
    print('train_loss:', loss_sum)
    # NOTE(review): the whole test set is fed as one batch; conv2d tolerates a
    # batch size different from img_shape, but this can be memory-hungry.
    test_error, predict = test_function(testx, testy)
    print('test_error:', test_error)
    train_history.append(loss_sum)
    test_history.append(test_error)
n = len(train_history)
fig1 = plt.subplot(111)
fig1.set_ylim(0.001, 0.2)
fig1.plot(np.arange(n), train_history, '-')
# fix: without show() the figure never appears when run as a script.
plt.show()
from: http://blog.csdn.net/hjimce/article/details/46806923
《新程序员》:云原生和全面数字化实践50位技术专家共同创作,文字、视频、音频交互阅读
总结
以上是生活随笔为你收集整理的深度学习(三)theano学习笔记(2)基础函数-未完待续的全部内容,希望文章能够帮你解决所遇到的问题。
如果觉得生活随笔网站内容还不错,欢迎将生活随笔推荐给好友。