import tensorflow as tf
import numpy
import pickle
# import matplotlib.pyplot as plt
# 1. Load the input.
# Each CIFAR-10 training batch is stored as a dict: "data" holds 10000 32*32*3 images,
# "labels" holds digits 0-9 for the 10 classes, "filenames" holds the 10000 image names,
# and "batch_label" is the batch index.
# Output: X of shape (10000, 3072) and Y of shape (10000,) with labels 0-9
def load_cifar_10_data_batch(filename):
    with open(filename, 'rb') as file:
        data_dictionary = pickle.load(file, encoding='latin1')
    X = data_dictionary['data']        # X.shape = (10000, 3072)
    Y = data_dictionary['labels']
    # filenames = data_dictionary['filenames']
    # batch_label = data_dictionary['batch_label']
    # X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(float)
    Y = numpy.array(Y)
    return X, Y

path = r'G:\DEEP LEARING'
X_train, Y_train = load_cifar_10_data_batch(path + '/cifar-10-batches-py/data_batch_1')
X_test, Y_test = load_cifar_10_data_batch(path + '/cifar-10-batches-py/test_batch')
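# The script above trains on data_batch_1 only. A sketch (not part of the original code) of how all
# five training batches could be combined, assuming the standard cifar-10-batches-py file names
# data_batch_1 ... data_batch_5:
def load_cifar_10_all_batches(folder):
    xs_list, ys_list = [], []
    for i in range(1, 6):
        X_i, Y_i = load_cifar_10_data_batch(folder + '/data_batch_%d' % i)
        xs_list.append(X_i)
        ys_list.append(Y_i)
    return numpy.concatenate(xs_list), numpy.concatenate(ys_list)
# X_train, Y_train = load_cifar_10_all_batches(path + '/cifar-10-batches-py')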
# 2. Helper functions for building the network
# 2.1 Weight initialization: tf.random_normal draws values of the given shape from a normal
#     distribution with user-chosen mean and stddev; shape = [height, width, channels, filters]
def weights(shape):
    return tf.Variable(tf.random_normal(shape=shape, mean=0, stddev=0.1))
# 2.2 Bias initialization: tf.constant creates a constant of the given shape
def biases(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))
# 2.3 Convolution; x = [m, height, width, channels], where m is the number of training examples
def conv2d(x, W):
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', use_cudnn_on_gpu=True)
# 2.4 Pooling; pooling can be seen as a special kind of convolution
def max_pool(x):
    return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# 2.5 Compute the accuracy on the test set
def compute_accuracy(x_test, y_test):
    global predict_function
    predict_test = sess.run(predict_function, feed_dict={xs: x_test})
    # Count how often the predicted class (argmax) matches the argmax of the one-hot label
    y_test_one_hot = tf.one_hot(y_test, depth=10)
    correct_num = tf.equal(tf.argmax(predict_test, 1), tf.argmax(y_test_one_hot, 1))
    # The mean of the correct/incorrect indicators, cast to float32, is the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_num, tf.float32))
    return sess.run(accuracy, feed_dict={xs: x_test, ys: y_test})
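# Note (not part of the original code): compute_accuracy adds new one_hot/equal/reduce_mean ops to
# the graph on every call, so the graph keeps growing during training. A sketch of a fix, assuming
# it is placed after predict_function and the placeholders below have been defined: build the
# accuracy ops once and only run them inside the function.
# correct_prediction = tf.equal(tf.argmax(predict_function, 1), tf.cast(ys, tf.int64))
# accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# def compute_accuracy(x_test, y_test):
#     return sess.run(accuracy_op, feed_dict={xs: x_test, ys: y_test})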
# 3. Define the inputs: xs is the training input, an m*3072 matrix; ys is the m-vector of training labels
xs = tf.placeholder(tf.float32, [None, 32 * 32 * 3])
ys = tf.placeholder(tf.int32, [None])
# 4. Build the network
# x0 is the input reshaped to m*32*32*3: m images of size 32*32 with 3 channels,
# with pixel values scaled from [0, 255] down to [0, 1]
x0 = tf.transpose(tf.reshape(xs / 255.0, [-1, 3, 32, 32]), perm=[0, 2, 3, 1])
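# Illustrative check (not part of the original script): CIFAR-10 stores each row as 1024 red, then
# 1024 green, then 1024 blue values, so reshaping one row to (3, 32, 32) and moving the channel
# axis last mirrors what x0 does inside the graph.
first_image_hwc = X_train[0].reshape(3, 32, 32).transpose(1, 2, 0)
assert first_image_hwc.shape == (32, 32, 3)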
# 4.1 Convolutional layers: conv, pool, conv again, pool again, then flatten
W_conv1 = weights([5, 5, 3, 20])
b_conv1 = biases([20])
a_conv1 = conv2d(x0, W_conv1) + b_conv1
z_conv1 = tf.nn.relu(a_conv1)
h_conv1 = max_pool(z_conv1)
W_conv2 = weights([5, 5, 20, 50])
b_conv2 = biases([50])
a_conv2 = conv2d(h_conv1, W_conv2) + b_conv2
z_conv2 = tf.nn.relu(a_conv2)
h_conv2 = max_pool(z_conv2)
# x_conv0 is the conv/pool output flattened to 8*8*50 = 3200 values per image
x_conv0 = tf.reshape(h_conv2, [-1, 8 * 8 * 50])
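# Sanity check (illustrative, not in the original script): with padding='SAME' the stride-1
# convolutions keep the spatial size and each 2x2/stride-2 max-pool halves it, 32 -> 16 -> 8,
# so the flattened length is 8*8*50 = 3200.
print(h_conv1.get_shape())   # (?, 16, 16, 20)
print(h_conv2.get_shape())   # (?, 8, 8, 50)
print(x_conv0.get_shape())   # (?, 3200)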
# 4.2 Fully connected layers: 3200, 1024 and 10 units per layer
W_fc1 = weights([8 * 8 * 50, 1024])
b_fc1 = biases([1024])
a_fc1 = tf.matmul(x_conv0, W_fc1) + b_fc1
z_fc1 = tf.nn.sigmoid(a_fc1)
W_fc2 = weights([1024, 10])
b_fc2 = biases([10])
a_fc2 = tf.matmul(z_fc1, W_fc2) + b_fc2
# z_fc2 = tf.nn.relu(a_fc2)

# 4.3 Prediction function and cost function (the simplest choice)
predict_function = a_fc2
ys_one_hot = tf.one_hot(ys, depth=10)
# cost_function = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(predict_function), reduction_indices=[1]))
cost_function = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys_one_hot, logits=predict_function))
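# Alternative cost (a sketch, not part of the original code): because ys already holds integer
# class indices, tf.nn.sparse_softmax_cross_entropy_with_logits computes the same loss without
# building ys_one_hot first:
# cost_function = tf.reduce_mean(
#     tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys, logits=predict_function))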
# 4.4 Gradient descent to minimize cost_function
train_step = tf.train.GradientDescentOptimizer(1e-4).minimize(cost_function)
# train_step = tf.train.AdamOptimizer(1e-3).minimize(cost_function)

# 5. Train the network and print progress
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for i in range(20):                        # 20 passes over the training batch
    for t in range(0, 10000 - 32, 32):     # mini-batches of 32 images
        iter = t // 32
        xs_batch, ys_batch = X_train[t:t + 32], Y_train[t:t + 32]
        sess.run(train_step, feed_dict={xs: xs_batch, ys: ys_batch})
        if iter % 10 == 0:
            cost = sess.run(cost_function, feed_dict={xs: xs_batch, ys: ys_batch})
            accuracy = compute_accuracy(X_test[:10000], Y_test[:10000])
            print('iters: %s, cost: %s, accuracy: %s' % (iter, cost, accuracy))
# saver.save(sess, '/Users/wanglei/Downloads/test/model.ckpt')
sess.close()
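# Reloading the model later (a sketch, not part of the original code, assuming the commented-out
# saver.save call above was used with the same checkpoint path):
# with tf.Session() as sess:
#     saver.restore(sess, '/Users/wanglei/Downloads/test/model.ckpt')
#     print('test accuracy: %s' % compute_accuracy(X_test[:1000], Y_test[:1000]))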