# TensorFlow 1.x graph-mode script: two-hidden-layer MNIST classifier
# with TensorBoard summaries for weights, biases, loss, and accuracy.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot labels (downloads to MNIST_data/ on first run).
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
def variable_summaries(var):
    """Attach mean/stddev/max/min scalar summaries and a histogram to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size  # batches per epoch
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x')  # flattened 28x28 pixel images
    y = tf.placeholder(tf.float32, [None, 10], name='y')   # one-hot digit labels

with tf.name_scope('parameter'):
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # dropout keep probability
with tf.name_scope('network'):
    with tf.name_scope('network_parameter'):
        lr = tf.Variable(0.01, name='lr')  # learning rate, decayed once per epoch
L1_size = 500  # units in hidden layer 1
L2_size = 300  # units in hidden layer 2

with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.truncated_normal([784, L1_size], stddev=0.1), name='weight_L1')
    variable_summaries(W1)
    b1 = tf.Variable(tf.zeros([1, L1_size]) + 0.1, name='bias_L1')
    variable_summaries(b1)
    with tf.name_scope('wx_plus_b_L1'):
        z1 = tf.matmul(x, W1) + b1
    with tf.name_scope('tanh_L1'):
        a1 = tf.nn.tanh(z1)
    with tf.name_scope('dropout_L1'):
        L1_drop = tf.nn.dropout(a1, keep_prob)

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.truncated_normal([L1_size, L2_size], stddev=0.1), name='weight_L2')
    variable_summaries(W2)
    b2 = tf.Variable(tf.zeros([1, L2_size]) + 0.1, name='bias_L2')
    variable_summaries(b2)
    with tf.name_scope('wx_plus_b_L2'):
        z2 = tf.matmul(L1_drop, W2) + b2
    with tf.name_scope('tanh_L2'):
        a2 = tf.nn.tanh(z2)
    with tf.name_scope('dropout_L2'):
        L2_drop = tf.nn.dropout(a2, keep_prob)

with tf.name_scope('output_layer'):
    Wout = tf.Variable(tf.truncated_normal([L2_size, 10], stddev=0.1), name='weight_Lout')
    variable_summaries(Wout)
    bout = tf.Variable(tf.zeros([1, 10]) + 0.1, name='bias_Lout')
    variable_summaries(bout)
    with tf.name_scope('wx_plus_b_Lout'):
        zout = tf.matmul(L2_drop, Wout) + bout
    with tf.name_scope('softmax_Lout'):
        prediction = tf.nn.softmax(zout)

with tf.name_scope('loss'):
    # Pass the raw logits (zout), not the softmax output: softmax_cross_entropy_with_logits
    # applies softmax internally, so feeding `prediction` would apply softmax twice.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=zout))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)

init = tf.global_variables_initializer()
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    with tf.name_scope('accuracy_calculate'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()  # single op that evaluates every summary defined above
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for epoch in range(51):
        # Exponential decay from the 0.01 starting value: lr = 0.01 * 0.95^epoch.
        # (The original assigned lr * 0.95**epoch, which compounds the decay on
        # top of the already-decayed value and shrinks lr far too fast.)
        sess.run(tf.assign(lr, 0.01 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob=1.0 disables dropout; lower it (e.g. 0.7) to actually regularize.
            summary, _ = sess.run([merged, train_step],
                                  feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        writer.add_summary(summary, epoch)  # log the last batch's summaries once per epoch
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images,
                                                  y: mnist.train.labels, keep_prob: 1.0})
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                 y: mnist.test.labels, keep_prob: 1.0})
        print("Iter " + str(epoch) +
              " | Training Accuracy: " + str(train_acc) +
              " | Testing Accuracy: " + str(test_acc) +
              " | Learning Rate: " + str(sess.run(lr)))
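# Once training finishes, the graph and the logged scalars/histograms can be
# viewed by running `tensorboard --logdir=logs` from this script's directory
# and opening the reported URL (http://localhost:6006 by default).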