
Tensorflow-Notes: TensorBoard

GRAPHS

Notes

Note1 Create a name scope

eg1. Create a name scope for network layer 1

...
with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.truncated_normal([input_size,L1_size]),name='weight_L1')
    b1 = tf.Variable(tf.zeros([1,L1_size]),name='bias_L1')  # tf.zeros takes a shape list
    with tf.name_scope('wx_plus_b_L1'):
        z1 = tf.matmul(x,W1) + b1
    with tf.name_scope('tanh_L1'):
        a1 = tf.nn.tanh(z1)
    with tf.name_scope('dropout_L1'):
        L1_drop = tf.nn.dropout(a1,keep_prob)
...

with tf.Session() as sess:
    ...
    # write the graph definition so the GRAPHS tab can display it
    writer = tf.summary.FileWriter('logs/',sess.graph)
    ...
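The scope names become prefixes on every op's name, which is exactly how TensorBoard groups nodes into collapsible boxes in the GRAPHS tab. A minimal self-contained sketch (the sizes and names here are made up for illustration) that prints the resulting names:

import tensorflow as tf

with tf.name_scope('layer1'):
    # both the variable and the matmul op get the 'layer1/' prefix
    W1 = tf.Variable(tf.truncated_normal([784, 500]), name='weight_L1')
    with tf.name_scope('wx_plus_b_L1'):
        x = tf.placeholder(tf.float32, [None, 784], name='x')
        z1 = tf.matmul(x, W1)

print(W1.name)  # layer1/weight_L1:0
print(z1.name)  # layer1/wx_plus_b_L1/MatMul:0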

Note2 How to launch TensorBoard

1. Open a command line and change into the directory that contains the logs/ folder

2. Run the command

tensorboard --logdir=logs/

3. Open http://localhost:6006/ in Google Chrome

SUMMARIES

Notes

Note1 Summary of tf.summary usage

eg1. Record scalar values

tf.summary.scalar(name, tensor, collections=None, family=None)

For example

tf.summary.scalar('loss',loss)

eg2. Record a histogram, typically used to show how a variable's distribution evolves during training

tf.summary.histogram(name, values, collections=None, family=None)

For example

tf.summary.histogram('histogram', var)

eg3. Distribution plots, typically used for the distribution of weights. Note that TensorFlow has no tf.summary.distribution op: TensorBoard's DISTRIBUTIONS tab is rendered from the same data that tf.summary.histogram records.
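A small sketch (the variable name is made up); writing this histogram summary over many training steps is what fills both the HISTOGRAMS and DISTRIBUTIONS tabs:

W = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1), name='weight')
tf.summary.histogram('weight_distribution', W)  # appears in HISTOGRAMS and DISTRIBUTIONS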

eg4. Convert text data to a tensor and write it to the summary

tf.summary.text

For example

text = """/a/b/c\\_d/f\\_g\\_h\\_2017"""
summary_op0 = tf.summary.text('text', tf.convert_to_tensor(text))

eg5. Record images

Outputs a protobuf containing images. The image summaries are tagged in the form 'tag/image/0', 'tag/image/1', ..., e.g. input/image/0.

tf.summary.image(name, tensor, max_outputs=3, collections=None, family=None)
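No usage example is given above, so here is a minimal sketch assuming MNIST-style flat inputs; the reshape to [-1,28,28,1] produces the 4-D [batch, height, width, channels] tensor that tf.summary.image expects:

x = tf.placeholder(tf.float32, [None, 784], name='x')
with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    # writes at most 10 images per step to the IMAGES tab
    tf.summary.image('input', image_shaped_input, 10)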

eg6. Record audio

tf.summary.audio
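A sketch of how it might be used (the sine wave and sample rate are invented for illustration); tf.summary.audio expects a float tensor of shape [batch, frames] or [batch, frames, channels] with values in [-1, 1]:

import numpy as np
import tensorflow as tf

sample_rate = 44100                             # Hz, assumed for this example
t = np.linspace(0, 1, sample_rate)              # one second of samples
wave = np.sin(2 * np.pi * 440 * t)              # 440 Hz sine tone, values in [-1, 1]
audio = tf.convert_to_tensor(wave[np.newaxis, :], dtype=tf.float32)  # shape [1, frames]
tf.summary.audio('sine_440hz', audio, sample_rate)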

eg7. merge_all

tf.summary.merge_all

merge_all merges every summary op in the graph into a single op; running it and writing the result with a FileWriter saves everything to disk for TensorBoard to display. Unless you have special requirements, this one call is usually enough to record all the information from training.

eg8. merge

tf.summary.merge(inputs, collections=None, name=None)

To select only the summaries you want to save, you generally also need the tf.get_collection() function.

For example

tf.summary.scalar('accuracy',acc)                     # scalar plot of the accuracy
merge_summary = tf.summary.merge([tf.get_collection(tf.GraphKeys.SUMMARIES,'accuracy'),...(other summaries to display)])
train_writer = tf.summary.FileWriter(dir,sess.graph)  # target file for the summaries; dir is the output path
......(definitions of the cross-entropy, optimizer, etc.)
for step in range(training_step):                     # training loop
    train_summary = sess.run(merge_summary,feed_dict = {...})  # run the graph for one step and produce the summary data
    train_writer.add_summary(train_summary,step)      # save the summary together with the training step

Or

acc_summary = tf.summary.scalar('accuracy',acc)                      # scalar plot of the accuracy
merge_summary = tf.summary.merge([acc_summary,...(other summaries to display)])  # the [] here is required

eg9. Save the graph

tf.summary.FileWriter

You can call its add_summary() method to save the training data into the file specified by the FileWriter.

For example

tf.summary.scalar('accuracy',acc)                     # scalar plot of the accuracy
merge_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(dir,sess.graph)  # target file for the summaries; dir is the output path
......(definitions of the cross-entropy, optimizer, etc.)
for step in range(training_step):                     # training loop
    train_summary = sess.run(merge_summary,feed_dict = {...})  # run the graph for one step and produce the summary data
    train_writer.add_summary(train_summary,step)      # save the summary together with the training step

If you want to plot several data series (runs) in TensorBoard, define one tf.summary.FileWriter per run and repeat the process above, as sketched below.
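A sketch of that pattern, reusing merged and sess from the snippets above (train_feed and test_feed are placeholder feed dicts); TensorBoard overlays the two log directories as separate curves:

train_writer = tf.summary.FileWriter('logs/train', sess.graph)
test_writer = tf.summary.FileWriter('logs/test')
for step in range(training_step):
    # same merged op evaluated on different data, written to different directories
    train_writer.add_summary(sess.run(merged, feed_dict=train_feed), step)
    test_writer.add_summary(sess.run(merged, feed_dict=test_feed), step)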

Codes

Code1 Define a function that records summaries for a variable

def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean',mean)               # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev',stddev)           # standard deviation
        tf.summary.scalar('max',tf.reduce_max(var))  # maximum
        tf.summary.scalar('min',tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram',var)        # histogram

Code2 Usage

...
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean',mean)               # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev',stddev)           # standard deviation
        tf.summary.scalar('max',tf.reduce_max(var))  # maximum
        tf.summary.scalar('min',tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram',var)        # histogram

...
with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.truncated_normal([input_size,L1_size]),name='weight_L1')
    variable_summaries(W1)
    b1 = tf.Variable(tf.zeros([1,L1_size]),name='bias_L1')
    variable_summaries(b1)
    with tf.name_scope('wx_plus_b_L1'):
        z1 = tf.matmul(x,W1) + b1
    with tf.name_scope('tanh_L1'):
        a1 = tf.nn.tanh(z1)
    with tf.name_scope('dropout_L1'):
        L1_drop = tf.nn.dropout(a1,keep_prob)
...
with tf.name_scope('loss'):
    # reduce_mean makes loss a scalar so tf.summary.scalar can record it
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y,logits = prediction))
    tf.summary.scalar('loss',loss)
...
merged = tf.summary.merge_all()
...
# create the session
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('logs/',sess.graph)
    for epoch in range(51):
        sess.run(tf.assign(lr,lr * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})

        writer.add_summary(summary,epoch)
        train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})
        test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        print("Iter" + str(epoch) + ", | Training Accuracy:" + str(train_acc) + ", | Testing Accuracy:" + str(test_acc) + ", | Learning Rate:" + str(sess.run(lr)))


Code3 MNIST

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)


def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean',mean)               # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev',stddev)           # standard deviation
        tf.summary.scalar('max',tf.reduce_max(var))  # maximum
        tf.summary.scalar('min',tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram',var)        # histogram


# batch setup
# size of each batch
batch_size = 100
# total number of batches
n_batch = mnist.train.num_examples // batch_size

with tf.name_scope('input'):
    # define the placeholders
    x = tf.placeholder(tf.float32,[None,784],name='x')
    y = tf.placeholder(tf.float32,[None,10],name='y')
with tf.name_scope('parameter'):
    keep_prob = tf.placeholder(tf.float32,name='keep_prob')


with tf.name_scope('network'):
    with tf.name_scope('network_parameter'):
        # define the network
        # learning rate
        lr = tf.Variable(0.01,name='lr')

        # network parameters
        L1_size = 500
        L2_size = 300
    with tf.name_scope('layer1'):
        # L1
        W1 = tf.Variable(tf.truncated_normal([784,L1_size],stddev = 0.1),name = 'weight_L1')
        variable_summaries(W1)
        b1 = tf.Variable(tf.zeros([1,L1_size]) + 0.1,name = 'bias_L1')
        variable_summaries(b1)
        with tf.name_scope('wx_plus_b_L1'):
            z1 = tf.matmul(x,W1) + b1
        with tf.name_scope('tanh_L1'):
            a1 = tf.nn.tanh(z1)
        with tf.name_scope('dropout_L1'):
            L1_drop = tf.nn.dropout(a1,keep_prob)

    with tf.name_scope('layer2'):
        # L2
        W2 = tf.Variable(tf.truncated_normal([L1_size,L2_size],stddev = 0.1),name='weight_L2')
        variable_summaries(W2)
        b2 = tf.Variable(tf.zeros([1,L2_size]) + 0.1,name='bias_L2')
        variable_summaries(b2)
        with tf.name_scope('wx_plus_b_L2'):
            z2 = tf.matmul(L1_drop,W2) + b2
        with tf.name_scope('tanh_L2'):
            a2 = tf.nn.tanh(z2)
        with tf.name_scope('dropout_L2'):
            L2_drop = tf.nn.dropout(a2,keep_prob)

    with tf.name_scope('output_layer'):
        # Lout
        Wout = tf.Variable(tf.truncated_normal([L2_size,10],stddev = 0.1),name='weight_Lout')
        variable_summaries(Wout)
        bout = tf.Variable(tf.zeros([1,10]) + 0.1,name='bias_Lout')
        variable_summaries(bout)
        with tf.name_scope('wx_plus_b_Lout'):
            zout = tf.matmul(L2_drop,Wout) + bout
        with tf.name_scope('softmax_Lout'):
            prediction = tf.nn.softmax(zout)

with tf.name_scope('loss'):
    # training objective
    # cost function; the logits argument must be the pre-softmax output zout,
    # not the softmaxed prediction
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = zout))
    tf.summary.scalar('loss',loss)
with tf.name_scope('train'):
    # optimizer
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)
    # train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)

# initialize variables
init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    # compute the accuracy
    with tf.name_scope('correct_prediction'):
        # compare the positions of the largest values in label and prediction;
        # the results are stored as a boolean list
        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
    with tf.name_scope('accuracy_calculate'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
        tf.summary.scalar('accuracy',accuracy)

# merge all summaries
merged = tf.summary.merge_all()

# create the session
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('logs/',sess.graph)
    for epoch in range(51):
        # decay the learning rate each epoch
        sess.run(tf.assign(lr,lr * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob fed as 1.0 effectively disables dropout here
            summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})

        # write the summaries of the last batch, indexed by epoch
        writer.add_summary(summary,epoch)
        train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})
        test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        print("Iter" + str(epoch) + ", | Training Accuracy:" + str(train_acc) + ", | Testing Accuracy:" + str(test_acc) + ", | Learning Rate:" + str(sess.run(lr)))