Problem description

I am using the MNIST dataset to learn TensorFlow and neural networks. Below is my code in Python.
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("data/", one_hot=True)

features = 28 * 28    # each image is 28x28 pixels, flattened
classes = 10          # digits 0-9
batch_size = 100
m_train = mnist.train.num_examples
m_test = mnist.test.num_examples
print(" The neural network will be trained on ", m_train, " examples")

H_L_1_nodes = 500
H_L_2_nodes = 500
H_L_3_nodes = 500

x = tf.placeholder('float', [None, features])
y = tf.placeholder('float', [None, classes])

def neural_net(data):
    # Randomly initialized weights and biases for three hidden layers
    # and the output layer.
    hidden_layer_1 = {'weights': tf.Variable(tf.random_normal([features, H_L_1_nodes])),
                      'biases': tf.Variable(tf.random_normal([H_L_1_nodes]))}
    hidden_layer_2 = {'weights': tf.Variable(tf.random_normal([H_L_1_nodes, H_L_2_nodes])),
                      'biases': tf.Variable(tf.random_normal([H_L_2_nodes]))}
    hidden_layer_3 = {'weights': tf.Variable(tf.random_normal([H_L_2_nodes, H_L_3_nodes])),
                      'biases': tf.Variable(tf.random_normal([H_L_3_nodes]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([H_L_3_nodes, classes])),
                    'biases': tf.Variable(tf.random_normal([classes]))}

    # Three fully connected hidden layers with ReLU activations.
    l1 = tf.add(tf.matmul(data, hidden_layer_1['weights']), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_layer_3['weights']), hidden_layer_3['biases'])
    l3 = tf.nn.relu(l3)

    # Output layer, with ReLU applied to it as well.
    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
    output = tf.nn.relu(output)
    return output

def train_neural_network(x):
    prediction = neural_net(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
    optimizer = tf.train.AdamOptimizer(0.0001).minimize(cost)
    epochs = 5

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            epoch_loss = 0
            for _ in range(int(m_train / batch_size)):
                _x, _y = mnist.train.next_batch(batch_size)
                _, c = session.run([optimizer, cost], feed_dict={x: _x, y: _y})
                epoch_loss += c
            print(" Loss in epoch ", epoch, " is ", epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print("-------------------------------------------------------------------------")
        print(session.run(tf.cast(correct[:10], 'float'),
                          feed_dict={x: mnist.test.images, y: mnist.test.labels}))
        print("-------------------------------------------------------------------------")
        print(" The neural network will be tested on ", m_test, " examples")
        print(" Accuracy = ",
              accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}) * 100, "%")

print("Initializing training...")
train_neural_network(x)
print("Success!")
print("Success!")
I get an accuracy of 9% to 13%, but never more than that. I think I have implemented the code correctly, but I can't figure out what is going wrong. One thing I noticed is that all of the accuracy comes from the model correctly predicting the digit 0.
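One way to verify that the model has collapsed onto a single class is to count the predicted classes on the test set. A minimal sketch (an assumption: it is run inside the session in train_neural_network, where session, prediction and x are in scope):

    # Count how often each digit 0-9 is predicted on the test images
    # (assumes `session`, `prediction` and `x` from train_neural_network).
    pred_classes = session.run(tf.argmax(prediction, 1),
                               feed_dict={x: mnist.test.images})
    print(np.bincount(pred_classes, minlength=10))
    # A collapsed model puts almost all of its predictions on one class.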
Answer 1
I made a mistake when computing the network's output.

Wrong:
output = tf.add(tf.matmul( l3, output_layer['weights']), output_layer['biases'])
output = tf.nn.relu(output)
Correct:
output = tf.add(tf.matmul( l3, output_layer['weights']), output_layer['biases'])
I was normalizing the output a second time, which messed up the whole network. Posting this answer because it may help someone in the future. Thanks!
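The underlying reason is that tf.nn.softmax_cross_entropy_with_logits applies the softmax itself, so the network must return raw, unnormalized logits; a ReLU on the output layer clamps every negative score to zero and destroys information the loss needs. A minimal sketch of the corrected wiring (assuming neural_net has been fixed as above to return the raw output):

    # The loss receives raw logits; softmax is applied inside the loss op.
    logits = neural_net(x)   # no activation on the final layer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # For probabilities at evaluation time, apply softmax explicitly;
    # for accuracy, argmax over the raw logits is already enough.
    probabilities = tf.nn.softmax(logits)
    predicted_class = tf.argmax(logits, 1)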
PS: Code borrowed from
Edit:

I found that using  can improve the accuracy further, and it can be pushed even higher by using . Someone may find this useful.