==Convolutional Neural Network==
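Below is a sketch of a two-layer convolutional network for classifying MNIST digits, built with the TensorFlow 1.x graph API (tf.placeholder / tf.Session). It assumes a TensorFlow 1.x installation; the graph-construction and session boilerplate follows the standard TF 1.x pattern.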
<font color="green"># Import MNIST data</font>
'''from''' tensorflow.examples.tutorials.mnist '''import''' input_data
mnist = input_data.read_data_sets(<font color="red">"/tmp/data/"</font>, one_hot='''True''')
<font color="green"># Training Parameters</font>
learning_rate = <font color="blue">0.001</font>
num_steps = <font color="blue">200</font>
batch_size = <font color="blue">128</font>
display_step = <font color="blue">10</font>
<font color="green"># Network Parameters</font>
num_input = <font color="blue">784</font> <font color="green"># MNIST data input (img shape: 28*28)</font>
num_classes = <font color="blue">10</font> <font color="green"># MNIST total classes (0-9 digits)</font>
dropout = <font color="blue">0.75</font> <font color="green"># Dropout, probability to keep units</font>
<font color="green"># tf Graph input</font>
<font color="green"># Create some wrappers for simplicity</font>
'''def''' conv2d(x, W, b, strides=<font color="blue">1</font>):
    <font color="green"># Conv2D wrapper, with bias and relu activation</font>
    x = tf.nn.conv2d(x, W, strides=[<font color="blue">1</font>, strides, strides, <font color="blue">1</font>], padding=<font color="red">'SAME'</font>)
    x = tf.nn.bias_add(x, b)
    '''return''' tf.nn.relu(x)
'''def''' maxpool2d(x, k=<font color="blue">2</font>):
    <font color="green"># MaxPool2D wrapper</font>
    '''return''' tf.nn.max_pool(x, ksize=[<font color="blue">1</font>, k, k, <font color="blue">1</font>], strides=[<font color="blue">1</font>, k, k, <font color="blue">1</font>], padding=<font color="red">'SAME'</font>)
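<font color="green"># Note: ksize and strides follow the NHWC layout [batch, height, width, channels];
# with 'SAME' padding and stride k, each pooling layer halves the spatial dimensions.</font>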
<font color="green"># Create model</font>
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]</font>
x = tf.reshape(x, shape=[<font color="blue">-1</font>, <font color="blue">28</font>, <font color="blue">28</font>, <font color="blue">1</font>])
<font color="green"># Convolution Layer</font>
conv1 = conv2d(x, weights[<font color="red">'wc1'</font>], biases[<font color="red">'bc1'</font>])
<font color="green"># Max Pooling (down-sampling)</font>
conv1 = maxpool2d(conv1, k=<font color="blue">2</font>)
<font color="green"># Convolution Layer</font>
conv2 = conv2d(conv1, weights[<font color="red">'wc2'</font>], biases[<font color="red">'bc2'</font>])
<font color="green"># Max Pooling (down-sampling)</font>
conv2 = maxpool2d(conv2, k=<font color="blue">2</font>)
<font color="green"># Fully connected layer
# Reshape conv2 output to fit fully connected layer input</font>
fc1 = tf.reshape(conv2, [<font color="blue">-1</font>, weights[<font color="red">'wd1'</font>].get_shape().as_list()[<font color="blue">0</font>]]) fc1 = tf.add(tf.matmul(fc1, weights[<font color="red">'wd1'</font>]), biases[<font color="red">'bd1'</font>])
fc1 = tf.nn.relu(fc1)
<font color="green"># Apply Dropout</font>
fc1 = tf.nn.dropout(fc1, dropout)
<font color="green"># Output, class prediction</font>
out = tf.add(tf.matmul(fc1, weights[<font color="red">'out'</font>]), biases[<font color="red">'out'</font>])
'''return''' out
weights = {
    <font color="green"># 5x5 conv, 1 input, 32 outputs</font>
    <font color="red">'wc1'</font>: tf.Variable(tf.random_normal([<font color="blue">5</font>, <font color="blue">5</font>, <font color="blue">1</font>, <font color="blue">32</font>])),
    <font color="green"># 5x5 conv, 32 inputs, 64 outputs</font>
    <font color="red">'wc2'</font>: tf.Variable(tf.random_normal([<font color="blue">5</font>, <font color="blue">5</font>, <font color="blue">32</font>, <font color="blue">64</font>])),
    <font color="green"># fully connected, 7*7*64 inputs, 1024 outputs</font>
    <font color="red">'wd1'</font>: tf.Variable(tf.random_normal([<font color="blue">7</font>*<font color="blue">7</font>*<font color="blue">64</font>, <font color="blue">1024</font>])),
    <font color="green"># 1024 inputs, 10 outputs (class prediction)</font>
    <font color="red">'out'</font>: tf.Variable(tf.random_normal([<font color="blue">1024</font>, num_classes]))
}
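<font color="green"># Note: two 2x2 max-pool layers reduce the 28x28 input to 14x14 and then 7x7,
# so the flattened conv2 output has 7*7*64 features, matching 'wd1' above.</font>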
biases = {
    <font color="red">'bc1'</font>: tf.Variable(tf.random_normal([<font color="blue">32</font>])),
    <font color="red">'bc2'</font>: tf.Variable(tf.random_normal([<font color="blue">64</font>])),
    <font color="red">'bd1'</font>: tf.Variable(tf.random_normal([<font color="blue">1024</font>])),
    <font color="red">'out'</font>: tf.Variable(tf.random_normal([num_classes]))
}
<font color="green"># Evaluate model</font>
correct_pred = tf.equal(tf.argmax(prediction, <font color="blue">1</font>), tf.argmax(Y, <font color="blue">1</font>))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
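<font color="green"># argmax along axis 1 converts the one-hot rows to class labels; the mean of
# the boolean matches is the fraction of correctly classified examples.</font>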
<font color="green"># Run the initializer</font>
sess.run(init)
    '''for''' step '''in''' '''range'''(<font color="blue">1</font>, num_steps+<font color="blue">1</font>):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        <font color="green"># Run optimization op (backprop)</font>
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: <font color="blue">0.8</font>})
        '''if''' step % display_step == <font color="blue">0</font> '''or''' step == <font color="blue">1</font>:
<font color="green"># Calculate batch loss and accuracy</font>
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y,
keep_prob: <font color="blue">1.0</font>}) print(<font color="red">"Step " </font> + str(step) + <font color="red">", Minibatch Loss= " </font> + \ <font color="red">"{:.4f}"</font>.format(loss) + <font color="red">", Training Accuracy= " </font> + \ <font color="red">"{:.3f}"</font>.format(acc)) print(<font color="red">"Optimization Finished!"</font>)
<font color="green"># Calculate accuracy for 256 MNIST test images</font>
print(<font color="red">"Testing Accuracy:"</font>, \ sess.run(accuracy, feed_dict={X: mnist.test.images[:<font color="blue">256</font>], Y: mnist.test.labels[:<font color="blue">256</font>], keep_prob: <font color="blue">1.0</font>}))
==Keras==