The MNIST database of handwritten digits, available on http://yann.lecun.com/exdb/mnist/, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image.
# Standard imports: TensorFlow/Keras for the model, NumPy for array handling,
# matplotlib for visualization, pandas for optional tabular inspection.
import tensorflow as tf
from tensorflow import keras # TensorFlow and tf.keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
print(tf.__version__)  # this notebook was run with TensorFlow 2.2.0
2.2.0
# Load the MNIST dataset: 60,000 training and 10,000 test grayscale 28x28 digit images.
from tensorflow.keras.datasets import mnist
# NOTE: the original code rebound `mnist = keras.datasets.mnist` right after the
# import, shadowing the imported module with an identical object; the redundant
# rebinding is removed and the imported module is used directly.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Convert the uint8 pixel arrays to float32 so they can be normalized and fed
# to the network.
x_train, x_test = np.array(train_images, np.float32), np.array(test_images, np.float32)
print(x_train.shape)  # (60000, 28, 28)
# Collect the distinct labels in first-occurrence order.
# The original used a list comprehension purely for its side effect
# ([b.append(i) for i in ...]), which builds a throwaway list of Nones;
# dict.fromkeys gives the same order-preserving de-duplication idiomatically.
b = list(dict.fromkeys(train_labels))
print(b)  # Each label is an integer between 0 and 9
len(train_labels)  # notebook cell: displays 60000, the number of training examples
# Human-readable names for the ten digit classes, indexed by label value 0-9.
class_names = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
(60000, 28, 28) [5, 0, 4, 1, 9, 2, 3, 6, 7, 8]
print(x_test.shape)  # (10000, 28, 28)
len(test_labels)  # notebook cell: displays 10000, the number of test examples
(10000, 28, 28)
10000
# Pixels are organized row-wise with values 0 to 255;
# rescale both splits from [0, 255] to [0, 1] before training.
train_images, test_images = x_train / 255.0, x_test / 255.0
print(train_images.shape, test_images.shape)
(60000, 28, 28) (10000, 28, 28)
# Sanity-check the data: show the first 25 training digits in a 5x5 grid,
# each labelled with its ground-truth class.
plt.figure(figsize=(10, 10))
for idx in range(25):
    plt.subplot(5, 5, idx + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(True)
    plt.imshow(train_images[idx], cmap=plt.cm.YlOrRd)  # plt.cm.binary also works
    plt.xlabel(train_labels[idx])
plt.show()
We build a neural network with one hidden layer to make predictions, using the sigmoid activation function for the hidden layer and the softmax function for the output layer.
# Feed-forward network: 784 inputs -> 500 sigmoid hidden units -> 10 softmax outputs.
num_inter_neuron = 500
model = keras.Sequential()
# Flatten each 28x28 image to a 1-D vector of 784 features (28*28).
model.add(keras.layers.Flatten(input_shape=(28, 28)))
# Hidden layer. The original passed input_shape=(784,) here too, but Keras
# ignores input_shape on any layer after the first — the shape is inferred
# from the Flatten layer — so the misleading dead argument is removed.
# (Also switched tf.keras.layers -> keras.layers for consistency with the
# Flatten line above; they are the same module.)
model.add(keras.layers.Dense(num_inter_neuron, activation='sigmoid'))
# Output layer: one probability per digit class.
model.add(keras.layers.Dense(10, activation='softmax'))
print(model)
<tensorflow.python.keras.engine.sequential.Sequential object at 0x163e81a10>
# Compile the model.
# BUG FIX: the network's final layer already applies softmax, so the loss
# receives probabilities, not raw logits — from_logits must be False.
# With from_logits=True the softmax outputs were re-interpreted as logits,
# which is why the reported training loss hovered around 1.5 even at ~98%
# accuracy instead of approaching 0.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
Epoch 1/10 1875/1875 [==============================] - 5s 3ms/step - loss: 1.5900 - accuracy: 0.8948 Epoch 2/10 1875/1875 [==============================] - 5s 2ms/step - loss: 1.5282 - accuracy: 0.9395 Epoch 3/10 1875/1875 [==============================] - 6s 3ms/step - loss: 1.5114 - accuracy: 0.9542 Epoch 4/10 1875/1875 [==============================] - 5s 3ms/step - loss: 1.5012 - accuracy: 0.9640: 0s - los Epoch 5/10 1875/1875 [==============================] - 5s 3ms/step - loss: 1.4937 - accuracy: 0.9708 Epoch 6/10 1875/1875 [==============================] - 5s 3ms/step - loss: 1.4883 - accuracy: 0.9758 Epoch 7/10 1875/1875 [==============================] - 5s 3ms/step - loss: 1.4841 - accuracy: 0.9796 Epoch 8/10 1875/1875 [==============================] - 4s 2ms/step - loss: 1.4810 - accuracy: 0.9826 Epoch 9/10 1875/1875 [==============================] - 5s 2ms/step - loss: 1.4781 - accuracy: 0.9851 Epoch 10/10 1875/1875 [==============================] - 4s 2ms/step - loss: 1.4761 - accuracy: 0.9868
<tensorflow.python.keras.callbacks.History at 0x163bbd290>
# Measure generalization on the held-out test set (verbose=2: one summary line).
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
313/313 - 0s - loss: 1.4865 - accuracy: 0.9768 Test accuracy: 0.9768000245094299
# Make predictions.
# BUG FIX: the model's output layer already uses a softmax activation, so its
# outputs are probabilities. The original wrapped the model in an extra
# Softmax layer, applying softmax twice and squashing the distribution —
# that is why every class showed ~0.085 and the top class only ~0.23.
# Predicting with the model directly yields the true class probabilities.
predictions = model.predict(test_images)
print(predictions[0])  # the model's "confidence" that the image corresponds to each of the 10 different digits
print(np.argmax(predictions[0]))  # predicted class = digit with highest confidence
[0.08533675 0.08533675 0.08533675 0.08533683 0.08533675 0.08533675 0.08533675 0.2319692 0.08533675 0.08533675] 7
print(test_labels[0])  # ground-truth label of the first test image
test_labels[0]  # notebook cell: displays the same value again (7)
7
7
def plot_image(i, predictions_array, true_label, img):
    """Draw test image *i* with its predicted label; blue caption = correct, red = wrong."""
    label, image = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(image, cmap=plt.cm.binary)
    guess = np.argmax(predictions_array)
    caption_color = 'blue' if guess == label else 'red'
    confidence = 100 * np.max(predictions_array)
    plt.xlabel(
        "{} {:2.0f}% ({})".format(class_names[guess], confidence, class_names[label]),
        color=caption_color,
    )
def plot_value_array(i, predictions_array, true_label):
    """Bar chart of the 10 class confidences; red bar = predicted class, blue = true class."""
    label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(predictions_array)
    bars[guess].set_color('red')
    # If the prediction is correct this repaints the same bar blue.
    bars[label].set_color('blue')
# Inspect the first test image alongside its confidence bar chart.
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Same inspection for another example (test image 12).
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first num_images test digits with their predicted and true labels:
# correct predictions are captioned in blue, incorrect ones in red.
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for idx in range(num_images):
    # Odd subplot: the image; even subplot: its confidence bar chart.
    plt.subplot(num_rows, 2 * num_cols, 2 * idx + 1)
    plot_image(idx, predictions[idx], test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * idx + 2)
    plot_value_array(idx, predictions[idx], test_labels)
plt.tight_layout()
plt.show()
# Predicted class for every test image (index of the highest probability).
# The original built this with map(lambda x: ..., range(len(...))) and then
# filled the error list with an index loop; comprehensions over zip are the
# idiomatic equivalent and produce identical values.
pred_result = [np.argmax(p) for p in predictions]
# error[i] is 1 when prediction i disagrees with the true label, else 0.
error = [1 if pred != truth else 0 for pred, truth in zip(pred_result, test_labels)]
print("length of error list:", len(error))
print("test set error rate: ", sum(error)/ len(predictions))
length of error list: 10000 test set error rate: 0.0232