Continuing from the previous article on implementing a two-layer neural network: we build a two-layer neural network, train it on MNIST, and check the prediction accuracy of the trained model. See Chapter 4 of Deep Learning from Scratch.
TwoLayerNet.py
import numpy as np

class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize weights
        self.params = {}
        # 784 x 50 weight matrix
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        # 50 x 10 weight matrix
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        # Bias, one per hidden unit
        self.params['b1'] = np.zeros(hidden_size)
        # Bias, one per output unit
        self.params['b2'] = np.zeros(output_size)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def softmax(self, a):
        if a.ndim == 2:
            # Batch input: normalize each row independently
            c = np.max(a, axis=1, keepdims=True)
            exp_a = np.exp(a - c)  # guard against overflow
            return exp_a / np.sum(exp_a, axis=1, keepdims=True)
        c = np.max(a)
        exp_a = np.exp(a - c)  # guard against overflow
        sum_exp_a = np.sum(exp_a)
        y = exp_a / sum_exp_a
        return y

    def _numerical_gradient_1d(self, f, x):
        h = 1e-4  # 0.0001
        grad = np.zeros_like(x)
        for idx in range(x.size):
            tmp_val = x[idx]
            x[idx] = float(tmp_val) + h
            fxh1 = f(x)  # f(x+h)
            x[idx] = tmp_val - h
            fxh2 = f(x)  # f(x-h)
            grad[idx] = (fxh1 - fxh2) / (2 * h)
            x[idx] = tmp_val  # restore the original value
        return grad

    def numerical_gradient(self, f, X):
        if X.ndim == 1:
            return self._numerical_gradient_1d(f, X)
        else:
            grad = np.zeros_like(X)
            for idx, x in enumerate(X):
                grad[idx] = self._numerical_gradient_1d(f, x)
            return grad

    def cross_entropy_error(self, y, t):
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)
        batch_size = y.shape[0]
        # 1e-7 avoids log(0)
        return -np.sum(t * np.log(y + 1e-7)) / batch_size

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1  # a = xW + b
        z1 = self.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = self.softmax(a2)
        return z2

    def loss(self, x, t):
        y = self.predict(x)
        return self.cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def gradient(self, x, t):
        # f is evaluated while each parameter is perturbed in place
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = self.numerical_gradient(loss_W, self.params['W1'])
        grads['W2'] = self.numerical_gradient(loss_W, self.params['W2'])
        grads['b1'] = self.numerical_gradient(loss_W, self.params['b1'])
        grads['b2'] = self.numerical_gradient(loss_W, self.params['b2'])
        return grads
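Before training, a quick sanity check helps confirm the shapes and the softmax normalization. This is a minimal sketch, not part of the original article; it assumes the class above is saved as TwoLayerNet.py:

import numpy as np
from TwoLayerNet import TwoLayerNet  # assumes the file name above

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x = np.random.rand(5, 784)                   # 5 dummy "images"
t = np.eye(10)[np.random.randint(0, 10, 5)]  # 5 dummy one-hot labels

y = net.predict(x)
print(y.shape)         # (5, 10)
print(y.sum(axis=1))   # each softmax row sums to ~1.0
print(net.loss(x, t))  # cross-entropy at initialization, ~ln(10) = 2.3
grads = net.gradient(x, t)  # numerical gradients; slow even for 5 samples
print(grads['W1'].shape)    # (784, 50)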
As before, mini-batch learning was performed on the MNIST data, this time for 500 iterations with a mini-batch size of 100. Accuracy (acc) is evaluated once per epoch, where one epoch is counted as 50 iterations, giving 10 evaluations over the run. Because the gradients are obtained by numerical differentiation over all of the roughly 40,000 weights and biases, each iteration is quite slow.
LearningMNIST.py
# coding: utf-8
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import OneHotEncoder
from TwoLayerNet import TwoLayerNet

# Note: fetch_mldata is removed in scikit-learn >= 0.20;
# fetch_openml('mnist_784') is the current equivalent.
mnist = fetch_mldata('MNIST original', data_home=".")

x_train = mnist['data'][:60000]
t_train = mnist['target'][:60000]
t_train = t_train.reshape(1, -1).transpose()  # column vector of labels
# One-hot encode the training labels
encoder = OneHotEncoder(n_values=int(t_train.max()) + 1)
t_train = encoder.fit_transform(t_train).toarray()

x_test = mnist['data'][60000:]
t_test = mnist['target'][60000:]
t_test = t_test.reshape(1, -1).transpose()  # column vector of labels
# One-hot encode the test labels
encoder = OneHotEncoder(n_values=int(t_test.max()) + 1)
t_test = encoder.fit_transform(t_test).toarray()

# Scale pixel values to [0, 1]
x_train = x_train.astype(np.float64)
x_train /= x_train.max()
x_test = x_test.astype(np.float64)
x_test /= x_test.max()
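# For reference: after encoding, each label is a 10-dimensional one-hot row,
# e.g. the digit 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], so t_train has
# shape (60000, 10) and t_test has shape (10000, 10).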
train_loss_list = []
train_acc_list = []
test_acc_list = []

# Hyperparameters
iters_num = 500
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

# Number of iterations per epoch
#iter_per_epoch = max(train_size / batch_size, 1)
iter_per_epoch = 50

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

for i in range(iters_num):
    # Draw a random mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Gradient descent step on each parameter
    grad = network.gradient(x_batch, t_batch)
    for key in ('W1', 'W2', 'b1', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Compute recognition accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
The result is plotted below: the vertical axis is the accuracy and the horizontal axis is the number of epochs.
The accuracy improves with each epoch.
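To draw the graph, a minimal matplotlib sketch (not in the original script) using the train_acc_list and test_acc_list collected above:

import matplotlib.pyplot as plt

# Accuracy per epoch, using the lists filled in by LearningMNIST.py
epochs = range(len(train_acc_list))
plt.plot(epochs, train_acc_list, label='train acc')
plt.plot(epochs, test_acc_list, label='test acc', linestyle='--')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()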