In Exercise 2, the handwritten-digit recognition network was implemented with gradients computed by numerical differentiation; here the same network is implemented with error backpropagation. The only difference between the two versions is the method used to compute the gradients.
1、2層神經(jīng)網(wǎng)絡(luò)的類
將2層神經(jīng)網(wǎng)絡(luò)實(shí)現(xiàn)為一個(gè)TwoLayerNet的類(和上次的代碼僅僅是求梯度的方式不同,不同的地方加*表示):
# Imports: the Affine, Relu, SoftmaxWithLoss layers and the numerical_gradient
# helper are the ones implemented earlier; the import paths below assume a
# book-style layout (common/layers.py, common/gradient.py) and may need adjusting.
import numpy as np
from collections import OrderedDict
from common.layers import Affine, Relu, SoftmaxWithLoss
from common.gradient import numerical_gradient


class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # * Build the layers in forward order
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):  # *
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    # x: input data, t: labels (supervised data)
    def loss(self, x, t):
        y = self.predict(x)
        return self.lastLayer.forward(y, t)  # *

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1:  # * convert one-hot labels to class indices
            t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: labels (supervised data)
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads

    def gradient(self, x, t):  # *
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.lastLayer.backward(dout)

        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)

        # Collect the gradients stored by each Affine layer during backward
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
        return grads
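Since TwoLayerNet now exposes both numerical_gradient (Exercise 2's method) and gradient (backpropagation), the claim that the two versions differ only in how gradients are computed can be verified with a quick gradient check. The sketch below is illustrative: it assumes the MNIST loader used in the earlier exercise (load_mnist from dataset.mnist in a book-style layout), and each printed difference should come out very small, roughly on the order of 1e-10.

# Gradient check: compare numerical gradients against backprop gradients
import numpy as np
from dataset.mnist import load_mnist  # assumed loader from the earlier exercise

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch = x_train[:3]  # a few samples are enough; numerical gradients are slow
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

# Mean absolute difference per parameter
for key in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ": " + str(diff))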
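For completeness, here is a minimal mini-batch SGD sketch showing how the class is used for training; compared with Exercise 2, the only change is calling gradient instead of numerical_gradient. The hyperparameters and the load_mnist import are illustrative assumptions, not values fixed by the text above.

# Minimal training loop using the backprop gradients (sketch)
import numpy as np
from dataset.mnist import load_mnist  # assumed loader from the earlier exercise

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

iters_num = 10000      # illustrative hyperparameters
batch_size = 100
learning_rate = 0.1

for i in range(iters_num):
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # The only change from Exercise 2: gradients come from backpropagation
    grad = network.gradient(x_batch, t_batch)

    # In-place SGD update; the Affine layers share these parameter arrays
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

print("test accuracy:", network.accuracy(x_test, t_test))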