import numpy as np


class NeuralNetwork():
    """A minimal fully-connected feed-forward neural network.

    Every layer uses the sigmoid activation; training is plain batch
    gradient descent driven by backpropagation of the quadratic-cost
    gradient.
    """

    def __init__(self, layers):
        """Initialize random weights and zero biases for the architecture.

        Args:
            layers: Sequence of layer sizes, e.g. [2, 3, 1] means 2 inputs,
                one hidden layer of 3 units, and 1 output unit.
        """
        self.layers = layers
        # weights[i] has shape (layers[i+1], layers[i]) so that
        # np.dot(w, a) maps layer-i activations to layer i+1.
        self.weights = [np.random.randn(a, b)
                        for a, b in zip(layers[1:], layers[:-1])]
        # One bias column-vector per non-input layer.
        self.biases = [np.zeros((a, 1)) for a in layers[1:]]

    def sigmoid(self, z):
        """Element-wise logistic function 1 / (1 + e^(-z))."""
        return 1 / (1 + np.exp(-z))

    def forward_propagation(self, a):
        """Feed the activation column-vector `a` through every layer."""
        for w, b in zip(self.weights, self.biases):
            a = self.sigmoid(np.dot(w, a) + b)
        return a

    def backward_propagation(self, x, y):
        """Compute the cost gradient for a single training example.

        Args:
            x: Input column vector.
            y: Target output (broadcastable against the network output).

        Returns:
            Tuple (nabla_w, nabla_b): per-layer gradient lists whose entries
            match the shapes of self.weights and self.biases.
        """
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        # Forward pass, remembering every pre-activation z and activation a.
        a = x
        activations = [x]
        zs = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, a) + b
            zs.append(z)
            a = self.sigmoid(z)
            activations.append(a)
        # Output-layer error (delta rule for quadratic cost + sigmoid).
        delta = (self.cost_derivative(activations[-1], y)
                 * self.sigmoid_prime(zs[-1]))
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards through the hidden layers.
        # `layer` counts from the back: 2 is the second-to-last layer, etc.
        for layer in range(2, len(self.layers)):
            z = zs[-layer]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.weights[-layer + 1].transpose(), delta) * sp
            nabla_b[-layer] = delta
            nabla_w[-layer] = np.dot(delta,
                                     activations[-layer - 1].transpose())
        return (nabla_w, nabla_b)

    def train(self, x_train, y_train, epochs, learning_rate):
        """Run batch gradient descent over the full training set.

        Args:
            x_train: Iterable of input samples (each convertible to a row).
            y_train: Iterable of matching target values.
            epochs: Number of full passes over the training data.
            learning_rate: Step size for the gradient-descent update.
        """
        for epoch in range(epochs):
            nabla_w = [np.zeros(w.shape) for w in self.weights]
            nabla_b = [np.zeros(b.shape) for b in self.biases]
            # Accumulate the gradient over every training example.
            for x, y in zip(x_train, y_train):
                delta_nabla_w, delta_nabla_b = self.backward_propagation(
                    np.array([x]).transpose(), np.array([y]).transpose())
                nabla_w = [nw + dnw
                           for nw, dnw in zip(nabla_w, delta_nabla_w)]
                nabla_b = [nb + dnb
                           for nb, dnb in zip(nabla_b, delta_nabla_b)]
            # One averaged gradient-descent step per epoch; the step factor
            # is invariant, so hoist it out of the two comprehensions.
            step = learning_rate / len(x_train)
            self.weights = [w - step * nw
                            for w, nw in zip(self.weights, nabla_w)]
            self.biases = [b - step * nb
                           for b, nb in zip(self.biases, nabla_b)]

    def predict(self, x_test):
        """Return the scalar network output for each sample in x_test."""
        return [self.forward_propagation(np.array([x]).transpose())[0][0]
                for x in x_test]

    def cost_derivative(self, output_activations, y):
        """Gradient of the quadratic cost w.r.t. the output activations."""
        return output_activations - y

    def sigmoid_prime(self, z):
        """Derivative of the sigmoid: s(z) * (1 - s(z)), s computed once."""
        s = self.sigmoid(z)
        return s * (1 - s)
# Post a comment:
# You are welcome to join the discussion — share your views and exchange opinions here.