Commit 7531e7118e (parent 2d96166ab0), commit message: "oui"
@@ -0,0 +1,13 @@
## Overview

### neuralnetworksanddeeplearning.com integrated scripts for Python 3.5.2 and Theano with CUDA support

These scripts are updated versions of the ones from the **neuralnetworksanddeeplearning.com** GitHub repository, modified to work with Python 3.5.2.

The testing file (**test.py**) contains all three networks (network.py, network2.py, network3.py) from the book and is the starting point to run (i.e. *train and evaluate*) them.

## To run, just type at the shell: **python3.5 test.py**

test.py contains example network configurations with comments that relate each configuration to a particular chapter of the book.
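For reference, the shortest possible session (a sketch of what test.py does internally, assuming `mnist.pkl.gz` sits in the working directory as `mnist_loader.py` expects) boils down to:

    import mnist_loader
    import network

    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    net = network.Network([784, 30, 10])                       # 784 input pixels, 30 hidden neurons, 10 digits
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)   # 30 epochs, mini-batch size 10, eta = 3.0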
@@ -0,0 +1,18 @@
# RUN

import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

import network

# net = network.Network([784, 30, 10])  # Tested: 94.56%
# net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

net = network.Network([784, 100, 10])  # Apparently works better
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

# net = network.Network([784, 100, 10])  # Apparently doesn't work well (eta too small)
# net.SGD(training_data, 30, 10, 0.001, test_data=test_data)

# net = network.Network([784, 30, 10])  # Apparently doesn't work at all (eta too large)
# net.SGD(training_data, 30, 10, 100.0, test_data=test_data)
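The commented-out runs above differ only in the learning rate, so the comparison can also be reproduced with a small loop (a sketch using only the API shown in this file; it reuses the same data for every run):

import mnist_loader
import network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)   # materialize once so every run sees the full data
test_data = list(test_data)

for eta in (0.001, 3.0, 100.0):       # too small, reasonable, too large
    print("--- eta = {} ---".format(eta))
    net = network.Network([784, 30, 10])
    net.SGD(training_data, 30, 10, eta, test_data=test_data)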
@@ -0,0 +1,60 @@
"""expand_mnist.py
~~~~~~~~~~~~~~~~~~

Take the 50,000 MNIST training images, and create an expanded set of
250,000 images, by displacing each training image up, down, left and
right, by one pixel.  Save the resulting file to
../data/mnist_expanded.pkl.gz.

Note that this program is memory intensive, and may not run on small
systems.

"""

from __future__ import print_function

#### Libraries

# Standard library
import pickle  # note: Python 3 has no cPickle module; the standard pickle module is used instead
import gzip
import os.path
import random

# Third-party libraries
import numpy as np

print("Expanding the MNIST training set")

if os.path.exists("../data/mnist_expanded.pkl.gz"):
    print("The expanded training set already exists.  Exiting.")
else:
    f = gzip.open("../data/mnist.pkl.gz", 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    expanded_training_pairs = []
    j = 0 # counter
    for x, y in zip(training_data[0], training_data[1]):
        expanded_training_pairs.append((x, y))
        image = np.reshape(x, (-1, 28))
        j += 1
        if j % 1000 == 0: print("Expanding image number", j)
        # iterate over data telling us the details of how to
        # do the displacement
        for d, axis, index_position, index in [
                (1,  0, "first", 0),
                (-1, 0, "first", 27),
                (1,  1, "last",  0),
                (-1, 1, "last",  27)]:
            new_img = np.roll(image, d, axis)
            if index_position == "first":
                new_img[index, :] = np.zeros(28)
            else:
                new_img[:, index] = np.zeros(28)
            expanded_training_pairs.append((np.reshape(new_img, 784), y))
    random.shuffle(expanded_training_pairs)
    expanded_training_data = [list(d) for d in zip(*expanded_training_pairs)]
    print("Saving expanded data. This may take a few minutes.")
    f = gzip.open("../data/mnist_expanded.pkl.gz", "wb")
    pickle.dump((expanded_training_data, validation_data, test_data), f)
    f.close()

Binary file not shown.
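Once expand_mnist.py has been run, the expanded set can be read back the same way mnist.pkl.gz is read (a sketch; it assumes the script above has already produced ../data/mnist_expanded.pkl.gz):

import gzip
import pickle

with gzip.open("../data/mnist_expanded.pkl.gz", 'rb') as f:
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")

images, labels = training_data
# The docstring above promises 250,000 images of 784 pixels each.
print(len(images), "training images, each with", len(images[0]), "pixels")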
@@ -0,0 +1,64 @@
"""
mnist_average_darkness
~~~~~~~~~~~~~~~~~~~~~~

A naive classifier for recognizing handwritten digits from the MNIST
data set.  The program classifies digits based on how dark they are
--- the idea is that digits like "1" tend to be less dark than digits
like "8", simply because the latter has a more complex shape.  When
shown an image the classifier returns whichever digit in the training
data had the closest average darkness.

The program works in two steps: first it trains the classifier, and
then it applies the classifier to the MNIST test data to see how many
digits are correctly classified.

Needless to say, this isn't a very good way of recognizing handwritten
digits!  Still, it's useful to show what sort of performance we get
from naive ideas."""

#### Libraries
# Standard library
from collections import defaultdict

# My libraries
import mnist_loader

def main():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # training phase: compute the average darknesses for each digit,
    # based on the training data
    avgs = avg_darknesses(training_data)
    # testing phase: see how many of the test images are classified
    # correctly
    num_correct = sum(int(guess_digit(image, avgs) == digit)
                      for image, digit in zip(test_data[0], test_data[1]))
    print("Baseline classifier using average darkness of image.")
    print("{0} of {1} values correct.".format(num_correct, len(test_data[1])))

def avg_darknesses(training_data):
    """ Return a defaultdict whose keys are the digits 0 through 9.
    For each digit we compute a value which is the average darkness of
    training images containing that digit.  The darkness for any
    particular image is just the sum of the darknesses for each pixel."""
    digit_counts = defaultdict(int)
    darknesses = defaultdict(float)
    for image, digit in zip(training_data[0], training_data[1]):
        digit_counts[digit] += 1
        darknesses[digit] += sum(image)
    avgs = defaultdict(float)
    for digit, n in digit_counts.items():
        avgs[digit] = darknesses[digit] / n
    return avgs

def guess_digit(image, avgs):
    """Return the digit whose average darkness in the training data is
    closest to the darkness of ``image``.  Note that ``avgs`` is
    assumed to be a defaultdict whose keys are 0...9, and whose values
    are the corresponding average darknesses across the training data."""
    darkness = sum(image)
    distances = {k: abs(v-darkness) for k, v in avgs.items()}
    return min(distances, key=distances.get)

if __name__ == "__main__":
    main()
@@ -0,0 +1,78 @@
# %load mnist_loader.py
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

#### Libraries
# Standard library
import pickle
import gzip

# Third-party libraries
import numpy as np

def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.
    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.
    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.
    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.
    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    f = gzip.open('mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.
    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.
    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.
    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
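One Python 3 detail worth noting: ``load_data_wrapper`` builds its three return values with ``zip``, which yields one-shot iterators rather than lists. The sketch below shows the usual conversion pattern, which network.py and test.py in this repository also apply:

import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

# zip objects have no len() and are exhausted after one pass,
# so convert them to lists before reusing or indexing them.
training_data = list(training_data)
test_data = list(test_data)

print(len(training_data), "training pairs")   # 50,000 (x, y) tuples, as described in the docstring
x, y = training_data[0]
print(x.shape, y.shape)                       # (784, 1) input column vector, (10, 1) one-hot label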
@@ -0,0 +1,28 @@
"""
mnist_svm
~~~~~~~~~

A classifier program for recognizing handwritten digits from the MNIST
data set, using an SVM classifier."""

#### Libraries
# My libraries
import mnist_loader

# Third-party libraries
from sklearn import svm

def svm_baseline():
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print("Baseline classifier using an SVM.")
    print(str(num_correct) + " of " + str(len(test_data[1])) + " values correct.")

if __name__ == "__main__":
    svm_baseline()
@@ -0,0 +1,152 @@
# %load network.py

"""
network.py
~~~~~~~~~~
IT WORKS

A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network.  Gradients are calculated
using backpropagation.  Note that I have focused on making the code
simple, easily readable, and easily modifiable.  It is not optimized,
and omits many desirable features.
"""

#### Libraries
# Standard library
import random

# Third-party libraries
import numpy as np

class Network(object):

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network.  For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron.  The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1.  Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""

        training_data = list(training_data)
        n = len(training_data)

        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)

        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {} : {} / {}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
        network outputs the correct result. Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
        return (output_activations-y)

#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
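As a quick sanity check of ``feedforward``, a trained ``Network`` can be applied to a single image; the predicted digit is the index of the most activated output neuron, exactly as ``evaluate`` assumes. A minimal sketch, reusing only the loader and class above (the short 5-epoch run is illustrative, not a tuned setting):

import numpy as np

import mnist_loader
import network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
test_data = list(test_data)

net = network.Network([784, 30, 10])
net.SGD(training_data, 5, 10, 3.0)            # short training run, no per-epoch evaluation

x, y = test_data[0]                           # x: (784, 1) input, y: integer label
output = net.feedforward(x)                   # (10, 1) vector of output activations
print("predicted:", np.argmax(output), "actual:", y)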
@@ -0,0 +1,360 @@
"""network2.py
~~~~~~~~~~~~~~

An improved version of network.py, implementing the stochastic
gradient descent learning algorithm for a feedforward neural network.
Improvements include the addition of the cross-entropy cost function,
regularization, and better initialization of network weights.  Note
that I have focused on making the code simple, easily readable, and
easily modifiable.  It is not optimized, and omits many desirable
features.

"""

#### Libraries
# Standard library
import json
import random
import sys

# Third-party libraries
import numpy as np


#### Define the quadratic and cross-entropy cost functions

class QuadraticCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.

        """
        return 0.5*np.linalg.norm(a-y)**2

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer."""
        return (a-y) * sigmoid_prime(z)


class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.  Note that np.nan_to_num is used to ensure numerical
        stability.  In particular, if both ``a`` and ``y`` have a 1.0
        in the same slot, then the expression (1-y)*np.log(1-a)
        returns nan.  The np.nan_to_num ensures that that is converted
        to the correct value (0.0).

        """
        return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer.  Note that the
        parameter ``z`` is not used by the method.  It is included in
        the method's parameters in order to make the interface
        consistent with the delta method for other cost classes.

        """
        return (a-y)


#### Main Network class
class Network(object):

    def __init__(self, sizes, cost=CrossEntropyCost):
        """The list ``sizes`` contains the number of neurons in the respective
        layers of the network.  For example, if the list was [2, 3, 1]
        then it would be a three-layer network, with the first layer
        containing 2 neurons, the second layer 3 neurons, and the
        third layer 1 neuron.  The biases and weights for the network
        are initialized randomly, using
        ``self.default_weight_initializer`` (see docstring for that
        method).

        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        """Initialize each weight using a Gaussian distribution with mean 0
        and standard deviation 1 over the square root of the number of
        weights connecting to the same neuron.  Initialize the biases
        using a Gaussian distribution with mean 0 and standard
        deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def large_weight_initializer(self):
        """Initialize the weights using a Gaussian distribution with mean 0
        and standard deviation 1.  Initialize the biases using a
        Gaussian distribution with mean 0 and standard deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        This weight and bias initializer uses the same approach as in
        Chapter 1, and is included for purposes of comparison.  It
        will usually be better to use the default weight initializer
        instead.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda = 0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False,
            early_stopping_n = 0):
        """Train the neural network using mini-batch stochastic gradient
        descent.  The ``training_data`` is a list of tuples ``(x, y)``
        representing the training inputs and the desired outputs.  The
        other non-optional parameters are self-explanatory, as is the
        regularization parameter ``lmbda``.  The method also accepts
        ``evaluation_data``, usually either the validation or test
        data.  We can monitor the cost and accuracy on either the
        evaluation data or the training data, by setting the
        appropriate flags.  The method returns a tuple containing four
        lists: the (per-epoch) costs on the evaluation data, the
        accuracies on the evaluation data, the costs on the training
        data, and the accuracies on the training data.  All values are
        evaluated at the end of each training epoch.  So, for example,
        if we train for 30 epochs, then the first element of the tuple
        will be a 30-element list containing the cost on the
        evaluation data at the end of each epoch.  Note that the lists
        are empty if the corresponding flag is not set.

        """

        training_data = list(training_data)
        n = len(training_data)

        if evaluation_data:
            evaluation_data = list(evaluation_data)
            n_data = len(evaluation_data)

        # early stopping functionality:
        best_accuracy = 0
        no_accuracy_change = 0

        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(
                    mini_batch, eta, lmbda, len(training_data))

            print("Epoch %s training complete" % j)

            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print("Cost on training data: {}".format(cost))
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print("Accuracy on training data: {} / {}".format(accuracy, n))
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print("Cost on evaluation data: {}".format(cost))
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print("Accuracy on evaluation data: {} / {}".format(accuracy, n_data))

            # Early stopping:
            if early_stopping_n > 0:
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    no_accuracy_change = 0
                    #print("Early-stopping: Best so far {}".format(best_accuracy))
                else:
                    no_accuracy_change += 1

                if (no_accuracy_change == early_stopping_n):
                    #print("Early-stopping: No accuracy change in last epochs: {}".format(early_stopping_n))
                    return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy

        return evaluation_cost, evaluation_accuracy, \
            training_cost, training_accuracy

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set.

        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def accuracy(self, data, convert=False):
        """Return the number of inputs in ``data`` for which the neural
        network outputs the correct result.  The neural network's
        output is assumed to be the index of whichever neuron in the
        final layer has the highest activation.

        The flag ``convert`` should be set to False if the data set is
        validation or test data (the usual case), and to True if the
        data set is the training data.  The need for this flag arises
        due to differences in the way the results ``y`` are
        represented in the different data sets.  In particular, it
        flags whether we need to convert between the different
        representations.  It may seem strange to use different
        representations for the different data sets.  Why not use the
        same representation for all three data sets?  It's done for
        efficiency reasons -- the program usually evaluates the cost
        on the training data and the accuracy on other data sets.
        These are different types of computations, and using different
        representations speeds things up.  More details on the
        representations can be found in
        mnist_loader.load_data_wrapper.

        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y))
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in data]

        result_accuracy = sum(int(x == y) for (x, y) in results)
        return result_accuracy

    def total_cost(self, data, lmbda, convert=False):
        """Return the total cost for the data set ``data``.  The flag
        ``convert`` should be set to False if the data set is the
        training data (the usual case), and to True if the data set is
        the validation or test data.  See comments on the similar (but
        reversed) convention for the ``accuracy`` method, above.
        """
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert: y = vectorized_result(y)
            cost += self.cost.fn(a, y)/len(data)
        # add the L2 regularization term once for the whole data set
        cost += 0.5*(lmbda/len(data))*sum(np.linalg.norm(w)**2 for w in self.weights) # '**' - to the power of.
        return cost

    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        f = open(filename, "w")
        json.dump(data, f)
        f.close()

#### Loading a Network
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network.

    """
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net

#### Miscellaneous functions
def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the j'th position
    and zeroes elsewhere.  This is used to convert a digit (0...9)
    into a corresponding desired output from the neural network.

    """
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
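Since ``Network.save`` and the module-level ``load`` serialize everything to JSON, a trained network2 model can be persisted and restored without retraining. A sketch of the round trip using only the methods above (``net2.json`` is just an example filename, and the 3-epoch run is only for illustration):

import mnist_loader
import network2

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
validation_data = list(validation_data)

net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.SGD(training_data, 3, 10, 0.5, lmbda=5.0)   # brief training, no monitoring flags

net.save("net2.json")                           # sizes, weights, biases, cost name -> JSON
restored = network2.load("net2.json")
print(restored.accuracy(validation_data), "/", len(validation_data))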
@@ -0,0 +1,311 @@
"""network3.py
~~~~~~~~~~~~~~

A Theano-based program for training and running simple neural
networks.

Supports several layer types (fully connected, convolutional, max
pooling, softmax), and activation functions (sigmoid, tanh, and
rectified linear units, with more easily added).

When run on a CPU, this program is much faster than network.py and
network2.py.  However, unlike network.py and network2.py it can also
be run on a GPU, which makes it faster still.

Because the code is based on Theano, the code is different in many
ways from network.py and network2.py.  However, where possible I have
tried to maintain consistency with the earlier programs.  In
particular, the API is similar to network2.py.  Note that I have
focused on making the code simple, easily readable, and easily
modifiable.  It is not optimized, and omits many desirable features.

This program incorporates ideas from the Theano documentation on
convolutional neural nets (notably,
http://deeplearning.net/tutorial/lenet.html ), from Misha Denil's
implementation of dropout (https://github.com/mdenil/dropout ), and
from Chris Olah (http://colah.github.io ).

"""

#### Libraries
# Standard library
import pickle
import gzip

# Third-party libraries
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor import shared_randomstreams
from theano.tensor.signal.pool import pool_2d

# Activation functions for neurons
def linear(z): return z
def ReLU(z): return T.maximum(0.0, z)
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh


#### Constants
GPU = True
if GPU:
    print("Trying to run under a GPU.  If this is not desired, then modify "+\
          "network3.py\nto set the GPU flag to False.")
    try: theano.config.device = 'gpu'
    except: pass # it's already set
    theano.config.floatX = 'float32'
else:
    print("Running with a CPU.  If this is not desired, then modify "+\
          "network3.py to set\nthe GPU flag to True.")

#### Load the MNIST data
def load_data_shared(filename="mnist.pkl.gz"):
    f = gzip.open(filename, 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    def shared(data):
        """Place the data into shared variables.  This allows Theano to copy
        the data to the GPU, if one is available.

        """
        shared_x = theano.shared(
            np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        shared_y = theano.shared(
            np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        return shared_x, T.cast(shared_y, "int32")
    return [shared(training_data), shared(validation_data), shared(test_data)]

#### Main class used to construct and train networks
class Network(object):

    def __init__(self, layers, mini_batch_size):
        """Takes a list of `layers`, describing the network architecture, and
        a value for the `mini_batch_size` to be used during training
        by stochastic gradient descent.

        """
        self.layers = layers
        self.mini_batch_size = mini_batch_size
        self.params = [param for layer in self.layers for param in layer.params]
        self.x = T.matrix("x")
        self.y = T.ivector("y")
        init_layer = self.layers[0]
        init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
        for j in range(1, len(self.layers)): # xrange() was renamed to range() in Python 3.
            prev_layer, layer = self.layers[j-1], self.layers[j]
            layer.set_inpt(
                prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
        self.output = self.layers[-1].output
        self.output_dropout = self.layers[-1].output_dropout

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            validation_data, test_data, lmbda=0.0):
        """Train the network using mini-batch stochastic gradient descent."""
        training_x, training_y = training_data
        validation_x, validation_y = validation_data
        test_x, test_y = test_data

        # compute number of minibatches for training, validation and testing
        num_training_batches = int(size(training_data)/mini_batch_size)
        num_validation_batches = int(size(validation_data)/mini_batch_size)
        num_test_batches = int(size(test_data)/mini_batch_size)

        # define the (regularized) cost function, symbolic gradients, and updates
        l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])
        cost = self.layers[-1].cost(self)+\
               0.5*lmbda*l2_norm_squared/num_training_batches
        grads = T.grad(cost, self.params)
        updates = [(param, param-eta*grad)
                   for param, grad in zip(self.params, grads)]

        # define functions to train a mini-batch, and to compute the
        # accuracy in validation and test mini-batches.
        i = T.lscalar() # mini-batch index
        train_mb = theano.function(
            [i], cost, updates=updates,
            givens={
                self.x:
                training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        validate_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                validation_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                validation_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        test_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                test_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        self.test_mb_predictions = theano.function(
            [i], self.layers[-1].y_out,
            givens={
                self.x:
                test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        # Do the actual training
        best_validation_accuracy = 0.0
        for epoch in range(epochs):
            for minibatch_index in range(num_training_batches):
                iteration = num_training_batches*epoch+minibatch_index
                if iteration % 1000 == 0:
                    print("Training mini-batch number {0}".format(iteration))
                cost_ij = train_mb(minibatch_index)
                if (iteration+1) % num_training_batches == 0:
                    validation_accuracy = np.mean(
                        [validate_mb_accuracy(j) for j in range(num_validation_batches)])
                    print("Epoch {0}: validation accuracy {1:.2%}".format(
                        epoch, validation_accuracy))
                    if validation_accuracy >= best_validation_accuracy:
                        print("This is the best validation accuracy to date.")
                        best_validation_accuracy = validation_accuracy
                        best_iteration = iteration
                        if test_data:
                            test_accuracy = np.mean(
                                [test_mb_accuracy(j) for j in range(num_test_batches)])
                            print('The corresponding test accuracy is {0:.2%}'.format(
                                test_accuracy))
        print("Finished training network.")
        print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
            best_validation_accuracy, best_iteration))
        print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))

#### Define layer types

class ConvPoolLayer(object):
    """Used to create a combination of a convolutional and a max-pooling
    layer.  A more sophisticated implementation would separate the
    two, but for our purposes we'll always use them together, and it
    simplifies the code, so it makes sense to combine them.

    """

    def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        pooled_out = pool_2d(
            input=conv_out, ws=self.poolsize, ignore_border=True)
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers

class FullyConnectedLayer(object):

    def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = self.activation_fn(
            (1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = self.activation_fn(
            T.dot(self.inpt_dropout, self.w) + self.b)

    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))

class SoftmaxLayer(object):

    def __init__(self, n_in, n_out, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.zeros((n_out,), dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)

    def cost(self, net):
        "Return the log-likelihood cost."
        return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])

    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))


#### Miscellanea
def size(data):
    "Return the size of the dataset `data`."
    return data[0].get_value(borrow=True).shape[0]

def dropout_layer(layer, p_dropout):
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
    return layer*T.cast(mask, theano.config.floatX)
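Both ``FullyConnectedLayer`` and ``SoftmaxLayer`` accept a ``p_dropout`` argument, so dropout regularization only needs to be requested when the layers are constructed. A sketch, assuming the same ``load_data_shared`` data and mini-batch size used in test.py (the epoch count, eta, lmbda and p_dropout values are illustrative, not tuned):

import network3
from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer, ReLU

training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10

net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=20*12*12, n_out=100,
                        activation_fn=ReLU, p_dropout=0.5),   # drop half the hidden units during training
    SoftmaxLayer(n_in=100, n_out=10, p_dropout=0.5)], mini_batch_size)
net.SGD(training_data, 30, mini_batch_size, 0.03,
        validation_data, test_data, lmbda=0.1)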
@@ -0,0 +1,210 @@
"""
Testing code for different neural network configurations.
Adapted for Python 3.5.2

Usage in shell:
    python3.5 test.py

Network (network.py and network2.py) parameters:
    2nd param is epochs count
    3rd param is batch size
    4th param is learning rate (eta)

Author:
    Michał Dobrzański, 2016
    dobrzanski.michal.daniel@gmail.com
"""

# ----------------------
# - read the input data:
'''
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
'''
# ---------------------
# - network.py example:
#import network

'''
net = network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
'''

# ----------------------
# - network2.py example:
#import network2

'''
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
#net.large_weight_initializer()
net.SGD(training_data, 30, 10, 0.1, lmbda=5.0, evaluation_data=validation_data,
        monitor_evaluation_accuracy=True)
'''

# chapter 3 - Overfitting example - too many epochs of learning applied to a small (1k samples) amount of data.
# Overfitting is treating noise as a signal.
'''
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.large_weight_initializer()
net.SGD(training_data[:1000], 400, 10, 0.5, evaluation_data=test_data,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True)
'''

# chapter 3 - Regularization (weight decay) example 1 (only 1000 of training data and 30 hidden neurons)
'''
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.large_weight_initializer()
net.SGD(training_data[:1000], 400, 10, 0.5,
        evaluation_data=test_data,
        lmbda=0.1, # this is a regularization parameter
        monitor_evaluation_cost=True,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True,
        monitor_training_accuracy=True)
'''

# chapter 3 - Early stopping implemented
'''
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.SGD(training_data[:1000], 30, 10, 0.5,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True,
        early_stopping_n=10)
'''

# chapter 4 - The vanishing gradient problem - deep networks are hard to train with the simple SGD algorithm;
# this network learns much more slowly than a shallow one.
'''
net = network2.Network([784, 30, 30, 30, 30, 10], cost=network2.CrossEntropyCost)
net.SGD(training_data, 30, 10, 0.1,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True)
'''


# ----------------------
# Theano and CUDA
# ----------------------

"""
This deep network uses Theano with GPU acceleration support.
I am using Ubuntu 16.04 with CUDA 7.5.
Tutorial:
http://deeplearning.net/software/theano/install_ubuntu.html#install-ubuntu

The following command will update only Theano:
sudo pip install --upgrade --no-deps theano

The following command will update Theano and Numpy/Scipy (see the warning below):
sudo pip install --upgrade theano

"""

"""
Below is a testing function to check whether your computations run on the CPU or the GPU.
If the result is 'Used the cpu' and you want them to run on the GPU, do the following:
1) install Theano:
    sudo python3.5 -m pip install Theano
2) download and install the latest CUDA:
    https://developer.nvidia.com/cuda-downloads
    I had some issues with that, so I followed this idea (the better option is to download the 1.1 GB package as a .run file):
    http://askubuntu.com/questions/760242/how-can-i-force-16-04-to-add-a-repository-even-if-it-isnt-considered-secure-eno
    You may also want to grab the proper NVIDIA driver; choose it from:
    System Settings > Software & Updates > Additional Drivers.
3) it should now work; run it with:
    THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python3.5 test.py
    http://deeplearning.net/software/theano/tutorial/using_gpu.html
4) Optionally, you can add cuDNN support from:
    https://developer.nvidia.com/cudnn

"""
def testTheano():
    from theano import function, config, shared, sandbox
    import theano.tensor as T
    import numpy
    import time
    print("Testing Theano library...")
    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 1000

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
    f = function([], T.exp(x))
    print(f.maker.fgraph.toposort())
    t0 = time.time()
    for i in range(iters):
        r = f()
    t1 = time.time()
    print("Looping %d times took %f seconds" % (iters, t1 - t0))
    print("Result is %s" % (r,))
    if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        print('Used the cpu')
    else:
        print('Used the gpu')
# Perform check:
#testTheano()


# ----------------------
# - network3.py example:
import network3
from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer # softmax plus log-likelihood cost is more common in modern image classification networks.

# read data:
training_data, validation_data, test_data = network3.load_data_shared()
# mini-batch size:
mini_batch_size = 10

# chapter 6 - shallow architecture using just a single hidden layer, containing 100 hidden neurons.
'''
net = Network([
    FullyConnectedLayer(n_in=784, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
'''

# chapter 6 - 5x5 local receptive fields, 20 feature maps, 2x2 max-pooling layer
'''
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=20*12*12, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
'''

# chapter 6 - inserting a second convolutional-pooling layer into the previous example => better accuracy
'''
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=40*4*4, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
'''

# chapter 6 - rectified linear units and some L2 regularization (lmbda=0.1) => even better accuracy
from network3 import ReLU
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.03, validation_data, test_data, lmbda=0.1)
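Before launching a long training run it can be worth confirming which device Theano actually picked up, without timing anything. A minimal sketch (it only reads theano.config, so it can be run alongside testTheano() above; 'gpu' only appears if the THEANO_FLAGS described earlier took effect):

import theano

print("device :", theano.config.device)   # expected to be 'gpu' under the flags above
print("floatX :", theano.config.floatX)   # 'float32' is what network3.py sets for GPU runs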