Compare commits

1 commit: master...La-branche

Author | SHA1 | Date
---|---|---
Absobel | 1276934c54 |
```diff
@@ -1,3 +0,0 @@
-__pycache__/
-set/
-
```
RUN.py (14 changed lines)
```diff
@@ -3,19 +3,19 @@
 import mnist_loader
 training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

 #print(list(training_data)[0][1])

 import network
 import dataset_loader

 net = network.Network([784, 30, 10]) # Tested: 94.56% / 94.87%
 net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
 print("Results : {} / 10000".format(net.evaluate(test_data)))

 net = network.Network([262144, 50, 20, 30, 10]) # Tested: 94.56%
 net.SGD(dataset_loader.loadTrainingSet("training"), 30, 10, 3.0, test_data=dataset_loader.loadTestSet("test"))

 #net = network.Network([784, 100, 10]) # Apparently works better
 #net.SGD(dataset_loader.loadTrainingSet("setcomplete"), 30, 10, 3.0, test_data=dataset_loader.loadTestSet("setcomplete"))
 # net = network.Network([784, 100, 10]) # Apparently works better
 # net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

 # net = network.Network([784, 100, 10]) # Apparently doesn't work well
 # net.SGD(training_data, 30, 10, 0.001, test_data=test_data)

 # net = network.Network([784, 30, 10]) # Apparently doesn't work at all
 # net.SGD(training_data, 30, 10, 100.0, test_data=test_data)
```
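A side note on the experiments recorded above: the commented-out runs amount to a learning-rate sweep. With eta = 0.001 each mini-batch update barely moves the weights, so 30 epochs are nowhere near enough, while eta = 100.0 overshoots so badly that the sigmoids saturate and learning stalls, which matches the "doesn't work at all" comment. A minimal sketch of that sweep, assuming only the repo's own `mnist_loader` and `network` modules:

```python
# Hypothetical sketch, not part of the commit: reproduce the
# learning-rate experiments from RUN.py as a single loop.
import mnist_loader
import network

for eta in (0.001, 3.0, 100.0):
    # Reload each time: load_data_wrapper() returns one-shot zip iterators.
    training_data, _, test_data = mnist_loader.load_data_wrapper()
    net = network.Network([784, 30, 10])
    net.SGD(training_data, 30, 10, eta, test_data=test_data)
```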
Binary file not shown.
Binary file not shown.
```diff
@@ -1,89 +0,0 @@
-from mnist_loader import load_data
-import numpy as np
-import os
-from PIL import Image
-import resource
-
-
-def vectorized_result(j):
-    """Return a 10-dimensional unit vector with a 1.0 in the jth
-    position and zeroes elsewhere. This is used to convert a digit
-    (0...9) into a corresponding desired output from the neural
-    network."""
-    e = np.zeros((10, 1))
-    e[j] = 1.0
-    return e
-
-
-def loadSet(path):
-    filelist = []
-    for root, dirs, files in os.walk(path):
-        for file in files:
-            filelist.append(os.path.join(root, file))
-
-    i = 0
-    pixels = []
-    result = []
-
-    for name in filelist:
-        if i >= 500:
-            break
-        if ".png" in name:
-            with Image.open(path + "/" + name.split("/")[-1]) as im:
-                pix = im.load()
-                temparray = []
-                result.append(int(name.split("/")[-1][0]))
-                for x in range(im.size[0]):
-                    for y in range(im.size[1]):
-                        temparray.append(pix[x, y] / 255)
-                pixels.append(temparray)
-        print(str("%.2f" % round(i / (len(filelist) if len(filelist) < 500 else 500) * 100, 2)) + "% Done, ram usage: " + str("%.2f" % round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024*1024), 2)) + "Go", end = '\r')
-        i += 1
-
-    print("max ram usage: " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024*1024)) + "Go")
-
-    return (pixels, result)
-
-
-def loadTrainingSet(path):
-    print("importing training set...")
-    set = loadSet(path)
-    training_inputs = [np.reshape(x, (784, 1)) for x in set[0]]
-    training_results = [vectorized_result(int(y)) for y in set[1]]
-    training_data = zip(training_inputs, training_results)
-    return training_data
-
-
-def loadTestSet(path):
-    print("importing test set...")
-    set = loadSet(path)
-    test_inputs = [np.reshape(x, (784, 1)) for x in set[0]]
-    test_data = zip(test_inputs, set[1])
-    return test_data
-
-
-if __name__ == "__main__":
-    print(loadSet("set")[0])
```
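One thing worth flagging in the deleted loader: `loadSet` flattens each W×H PNG into a list of W*H normalized pixels, so the vector length depends on the image size (28×28 gives 784, 512×512 gives the 262144 used in RUN.py). Yet `loadTrainingSet` and `loadTestSet` both hard-code `np.reshape(x, (784, 1))`, which raises a ValueError for anything other than 28×28 input. A defensive variant, as a sketch assuming only numpy (`to_column` is a hypothetical helper, not in the repo):

```python
import numpy as np

def to_column(pixels):
    # Reshape a flattened image into a column vector of whatever length
    # the image actually has, instead of hard-coding 784.
    x = np.asarray(pixels, dtype=float)
    return x.reshape(x.size, 1)  # (784, 1) for 28x28, (262144, 1) for 512x512
```

Note also that `pix[x, y] / 255` assumes single-channel images; for RGB PNGs, PIL returns a tuple per pixel and the division would fail.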
Binary file not shown.
```diff
@@ -66,8 +66,6 @@ def load_data_wrapper():
     validation_data = zip(validation_inputs, va_d[1])
     test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
     test_data = zip(test_inputs, te_d[1])
-    print(te_d[0])
-    print("1:", te_d[1])
     return (training_data, validation_data, test_data)

 def vectorized_result(j):
```
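The two removed `print` calls were debug output. A related Python 3 pitfall in this file: `load_data_wrapper` returns `zip` objects, which are single-use iterators, so inspecting them for debugging silently exhausts them; this is presumably why `SGD` in the new `Network` class below starts by calling `list()` on its inputs. A two-line illustration:

```python
pairs = zip([1, 2, 3], ["a", "b", "c"])
print(list(pairs))  # [(1, 'a'), (2, 'b'), (3, 'c')]
print(list(pairs))  # [] -- the iterator is already exhausted
```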
```diff
@@ -0,0 +1,106 @@
+import random
+import numpy as np
+
+
+class Network(object):
+
+    def __init__(self, sizes):
+        """sizes: [number of input neurons, neurons in layer 1, ..., neurons in layer n, number of output neurons]
+        biases: randomly generated biases
+        weights: randomly generated weights"""
+        self.num_layers = len(sizes)
+        self.sizes = sizes
+        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
+        self.weights = [np.random.randn(y, x)
+                        for x, y in zip(sizes[:-1], sizes[1:])]
+
+    def feedforward(self, a):
+        for b, w in zip(self.biases, self.weights):
+            a = sigmoid(np.dot(w, a)+b)
+        return a
+
+    def SGD(self, training_data, epochs, mini_batch_size, eta,
+            test_data=None):
+        """epochs: number of iterations
+        eta: learning rate
+        test_data: if none is supplied, the program will not stop at each iteration to evaluate itself"""
+
+        training_data = list(training_data)
+        n = len(training_data)
+
+        if test_data:
+            test_data = list(test_data)
+            n_test = len(test_data)
+
+        for j in range(epochs):
+            random.shuffle(training_data)
+            mini_batches = [
+                training_data[k:k+mini_batch_size]
+                for k in range(0, n, mini_batch_size)]
+            for mini_batch in mini_batches:
+                self.update_mini_batch(mini_batch, eta)
+            if test_data:
+                print("Epoch {} : {} / {}".format(j, self.evaluate(test_data), n_test))
+            else:
+                print("Epoch {} complete".format(j))
+
+    def update_mini_batch(self, mini_batch, eta):
+        """Updates the weights and biases by gradient descent."""
+        nabla_b = [np.zeros(b.shape) for b in self.biases]
+        nabla_w = [np.zeros(w.shape) for w in self.weights]
+        for x, y in mini_batch:
+            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
+            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
+            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
+        self.weights = [w-(eta/len(mini_batch))*nw
+                        for w, nw in zip(self.weights, nabla_w)]
+        self.biases = [b-(eta/len(mini_batch))*nb
+                       for b, nb in zip(self.biases, nabla_b)]
+
+    def backprop(self, x, y):
+        """Computes the gradient for gradient descent."""
+        nabla_b = [np.zeros(b.shape) for b in self.biases]
+        nabla_w = [np.zeros(w.shape) for w in self.weights]
+        # feedforward
+        activation = x
+        activations = [x] # list to store all the activations, layer by layer
+        zs = [] # list to store all the z vectors, layer by layer
+        for b, w in zip(self.biases, self.weights):
+            z = np.dot(w, activation)+b
+            zs.append(z)
+            activation = sigmoid(z)
+            activations.append(activation)
+        # backward pass
+        delta = self.cost_derivative(activations[-1], y) * \
+            sigmoid_prime(zs[-1])
+        nabla_b[-1] = delta
+        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
+        # Note that the variable l in the loop below is used a little
+        # differently to the notation in Chapter 2 of the book. Here,
+        # l = 1 means the last layer of neurons, l = 2 is the
+        # second-last layer, and so on. It's a renumbering of the
+        # scheme in the book, used here to take advantage of the fact
+        # that Python can use negative indices in lists.
+        for l in range(2, self.num_layers):
+            z = zs[-l]
+            sp = sigmoid_prime(z)
+            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
+            nabla_b[-l] = delta
+            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
+        return (nabla_b, nabla_w)
+
+    def evaluate(self, test_data):
+        """Tests the network on the given dataset."""
+        test_results = [(np.argmax(self.feedforward(x)), y)
+                        for (x, y) in test_data]
+        return sum(int(x == y) for (x, y) in test_results)
+
+    def cost_derivative(self, output_activations, y):
+        """Return the vector of partial derivatives \partial C_x /
+        \partial a for the output activations."""
+        return (output_activations-y)
+
+
+def sigmoid(z):
+    return 1.0/(1.0+np.exp(-z))
+
+
+def sigmoid_prime(z):
+    return sigmoid(z)*(1-sigmoid(z))
```
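For reference, `backprop` above is a direct transcription of the standard backpropagation equations, in the notation of the book the in-code comments cite (Nielsen's *Neural Networks and Deep Learning*); here C is the quadratic cost, so `cost_derivative` returns the output-layer term ∇_a C = (a^L − y):

```latex
% Output-layer error, backward recurrence, and the two gradients
% computed by backprop():
\delta^{L} = \nabla_a C \odot \sigma'(z^{L}), \qquad
\delta^{l} = \bigl((w^{l+1})^{T}\delta^{l+1}\bigr) \odot \sigma'(z^{l}), \qquad
\frac{\partial C}{\partial b^{l}_{j}} = \delta^{l}_{j}, \qquad
\frac{\partial C}{\partial w^{l}_{jk}} = a^{l-1}_{k}\,\delta^{l}_{j}
```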
```diff
@@ -132,8 +132,6 @@ class Network(object):
         neuron in the final layer has the highest activation."""
         test_results = [(np.argmax(self.feedforward(x)), y)
                         for (x, y) in test_data]
-
-        print(test_data[0], test_data[1])
         return sum(int(x == y) for (x, y) in test_results)

     def cost_derivative(self, output_activations, y):
```
```diff
@@ -149,6 +147,3 @@ def sigmoid(z):
 def sigmoid_prime(z):
     """Derivative of the sigmoid function."""
     return sigmoid(z)*(1-sigmoid(z))
-
-
-
```
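As a quick sanity check of the `sigmoid_prime` identity used throughout, starting from the definition in `sigmoid`:

```latex
\sigma(z) = \frac{1}{1 + e^{-z}}
\quad\Longrightarrow\quad
\sigma'(z) = \frac{e^{-z}}{(1 + e^{-z})^{2}}
           = \sigma(z)\cdot\frac{e^{-z}}{1 + e^{-z}}
           = \sigma(z)\bigl(1 - \sigma(z)\bigr)
```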