
Here is what I am supposed to do: I have a black and white image (100x100 px):

[100x100 black-and-white input image]

I am supposed to train a backpropagation neural network on this image. The inputs are the x, y coordinates of a pixel (from 0 to 99) and the output is either 1 (white) or 0 (black).

Once the network has learned, I would like it to reproduce the image based on its weights and get the closest possible image to the original.

Here is my backprop implementation:

import os
import math
import Image
import random
from random import sample

#------------------------------ class definitions

class Weight:
    def __init__(self, fromNeuron, toNeuron):
        self.value = random.uniform(-0.5, 0.5)
        self.fromNeuron = fromNeuron
        self.toNeuron = toNeuron
        fromNeuron.outputWeights.append(self)
        toNeuron.inputWeights.append(self)
        self.delta = 0.0 # accumulated delta; used to adjust the weight value after each training cycle

    def calculateDelta(self, network):
        self.delta += self.fromNeuron.value * self.toNeuron.error

class Neuron:
    def __init__(self):
        self.value = 0.0        # the output
        self.idealValue = 0.0   # the ideal output
        self.error = 0.0        # error between output and ideal output
        self.inputWeights = []
        self.outputWeights = []

    def activate(self, network):
        x = 0.0
        for weight in self.inputWeights:
            x += weight.value * weight.fromNeuron.value
        # sigmoid function (clamped for extreme inputs, where it saturates to 0 or 1)
        if x < -320:
            self.value = 0
        elif x > 320:
            self.value = 1
        else:
            self.value = 1 / (1 + math.exp(-x))

class Layer:
    def __init__(self, neurons):
        self.neurons = neurons

    def activate(self, network):
        for neuron in self.neurons:
            neuron.activate(network)

class Network:
    def __init__(self, layers, learningRate):
        self.layers = layers
        self.learningRate = learningRate # the rate at which the network learns
        self.weights = []
        for hiddenNeuron in self.layers[1].neurons:
            for inputNeuron in self.layers[0].neurons:
                self.weights.append(Weight(inputNeuron, hiddenNeuron))
            for outputNeuron in self.layers[2].neurons:
                self.weights.append(Weight(hiddenNeuron, outputNeuron))

    def setInputs(self, inputs):
        self.layers[0].neurons[0].value = float(inputs[0])
        self.layers[0].neurons[1].value = float(inputs[1])

    def setExpectedOutputs(self, expectedOutputs):
        self.layers[2].neurons[0].idealValue = expectedOutputs[0]

    def calculateOutputs(self, expectedOutputs):
        self.setExpectedOutputs(expectedOutputs)
        self.layers[1].activate(self) # activation function for hidden layer
        self.layers[2].activate(self) # activation function for output layer

    def calculateOutputErrors(self):
        for neuron in self.layers[2].neurons:
            neuron.error = (neuron.idealValue - neuron.value) * neuron.value * (1 - neuron.value)

    def calculateHiddenErrors(self):
        for neuron in self.layers[1].neurons:
            error = 0.0
            for weight in neuron.outputWeights:
                error += weight.toNeuron.error * weight.value
            neuron.error = error * neuron.value * (1 - neuron.value)

    def calculateDeltas(self):
        for weight in self.weights:
            weight.calculateDelta(self)

    def train(self, inputs, expectedOutputs):
        self.setInputs(inputs)
        self.calculateOutputs(expectedOutputs)
        self.calculateOutputErrors()
        self.calculateHiddenErrors()
        self.calculateDeltas()

    def learn(self):
        for weight in self.weights:
            weight.value += self.learningRate * weight.delta

    def calculateSingleOutput(self, inputs):
        self.setInputs(inputs)
        self.layers[1].activate(self)
        self.layers[2].activate(self)
        #return round(self.layers[2].neurons[0].value, 0)
        return self.layers[2].neurons[0].value


 

#------------------------------ initialize objects etc

inputLayer = Layer([Neuron() for n in range(2)])
hiddenLayer = Layer([Neuron() for n in range(10)])
outputLayer = Layer([Neuron() for n in range(1)])

learningRate = 0.4

network = Network([inputLayer, hiddenLayer, outputLayer], learningRate)

# let's get the training set
os.chdir("D:/stuff")
image = Image.open("backprop-input.gif")
pixels = image.load()
bbox = image.getbbox()
width = 5  #bbox[2] # image width
height = 5 #bbox[3] # image height

trainingInputs = []
trainingOutputs = []
b = w = 0
for x in range(0, width):
    for y in range(0, height):
        if (0, 0, 0, 255) == pixels[x, y]:
            color = 0
            b += 1
        elif (255, 255, 255, 255) == pixels[x, y]:
            color = 1
            w += 1
        trainingInputs.append([float(x), float(y)])
        trainingOutputs.append([float(color)])

print "\nOriginal image ... Black:"+str(b)+" White:"+str(w)+"\n"

#------------------------------ let's train

for i in range(500):
    for j in range(len(trainingOutputs)):
        network.train(trainingInputs[j], trainingOutputs[j])
        network.learn()
    for w in network.weights:
        w.delta = 0.0

#------------------------------ let's check

b = w = 0
for x in range(0, width):
    for y in range(0, height):
        out = network.calculateSingleOutput([float(x), float(y)])
        if 0.0 == round(out):
            color = (0, 0, 0, 255)
            b += 1
        elif 1.0 == round(out):
            color = (255, 255, 255, 255)
            w += 1
        pixels[x, y] = color
        #print out

print "\nAfter learning the network thinks ... Black:"+str(b)+" White:"+str(w)+"\n"

There is some issue with my implementation. The above code returns:

Original image ... Black:21 White:4
After learning the network thinks ... Black:25 White:0

It does the same thing if I use a larger training set (I'm using just 25 pixels of the image above for testing purposes): after learning, it claims that all pixels should be black.

Now, if I use a manual training set like this instead:

trainingInputs = [
    [0.0,0.0],
    [1.0,0.0],
    [2.0,0.0],
    [0.0,1.0],
    [1.0,1.0],
    [2.0,1.0],
    [0.0,2.0],
    [1.0,2.0],
    [2.0,2.0]
]

trainingOutputs = [
    [0.0],
    [1.0],
    [1.0],
    [0.0],
    [1.0],
    [0.0],
    [0.0],
    [0.0],
    [1.0]
]

#------------------------------ let's train

for i in range(500):
    for j in range(len(trainingOutputs)):
        network.train(trainingInputs[j], trainingOutputs[j])
        network.learn()
    for w in network.weights:
        w.delta = 0.0

#------------------------------ let's check

for inputs in trainingInputs:
    print network.calculateSingleOutput(inputs)

The output looks like this, for example:

0.0330125791296   # this should be 0, OK
0.953539182136    # this should be 1, OK
0.971854575477    # this should be 1, OK
0.00046146137467  # this should be 0, OK
0.896699762781    # this should be 1, OK
0.112909223162    # this should be 0, OK
0.00034058462280  # this should be 0, OK
0.0929886299643   # this should be 0, OK
0.940489647869    # this should be 1, OK

In other words, the network guessed all pixels right (both black and white). Why does it say all pixels should be black when I use actual pixels from the image instead of a hardcoded training set like the one above?

I tried changing the number of neurons in the hidden layer (up to 100 neurons) with no success.

1 Answer


What is happening here is that you are overloading your middle (hidden) layer neurons with the input set. Your training set consists of 10,000 discrete samples (100 px x 100 px), and you are trying to encode those 10,000 values into just 10 hidden neurons. That level of compression is difficult; not impossible, but very hard to achieve in practice. To make it work, you can increase the number of hidden neurons, and you will get a reasonable result, but of course it will then take much longer to train.
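As a rough sketch of that suggestion, reusing the classes from the question, widening the hidden layer is a one-line change. The figure of 100 neurons below is only an illustrative example, not a tuned value; a wider layer also means proportionally more weights and longer training:

# sketch only: same three-layer setup as in the question, but with a wider hidden layer
# (100 is an arbitrary example value, not a recommendation)
inputLayer = Layer([Neuron() for n in range(2)])     # x and y coordinates
hiddenLayer = Layer([Neuron() for n in range(100)])  # was 10 in the original code
outputLayer = Layer([Neuron() for n in range(1)])    # 0 = black, 1 = white
learningRate = 0.4
network = Network([inputLayer, hiddenLayer, outputLayer], learningRate)

Everything else (the training loop and the pixel check) stays the same; only the capacity of the middle layer changes.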
