Debugging Neural Network's feedforward propagation


I am implementing a neural network's forward propagation.

This is my train method:

def train(self, x, y):
    for layer in self.layers:
        prediction = layer.forward_prop(x.T)  # column vector

and this is the forward_prop method:

def forward_prop(self, x):
    if self.is_input:
        self.out = x
    else:
        # bug here: x has to be updated according to previous layer's output!
        w_sum = np.dot(self.weight, x) + self.bias
        self.out = self.activation(w_sum)
    return self.out

There is an error: layer 2 tries to multiply its (4,4) weight matrix by the original input x (shape (2,1)) instead of by the previous layer's (4,1) output:

ValueError: shapes (4,4) and (2,1) not aligned: 4 (dim 1) != 2 (dim 0)
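To make the mismatch concrete, here is a minimal shape-only sketch (the variable names are mine, using the same (2, 4, 4, 1) architecture as the example below):

import numpy as np

x  = np.zeros((2, 1))   # raw input, column vector
W1 = np.zeros((4, 2))   # layer 1 weights
W2 = np.zeros((4, 4))   # layer 2 weights

z1 = np.dot(W1, x)      # (4,2) @ (2,1) -> (4,1): fine
try:
    np.dot(W2, x)       # (4,4) @ (2,1): inner dimensions 4 != 2
except ValueError as e:
    print(e)            # shapes (4,4) and (2,1) not aligned: 4 (dim 1) != 2 (dim 0)

Every layer after the first receives the raw x again, so only the first multiplication can succeed.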

That is, I want:

  • Layer 0 (input layer): returns z^(0) = x
  • Layer 1 (first hidden layer): returns z^(1) = sig(W^(1) @ z^(0) + b^(1))
  • Layer 2 (second hidden layer): returns z^(2) = sig(W^(2) @ z^(1) + b^(2))
  • Layer 3 (output): returns z^(3) = sig(W^(3) @ z^(2) + b^(3))
  • ...
  • Layer i: returns z^(i) = sig(W^(i) @ z^(i-1) + b^(i))
  • ...

How can I store the previous layer's output?

This is my reproducible example:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

class NeuralNetwork:

    def __init__(self):
        self.layers = []

    def add_layer(self, layer):
        self.layers.append(layer)

    def create(self):
        for i, layer in enumerate(self.layers):
            if i == 0:
                layer.is_input = True
            else:
                layer.init_parameters(self.layers[i - 1].neurons)

    def summary(self):
        for i, layer in enumerate(self.layers):
            print("Layer", i)
            print("neurons:", layer.neurons)
            print("is_input:", layer.is_input)
            print("act:", layer.activation)
            print("weight:", np.shape(layer.weight))
            print(layer.weight)
            print("bias:", np.shape(layer.bias))
            print(layer.bias)
            print("")

    def train(self, x, y):
        for layer in self.layers:
            prediction = layer.forward_prop(x.T)  # column vector


class Layer:

    def __init__(self, neurons, is_input=False, activation=None):
        self.out = None
        self.weight = None
        self.bias = None
        self.neurons = neurons
        self.is_input = is_input
        self.activation = activation

    def init_parameters(self, prev_layer_neurons):
        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.neurons, prev_layer_neurons)))
        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.neurons)).T  # column vector
        if self.activation is None:
            self.activation = sigmoid

    def forward_prop(self, x):
        if self.is_input:
            self.out = x
        else:
            w_sum = np.dot(self.weight, x) + self.bias  # bug: x should be the previous layer's output
            self.out = self.activation(w_sum)
        return self.out


if __name__ == '__main__':
    net = NeuralNetwork() 
    d = 2  # input dimension 
    c = 1  # output

    for m in (d, 4, 4, c):
        layer = Layer(m) 
        net.add_layer(layer)

    net.create()

    # net.summary()  # dbg

    # Training set
    X = np.asmatrix([
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1]
    ])
    y = np.asarray([0, 0, 1, 0])

    net.train(X[2], y[2])

Answer:

I solved it simply this way:

def train(self, x, y):
    z = x.T  # start from the input, as a column vector
    for layer in self.layers:
        z = layer.forward_prop(z)  # feed each layer the previous layer's output
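
With this change, z always holds the previous layer's output, so the shapes line up: (4,2) @ (2,1) -> (4,1), then (4,4) @ (4,1) -> (4,1), then (1,4) @ (4,1) -> (1,1). For completeness, a minimal sketch of a prediction method built the same way (the predict name is my own, not part of the original code):

def predict(self, x):
    z = x.T  # column vector, as in train
    for layer in self.layers:
        z = layer.forward_prop(z)
    return z  # output of the last layer, shape (1, 1) here

For example, net.predict(X[2]) returns the network's output for the third training sample.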