This example uses a Jupyter Notebook and Python to work through the feedforward and backpropagation steps of an artificial neural network (ANN).

Exam

$$ \frac{\partial E}{\partial w_{jk}} = -e_k \cdot \sigma\left(\sum_j w_{jk}\, o_j\right) \left(1-\sigma\left(\sum_j w_{jk}\, o_j\right) \right) o_j $$

$$ w_{new} = w_{old} - \alpha \frac{\partial E}{\partial w} $$
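
Since the sigmoid derivative satisfies $\sigma'(x) = \sigma(x)\left(1-\sigma(x)\right)$, and $o_k = \sigma\left(\sum_j w_{jk}\, o_j\right)$ is the output of node $k$, the gradient above collapses to the compact form that the code evaluates as `-oe*Oo*(1-Oo)` times `Oh.T`:

$$ \frac{\partial E}{\partial w_{jk}} = -e_k \, o_k \left(1 - o_k\right) o_j $$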

Doing it step by step

import numpy as np

# sigmoid activation function
def af(x):
    return 1/(1+np.exp(-x))
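
As a quick sanity check (not in the original notebook), the sigmoid maps 0 to exactly 0.5 and saturates towards 0 and 1 for large negative and positive arguments:

print(af(0))     # 0.5 exactly
print(af(10))    # ~0.99995
print(af(-10))   # ~0.00005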


inN = 3    # input nodes
hiN = 4    # hidden nodes
outN = 3   # output nodes
lr = 0.4   # learning rate

# weight layout (one row per destination node):
#   W11 W21 W31
#   W12 W22 W32
#   ...
np.random.seed(53)    # reproducible initial weights
wih = np.random.rand(hiN, inN) - 0.5    # input-to-hidden weights in [-0.5, 0.5)
who = np.random.rand(outN, hiN) - 0.5   # hidden-to-output weights in [-0.5, 0.5)
print("Wih: ", wih)
print("Who: ", who)

Wih:  [[ 0.34666241  0.06116554 -0.0451246 ]
 [-0.14782509  0.08585138  0.03574974]
 [ 0.32745628 -0.2354578  -0.02162094]
 [-0.15221498 -0.36552168 -0.24002265]]
Who:  [[-0.45236532 -0.1057067  -0.12838381  0.05673292]
 [ 0.39749455 -0.33265411 -0.09279358  0.15235334]
 [ 0.06774908  0.06651886  0.0243551   0.10758002]]

Feedforward

inputList = [0.32, 0.27, 0.18]
inputs = np.array(inputList, ndmin=2).T   # column vector of inputs

# computing hidden layer
Xh = np.dot(wih, inputs)
print('Xh: ', Xh)

Oh = af(Xh)
print('Oh:', Oh)

# computing output
Xo = np.dot(who, Oh)
print('Xo: ', Xo)
Oo = af(Xo)
print('Oo: ', Oo)

Xh:  [[ 0.11932424]
 [-0.0176892 ]
 [ 0.03732063]
 [-0.19060372]]
Oh: [[0.52979571]
 [0.49557782]
 [0.50932908]
 [0.45249281]]
Xo:  [[-0.33176547]
 [ 0.06741123]
 [ 0.12994239]]
Oo:  [[0.41781112]
 [0.51684643]
 [0.53243996]]
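
Both layers repeat the same pattern (weighted sum, then sigmoid), so the forward pass can be wrapped in a small helper. This is only a convenience sketch; the helper name and its return convention are not part of the original notebook:

def forward(wih, who, inputs):
    # hidden layer: weighted sum of the inputs, then sigmoid
    Oh = af(np.dot(wih, inputs))
    # output layer: weighted sum of the hidden activations, then sigmoid
    Oo = af(np.dot(who, Oh))
    return Oh, Oo

Oh, Oo = forward(wih, who, inputs)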

Backpropagation

inputList = [0.32, 0.27, 0.18]
targetList = [0.82, 0.25, 0.44]

inputs = np.array(inputList, ndmin=2).T
target = np.array(targetList, ndmin=2).T

# computing hidden layer
Xh = np.dot(wih, inputs)
Oh = af(Xh)

# computing output
Xo = np.dot(who, Oh)
Oo = af(Xo)

# output error
oe = target - Oo
# error propagated back to the hidden layer, in proportion to the connecting weights
hiddenE = np.dot(who.T, oe)

# in-place weight updates (kept commented out so the step below can show the math)
#who += lr*np.dot(oe*Oo*(1-Oo), Oh.T)
#wih += lr*np.dot(hiddenE*Oh*(1-Oh), inputs.T)
#print('New wih: ', wih)
#print('New who: ', who)


newWho = who - lr*np.dot(-oe*Oo*(1-Oo), Oh.T)
newWho

array([[-0.43163327, -0.08631366, -0.10845266,  0.07443995],
       [ 0.38337319, -0.34586342, -0.10636942,  0.14029244],
       [ 0.06287227,  0.06195702,  0.01966668,  0.10341479]])
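
As a quick check (not in the original notebook), re-running the output layer with the updated weights should shrink the output error for this training pair, since the step moves along the negative gradient:

# forward pass through the updated output-layer weights
Oo_new = af(np.dot(newWho, Oh))
print('old |error|: ', np.abs(target - Oo).sum())
print('new |error|: ', np.abs(target - Oo_new).sum())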


Using a class

import numpy as np


class NeuralNetwork:
    # init method
    def __init__(self, inputN, hiddenN, outputN, lr):
        # creates an NN with three layers (input, hidden, output)
        # inputN  - number of input nodes
        # hiddenN - number of hidden nodes
        # outputN - number of output nodes
        # lr      - learning rate
        self.inN = inputN
        self.hiN = hiddenN
        self.outN = outputN
        self.lr = lr

        # weight layout (one row per destination node):
        #   W11 W21 W31
        #   W12 W22 W32
        #   ...
        np.random.seed(53)
        self.wih = np.random.rand(self.hiN, self.inN) - 0.5
        self.who = np.random.rand(self.outN, self.hiN) - 0.5
        print("Wih: ", self.wih)
        print("Who: ", self.who)
    
    # NN computing method
    def feedforward(self, inputList):
        # computing hidden output
        inputs = np.array(inputList, ndmin=2).T
        self.Xh = np.dot(self.wih, inputs)
        print('Xh: ', self.Xh)
        self.af = lambda x: 1/(1+np.exp(-x))
        self.Oh = self.af(self.Xh)
        print('Oh:', self.Oh)

        # computing output
        self.Xo = np.dot(self.who, self.Oh)
        print('Xo: ', self.Xo)
        self.Oo = self.af(self.Xo)
        print('Oo: ', self.Oo)
    
    # NN training method
    def backpropagation(self, inputList, targetList):
        # data
        lr = self.lr
        inputs = np.array(inputList, ndmin=2).T
        target = np.array(targetList, ndmin=2).T

        # computing hidden layer
        Xh = np.dot(self.wih, inputs)
        af = lambda x: 1/(1+np.exp(-x))
        Oh = af(Xh)

        # computing output
        Xo = np.dot(self.who, Oh)
        Oo = af(Xo)

        # output error
        oe = target - Oo
        # error propagated back to the hidden layer
        hiddenE = np.dot(self.who.T, oe)

        # updating weights (gradient-descent step)
        self.who += lr*np.dot(oe*Oo*(1-Oo), Oh.T)
        self.wih += lr*np.dot(hiddenE*Oh*(1-Oh), inputs.T)
        return self.wih, self.who

NN = NeuralNetwork(3, 4, 3, 0.4)

Wih:  [[ 0.34666241  0.06116554 -0.0451246 ]
 [-0.14782509  0.08585138  0.03574974]
 [ 0.32745628 -0.2354578  -0.02162094]
 [-0.15221498 -0.36552168 -0.24002265]]
Who:  [[-0.45236532 -0.1057067  -0.12838381  0.05673292]
 [ 0.39749455 -0.33265411 -0.09279358  0.15235334]
 [ 0.06774908  0.06651886  0.0243551   0.10758002]]

NN.feedforward([0.32, 0.27, 0.18])

Xh:  [[ 0.11932424]
 [-0.0176892 ]
 [ 0.03732063]
 [-0.19060372]]
Oh: [[0.52979571]
 [0.49557782]
 [0.50932908]
 [0.45249281]]
Xo:  [[-0.33176547]
 [ 0.06741123]
 [ 0.12994239]]
Oo:  [[0.41781112]
 [0.51684643]
 [0.53243996]]
NN.backpropagation([0.32, 0.27, 0.18], [0.82, 0.25, 0.44])

(array([[ 0.33727924,  0.05324849, -0.05040263],
        [-0.14654184,  0.08693412,  0.03647157],
        [ 0.32652462, -0.23624388, -0.02214499],
        [-0.15309598, -0.36626503, -0.24051822]]),
 array([[-0.43163327, -0.08631366, -0.10845266,  0.07443995],
        [ 0.38337319, -0.34586342, -0.10636942,  0.14029244],
        [ 0.06287227,  0.06195702,  0.01966668,  0.10341479]]))
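
To watch the class actually learn, the two methods can be called in a loop on the same training pair. This loop is a sketch that is not in the original notebook, and the epoch count of 100 is an arbitrary choice:

# hypothetical training loop on a single input/target pair
x = [0.32, 0.27, 0.18]
t = [0.82, 0.25, 0.44]
for epoch in range(100):
    NN.backpropagation(x, t)
NN.feedforward(x)   # Oo should now be much closer to the target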