path: root/2017/ev3-controller/nn_11_function.py
blob: 677db1b41e4b077c4e6277ffe51c9555fc3afea2
import numpy as np
from data_processing import process

# get the training and test data
training_x, training_y, test_x, test_y = process()
X = np.array(training_x)
y = np.array(training_y)
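
# Note (assumption): process() is expected to return X as a 2-D array of
# shape (samples x features) and y as one-hot targets (samples x classes);
# the weight shapes below and the argmax over axis 1 at the end of the file
# rely on this layout.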


# sigmoid activation function
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
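
# Note: when deriv=True, x is assumed to already be a sigmoid output, so
# x * (1 - x) is the slope of the sigmoid at that point; this is why the
# training loop below can call nonlin(l1, deriv=True) directly on the layer
# activations. For example, nonlin(0.0) == 0.5.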

#print(X)
#print(y)

def train(X=X, y=y, descent_rate=0.01, hl1=2):
    np.random.seed(1)

    # randomly initialize the weights in [-1, 1) with mean 0:
    # syn0 maps the inputs to the hidden layer (features x hl1),
    # syn1 maps the hidden layer to the outputs (hl1 x classes)
    syn0 = 2 * np.random.random((len(X[1]), hl1)) - 1
    syn1 = 2 * np.random.random((hl1, len(y[1]))) - 1

    # run 5000 full-batch training iterations
    for j in range(5000):

        # Feed forward through layers 0, 1, and 2
        l0 = X
        l1 = nonlin(np.dot(l0, syn0))
        l2 = nonlin(np.dot(l1, syn1))

        #print(l2)

        # calculate the error from the target values
        l2_error = y - l2

        if (j % 100) == 0:
            print("Error: " + str(np.mean(np.abs(l2_error))))

        # scale the error by the slope of the sigmoid at l2 (small where the
        # network is already confident) and by the descent rate to control convergence
        l2_delta = l2_error * nonlin(l2, deriv=True) * descent_rate

        # how much did each l1 value contribute to the l2 error (according to the weights)?
        l1_error = l2_delta.dot(syn1.T)

        # scale the backpropagated error by the slope of the sigmoid at l1
        # and by the descent rate, as above
        l1_delta = l1_error * nonlin(l1, deriv=True) * descent_rate

        # update the weights with the full-batch gradient
        syn1 += l1.T.dot(l2_delta)
        syn0 += l0.T.dot(l1_delta)
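
        # In matrix form these updates descend the squared error 0.5 * ||y - l2||**2:
        # the gradient with respect to a weight matrix is
        # (layer input).T dot (error * sigmoid slope at that layer's output).
        # Note that l2_delta already carries descent_rate, so the syn0 update
        # effectively uses a step size of descent_rate**2.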


    return syn0, syn1


# forward pass only: run an input batch l0 through the trained weights and
# return the output-layer activations
def calculate_output(syn0, syn1, l0):
    l1 = nonlin(np.dot(l0, syn0))
    l2 = nonlin(np.dot(l1, syn1))
    return l2


# inputs for testing the trained network
l0 = test_x
#print(l0)

# train the neural network
syn0, syn1 = train(X, y)
print(syn0)
print(syn1)

# get the output values from the trained network
l2 = calculate_output(syn0, syn1, l0)

# check correctness: compare predicted classes (argmax of l2) against the
# one-hot test labels
correct = np.equal(np.argmax(test_y, 1), np.argmax(l2, 1))
print(correct)

accuracy = np.mean(correct.astype(float))
print(accuracy)

# Print inputs and predicted outputs
# for i in range(len(l0)):
#     print(l0[i])
#     print(l2[i])
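
# Example (hypothetical usage): predict the class of a single test sample.
# The np.array([...]) wrapper keeps the input 2-D (1 x n_features) so the
# dot products in calculate_output still line up.
#   sample = np.array([test_x[0]])
#   print(np.argmax(calculate_output(syn0, syn1, sample), 1))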