import numpy as np
from data_processing import process

# get the data
training_x, training_y, test_x, test_y = process()

X = np.array(training_x)
y = np.array(training_y)


# sigmoid activation; with deriv=True, x is assumed to already be a sigmoid
# output, so x * (1 - x) is its derivative
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

# print(X)
# print(y)


# train a 2-layer network; hl1 is the number of hidden units
def train(X=X, y=y, descent_rate=0.01, hl1=2):
    np.random.seed(1)

    # randomly initialize our weights with mean 0
    syn0 = 2 * np.random.random((X.shape[1], hl1)) - 1
    syn1 = 2 * np.random.random((hl1, y.shape[1])) - 1

    for j in range(5000):
        # feed forward through layers 0, 1, and 2
        l0 = X
        l1 = nonlin(np.dot(l0, syn0))
        l2 = nonlin(np.dot(l1, syn1))
        # print(l2)

        # calculate the error from the target values
        l2_error = y - l2

        if (j % 100) == 0:
            print("Error: " + str(np.mean(np.abs(l2_error))))

        # calculate the desired change in weights - bigger if our confidence is
        # higher; use the descent rate to control convergence
        l2_delta = l2_error * nonlin(l2, deriv=True) * descent_rate

        # how much did each l1 value contribute to the l2 error
        # (according to the weights)?
        l1_error = l2_delta.dot(syn1.T)

        # calculate the desired change in weights - bigger if our confidence is
        # higher; use the descent rate to control convergence
        l1_delta = l1_error * nonlin(l1, deriv=True) * descent_rate

        # update the weights
        syn1 += l1.T.dot(l2_delta)
        syn0 += l0.T.dot(l1_delta)

    return syn0, syn1


# feed a batch of inputs forward through the trained network
def calculate_output(syn0, syn1, l0):
    l1 = nonlin(np.dot(l0, syn0))
    l2 = nonlin(np.dot(l1, syn1))
    return l2


# test neural network
l0 = np.array(test_x)
# print(l0)

# train the neural network
syn0, syn1 = train(X, y)
print(syn0)
print(syn1)

# get the output values based on the trained network
l2 = calculate_output(syn0, syn1, l0)

# check correctness of the network: compare predicted and true class indices
correct = np.equal(np.argmax(test_y, 1), np.argmax(l2, 1))
print(correct)
accuracy = np.mean(correct.astype(float))
print(accuracy)

# print inputs and predicted outputs
# for i in range(len(l0)):
#     print(l0[i])
#     print(l2[i])
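
# Minimal single-sample usage sketch: assuming test_x rows share the feature
# layout produced by process(), classify one test example with the trained
# weights (the index 0 here is arbitrary and purely illustrative).
sample = l0[0:1]  # slicing keeps the 2-D shape (1, n_features)
prediction = calculate_output(syn0, syn1, sample)
print("Predicted class for the first test sample:", np.argmax(prediction, 1)[0])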