-
Notifications
You must be signed in to change notification settings - Fork 0
/
neural_network.py
63 lines (46 loc) · 1.56 KB
/
neural_network.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""
This script contains the feed forward neural network
that is implemented to the algorithm to train the
snake AI agent.
"""
import numpy as np
from settings import *
class Neural_Network(object):
    """Feed-forward neural network controlling the snake AI agent.

    Layer sizes come from the ``layer`` list provided by ``settings``
    (star import).  When no parameters are supplied, a population of
    ``num_individuals`` random weight/bias sets is generated, each value
    drawn uniformly from [-1, 1) in steps of 0.01.
    """

    def __init__(self, weights=None, bias=None):
        """Create a network, generating random parameters if none are given.

        Args:
            weights: optional pre-built weight matrices.  When ``None``,
                one list of per-layer weight matrices is generated for
                each of ``num_individuals`` individuals.
            bias: optional pre-built bias rows (same population layout).
        """
        # ``layer``, ``weights_size``, ``bias_size`` and ``num_individuals``
        # are module-level globals from ``settings`` (star import).
        self.layer = layer
        self.weights_size = weights_size
        self.bias_size = bias_size
        self.weights = []
        self.bias = []
        if weights is None:
            # One weight set per individual; each matrix connects two
            # consecutive layers: shape (layer[i], layer[i+1]).
            for _ in range(num_individuals):
                weights_temp = []
                for i in range(len(layer) - 1):
                    weights_temp.append(
                        np.random.choice(np.arange(-1, 1, step=0.01),
                                         size=(layer[i], layer[i + 1])))
                self.weights.append(weights_temp)
        else:
            self.weights = weights
        if bias is None:
            # One bias row per non-input layer: shape (1, layer[i]).
            for _ in range(num_individuals):
                bias_temp = []
                for i in range(1, len(layer)):
                    bias_temp.append(
                        np.random.choice(np.arange(-1, 1, step=0.01),
                                         size=(1, layer[i])))
                self.bias.append(bias_temp)
        else:
            self.bias = bias

    def feed_forward(self, X):
        """Propagate ``X`` through the network; return the argmax index.

        Hidden layers use ReLU, the output layer uses sigmoid, and the
        index of the strongest output unit is returned (the chosen action).

        NOTE(review): this indexes ``self.weights``/``self.bias`` as the
        flat per-layer parameters of ONE individual, not the nested
        population lists built by ``__init__`` when no parameters are
        passed — confirm callers supply a single individual's parameters.
        """
        # Input layer -> first hidden layer.
        current_layer = self.relu(np.dot(X, self.weights[0]) + self.bias[0])
        # Any additional hidden layers (loop is empty for <= 2 matrices,
        # so no explicit length guard is needed).  The former debug print
        # that recomputed each product has been removed.
        for i in range(1, len(self.weights) - 1):
            current_layer = self.relu(
                np.dot(current_layer, self.weights[i]) + self.bias[i])
        # Output layer with sigmoid activation.
        output = self.sigmoid(
            np.dot(current_layer, self.weights[-1]) + self.bias[-1])
        return np.argmax(output)

    def relu(self, matrix):
        """Element-wise rectified linear unit: max(0, x)."""
        return np.maximum(0, matrix)

    def sigmoid(self, matrix):
        """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
        return 1 / (1 + np.exp(-matrix))