I thought I would post a couple of useful bits of code from my final year of university.
The first is a feed-forward perceptron written in Python. I tried to expand on the examples I found online as much as possible, to help me understand how they work.
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
#!/usr/bin/env python3
"""A simple feed-forward perceptron trained with the delta rule."""


class Perceptron:
    """Single-layer perceptron with a fixed activation threshold."""

    def __init__(self, weights=None, threshold=0.5):
        # Weights may be supplied up front; train() (re)initialises them anyway.
        self.weights = weights
        self.threshold = threshold

    def activation(self, input_vector):
        """Return 1 if the weighted sum of the inputs exceeds the threshold, else 0."""
        val = sum(weight * value for weight, value in zip(self.weights, input_vector))
        return 1 if val > self.threshold else 0

    def train(self, training_set, learning_rate=0.1):
        """Train until every example in training_set is classified correctly.

        training_set: sequence of (input_vector, desired_output) pairs.
        learning_rate: the alpha that scales each weight correction.
        NOTE: loops forever if the data is not linearly separable.
        """
        # Start from zero weights, one per input component.
        # (The original comment claimed random init in [-0.5, 0.5], but the
        # code always used zeros; the unused `import random` has been dropped.)
        self.weights = [0.0] * len(training_set[0][0])
        while True:
            error_count = 0
            for input_vector, desired_output in training_set:
                print(self.weights)  # progress trace, as in the original
                error = desired_output - self.activation(input_vector)
                if error != 0:
                    error_count += 1
                    # Delta rule: w_i += alpha * error * x_i
                    for index, value in enumerate(input_vector):
                        self.weights[index] += learning_rate * error * value
            if error_count == 0:  # a full pass with no mistakes -> converged
                break

    def test(self, training_set):
        """Return True iff every (input_vector, desired_output) pair is classified correctly."""
        return all(self.activation(iv) == desired for iv, desired in training_set)


if __name__ == "__main__":
    # Truth tables as (input_vector, expected_output) pairs.
    # BUG FIX: the AND table previously contained ((-1, 1), 1); the correct
    # fourth row of the AND gate is ((1, 1), 1).
    training_set_AND = [((0, 0), 0), ((0, 1), 0), ((1, 0), 0), ((1, 1), 1)]
    training_set_OR = [((0, 1), 1), ((1, 0), 1), ((0, 0), 0), ((1, 1), 1)]
    # NAND with a constant 1 as the first input component (serves as a bias).
    training_set_NAND = [((1, 0, 0), 1), ((1, 1, 0), 1), ((1, 0, 1), 1),
                         ((1, 1, 1), 0)]

    p = Perceptron(None, 0.5)
    p.train(training_set_AND, 0.1)
    print(p.test(training_set_AND))

Recent Comments