David's perceptron.py

From Noisebridge
Revision as of 00:40, 12 March 2009 by David415 (talk | contribs)
Jump to navigation Jump to search
#!/usr/bin/env python

__author__ = "David Stainton"
__license__ = "Apache License"


import sys

class perceptron(object):

    def __init__(self, values):
        self.weight = []
        # is threshold equivalent to bias?
        # http://en.wikipedia.org/wiki/Perceptron
        self.threshold = .5
        self.learning_rate = .1
        for value in values:
            self.weight.append(value)

    def eval(self, input):
        sum = dot_product(input, self.weight)
        if sum == self.threshold:
            output = 0
        if sum < self.threshold:
            output = 0
        if sum > self.threshold:
            output = 1
        return output

    # returns a boolean indicating if the
    def train(self, input, expected):
        output = self.eval(input)
        if output != expected:
            for elk in range(0,len(input)):
                if input[elk] == 1:
                    change = (expected - output) * self.learning_rate
                    #print "%s %s" % (elk, change)
                    self.weight[elk] += change
            return False

        return True


def dot_product(a, b):
    """Return the dot product of two equal-length numeric sequences.

    Uses sum() over zip() instead of the original hand-rolled index
    loop (which also shadowed the builtin `sum`).  Empty sequences
    give 0; unequal lengths truncate to the shorter sequence instead
    of raising IndexError.
    """
    return sum(x * y for x, y in zip(a, b))



def main():
    """Train a perceptron on the boolean-OR truth table, then evaluate
    it — first on the training inputs, then on some out-of-domain
    real-valued inputs.

    Fixed: the original mixed Python 2 print statements with Python 3
    print() calls, which is a SyntaxError under Python 3; all output
    now uses print().  The local `input` (which shadowed the builtin)
    is renamed.
    """

    # Training set: expected output for each input pair (boolean OR).
    expected = [1, 0, 1, 1]
    samples = [[1, 1], [0, 0], [0, 1], [1, 0]]

    # Expose the perceptron to the training set at most 10 times.
    repeat = 10

    # Create a perceptron with zeroed initial weights.
    p = perceptron([0, 0])

    # Repeatedly train with the training data set.
    for _ in range(repeat):
        results = []

        for sample, want in zip(samples, expected):
            results.append(p.train(sample, want))
            print(p.weight)

        # Stop as soon as a full pass produces no corrections.
        if False not in results:
            break

    print(p.weight)

    print("training complete.")

    # Use the trained perceptron to evaluate the training inputs.
    for sample in samples:
        print("%s eval = %s" % (sample, p.eval(sample)))

    print("---")

    # These real-valued inputs make no sense from a boolean-logic
    # viewpoint; the original author nevertheless had to try them.
    samples2 = [[1.2, 1.3], [0.2, 0.7], [0, .5], [1, 0]]

    for sample in samples2:
        print("%s eval = %s" % (sample, p.eval(sample)))


if __name__ == "__main__":
    main()