~rmyeid/+junk/research

Viewing changes to neural.py

  • Committer: Rami
  • Date: 2009-12-22 16:00:56 UTC
  • Revision ID: eid@eid-desktop-20091222160056-n96svrnma2igivkg
refactoring the class names and capitalizing them

@@ -11,14 +11,14 @@
 momentum=0.1
 def random_shifted((l,h)):
     return random.random()*(h-l)+l
-class neuron:
+class Neuron:
     def __init__(self,num_inputs,weights=[],bias=0,weight_range=(0,1)):
         self.bias=bias
         self.error=0.0
         self.weighted_sum=0.0
         self.derivative=0.0
         if num_inputs==0:
-            print "class neuron: Neuron not initialized"
+            print "class Neuron: Neuron not initialized"
             return
         if weights!=[]:
             self.weights=copy.deepcopy(weights)
@@ -44,9 +44,9 @@
     def error_propagation(self,errors,out_weights):
         self.error=sum([err*ow for (err,ow) in zip(errors,out_weights)])
     def error_propagation2(self,next_layer,position):
-        #this a layer aware implementation
-        #position is the position of the current neuron in the current layer
-        #next_layer is supposed to be the layer infront of the current neuron
+        #this a Layer aware implementation
+        #position is the position of the current Neuron in the current Layer
+        #next_layer is supposed to be the Layer infront of the current Neuron
         self.error=sum([neuron.weights[position]*neuron.error for neuron in next_layer.neurons])
     def update_weights(self,inputs):
         global learning,momentum
@@ -54,53 +54,53 @@
         self.weights=[old_weight+learning*change+momentum*old_change for (old_weight,change,old_change) in zip(self.weights,changes,self.changes)]
         self.changes=changes
 
-class layer:
+class Layer:
     def __init__(self,num_neurons,num_weights,weight_range=(0,1),bias_range=(0,0)):
         if num_neurons<=0 or num_weights<-1:
-            print "class layer: Wrong Initialization"
+            print "class Layer: Wrong Initialization"
             return
         self.neurons=[]
         if num_weights==-1:
-            #num_weights==-1 -> input layer
+            #num_weights==-1 -> input Layer
             for i in range(num_neurons):
-                self.neurons.append(neuron(num_weights,[1],0))
+                self.neurons.append(Neuron(num_weights,[1],0))
         else:
             for i in range(num_neurons):
                 bias_rand=random_shifted(weight_range)
-                self.neurons.append(neuron(num_weights,[],bias_rand,weight_range))
+                self.neurons.append(Neuron(num_weights,[],bias_rand,weight_range))
         self.inputs=[0.0]*len(self.neurons)
         self.__outputs__=[0.0]*len(self.neurons)
     def __str__(self):
-        return reduce(lambda x,y: x+"\t"+y,[neuron.__str__() for neuron in self.neurons])
+        return reduce(lambda x,y: x+"\t"+y,[Neuron.__str__() for Neuron in self.neurons])
     def calculate(self,inputs):
             self.inputs=copy.deepcopy(inputs)
-            self.__outputs__=[neuron.calculate(self.inputs) for neuron in self.neurons]
+            self.__outputs__=[Neuron.calculate(self.inputs) for Neuron in self.neurons]
             return self.__outputs__
     def error_propagate(self,next_layer):
         for i in range(len(self.neurons)):
             self.neurons[i].error_propagation2(next_layer,i)
     def update_weights(self):
-        for neuron in self.neurons:
-            neuron.update_weights(self.inputs)
+        for Neuron in self.neurons:
+            Neuron.update_weights(self.inputs)
 
-class network:
+class Network:
     def __init__(self,array=[],weight_ranges=[(0,1)],bias_range=(0,0)):
-    #array: is a list of the number of the nodes in each layer
+    #array: is a list of the number of the nodes in each Layer
         if len(array)==0:
             print "class nNetwork: Network not initialized"
             return
         self.layers=[]
-        self.layers.append(layer(array[0],-1,weight_ranges[0],bias_range))
+        self.layers.append(Layer(array[0],-1,weight_ranges[0],bias_range))
         if  len(weight_ranges)==1:
             weight_ranges=weight_ranges*len(array)
         else:
             if len(weight_ranges)<len(array)-1:
                     print "class nNetwork: not enough weight_ranges"
         for i in range(1,len(array)):
-            self.layers.append(layer(array[i],array[i-1],weight_ranges[i],bias_range))
+            self.layers.append(Layer(array[i],array[i-1],weight_ranges[i],bias_range))
     def __str__(self):
         temp=""
-        return reduce(lambda x,y: x+"\n"+y,[layer.__str__() for layer in self.layers])
+        return reduce(lambda x,y: x+"\n"+y,[Layer.__str__() for Layer in self.layers])
     def neurons(self):
         temp=[]
         for i in self.layers:
@@ -108,18 +108,18 @@
         return temp
     def calculate(self,inputs):
         output=inputs
-        for layer in self.layers[1:]:
-            output=layer.calculate(output)
+        for Layer in self.layers[1:]:
+            output=Layer.calculate(output)
         return output
     def error_propagate(self,references):
         for i in range(len(self.layers[-1].neurons)):
             self.layers[-1].neurons[i].error=-1*self.layers[-1].__outputs__[i]+references[i]
         for i  in range(len(self.layers)-2,0,-1):
             self.layers[i].error_propagate(self.layers[i+1])
-        return sum([neuron.error for neuron in self.layers[-1].neurons])**2
+        return sum([Neuron.error for Neuron in self.layers[-1].neurons])**2
     def update_weights(self):
-        for layer in self.layers[1:]:
-            layer.update_weights()
+        for Layer in self.layers[1:]:
+            Layer.update_weights()
     def back_propagation(self,references):
         error=self.error_propagate(references)
         self.update_weights()
@@ -133,19 +133,19 @@
         return sum
     def toChromosome(self):
         genes=[]
-        for layer in self.layers[1:]:
-            for neuron in layer.neurons:
-                genes.extend(neuron.weights)
+        for Layer in self.layers[1:]:
+            for Neuron in Layer.neurons:
+                genes.extend(Neuron.weights)
         size=len(genes)
         if size <2:
-            print "Error converting the network to chromosome"
+            print "Error converting the Network to chromosome"
             return
-        return genetic.realChromosome(size,genetic.alwaysGood,genetic.alwaysOK,(-1,1),genes)
+        return genetic.RealChromosome(size,genetic.alwaysGood,genetic.alwaysOK,(-1,1),genes)
 
 
 
 def xor():
-    foo=network([2,3,1],[(0,1),(-0.2,0.2),(-2,2)],(1,1))
+    foo=Network([2,3,1],[(0,1),(-0.2,0.2),(-2,2)],(1,1))
     Input=[[0,0],[0,1],[1,0],[1,1]]
     out=[[0],[1],[1],[0]]
     sum=0.0
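
For readers skimming the diff, the two formulas the renamed methods carry are standard backpropagation with momentum: Neuron.error_propagation2 sets a hidden unit's error to the weighted sum of the errors of the units it feeds, and Neuron.update_weights takes a gradient step plus a momentum term that reuses the previous step's change. The sketch below restates just those two lines in isolation, in Python 3 for clarity. It is not part of this revision; only the names learning and momentum mirror the module-level globals visible in the hunks above, and the helper names and example values are invented for illustration.

import random

learning = 0.5   # learning rate; value assumed for the example
momentum = 0.1   # mirrors momentum=0.1 at line 11 of neural.py

def propagate_error(weights_to_next, next_errors):
    # Hidden-unit error as in Neuron.error_propagation2: the weighted sum
    # of the errors of the downstream units this unit feeds.
    return sum(w * e for w, e in zip(weights_to_next, next_errors))

def update_weights(weights, changes, old_changes):
    # Weight step as in Neuron.update_weights: a gradient step scaled by
    # the learning rate, plus a momentum term from the previous change.
    return [w + learning * c + momentum * oc
            for w, c, oc in zip(weights, changes, old_changes)]

# Worked examples (results shown up to float rounding):
# 0.3*0.5 + (-0.2)*0.25 = 0.1
print(propagate_error([0.3, -0.2], [0.5, 0.25]))
# 0.4 + 0.5*0.2 + 0.1*0.1 = 0.51
print(update_weights([0.4], [0.2], [0.1]))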