@cleardusk
2015-11-13T08:59:54.000000Z
# Back-Propagation Neural Networks

```python
# Back-Propagation Neural Networks
#
# Written in Python. See http://www.python.org/
# Placed in the public domain.
# Neil Schemenauer <nas@arctrix.com>

import math
import random

random.seed(0)


# calculate a random number where: a <= rand < b
def rand(a, b):
    return (b - a) * random.random() + a


# make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
    m = []
    for i in range(I):
        m.append([fill] * J)
    return m


# our "sigmoid" function; tanh is a little nicer than the standard 1/(1+e^-x)
def sigmoid(x):
    return math.tanh(x)  # not the logistic sigmoid, but it tends to work better here
    # return 1.0/(math.exp(-x)+1.0)  # the actual logistic sigmoid


# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    return 1.0 - y**2  # tanh
    # return y - y**2  # logistic sigmoid


class NN:
    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        self.ni = ni + 1  # +1 for bias node
        self.nh = nh
        self.no = no

        # activations for nodes
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no

        # create weights
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-0.2, 0.2)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)

        # last change in weights, for momentum
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def update(self, inputs):
        if len(inputs) != self.ni - 1:
            raise ValueError('wrong number of inputs')

        # input activations
        for i in range(self.ni - 1):
            # self.ai[i] = sigmoid(inputs[i])
            self.ai[i] = inputs[i]

        # hidden activations
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(sum)

        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(sum)

        return self.ao[:]

    def backPropagate(self, targets, N, M):
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')

        # calculate error terms for output
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = dsigmoid(self.ao[k]) * error

        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error

        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                # self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.wo[j][k] = self.wo[j][k] + N * change
                self.co[j][k] = change
                # print(N*change, M*self.co[j][k])

        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                # self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.wi[i][j] = self.wi[i][j] + N * change
                self.ci[i][j] = change

        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5 * (targets[k] - self.ao[k]) ** 2
        return error

    def test(self, patterns):
        for p in patterns:
            print(p[0], '->', self.update(p[0]))

    def weights(self):
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])

    def train(self, patterns, iterations=1000, N=0.5, M=0.1):
        # N: learning rate
        # M: momentum factor (only used by the commented-out momentum updates above)
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)
```
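Written out, the updates computed in `backPropagate` are the standard delta-rule formulas. With target $t_k$, output activation $a^{\text{out}}_k$, hidden activation $a^{\text{hid}}_j$, and $f'(y) = 1 - y^2$ (the tanh derivative expressed in terms of the output, as in `dsigmoid`):

$$
\delta^{\text{out}}_k = f'(a^{\text{out}}_k)\,(t_k - a^{\text{out}}_k),
\qquad
\delta^{\text{hid}}_j = f'(a^{\text{hid}}_j)\sum_k \delta^{\text{out}}_k\, w^{\text{out}}_{jk}
$$

$$
w^{\text{out}}_{jk} \leftarrow w^{\text{out}}_{jk} + N\,\delta^{\text{out}}_k\, a^{\text{hid}}_j,
\qquad
w^{\text{in}}_{ij} \leftarrow w^{\text{in}}_{ij} + N\,\delta^{\text{hid}}_j\, a^{\text{in}}_i
$$

The returned error is $E = \frac{1}{2}\sum_k (t_k - a^{\text{out}}_k)^2$, and the commented-out lines would add a momentum term $M$ times the previous weight change to each update. The demo below builds a 2-2-1 network and trains it on the XOR truth table: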
```python
def demo():
    # Teach network XOR function
    pat = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]],
    ]

    # create a network with two input, two hidden, and one output nodes
    n = NN(2, 2, 1)
    # train it with some patterns
    n.train(pat)
    # test it
    n.test(pat)


if __name__ == '__main__':
    demo()
```
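As a quick usage sketch (not part of the original script), the same `NN` class can be trained on any small pattern list, and `weights()` prints the learned matrices. The AND truth table below is illustrative only; the original demo uses XOR.

```python
# Usage sketch: assumes the NN class above is already defined in this module.
# The AND pattern list is illustrative, not from the original post.
and_pat = [
    [[0, 0], [0]],
    [[0, 1], [0]],
    [[1, 0], [0]],
    [[1, 1], [1]],
]

n = NN(2, 2, 1)                            # 2 inputs (+ bias), 2 hidden nodes, 1 output
n.train(and_pat, iterations=1000, N=0.5)   # N is the learning rate
n.test(and_pat)                            # prints each input alongside the network's output
n.weights()                                # prints input-to-hidden and hidden-to-output weights
```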