# NOTE(review): removed a web-scraper artifact line ("code pull complete, the
# page will refresh automatically") that was not part of the original source.
__author__ = 'carlxie'
from layer.util import *
from layer.Layer import Layer
from layer.LayerType import LayerType
from layer.FullConLayer import FullConLayer
class Network(object):
    """A fully-connected feed-forward neural network trained with SGD.

    Layers are built from the project's ``Layer``/``FullConLayer`` classes;
    the first entry of ``neurons_per_layer`` is the input width and the last
    is the output width.
    """

    def __init__(self, neurons_per_layer, eta):
        """Build the layer stack.

        neurons_per_layer -- list of layer widths, e.g. [2, 3, 4, 1]
        eta               -- learning rate passed to each layer's
                             update_weights during training
        """
        self.neurons_per_layer = neurons_per_layer
        self.eta = eta
        # The input layer has no predecessor; the trailing (1, 1) arguments
        # are extra dims expected by Layer -- TODO confirm their meaning.
        input_layer = Layer(None, LayerType.INPUT_TYPE, neurons_per_layer[0], 1, 1)
        self.layers = [input_layer]
        pre_layer = input_layer
        for width in neurons_per_layer[1:]:
            layer = FullConLayer(pre_layer, width)
            self.layers.append(layer)
            pre_layer = layer

    def feed_forward(self, x):
        """Load sample x into the input layer and propagate through all layers."""
        self.layers[0].output[:, 0, 0] = x
        for layer in self.layers:
            layer.forward_prop()

    def back_forward(self, x, y):
        """Forward-propagate (x, y), then back-propagate deltas output-to-input."""
        self.layers[-1].delta = self.get_last_delta(x, y)
        # Walk layers[-1] .. layers[1]; the input layer has nothing to back-prop.
        for layer in reversed(self.layers[1:]):
            layer.back_prop()

    def get_last_delta(self, x, y):
        """Return the output-layer delta for sample (x, y).

        Runs a fresh forward pass; the delta is the derivative of the
        squared-error loss through the tanh output activation.
        """
        self.feed_forward(x)
        out = self.layers[-1]
        target = vec_output(y, self.neurons_per_layer[-1])
        return -(target - out.output[:, :, 0]) * 2 * vec_der_tanh(out.score)

    def train(self, training_data, T):
        """Run T steps of stochastic gradient descent on random samples.

        Each row of training_data is [features..., label].
        """
        for _ in range(T):
            # np.random.random() < 1.0, so the index is always in range.
            sample = training_data[int(np.random.random() * len(training_data))]
            self.back_forward(sample[:-1], sample[-1])
            for layer in self.layers:
                layer.update_weights(self.eta)

    def cost(self, x, y):
        """Return the squared-error loss of the network on sample (x, y)."""
        self.feed_forward(x)
        out = self.layers[-1]
        target = vec_output(y, self.neurons_per_layer[-1])
        return sum((out.output[:, :, 0] - target) ** 2)

    def get_analytic_grads(self, x, y):
        """Back-propagate on (x, y) and return all weight gradients followed
        by all bias gradients as one flat list.

        The layout (weights first, then biases, layer by layer) must match
        the parameter vector used by the numerical gradient checker.
        """
        self.back_forward(x, y)
        grads = [layer.w_grads[0] for layer in self.layers[1:]]
        grads += [layer.b_grads[0] for layer in self.layers[1:]]
        return flat_list(grads)

    def set_weights_biases(self, w, b):
        """Overwrite every trainable layer's parameters (w[i]/b[i] -> layers[i+1])."""
        for i, layer in enumerate(self.layers[1:]):
            layer.biases = b[i]
            layer.weights = w[i]

    def cal_loss(self, x, y, w, b):
        """Install parameters (w, b), then return the cost on (x, y).

        Used by the numerical gradient checker to probe the loss surface.
        """
        self.set_weights_biases(w, b)
        return self.cost(x, y)

    def predict(self, x):
        """Forward-propagate x and return the raw output-layer activations."""
        self.feed_forward(x)
        return self.layers[-1].output

    def real_output(self, x):
        """Map a raw network output to a binary class label in {-1, +1}."""
        return 1 if x > 0 else -1

    def evaluate(self, X, Y):
        """Return the misclassification rate of the network on dataset (X, Y)."""
        errors = 0.0
        for features, label in zip(X, Y):
            if self.real_output(self.predict(features)) != label:
                errors += 1
                # Log the raw output of each misclassified sample for debugging.
                print(self.predict(features))
        return errors / len(X)
def grad_check():
    """Compare analytic back-prop gradients against numerical gradients.

    Loads one sample from t_train.dat, computes both gradient estimates at
    the same point in parameter space, and prints the relative difference;
    a tiny value indicates the back-prop math is correct.
    """
    train_data = np.loadtxt("t_train.dat")
    nn = Network([2, 3, 4, 1], 0.01)
    shapes = get_shapes(nn)
    # Flatten all weights followed by all biases into one parameter vector,
    # matching the layout produced by Network.get_analytic_grads.
    weights = np.append(get_weights(nn), get_biases(nn))
    x, y = train_data[0][:-1], train_data[0][-1]
    num_grads = compute_num_grads(nn, x, y, shapes, weights)
    # Rebuild the structured parameters and install them so both gradient
    # computations evaluate at exactly the same parameters.
    w, b = reconstruct(weights, shapes)
    nn.set_weights_biases(w, b)
    analytic_grads = nn.get_analytic_grads(x, y)
    print(num_grads)
    print(analytic_grads)
    diff = num_grads - analytic_grads
    # Relative error: |sum(diff)| / (|sum(numeric)| + |sum(analytic)|).
    num = abs(sum(diff))
    den = abs(sum(num_grads)) + abs(sum(analytic_grads))
    print(num / den)
def real_test():
    """Train on t_train.dat for 100k SGD steps, then print the test error rate."""
    train_data = np.loadtxt("t_train.dat")
    nn = Network([2, 3, 4, 1], 0.01)
    nn.train(train_data, 100000)
    test_data = np.loadtxt("t_test.dat")
    # Last column of the data file is the label; the rest are features.
    print(nn.evaluate(test_data[:, :-1], test_data[:, -1]))
if __name__ == "__main__":
real_test()
#grad_check()
# NOTE(review): removed trailing web-scraper boilerplate (a content-moderation
# notice from the hosting page) that was not part of the original source.