
Carl-Xie/NN-Toys

This repository does not declare an open-source license file (LICENSE); before using it, check the project description and the upstream dependencies of its code.
FeedforwardNetwork.py 3.58 KB
Carl-Xie committed on 2018-03-18 20:02 +08:00: migrate project

__author__ = 'carlxie'

import numpy as np  # used directly below (np.loadtxt, np.random.random)

from layer.util import *
from layer.Layer import Layer
from layer.LayerType import LayerType
from layer.FullConLayer import FullConLayer

class Network(object):

    def __init__(self, neurons_per_layer, eta):
        self.neurons_per_layer = neurons_per_layer
        self.eta = eta
        inputLayer = Layer(None, LayerType.INPUT_TYPE, neurons_per_layer[0], 1, 1)
        self.layers = [inputLayer]
        preLayer = inputLayer
        for i in range(1, len(neurons_per_layer), 1):
            layer = FullConLayer(preLayer, neurons_per_layer[i])
            self.layers.append(layer)
            preLayer = layer

    def feed_forward(self, x):
        inputLayer = self.layers[0]
        inputLayer.output[:, 0, 0] = x
        for layer in self.layers:
            layer.forward_prop()

    def back_forward(self, x, y):
        outputLayer = self.layers[-1]
        outputLayer.delta = self.get_last_delta(x, y)
        for i in range(len(self.layers) - 1, 0, -1):
            self.layers[i].back_prop()

    def get_last_delta(self, x, y):
        self.feed_forward(x)
        outputLayer = self.layers[-1]
        return -(vec_output(y, self.neurons_per_layer[-1]) - outputLayer.output[:, :, 0]) * 2 * vec_der_tanh(outputLayer.score)
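    # Where the delta above comes from: the loss used in cost() below is the squared error
    #   L = sum((vec_output(y) - output)^2)
    # and, assuming the layers apply a tanh activation (as the use of vec_der_tanh suggests),
    # output = tanh(score), so by the chain rule
    #   dL/d(score) = -2 * (vec_output(y) - output) * tanh'(score)
    # which is exactly the expression returned by get_last_delta.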
    def train(self, training_data, T):
        for t in range(T):
            data = training_data[int(np.random.random() * len(training_data))]
            self.back_forward(data[:-1], data[-1])
            for layer in self.layers:
                layer.update_weights(self.eta)
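    # train() is single-sample stochastic gradient descent: each of the T iterations picks one
    # training row uniformly at random, runs one backprop pass on it, and immediately applies
    # the resulting weight update with step size eta.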
    def cost(self, x, y):
        self.feed_forward(x)
        outputLayer = self.layers[-1]
        loss = sum((outputLayer.output[:, :, 0] - vec_output(y, self.neurons_per_layer[-1])) ** 2)
        return loss

    def get_analytic_grads(self, x, y):
        self.back_forward(x, y)
        grads = []
        for i in range(1, len(self.layers), 1):
            grads.append(self.layers[i].w_grads[0])
        for i in range(1, len(self.layers), 1):
            grads.append(self.layers[i].b_grads[0])
        return flat_list(grads)

    def set_weights_biases(self, w, b):
        for i in range(1, len(self.layers), 1):
            self.layers[i].biases = b[i - 1]
            self.layers[i].weights = w[i - 1]

    def cal_loss(self, x, y, w, b):
        self.set_weights_biases(w, b)
        return self.cost(x, y)

    def predict(self, x):
        self.feed_forward(x)
        return self.layers[-1].output

    def real_output(self, x):
        if x > 0:
            return 1
        else:
            return -1

    def evaluate(self, X, Y):
        error = 0.0
        for i in range(len(X)):
            if self.real_output(self.predict(X[i])) != Y[i]:
                error += 1
                print self.predict(X[i])
        return error / len(X)
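
# Minimal usage sketch (illustrative only; assumes the layer package from this repo is on the
# path and that each data row is laid out as [features..., label], as in real_test below):
#
#   nn = Network([2, 3, 4, 1], eta=0.01)
#   nn.train(train_data, T=10000)
#   error_rate = nn.evaluate(test_data[:, :-1], test_data[:, -1])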
def grad_check():
    train_data = np.loadtxt("t_train.dat")
    nn = Network([2, 3, 4, 1], 0.01)
    shapes = get_shapes(nn)
    ws = get_weights(nn)
    bs = get_biases(nn)
    weights = np.append(ws, bs)
    x = train_data[0][:-1]
    y = train_data[0][-1]
    num_grads = compute_num_grads(nn, x, y, shapes, weights)
    w11, b11 = reconstruct(weights, shapes)
    nn.set_weights_biases(w11, b11)
    analytic_grads = nn.get_analytic_grads(x, y)
    print num_grads
    print analytic_grads
    diff = num_grads - analytic_grads
    num = abs(sum(diff))
    den = abs(sum(num_grads)) + abs(sum(analytic_grads))
    print num / den
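
# The numerical-gradient helpers (get_shapes, get_weights, get_biases, compute_num_grads,
# reconstruct) live in layer.util and are not shown in this file. A typical central-difference
# implementation of compute_num_grads would look roughly like the sketch below (an illustrative
# assumption, not the repo's actual code; eps is a hypothetical step size):
#
#   def compute_num_grads(nn, x, y, shapes, params, eps=1e-4):
#       grads = np.zeros_like(params)
#       for i in range(len(params)):
#           plus, minus = params.copy(), params.copy()
#           plus[i] += eps
#           minus[i] -= eps
#           w_p, b_p = reconstruct(plus, shapes)
#           w_m, b_m = reconstruct(minus, shapes)
#           grads[i] = (nn.cal_loss(x, y, w_p, b_p) - nn.cal_loss(x, y, w_m, b_m)) / (2 * eps)
#       return grads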
def real_test():
    train_data = np.loadtxt("t_train.dat")
    nn = Network([2, 3, 4, 1], 0.01)
    nn.train(train_data, 100000)
    test_data = np.loadtxt("t_test.dat")
    print nn.evaluate(test_data[:, :-1], test_data[:, -1])


if __name__ == "__main__":
    real_test()
    # grad_check()
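
# Data-format note (inferred from the code above): t_train.dat and t_test.dat are expected to be
# whitespace-separated text files readable by np.loadtxt, with each row holding the input
# features followed by a single +/-1 label in the last column (here 2 features and 1 label,
# matching Network([2, 3, 4, 1])).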