2023年11月29日发布

4.2 神经网络算法代码实现

参考前一篇文章,用 Python 实现了一个版本,不过这个版本有一些坑,懒得去调了,

以后会出一个新的版本,这个版本就这样了吧

Python 代码

import numpy as np

# Hyperbolic tangent activation function.
def tanh(x):
    """Return tanh(x), the activation function used by this network."""
    # NOTE(review): page extraction stripped the dotted callee, leaving
    # "return (x)"; restored the intended np.tanh call.
    return np.tanh(x)

# Derivative of the hyperbolic tangent.
def tanh_derivative(x):
    """Return d/dx tanh(x) = 1 - tanh(x)**2."""
    # NOTE(review): page extraction stripped the dotted names, leaving
    # "1 - (x) * (x)"; restored 1 - np.tanh(x)**2, computed once.
    t = np.tanh(x)
    return 1 - t * t

# sigmoid function
# NOTE(review): the sigmoid implementation itself was lost during page
# extraction. What follows is a detached validator body whose 'def' line
# (presumably something like "def valid_two_array(self, data):", by
# analogy with valid_one_array below) is missing — recover the original
# signature from the source article before using this code.

if isinstance(data, list) and len(data) > 0:

if isinstance(data[0], list) == False or len(data[0]) == 0:

raise RuntimeError("参数错误,请传⼀个不为空的⼆维数组")

else:

raise RuntimeError("参数错误,请传⼀个不为空的⼆维数组")

# Validate a one-dimensional array argument.
def valid_one_array(self, data):
    """Raise RuntimeError unless *data* is a non-empty list.

    data: the argument to validate; must be a list with at least one item.
    """
    # idiomatic "not isinstance(...)" instead of "isinstance(...) == False"
    if not isinstance(data, list) or len(data) == 0:
        raise RuntimeError("参数错误,请传⼊⼀个不为空的⼀维数组")

# Initialize weight matrices.
def init_weights(self, layers):
    """Create one random weight matrix per adjacent pair of layers.

    layers: list of layer sizes, e.g. [2, 3, 1]; produces matrices of
    shape (layers[i-1], layers[i]) for i = 1..len(layers)-1.
    """
    # NOTE(review): extraction stripped dotted names; the leftovers
    # "s = []" and "(((layers[i - 1], layers[i])))" match
    # self.weights = [] and
    # self.weights.append(np.random.random((layers[i - 1], layers[i])))
    # exactly — confirm against the source article.
    self.weights = []
    for i in range(1, len(layers)):
        self.weights.append(np.random.random((layers[i - 1], layers[i])))

# Initialize bias vectors.
def init_bias(self, layers):
    """Create one random bias vector per non-input layer.

    layers: list of layer sizes; produces vectors of length layers[i]
    for i = 1..len(layers)-1.
    """
    # NOTE(review): the loop body was lost during page extraction and the
    # assignment residue "= []" suggests self.bias = []; the append line
    # is reconstructed by analogy with init_weights — confirm against the
    # source article.
    self.bias = []
    for i in range(1, len(layers)):
        self.bias.append(np.random.random(layers[i]))

# NOTE(review): detached body of a string-summary method (probably
# __str__); its 'def' line and the attribute references inside the
# str(...) calls were stripped by page extraction, and each "nn" prefix
# was most likely "\n" with the backslash lost. Recover the original
# method from the source article before using this code.
info = "nn weights: " + str(s)

+ "nn bais: " + str()

+ "nn nodes: " + str()

+ "nn errors: " + str()

+ "nn loss: " + str()

return info

# Output-layer error calculation.
# out: node value after applying the activation function
# predict: the original expected (target) value
def calculate_out_layer_error(self, out, predict):
    """Return the delta term for a single output node."""
    slope = out * (1 - out)    # derivative-style factor of the node value
    residual = predict - out   # distance between target and actual output
    return slope * residual

# Hidden-layer error calculation
# out: node value after applying the activation function
# errors: summed loss of all nodes in the next layer
# NOTE(review): everything below is a detached, truncated fragment of an
# error/loss computation method. Its 'def' line, the enclosing loop
# headers and the dotted names were stripped by page extraction (e.g.
# "ate_out_layer_error" is presumably the residue of
# self.calculate_out_layer_error, and the bare "[i]" / "[counter][i]" /
# "= loss" residues point at stripped self.* attributes). Recover the
# complete method from the source article before using this code.
current_node = current_nodes[i]

predict = [i]

error_value = ate_out_layer_error(current_node, predict)

[counter][i] = error_value

loss += pow(predict - current_node, 2)

= loss

else: # hidden-layer loss calculation

next_errors = [counter + 1]

for i in range(len(current_nodes)):

current_node = current_nodes[i]

验证写的代码

from NeraulNetwork import NeuralNetwork