'''
Run the test-set samples through the trained network and report the
per-sample error, the accuracy, and the mean error.
'''
error_te = []  # absolute error for each test sample
score = []     # 1 if the sample counts as correct, else 0
m = len(data_te)
for it in range(m):  # the test set has m records
    # Network input: the two features plus the fixed bias input (-1).
    net_in = np.array([data_te.iloc[it, 0], data_te.iloc[it, 1], -1])
    real = data_te.iloc[it, 2]  # ground-truth target value
    # Input -> hidden layer (4 hidden units).
    for i in range(4):
        out_in[i] = sigmoid(sum(net_in * w_mid[:, i]))
    # Hidden -> output layer: the model prediction.
    res = sigmoid(sum(out_in * w_out))
    error_te.append(abs(real - res))
    # Threshold 0.1718 decides whether the prediction counts as correct.
    score.append(1 if abs(real - res) < 0.1718 else 0)
# np.asfarray was removed in NumPy 2.0; asarray with dtype=float is equivalent.
score_array = np.asarray(score, dtype=float)
print("测试集进行1轮测试的正确率是:", (score_array.sum() / score_array.size) * 100, '%')
plt.plot(error_te)  # per-sample error over the test set
plt.show()
err2 = np.mean(error_te)
print("测试集进行1轮测试的平均误差:", err2)
![]()
import numpy as np
import scipy.special
import pylab
import matplotlib.pyplot as plt
#%%
class NeuralNetwork():
    """Three-layer (input/hidden/output) feed-forward network trained with
    one step of backpropagation per call to ``train``."""

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Store the layer sizes and learning rate, and draw random weights.

        Weights are sampled from N(0, sigma) with sigma = n**-0.5, where n
        is the size of the layer the weights feed into — a common heuristic
        that keeps initial activations away from sigmoid saturation.
        """
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        # wih: (hidden x input) weights; who: (output x hidden) weights.
        self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5),
                                    (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, pow(self.onodes, -0.5),
                                    (self.onodes, self.hnodes))
        # Sigmoid activation; expit is SciPy's numerically stable version,
        # so no lambda wrapper is needed.
        self.activation_function = scipy.special.expit

    # NOTE(review): this banner prints once, when the class statement is
    # executed, NOT when training starts — it probably belongs just before
    # the training loop in the driver script. Kept to preserve output.
    print("************Train start******************")

    def train(self, input_list, target_list):
        """Perform one gradient-descent step on a single sample.

        input_list:  iterable of self.inodes input values.
        target_list: iterable of self.onodes target values.
        """
        # Shape the 1-D lists into column vectors.
        inputs = np.array(input_list, ndmin=2).T
        targets = np.array(target_list, ndmin=2).T
        # Forward pass.
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Backward pass: output error, then the error apportioned to the
        # hidden layer through the output weights.
        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)
        # Gradient-descent updates; sigmoid derivative is y * (1 - y).
        self.who += self.lr * np.dot(
            (output_errors * final_outputs * (1.0 - final_outputs)),
            np.transpose(hidden_outputs))
        self.wih += self.lr * np.dot(
            (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
            np.transpose(inputs))

    def query(self, input_list):
        """Forward-propagate input_list; return the (onodes, 1) output array."""
        inputs = np.array(input_list, ndmin=2).T
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
#%%
input_nodes = 784 # input-layer neurons: one per pixel of a 28x28 MNIST image
hidden_nodes = 100 # hidden-layer neurons
output_nodes = 10 # output-layer neurons: one per digit class 0-9
learning_rate = 0.3 # learning rate for the weight updates
# Create the neural network
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
#%%
# Load the MNIST training set; each line is "label,p0,p1,...,p783".
# A context manager guarantees the file is closed even if readlines() fails.
with open(r"E:\1大二下课程\人工智能课程\代码和数据\第四章代码和数据\mnist_train.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()
#%%
# Number of passes (epochs) over the full training set.
i = 2
for e in range(i):
    # Train the network on every record of this epoch.
    for record in training_data_list:
        all_values = record.split(',')  # "label,p0,...,p783"
        # Scale pixel values from [0, 255] to [0.01, 1.0]; the +0.01 offset
        # avoids zero inputs, which would zero out the weight updates.
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
        # is the equivalent replacement.
        inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # Target vector: 0.01 everywhere, 0.99 at the labelled digit
        # (0.99 rather than 1.0 because the sigmoid never reaches 1).
        targets = np.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
#%%
# Load the MNIST test set with a context manager (auto-close).
with open(r"E:\1大二下课程\人工智能课程\代码和数据\第四章代码和数据\mnist_test.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()
# Peek at record 3 (its first field is the label — per the original note, 1).
all_values = test_data_list[2].split(',')
#%%
score = []  # 1 per correctly classified record, 0 otherwise
print("***************Test start!**********************")
for record in test_data_list:
    # Split "label,p0,...,p783" on commas.
    all_values = record.split(',')
    # The first field is the expected digit.
    correct_values = int(all_values[0])
    # Scale pixels to [0.01, 1.0] as network input. np.asfarray was removed
    # in NumPy 2.0; asarray with an explicit float dtype is equivalent.
    inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
    # Query the network: a (10, 1) column of output activations.
    outputs = n.query(inputs)
    # The predicted digit is the index of the strongest activation.
    label = np.argmax(outputs)
    # Score 1 when the prediction matches the expected digit.
    score.append(1 if label == correct_values else 0)
print(outputs)  # debug: activations of the last test record only
#%%
# np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
score_array = np.asarray(score, dtype=float)
# Accuracy = fraction of correctly classified records, as a percentage.
print("正确率是:", (score_array.sum() / score_array.size) * 100, '%')
![]()
def sigmoid(x):
    """Logistic function 1 / (1 + e**-x) for a scalar x.

    Written in the numerically stable two-branch form: the naive
    1/(1+math.exp(-x)) raises OverflowError once x drops below about -709,
    because exp(-x) overflows a double.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # For negative x, factor so exp is only taken of a negative argument.
    z = math.exp(x)
    return z / (1 + z)
#%%
import math
import numpy as np
import pandas as pd
from pandas import DataFrame
#%%
# Network input: two features (both 0.6) plus the bias input theata = -1.
Net_in = DataFrame(0.6, index=['input1', 'input2', 'theata'], columns=['a'])
# Hidden-layer outputs (4 units) plus the output layer's bias input.
# Initialise with 0.0 (float): sigmoid outputs are written into these cells
# later, and assigning floats into an int64 column is deprecated/raising
# in pandas 2.x ("incompatible dtype" setitem).
Out_in = DataFrame(0.0, index=['input1', 'input2', 'input3', 'input4', 'theata'], columns=['a'])
Net_in.iloc[2, 0] = -1  # bias input is the constant -1
Out_in.iloc[4, 0] = -1  # bias input to the output layer
# Target value for this sample: x1**2 + x2**2.
real = Net_in.iloc[0, 0] ** 2 + Net_in.iloc[1, 0] ** 2
print("Out_in")
Out_in
#%%
# Hidden-layer (3x4) and output-layer (5x1) weights, all initialised to 0.5;
# the 'theata' row of each frame holds the threshold weight.
W_mid = DataFrame(0.5, index=['input1', 'input2', 'theata'], columns=['mid1', 'mid2', 'mid3', 'mid4'])
W_out = DataFrame(0.5, index=['input1', 'input2', 'input3', 'input4', 'theata'], columns=['a'])
# Weight deltas start at 0.0 (float, not int): float updates are written into
# them later, and pandas 2.x deprecates setting floats into int64 columns.
W_mid_delta = DataFrame(0.0, index=['input1', 'input2', 'theata'], columns=['mid1', 'mid2', 'mid3', 'mid4'])
W_out_delta = DataFrame(0.0, index=['input1', 'input2', 'input3', 'input4', 'theata'], columns=['a'])
W_mid
print("W_Out")
W_out
#%%
# Forward pass — hidden-layer activations for the 4 hidden units.
# Index alignment on ['input1', 'input2', 'theata'] pairs each weight
# with its input (including the -1 bias) before summing.
for i in range(0, 4):
    weighted_sum = sum(W_mid.iloc[:, i] * Net_in.iloc[:, 0])
    Out_in.iloc[i, 0] = sigmoid(weighted_sum)
# Output-layer activation: the network's prediction for this sample.
res = sigmoid(sum(Out_in.iloc[:, 0] * W_out.iloc[:, 0]))
# Absolute error against the target value.
error = abs(res - real)
print("error")
error
#%%
yita = 0.6  # learning rate (eta)
# Output-layer weight change: eta * f'(net) * (target - output) * input,
# where f'(net) = res * (1 - res) for the sigmoid. Factor the scalar once.
delta_scale = yita * res * (1 - res) * (real - res)
W_out_delta.iloc[:, 0] = delta_scale * Out_in.iloc[:, 0]
print("W_out_delta", '\n', W_out_delta)
# Threshold (theata) entry: its "input" is the constant -1, hence the minus.
W_out_delta.iloc[4, 0] = -delta_scale
print("W_out_delta", '\n', W_out_delta)
# Apply the update to the output-layer weights.
W_out = W_out + W_out_delta
print("W_out")
W_out
#%%
# Hidden-layer weight changes: backpropagate the output error through each
# hidden unit (sigmoid derivative = y * (1 - y)).
# NOTE(review): W_out here is the matrix already updated in the previous
# step; textbook backprop uses the pre-update output weights — confirm intent.
for i in range(0, 4):
    act = Out_in.iloc[i, 0]
    back = yita * act * (1 - act) * W_out.iloc[i, 0] * res * (1 - res) * (real - res)
    W_mid_delta.iloc[:, i] = back * Net_in.iloc[:, 0]
    # Threshold (theata) row: its input is the constant -1.
    W_mid_delta.iloc[2, i] = -back
# Apply the accumulated update to the hidden-layer weights.
W_mid = W_mid + W_mid_delta
print("W_mid")
W_mid
![]()