BP Neural Network Algorithm Programs in C
6. (1) Implement the BP algorithm for a multilayer feedforward NN in C.
Requirements: the numbers of input and output nodes, the number of hidden layers, and the number of nodes in each hidden layer must all be arbitrary integers.
(2) Use the BP program to train a 2-layer feedforward network that realizes the XOR operation.
(3) Use the BP program to train two 2-layer feedforward NNs that realize the parity-check operation (as described in Problem 2.(2)) for input vectors of dimension n = 7 and n = 8.
Note, requirements for Problem 6: (i) tabulate the NN weights after training converges and the number of iterations used; (ii) report the training error and test error after convergence, and the rate of correct outputs when the training set and the test set are used as input; (iii) plot the NN learning curve (the curve of E(W(k)) versus iteration count k); this curve must be computed and printed by the program, not drawn by hand.
(1) Implementing the BP algorithm for a feedforward NN in C. Solution approach: implement the BP algorithm for the feedforward NN in C, save the errors to a text file, and finally plot the error curve with MATLAB.
(1.1) Design approach. The parity-check problem can be viewed as the generalization of the XOR problem from 2 inputs to n inputs: the output is 1 if an odd number of the n inputs are 1, and 0 if an even number of them are 1.
A 2-layer NN can realize the parity-check operation.
This solution therefore designs the BP network as a 2-layer network: one hidden layer plus one output layer.
[Figure: a 2-layer network with inputs $x_1, x_2, \dots, x_n$.]
The hidden layer and the output layer use the sigmoid activation function
$$\varphi(s) = \frac{1}{1+e^{-s}}, \qquad \frac{d\varphi(s)}{ds} = \varphi(s)\bigl(1-\varphi(s)\bigr)$$
[Figure: the sigmoid curve.]
From the definition of the parity-check problem, the classification function can be defined as
$$Y(y) = \begin{cases} 1, & y \ge 1/2 \\ 0, & y < 1/2 \end{cases}$$
where $y$ is the output value of the BP network and $Y$ is the classification result.
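As a quick illustration of this target function and threshold rule, here is a minimal sketch in C; `parity_target` and `classify` are illustrative helper names, not part of the program in the appendix.

```c
#include <stdio.h>

/* parity-check target: 1 iff an odd number of the n inputs are 1 */
int parity_target(const int *x, int n)
{
    int i, ones = 0;
    for (i = 0; i < n; i++)
        ones += (x[i] == 1);
    return ones % 2;
}

/* the classification function Y(y): threshold the NN output at 1/2 */
int classify(double y)
{
    return (y >= 0.5) ? 1 : 0;
}

int main(void)
{
    int x[7] = {1, 1, 0, 1, 0, 0, 0};   /* three ones -> odd -> target 1 */
    printf("target=%d, Y(0.83)=%d\n", parity_target(x, 7), classify(0.83));
    return 0;
}
```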
(1.2) Program flow. The BP algorithm for the multilayer feedforward NN is implemented in C; the results are saved to a data file and then plotted with MATLAB. [Figure: program flow chart.] The source code is given in the appendix.
The program for this problem uses file operations to record the learning and test data.
The weights are generated with a pseudo-random function, in the range (0, 0.5). Structs and linked storage implement the network, organized into a training-instance struct, a layer struct, and a network struct; the data-structure design follows Principles of Artificial Neural Networks (Ma Rui, Beijing: China Machine Press, 2010.7). The optimization of the learning algorithm also follows that book, using adaptive adjustment of the learning rate to reduce the number of learning iterations. Owing to the author's limited ability and knowledge, the program still has notable weaknesses: the adjustment coefficients for the learning rate are hard to tune. With a moderate range for the initial weights, the number of learning iterations drops markedly; with random initial weights in (0, 0.5) the iteration count could be tuned down to 135, but the discrimination on the test data was then unsatisfactory, so that setting was not adopted.

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <malloc.h>

#define TRUE  1
#define FALSE 0
#define NUM_LAYERS 4
#define NUM 20          /* number of training instances */
#define N   2           /* number of input-layer units */
#define M   2           /* number of output-layer units */

int Units[NUM_LAYERS] = {N, 3, 3, M};   /* units per layer */
FILE *fp, *fb;

typedef struct {        /* training instance */
    float x[N];
    float y[M];
} TRAIN;

typedef struct {        /* network layer */
    int    Units;       /* number of units in this layer */
    float *Output;      /* output of unit i */
    float *Error;       /* correction error of unit i */
    float **Weight;     /* connection weights of unit i */
} LAYER;

typedef struct {        /* network */
    LAYER **Layer;      /* all layers */
    LAYER *Inputlayer;  /* input layer */
    LAYER *Outputlayer; /* output layer */
    float Error;        /* error tolerance */
    float Eta;          /* learning rate */
} NET;

/* seed the pseudo-random generator */
void InitializeRandoms()
{
    srand(4711);
}

/* produce a normalized random real in [0, 0.5) */
float RandomReal()
{
    return (float)((rand() % 100) / 200.0);
}

/* load the training data */
void InitializeTrainingData(TRAIN *training)
{
    int i, j;
    char filename[20];
    printf("\nEnter the training-data file name:\n");
    gets(filename);
    fb = fopen(filename, "r");
    fprintf(fp, "\n\n--Saving initialization training datas ...\n");
    for (i = 0; i < NUM; i++) {
        for (j = 0; j < N; j++) {
            fscanf(fb, "%f", &(training + i)->x[j]);
            fprintf(fp, "%10.4f", (training + i)->x[j]);
        }
        for (j = 0; j < M; j++) {
            fscanf(fb, "%f", &(training + i)->y[j]);
            fprintf(fp, "%10.4f", (training + i)->y[j]);
        }
        fprintf(fp, "\n");
    }
    fclose(fb);
}

/* application setup */
void InitializeApplication(NET *Net)
{
    Net->Eta   = (float)0.3;
    Net->Error = (float)0.0001;
    fp = fopen("BPResultData.txt", "w+");
}

/* close the open files on shutdown */
void FinalizeApplication(NET *Net)
{
    fclose(fp);
}

/* allocate memory and build the network */
void GenerateNetwork(NET *Net)
{
    int l, i;
    Net->Layer = (LAYER **)calloc(NUM_LAYERS, sizeof(LAYER *));
    for (l = 0; l < NUM_LAYERS; l++) {
        Net->Layer[l] = (LAYER *)malloc(sizeof(LAYER));
        Net->Layer[l]->Units  = Units[l];
        Net->Layer[l]->Output = (float *) calloc(Units[l] + 1, sizeof(float));
        Net->Layer[l]->Error  = (float *) calloc(Units[l] + 1, sizeof(float));
        Net->Layer[l]->Weight = (float **)calloc(Units[l] + 1, sizeof(float *));
        Net->Layer[l]->Output[0] = 1;
        if (l != 0)
            for (i = 1; i <= Units[l]; i++)   /* unit indices start at 1 */
                Net->Layer[l]->Weight[i] =
                    (float *)calloc(Units[l-1] + 1, sizeof(float));
    }
    Net->Inputlayer  = Net->Layer[0];
    Net->Outputlayer = Net->Layer[NUM_LAYERS - 1];
}

/* random initial connection weights */
void RandomWeights(NET *Net)
{
    int l, i, j;
    for (l = 1; l < NUM_LAYERS; l++)
        for (i = 1; i <= Net->Layer[l]->Units; i++)
            for (j = 0; j <= Net->Layer[l-1]->Units; j++)
                Net->Layer[l]->Weight[i][j] = RandomReal();
}

/* set the input-layer outputs */
void SetInput(NET *Net, float *Input)
{
    int i;
    for (i = 1; i <= Net->Inputlayer->Units; i++)
        Net->Inputlayer->Output[i] = Input[i-1];   /* input layer uses u(x) = x */
}

/* read the output-layer outputs */
void GetOutput(NET *Net, float *Output)
{
    int i;
    for (i = 1; i <= Net->Outputlayer->Units; i++)
        Output[i-1] = (float)(1 / (1 + exp(-Net->Outputlayer->Output[i])));
        /* output layer uses f(x) = 1/(1+e^(-x)) */
}

/* forward propagation between two adjacent layers */
void PropagateLayer(NET *Net, LAYER *Lower, LAYER *Upper)
{
    int i, j;
    float sum;
    for (i = 1; i <= Upper->Units; i++) {
        sum = 0;
        for (j = 1; j <= Lower->Units; j++)
            sum += (Upper->Weight[i][j] * Lower->Output[j]);
        Upper->Output[i] = (float)(1 / (1 + exp(-sum)));
    }
}

/* forward propagation through the whole network */
void PropagateNet(NET *Net)
{
    int l;
    for (l = 0; l < NUM_LAYERS - 1; l++)
        PropagateLayer(Net, Net->Layer[l], Net->Layer[l+1]);
}

/* output-layer error */
void ComputeOutputError(NET *Net, float *target)
{
    int i;
    float Out, Err;
    for (i = 1; i <= Net->Outputlayer->Units; i++) {
        Out = Net->Outputlayer->Output[i];
        Err = target[i-1] - Out;
        Net->Outputlayer->Error[i] = Out * (1 - Out) * Err;
    }
}

/* error back-propagation between two adjacent layers */
void BackpropagateLayer(NET *Net, LAYER *Upper, LAYER *Lower)
{
    int i, j;
    float Out, Err;
    for (i = 1; i <= Lower->Units; i++) {
        Out = Lower->Output[i];
        Err = 0;
        for (j = 1; j <= Upper->Units; j++)
            Err += (Upper->Weight[j][i] * Upper->Error[j]);
        Lower->Error[i] = Out * (1 - Out) * Err;
    }
}

/* error back-propagation through the whole network */
void BackpropagateNet(NET *Net)
{
    int l;
    for (l = NUM_LAYERS - 1; l > 1; l--)
        BackpropagateLayer(Net, Net->Layer[l], Net->Layer[l-1]);
}

/* weight adjustment */
void AdjustWeights(NET *Net)
{
    int l, i, j;
    float Out, Err;
    for (l = 1; l < NUM_LAYERS; l++)
        for (i = 1; i <= Net->Layer[l]->Units; i++)
            for (j = 0; j <= Net->Layer[l-1]->Units; j++) {
                Out = Net->Layer[l-1]->Output[j];
                Err = Net->Layer[l]->Error[i];
                Net->Layer[l]->Weight[i][j] += (Net->Eta * Err * Out);
            }
}

/* one pass through the network */
void SimulateNet(NET *Net, float *Input, float *Output, float *target, int TrainOrNot)
{
    SetInput(Net, Input);              /* feed the input */
    PropagateNet(Net);                 /* forward propagation */
    GetOutput(Net, Output);            /* produce the output */
    ComputeOutputError(Net, target);   /* compute the output error */
    if (TrainOrNot) {
        BackpropagateNet(Net);         /* back-propagate the error */
        AdjustWeights(Net);            /* adjust the weights */
    }
}

/* training loop with adaptive learning rate */
void TrainNet(NET *Net, TRAIN *training)
{
    int l, i, j, k;
    int count = 0, flag = 0;
    float Output[M], outputfront[M], ERR, err, sum;
    do {
        flag = 0;
        sum = 0;
        ERR = 0;
        if (count >= 1)
            for (j = 0; j < M; j++)
                outputfront[j] = Output[j];
        SimulateNet(Net, (training + (count % NUM))->x, Output,
                    (training + (count % NUM))->y, TRUE);
        if (count >= 1) {
            k = count % NUM;
            for (i = 1; i <= Net->Outputlayer->Units; i++) {
                sum += Net->Outputlayer->Error[i];
                err  = (training + k - 1)->y[i-1] - outputfront[i-1];
                ERR += (outputfront[i-1] * (1 - outputfront[i-1]) * err);
            }
            if (sum <= ERR)
                Net->Eta = (float)(0.9999 * Net->Eta);  /* error not shrinking: lower eta */
            else
                Net->Eta = (float)(1.0015 * Net->Eta);  /* error shrinking: raise eta */
        }
        if (count >= NUM) {
            for (k = 1; k <= M; k++)
                if (Net->Outputlayer->Error[k] > Net->Error) { flag = 1; break; }
            if (k > M)
                flag = 0;
        }
        count++;
    } while (flag || count <= NUM);

    fprintf(fp, "\n\n\n--training results ...\n");
    fprintf(fp, "training times: %d\n", count);
    fprintf(fp, "\n*****the final weights*****\n");
    for (l = 1; l < NUM_LAYERS; l++) {
        for (i = 1; i <= Net->Layer[l]->Units; i++) {
            for (j = 1; j <= Net->Layer[l-1]->Units; j++)
                fprintf(fp, "%15.6f", Net->Layer[l]->Weight[i][j]);
            fprintf(fp, "\n");
        }
        fprintf(fp, "\n\n");
    }
}

/* evaluation: print a graded judgment for each output unit,
 * one grade per 0.1 band of the output value */
void EvaluateNet(NET *Net)
{
    static const char *grade[11] = {
        "is definitely not", "barely resembles", "slightly resembles",
        "somewhat resembles", "rather resembles", "is probably",
        "is quite likely",   "is very likely",   "is extremely likely",
        "is almost certainly", "is definitely"
    };
    int i, g;
    printf("\n\n(");
    fprintf(fp, "\n\n(");
    for (i = 1; i <= Net->Inputlayer->Units; i++) {
        printf(" %.4f", Net->Inputlayer->Output[i]);
        fprintf(fp, "%10.4f", Net->Inputlayer->Output[i]);
    }
    printf(")\t");
    fprintf(fp, ")\t");
    for (i = 1; i <= Net->Outputlayer->Units; i++)
        for (g = 0; g <= 10; g++)
            if (fabs(Net->Outputlayer->Output[i] - g / 10.0) <= 0.0499) {
                printf("%s class %d, ", grade[g], i);
                fprintf(fp, "%s class %d, ", grade[g], i);
            }
    printf("\n\n");
    fprintf(fp, "\n\n\n");
}

/* interactive testing */
void TestNet(NET *Net)
{
    TRAIN Testdata;
    float Output[M];
    int i, j, flag = 0;
    char select;
    fprintf(fp, "\n\n--Saving test datas ...\n");
    do {
        printf("\nEnter test data (x1 x2 ... y1 y2 ...):\n");
        for (j = 0; j < N; j++) {
            scanf("%f", &Testdata.x[j]);
            fprintf(fp, "%10.4f", Testdata.x[j]);
        }
        for (j = 0; j < M; j++) {
            scanf("%f", &Testdata.y[j]);
            fprintf(fp, "%10.4f", Testdata.y[j]);
        }
        fprintf(fp, "\n");
        SimulateNet(Net, Testdata.x, Output, Testdata.y, FALSE);
        fprintf(fp, "\n--NET Output and Error of the Test Data ....\n");
        for (i = 1; i <= Net->Outputlayer->Units; i++)
            fprintf(fp, "%10.6f %10.6f\n",
                    Net->Outputlayer->Output[i], Net->Outputlayer->Error[i]);
        EvaluateNet(Net);
        printf("\nTest again? (y/n):\n");
        getchar();
        scanf("%c", &select);
        printf("\n");
        flag = (select == 'y' || select == 'Y') ? 1 : 0;
    } while (flag);
}

void main()
{
    TRAIN TrainingData[NUM];
    NET Net;
    InitializeRandoms();                  /* seed the PRNG */
    GenerateNetwork(&Net);                /* build the network */
    RandomWeights(&Net);                  /* form the initial weights */
    InitializeApplication(&Net);          /* application setup */
    InitializeTrainingData(TrainingData); /* load the training data */
    TrainNet(&Net, TrainingData);         /* train */
    TestNet(&Net);                        /* test */
    FinalizeApplication(&Net);            /* cleanup */
}
```
#include "stdio.h"#include "stdlib.h"#include "time.h"#include "math.h"/*********************************************inpoints 为输入神经元个数,可改变outpoints为输出神经元个数defaultpoints为隐层神经元个数datagrough为样本数据个数****************************************************以下数据定义可以修改*****/#define A 0#define a 1#define b 1#define c 1#define ALFA 0.85#define BETA 0.2 //学习率0~1#define Total 20000#define inpoints 9#define outpoints 5#define defaultpoints 28#define datagrough 44#define forecastdata 4/**********定义所需变量********/double InpointData[datagrough][inpoints],OutpointData[datagrough][outpoints]; /* 输入输出数据 */double InpointData_MAX[inpoints],InpointData_MIN[inpoints]; /* 每个因素最大数据 */double OutpointData_MAX[outpoints],OutpointData_MIN[outpoints]; /* 每个因素最小数据 */doublew[defaultpoints][inpoints],limen[defaultpoints],v[outpoints][defaultpoints]; /* 连接权值、阈值 */doubledlta_w[defaultpoints][inpoints],dlta_limen[defaultpoints],dlta_v[outpoints][defaultpoints]; /* 连接权、阈值修正值 */doubledefaultOutpoint[defaultpoints],Outpoint_dp[outpoints],Outpoint_ep[datagrough];/**************************读数据文件******************************/void ReadData(){FILE *fp1,*fp2;int i,j;if((fp1=fopen("D:\\data\\训练输入.txt","r"))==NULL){printf("1can not open the file\n");exit(0);}for(i=0;i<datagrough;i++)for(j=0;j<inpoints;j++)fscanf(fp1,"%lf",&InpointData[i][j]);fclose(fp1);if((fp2=fopen("D:\\data\\训练输出.txt","r"))==NULL){printf("2can not open the file\n");exit(0);}for(i=0;i<datagrough;i++)for(j=0;j<outpoints;j++)fscanf(fp2,"%lf",&OutpointData[i][j]);fclose(fp2);}/*****************************************************//*****************************************归一化******************************************************/void unitary(){int i,j;int k=0;for(j=0;j<inpoints;j++) //找出每列的最大、最小值存放在数组InpointData_MAX[j]、InpointData_MIN[j]中{InpointData_MAX[j]=InpointData[0][j];InpointData_MIN[j]=InpointData[0][j];for(i=0;i<datagrough;i++)if(InpointData_MAX[j]<InpointData[i][j])InpointData_MAX[j]=InpointData[i][j];else if(InpointData_MIN[j]>InpointData[i][j])InpointData_MIN[j]=InpointData[i][j];}for(j=0;j<outpoints;j++) //找出每列的最大、最小值存放在数组OutpointData_MAX[j]、OutpointData_MIN[j]中{OutpointData_MAX[j]=OutpointData[0][j];OutpointData_MIN[j]=OutpointData[0][j];for(i=0;i<datagrough;i++)if(OutpointData_MAX[j]<OutpointData[i][j])OutpointData_MAX[j]=OutpointData[i][j];else if(OutpointData_MIN[j]>OutpointData[i][j])OutpointData_MIN[j]=OutpointData[i][j];}/***************将数据归一处理,处理之后的数据全部在[0,1]之间*************************/for(j=0;j<inpoints;j++)for(i=0;i<datagrough;i++)if(InpointData_MAX[j]==0)InpointData[i][j]=0;elseInpointData[i][j]=(InpointData[i][j]-InpointData_MIN[j]+A)/(InpointData_MAX[j]-InpointData_MIN[j]+A);for(j=0;j<outpoints;j++)for(i=0;i<datagrough;i++)if(OutpointData_MAX[j]==0)OutpointData[i][j]=0;elseOutpointData[i][j]=(OutpointData[i][j]-OutpointData_MIN[j]+A)/(OutpointData_MAX [j]-OutpointData_MIN[j]+A);}/*****************************************************//*********************初始化,随机赋初值**************************/void Initialization(){int i,j;srand((unsigned)time(NULL)); //头文件名 #include <time.h>for(i=0;i<defaultpoints;i++) //给输入层到隐层的连接权赋随机值LianJie_w[i][j],这些值在[0,1]for(j=0;j<inpoints;j++){w[i][j]=(rand()*2.0/RAND_MAX-1)/2;dlta_w[i][j]=0;}for(i=0;i<defaultpoints;i++){limen[i]=(rand()*2.0/RAND_MAX-1)/2;dlta_limen[i]=0;}for(i=0;i<outpoints;i++) //给隐层到输出层的连接权赋初值for(j=0;j<defaultpoints;j++){v[i][j]=(rand()*2.0/RAND_MAX-1)/2;dlta_v[i][j]=0;}}/**********************求单样本的计算输出误差*******************************/ void out_sub1(int t){int i,j;double defaultInpoint[defaultpoints];double 
Outpoint_y[outpoints];Outpoint_ep[t]=0;for(i=0;i<defaultpoints;i++){double sum=0;for(j=0;j<inpoints;j++)sum+=w[i][j]*InpointData[t][j];defaultInpoint[i]=sum+limen[i];defaultOutpoint[i]=1/(a+b*exp(-1*c*defaultInpoint[i]));//求O[i] }for(j=0;j<outpoints;j++)//求Y[i]{Outpoint_y[j]=0;for(i=0;i<defaultpoints;i++)Outpoint_y[j]+=v[j][i]*defaultOutpoint[i];Outpoint_dp[j]=OutpointData[t][j]-Outpoint_y[j];Outpoint_ep[t]+=Outpoint_dp[j]*Outpoint_dp[j]/2;}}/*****************************反算权值******************************************/void out_sub2(int t){int i,j,k;double s;for(i=0;i<defaultpoints;i++){s=0;for(j=0;j<outpoints;j++){dlta_v[j][i]=ALFA*dlta_v[j][i]+BETA*Outpoint_dp[j]*defaultOutpoint[i]; //s+=v[j][i]*Outpoint_dp[j];v[j][i]+=dlta_v[j][i];}dlta_limen[i]=ALFA*dlta_limen[i]+BETA*defaultOutpoint[i]*(1-defaultOutpoint[i]) *s;//limen[i]+=dlta_limen[i];for(k=0;k<inpoints;k++){dlta_w[i][k]=ALFA*dlta_w[i][k]+BETA*defaultOutpoint[i]*(1-defaultOutpoint[i])*s *InpointData[t][k];//w[i][k]=w[i][k]+dlta_w[i][k];}}}/*******************************************************/void forecast(){int i,j,t,k=0;double e,e1[forecastdata]={0}; //训练误差double sss;double InputData_x[forecastdata][inpoints],tp[forecastdata][outpoints];doubledefInpoint,defOutpoint[defaultpoints],y[forecastdata][outpoints];//y[forecastda ta][outpoints]为网络检验输出FILE *fp1,*fp3;if((fp1=fopen("D:\\data\\预测输入.txt","r"))==NULL) //检验数据输入{printf("3can not open the file\n");exit(0);}for(i=0;i<forecastdata;i++)for(j=0;j<inpoints;j++)fscanf(fp1,"%lf",&InputData_x[i][j]);fclose(fp1);if((fp3=fopen("D:\\data\\预测输出.txt","r"))==NULL) //实际检验结果输出{printf("31can not open the file\n");exit(0);}for(i=0;i<forecastdata;i++)for(j=0;j<outpoints;j++)fscanf(fp3,"%lf",&tp[i][j]);fclose(fp3);for(j=0;j<inpoints;j++) // 检验数据归一化for(i=0;i<forecastdata;i++)if(InpointData_MAX[j]==0)InputData_x[i][j]=0;elseInputData_x[i][j]=(InputData_x[i][j]-InpointData_MIN[j]+A)/(InpointData_MAX[j]-InpointData_MIN[j]+A);for(j=0;j<outpoints;j++)for(i=0;i<forecastdata;i++)if(OutpointData_MAX[j]==0)tp[i][j]=0;elsetp[i][j]=(tp[i][j]-OutpointData_MIN[j]+A)/(OutpointData_MAX[j]-OutpointData_MIN [j]+A);do{Initialization(); //初始化连接权值w[i][j],limen[i],v[k][i]k=0;do{e=0;for(t=0;t<datagrough;t++){out_sub1(t); //正向计算网络输出out_sub2(t); //反向计算,修正权值e+=Outpoint_ep[t]; //计算输出误差}k++;}while((k<Total)&&(e>0.1));sss=0; //中间参数for(t=0;t<forecastdata;t++){e1[t]=0;for(i=0;i<defaultpoints;i++){double sum=0;for(j=0;j<inpoints;j++)sum+=w[i][j]*InputData_x[t][j];defInpoint=sum+limen[i];defOutpoint[i]=1/(a+b*exp(-1*c*defInpoint));}for(j=0;j<outpoints;j++){y[t][j]=0;for(i=0;i<defaultpoints;i++)y[t][j]+=v[j][i]*defOutpoint[i];e1[t]+=(y[t][j]-tp[t][j])*(y[t][j]-tp[t][j])/2;y[t][j]=y[t][j]*(OutpointData_MAX[j]-OutpointData_MIN[j]+A)+OutpointData_MI N[j]-A;}sss+=e1[t];}sss=sss/forecastdata;printf(" %lf %lf\n",e,sss);}while(sss>0.12);}/********************************************************/void main(){int i,j,k;FILE *fp2;ReadData(); //读训练数据:输入和输出unitary(); //归一化,将输入输出数据归一,结果在[0,1]中forecast(); //检验误差if((fp2=fopen("D:\\data\\计算权值.txt","w"))==NULL) //文件输出训练好的权值{printf("6can not open the file\n");exit(0);}for(i=0;i<defaultpoints;i++){for(k=0;k<inpoints;k++)fprintf(fp2," %lf ",w[i][k]);fprintf(fp2,"\n");}fprintf(fp2,"\n");for(i=0;i<defaultpoints;i++)fprintf(fp2," %lf ",limen[i]);fprintf(fp2,"\n\n");for(i=0;i<defaultpoints;i++){for(j=0;j<outpoints;j++)fprintf(fp2," %lf ",v[j][i]);fprintf(fp2,"\n");}fclose(fp2);}。
```c
/************************************************
 * Back Propagation Algorithm
 ************************************************/
#include "stdio.h"
#include "stdlib.h"
#include "math.h"

/************************************************
 * The Definition of User Data
 ************************************************/
#define MAXINPUT  1
#define MAXHIDE   3
#define MAXOUTPUT 1
#define MAX  1
#define MIN -1
#define T  100
#define CA 4

double a = 0.8;
double b = 0.05;
double k = 0;
double error = 0;
int t = 0;
double sout[MAXOUTPUT];                 /* output-layer error signals */
double shide[MAXHIDE];                  /* hidden-layer error signals */
double m = 2;
double howchange[MAXHIDE][MAXOUTPUT];   /* hidden->output weight changes */
double ihwchange[MAXINPUT][MAXHIDE];    /* input->hidden weight changes */
double CatalogueOut[CA][MAXOUTPUT];
double CatalogueIn[CA][MAXINPUT];

/************************************************
 * The Definition of Data Structure
 ************************************************/
struct theBP {
    double input[MAXINPUT];
    double hide[MAXHIDE];
    double output[MAXOUTPUT];
    double ihw[MAXINPUT][MAXHIDE];
    double how[MAXHIDE][MAXOUTPUT];
};
struct theBP bpa;

/************************************************
 * Definition of Prototype
 ************************************************/
void WeightInitial();
void InitialError();
void InPutCatalogue();
void CalculateOut(int k);
void CalculateError(int k);
void ReverseHideError();
void CalculateChange();
void CalculateNewWeight();
void Test();
void TestCalculateOut();
void camain();

void main()
{
    int m = 0;
    WeightInitial();
    /* InitialError(); */
    InPutCatalogue();
    while (1) {
        printf("Choose an operation:\n");
        printf("0----------------train\n");
        printf("1----------------test\n");
        printf("2----------------quit\n");
        scanf("%d", &m);
        switch (m) {
        case 0: camain(); break;
        case 1: Test();   break;
        case 2: exit(0);
        }
    }
}

void camain()
{
    int k;
    for (t = 0; t < T; t++) {
        for (k = 0; k < CA; k++) {
            CalculateOut(k);
            CalculateError(k);
            ReverseHideError();
            CalculateChange();
            CalculateNewWeight();
        }
        for (k = 0; k < CA; k++)
            CalculateOut(k);
    }
}

/* initialize the weights */
void WeightInitial()
{
    int i, j;
    for (i = 0; i < MAXINPUT; i++)      /* input->hidden weights */
        for (j = 0; j < MAXHIDE; j++)
            bpa.ihw[i][j] = 0.3;  /* or ((double)rand()/RAND_MAX)*(MAX-MIN)+MIN */
    for (i = 0; i < MAXHIDE; i++)       /* hidden->output weights */
        for (j = 0; j < MAXOUTPUT; j++)
            bpa.how[i][j] = 0.2;  /* or ((double)rand()/RAND_MAX)*(MAX-MIN)+MIN */
}

/* read the training catalogue */
void InPutCatalogue()
{
    int k, i;
    for (k = 0; k < CA; k++) {
        printf("Enter the input values of sample %d:\n", k);
        for (i = 0; i < MAXINPUT; i++) {
            scanf("%lf", &bpa.input[i]);
            CatalogueIn[k][i] = bpa.input[i];
        }
        printf("Enter the output values of sample %d:\n", k);
        for (i = 0; i < MAXOUTPUT; i++)
            scanf("%lf", &CatalogueOut[k][i]);
    }
}

/* forward pass for sample k */
void CalculateOut(int k)
{
    int i, j;
    for (j = 0; j < MAXHIDE; j++) {     /* hidden-layer outputs */
        double sum2 = 0;
        for (i = 0; i < MAXINPUT; i++) {
            bpa.input[i] = CatalogueIn[k][i];
            sum2 += bpa.ihw[i][j] * bpa.input[i];   /* net input */
        }
        bpa.hide[j] = 1 / (1 + exp(-sum2));         /* output */
    }
    for (j = 0; j < MAXOUTPUT; j++) {   /* output-layer outputs */
        double sum3 = 0;
        for (i = 0; i < MAXHIDE; i++)
            sum3 += bpa.how[i][j] * bpa.hide[i];
        bpa.output[j] = 1 / (1 + exp(-sum3));
        printf("final output of sample %d: %lf\n", k, bpa.output[j]);
    }
}

void TestCalculateOut()
{
    int i, j;
    for (j = 0; j < MAXHIDE; j++) {     /* hidden-layer outputs */
        double sum1 = 0;
        for (i = 0; i < MAXINPUT; i++)
            sum1 += bpa.ihw[i][j] * bpa.input[i];
        bpa.hide[j] = 1 / (1 + exp(-sum1));
    }
    for (j = 0; j < MAXOUTPUT; j++) {   /* output-layer outputs */
        double sum2 = 0;
        for (i = 0; i < MAXHIDE; i++)
            sum2 += bpa.how[i][j] * bpa.hide[i];
        bpa.output[j] = 1 / (1 + exp(-sum2));
        printf("final output: %lf\n", bpa.output[j]);
    }
}

/* error of the output layer for sample k */
void CalculateError(int k)
{
    int i;
    double temp = 0;
    error = 0;
    for (i = 0; i < MAXOUTPUT; i++) {
        temp  = (CatalogueOut[k][i] - bpa.output[i]) * (CatalogueOut[k][i] - bpa.output[i]);
        error = 0.5 * temp + error;
    }
    for (i = 0; i < MAXOUTPUT; i++)
        sout[i] = (CatalogueOut[k][i] - bpa.output[i]) * bpa.output[i] * (1 - bpa.output[i]);
}

/* back-propagate the errors to the hidden layer */
void ReverseHideError()
{
    int i, j;
    for (i = 0; i < MAXHIDE; i++) {
        double sum = 0;
        for (j = 0; j < MAXOUTPUT; j++)
            sum += sout[j] * bpa.how[i][j];
        shide[i] = bpa.hide[i] * (1 - bpa.hide[i]) * sum;
    }
}

/* compute the weight changes (with momentum a and rate b) */
void CalculateChange()
{
    int i, j;
    for (i = 0; i < MAXHIDE; i++)       /* hidden->output */
        for (j = 0; j < MAXOUTPUT; j++)
            howchange[i][j] = a * howchange[i][j] + b * sout[j] * bpa.hide[i];
    for (i = 0; i < MAXINPUT; i++)      /* input->hidden */
        for (j = 0; j < MAXHIDE; j++)
            ihwchange[i][j] = a * ihwchange[i][j] + b * shide[j] * bpa.input[i];
}

/* apply the weight changes */
void CalculateNewWeight()
{
    int i, j;
    for (i = 0; i < MAXHIDE; i++)
        for (j = 0; j < MAXOUTPUT; j++)
            bpa.how[i][j] = bpa.how[i][j] + howchange[i][j];
    for (i = 0; i < MAXINPUT; i++)
        for (j = 0; j < MAXHIDE; j++)
            bpa.ihw[i][j] = bpa.ihw[i][j] + ihwchange[i][j];
}

void Test()
{
    int i;
    printf("Enter the test input values:\n");
    for (i = 0; i < MAXINPUT; i++)
        scanf("%lf", &bpa.input[i]);
    TestCalculateOut();
}
```
Graduation Project (Thesis) Proposal Report
The BP (Back Propagation) network was proposed in 1986 by a group of scientists led by Rumelhart and McClelland. It is a multilayer feedforward network trained by the error back-propagation algorithm, and it is among the most widely used neural-network models.
A BP network can learn and store a large number of input-output pattern mappings without the mathematical equations describing those mappings being given in advance.
Its learning rule is the method of steepest descent: back-propagation continually adjusts the network's weights and thresholds so as to minimize the network's sum of squared errors.
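In symbols (conventional BP notation, not fixed by this document; $\eta$ is the learning rate, $t_k^{(p)}$ and $o_k^{(p)}$ the target and actual outputs of unit $k$ on pattern $p$):
$$E(W) = \frac{1}{2}\sum_{p}\sum_{k}\bigl(t_k^{(p)} - o_k^{(p)}\bigr)^2, \qquad \Delta w_{ij} = -\eta\,\frac{\partial E}{\partial w_{ij}}$$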
The topology of a BP neural-network model comprises an input layer, a hidden layer, and an output layer (see Fig. 3.9).
The learning process of a BP network consists of four parts:
(1) forward propagation of the input pattern (the input propagates from the input layer through the middle layers to the output layer);
(2) backward propagation of the output error (the output error propagates from the output layer through the middle layers back toward the input layer);
(3) cyclic memory training (the forward-propagation and error back-propagation computations alternate repeatedly);
(4) convergence check (deciding whether the global error is approaching a minimum).
The program is organized around these four processes, each coded separately.
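A self-contained sketch of this four-phase organization, on the XOR problem with a minimal 2-2-1 sigmoid network; all names and constants here are illustrative and are not taken from the listings below (with an unlucky random seed the net can stall in a local minimum, in which case a different seed or learning rate helps):

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define ETA 0.5                       /* learning rate (illustrative value) */

static double sig(double x) { return 1.0 / (1.0 + exp(-x)); }

int main(void)
{
    double x[4][2] = {{0,0},{0,1},{1,0},{1,1}};
    double t[4] = {0, 1, 1, 0};       /* XOR targets */
    double wih[3][2], who[3], h[2], E = 0;  /* third row/element is the bias */
    int i, j, p, epoch;

    srand(1);
    for (j = 0; j < 2; j++) {
        who[j] = rand() / (double)RAND_MAX - 0.5;
        for (i = 0; i < 3; i++)
            wih[i][j] = rand() / (double)RAND_MAX - 0.5;
    }
    who[2] = rand() / (double)RAND_MAX - 0.5;

    for (epoch = 0; epoch < 100000; epoch++) {   /* (3) cyclic memory training */
        E = 0;
        for (p = 0; p < 4; p++) {
            /* (1) forward propagation of the input pattern */
            for (j = 0; j < 2; j++)
                h[j] = sig(wih[0][j]*x[p][0] + wih[1][j]*x[p][1] + wih[2][j]);
            double o = sig(who[0]*h[0] + who[1]*h[1] + who[2]);
            E += 0.5 * (t[p] - o) * (t[p] - o);
            /* (2) backward propagation of the output error, plus weight update */
            double dout = (t[p] - o) * o * (1 - o);
            for (j = 0; j < 2; j++) {
                double dh = h[j] * (1 - h[j]) * dout * who[j];
                who[j]    += ETA * dout * h[j];
                wih[0][j] += ETA * dh * x[p][0];
                wih[1][j] += ETA * dh * x[p][1];
                wih[2][j] += ETA * dh;
            }
            who[2] += ETA * dout;
        }
        if (E < 1e-4)                            /* (4) convergence check */
            break;
    }
    printf("epochs = %d, E = %g\n", epoch, E);
    for (p = 0; p < 4; p++) {
        for (j = 0; j < 2; j++)
            h[j] = sig(wih[0][j]*x[p][0] + wih[1][j]*x[p][1] + wih[2][j]);
        printf("%g %g -> %g\n", x[p][0], x[p][1],
               sig(who[0]*h[0] + who[1]*h[1] + who[2]));
    }
    return 0;
}
```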
II. Main content of the design (thesis)
1. Become familiar with the C programming environment
2. Study the working principle of BP networks
3. Program the training and testing of the neural network
4. A prediction example.
BP Neural Network Algorithm Program

I. Principle of the BP neural-network algorithm. A BP neural network consists of three kinds of layers: an input layer, hidden layers, and an output layer.
Weight coefficients connect the neuron nodes of each layer to those of the next layer.
Each neuron node passes its input signal through an activation function to obtain an output signal, which it hands on to the nodes of the next layer.
During back-propagation, the error signal of the output layer is computed first from the error evaluation; from it, the error signals of the hidden layers (back toward the input layer) are computed in turn.
The network's weight coefficients are then updated layer by layer according to these error signals.
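Written out for the sigmoid networks used throughout this document (standard notation; $t_k$ is the target, $o$ a unit's output, $\eta$ the learning rate):
$$\delta_k^{\mathrm{out}} = (t_k - o_k)\,o_k(1-o_k), \qquad \delta_j^{\mathrm{hid}} = o_j(1-o_j)\sum_k w_{jk}\,\delta_k^{\mathrm{out}}, \qquad \Delta w_{ij} = \eta\,\delta_j\,o_i$$
These are exactly the quantities computed by ComputeOutputError, BackpropagateLayer, and AdjustWeights in the first C listing above.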
II. Steps of the BP algorithm
1. Initialize the network weights: randomly initialize the weight coefficients between the layers.
2. Set the learning rate and the maximum number of iterations.
3. Train the network iteratively: repeat the following steps until a stopping condition is met.
a) From the current sample's input signal, compute the output by forward propagation.
b) Compare the network's output with the desired output to obtain the error signal.
c) According to the error signal, update the network's weight coefficients by back-propagation.
4. Test the network: on an independent test set, compute the network's outputs by forward propagation and evaluate its generalization ability.
III. An example BP program. Below is a simple BP neural-network example written in Python.
```python
import numpy as np

class BPNeuralNetwork:
    def __init__(self, input_dim, hidden_dim, output_dim):
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.W1 = np.random.randn(input_dim, hidden_dim)
        self.W2 = np.random.randn(hidden_dim, output_dim)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # derivative expressed in terms of the sigmoid's output
        return x * (1 - x)

    def forward_propagation(self, X):
        self.z2 = np.dot(X, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        self.y_hat = self.sigmoid(self.z3)
        return self.y_hat

    def backward_propagation(self, X, y, lr):
        self.loss = y - self.y_hat
        delta3 = self.loss * self.sigmoid_derivative(self.y_hat)
        dW2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T) * self.sigmoid_derivative(self.a2)
        dW1 = np.dot(X.T, delta2)
        self.W2 += lr * dW2
        self.W1 += lr * dW1

    def train(self, X, y, lr=0.1, epochs=1000):
        for i in range(epochs):
            y_hat = self.forward_propagation(X)
            self.backward_propagation(X, y, lr)

    def predict(self, X):
        return np.round(self.forward_propagation(X))

# example usage
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
nn = BPNeuralNetwork(2, 2, 1)
nn.train(X, y, lr=0.5, epochs=10000)  # train on the XOR samples before predicting
print("Predictions:")
print(nn.predict(X))
```
The above is a simple BP neural-network example program that trains a classifier for the XOR logic gate.
BP Neural Network -- C Implementation (Part 2). Part 1 described the data structures defined for the implementation, the program's execution flow, and its basic skeleton (see that post for details).
It left two key functions, computO(i) and backUpdate(i), unimplemented; the parameter i refers to the i-th sample. This part analyzes the implementation of both.
The BP network's output function computO(i) takes the input of sample i and predicts its output through the BP network's mechanism.
Recall the basic BP model (see the earlier post), with its formula (1) and the activation function of formula (2). In the BP network designed previously, the input-to-hidden weights are stored in w[Neuron][In] and the hidden-to-output weights in v[Out][Neuron]; the array o[Neuron] records each neuron's output after the activation function, and the network's prediction for the sample is stored in OutputData[Out].
From this, the following reference code can be derived:

```c
void computO(int var)
{
    int i, j;
    double sum, y;
    /* hidden-neuron outputs */
    for (i = 0; i < Neuron; ++i) {
        sum = 0;
        for (j = 0; j < In; ++j)
            sum += w[i][j] * d_in[var][j];
        o[i] = 1 / (1 + exp(-1 * sum));
    }
    /* hidden-to-output layer (linear output units) */
    for (i = 0; i < Out; ++i) {
        sum = 0;
        for (j = 0; j < Neuron; ++j)
            sum += v[i][j] * o[j];
        OutputData[i] = sum;
    }
}
```

The feedback learning function backUpdate(i) compares the predicted output against the sample's true output and then corrects the weights involved in the network; this is the key to the whole BP implementation.
The crux is obtaining the correction errors for w[Neuron][In] and v[Out][Neuron]. The derivation is given in the mathematical-analysis part of the basic-model post; for the network designed here, the corrections to w[Neuron][In] and v[Out][Neuron] are stored in the structures dw[Neuron][In] and dv[Out][Neuron].
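The excerpt stops before listing backUpdate(i). Here is a minimal sketch consistent with the structures named above and with the linear output layer of computO; the target array d_out[var][] and the learning-rate constant A are assumptions, not names confirmed by the original post.

```c
/* Hedged sketch of backUpdate(var), relying on the globals named in the
 * text (w, v, o, d_in, OutputData, dw, dv, Neuron, In, Out); d_out and
 * the learning rate A are assumed names. */
void backUpdate(int var)
{
    int i, j;
    double t;
    for (i = 0; i < Neuron; ++i) {
        t = 0;
        for (j = 0; j < Out; ++j) {
            /* accumulate the error fed back through v for hidden unit i */
            t += (OutputData[j] - d_out[var][j]) * v[j][i];
            /* gradient of the squared error w.r.t. v[j][i] (linear output) */
            dv[j][i] = A * (OutputData[j] - d_out[var][j]) * o[i];
        }
        /* chain rule through the sigmoid hidden unit o[i] */
        for (j = 0; j < In; ++j)
            dw[i][j] = A * t * o[i] * (1 - o[i]) * d_in[var][j];
    }
    /* gradient descent: subtract the corrections */
    for (i = 0; i < Neuron; ++i) {
        for (j = 0; j < Out; ++j)
            v[j][i] -= dv[j][i];
        for (j = 0; j < In; ++j)
            w[i][j] -= dw[i][j];
    }
}
```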
C Implementation of the BP Neural Network Algorithm. Below is a C implementation of a BP neural network; the comments should help in following the logic:

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_SIZE 2
#define HIDDEN_SIZE 2
#define OUTPUT_SIZE 1
#define LEARNING_RATE 0.1
#define MAX_ITERATIONS 10000  /* assumed value; the source uses this macro without defining it */

/* network structure */
typedef struct {
    double input[INPUT_SIZE];
    double hidden[HIDDEN_SIZE];
    double output[OUTPUT_SIZE];
    double weights_ih[INPUT_SIZE][HIDDEN_SIZE];
    double weights_ho[HIDDEN_SIZE][OUTPUT_SIZE];
} NeuralNetwork;

/* activation function */
double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

/* create a network with random weights in (-1, 1) */
NeuralNetwork* create_neural_network()
{
    NeuralNetwork* nn = (NeuralNetwork*)malloc(sizeof(NeuralNetwork));
    for (int i = 0; i < INPUT_SIZE; i++)
        for (int j = 0; j < HIDDEN_SIZE; j++)
            nn->weights_ih[i][j] = (double)rand() / RAND_MAX * 2 - 1;
    for (int i = 0; i < HIDDEN_SIZE; i++)
        for (int j = 0; j < OUTPUT_SIZE; j++)
            nn->weights_ho[i][j] = (double)rand() / RAND_MAX * 2 - 1;
    return nn;
}

/* forward propagation */
void forward(NeuralNetwork* nn)
{
    for (int i = 0; i < HIDDEN_SIZE; i++) {    /* hidden-layer outputs */
        double sum = 0;
        for (int j = 0; j < INPUT_SIZE; j++)
            sum += nn->input[j] * nn->weights_ih[j][i];
        nn->hidden[i] = sigmoid(sum);
    }
    for (int i = 0; i < OUTPUT_SIZE; i++) {    /* output-layer outputs */
        double sum = 0;
        for (int j = 0; j < HIDDEN_SIZE; j++)
            sum += nn->hidden[j] * nn->weights_ho[j][i];
        nn->output[i] = sigmoid(sum);
    }
}

void backpropagation(NeuralNetwork* nn, double target)
{
    /* output-layer error */
    double output_error[OUTPUT_SIZE];
    for (int i = 0; i < OUTPUT_SIZE; i++) {
        double delta = target - nn->output[i];
        output_error[i] = nn->output[i] * (1 - nn->output[i]) * delta;
    }
    /* update hidden->output weights */
    for (int i = 0; i < HIDDEN_SIZE; i++)
        for (int j = 0; j < OUTPUT_SIZE; j++)
            nn->weights_ho[i][j] += LEARNING_RATE * nn->hidden[i] * output_error[j];
    /* hidden-layer error */
    double hidden_error[HIDDEN_SIZE];
    for (int i = 0; i < HIDDEN_SIZE; i++) {
        double delta = 0;
        for (int j = 0; j < OUTPUT_SIZE; j++)
            delta += output_error[j] * nn->weights_ho[i][j];
        hidden_error[i] = nn->hidden[i] * (1 - nn->hidden[i]) * delta;
    }
    /* update input->hidden weights */
    for (int i = 0; i < INPUT_SIZE; i++)
        for (int j = 0; j < HIDDEN_SIZE; j++)
            nn->weights_ih[i][j] += LEARNING_RATE * nn->input[i] * hidden_error[j];
}

void train(NeuralNetwork* nn, double input[][2], double target[], int num_examples)
{
    int iteration = 0;
    double error = 0;
    while (iteration < MAX_ITERATIONS) {
        error = 0;
        for (int i = 0; i < num_examples; i++) {
            for (int j = 0; j < INPUT_SIZE; j++)
                nn->input[j] = input[i][j];
            forward(nn);
            backpropagation(nn, target[i]);
            error += fabs(target[i] - nn->output[0]);
        }
        if (error < 0.01)   /* error within tolerance */
            break;
        iteration++;
    }
    if (iteration == MAX_ITERATIONS)
        printf("Training failed! Error: %.8lf\n", error);
}

void predict(NeuralNetwork* nn, double input[])
{
    for (int i = 0; i < INPUT_SIZE; i++)
        nn->input[i] = input[i];
    forward(nn);
    printf("Prediction: %.8lf\n", nn->output[0]);
}

int main()
{
    NeuralNetwork* nn = create_neural_network();
    double input[4][2] = {{0,0},{0,1},{1,0},{1,1}};
    double target[4] = {0, 1, 1, 0};
    train(nn, input, target, 4);
    predict(nn, input[0]);
    predict(nn, input[1]);
    predict(nn, input[2]);
    predict(nn, input[3]);
    free(nn);
    return 0;
}
```

The code above implements a simple BP network with one input layer, one hidden layer, and one output layer.
Implementing the BP Algorithm. The BP algorithm (Back Propagation Algorithm) is a commonly used algorithm for training neural networks.
Its basic idea is to adjust the connection weights between neurons continually, so that the network's output approaches the desired output.
Implementing the BP algorithm involves the following steps. 1. Initialize the parameters: initialize the network's weights and biases, typically with small random numbers.
Also set the learning rate and the maximum number of iterations.
2. Forward propagation: feed the input data into the network and compute every neuron's output.
Specifically, the outputs of the first layer's neurons are simply the input data.
For the subsequent layers, the output can be computed as a[i] = sigmoid(z[i]), where a[i] is the output of layer i, z[i] is the weighted input sum of layer i, and sigmoid is the activation function.
3. Compute the error: from the network's output and the desired output, compute the network error.
The mean squared error is the usual error measure:
loss = 1/(2n) * Σ(y - a)^2, where n is the number of training samples, y the desired output, and a the network's actual output.
4. Back-propagation: propagate the error from the output layer back toward the input layer, layer by layer, updating the weights and biases.
Specifically, compute each layer's error and update the weights and biases with delta[i] = delta[i+1] * W[i+1]' * sigmoid_derivative(z[i]), W[i] = W[i] + learning_rate * delta[i] * a[i-1]', b[i] = b[i] + learning_rate * delta[i], where delta[i] is the error of layer i, W[i] the weights between layer i and layer i+1, b[i] the bias of layer i, learning_rate the learning rate, and sigmoid_derivative the derivative of the sigmoid function.
5. Iterate: following the update rule of step 4, keep adjusting the weights and biases until the maximum number of iterations is reached or the error falls below some threshold.
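As a one-step numeric illustration of the step-4 update for an output unit (where delta = (y - o) * sigmoid_derivative(o)); all values below are made up for the example and are not taken from any listing in this document:

```c
#include <stdio.h>

int main(void)
{
    double learning_rate = 0.5;
    double a = 0.6;                        /* output of the previous layer, a[i-1] */
    double o = 0.7, y = 1.0;               /* network output and desired output */
    double delta = (y - o) * o * (1 - o);  /* output delta: 0.3 * 0.7 * 0.3 = 0.063 */
    double W = 0.4;                        /* one current weight */
    W += learning_rate * delta * a;        /* 0.4 + 0.5*0.063*0.6 = 0.4189 */
    printf("delta = %.4f, updated W = %.4f\n", delta, W);
    return 0;
}
```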
BP Neural Network Algorithm (C Program). File I/O directory: F:\BP\. Training-sample file: 训练样本.txt, contents: 1 1 -1 1 -1 1 0 1 0 1. Output files: 权值.txt (weights) and 阈值.txt (thresholds).

```c
#include "stdlib.h"
#include "math.h"
#include "conio.h"
#include "stdio.h"

#define N  2   /* number of training samples */
#define IN 3   /* input-layer neurons */
#define HN 3   /* hidden-layer neurons */
#define ON 2   /* output-layer neurons */
#define Z  20  /* history of old weights, saved at every study step */

double P[IN];        /* input data of one sample */
double T[ON];        /* output data of one sample */
double W[HN][IN];    /* input->hidden weights */
double V[ON][HN];    /* hidden->output weights */
double X[HN];        /* hidden-layer net inputs */
double Y[ON];        /* output-layer net inputs */
double H[HN];        /* hidden-layer outputs */
double O[ON];        /* output-layer outputs */
double YU_HN[HN];    /* hidden-layer thresholds */
double YU_ON[ON];    /* output-layer thresholds */
double err_m[N];     /* total error of sample m */
double a;            /* output->hidden learning rate */
double b;            /* hidden->input learning rate */
double alpha;        /* momentum factor, used by the improved BP algorithm */
double d_err[ON];
FILE *fp;

/* structure holding the training samples */
struct {
    double input[IN];
    double teach[ON];
} Study_Data[N];

/* improved BP: save the weights of every study step */
struct {
    double old_W[HN][IN];
    double old_V[ON][HN];
} Old_WV[Z];

/* splash screen */
int Start_Show()
{
    clrscr();
    printf("\n ***********************\n");
    printf(" *   Welcome to use    *\n");
    printf(" *  this program of    *\n");
    printf(" * calculating the BP  *\n");
    printf(" *       model!        *\n");
    printf(" *  Happy every day!   *\n");
    printf(" ***********************\n");
    printf("\n\n Before starting, please read the follows carefully:\n\n");
    printf(" 1.Please ensure the Path of the '训练样本.txt' is\n   correct, like 'F:\\BP\\训练样本.txt'!\n");
    printf(" 2.The calculating results will be saved in the Path of 'F:\\BP\\'!\n");
    printf(" 3.The program will load 10 datas when running from 'F:\\BP\\训练样本.txt'!\n");
    printf(" 4.The program of BP can study itself for no more than 30000 times.\n"
           "   And surpassing the number, the program will be ended by itself in\n"
           "   preventing running infinitely because of error!\n");
    printf("\n\n\n");
    printf("Now press any key to start...\n");
    getch();
    clrscr();
    return 1;
}

/* exit screen */
int End_Show()
{
    printf("\n\n---------------------------------------------------\n");
    printf("The program has reached the end successfully!\n\n Press any key to exit!\n\n");
    printf("\n ***********************\n");
    printf(" *  This is the end    *\n");
    printf(" * of the program which*\n");
    printf(" * can calculate the BP*\n");
    printf(" *       model!        *\n");
    printf(" ***********************\n");
    printf(" *  Thanks for using!  *\n");
    printf(" *  Happy every day!   *\n");
    printf(" ***********************\n");
    getch();
    exit(0);
}

/* load the training samples */
int GetTrainingData()
{
    int m, i, j;
    int datr;
    if ((fp = fopen("f:\\bp\\训练样本.txt", "r")) == NULL) {
        printf("Cannot open file and strike any key exit!");
        getch();
        exit(1);
    }
    m = 0; i = 0; j = 0;
    while (fscanf(fp, "%d", &datr) != EOF) {
        j++;
        if (j <= (N * IN)) {                 /* first N*IN numbers: inputs */
            if (i < IN)
                Study_Data[m].input[i] = datr;
            if (m == (N - 1) && i == (IN - 1)) { m = 0; i = -1; }
            if (i == (IN - 1)) { m++; i = -1; }
        } else if ((N * IN) < j && j <= (N * (IN + ON))) {  /* then N*ON teacher values */
            if (i < ON)
                Study_Data[m].teach[i] = datr;
            if (m == (N - 1) && i == (ON - 1))
                printf("\n");
            if (i == (ON - 1)) { m++; i = -1; }
        }
        i++;
    }
    fclose(fp);
    printf("\nThere are [%d] datas that have been loaded successfully!\n", j);
    printf("\nShow the data which has been loaded as follows:\n");
    for (m = 0; m < N; m++) {
        for (i = 0; i < IN; i++)
            printf("\nStudy_Data[%d].input[%d]=%f", m, i, Study_Data[m].input[i]);
        for (j = 0; j < ON; j++)
            printf("\nStudy_Data[%d].teach[%d]=%f", m, j, Study_Data[m].teach[j]);
    }
    printf("\n\nPress any key to start calculating...");
    getch();
    return 1;
}

/* initialize weights and thresholds */
int initial()
{
    int i, ii, j, jj, k, kk;
    for (i = 0; i < HN; i++)        /* input->hidden weights, random in (-1, 1) */
        for (j = 0; j < IN; j++) {
            W[i][j] = (double)((rand() / 32767.0) * 2 - 1);
            printf("w[%d][%d]=%f\n", i, j, W[i][j]);
        }
    for (ii = 0; ii < ON; ii++)     /* hidden->output weights, random in (-1, 1) */
        for (jj = 0; jj < HN; jj++) {
            V[ii][jj] = (double)((rand() / 32767.0) * 2 - 1);
            printf("V[%d][%d]=%f\n", ii, jj, V[ii][jj]);
        }
    for (k = 0; k < HN; k++) {      /* hidden thresholds */
        YU_HN[k] = (double)((rand() / 32767.0) * 2 - 1);
        printf("YU_HN[%d]=%f\n", k, YU_HN[k]);
    }
    for (kk = 0; kk < ON; kk++)     /* output thresholds */
        YU_ON[kk] = (double)((rand() / 32767.0) * 2 - 1);
    return 1;
}

/* load the inputs of sample m */
int input_P(int m)
{
    int i;
    for (i = 0; i < IN; i++) {
        P[i] = Study_Data[m].input[i];
        printf("P[%d]=%f\n", i, P[i]);
    }
    return 1;
}

/* load the teacher signal of sample m */
int input_T(int m)
{
    int k;
    for (k = 0; k < ON; k++)
        T[k] = Study_Data[m].teach[k];
    return 1;
}

/* hidden-layer input/output */
int H_I_O()
{
    double sigma;
    int i, j;
    for (j = 0; j < HN; j++) {
        sigma = 0;
        for (i = 0; i < IN; i++)
            sigma += W[j][i] * P[i];       /* hidden-layer inner product */
        X[j] = sigma - YU_HN[j];           /* net input minus the threshold */
        H[j] = 1.0 / (1.0 + exp(-X[j]));   /* sigmoid output */
    }
    return 1;
}

/* output-layer input/output */
int O_I_O()
{
    int k, j;
    double sigma;
    for (k = 0; k < ON; k++) {
        sigma = 0.0;
        for (j = 0; j < HN; j++)
            sigma += V[k][j] * H[j];
        Y[k] = sigma - YU_ON[k];
        O[k] = 1.0 / (1.0 + exp(-Y[k]));
    }
    return 1;
}

/* output-layer error of sample m */
int Err_O_H(int m)
{
    int k;
    double abs_err[ON];
    double sqr_err = 0;
    for (k = 0; k < ON; k++) {
        abs_err[k] = T[k] - O[k];
        sqr_err  += (abs_err[k]) * (abs_err[k]);
        d_err[k]  = abs_err[k] * O[k] * (1.0 - O[k]);
        err_m[m]  = sqr_err / 2;
    }
    return 1;
}

double e_err[HN];

/* hidden-layer error */
int Err_H_I()
{
    int j, k;
    double sigma;
    for (j = 0; j < HN; j++) {
        sigma = 0.0;
        for (k = 0; k < ON; k++)
            sigma += d_err[k] * V[k][j];
        e_err[j] = sigma * H[j] * (1 - H[j]);
    }
    return 1;
}

/* save the weights of step m, for the momentum term */
int saveWV(int m)
{
    int i, ii, j, jj;
    for (i = 0; i < HN; i++)
        for (j = 0; j < IN; j++)
            Old_WV[m].old_W[i][j] = W[i][j];
    for (ii = 0; ii < ON; ii++)
        for (jj = 0; jj < HN; jj++)
            Old_WV[m].old_V[ii][jj] = V[ii][jj];
    return 1;
}

/* adjust hidden->output weights and output thresholds */
int Delta_O_H(int n)
{
    int k, j;
    if (n <= 1) {
        for (k = 0; k < ON; k++) {
            for (j = 0; j < HN; j++)
                V[k][j] = V[k][j] + a * d_err[k] * H[j];
            YU_ON[k] += a * d_err[k];
        }
    } else {
        for (k = 0; k < ON; k++) {
            for (j = 0; j < HN; j++)
                V[k][j] = V[k][j] + a * d_err[k] * H[j]
                        + alpha * (V[k][j] - Old_WV[n-1].old_V[k][j]);  /* momentum */
            YU_ON[k] += a * d_err[k];
        }
    }
    return 1;
}

/* adjust input->hidden weights and hidden thresholds */
int Delta_H_I(int n)
{
    int i, j;
    if (n <= 1) {
        for (j = 0; j < HN; j++) {
            for (i = 0; i < IN; i++)
                W[j][i] = W[j][i] + b * e_err[j] * P[i];
            YU_HN[j] += b * e_err[j];
        }
    } else {
        for (j = 0; j < HN; j++) {
            for (i = 0; i < IN; i++)
                W[j][i] = W[j][i] + b * e_err[j] * P[i]
                        + alpha * (W[j][i] - Old_WV[n-1].old_W[j][i]);  /* momentum */
            YU_HN[j] += b * e_err[j];
        }
    }
    return 1;
}

/* total error over all samples */
double Err_Sum()
{
    int m;
    double total_err = 0;
    for (m = 0; m < N; m++)
        total_err += err_m[m];
    return total_err;
}

/* save the trained weights and thresholds */
void savequan()
{
    int i, j, k;
    int ii, jj, kk;
    if ((fp = fopen("f:\\bp\\权值.txt", "a")) == NULL) {
        printf("Cannot open file strike any key exit!");
        getch();
        exit(1);
    }
    fprintf(fp, "Save the result of \"权值\" (weights) as follows:\n");
    for (i = 0; i < HN; i++)
        for (j = 0; j < IN; j++)
            fprintf(fp, "W[%d][%d]=%f\n", i, j, W[i][j]);
    fprintf(fp, "\n");
    for (ii = 0; ii < ON; ii++)
        for (jj = 0; jj < HN; jj++)
            fprintf(fp, "V[%d][%d]=%f\n", ii, jj, V[ii][jj]);
    fclose(fp);
    printf("\nThe result of \"权值.txt\" has been saved successfully!\nPress any key to continue...");
    getch();
    if ((fp = fopen("f:\\bp\\阈值.txt", "a")) == NULL) {
        printf("Cannot open file strike any key exit!");
        getch();
        exit(1);
    }
    fprintf(fp, "Save the result of \"输出层的阈值\" (output thresholds) as follows:\n");
    for (k = 0; k < ON; k++)
        fprintf(fp, "YU_ON[%d]=%f\n", k, YU_ON[k]);
    fprintf(fp, "\nSave the result of \"隐层的阈值\" (hidden thresholds) as follows:\n");
    for (kk = 0; kk < HN; kk++)
        fprintf(fp, "YU_HN[%d]=%f\n", kk, YU_HN[kk]);
    fclose(fp);
    printf("\nThe result of \"阈值.txt\" has been saved successfully!\nPress any key to continue...");
    getch();
}

/* program entry: the main routine */
void main()
{
    double Pre_error;
    double sum_err;
    int study;
    int flag;
    flag = 30000;
    a = 0.7;
    b = 0.7;
    alpha = 0.9;
    study = 0;
    Pre_error = 0.0001;
    Start_Show();       /* show the splash screen */
    GetTrainingData();
    initial();
    do {
        int m;
        ++study;
        for (m = 0; m < N; m++) {
            input_P(m);
            input_T(m);
            H_I_O();
            O_I_O();
            Err_O_H(m);
            Err_H_I();
            saveWV(m);
            Delta_O_H(m);
            Delta_H_I(m);
        }
        sum_err = Err_Sum();
        printf("sum_err=%f\n", sum_err);
        printf("Pre_error=%f\n\n", Pre_error);
        if (study > flag) {
            printf("\n*******************************\n");
            printf("The program is ended by itself because of error!\n"
                   "The learning times is surpassed!\n");
            printf("*****************************\n");
            getch();
            break;
        }
    } while (sum_err > Pre_error);
    printf("\n****************\n");
    printf("\nThe program have studyed for [%d] times!\n", study);
    printf("\n****************\n");
    savequan();         /* save the computed weights */
    End_Show();
}
```

Resulting output files:

```
权值.txt (weights):
W[0][0]=0.350578
W[0][1]=-1.008697
W[0][2]=-0.962250
W[1][0]=0.055661
W[1][1]=-0.372367
W[1][2]=-0.890795
W[2][0]=0.129752
W[2][1]=-0.332591
W[2][2]=-0.521561
V[0][0]=-2.932654
V[0][1]=-3.720583
V[0][2]=-2.648183
V[1][0]=2.938970
V[1][1]=1.633281
V[1][2]=1.944077

阈值.txt (thresholds):
YU_ON[0]=-4.226843
YU_ON[1]=1.501791
YU_HN[0]=-0.431459
YU_HN[1]=0.452127
YU_HN[2]=0.258449
```
```c
/* BP neural network algorithm, C version.
 * Compiles without syntax errors under VS2010 and can be run directly.
 * Lightly commented; feedback and discussion welcome. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define N_Out 2      /* output vector dimension */
#define N_In 3       /* input vector dimension */
#define N_Sample 6   /* number of samples */

/* BP artificial neural network */
typedef struct {
    int LayerNum;        /* number of hidden-layer nodes */
    double v[N_In][50];  /* input->hidden weight matrix; at most 50 hidden nodes */
    double w[50][N_Out]; /* hidden->output weight matrix */
    double StudyRate;    /* learning rate */
    double Accuracy;     /* accuracy-control parameter */
    int MaxLoop;         /* maximum number of loops */
} BPNet;

/* sigmoid function */
double fnet(double net)
{
    return 1 / (1 + exp(-net));
}

/* initialization */
int InitBpNet(BPNet *BP);
/* train the BP network: samples x, desired outputs y */
int TrainBpNet(BPNet *BP, double x[N_Sample][N_In], int y[N_Sample][N_Out]);
/* use the BP network */
int UseBpNet(BPNet *BP);

int main()
{
    /* training samples */
    double x[N_Sample][N_In] = {
        {0.8,0.5,0},{0.9,0.7,0.3},{1,0.8,0.5},
        {0,0.2,0.3},{0.2,0.1,1.3},{0.2,0.7,0.8}};
    /* desired outputs */
    int y[N_Sample][N_Out] = {{0,1},{0,1},{0,1},{1,1},{1,0},{1,0}};
    BPNet BP;
    InitBpNet(&BP);        /* initialize the BP network structure */
    TrainBpNet(&BP, x, y); /* train the BP neural network */
    UseBpNet(&BP);         /* test the BP neural network */
    return 1;
}

/* use the trained network */
int UseBpNet(BPNet *BP)
{
    double Input[N_In];
    double Out1[50];       /* hidden-layer outputs */
    double Out2[N_Out];    /* output-layer outputs */
    while (1) {            /* run until the program is interrupted */
        printf("Enter 3 numbers:\n");
        int i, j;
        for (i = 0; i < N_In; i++)
            scanf_s("%lf", &Input[i]);
        double Tmp;
        for (i = 0; i < (*BP).LayerNum; i++) {
            Tmp = 0;
            for (j = 0; j < N_In; j++)
                Tmp += Input[j] * (*BP).v[j][i];
            Out1[i] = fnet(Tmp);
        }
        for (i = 0; i < N_Out; i++) {
            Tmp = 0;
            for (j = 0; j < (*BP).LayerNum; j++)
                Tmp += Out1[j] * (*BP).w[j][i];
            Out2[i] = fnet(Tmp);
        }
        printf("Result: ");
        for (i = 0; i < N_Out; i++)
            printf("%.3f ", Out2[i]);
        printf("\n");
    }
    return 1;
}

/* train the BP network */
int TrainBpNet(BPNet *BP, double x[N_Sample][N_In], int y[N_Sample][N_Out])
{
    double f = (*BP).Accuracy;      /* accuracy-control parameter */
    double a = (*BP).StudyRate;     /* learning rate */
    int LayerNum = (*BP).LayerNum;  /* hidden-layer node count */
    double v[N_In][50], w[50][N_Out];   /* weight matrices */
    double ChgH[50], ChgO[N_Out];       /* correction matrices */
    double Out1[50], Out2[N_Out];       /* hidden and output layer outputs */
    int MaxLoop = (*BP).MaxLoop;        /* maximum loop count */
    int i, j, k, n;
    double Tmp;
    for (i = 0; i < N_In; i++)      /* copy the weights out of the struct */
        for (j = 0; j < LayerNum; j++)
            v[i][j] = (*BP).v[i][j];
    for (i = 0; i < LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            w[i][j] = (*BP).w[i][j];
    double e = f + 1;
    for (n = 0; e > f && n < MaxLoop; n++) {   /* train the net on every sample */
        e = 0;
        for (i = 0; i < N_Sample; i++) {
            for (k = 0; k < LayerNum; k++) {   /* hidden-layer output vector */
                Tmp = 0;
                for (j = 0; j < N_In; j++)
                    Tmp = Tmp + x[i][j] * v[j][k];
                Out1[k] = fnet(Tmp);
            }
            for (k = 0; k < N_Out; k++) {      /* output-layer output vector */
                Tmp = 0;
                for (j = 0; j < LayerNum; j++)
                    Tmp = Tmp + Out1[j] * w[j][k];
                Out2[k] = fnet(Tmp);
            }
            for (j = 0; j < N_Out; j++)        /* output-layer corrections */
                ChgO[j] = Out2[j] * (1 - Out2[j]) * (y[i][j] - Out2[j]);
            for (j = 0; j < N_Out; j++)        /* output error */
                e = e + (y[i][j] - Out2[j]) * (y[i][j] - Out2[j]);
            for (j = 0; j < LayerNum; j++) {   /* hidden-layer corrections */
                Tmp = 0;
                for (k = 0; k < N_Out; k++)
                    Tmp = Tmp + w[j][k] * ChgO[k];
                ChgH[j] = Tmp * Out1[j] * (1 - Out1[j]);
            }
            for (j = 0; j < LayerNum; j++)     /* update hidden->output weights */
                for (k = 0; k < N_Out; k++)
                    w[j][k] = w[j][k] + a * Out1[j] * ChgO[k];
            for (j = 0; j < N_In; j++)         /* update input->hidden weights */
                for (k = 0; k < LayerNum; k++)
                    v[j][k] = v[j][k] + a * x[i][j] * ChgH[k];
        }
        if (n % 10 == 0)
            printf("error: %f\n", e);
    }
    printf("total loops: %d\n", n);
    printf("adjusted input->hidden weight matrix:\n");
    for (i = 0; i < N_In; i++) {
        for (j = 0; j < LayerNum; j++)
            printf("%f ", v[i][j]);
        printf("\n");
    }
    printf("adjusted hidden->output weight matrix:\n");
    for (i = 0; i < LayerNum; i++) {
        for (j = 0; j < N_Out; j++)
            printf("%f ", w[i][j]);
        printf("\n");
    }
    for (i = 0; i < N_In; i++)      /* copy the result back into the struct */
        for (j = 0; j < LayerNum; j++)
            (*BP).v[i][j] = v[i][j];
    for (i = 0; i < LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            (*BP).w[i][j] = w[i][j];
    printf("BP network training finished!\n");
    return 1;
}

/* initialization */
int InitBpNet(BPNet *BP)
{
    printf("Enter the number of hidden-layer nodes (at most 50):\n");
    scanf_s("%d", &(*BP).LayerNum);
    printf("Enter the learning rate:\n");
    scanf_s("%lf", &(*BP).StudyRate);   /* StudyRate is a double, hence %lf */
    printf("Enter the accuracy-control parameter:\n");
    scanf_s("%lf", &(*BP).Accuracy);
    printf("Enter the maximum loop count:\n");
    scanf_s("%d", &(*BP).MaxLoop);
    int i, j;
    srand((unsigned)time(NULL));
    for (i = 0; i < N_In; i++)
        for (j = 0; j < (*BP).LayerNum; j++)
            (*BP).v[i][j] = rand() / (double)(RAND_MAX);
    for (i = 0; i < (*BP).LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            (*BP).w[i][j] = rand() / (double)(RAND_MAX);
    return 1;
}
```