![知识共享许可协议](https://csdnimg.cn/release/phoenix/images/creativecommons/80x15.png)
实验目的
- 理解BP神经网络和离散Hopfield神经网络的结构和原理
- 掌握反向传播学习算法对神经元的训练过程,了解反向传播公式
- 通过构建BP网络和离散Hopfield网络模式识别实例,熟悉前馈网络和反馈网络的原理及结构
- 通过编写源代码理解基于神经网络的模式识别
BP神经网络代码
#include <iostream>
#include <vector>
#include <map>
#include <string>
#include <string.h>
#include <queue>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <algorithm>
using namespace std;
#define train_cycle 1000 // maximum number of training epochs
#define train_step 0.3 // learning rate for the weight updates
#define train_num 10 // number of digits to train (0-9)
#define table_len 9 // bitmap height (rows)
#define table_wid 7 // bitmap width (columns)
#define max_lim 0.9 // outputs >= this are snapped to 1
#define min_lim 0.1 // outputs <= this are snapped to 0
#define input_num 63 // input-layer neurons (9 * 7 pixels)
#define hidden_num 30 // hidden-layer neurons
#define output_num 10 // output-layer neurons (one per digit)
// Input-layer neuron: its forwarded activation and the weights of its
// outgoing connections to every hidden neuron.
struct input_neurons {
// double inp;
double outp; // activation fed forward (sigmoid of the raw pixel value)
double w[hidden_num]; // weight to each hidden neuron
}input_n[input_num];
// Hidden-layer neuron: accumulated net input, activation, and the weights
// of its outgoing connections to every output neuron.
struct hidden_neurons {
double inp; // weighted sum of input-layer activations
double outp; // sigmoid(inp)
double w[output_num]; // weight to each output neuron
}hidden_n[hidden_num];
// Output-layer neuron: accumulated net input and activation.
struct output_neurons {
double inp; // weighted sum of hidden-layer activations
double outp; // sigmoid(inp)
}output_n[output_num];
// Training data: one clean 9x7 binary bitmap per digit 0-9.
bool trainData[train_num][table_len][table_wid] = {
0,0,0,0,0,0,0, // digit 0 (start)
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 0 (end)
0,0,0,0,0,0,0, // digit 1 (start)
0,0,0,1,0,0,0,
0,0,1,1,0,0,0,
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 1 (end)
0,0,0,0,0,0,0, // digit 2 (start)
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,1,1,1,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 2 (end)
0,0,0,0,0,0,0, // digit 3 (start)
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 3 (end)
0,0,0,0,0,0,0, // digit 4 (start)
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,0,0,1,0,0,
0,1,1,1,1,1,0,
0,0,0,0,1,0,0,
0,0,0,0,1,0,0,
0,0,0,0,1,0,0,
0,0,0,0,0,0,0, // digit 4 (end)
0,0,0,0,0,0,0, // digit 5 (start)
0,1,1,1,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 5 (end)
0,0,0,0,0,0,0, // digit 6 (start)
0,1,1,1,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 6 (end)
0,0,0,0,0,0,0, // digit 7 (start)
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,1,0,0,
0,0,0,1,0,0,0,
0,0,1,0,0,0,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,0,0,0,0,0,0, // digit 7 (end)
0,0,0,0,0,0,0, // digit 8 (start)
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // digit 8 (end)
0,0,0,0,0,0,0, // digit 9 (start)
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,0,0 // digit 9 (end)
};
// Test data: noisy/distorted versions of the training bitmaps, used to
// check the trained network's ability to generalize.
bool testData[train_num][table_len][table_wid] = {
0,0,0,0,0,0,0, // noisy 0 (start)
0,1,1,0,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 0 (end)
0,0,0,0,0,0,0, // noisy 1 (start)
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,0,0,0,0,0,0,
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,0,0,1,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 1 (end)
0,0,0,0,0,0,0, // noisy 2 (start)
0,1,1,1,1,1,0,
0,0,0,0,0,0,0,
0,0,0,0,0,1,0,
0,1,0,1,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,0,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 2 (end)
0,0,0,0,0,0,0, // noisy 3 (start)
0,1,1,0,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 3 (end)
0,0,0,0,0,0,0, // noisy 4 (start)
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,0,0,1,0,0,
0,0,1,1,1,1,0,
0,0,0,0,0,0,0,
0,0,0,0,1,0,0,
0,0,0,0,1,0,0,
0,0,0,0,0,0,0, // noisy 4 (end)
0,0,0,0,0,0,0, // noisy 5 (start)
0,1,1,1,1,0,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,0,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 5 (end)
0,0,0,0,0,0,0, // noisy 6 (start)
0,1,0,1,1,1,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,1,1,1,1,0,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 6 (end)
0,0,0,0,0,0,0, // noisy 7 (start)
0,1,0,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,1,0,0,
0,0,0,1,0,0,0,
0,0,1,0,0,0,0,
0,1,0,0,0,0,0,
0,1,0,0,0,0,0,
0,0,0,0,0,0,0, // noisy 7 (end)
0,0,0,0,0,0,0, // noisy 8 (start)
0,1,0,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,0,0,
0,1,1,1,1,1,0,
0,1,0,0,0,1,0,
0,1,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,0,0, // noisy 8 (end)
0,0,0,0,0,0,0, // noisy 9 (start)
0,1,1,1,0,1,0,
0,1,0,0,0,1,0,
0,0,0,0,0,1,0,
0,1,1,1,1,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,1,0,
0,0,0,0,0,0,0 // noisy 9 (end)
};
// Expected outputs: one-hot target vector per digit (row k has a 1 in
// output node k and 0 elsewhere).
double y[train_num][output_num] = {
1,0,0,0,0,0,0,0,0,0, // target for digit 0
0,1,0,0,0,0,0,0,0,0, // target for digit 1
0,0,1,0,0,0,0,0,0,0, // target for digit 2
0,0,0,1,0,0,0,0,0,0, // target for digit 3
0,0,0,0,1,0,0,0,0,0, // target for digit 4
0,0,0,0,0,1,0,0,0,0, // target for digit 5
0,0,0,0,0,0,1,0,0,0, // target for digit 6
0,0,0,0,0,0,0,1,0,0, // target for digit 7
0,0,0,0,0,0,0,0,1,0, // target for digit 8
0,0,0,0,0,0,0,0,0,1 // target for digit 9
};
// Output buffer: last forward pass's output-layer activations, with
// values >= max_lim snapped to 1 and <= min_lim snapped to 0 (see NN_run).
double ans[output_num];
// Logistic sigmoid activation: maps any real input into (0, 1).
double sigmoid(double x) {
    return 1.0 / (1.0 + exp(-x));
}
// Derivative of the sigmoid, evaluated at pre-activation x:
// s'(x) = s(x) * (1 - s(x)).
double sigmoid_der(double x) {
    const double s = 1.0 / (1.0 + exp(-x));
    return s * (1.0 - s);
}
// Draw a pseudo-random weight in [-1.00, 0.99] with a 0.01 step:
// rand() % 200 yields 0..199, shifted to -100..99, then scaled by 1/100.
double pro_decimal() {
    const int raw = rand() % 200 - 100;
    return raw / 100.0;
}
// Randomize every connection weight in [-1, 1) and clear the output buffer.
void init_w() {
    memset(ans, 0, sizeof(ans));
    // input-layer -> hidden-layer weights
    for (int in = 0; in < input_num; in++) {
        for (int hid = 0; hid < hidden_num; hid++) {
            input_n[in].w[hid] = pro_decimal();
        }
    }
    // hidden-layer -> output-layer weights
    for (int hid = 0; hid < hidden_num; hid++) {
        for (int out = 0; out < output_num; out++) {
            hidden_n[hid].w[out] = pro_decimal();
        }
    }
}
// Forward pass: feed one 9x7 bitmap through the network and fill ans[]
// with the output-layer activations (saturated values snapped to 0/1).
void NN_run(bool(*NN_input)[table_wid]) {
    // Reset the accumulated net inputs from any previous run.
    for (int h = 0; h < hidden_num; h++) hidden_n[h].inp = 0;
    for (int o = 0; o < output_num; o++) output_n[o].inp = 0;
    // Load the bitmap: each pixel (0/1) is passed through the sigmoid,
    // so the input activations are 0.5 / ~0.73 rather than raw bits.
    for (int row = 0; row < table_len; row++) {
        for (int col = 0; col < table_wid; col++) {
            input_n[row * table_wid + col].outp = sigmoid(NN_input[row][col]);
        }
    }
    // Propagate input layer -> hidden layer.
    for (int h = 0; h < hidden_num; h++) {
        for (int i = 0; i < input_num; i++) {
            hidden_n[h].inp += input_n[i].outp * input_n[i].w[h];
        }
    }
    for (int h = 0; h < hidden_num; h++) {
        hidden_n[h].outp = sigmoid(hidden_n[h].inp);
    }
    // Propagate hidden layer -> output layer.
    for (int o = 0; o < output_num; o++) {
        for (int h = 0; h < hidden_num; h++) {
            output_n[o].inp += hidden_n[h].outp * hidden_n[h].w[o];
        }
    }
    // Activate the outputs; snap near-saturated values to exact 0/1 so
    // if_accomplish() can compare against the one-hot targets.
    for (int o = 0; o < output_num; o++) {
        output_n[o].outp = sigmoid(output_n[o].inp);
        ans[o] = output_n[o].outp;
        if (ans[o] >= max_lim) ans[o] = 1;
        if (ans[o] <= min_lim) ans[o] = 0;
    }
}
// Returns true once training is complete: running the net on every
// training digit k must produce exactly the one-hot vector for k.
bool if_accomplish() {
    for (int digit = 0; digit < train_num; digit++) {
        NN_run(trainData[digit]);
        for (int node = 0; node < output_num; node++) {
            const double expected = (node == digit) ? 1 : 0;
            if (ans[node] != expected) {
                return false;
            }
        }
    }
    return true;
}
void BP_study() {
double d_output[output_num];
double d_hidden[hidden_num];
for (int cycle = 0; cycle < train_cycle; cycle++) {
if (if_accomplish()) {
cout << "训练完成啦!!!" << endl;
cout << "输入层与隐层之间的权值" << endl;
for (int i = 0; i < input_num; i++) {
for (int j = 0; j < hidden_num; j++) {
printf("%.2f ", input_n[i].w[j]);
}
cout << endl;
}
cout << "隐层与输出层之间的权值" << endl;
for (int i = 0; i < hidden_num; i++) {
for (int j = 0; j < output_num; j++) {
printf("%.2f ", hidden_n[i].w[j]);
}
cout << endl;
}
return;
}
for (int train_no = 0; train_no < train_num; train_no++) {
NN_run(trainData[train_no]);
memset(d_output, 0, sizeof(d_output));
memset(d_hidden, 0, sizeof(d_hidden));
//计算输出层误差信号(书P215-8.17b)
for (int i = 0; i < output_num; i++) {
d_output[i] = sigmoid_der(ans[i])*(ans[i] - y[train_no][i]);
}
//计算隐层误差信号(书P215-8.18b)
for (int i = 0; i < hidden_num; i++) {
for (int j = 0; j < output_num; j++) {
d_hidden[i] += d_output[j] * hidden_n[i].w[j];
}
d_hidden[i] *= sigmoid_der(hidden_n[i].outp);
}
//更新隐层和输出层之间的权重(书P215-8.16b)
for (int i = 0; i < output_num; i++) {
for (int j = 0; j < hidden_num; j++) {
hidden_n[j].w[i] -= train_step * d_output[i] * hidden_n[j].outp;
}
}
//更新输入层和隐层之间的权重(书P215-8.16b)
for (int i = 0; i < hidden_num; i++) {
for (int j = 0; j < input_num; j++) {
input_n[j].w[i] -= train_step * d_hidden[i] * input_n[j].outp;
}
}
}
}
return;
}
//神经网络输出
void NN_print() {
int x;
cout << "请在0-9中选择一个要测试的数字输入,输入-1结束:" << endl;
while (cin >> x && x != -1) {
cout << "输入测试的数据图像为:" << endl;
for (int i = 0; i < table_len; i++) {
for (int j = 0; j < table_wid; j++) {
cout << testData[x][i][j] << " ";
}
cout << endl;
}
NN_run(testData[x]);
double max = 0, maxi;
for (int i = 0; i < output_num; i++) {
cout << "节点" << i << "输出概率为:" << ans[i] << endl;
if (ans[i] > max) {
max = ans[i];
maxi = i;
}
}
cout << "输入图像数字应该为" << maxi << endl << endl;
}
}
// Entry point: seed the RNG, randomize the weights, train the network,
// then run the interactive recognition loop.
int main() {
    srand(static_cast<unsigned>(time(NULL)));
    init_w();
    BP_study();
    NN_print();
    return 0;
}
Hopfield神经网络代码
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <string>
#include <string.h>
#include <iostream>
#include <queue>
#include <algorithm>
using namespace std;
#define maxl 100 // maximum pattern length
#define maxn 100 // maximum number of stored patterns
// n: number of stored patterns; l: pattern length (arrays are 1-indexed);
// w: symmetric connection-weight matrix (diagonal left at 0);
// a: stored 0/1 patterns; multi_result: state produced by the last update.
int n, l, w[maxl][maxl], a[maxn][maxl], multi_result[maxl];
// Hebbian connection weight between units x and y: the sum, over all
// stored patterns, of the product of their bipolar values (a 0 bit is
// treated as -1) at positions x and y.
int get_w(int x, int y) {
    int sum = 0; // renamed from `w`, which shadowed the global weight matrix
    for (int p = 1; p <= n; p++) {
        const int sx = (a[p][x] == 0) ? -1 : a[p][x];
        const int sy = (a[p][y] == 0) ? -1 : a[p][y];
        sum += sx * sy;
    }
    return sum;
}
void matrix_multi(int t[maxl]) {
memset(multi_result, 0, sizeof(multi_result));
for (int i = 1; i <= l; i++) {
for (int j = 1; j <= l; j++) {
multi_result[i] += t[j] * w[i][j];
}
multi_result[i] >= 0 ? multi_result[i] = 1 : multi_result[i] = 0;
}
for (int i = 1; i <= l; i++) {
cout << multi_result[i] << " ";
}
cout << endl;
}
// Hopfield demo driver: read the stored patterns, build the weight matrix,
// then repeatedly read a probe state and iterate synchronous updates until
// the state is stable.
int main() {
    memset(a, 0, sizeof(a));
    memset(w, 0, sizeof(w));
    cout << "请输入要输入数据组数及每组数据的长度:" << endl;
    cin >> n >> l;
    // Read the n stored patterns (1-indexed, values 0/1).
    for (int i = 1; i <= n; i++) {
        for (int j = 1; j <= l; j++) {
            cin >> a[i][j];
        }
    }
    // Upper triangle of the Hebbian weight matrix...
    for (int i = 1; i < l; i++) {
        for (int j = i + 1; j <= l; j++) {
            w[i][j] = get_w(i, j);
        }
    }
    // ...mirrored to the lower triangle; the diagonal stays 0 from memset.
    for (int i = 2; i <= l; i++) {
        for (int j = 1; j < i; j++) {
            w[i][j] = w[j][i];
        }
    }
    cout << "连接权值为:" << endl;
    for (int i = 1; i <= l; i++) {
        for (int j = 1; j <= l; j++) {
            cout << w[i][j]<<" ";
        }
        cout << endl;
    }
    cout << "请输入测试数据:" << endl;
    int flag = 1;
    while (flag) {
        int t[maxl];
        memset(t, 0, sizeof(t));
        for (int i = 1; i <= l; i++) {
            cin >> t[i];
        }
        cout << "开始迭代:" << endl;
        matrix_multi(t);
        // Iterate until the state stops changing. Three BUGFIXES here:
        //  - memcmp's length was `l + 1` BYTES (not ints), so only the first
        //    one or two elements were ever compared;
        //  - memcpy copied elements 0..l-1 of the 1-indexed arrays, silently
        //    dropping t[l];
        //  - matrix_multi was re-invoked with multi_result as its own input,
        //    which the function immediately memsets to zero — pass t instead.
        while (memcmp(&t[1], &multi_result[1], l * sizeof(int)) != 0) {
            memcpy(&t[1], &multi_result[1], l * sizeof(int));
            matrix_multi(t);
        }
        cout << "达到稳定,稳定状态为:" << endl;
        for (int i = 1; i <= l; i++) {
            cout << multi_result[i] << " ";
        }
        cout << endl;
        cout << "结束输入请输入0,继续输入则输入其他数字:";
        cin >> flag;
    }
}
学习心得
通过本次实验,我理解了BP神经网络和离散Hopfield神经网络的结构与原理,掌握了反向传播学习算法对神经元的训练过程及反向传播公式;通过构建BP网络和离散Hopfield网络的模式识别实例,熟悉了前馈网络与反馈网络的原理和结构,理解了模式识别的原理,并掌握了识别过程的程序设计方法。