前言:最近接触到一些神经网络的东西,看到很多人使用PSO(粒子群优化算法)优化BP神经网络中的权值和偏置,经过一段时间的研究,写了一些代码,能够跑通,嫌弃速度慢的可以改一下训练次数或者适应度函数。
在我的理解里,PSO优化BP的初始权值w和偏置b,有点像数据迁移:相当于用粒子去尝试作为网络的初始参数,再由BP自身训练去微调这些参数,所以总能看到"PSO优化了权值和阈值"的说法(一开始我没有想通为什么能够优化阈值)。下面是我的代码实现过程,关于BP和PSO的原理就不一一赘述了,网上有很多大佬解释得很详细了。
首先是利用BP作为适应度函数
function [error] = BP_fit(gbest,input_num,hidden_num,output_num,net,inputn,outputn)
%BP_fit  PSO fitness function: decode a particle into BP weights/biases,
%        run a short training pass, and return the resulting error.
%   gbest      : particle to evaluate, a row vector laid out as
%                [w1 | B1 | w2 | B2]
%   input_num  : number of input-layer nodes
%   hidden_num : number of hidden-layer nodes
%   output_num : number of output-layer nodes
%   net        : feed-forward network to parameterize
%   inputn     : normalized training inputs
%   outputn    : normalized training targets
%   error      : sum of absolute output errors after training
% Section boundaries inside the particle vector.
idx1 = input_num * hidden_num;          % end of w1
idx2 = idx1 + hidden_num;               % end of B1
idx3 = idx2 + hidden_num * output_num;  % end of w2
idx4 = idx3 + output_num;               % end of B2
w1 = gbest(1:idx1);
B1 = gbest(idx1 + 1:idx2);
w2 = gbest(idx2 + 1:idx3);
B2 = gbest(idx3 + 1:idx4);
% Install the decoded parameters into the network.
net.iw{1,1} = reshape(w1,hidden_num,input_num);
net.lw{2,1} = reshape(w2,output_num,hidden_num);
net.b{1} = reshape(B1,hidden_num,1);
net.b{2} = B2';
% Short, silent training run used only to score this particle.
net.trainParam.epochs = 200;
net.trainParam.lr = 0.05;
net.trainParam.goal = 0.000001;
net.trainParam.show = 100;
net.trainParam.showWindow = 0;
net = train(net,inputn,outputn);
ty = sim(net,inputn);
error = sum(sum(abs((ty - outputn))));
end
然后是PSO部分:
%% PSO-optimized initial weights/biases for a BP network
%  (6R manipulator inverse kinematics)
clear;
close;
clc;
% BP parameters:
%   input_num  : number of input-layer nodes
%   output_num : number of output-layer nodes
%   hidden_num : number of hidden-layer nodes
%   inputn     : normalized network inputs
%   outputn    : normalized network targets
% PSO parameters:
%   max_iters : maximum number of iterations
%   w         : inertia weight
%   c1,c2     : acceleration coefficients
%   m         : swarm size (number of particles)
%   n         : particle length = total count of BP weights and biases
%   gbest     : best particle position found so far
% NOTE: the original header had m and n swapped (it called m the particle
% length and n the swarm size), contradicting how they are used below.
format long
input_num = 3;
output_num = 3;
hidden_num = 25;
max_iters = 10;
m = 500;    % swarm size
n = input_num * hidden_num + hidden_num + hidden_num * output_num + output_num; % particle length
w = 0.1;
c1 = 2;
c2 = 2;
% Load network inputs (points in space) and targets (joint angles).
load('pfile_i2.mat')
load('pfile_o2.mat')
inputs_1 = inputs_2';
outputs_1 = outputs_2';
train_x = inputs_1(:,1:490);
train_y = outputs_1(1:3,1:490);
test_x = inputs_1(:,491:500);
test_y = outputs_1(1:3,491:500);
[inputn,inputps] = mapminmax(train_x);
[outputn,outputps] = mapminmax(train_y);
net = newff(inputn,outputn,hidden_num);
% Per-dimension position bounds. Particle layout is [w1 | B1 | w2 | B2]:
% weight sections lie in [-0.01, 3.8], bias sections in [1, 8].
idx1 = input_num * hidden_num;          % end of w1
idx2 = idx1 + hidden_num;               % end of B1
idx3 = idx2 + hidden_num * output_num;  % end of w2
MinX = zeros(1,n);
MaxX = zeros(1,n);
MinX(1:idx1) = -0.01;          MaxX(1:idx1) = 3.8;      % w1 bounds
MinX(idx1+1:idx2) = 1;         MaxX(idx1+1:idx2) = 8;   % B1 bounds
MinX(idx2+1:idx3) = -0.01;     MaxX(idx2+1:idx3) = 3.8; % w2 bounds
MinX(idx3+1:n) = 1;            MaxX(idx3+1:n) = 8;      % B2 bounds
%% Initialize positions and velocities
pop = rands(m,n);
V = 0.15 * rands(m,n);
% Clamp the initial particles into the allowed range.
for i = 1:m
    pop(i,:) = min(max(pop(i,:),MinX),MaxX);
end
% Evaluate the initial swarm.
Error = zeros(m,1);
for s = 1:m
    Error(s,1) = BP_fit(pop(s,:),input_num,hidden_num,output_num,net,inputn,outputn);
end
% Global best: particle with the smallest fitness.
[Errorleast,best_idx] = min(Error);
gbest = pop(best_idx,:);
% Personal bests start as the initial particles themselves.
ibest = pop;
error = zeros(m,1);
for kg = 1:max_iters
    for s = 1:m
        % Mutation: each element of particle s is re-randomized with
        % probability 0.04. (The original looped over the whole swarm
        % here, re-randomizing every particle m times per generation,
        % and its comment claimed a 52% rate.)
        for j = 1:n
            if rand(1) < 0.04
                pop(s,j) = rands(1);
            end
        end
        % Standard PSO velocity/position update.
        r1 = rand(1);
        r2 = rand(1);
        V(s,:) = w * V(s,:) + c1 * r1 * (ibest(s,:)-pop(s,:)) + c2 * r2 * (gbest(1,:)-pop(s,:));
        pop(s,:) = pop(s,:) + 0.3 * V(s,:);
        % Clamp the updated particle back into bounds. (The original
        % clamped the B2 section with the stale loop variable i instead
        % of s.)
        pop(s,:) = min(max(pop(s,:),MinX),MaxX);
        % Fitness of the updated particle. (The original evaluated the
        % stale variable indivi left over from the init loop, so the
        % updated position was never scored.)
        error(s,1) = BP_fit(pop(s,:),input_num,hidden_num,output_num,net,inputn,outputn);
        % Update the personal best.
        if error(s) < Error(s)
            ibest(s,:) = pop(s,:);
            Error(s,1) = error(s);
        end
        % Update the global best. (The original wrote gbest(s,:), which
        % silently grew gbest into an m-by-n array.)
        if error(s) < Errorleast
            gbest = pop(s,:);
            Errorleast = error(s);
        end
    end
    Best(kg,:) = Errorleast;   % convergence history
end
%plot(Best);
save pfile_gbest gbest;
最后是利用训练好的最优粒子去训练网络:
%% Train a BP network initialized from the PSO-optimized particle.
clear
clc
close;
load pfile_gbest;   % brings gbest into the workspace
input_num = 3;
output_num = 3;
hidden_num = 25;
% Load data and normalize.
load('pfile_i2.mat')
load('pfile_o2.mat')
inputs_1 = inputs_2';
outputs_1 = outputs_2';
train_x = inputs_1(:,1:490);
train_y = outputs_1(1:3,1:490);
test_x = inputs_1(:,491:500);
test_y = outputs_1(1:3,491:500);
[inputn,inputps] = mapminmax(train_x);
[outputn,outputps] = mapminmax(train_y);
% Build the network FIRST, then install the PSO-optimized parameters.
% (The original assigned the weights/biases to a bare struct and called
% newff afterwards, which re-created the network and discarded every
% optimized value; the trainParam settings made before newff were lost
% the same way.)
net = newff(inputn,outputn,hidden_num);
% Decode the particle: layout is [w1 | B1 | w2 | B2].
idx1 = input_num * hidden_num;          % end of w1
idx2 = idx1 + hidden_num;               % end of B1
idx3 = idx2 + hidden_num * output_num;  % end of w2
idx4 = idx3 + output_num;               % end of B2
w1 = gbest(1:idx1);
B1 = gbest(idx1 + 1:idx2);
w2 = gbest(idx2 + 1:idx3);
B2 = gbest(idx3 + 1:idx4);
net.iw{1,1} = reshape(w1,hidden_num,input_num);
net.lw{2,1} = reshape(w2,output_num,hidden_num);
net.b{1} = reshape(B1,hidden_num,1);
net.b{2} = B2';
% Training options.
net.trainParam.epochs = 200;
net.trainParam.lr = 0.05;
net.trainParam.goal = 0.000001;
[net,per2] = train(net,inputn,outputn);
% Evaluate on the 10 held-out samples.
inputn_test = mapminmax('apply',test_x,inputps);
ty = sim(net,inputn_test);
net_J = mapminmax('reverse',ty,outputps);
error = abs(test_y - net_J);
水平有限,代码仅供参考,欢迎大家指正交流。
