粒子群算法(PSO)關於參數w的一些改進方法


(一)線性遞減
function [xm,fv] = PSO_lin(fitness,N,c1,c2,wmax,wmin,M,D)
% PSO_lin  Particle swarm optimization with linearly decreasing inertia weight.
%   fitness - handle of the objective function to MINIMIZE
%   N       - swarm size (number of particles)
%   c1      - cognitive (personal-best) acceleration coefficient
%   c2      - social (global-best) acceleration coefficient
%   wmax    - initial (largest) inertia weight
%   wmin    - final (smallest) inertia weight
%   M       - maximum number of iterations
%   D       - dimension of the search space
%   xm      - argument (column vector) at which the minimum was found
%   fv      - minimum objective value found
format long;
%%%%%%% Initialize particle positions and velocities %%%%%%%
x = zeros(N,D);                  % preallocate: positions
v = zeros(N,D);                  % preallocate: velocities
for i = 1:N
    for j = 1:D
        x(i,j) = randn;
        v(i,j) = randn;
    end
end
%%%%%%% Evaluate initial fitness; initialize Pi (personal) and Pg (global) best %%%%%%%
p = zeros(1,N);                  % p(i): best fitness seen by particle i
y = zeros(N,D);                  % y(i,:): best position seen by particle i
for i = 1:N
    p(i) = fitness(x(i,:));
    y(i,:) = x(i,:);
end
pg = x(N,:);                     % Pg: global best position
for i = 1:(N-1)
    % NOTE(review): the scraped source had "if fitness(x(i,:))" with the
    % comparison truncated; restored to the standard minimization test.
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%%%%%%% Main loop: iterate the standard PSO update equations %%%%%%%
for t = 1:M
    % w decreases linearly from wmax (t=1) to wmin (t=M). It does not
    % depend on i, so compute it once per iteration; guard M==1, which
    % would otherwise divide by zero (0/0 -> NaN) as in the original.
    if M > 1
        w = wmax - (t-1)*(wmax-wmin)/(M-1);
    else
        w = wmax;
    end
    for i = 1:N
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)        % particle improved its personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)            % personal best beats the global best
            pg = y(i,:);
        end
    end
    Pbest(t) = fitness(pg);  %#ok<AGROW>  % convergence history (kept; not returned)
end
xm = pg';
fv = fitness(pg);
(二)自適應
function [xm,fv] = PSO_adaptation(fitness,N,c1,c2,wmax,wmin,M,D)
% PSO_adaptation  PSO with fitness-adaptive inertia weight: particles that
% are fitter than the swarm average get a smaller w (local refinement),
% worse-than-average particles keep w = wmax (global exploration).
%   fitness - handle of the objective function to MINIMIZE
%   N       - swarm size
%   c1, c2  - cognitive / social acceleration coefficients
%   wmax    - largest inertia weight
%   wmin    - smallest inertia weight
%   M       - maximum number of iterations
%   D       - dimension of the search space
%   xm      - argument (column vector) at which the minimum was found
%   fv      - minimum objective value found
format long;
%%%%%%% Initialize particle positions and velocities %%%%%%%
x = zeros(N,D);
v = zeros(N,D);
for i = 1:N
    for j = 1:D
        x(i,j) = randn;
        v(i,j) = randn;
    end
end
%%%%%%% Evaluate initial fitness; initialize personal and global bests %%%%%%%
p = zeros(1,N);                  % p(i): best fitness seen by particle i
y = zeros(N,D);                  % y(i,:): best position seen by particle i
for i = 1:N
    p(i) = fitness(x(i,:));
    y(i,:) = x(i,:);
end
pg = x(N,:);                     % Pg: global best position
for i = 1:(N-1)
    % NOTE(review): comparison was truncated in the scraped source;
    % restored to the standard minimization test.
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%%%%%%% Main loop %%%%%%%
for t = 1:M
    % Current fitness of every particle. The original stored this in a
    % vector named fv, shadowing the scalar output; renamed to fvals.
    fvals = zeros(1,N);
    for j = 1:N
        fvals(j) = fitness(x(j,:));
    end
    fvag = sum(fvals)/N;         % swarm-average fitness
    fmin = min(fvals);           % best (smallest) fitness this iteration
    for i = 1:N
        if fvals(i) <= fvag
            % Interpolate w in [wmin, wmax] by how close this particle is
            % to the iteration best. Guard fvag == fmin (all particles
            % equally fit), which would otherwise give w = 0/0 = NaN.
            if fvag - fmin > eps
                w = wmin + (fvals(i)-fmin)*(wmax-wmin)/(fvag-fmin);
            else
                w = wmin;
            end
        else
            w = wmax;
        end
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)        % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)            % improved global best
            pg = y(i,:);
        end
    end
end
xm = pg';          % minimizer
fv = fitness(pg);  % minimum objective value
 
(三)增加學習因子
% NOTE(review): the scrape lost this function's header line. It is
% reconstructed below from the names the body uses (fitness, N, c1, c2,
% lamda, M, D) and the [xm, fv] outputs shared by the sibling variants --
% confirm the intended function name/signature against the original.
function [xm,fv] = PSO_sa(fitness,N,c1,c2,lamda,M,D)
% PSO_sa  PSO with a constriction factor plus simulated-annealing style
% Boltzmann selection of the attractor used in the velocity update.
%   fitness - handle of the objective function to MINIMIZE (should be
%             positive at the initial global best so T > 0)
%   N       - swarm size
%   c1, c2  - acceleration coefficients; c1 + c2 must exceed 4 so the
%             constriction factor below is real-valued
%   lamda   - annealing (cooling) rate, 0 < lamda < 1
%   M       - maximum number of iterations
%   D       - dimension of the search space
%   xm      - argument (column vector) at which the minimum was found
%   fv      - minimum objective value found
%%%%%%%%%%%% Initialize particle positions and velocities %%%%%%%%%%%%
x = zeros(N,D);
v = zeros(N,D);
for i = 1:N
    for j = 1:D
        x(i,j) = randn;  % initial position
        v(i,j) = randn;  % initial velocity
    end
end
%%%%%%%%%% Evaluate initial fitness; initialize Pi and Pg %%%%%%%%%%
p = zeros(1,N);                  % p(i): best fitness seen by particle i
y = zeros(N,D);                  % y(i,:): best position seen by particle i
for i = 1:N
    p(i) = fitness(x(i,:));
    y(i,:) = x(i,:);
end
pg = x(N,:);                     % Pg: global best position
for i = 1:(N-1)
    % NOTE(review): comparison was truncated in the scraped source;
    % restored to the standard minimization test.
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%%%%% Main loop: anneal T, pick attractor by Boltzmann roulette %%%%%
T = -fitness(pg)/log(0.2);       % initial temperature
for t = 1:M
    groupFit = fitness(pg);      % current global-best fitness
    % Boltzmann weight of each particle relative to the global best
    % (based on personal-best fitness p, as in the original).
    Tfit = zeros(1,N);
    for i = 1:N
        Tfit(i) = exp(-(p(i) - groupFit)/T);
    end
    SumTfit = sum(Tfit);
    Tfit = Tfit/SumTfit;         % normalize to a probability distribution
    % Roulette-wheel selection of the substitute attractor pg_plus.
    % Always terminates: the cumulative sum reaches 1 at i = N.
    pBet = rand();
    for i = 1:N
        ComFit(i) = sum(Tfit(1:i));  %#ok<AGROW>
        if pBet <= ComFit(i)
            pg_plus = x(i,:);
            break;
        end
    end
    % Clerc-Kennedy constriction factor (requires C = c1 + c2 > 4).
    C = c1 + c2;
    ksi = 2/abs(2 - C - sqrt(C^2 - 4*C));
    for i = 1:N
        v(i,:) = ksi*(v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg_plus-x(i,:)));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)        % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)            % improved global best
            pg = y(i,:);
        end
    end
    T = T * lamda;               % cool the temperature
    Pbest(t) = fitness(pg);  %#ok<AGROW>  % convergence history (not returned)
end
xm = pg';
fv = fitness(pg);
 
(四)隨機權重
function [xm,fv] = PSO_rand(fitness,N,c1,c2,wmax,wmin,rande,M,D)
% PSO_rand  PSO with a random inertia weight: each velocity update draws
% w ~ Uniform(wmin, wmax) plus Gaussian noise with std rande.
%   fitness - handle of the objective function to MINIMIZE
%   N       - swarm size
%   c1, c2  - cognitive / social acceleration coefficients
%   wmax    - upper bound of the uniform part of w
%   wmin    - lower bound of the uniform part of w
%   rande   - standard deviation of the Gaussian perturbation of w
%   M       - maximum number of iterations
%   D       - dimension of the search space
%   xm      - argument (column vector) at which the minimum was found
%   fv      - minimum objective value found
format long;
%%%%%%% Initialize particle positions and velocities %%%%%%%
x = zeros(N,D);
v = zeros(N,D);
for i = 1:N
    for j = 1:D
        x(i,j) = randn;
        v(i,j) = randn;
    end
end
%%%%%%% Evaluate initial fitness; initialize Pi and Pg %%%%%%%
p = zeros(1,N);                  % p(i): best fitness seen by particle i
y = zeros(N,D);                  % y(i,:): best position seen by particle i
for i = 1:N
    p(i) = fitness(x(i,:));
    y(i,:) = x(i,:);
end
pg = x(N,:);                     % Pg: global best position
for i = 1:(N-1)
    % NOTE(review): comparison was truncated in the scraped source;
    % restored to the standard minimization test.
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%%%%%%% Main loop: PSO update with randomly drawn inertia weight %%%%%%%
for t = 1:M
    for i = 1:N
        miu = wmin + (wmax - wmin)*rand();   % uniform component of w
        w = miu + rande*randn();             % add Gaussian perturbation
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)        % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)            % improved global best
            pg = y(i,:);
        end
    end
    Pbest(t) = fitness(pg);  %#ok<AGROW>  % convergence history (not returned)
end
xm = pg';
fv = fitness(pg);
 
以上四種w的改進方法各有千秋,請讀者根據自身需求選擇相應的方法;除此之外,還有很多其它對於w的改進。不過,現在比較主流的做法是給c1、c2、w建立相應的關系,通過c1或者c2的值來控制w的變化。


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM