A Simple Genetic Algorithm Example (1)

Below are programs for a simple genetic algorithm and a simple BP network. See whether they are of any use to you.
The first target function is y=1/x:
%N=input('number of hidden neurons:\n');
%maxecho=input('maximum number of training epochs');
% network initialization
N=3; % number of hidden neurons (adjust as needed)
maxecho=1000; % maximum number of training epochs
h=0.4; % learning rate
p=1:1:100;
t=1./p; % training samples: the target function is y=1/x
inw=randn(N,2); % input-to-hidden weight matrix (one input plus a bias column)
ins=zeros(N,1); % hidden-layer induced local fields
outg=zeros(N,1); % hidden-layer deltas, propagated back toward the input
outs=zeros(N,1); % hidden-layer outputs
outw=randn(1,N+1); % hidden-to-output weight matrix (plus one bias)
outv=zeros(1,1); % output-layer induced local field
outy=zeros(1,1);
backg=zeros(1,1); % output-layer delta, propagated back to the hidden layer
m=1;
n=0;
% train on the samples
%for x=1:maxecho
while(n<=maxecho) % (an extra stop test on the error m, |m>0.2, was left commented out)
m=0;
for i=1:length(p)
ins=inw*[p(i);1];
outs=1./(1+exp(-ins));
outv=outw*[outs;1];
outy=1./(1+exp(-outv));
backg=(t(i)-outy)*(outy.*(1-outy));
outg=(outs.*(1-outs)).*(outw(:,1:N)'*backg);
outw=outw+h*backg*[outs;1]';
inw=inw+h*outg*[p(i),1];
m=m+(t(i)-outy)^2;
end
n=n+1;
end
% test on a denser sample grid
testp=1:0.1:100;
testt=1./testp;
testout=zeros(1,length(testp));
err=zeros(1,length(testp)); % 'err', to avoid shadowing MATLAB's built-in error()
for i=1:length(testp)
ins=inw*[testp(i);1];
outs=1./(1+exp(-ins));
outv=outw*[outs;1];
outy=1./(1+exp(-outv));
testout(i)=outy;
err(i)=abs(testt(i)-testout(i));
end
% after testing, look at the outputs and how the error behaves
k=1:0.1:100;
plot(k,testt,'r',k,testout,'b',k,err,'g');
xlabel(num2str(n)); % label with the number of epochs actually run
From this you can see the flow of a basic BP algorithm fairly clearly. What pleased me most at the time was the matrix multiplication: it is fast, and it makes the code easy to extend to any number of inputs and any number of hidden neurons.
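To illustrate that claim, here is a minimal sketch of my own (not from the original post): the same matrix-form updates handle R inputs unchanged, and only the shapes of the input vector and inw change. The toy target (the product of the inputs) is an assumption chosen just for the demo.
% minimal sketch: the matrix-form BP updates above, generalized to R inputs
R=2; N=4; h=0.4; % illustrative sizes: R inputs, N hidden neurons
P=rand(R,50); T=prod(P,1); % toy samples; target stays in (0,1) for the sigmoid output
inw=randn(N,R+1); outw=randn(1,N+1);
for epoch=1:500
for i=1:size(P,2)
xin=[P(:,i);1]; % input plus bias, (R+1)x1
outs=1./(1+exp(-inw*xin)); % hidden activations, Nx1
outy=1./(1+exp(-outw*[outs;1])); % scalar output
backg=(T(i)-outy)*outy*(1-outy); % output delta
outg=(outs.*(1-outs)).*(outw(1,1:N)'*backg); % hidden deltas, Nx1
outw=outw+h*backg*[outs;1]'; % rank-1 updates, exactly as in the code above
inw=inw+h*outg*xin';
end
end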
Later on, my artificial-intelligence teacher (a returnee from abroad; his teacher's teacher was Zadeh) asked for a more comprehensive algorithm, so for half a month I worked day and night on a small project. That one little AI course took up nearly all my spare time, and I barely scraped through Stochastic Processes. I still remember a happy Christmas day, the get-together at the teacher's house (after the project was finished), and the snow that fell at midnight. It rarely snows in Hangzhou... ^_^, a wonderful memory. While writing these little programs I got to know the expert 冰彬鱼六代 (an NUAA graduate, a very capable person who wrote his own SGA toolbox and is now studying in France), and also 海阔天空 from the 动力 forum. I admired them enormously at the time; what they did, nobody else could replace. Here I am imitating them just a little. It isn't much, but I am a beginner, after all.
A GA program for optimizing neural-network weights & thresholds.
The basic principle and procedure of this optimization can be found in many papers, so there is no need to repeat them here. I will just paste the little program, written with gaot5, below; the target is again y=1/x (the more verbose variant is kept for easier reading).
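In one hedged sketch of my own (illustrative only, not the GAOT interface; it mirrors the W1, W2, B1, B2 gene ordering of the decoding loops in the files below, specialized to a single output S2=1): the chromosome is the flattened weight/bias vector, and the fitness is the inverse of the network's squared error.
% illustrative only; x is a row chromosome, P is RxM, T is 1xM
function fit=sketch_fitness(x,P,T,R,S1)
W1=reshape(x(1:R*S1),R,S1)'; % first R*S1 genes: input-to-hidden weights, one row per hidden neuron
W2=reshape(x(R*S1+1:R*S1+S1),1,S1); % next S1 genes: hidden-to-output weights
B1=x(R*S1+S1+1:R*S1+2*S1)'; B2=x(end); % then the two bias vectors
A1=1./(1+exp(-(W1*P+B1*ones(1,size(P,2))))); % hidden layer
A2=1./(1+exp(-(W2*A1+B2))); % output layer
fit=1/sum((T-A2).^2); % larger fitness = smaller squared error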
>>file:gabp.m
%**************************************
clear all
% train the BP network's weights and thresholds with a GA
% start timing
tic,
% initialize the BP network
[P,T,R,S1,S2,S]=bpinit;
bounds=ones(S,1)*[-10 10];
% population size
num=60;
pop=initializega(num,bounds,'fitness');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% (a commented-out copy of the block above differed only in using
%  bounds=ones(S,1)*[0.1 0.9])
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% number of generations
gen=500;
[x endPop bPop trace]=ga(bounds,'fitness',[],pop,[1e-6 1 1],'maxGenTerm',gen,...
'normGeomSelect',[0.09],['arithXover'],[2],'nonUnifMutation',[2 gen 3]);
for i=1:S
x(i)=endPop(1,i);
end;
sse=0; % accumulator for the (commented-out) SSE below; avoids shadowing MATLAB's built-in sum
% the first R*S1 genes encode W1
for i=1:S1,
for k=1:R,
W1(i,k)=x(R*(i-1)+k);
end
end
% the next S1*S2 genes (after the first R*S1) encode W2
for i=1:S2,
for k=1:S1,
W2(i,k)=x(S1*(i-1)+k+R*S1);
end
end
% the next S1 genes (after the first R*S1+S1*S2) encode B1
for i=1:S1,
B1(i,1)=x((R*S1+S1*S2)+i);
end
% the next S2 genes (after the first R*S1+S1*S2+S1) encode B2
for i=1:S2,
B2(i,1)=x((R*S1+S1*S2+S1)+i);
end
% compute the outputs of the S1 and S2 layers
for i=1:100
x1=W1*P(i)+B1;
A1=1./(1+exp(-x1));
x2=W2*A1+B2;
A2=1./(1+exp(-x2));
% A1=logsig(W1*P(1:17,i),B1);
% A2=logsig(W2*A1,B2);
YY(i)=A2;
% sum of squared errors (left commented out here; see f.m)
% SE=;
%sse=sse+sumsqr(T(i)-A2);
end
i=1:1:100;
plot(i,YY(i),'r+',i,T(i),'b-');
% [W1 B1 W2 B2]=gadecod(x);
% simulation result
% TT=simuff(P,W1,B1,'logsig',W2,B2,'logsig')
toc % stop timing
>>file:fitness.m
function [sol,eval]=fitness(sol,options)
% GAOT-style evaluation wrapper; the actual work is done in f.m
[P,T,R,S1,S2,S]=bpinit;
numv=size(sol,2)-1;
x=sol(1:numv);
eval=f(x);
>>file:f.m
function [eval]=f(sol)
numv=size(sol,2);
x=sol(1:numv);
[P,T,R,S1,S2,S]=bpinit;
% compute the fitness value
% the first R*S1 genes encode W1
for i=1:S1
for k=1:R,
W1(i,k)=x(R*(i-1)+k);
end
end
% the next S1*S2 genes (after the first R*S1) encode W2
for i=1:S2
for k=1:S1,
W2(i,k)=x(S1*(i-1)+k+R*S1);
end
end
% the next S1 genes (after the first R*S1+S1*S2) encode B1
for i=1:S1
B1(i,1)=x((R*S1+S1*S2)+i);
end
% the next S2 genes (after the first R*S1+S1*S2+S1) encode B2
for i=1:S2
B2(i,1)=x((R*S1+S1*S2+S1)+i);
end
sse=0; % accumulated squared error ('sse' avoids shadowing MATLAB's built-in sum)
SE=0;
for i=1:100
x1=W1*P(i)+B1;
A1=1./(1+exp(-x1));
x2=W2*A1+B2;
A2=1./(1+exp(-x2));
% A1=logsig(W1*P(1:17,i),B1);
% A2=logsig(W2*A1,B2);
% accumulate the sum of squared errors
SE=sumsqr(T(i)-A2);
sse=sse+SE;
end
eval=10/sse; % GA fitness: inversely proportional to the squared error
>>file:bpinit.m
% BP network initialization: provides the training samples P and T,
% the input/output dimensions R and S2, and the hidden-neuron count S1
function [P,T,R,S1,S2,S]=bpinit
for i=1:100
P(i)=i;
T(i)=1/P(i);
end
[R,Q]=size(P);%R=1
[S2,Q]=size(T);%S2=1
S1=3; % 3 hidden neurons
S=R*S1+S1*S2+S1+S2; % GA chromosome length
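Assuming the GAOT toolbox (which provides initializega and ga) is installed and the four files above are saved on the MATLAB path, the whole run is just:
% hedged usage sketch: requires the GAOT toolbox on the path
gabp % runs the 500-generation GA, decodes the best individual, plots YY against T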
That part was copied.
Here I put together a so-called military-spending forecast (I had no better topic to hand).
>>file:xw_ga_ann_19 % a 4-3-1 network, 19 weights and thresholds in total (3x(4+1)+1x(3+1)=19)
clear all;
clc;
format long;
Population_Size=200;
String_Length=190; % 19 parameters x 10 bits each
chromosome=round(rand(Population_Size,String_Length)); % random initial bit-string population
chromosome_change=zeros(Population_Size,String_Length);
flag=1;
fitness_function=zeros(1,Population_Size);
selected=zeros(1,Population_Size);
generation=1;
maxsat=0; % best fitness seen so far
a1=zeros(3,5,10);
a2=zeros(1,4,10);
param1=zeros(3,5);
param2=zeros(1,4);
x_param1=zeros(3,5);
y_param2=zeros(1,4);
while (flag>0) && (generation<1000)
sum_fit=0;
for i=1:Population_Size
param1=zeros(3,5);
param2=zeros(1,4);
x_param1=zeros(3,5);
y_param2=zeros(1,4);
% split the i-th chromosome into 19 ten-bit genes:
% 15 for the 3x5 hidden-layer matrix (a1), 4 for the output layer (a2)
a1(1,1,1:10)=chromosome(i,1:10);a1(1,2,1:10)=chromosome(i,11:20);a1(1,3,1:10)=chromosome(i,21:30);
a1(1,4,1:10)=chromosome(i,31:40);a1(1,5,1:10)=chromosome(i,41:50);a1(2,1,1:10)=chromosome(i,51:60);
a1(2,2,1:10)=chromosome(i,61:70);a1(2,3,1:10)=chromosome(i,71:80);a1(2,4,1:10)=chromosome(i,81:90);
a1(2,5,1:10)=chromosome(i,91:100); a1(3,1,1:10)=chromosome(i,101:110); a1(3,2,1:10)=chromosome(i,111:120);
a1(3,3,1:10)=chromosome(i,121:130); a1(3,4,1:10)=chromosome(i,131:140);a1(3,5,1:10)=chromosome(i,141:150);
a2(1,1,1:10)=chromosome(i,151:160); a2(1,2,1:10)=chromosome(i,161:170); a2(1,3,1:10)=chromosome(i,171:180);
a2(1,4,1:10)=chromosome(i,181:190);
% decode each 10-bit gene into an integer in [0,1023]
for k=1:3
for r=1:5
for j=1:10
param1(k,r)=param1(k,r)+a1(k,r,j)*pow2(j-1);
end
end
end
for k=1:4
for j=1:10
param2(1,k)=param2(1,k)+a2(1,k,j)*pow2(j-1);
end
end
m=pow2(10)-1; % 1023: used below to map each gene onto the real interval [-15,17]
for k=1:3
for r=1:5
x_param1(k,r)=-15+param1(k,r)/m*32;
end
end
for k=1:4
y_param2(1,k)=-15+param2(1,k)/m*32;
end
fitness_function(1,i)=xw_ga_ann19_fitness( x_param1,y_param2);
%fitness_function(1,i)=x1^2+x2^2+x3^2+x4^2+x5^2+x6^2+x7^2+x8^2+x9^2+x10^2;
if(fitness_function(1,i)>maxsat)
maxsat=fitness_function(1,i);
optimal_1=x_param1;
optimal_2=y_param2;
optimal_3=fitness_function(1,i);
end
if(fitness_function(1,i)<=0.001)
flag=-1;
%optimal
%generation
break;
else sum_fit=sum_fit+fitness_function(1,i);
end
if(flag<0)
break;
end
end
if(flag>0)
% first selection (roulette wheel)
sum_fit=sum_fit-fitness_function(1,Population_Size);
for i=1:Population_Size-1
x=round(rand(1)*1023); % spin the wheel (rand, so the remainder below stays non-negative)
fsum=round(sum_fit);
rr=rem(x,fsum);
n=1;ba=1;
partsum=0;
while(partsum<rr) % walk the cumulative fitness until it passes rr
partsum=partsum+fitness_function(1,n);
ba=n;
n=n+1;
end
selected(1,i)=ba;
end
%reproduce
for i=1:Population_Size-1
for j=1:String_Length
chromosome_change(i,j)=chromosome(selected(1,i),j);
end
fitness_function(1,i)=fitness_function(1,selected(1,i));
end
%select before crossover (same roulette wheel)
for i=1:Population_Size-1
x=round(rand(1)*32367);
fsum=round(sum_fit);
rr=rem(x,fsum)+1;
n=1;bba=1; % initialize bba so it is defined even if the loop body never runs
partsum=0;
while(partsum<rr)
partsum=partsum+fitness_function(1,n);
bba=n;
n=n+1;
end
selected(1,i)=bba;
end
%crossover
maxsw=max(fitness_function);
for i=1:Population_Size/2-1
parent1=selected(1,i);
parent2=selected(1,Population_Size-1-i);
child1=i;
child2=Population_Size-1-i;
pc=0.8;
randnum=rand(1);
if randnum<pc % crossover with probability pc
site1=round(rand(1)*String_Length); % single crossover point
for j=1:String_Length
if(j<site1) % genes before the crossover point come from the first parent
chromosome(child1,j)=chromosome_change(parent1,j);
chromosome(child2,j)=chromosome_change(parent2,j);
else
chromosome(child1,j)=chromosome_change(parent2,j);
chromosome(child2,j)=chromosome_change(parent1,j);
end
end
end
end
%mutation
pm=0.1;
for i=1:Population_Size-1
for j=1:String_Length
randnum=rand(1);
if(randnum<pm)
chromosome(i,j)=~chromosome(i,j); % mutation: flip the bit
end
end
end
end
generation=generation+1;
end
%**************************************************
%ANN start
sensorN=3; % number of hidden neurons
stopR=50000; % number of training epochs
h=1; % learning rate
%------------------------------%
%trainS=1:1:100;
%trainD=1./trainS; % training samples
trainS=[1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 ];
maxp=max(trainS);
minp=min(trainS);
for i=1:16
trainS(i)=(trainS(i)-minp)/(maxp-minp)*0.9+0.05; % normalize the years into [0.05,0.95]
end
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320];
maxt=max(trainD);
mint=min(trainD);
for i=1:16
trainD(i)=(trainD(i)-mint)/(maxt-mint)*0.9+0.05; % normalize the spending figures likewise
end
%trainS=1:1:100;
%trainD=1./trainS; % training samples
HLW=randn(sensorN,5); % hidden-layer weights (4 inputs plus a bias column)
%HLW=[x1,x2; x3,x4; x5,x6];
HLW=optimal_1; % seed with the best hidden-layer weights found by the GA
HLV=zeros(sensorN,1); % hidden-layer induced local fields
HLOut=zeros(sensorN,1); % hidden-layer outputs
HLG=zeros(sensorN,1); % hidden-layer deltas
OLW=randn(1,sensorN+1); % output-layer weights
%OLW=[x7 x8 x9 x10];
OLW=optimal_2; % seed with the best output-layer weights found by the GA
OLV=zeros(1,1); % output-layer induced local field
OLY=zeros(1,1); % output-layer output
OLG=zeros(1,1); % output-layer delta
% empirical initial values, 3 hidden neurons
for j=1:stopR % training epochs; a fixed epoch count is used as the stop condition and could be improved
for i=4:(length(trainD)-1) % each input is the previous four years' spending
HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1];
HLOut=1./(1+exp(-HLV));
OLV=OLW*[HLOut;1];
OLY=1./(1+exp(-OLV));
OLG=(trainD(i+1)-OLY)*(OLY.*(1-OLY));
HLG=(HLOut.*(1-HLOut)).*(OLW(:,1:sensorN)'*OLG);
OLW=OLW+h*OLG*[HLOut;1]';
HLW=HLW+h*HLG*[trainD(i),trainD(i-1),trainD(i-2),trainD(i-3),1];
end
end
%train=[1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 ];
%maxp=2003;
%minp=min(trainS);
%for i=1:17
%   trainS(i)=(trainS(i)-minp)/(maxp-minp)*0.9+0.05;
%end
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320 ];
maxt=446320;
mint=min(trainD);
for i=1:16
trainD(i)=(trainD(i)-mint)/(maxt-mint)*0.9+0.05;
end
trainanswer=zeros(1,length(trainD)-3);
for i=4:(length(trainS))
HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1];
HLOut=1./(1+exp(-HLV));
OLV=OLW*[HLOut;1];
OLY=1./(1+exp(-OLV));
trainanswer(i-3)=OLY;
end
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320 460000 ]; % a 17th value extends the series to 2004
maxt=446320;
mint=min(trainD);
for i=1:17
trainD(i)=(trainD(i)-mint)/(maxt-mint)*0.9+0.05;
end
trainP=zeros(1,length(trainanswer));
trainP=trainD(1,5:17);
k=1992:1:2004;
plot(k,trainP,'r',k,trainanswer,'b+');
xlabel('Year');
ylabel('Normalized military spending (+ marks the network output)');
>>file:xw_ga_ann19_fitness % file name matching the function defined below
function m=xw_ga_ann19_fitness( x_param1,y_param2)
%--- tunable parameters -------%
sensorN=3; % number of hidden neurons
stopR=100; % number of training epochs (unused while the training loop below stays commented out)
h=1; % learning rate
%------------------------------%
%trainS=1:1:100;
%trainD=1./trainS; % training samples
trainS=[1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 ];
maxp=max(trainS);
minp=min(trainS);
for i=1:16
trainS(i)=(trainS(i)-minp)/(maxp-minp)*0.9+0.05;
end
trainD=[293093 304085 306170 280292 305141 297637 288059 278856 271417 276324 274278 280969 301697 312743 356720 446320];
maxt=max(trainD);
mint=min(trainD);
for i=1:16
trainD(i)=(trainD(i)-mint)/(maxt-mint)*0.9+0.05;
end
%trainS=1:1:100;
%trainD=1./trainS; % training samples
HLW=randn(sensorN,5); % hidden-layer weights
HLW=x_param1; % overwritten by the candidate weights being evaluated
HLV=zeros(sensorN,1); % hidden-layer induced local fields
HLOut=zeros(sensorN,1); % hidden-layer outputs
HLG=zeros(sensorN,1); % hidden-layer deltas
OLW=randn(1,sensorN+1); % output-layer weights
OLW=y_param2; % overwritten by the candidate weights being evaluated
OLV=zeros(1,1); % output-layer induced local field
OLY=zeros(1,1); % output-layer output
OLG=zeros(1,1); % output-layer delta
% empirical initial values, 3 hidden neurons
% HLW=[0.1456,-0.11;-0.1357,-0.21;0.3422,-0.13];
% OLW=[-1.8141    2.6347   -1.2922];
m=0;
%for j=1:stopR % training epochs; disabled here so the fitness just measures the candidate weights' error
for i=4:(length(trainD)-1)
HLV=HLW*[trainD(i);trainD(i-1);trainD(i-2);trainD(i-3);1];
HLOut=1./(1+exp(-HLV));
OLV=OLW*[HLOut;1];
OLY=1./(1+exp(-OLV));
%OLG=(trainD(i+1)-OLY)*(OLY.*(1-OLY));
m=m+(trainD(i+1)-OLY)^2;
%        HLG=(HLOut.*(1-HLOut)).*(OLW(:,1:sensorN)'*OLG);
%       OLW=OLW+h*OLG*[HLOut;1]';
%      HLW=HLW+h*HLG*[trainS(i),1];
end
m=10/m; % fitness: inversely proportional to the squared one-step prediction error
%end
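As a hedged usage sketch (assuming the two files above are saved under those names, with .m extensions, on the MATLAB path): the script first runs the binary GA, which leaves the best decoded weights in optimal_1 and optimal_2, and the BP stage at the bottom of the same script then refines them before plotting the forecast.
% usage sketch (assumptions as stated above)
xw_ga_ann_19 % GA search, then BP refinement, then the forecast plot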