function test_example_SAE
% Load MNIST and normalise pixel intensities to [0,1]; labels stay as 0/1 indicator rows.
load mnist_uint8;
train_x = double(train_x)/255;
test_x  = double(test_x)/255;
train_y = double(train_y);
test_y  = double(test_y);

%% ex1 train a 100 hidden unit SDAE and use it to initialize a FFNN
% Setup and train a stacked denoising autoencoder (SDAE)
rand('state',0)            % fix the RNG seed so the run is reproducible
sae = saesetup([784 100]); % one autoencoder: 784 -> 100 -> 784
這里跳入saesetup函數,由函數可知返回的是sae的結構體
function sae = saesetup(size)
% SAESETUP builds a stack of autoencoders, one per pair of adjacent layer
% sizes, e.g. saesetup([784 100]) creates a single 784-100-784 autoencoder.
% NOTE(review): the parameter name shadows MATLAB's built-in SIZE in this scope.
    for u = 2 : numel(size)
        % Each autoencoder reconstructs its own input: [in hidden in].
        sae.ae{u-1} = nnsetup([size(u-1) size(u) size(u-1)]);
    end
end
這里調用了nnsetup函數,由該函數可知返回的也是nn結構體,可以看出訓練后是把nn替代成sae.
function nn = nnsetup(architecture)
%NNSETUP creates a Feedforward Backpropagate Neural Network
% nn = nnsetup(architecture) returns an neural network structure with n=numel(architecture)
% layers, architecture being a n x 1 vector of layer sizes e.g. [784 100 10]

    nn.size = architecture;   % units per layer, e.g. [784 100 784]
    nn.n    = numel(nn.size); % number of layers

    nn.activation_function              = 'tanh_opt';   % Activation functions of hidden layers: 'sigm' (sigmoid) or 'tanh_opt' (optimal tanh).
    nn.learningRate                     = 2;            % learning rate Note: typically needs to be lower when using 'sigm' activation function and non-normalized inputs.
    nn.momentum                         = 0.5;          % Momentum
    nn.scaling_learningRate             = 1;            % Scaling factor for the learning rate (each epoch)
    nn.weightPenaltyL2                  = 0;            % L2 regularization
    nn.nonSparsityPenalty               = 0;            % Non sparsity penalty
    nn.sparsityTarget                   = 0.05;         % Sparsity target
    nn.inputZeroMaskedFraction          = 0;            % Used for Denoising AutoEncoders
    nn.dropoutFraction                  = 0;            % Dropout level (http://www.cs.toronto.edu/~hinton/absps/dropout.pdf)
    nn.testing                          = 0;            % Internal variable. nntest sets this to one.
    nn.output                           = 'sigm';       % output unit 'sigm' (=logistic), 'softmax' and 'linear'

    % Per connection between layers i-1 and i, initialise three parameters:
    % W (weights, bias in the first column), vW (momentum buffer for W) and
    % p (running mean activation, used by the sparsity penalty).
    for i = 2 : nn.n
        % weights and weight momentum
        % Uniform init in +/- 4*sqrt(6/(fan_in+fan_out)); the +1 column is the bias.
        nn.W{i - 1} = (rand(nn.size(i), nn.size(i - 1)+1) - 0.5) * 2 * 4 * sqrt(6 / (nn.size(i) + nn.size(i - 1)));
        nn.vW{i - 1} = zeros(size(nn.W{i - 1}));  % same shape as W, starts at zero

        % average activations (for use with sparsity)
        nn.p{i} = zeros(1, nn.size(i));
    end
end
程序跳回這一段
% Override the autoencoder defaults: sigmoid units, lower learning rate,
% and corrupt 50% of the inputs (this is what makes it a *denoising* AE).
sae.ae{1}.activation_function     = 'sigm';
sae.ae{1}.learningRate            = 1;
sae.ae{1}.inputZeroMaskedFraction = 0.5;

opts.numepochs = 1;   % one pass over the training set
opts.batchsize = 100; % mini-batch size
sae = saetrain(sae, train_x, opts);
這里將nn里的各個參數在sae里部分更改,然后又跳到saetrain函數
function sae = saetrain(sae, x, opts)
% SAETRAIN greedily trains each autoencoder in the stack: AE i is trained
% to reconstruct its input, then its hidden activations become the input
% of AE i+1.
    for i = 1 : numel(sae.ae)
        disp(['Training AE ' num2str(i) '/' num2str(numel(sae.ae))]);
        sae.ae{i} = nntrain(sae.ae{i}, x, x, opts); % target equals input: reconstruction
        t = nnff(sae.ae{i}, x, x);
        x = t.a{2};      % hidden-layer activations feed the next autoencoder
        %remove bias term
        x = x(:,2:end);  % drop the leading bias column added by nnff
    end
end
這里轉到nntrain函數,跳過前面的assert判定
% (fragment of nntrain, shown after the input-validation asserts)
loss.train.e = [];
loss.train.e_frac = [];
loss.val.e = [];
loss.val.e_frac = [];
opts.validation = 0;
if nargin == 6
opts.validation = 1;
end
fhandle = [];
if isfield(opts,'plot') && opts.plot == 1 % true when opts has a 'plot' field set to 1
fhandle = figure();
end
m = size(train_x, 1);
% m is the number of training examples
% opts.batchsize is the mini-batch size used for batch gradient descent
batchsize = opts.batchsize;
numepochs = opts.numepochs; % number of full passes over the data
numbatches = m / batchsize;
assert(rem(numbatches, 1) == 0, 'numbatches must be a integer');
L = zeros(numepochs*numbatches,1); n = 1;
for i = 1 : numepochs
tic;
kk = randperm(m); % random permutation of 1..m so batches are reshuffled each epoch
for l = 1 : numbatches
batch_x = train_x(kk((l - 1) * batchsize + 1 : l * batchsize), :); % one mini-batch of batchsize rows
% Add noise to input (for use in denoising autoencoder);
% see "Extracting and Composing Robust Features with Denoising Autoencoders"
if(nn.inputZeroMaskedFraction ~= 0)
batch_x = batch_x.*(rand(size(batch_x))>nn.inputZeroMaskedFraction); % zero out roughly inputZeroMaskedFraction of the inputs
end
batch_y = train_y(kk((l - 1) * batchsize + 1 : l * batchsize), :); % matching targets for batch_x
nn = nnff(nn, batch_x, batch_y);
nn = nnbp(nn);
nn = nnapplygrads(nn);
L(n) = nn.L; % record this mini-batch's loss
n = n + 1;
end
t = toc; % seconds spent on this epoch
if opts.validation == 1
loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
else
loss = nneval(nn, loss, train_x, train_y);
str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
end
if ishandle(fhandle)
nnupdatefigures(nn, fhandle, loss, opts, i);
end
disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
nn.learningRate = nn.learningRate * nn.scaling_learningRate; % scale the learning rate each epoch
end
end
函數轉為nnff,意為前向傳播算法
function nn = nnff(nn, x, y)
%NNFF performs a feedforward pass
% nn = nnff(nn, x, y) returns an neural network structure with updated
% layer activations, error and loss (nn.a, nn.e and nn.L)

    n = nn.n;
    m = size(x, 1);

    x = [ones(m,1) x]; % prepend the bias column
    nn.a{1} = x;

    % feedforward pass through the hidden layers
    for i = 2 : n-1
        % apply the activation function selected in nnsetup
        switch nn.activation_function
            case 'sigm'
                % Calculate the unit's outputs (including the bias term)
                nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
            case 'tanh_opt'
                nn.a{i} = tanh_opt(nn.a{i - 1} * nn.W{i - 1}');
        end

        % dropout (nn.dropoutFraction is set in nnsetup): at test time scale
        % the activations, at train time zero out a random mask of units
        if(nn.dropoutFraction > 0)
            if(nn.testing)
                nn.a{i} = nn.a{i}.*(1 - nn.dropoutFraction);
            else
                nn.dropOutMask{i} = (rand(size(nn.a{i}))>nn.dropoutFraction);
                nn.a{i} = nn.a{i}.*nn.dropOutMask{i};
            end
        end

        % calculate running exponential average of activations for use with
        % the sparsity penalty (nonSparsityPenalty, applied in nnbp)
        if(nn.nonSparsityPenalty>0)
            nn.p{i} = 0.99 * nn.p{i} + 0.01 * mean(nn.a{i}, 1);
        end

        % Add the bias term
        nn.a{i} = [ones(m,1) nn.a{i}];
    end

    % output layer
    switch nn.output
        case 'sigm'
            nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
        case 'linear'
            nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
        case 'softmax'
            % numerically stable softmax: subtract the row max before exp
            nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
            nn.a{n} = exp(bsxfun(@minus, nn.a{n}, max(nn.a{n},[],2)));
            nn.a{n} = bsxfun(@rdivide, nn.a{n}, sum(nn.a{n}, 2));
    end

    % error and loss
    nn.e = y - nn.a{n};
    switch nn.output
        case {'sigm', 'linear'}
            nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;    % mean squared error
        case 'softmax'
            nn.L = -sum(sum(y .* log(nn.a{n}))) / m; % cross-entropy
    end
end
接下來跳轉到nnbp函數
function nn = nnbp(nn)
%NNBP performs backpropagation
% nn = nnbp(nn) returns an neural network structure with updated weight
% gradients nn.dW, computed from the activations/error stored by nnff.

    n = nn.n;
    sparsityError = 0;

    % delta at the output layer depends on the output unit type
    switch nn.output
        case 'sigm'
            d{n} = - nn.e .* (nn.a{n} .* (1 - nn.a{n})); % chain rule through the logistic output
        case {'softmax','linear'}
            d{n} = - nn.e; % softmax+cross-entropy and linear+mse both reduce to -e
    end

    for i = (n - 1) : -1 : 2
        % Derivative of the activation function
        switch nn.activation_function
            case 'sigm'
                d_act = nn.a{i} .* (1 - nn.a{i});
            case 'tanh_opt'
                d_act = 1.7159 * 2/3 * (1 - 1/(1.7159)^2 * nn.a{i}.^2);
        end

        if(nn.nonSparsityPenalty>0)
            pi = repmat(nn.p{i}, size(nn.a{i}, 1), 1);
            % KL-divergence sparsity term; zero column so the bias unit is not penalised
            sparsityError = [zeros(size(nn.a{i},1),1) nn.nonSparsityPenalty * (-nn.sparsityTarget ./ pi + (1 - nn.sparsityTarget) ./ (1 - pi))];
        end

        % Backpropagate first derivatives
        if i+1==n % in this case in d{n} there is not the bias term to be removed
            d{i} = (d{i + 1} * nn.W{i} + sparsityError) .* d_act; % Bishop (5.56)
        else % in this case in d{i} the bias term has to be removed
            d{i} = (d{i + 1}(:,2:end) * nn.W{i} + sparsityError) .* d_act;
        end

        if(nn.dropoutFraction>0)
            % units dropped in the forward pass receive no gradient
            d{i} = d{i} .* [ones(size(d{i},1),1) nn.dropOutMask{i}];
        end
    end

    % gradients w.r.t. each weight matrix, averaged over the batch
    for i = 1 : (n - 1)
        if i+1==n
            nn.dW{i} = (d{i + 1}' * nn.a{i}) / size(d{i + 1}, 1);
        else
            nn.dW{i} = (d{i + 1}(:,2:end)' * nn.a{i}) / size(d{i + 1}, 1);
        end
    end
end
接下來跳到nnapplygrads函數,算出權值W的變化量和更新結果
function nn = nnapplygrads(nn)
%NNAPPLYGRADS updates weights and biases with calculated gradients
% nn = nnapplygrads(nn) returns an neural network structure with updated
% weights and biases

    for i = 1 : (nn.n - 1)
        if(nn.weightPenaltyL2>0)
            % L2 weight decay; the bias column (first column) is not penalised
            dW = nn.dW{i} + nn.weightPenaltyL2 * [zeros(size(nn.W{i},1),1) nn.W{i}(:,2:end)];
        else
            dW = nn.dW{i};
        end

        dW = nn.learningRate * dW;

        if(nn.momentum>0)
            % classical momentum: accumulate a velocity and step along it
            nn.vW{i} = nn.momentum*nn.vW{i} + dW;
            dW = nn.vW{i};
        end

        nn.W{i} = nn.W{i} - dW;
    end
end
跳回nntrain函數,得到L(n)
L(n) = nn.L; % record this mini-batch's loss
n = n + 1;
end
t = toc; % seconds spent on this epoch
if opts.validation == 1 % only when a validation set was supplied
loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
else
loss = nneval(nn, loss, train_x, train_y);
str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
end
if ishandle(fhandle)
nnupdatefigures(nn, fhandle, loss, opts, i);
end
disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
nn.learningRate = nn.learningRate * nn.scaling_learningRate; % scale the learning rate each epoch
end
end
由於validation=0,所以跳轉到nneval函數,nneval函數檢驗神經網絡的表現
function [loss] = nneval(nn, loss, train_x, train_y, val_x, val_y)
%NNEVAL evaluates performance of neural network
% Appends the current full-batch training loss (and, when val_x/val_y are
% given, the validation loss) to the loss struct; for softmax outputs the
% misclassification fraction is recorded as well.
    assert(nargin == 4 || nargin == 6, 'Wrong number of arguments');
    has_val = (nargin == 6);

    nn.testing = 1;
    % training performance
    nn = nnff(nn, train_x, train_y);
    loss.train.e(end + 1) = nn.L;
    % validation performance
    if has_val
        nn = nnff(nn, val_x, val_y);
        loss.val.e(end + 1) = nn.L;
    end
    nn.testing = 0;

    %calc misclassification rate if softmax
    if strcmp(nn.output, 'softmax')
        [er_train, dummy] = nntest(nn, train_x, train_y);
        loss.train.e_frac(end + 1) = er_train;
        if has_val
            [er_val, dummy] = nntest(nn, val_x, val_y);
            loss.val.e_frac(end + 1) = er_val;
        end
    end
end
跳回到nntrain,執行完后續后跳回saetrain
function sae = saetrain(sae, x, opts)
% SAETRAIN greedily trains each autoencoder in the stack: AE i is trained
% to reconstruct its input, then its hidden activations become the input
% of AE i+1.
    for i = 1 : numel(sae.ae)
        disp(['Training AE ' num2str(i) '/' num2str(numel(sae.ae))]);
        sae.ae{i} = nntrain(sae.ae{i}, x, x, opts); % target equals input: reconstruction
        t = nnff(sae.ae{i}, x, x);
        x = t.a{2};      % hidden-layer activations feed the next autoencoder
        %remove bias term
        x = x(:,2:end);  % drop the leading bias column (a column, not a row)
    end
end
這里設了結構體t,更新了x的值,跳回test_example_SAE
% Use the SDAE to initialize a FFNN
nn = nnsetup([784 100 10]);
nn.activation_function = 'sigm';
nn.learningRate = 1;
nn.W{1} = sae.ae{1}.W{1}; % initialise the first layer from the trained autoencoder's encoder weights

% Train the FFNN on the labelled data and check generalisation on the test set
opts.numepochs = 1;
opts.batchsize = 100;
nn = nntrain(nn, train_x, train_y, opts);
[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.16, 'Too big error');
上面的代碼是用於檢測test_x,test_y和訓練集x和y的偏差,即采用SAE最后還是要把結果歸為nn結構體進行檢測
參考資料:【面向代碼】學習 Deep Learning(一)Neural Network
