用純c語言寫一個完整的BP神經網絡!


話不多說,直接上代碼

所有的說明均在代碼中

首先是頭文件BPNetWork.h

#ifndef BPNETWORK_H
#define BPNETWORK_H
//required standard headers
#include<math.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>


#define f(x) Sigmoid(x)//activation function selection
#define f_(x) Sigmoidf(x)//derivative of the activation (takes the activation OUTPUT, not the input)

//One fully-connected layer (the input layer has no Layer struct).
typedef struct {
    double* ws;//weight matrix, row-major: ws[(p-1)+(a-1)*prev_n] is weight p of neuron a
    double* bs;//bias per neuron
    double* os;//output (activation) per neuron
    double* ss;//error term per neuron: d(total error)/d(weighted sum)
} Layer;
//Whole network. Layer/neuron indices in the accessor macros are 1-based.
typedef struct {
    int lns;//number of layers, including the input layer
    int* ns;//neuron count per layer, ns[0] is the input layer
    double* is;//network input vector (length ns[0])
    double* ts;//target (ideal) output vector (length ns[lns-1])
    Layer* las;//the lns-1 computing layers (input layer excluded)
    double ln;//learning rate
}BPNetWork;



//Create a network; nums[len] gives the layer sizes, ln is the learning rate.
BPNetWork* BPCreate(int* nums, int len,double ln);
//Forward pass: compute every layer's outputs from the current input.
void RunOnce(BPNetWork* network);
//Copy one training sample (input + target) into the network.
void LoadIn(BPNetWork* network, double* input, double* putout);
//Backward pass: one gradient-descent update of all weights and biases.
void TrainOnce(BPNetWork* network);
//Mean squared error between current outputs and targets.
double ETotal(BPNetWork* network);

//sigmoid activation
#define Sigmoid(x)  (1 / (1 + exp(-(x))))
//sigmoid derivative, expressed in terms of the sigmoid OUTPUT f
#define Sigmoidf(f)  ((f) * (1 - (f)))
#define Tanh(x) ((2 / (1 + exp(-2 * (x))))-1)
#define Tanhf(f) ((1+(f))*(1-(f)))
#endif

然后是程序本體BPNetWork.c

宏定義

#include"BPNetWork.h"

//Accessor macros. All layer/neuron indices are 1-based; every macro
//captures a local variable named `network`, so it must be in scope.
//Arguments are fully parenthesized so expressions like INS(aa+1) expand safely.

//number of layers in the network (including the input layer)
#define LS network->lns

//number of neurons in the input layer
#define INNS network->ns[0]

//input value a of the input layer
#define INS(a) network->is[(a)-1]

//target (ideal) output a
#define TAS(a) network->ts[(a)-1]

//number of neurons in the output layer
#define OUTNS network->ns[LS-1]

//number of neurons in layer n
#define NS(n) network->ns[(n)-1]

//weight p of neuron a in layer n (layers 2..LS live in las[n-2])
#define WF(n,a,p) network->las[(n)-2].ws[((p)-1)+((a)-1)*NS((n)-1)]

//bias of neuron a in layer n
#define BF(n,a) network->las[(n)-2].bs[(a)-1]

//output of neuron a in layer n
#define OF(n,a) network->las[(n)-2].os[(a)-1]

//error term of neuron a in layer n
#define SF(n,a) network->las[(n)-2].ss[(a)-1]

//learning rate
#define LN network->ln

BPCreate函數:

//Allocate and randomly initialize a network.
//nums: layer sizes (nums[0] = input layer), len: number of layers,
//ln: learning rate. Returns NULL if the top-level allocation fails.
//Weights and biases are initialized uniformly in [0, 0.5].
BPNetWork* BPCreate(int* nums, int len,double ln)
{
    BPNetWork* network = malloc(sizeof(BPNetWork));
    if (network == NULL) return NULL;//out of memory
    network->lns = len;
    network->ns = malloc(len * sizeof(int));
    network->ln = ln;
    memcpy(network->ns, nums, len * sizeof(int));
    //input buffer, layer array (input layer excluded), target buffer
    network->is = malloc(nums[0] * sizeof(double));
    network->las = malloc(sizeof(Layer) * (len - 1));
    network->ts = malloc(sizeof(double) * nums[len - 1]);
    //use the network's own address as the random seed
    //(srand takes unsigned int, so the pointer must be converted, not passed raw)
    srand((unsigned int)(size_t)network);
    for (int p = 0; p < len - 1; p++) {
        int lastnum = nums[p];//neuron count of the previous layer
        int num = nums[p + 1];//neuron count of the current layer
        network->las[p].bs = malloc(sizeof(double) * num);
        //one weight per (neuron, previous-layer neuron) pair
        network->las[p].ws = malloc(sizeof(double) * num * lastnum);
        //per-neuron output buffer
        network->las[p].os = malloc(sizeof(double) * num);
        //per-neuron error-term buffer
        network->las[p].ss = malloc(sizeof(double) * num);
        for (int pp = 0; pp < num; pp++) {
            //rand()/2.0 forces floating-point division (integer division
            //would truncate); result is uniform in [0, 0.5]
            network->las[p].bs[pp] = rand() / 2.0 / RAND_MAX;
            for (int ppp = 0; ppp < lastnum; ppp++) {
                network->las[p].ws[ppp + pp * lastnum] = rand() / 2.0 / RAND_MAX;
            }
        }
    }
    return network;
}

RunOnce函數:

//Forward pass: propagate the stored input through every layer,
//writing each neuron's activation into its layer's output buffer.
//NOTE: the original post had the first inner for-loop swallowed into a
//line comment (a copy/paste artifact), which left unmatched braces and
//made the function uncompilable; the loop is restored here.
void RunOnce(BPNetWork* network) {
    //input layer -> layer 2
    for (int a = 1; a <= NS(2); a++) {
        double net = 0;
        for (int aa = 1; aa <= INNS; aa++) {
            net += INS(aa) * WF(2, a, aa);//weighted sum of the raw inputs
        }
        OF(2, a) = f(net + BF(2, a));//activation of neuron a in layer 2
    }
    //layer n -> layer n+1, for every remaining layer
    for (int n = 2; n <= LS - 1; n++) {
        for (int a = 1; a <= NS(n + 1); a++) {//neuron a of the next layer
            double net = 0;
            for (int aa = 1; aa <= NS(n); aa++) {//neuron aa of the current layer
                net += OF(n, aa) * WF(n + 1, a, aa);
            }
            OF(n + 1, a) = f(net + BF(n + 1, a));
        }
    }
}

TrainOnce函數:

//Backward pass: one gradient-descent step over all weights and biases,
//using the outputs produced by the most recent RunOnce and the targets
//loaded by LoadIn. Loss is mean squared error over the output layer.
//NOTE: in the original post the declaration `double* b = &BF(LS, a);`
//was swallowed into the preceding line comment (copy/paste artifact),
//leaving `b` undeclared; it is restored here.
void TrainOnce(BPNetWork* network) {
    //error terms of the output layer
    for (int a = 1; a <= OUTNS; a++) {
        double* s = &SF(LS, a);//error term of output neuron a
        double* b = &BF(LS, a);//bias of output neuron a
        double o = OF(LS, a);//activation of output neuron a
        //dE/dnet = dE/do * do/dnet, with E = mean((o - t)^2)
        *s = (2.0 / OUTNS) * (o - TAS(a)) * f_(o);
        *b = *b - LN * (*s);//bias update
        //weight updates: dE/dw = s * (previous-layer output)
        for (int aa = 1; aa <= NS(LS - 1); aa++) {
            double* w = &WF(LS, a, aa);
            *w = *w - LN * (*s) * OF(LS - 1, aa);
        }
    }

    //error terms of the hidden layers (layer LS-1 down to layer 3;
    //layer 2 is handled separately because its inputs come from is[])
    for (int a = LS - 1; a > 2; a--) {
        for (int n = 1; n <= NS(a); n++) {//neuron n of layer a
            double* s = &SF(a, n);//error term of this neuron
            *s = 0;
            double* b = &BF(a, n);//bias of this neuron
            double o = OF(a, n);//activation of this neuron
            for (int nn = 1; nn <= NS(a + 1); nn++) {//next layer
                double lw = WF(a + 1, nn, n);//weight from this neuron into the next layer
                double ls = SF(a + 1, nn);//error term of next-layer neuron nn
                *s += ls * lw * f_(o);//chain rule: accumulate back-propagated error
            }
            *b = *b - LN * (*s);//bias update
            //weight updates against the previous layer's outputs
            for (int nn = 1; nn <= NS(a - 1); nn++) {
                double* w = &WF(a, n, nn);
                *w = *w - LN * (*s) * OF(a - 1, nn);
            }
        }
    }

    //error terms of layer 2 (its "previous layer" is the raw input vector)
    for (int n = 1; n <= NS(2); n++) {
        double* s = &SF(2, n);//error term of this neuron
        *s = 0;
        double* b = &BF(2, n);//bias of this neuron
        double o = OF(2, n);//activation of this neuron
        for (int nn = 1; nn <= NS(3); nn++) {//layer 3
            double lw = WF(3, nn, n);//weight from this neuron into layer 3
            double ls = SF(3, nn);//error term of layer-3 neuron nn
            *s += ls * lw * f_(o);
        }
        *b = *b - LN * (*s);//bias update
        //weight updates against the network input
        for (int nn = 1; nn <= INNS; nn++) {
            double* w = &WF(2, n, nn);
            *w = *w - LN * (*s) * INS(nn);
        }
    }
}

LoadIn函數:

//Load one training sample: copy the input vector and the target
//(ideal output) vector into the network's buffers.
void LoadIn(BPNetWork* network,double* input,double* putout) {
    size_t in_bytes = INNS * sizeof(double);
    size_t out_bytes = OUTNS * sizeof(double);
    memcpy(network->is, input, in_bytes);
    memcpy(network->ts, putout, out_bytes);
}

ETotal函數:

//Mean squared error of the current outputs against the loaded targets.
double ETotal(BPNetWork* network) {
    double total = 0;
    for (int i = 1; i <= OUTNS; i++) {
        double diff = OF(LS, i) - TAS(i);
        total += (diff * diff) / OUTNS;//per-term division matches the MSE definition
    }
    return total;
}

入口函數:

//Demo: train a 1-20-20-1 network to map ANY input to 0.1,
//then test it on an input it never saw during training.
//(The variable must be named `network`: the accessor macros expand to it.)
int main() {
    int layer_sizes[] = { 1,20,20,1 };//4 layers: 1, 20, 20, 1 neurons
    double sample_a[1] = { 0.9 };//training input 1
    double sample_b[1] = { 0.1 };//training input 2
    double sample_c[1] = { 0.5 };//training input 3
    double target[1] = { 0.1 };//ideal output for every sample
    BPNetWork* network = BPCreate(layer_sizes, 4, 0.5);
    //1000 epochs, each presenting the three samples in order
    for (int epoch = 0; epoch < 1000; epoch++) {
        LoadIn(network, sample_a, target);
        RunOnce(network);
        TrainOnce(network);
        LoadIn(network, sample_b, target);
        RunOnce(network);
        TrainOnce(network);
        LoadIn(network, sample_c, target);
        RunOnce(network);
        TrainOnce(network);
    }
    //after training, run one unseen input through the network
    double test_in[1] = { 0.7 };//test input
    double dummy_target[1] = { 0.2 };//placeholder target (only feeds ETotal)
    LoadIn(network, test_in, dummy_target);
    RunOnce(network);
    printf("OK\n");
    printf("%g\n", ETotal(network));
    printf("%g", OF(4, 1));
    return 0;
}

訓練目標:

輸入任意數,輸出總為0.1

經過1000次訓練后的輸出:

OK
0.0101097//最終誤差
0.0994528//輸入0.7,輸出約為0.1

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM