Implementing a BP Neural Network with NumPy


In this post, we use only NumPy to build a simple neural network with an input layer, one hidden layer, and an output layer. We choose sigmoid as the activation function and the squared-error loss, and finally train and test the network on the MNIST dataset.

1. Formula Derivation

The squared-error loss function (the factor of 1/2 simplifies the derivative):

\[loss = J(W,b,x,y)=\frac{1}{2}||a^L-y||^2 \]
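In NumPy this is a one-liner (a minimal sketch, where a2 is the network output and y the one-hot label, both of shape m x 10):

loss = 0.5 * np.sum((a2 - y) ** 2)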

Forward propagation:

\[ z^l = W^la^{l-1}+b^l \\ a^l=\sigma(z^l) \]

Backpropagation:

\[ \frac{\partial J}{\partial W^l}=\delta^l(a^{l-1})^T \\ \frac{\partial J}{\partial b^l}=\delta^l \\ \delta^l=[(W^{l+1})^T\delta^{l+1}]\odot\sigma'(z^l)\\ \delta^L=(a^L-y)\odot\sigma'(z^L) \]
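These formulas use the column-vector convention (\(z^l = W^la^{l-1}+b^l\)), while the code below stores each batch as rows, so the products appear transposed: z = XW + b and dW = a^T d. A finite-difference gradient check is a quick way to confirm the backpropagation formulas. The following is a self-contained sketch on a tiny 2-3-2 network; all names in it are illustrative and not part of the final code:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Tiny 2-3-2 network, one sample, row-vector convention
rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(2, 3)), rng.normal(size=3)
W2, b2 = rng.normal(size=(3, 2)), rng.normal(size=2)
x, y = rng.normal(size=(1, 2)), np.array([[1.0, 0.0]])

def loss_fn(W1):
    a1 = sigmoid(x @ W1 + b1)
    a2 = sigmoid(a1 @ W2 + b2)
    return 0.5 * np.sum((a2 - y) ** 2)

# Analytic gradient from the formulas above (a * (1 - a) equals sigma'(z))
a1 = sigmoid(x @ W1 + b1)
a2 = sigmoid(a1 @ W2 + b2)
d2 = (a2 - y) * a2 * (1 - a2)
d1 = (d2 @ W2.T) * a1 * (1 - a1)
dW1 = x.T @ d1

# Numerical gradient for one entry of W1
eps = 1e-6
Wp, Wm = W1.copy(), W1.copy()
Wp[0, 0] += eps
Wm[0, 0] -= eps
numerical = (loss_fn(Wp) - loss_fn(Wm)) / (2 * eps)
print(np.isclose(dW1[0, 0], numerical))   # True if the formulas match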

2. Utility Functions

These include the sigmoid activation function, its first derivative, and a function that one-hot encodes the labels:

# One-hot encode the labels (10 classes, hardcoded for MNIST)
def onehot(targets, num):
    result = np.zeros((num, 10))
    for i in range(num):
        result[i][targets[i]] = 1
    return result
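For example, the labels [3, 0] map to rows with a single 1 at indices 3 and 0:

print(onehot(np.array([3, 0]), 2))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]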

# Sigmoid activation
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# First derivative of sigmoid
def Dsigmoid(x):
    return sigmoid(x) * (1 - sigmoid(x))
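One caveat: np.exp(-x) overflows for large negative x. This rarely matters here because the weights are initialized small and the inputs are scaled to [0, 1], but a numerically stable variant (a sketch; sigmoid_stable is a name introduced for illustration) splits on the sign of x:

def sigmoid_stable(x):
    # Only exponentiate non-positive arguments to avoid overflow
    out = np.empty_like(x, dtype=float)
    pos = x >= 0
    out[pos] = 1 / (1 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1 + ex)
    return out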

3. Network Implementation

In the code, self.d2 and d1 correspond to the \(\delta\) terms in the formulas above; the remaining variables are named directly after the formulas:

class NN(object):
    def __init__(self, l0, l1, l2):
        self.lr = 0.1                                        # learning rate
        self.W1 = np.random.randn(l0, l1) * 0.01             # small random initialization
        self.b1 = np.random.randn(l1) * 0.01
        self.W2 = np.random.randn(l1, l2) * 0.01
        self.b2 = np.random.randn(l2) * 0.01

    # Forward propagation
    def forward(self, X, y):
        self.X = X                                           # m x 784
        self.z1 = np.dot(X, self.W1) + self.b1               # m x 500; 500 is the number of hidden units
        self.a1 = sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2         # m x 10
        self.a2 = sigmoid(self.z2)
        loss = np.sum((self.a2 - y) * (self.a2 - y)) / 2     # squared-error loss
        self.d2 = (self.a2 - y) * Dsigmoid(self.z2)          # m x 10, output-layer delta, cached for backprop
        return loss, self.a2

    # Backpropagation
    def backward(self):
        dW2 = np.dot(self.a1.T, self.d2) / 3                  # 500 x 10; /3 averages over the batch (batchsize=3, hardcoded)
        db2 = np.sum(self.d2, axis=0) / 3                     # 10
        d1 = np.dot(self.d2, self.W2.T) * Dsigmoid(self.z1)   # m x 500, hidden-layer delta
        dW1 = np.dot(self.X.T, d1) / 3                        # 784 x 500
        db1 = np.sum(d1, axis=0) / 3                          # 500

        self.W2 -= self.lr * dW2
        self.b2 -= self.lr * db2
        self.W1 -= self.lr * dW1
        self.b1 -= self.lr * db1
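A quick shape check (a sketch with random data; the batch size of 3 matches the hardcoded divisor in backward) confirms that one training step runs:

net = NN(784, 500, 10)
X = np.random.rand(3, 784)                     # a batch of 3 flattened images
y = onehot(np.random.randint(0, 10, 3), 3)     # random one-hot labels, (3, 10)
loss, out = net.forward(X, y)
net.backward()
print(loss, out.shape)                         # scalar loss, (3, 10)

To train with a different batch size, the hardcoded 3 in backward would have to be replaced, e.g. by m = self.X.shape[0].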

4. Training and Testing

We load MNIST through torchvision's bundled dataset. After training, the weight parameters are saved to a file; at test time they are read back from the file. The final test accuracy reaches 96.48%.

def train():
    nn = NN(784, 500, 10)

    for epoch in range(10):
        for i in range(0, 60000, 3):
            X = train_data.data[i:i + 3]
            y = train_data.targets[i:i + 3]
            loss, _ = nn.forward(X, y)
            print("Epoch:", epoch, "-", i, ":", "{:.3f}".format(loss))
            nn.backward()
        np.savez("data.npz", w1=nn.W1, b1=nn.b1, w2=nn.W2, b2=nn.b2)

def test():
    r = np.load("data.npz")
    nn = NN(784, 500, 10)
    nn.W1 = r["w1"]
    nn.b1 = r["b1"]
    nn.W2 = r["w2"]
    nn.b2 = r["b2"]
    _, result = nn.forward(test_data.data, test_data.targets2)
    result = np.argmax(result, axis=1)
    accuracy = np.sum(result == test_data.targets) / 10000   # fraction of correct predictions
    print("Accuracy:", accuracy)

# MNIST handwritten digit dataset (set download=True on the first run)
train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, download=False)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
train_data.data = train_data.data.numpy()         # [60000, 28, 28]
train_data.targets = train_data.targets.numpy()   # [60000]
test_data.data = test_data.data.numpy()           # [10000, 28, 28]
test_data.targets = test_data.targets.numpy()     # [10000]

# Flatten the inputs and scale them to [0, 1]
train_data.data = train_data.data.reshape(60000, 28 * 28) / 255.  # (60000, 784)
test_data.data = test_data.data.reshape(10000, 28 * 28) / 255.

# One-hot encode the labels
train_data.targets = onehot(train_data.targets, 60000) # (60000, 10)
test_data.targets2 = onehot(test_data.targets, 10000)  # targets2 is used in the forward pass; integer targets are kept for accuracy

train()
#test()

5. Complete Code

import torchvision
import numpy as np

# One-hot encode the labels (10 classes, hardcoded for MNIST)
def onehot(targets, num):
    result = np.zeros((num, 10))
    for i in range(num):
        result[i][targets[i]] = 1
    return result

# Sigmoid activation
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# First derivative of sigmoid
def Dsigmoid(x):
    return sigmoid(x) * (1 - sigmoid(x))


class NN(object):
    def __init__(self, l0, l1, l2):
        self.lr = 0.1                                        # learning rate
        self.W1 = np.random.randn(l0, l1) * 0.01             # small random initialization
        self.b1 = np.random.randn(l1) * 0.01
        self.W2 = np.random.randn(l1, l2) * 0.01
        self.b2 = np.random.randn(l2) * 0.01

    # Forward propagation
    def forward(self, X, y):
        self.X = X                                           # m x 784
        self.z1 = np.dot(X, self.W1) + self.b1               # m x 500; 500 is the number of hidden units
        self.a1 = sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2         # m x 10
        self.a2 = sigmoid(self.z2)
        loss = np.sum((self.a2 - y) * (self.a2 - y)) / 2     # squared-error loss
        self.d2 = (self.a2 - y) * Dsigmoid(self.z2)          # m x 10, output-layer delta, cached for backprop
        return loss, self.a2

    # Backpropagation
    def backward(self):
        dW2 = np.dot(self.a1.T, self.d2) / 3                  # 500 x 10; /3 averages over the batch (batchsize=3, hardcoded)
        db2 = np.sum(self.d2, axis=0) / 3                     # 10
        d1 = np.dot(self.d2, self.W2.T) * Dsigmoid(self.z1)   # m x 500, hidden-layer delta
        dW1 = np.dot(self.X.T, d1) / 3                        # 784 x 500
        db1 = np.sum(d1, axis=0) / 3                          # 500

        self.W2 -= self.lr * dW2
        self.b2 -= self.lr * db2
        self.W1 -= self.lr * dW1
        self.b1 -= self.lr * db1


def train():
    nn = NN(784, 500, 10)

    for epoch in range(10):
        for i in range(0, 60000, 3):
            X = train_data.data[i:i + 3]
            y = train_data.targets[i:i + 3]
            loss, _ = nn.forward(X, y)
            print("Epoch:", epoch, "-", i, ":", "{:.3f}".format(loss))
            nn.backward()
        np.savez("data.npz", w1=nn.W1, b1=nn.b1, w2=nn.W2, b2=nn.b2)

def test():
    r = np.load("data.npz")
    nn = NN(784, 500, 10)
    nn.W1 = r["w1"]
    nn.b1 = r["b1"]
    nn.W2 = r["w2"]
    nn.b2 = r["b2"]
    _, result = nn.forward(test_data.data, test_data.targets2)
    result = np.argmax(result, axis=1)
    accuracy = np.sum(result == test_data.targets) / 10000   # fraction of correct predictions
    print("Accuracy:", accuracy)

if __name__ == '__main__':

    # MNIST handwritten digit dataset (set download=True on the first run)
    train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, download=False)
    test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
    train_data.data = train_data.data.numpy()         # [60000, 28, 28]
    train_data.targets = train_data.targets.numpy()   # [60000]
    test_data.data = test_data.data.numpy()           # [10000, 28, 28]
    test_data.targets = test_data.targets.numpy()     # [10000]

    # Flatten the inputs and scale them to [0, 1]
    train_data.data = train_data.data.reshape(60000, 28 * 28) / 255.  # (60000, 784)
    test_data.data = test_data.data.reshape(10000, 28 * 28) / 255.

    # One-hot encode the labels
    train_data.targets = onehot(train_data.targets, 60000) # (60000, 10)
    test_data.targets2 = onehot(test_data.targets, 10000)  # targets2 is used in the forward pass; integer targets are kept for accuracy

    train()
    #test()

