14. Improving Deep Neural Networks: Gradient Checking


What is gradient checking?

  Gradient checking is a way to verify that a neural network implementation is free of bugs. The backpropagation algorithm involves many details, and it is easy to introduce subtle bugs when implementing it. Often these bugs do not stop the program from running, and the loss even appears to keep decreasing. In the end, though, the error of the buggy program can be an order of magnitude higher than that of a bug-free one, and the final result may not be optimal.

How gradient checking works

  Gradient checking obtains an approximate gradient by a simple method and compares it with the gradient computed by backpropagation. If the two are very close, the gradient is considered correct; otherwise, it is considered wrong.

  Put J(θ) and θ on a Cartesian plane; the derivative of J(θ) at a fixed value of θ is the slope of the curve there. Let ε be a very small value. The slope of the secant line through the points (θ−ε, J(θ−ε)) and (θ+ε, J(θ+ε)) approximates that derivative:

    dJ/dθ ≈ (J(θ+ε) − J(θ−ε)) / (2ε)

  As ε → 0, this approaches the definition of the derivative:

    dJ/dθ = lim(ε→0) (J(θ+ε) − J(θ−ε)) / (2ε)

  In practice, θ is usually a vector, and gradient descent requires us to compute the partial derivative with respect to each component. Each partial derivative can be approximated with the same kind of formula:

    ∂J/∂θᵢ ≈ (J(θ₁, …, θᵢ+ε, …, θₙ) − J(θ₁, …, θᵢ−ε, …, θₙ)) / (2ε)
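  As a concrete numeric example (a minimal sketch, assuming a toy cost J(θ) = θ², whose exact derivative is 2θ):

def J(theta):
    # Toy cost used only for this illustration
    return theta ** 2

theta, eps = 3.0, 1e-7
gradapprox = (J(theta + eps) - J(theta - eps)) / (2 * eps)
print(gradapprox)   # approximately 6.0
print(2 * theta)    # exact derivative: 6.0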

Gradient checking in code

  1. Building a model to judge payment reliability

  First, import the required libraries:

import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector

  The code of the gc_utils.py and testCases.py files used here is as follows:

import numpy as np

def sigmoid(x):
    """
    Compute the sigmoid of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    s = 1/(1+np.exp(-x))
    return s

def relu(x):
    """
    Compute the relu of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x)
    """
    s = np.maximum(0,x)
    
    return s

def dictionary_to_vector(parameters):
    """
    Roll all our parameters dictionary into a single vector satisfying our specific required shape.
    """
    keys = []
    count = 0
    for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        
        # flatten parameter
        new_vector = np.reshape(parameters[key], (-1,1))
        keys = keys + [key]*new_vector.shape[0]
        
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1

    return theta, keys

def vector_to_dictionary(theta):
    """
    Unroll all our parameters dictionary from a single vector satisfying our specific required shape.
    """
    parameters = {}
    parameters["W1"] = theta[:20].reshape((5,4))
    parameters["b1"] = theta[20:25].reshape((5,1))
    parameters["W2"] = theta[25:40].reshape((3,5))
    parameters["b2"] = theta[40:43].reshape((3,1))
    parameters["W3"] = theta[43:46].reshape((1,3))
    parameters["b3"] = theta[46:47].reshape((1,1))

    return parameters

def gradients_to_vector(gradients):
    """
    Roll all our gradients dictionary into a single vector satisfying our specific required shape.
    """
    
    count = 0
    for key in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]:
        # flatten parameter
        new_vector = np.reshape(gradients[key], (-1,1))
        
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1

    return theta
The block above is gc_utils.py. The block below is testCases.py:
import numpy as np

def gradient_check_n_test_case(): 
    np.random.seed(1)
    x = np.random.randn(4,3)
    y = np.array([1, 1, 0])
    W1 = np.random.randn(5,4) 
    b1 = np.random.randn(5,1) 
    W2 = np.random.randn(3,5) 
    b2 = np.random.randn(3,1) 
    W3 = np.random.randn(1,3) 
    b3 = np.random.randn(1,1) 
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    
    return x, y, parameters
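  As a quick sanity check (a sketch using the helpers just defined): the parameter layout is W1 (5,4), b1 (5,1), W2 (3,5), b2 (3,1), W3 (1,3), b3 (1,1), i.e. 20 + 5 + 15 + 3 + 3 + 1 = 47 components, which is exactly the index layout hard-coded in vector_to_dictionary.

x, y, parameters = gradient_check_n_test_case()
theta, keys = dictionary_to_vector(parameters)
print(theta.shape)   # (47, 1)
print(len(keys))     # 47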

  Let's first do a simple one-dimensional gradient check, and then move on to the N-dimensional case; this order makes it easier to understand.

  Suppose we have a simple one-dimensional linear function J(θ) = θx. This function (this model) has only one parameter θ, and x is the input. Below, we will write code to compute J(·) (the cost, via forward propagation) and then compute dJ/dθ (the gradient, via backward propagation). Finally, we will use gradient checking to confirm that the gradient computed by backpropagation is correct.

The key steps are: take the input x, compute J(θ) = θx (forward propagation), then compute the gradient dJ/dθ (backward propagation). The code is as follows:

# Forward propagation
def forward_propagation(x, theta):

    J = np.dot(theta, x)

    return J

x, theta = 2, 4
J = forward_propagation(x, theta)
print("J = " + str(J))

Output: J = 8

# Backward propagation
def backward_propagation(x, theta):

    # The derivative of J(theta) = theta * x with respect to theta is just x.
    # This follows from basic calculus; if you have not studied calculus,
    # don't worry about why -- that is not the point here.
    dtheta = x

    return dtheta

x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print("dtheta = " + str(dtheta))

Output: dtheta = 2

  Now we use gradient checking to confirm that the gradient dtheta computed by backpropagation above is correct. The main steps are as follows:

def gradient_check(x, theta, epsilon=1e-7):

    # Approximate the gradient with the two-sided difference (forward propagation only)
    thetaplus = theta + epsilon
    thetaminus = theta - epsilon
    J_plus = forward_propagation(x, thetaplus)
    J_minus = forward_propagation(x, thetaminus)
    gradapprox = (J_plus - J_minus) / (2 * epsilon)

    # Compute the gradient with backward propagation
    grad = backward_propagation(x, theta)

    # Measure the relative difference between the two gradients
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print("Backpropagation is correct!")
    else:
        print("Backpropagation has a problem!")

    return difference

x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))

Output:
Backpropagation is correct!
difference = 2.919335883291695e-10
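  To see the check catch a mistake, here is a small sketch (not part of the original exercise) that plants a bug in the one-dimensional backward pass:

# A deliberately broken backward pass: the true derivative of J = theta * x
# with respect to theta is x, so returning x + 1 is wrong
def buggy_backward_propagation(x, theta):
    return x + 1

x, theta = 2, 4
grad = buggy_backward_propagation(x, theta)      # 3 instead of 2
J_plus = forward_propagation(x, theta + 1e-7)
J_minus = forward_propagation(x, theta - 1e-7)
gradapprox = (J_plus - J_minus) / (2 * 1e-7)     # approximately 2
difference = abs(grad - gradapprox) / (abs(grad) + abs(gradapprox))
print(difference)   # about 0.2 -- far above the 1e-7 threshold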
Usually, however, the cost function of a neural network does not have just a single one-dimensional parameter. In a neural network model, θ is typically made up of several matrices W[l] and vectors b[l]. So it is important to learn how to run gradient checking over multi-dimensional parameters. Let's do that now!

This payment-reliability prediction model is a three-layer network (LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID). Below is the code implementation of its forward and backward propagation:

def forward_propagation_n(X, Y, parameters):

    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)
    
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    
    return cost, cache

def backward_propagation_n(X, Y, cache):
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    
    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
    
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1. / m * np.dot(dZ2, A1.T) * 2  # ~~ planted bug: stray factor of 2
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
    
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 4. / m * np.sum(dZ1, axis=1, keepdims=True)  # ~~ planted bug: should be 1./m
    
    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
    
    return gradients
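  The two lines marked # ~~ above are planted bugs that the gradient check is meant to catch. For reference (matching the formulas used in the other layers of the same function), the corrected lines inside backward_propagation_n would read:

    dW2 = 1. / m * np.dot(dZ2, A1.T)                     # no stray factor of 2
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)    # 1./m, not 4./m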

  Now for the multi-dimensional gradient check.

  In the multi-dimensional check, θ is no longer a single number but a dictionary containing many parameters. The function dictionary_to_vector() converts this dictionary into one vector: it reshapes each parameter (W1, b1, W2, b2, W3, b3) into a column and concatenates them into one big vector, which the code calls "values". The inverse function vector_to_dictionary() converts that vector back into dictionary form. Both functions are already listed in gc_utils.py above; a quick round-trip sanity check is sketched below.

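  A quick round-trip sanity check (a sketch using the test case and helpers above): flattening the parameters and rebuilding them should leave every entry unchanged.

x, y, parameters = gradient_check_n_test_case()
theta, _ = dictionary_to_vector(parameters)
rebuilt = vector_to_dictionary(theta)
for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
    assert np.allclose(parameters[key], rebuilt[key])
print("round trip OK")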

  The gradient-checking function itself is as follows:

def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):

    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Compute gradapprox: nudge one parameter component at a time by +/- epsilon
    for i in range(num_parameters):
        thetaplus = np.copy(parameters_values)
        thetaplus[i][0] = thetaplus[i][0] + epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))

        thetaminus = np.copy(parameters_values)
        thetaminus[i][0] = thetaminus[i][0] - epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    # Relative difference between the backprop gradient and the approximation
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference > 2e-7:
        print("\033[93m" + "Backpropagation has a problem! difference = " + str(difference) + "\033[0m")
    else:
        print("\033[92m" + "Backpropagation works perfectly! difference = " + str(difference) + "\033[0m")

    return difference

X, Y, parameters = gradient_check_n_test_case()

cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)

Output: Backpropagation has a problem! difference = 0.2850931566540251

  The check fails here precisely because of the two planted bugs in backward_propagation_n (the lines marked # ~~). After correcting those two lines, the difference should fall below the 2e-7 threshold.

Notes:

  • Gradient checking is slow. Approximating the gradient with (J(θ+ε) − J(θ−ε)) / (2ε) is computationally very expensive, because it needs two extra forward passes for every single parameter component. So we do not run gradient checking at every training iteration, only occasionally.
  • Gradient checking cannot coexist with dropout. Turn dropout off while running the check, and turn it back on once the check passes; a toy illustration of why is sketched below.
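  The sketch below (a toy illustration, not the model above) shows why: the hypothetical cost function redraws a random dropout mask on every call, so the two evaluations in the two-sided difference see different networks.

import numpy as np

def cost_with_dropout(theta, keep_prob=0.5):
    # A fresh random mask is drawn on every call, just as dropout would do
    mask = np.random.rand() < keep_prob
    return theta * 2.0 * mask / keep_prob

eps, theta = 1e-7, 3.0

# Dropout on: J(theta + eps) and J(theta - eps) may see different masks,
# so the two-sided difference is meaningless (huge, zero, or accidentally right)
noisy = (cost_with_dropout(theta + eps) - cost_with_dropout(theta - eps)) / (2 * eps)

# Dropout off: keep_prob = 1.0 keeps every unit, the cost is deterministic,
# and the difference recovers the true derivative, 2.0
clean = (cost_with_dropout(theta + eps, keep_prob=1.0) -
         cost_with_dropout(theta - eps, keep_prob=1.0)) / (2 * eps)

print("with dropout:    " + str(noisy))
print("without dropout: " + str(clean))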

**Key points to remember from this exercise**:

  • Gradient checking computes an approximate gradient using only forward propagation (the two-sided difference), then compares it with the gradient computed by backpropagation to judge whether the latter is correct.

  • Gradient checking is computationally expensive, so enable it only when you need to verify that the code is correct; once the code is confirmed correct, turn it off. A minimal usage pattern is sketched below.
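  As a minimal usage pattern (a sketch, not from the original post; it assumes the two planted bugs above have been fixed so the assertion passes):

DEBUG_CHECK_GRADIENTS = True   # flip to False once backprop is verified

if DEBUG_CHECK_GRADIENTS:
    # Run the check once on a small batch; the training loop never calls it
    cost, cache = forward_propagation_n(X, Y, parameters)
    gradients = backward_propagation_n(X, Y, cache)
    assert gradient_check_n(parameters, gradients, X, Y) < 2e-7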

 

Reference: https://gitee.com/bijingrui1997/deep_learning_notes/blob/master/

