A Simple Python Implementation of SGD, BGD, and MBGD


For the details of the algorithms themselves, refer to other blog posts (see the reference at the end); the update rules are summarized briefly below.
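
In brief, for the squared-error objective J(w) = 1/2 · Σ_j (wᵀx_j − y_j)², the three variants differ only in how many samples contribute to each weight update:

BGD:   w ← w − α · Σ_{j=1..m} (wᵀx_j − y_j) · x_j     (all m samples per update)
SGD:   w ← w − α · (wᵀx_j − y_j) · x_j                (one sample per update)
MBGD:  w ← w − α · Σ_{j∈B} (wᵀx_j − y_j) · x_j        (a random mini-batch B per update)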

Stochastic gradient descent:

# coding=utf-8
'''
Stochastic gradient descent (SGD)
'''
import numpy as np

# Construct the training data
x = np.arange(0., 10., 0.2)
m = len(x)
x0 = np.full(m, 1.0)
input_data = np.vstack([x0, x]).T  # treat the bias b as the first component of the weight vector
target_data = 3 * x + 8 + np.random.randn(m)

max_iter = 10000  # maximum number of iterations
epsilon = 1e-5

# Initialize the weights
w = np.random.randn(2)
# w = np.zeros(2)

alpha = 0.001  # step size (learning rate)
w_prev = np.zeros(2)  # weights from the previous epoch, used for the stopping test
count = 0  # loop counter

print('Stochastic Gradient Descent'.center(60, '='))

while count < max_iter:
    count += 1
    for j in range(m):
        diff = np.dot(w, input_data[j]) - target_data[j]  # prediction error on one training sample
        # The "stochastic" part: the weights are updated after every single sample!
        w = w - alpha * diff * input_data[j]

    if np.linalg.norm(w - w_prev) < epsilon:  # norm of the weight change, via the np.linalg package
        break
    else:
        w_prev = w
print('loop count = %d' % count, '\tw:[%f, %f]' % (w[0], w[1]))
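
The inner loop above visits the samples in the same fixed order every epoch. SGD is more commonly run over a freshly shuffled order; a minimal variation of the inner loop (a sketch, not from the original post):

# Visit the samples in a random order each epoch (common SGD practice)
for j in np.random.permutation(m):
    diff = np.dot(w, input_data[j]) - target_data[j]
    w = w - alpha * diff * input_data[j]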

Batch gradient descent:

# coding=utf-8
"""
Batch gradient descent (BGD)
"""
import numpy as np

# Construct the training data
x = np.arange(0., 10., 0.2)
m = len(x)
x0 = np.full(m, 1.0)
input_data = np.vstack([x0, x]).T  # treat the bias b as the first component of the weight vector
target_data = 3 * x + 8 + np.random.randn(m)

# Stopping conditions
max_iter = 10000
epsilon = 1e-5

# Initialize the weights
w = np.random.randn(2)
# w = np.zeros(2)

alpha = 0.001  # step size (learning rate)
w_prev = np.zeros(2)  # weights from the previous iteration, used for the stopping test
count = 0  # loop counter

while count < max_iter:
    count += 1

    sum_m = np.zeros(2)

    # Accumulate the gradient over ALL m samples before updating the weights
    for i in range(m):
        dif = (np.dot(w, input_data[i]) - target_data[i]) * input_data[i]
        sum_m = sum_m + dif

    w = w - alpha * sum_m  # one update per full pass over the data

    if np.linalg.norm(w - w_prev) < epsilon:
        break
    else:
        w_prev = w
print('loop count = %d' % count, '\tw:[%f, %f]' % (w[0], w[1]))
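
The accumulation loop above can also be collapsed into a single matrix expression. The following vectorized form is a sketch (not in the original post) that computes the same summed gradient:

# Vectorized equivalent of the accumulation loop above:
# input_data has shape (m, 2) and residuals has shape (m,), so
# input_data.T.dot(residuals) sums (w·x_i - y_i) * x_i over all i.
residuals = input_data.dot(w) - target_data
w = w - alpha * input_data.T.dot(residuals)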

Mini-batch gradient descent:

# coding=utf-8
"""
Mini-batch gradient descent (MBGD)
"""
import numpy as np
import random

# Construct the training data
x = np.arange(0., 10., 0.2)
m = len(x)
x0 = np.full(m, 1.0)
input_data = np.vstack([x0, x]).T  # treat the bias b as the first component of the weight vector
target_data = 3 * x + 8 + np.random.randn(m)

# Two stopping conditions
max_iter = 10000
epsilon = 1e-5

# Initialize the weights
np.random.seed(0)
w = np.random.randn(2)
# w = np.zeros(2)

alpha = 0.001  # step size (learning rate)
w_prev = np.zeros(2)  # weights from the previous iteration, used for the stopping test
count = 0  # loop counter

while count < max_iter:
    count += 1

    sum_m = np.zeros(2)
    # Draw a random mini-batch of about 20% of the samples
    index = random.sample(range(m), int(np.ceil(m * 0.2)))
    sample_data = input_data[index]
    sample_target = target_data[index]

    # Accumulate the gradient over the sampled mini-batch only
    for i in range(len(sample_data)):
        dif = (np.dot(w, sample_data[i]) - sample_target[i]) * sample_data[i]
        sum_m = sum_m + dif

    w = w - alpha * sum_m

    if np.linalg.norm(w - w_prev) < epsilon:
        break
    else:
        w_prev = w
print('loop count = %d' % count, '\tw:[%f, %f]' % (w[0], w[1]))
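
A common alternative to drawing a fresh random batch on every step is to shuffle the data once per epoch and sweep through it in fixed-size batches. A minimal sketch of that pattern (not from the original post; the batch size of 10 is an arbitrary illustrative choice):

batch_size = 10  # arbitrary illustrative choice
perm = np.random.permutation(m)  # shuffle the sample indices once per epoch
for start in range(0, m, batch_size):
    batch = perm[start:start + batch_size]
    residuals = input_data[batch].dot(w) - target_data[batch]  # per-sample errors
    w = w - alpha * input_data[batch].T.dot(residuals)  # one mini-batch gradient step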

After the iterations, the weights converge to the true values 8 (the bias) and 3 (the slope):

loop count = 704     w:[8.025972, 2.982300]
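
As a sanity check (a sketch added here, not part of the original post), the result can be compared against the closed-form least-squares solution, which should also land near [8, 3]:

# Closed-form least-squares fit on the same data, for comparison
w_ls, *_ = np.linalg.lstsq(input_data, target_data, rcond=None)
print('least squares w:[%f, %f]' % (w_ls[0], w_ls[1]))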

 

Reference: http://www.cnblogs.com/pinard/p/5970503.html

