Linear Regression with Stochastic Gradient Descent (SGD) (TensorFlow 2.1)


A class-based implementation (see reference link).

import tensorflow as tf

# 100 scalar samples as a column vector; Dense(1) then maps (100, 1) -> (100, 1),
# so predictions and targets have matching shapes.
x_data = tf.random.uniform((100, 1), -1.0, 1.0)
y_data = x_data * 0.1 + 0.3

class Linear(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(
            units=1,
            activation=None,
            kernel_initializer=tf.zeros_initializer(),
            bias_initializer=tf.zeros_initializer()
        )

    def call(self, inputs):
        output = self.dense(inputs)
        return output

model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2)

for i in range(100):
    with tf.GradientTape() as tape:
        y_pred = model(x_data)
        loss = tf.reduce_mean(tf.square(y_pred - y_data))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
    if i % 20 == 0:
        print(i,":loss == ", loss)

 

--------------------

An updated version: the gradient step is rewritten with explicit variables and GradientTape.

import tensorflow as tf
import numpy as np

# name: create data
# function:
#   np.random.rand()
#       1. With no arguments, returns a single float.
#       2. With one argument, returns a rank-1 array (which represents neither a row nor a column vector).
#       3. With two or more arguments, returns an array of the corresponding shape, i.e. a vector or matrix.
#       4. Samples are drawn from the uniform distribution on [0, 1); 1 is excluded.
#   astype()
#       1. Converts the array to the given data type.
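#   Illustrative shapes (an added aside, not part of the training code):
#       np.random.rand()        -> a single float in [0, 1)
#       np.random.rand(4)       -> ndarray of shape (4,)
#       np.random.rand(2, 3)    -> ndarray of shape (2, 3)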
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# name: create tensorflow structure
# function:
#   tf.Variable()
#       1. tf.Variable(initializer, name): initializer provides the initial value, name is an optional variable name.
#   tf.random.uniform()
#       tf.random.uniform(shape, minval=0, maxval=None, dtype=tf.dtypes.float32, seed=None, name=None)
#       shape gives the dimensions of the output tensor
#   tf.zeros()
#       tf.zeros(shape, dtype=tf.dtypes.float32, name=None)
Weights = tf.Variable(tf.random.uniform((1,), -1.0, 1.0))
biases = tf.Variable(tf.zeros((1,)))

variables = [Weights, biases]

num_epoch = 1000
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
for e in range(num_epoch):
    with tf.GradientTape() as tape:
        y_pre = Weights * x_data + biases
        loss = 0.5 * tf.reduce_sum(tf.square(y_pre - y_data))
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
    if e % 20 == 0:
        print(e, ": loss == ", loss)

-------------------------

import tensorflow as tf
import numpy as np

# name: create data (see the np.random.rand / astype notes in the previous example)
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# name: create tensorflow structure (see the tf.Variable / tf.random.uniform / tf.zeros notes in the previous example)
Weights = tf.Variable(tf.random.uniform((1,), -1.0, 1.0))
biases = tf.Variable(tf.zeros((1,)))

# name: loss function
# function:
#   tf.keras.losses.MSE()
#       tf.keras.losses.MSE(y_true, y_pred)
#       y_true is the ground truth, y_pred is the prediction
#   tf.keras.optimizers.SGD()  stochastic gradient descent
#       tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD', **kwargs)
def loss():
    return tf.keras.losses.MSE(y_data, Weights * x_data + biases)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.5)

# minimize()
# minimize() performs two steps:
#   1. Compute the gradients of loss with respect to var_list, returning a list of
#      (gradient, variable) pairs: compute_gradients(loss, var_list)
#      Note: tf.gradients(loss, variables) is similar to compute_gradients(loss, var_list),
#      but returns only the gradients.
#   2. Apply the computed gradients to update the corresponding variables (weights):
#      optimizer.apply_gradients(grads_and_vars, name=None) takes the output of
#      step 1 as input and updates each variable.
# Because the gradient computation is hidden inside minimize(), this style can mask
# exploding or vanishing gradients (see the explicit two-step sketch after the loop below).
# Reference: https://blog.csdn.net/sinat_37386947/article/details/88849519
for step in range(201):
    optimizer.minimize(loss, var_list=[Weights, biases])
    if step % 20 == 0:
        print("{} step, weights = {}, biases = {}".format(step, Weights.read_value(), biases.read_value()))  # read_value函數可用numpy替換

 

