[Notes] Learning the PyTorch Framework -- 2. Computational Graphs, autograd, and Implementing Logistic Regression


1. Computational Graphs

The main purpose of using a computational graph is to make gradient computation more convenient: the graph records tensors as nodes and operations as edges, so gradients can be derived automatically by applying the chain rule along the recorded operations.

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)     # call a.retain_grad() here to keep a.grad after backward
b = torch.add(w, 1)
y = torch.mul(a, b)

y.backward()
print(w.grad) # tensor([5.])
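
# chain rule check: y = (w + x) * (w + 1), so
# dy/dw = (w + 1) + (w + x) = 2 + 3 = 5 and dy/dx = w + 1 = 2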

# inspect which tensors are leaf nodes
print("is_leaf:\n", w.is_leaf, x.is_leaf, a.is_leaf, b.is_leaf, y.is_leaf)
# is_leaf: True True False False False

# inspect the gradients
print("gradient:\n", w.grad, x.grad, a.grad, b.grad, y.grad)
# gradient: tensor([5.]) tensor([2.]) None None None

# inspect grad_fn (the operation that created each tensor)
print("grad_fn:\n", w.grad_fn, x.grad_fn, a.grad_fn, b.grad_fn, y.grad_fn)
# grad_fn:
# None 
# None 
# <AddBackward0 object at 0x00000258F55C28D0> 
# <AddBackward0 object at 0x00000258F55C2A58> 
# <MulBackward0 object at 0x00000258F55D5518>

2. Static Graphs vs. Dynamic Graphs

TensorFlow (1.x) uses static graphs and PyTorch uses dynamic graphs; the difference is whether the graph is built before the computation runs. A static graph is defined in full first and then executed, while a dynamic graph is constructed on the fly as each operation executes.
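
A minimal sketch (not from the original notes) of what "dynamic" means in practice: ordinary Python control flow runs while the graph is being built, so the traced graph can differ from one forward pass to the next.

import torch

x = torch.tensor([2.], requires_grad=True)
# the branch is decided at run time; only the executed path enters the graph
y = x * 3 if x.item() > 0 else x * (-3)
y.backward()
print(x.grad) # tensor([3.])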

3. autograd: Automatic Differentiation

import torch
torch.manual_seed(10)

# non-leaf nodes do not retain their gradients by default unless explicitly requested (e.g. with retain_grad())
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)

# if the graph is not retained, backward cannot be called a second time
y.backward(retain_graph=True)
print(w.grad) # tensor([5.])
y.backward()
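print(w.grad) # tensor([10.]) -- the second backward adds another 5, because gradients accumulate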

Using grad_tensors to weight multiple outputs:

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)     # call a.retain_grad() here to keep a.grad after backward
b = torch.add(w, 1)

y0 = torch.mul(a, b)    # y0 = (x+w) * (w+1)
y1 = torch.add(a, b)    # y1 = (x+w) + (w+1)    dy1/dw = 2

loss = torch.cat([y0, y1], dim=0)       # [y0, y1]
print(loss) # tensor([6., 5.], grad_fn=<CatBackward>)

grad_tensors = torch.tensor([1., 2.])

loss.backward(gradient=grad_tensors)    # gradient is passed through to the grad_tensors argument of torch.autograd.backward()

print(w.grad) # tensor([9.])
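
# w.grad is the weighted sum of the per-output gradients:
# w.grad = 1 * dy0/dw + 2 * dy1/dw = 1 * 5 + 2 * 2 = 9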

x = torch.tensor([3.], requires_grad=True)
y = torch.pow(x, 2)     # y = x**2

# create_graph=True builds a graph for the derivative itself, which is required for second-order differentiation
grad_1 = torch.autograd.grad(y, x, create_graph=True)   # grad_1 = dy/dx = 2x = 2 * 3 = 6
print(grad_1) # (tensor([6.], grad_fn=<MulBackward0>),)

grad_2 = torch.autograd.grad(grad_1[0], x)              # grad_2 = d(dy/dx)/dx = d(2x)/dx = 2, the second derivative
print(grad_2) # (tensor([2.]),)
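
# note: torch.autograd.grad returns the gradients as a tuple rather than accumulating them into .grad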

# 1. Gradients are not cleared automatically
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

for i in range(4):
    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    y.backward()
    print(w.grad)  # without zeroing: tensor([5.]) tensor([10.]) tensor([15.]) tensor([20.])

    w.grad.zero_() # with zero_(), every iteration prints tensor([5.])
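# in a full training loop, optimizer.zero_grad() is the usual way to clear all parameter gradients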
# 2. Nodes that depend on leaf nodes have requires_grad=True by default
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)

print(a.requires_grad, b.requires_grad, y.requires_grad) # True True True
# check the memory address: `a += ...` modifies the tensor in place (same address),
# while `a = a + ...` creates a new tensor at a new address
a = torch.ones((1, ))
print(id(a), a) # 2158573461368 tensor([1.])

# a = a + torch.ones((1, ))
# print(id(a), a) # 1939675405912 tensor([2.]) -- the address changed

a += torch.ones((1, ))
print(id(a), a) # 2158573461368 tensor([2.])

#===========================================

# 3. Leaf nodes must not be modified in place: backward needs the values recorded
#    during the forward pass, so changing a leaf in place would corrupt them
w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)

w.add_(1) # in-place operation on a leaf that requires grad; raises a RuntimeError
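# RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.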

y.backward()

4. Logistic Regression

Logistic regression passes a linear combination of the inputs through a sigmoid, p = 1 / (1 + exp(-(w·x + b))), and classifies by thresholding p at 0.5; this is what makes it a binary classifier.

import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
torch.manual_seed(10)


# ============================ step 1/5 generate data ============================
sample_nums = 100
mean_value = 1.7
bias = 1
n_data = torch.ones(sample_nums, 2)
x0 = torch.normal(mean_value * n_data, 1) + bias      # class-0 samples, shape (100, 2)
y0 = torch.zeros(sample_nums)                         # class-0 labels, shape (100,)
x1 = torch.normal(-mean_value * n_data, 1) + bias     # class-1 samples, shape (100, 2)
y1 = torch.ones(sample_nums)                          # class-1 labels, shape (100,)
train_x = torch.cat((x0, x1), 0)
train_y = torch.cat((y0, y1), 0)


# ============================ step 2/5 choose the model ============================
class LR(nn.Module):
    def __init__(self):
        super(LR, self).__init__()
        self.features = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.features(x)
        x = self.sigmoid(x)
        return x


lr_net = LR()   # instantiate the logistic regression model


# ============================ step 3/5 choose the loss function ============================
loss_fn = nn.BCELoss()
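# BCELoss computes -[y*log(p) + (1-y)*log(1-p)] and expects inputs already in (0, 1)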

# ============================ step 4/5 choose the optimizer ============================
lr = 0.01  # learning rate
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)

# ============================ step 5/5 train the model ============================
for iteration in range(1000):

    # forward pass
    y_pred = lr_net(train_x)

    # compute the loss
    loss = loss_fn(y_pred.squeeze(), train_y)

    # backward pass
    loss.backward()

    # update the parameters
    optimizer.step()

    # clear the gradients
    optimizer.zero_grad()

    # plot every 20 iterations
    if iteration % 20 == 0:

        mask = y_pred.ge(0.5).float().squeeze()  # classify using a threshold of 0.5
        correct = (mask == train_y).sum()  # count correctly predicted samples
        acc = correct.item() / train_y.size(0)  # classification accuracy

        plt.scatter(x0.data.numpy()[:, 0], x0.data.numpy()[:, 1], c='r', label='class 0')
        plt.scatter(x1.data.numpy()[:, 0], x1.data.numpy()[:, 1], c='b', label='class 1')

        w0, w1 = lr_net.features.weight[0]
        w0, w1 = float(w0.item()), float(w1.item())
        plot_b = float(lr_net.features.bias[0].item())
        plot_x = np.arange(-6, 6, 0.1)
        plot_y = (-w0 * plot_x - plot_b) / w1

        plt.xlim(-5, 7)
        plt.ylim(-7, 7)
        plt.plot(plot_x, plot_y)

        plt.text(-5, 5, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.title("Iteration: {}\nw0:{:.2f} w1:{:.2f} b: {:.2f} accuracy:{:.2%}".format(iteration, w0, w1, plot_b, acc))
        plt.legend()

        plt.show()
        plt.pause(0.5)

        if acc > 0.99:
            break
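
A minimal inference sketch (not part of the original notes; the input point is hypothetical): disable gradient tracking with torch.no_grad() and threshold the sigmoid output at 0.5, just as the training loop does when computing accuracy.

with torch.no_grad():
    new_point = torch.tensor([[1.5, 2.0]])  # hypothetical 2-d input
    prob = lr_net(new_point)                # estimated P(class 1 | x)
    pred = (prob >= 0.5).float()            # 1.0 -> class 1, 0.0 -> class 0
    print(prob.item(), pred.item())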

Final result: (the original post displays the final decision-boundary plot here; figure omitted)

Questions to think about:

  1. Adjust the stopping condition of the linear regression model and the slope in y = 2*x + (5 + torch.randn(20, 1)), then train a linear regression model;
  2. What are the two main concepts in a computational graph?
  3. What is the difference between dynamic graphs and static graphs?
  4. Why can a logistic regression model perform binary classification?
  5. Implement the training of a logistic regression model in code, then try adjusting mean_value in the data generation: what happens when mean_value is set to a smaller value such as 1, or a larger value such as 5?
  6. Then try adjusting only bias: what does the training process look like when bias is made larger, or negative?

