本文目的:展示如何利用PyTorch做一個簡單的線性回歸。
1 隨機生成一些數據
#導入相關庫
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Report the installed PyTorch version (original run used 1.1.0).
print(f"torch version:  {torch.__version__}")
# Sample 256 evenly spaced points on [0, 5] and perturb the line
# y = 5x + 7 with Gaussian noise (std = 2).
x = np.linspace(0, 5, 256)
noise = np.random.randn(256) * 2
y = x * 5 + 7 + noise
# Collect the samples in a DataFrame for plotting.
df = pd.DataFrame({'x': x, 'y': y})
# Visualize: seaborn's regression plot overlays a fitted line on the
# scatter, a quick visual check that the data is indeed linear.
sns.lmplot(data=df, x='x', y='y', height=4)

2 利用 PyTorch 進行線性回歸
三部曲:准備數據,准備模型,訓練。
# --- Data preparation --------------------------------------------------
# Reshape the 1-D samples into column vectors of shape (n, 1) and convert
# them to float32 tensors, the dtype nn.Linear expects.
train_x = torch.from_numpy(x.reshape(-1, 1).astype('float32'))
train_y = torch.from_numpy(y.reshape(-1, 1).astype('float32'))
# --- Model -------------------------------------------------------------
# One input feature, one output: exactly y = w*x + b, the model to fit.
model = nn.Linear(1, 1)
# --- Training configuration --------------------------------------------
loss_fn = nn.MSELoss()                                    # mean squared error
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # plain SGD
epochs = 3000
# --- Training loop -----------------------------------------------------
# Full-batch gradient descent. The original paste had lost the loop
# body's indentation, which makes it a SyntaxError; restored here.
for epoch in range(1, epochs + 1):
    optimizer.zero_grad()           # gradients accumulate by default; reset first
    out = model(train_x)            # forward pass
    loss = loss_fn(out, train_y)    # MSE against the noisy targets
    loss.backward()                 # backprop
    optimizer.step()                # SGD parameter update
    if epoch % 300 == 0:            # progress report every 300 epochs
        print('epoch {} loss {:.4f}'.format(epoch, loss.item()))
3 結果可視化
# --- Parameter inspection ----------------------------------------------
# parameters() yields the layer's weight and bias tensors; after training
# they should be close to the true slope 5 and intercept 7.
w, b = model.parameters()
print(w.item(), b.item())
# --- Result visualization ----------------------------------------------
# Call the module directly rather than model.forward(): __call__ also runs
# any registered hooks. detach() returns a tensor cut off from the autograd
# graph — the modern replacement for the deprecated .data attribute.
pred = model(train_x).detach().numpy().squeeze()
plt.plot(x, y, 'go', label='Truth', alpha=0.3)
plt.plot(x, pred, label='Predicted')
plt.legend()
plt.show()

4 小結
- 數據生成和可視化方法
Reference
