Introduction to PyTorch
PyTorch is an open-source Python machine learning library based on Torch, used in applications such as natural language processing. It is developed primarily by Facebook's artificial intelligence group. It not only delivers strong GPU acceleration but also supports dynamic neural networks, something many mainstream frameworks such as TensorFlow did not support at the time. PyTorch provides two high-level features:
- Tensor computation with strong GPU acceleration (like NumPy)
- Deep neural networks built on an automatic differentiation (autograd) system
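Both features fit in a few lines. Below is a minimal illustrative sketch (not from the original post): autograd differentiates y = x² for us, and the same tensor math runs on the GPU when one is available.

import torch

# autograd: track operations on x, then differentiate y = x ** 2
x = torch.tensor(3.0, requires_grad=True)
y = x ** 2
y.backward()
print(x.grad)  # dy/dx = 2x = tensor(6.)

# GPU-accelerated tensor computation, falling back to CPU if needed
device = "cuda" if torch.cuda.is_available() else "cpu"
a = torch.randn(1000, 1000, device=device)
b = a @ a  # matrix multiply runs on the selected device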
Packages used
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
Generating the training data
x = torch.linspace(-torch.pi, torch.pi, 10000)  # (10000,)
x = torch.unsqueeze(input=x, dim=1)  # (10000, 1)
y = torch.sin(x)  # (10000, 1)
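The unsqueeze step matters because nn.Linear expects inputs of shape (N, in_features), i.e. one sample per row, rather than a flat vector. A quick illustrative sketch of the shape change:

import torch

t = torch.linspace(-1.0, 1.0, 5)        # shape (5,)
print(t.shape)                          # torch.Size([5])
print(torch.unsqueeze(t, dim=1).shape)  # torch.Size([5, 1]) -- one sample per row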
Put the data on the GPU for training if you can (it is much faster)
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device to train")
x = x.to(device)
y = y.to(device)
Defining the model
Two layers: x --> Linear(1, 70) --> Sigmoid --> Linear(70, 1) --> y_pred
class NeuralNetwork(nn.Module):
    def __init__(self):
        # call the parent class constructor
        super(NeuralNetwork, self).__init__()
        # a 1 -> 70 -> 1 stack (Sigmoid activation, despite the ReLU in the name)
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(1, 70),
            nn.Sigmoid(),
            nn.Linear(70, 1)
        )

    def forward(self, x):
        y_pred = self.linear_relu_stack(x)
        return y_pred
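As a quick sanity check (not in the original post), you can instantiate the model and count its parameters; the first layer has 1*70 weights plus 70 biases, the second 70*1 weights plus 1 bias, 211 in total:

model = NeuralNetwork()
print(model)  # shows the nn.Sequential structure
n_params = sum(p.numel() for p in model.parameters())
print(n_params)  # 211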
Defining the loss function and optimizer, then training
# move the model to the GPU for training
model = NeuralNetwork().to(device)
# mean squared error as the loss function
loss_fn = nn.MSELoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# the Adam optimizer below converges much faster
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# iterate 3000 times
batches = 3000
plt.figure("regression")  # create a figure to plot the data points and predictions
plt.ion()  # enable interactive mode
plt.show()
for i in range(batches):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    # backpropagation: clear old gradients, compute new ones, update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % 100 == 0:
        loss, batch = loss.item(), i
        print(f'loss: {loss} {batch}')
        plt.cla()
        plt.plot(x.cpu().numpy(), y.cpu().numpy())
        plt.plot(x.cpu().numpy(), y_pred.detach().cpu().numpy())
        plt.pause(0.001)
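Note that every iteration here trains on the full dataset at once. For larger datasets you would usually switch to mini-batches; here is a hedged sketch of what that could look like with TensorDataset and DataLoader (not part of the original code):

from torch.utils.data import TensorDataset, DataLoader

# wrap the existing tensors so DataLoader can slice them into mini-batches
loader = DataLoader(TensorDataset(x, y), batch_size=256, shuffle=True)

for epoch in range(100):
    for xb, yb in loader:
        loss = loss_fn(model(xb), yb)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()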
Saving the model and loading the model parameters
# save
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
# load
model = NeuralNetwork()
model.load_state_dict(torch.load("model.pth"))
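One caveat: the weights were saved from a model living on device, so a checkpoint written on a GPU machine holds CUDA tensors. Loading it on a CPU-only machine needs map_location; a minimal sketch:

# remap GPU-saved weights onto the CPU when no CUDA device is available
model = NeuralNetwork()
state = torch.load("model.pth", map_location=torch.device("cpu"))
model.load_state_dict(state)
model.eval()  # good practice before inference, though this model has no dropout/batchnorm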
Results
Full code
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
# generate the training data
x = torch.linspace(-torch.pi, torch.pi, 10000)  # (10000,)
x = torch.unsqueeze(input=x, dim=1)  # (10000, 1)
y = torch.sin(x)  # (10000, 1)
plt.plot(x.numpy(),y.numpy())
# run on CUDA if available; it is much faster
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device to train")
x = x.to(device)
y = y.to(device)
# define the NN model, inheriting from nn.Module
class NeuralNetwork(nn.Module):
    def __init__(self):
        # call the parent class constructor
        super(NeuralNetwork, self).__init__()
        # a 1 -> 70 -> 1 stack (Sigmoid activation, despite the ReLU in the name)
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(1, 70),
            nn.Sigmoid(),
            nn.Linear(70, 1)
        )

    def forward(self, x):
        y_pred = self.linear_relu_stack(x)
        return y_pred
# move the model to the GPU for training
model = NeuralNetwork().to(device)
# mean squared error as the loss function
loss_fn = nn.MSELoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# the Adam optimizer below converges much faster
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# iterate 3000 times
batches = 3000
plt.figure("regression")  # create a figure to plot the data points and predictions
plt.ion()  # enable interactive mode
plt.show()
for i in range(batches):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    # backpropagation: clear old gradients, compute new ones, update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % 100 == 0:
        loss, batch = loss.item(), i
        print(f'loss: {loss} {batch}')
        plt.cla()
        plt.plot(x.cpu().numpy(), y.cpu().numpy())
        plt.plot(x.cpu().numpy(), y_pred.detach().cpu().numpy())
        plt.pause(0.001)
# save
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
class NeuralNetwork(nn.Module):
    def __init__(self):
        # call the parent class constructor
        super(NeuralNetwork, self).__init__()
        # a 1 -> 70 -> 1 stack (Sigmoid activation, despite the ReLU in the name)
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(1, 70),
            nn.Sigmoid(),
            nn.Linear(70, 1)
        )

    def forward(self, x):
        y_pred = self.linear_relu_stack(x)
        return y_pred
x = torch.linspace(-torch.pi, torch.pi, 10000)  # (10000,)
x = torch.unsqueeze(input=x, dim=1)  # (10000, 1)
model = NeuralNetwork()
model.load_state_dict(torch.load("model.pth"))
y = model(x)
plt.plot(x.numpy(), y.detach().numpy())
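A small refinement not in the original: wrapping inference in torch.no_grad() skips gradient tracking entirely, saving memory and removing the need for detach():

model.eval()
with torch.no_grad():           # no gradients needed at inference time
    y = model(x)
plt.plot(x.numpy(), y.numpy())  # y carries no grad, so detach() is unnecessary
plt.show()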