1. Custom Neural Network
import torch
import tqdm

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and
        assign them as member variables.
        """
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # clamp(input, min, max) -> Tensor clips values to [min, max];
        # clamp(min=0) is equivalent to a ReLU.
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred

# Define the inputs and outputs, then build the model, loss function, and optimizer.
# N is the batch size; D_in is the input dimension; H is the hidden dimension;
# D_out is the output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)   # x.shape = torch.Size([64, 1000])
y = torch.randn(N, D_out)  # y.shape = torch.Size([64, 10])

model = TwoLayerNet(D_in, H, D_out)
loss_fn = torch.nn.MSELoss(reduction='sum')               # construct the loss function
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)  # the SGD constructor is passed model.parameters()

# Train the model.
for i in tqdm.tqdm(range(500)):
    y_pred = model(x)           # forward pass: compute the predicted y by passing x to the model
    loss = loss_fn(y_pred, y)   # compute the loss
    if (i + 1) % 100 == 0:
        print(i, loss.item())   # report the loss periodically
    optimizer.zero_grad()       # zero the gradients, backpropagate, update the weights
    loss.backward()
    optimizer.step()
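Since this network is just Linear → ReLU → Linear, the same model can also be built without a custom class. Here is a minimal sketch for comparison, assuming the same dimensions as above; the name seq_model is ours, and nn.ReLU() computes the same function as the manual .clamp(min=0):

import torch

D_in, H, D_out = 1000, 100, 10  # same dimensions as above (assumption for this sketch)

# Equivalent model built from stock modules instead of a custom nn.Module subclass.
seq_model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)

print(seq_model(torch.randn(64, D_in)).shape)  # torch.Size([64, 10])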
Verify the result by comparing the model's prediction for one sample against the corresponding target:

model(x[10, :])
y[10, :]
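Since this forward pass is only for inspection, it is common to disable gradient tracking with torch.no_grad() so no computation graph is built; a minimal sketch:

with torch.no_grad():
    pred = model(x[10, :])  # prediction for sample 10
print(pred)
print(y[10, :])             # target for the same sample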