Complete code
# Classification example
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.optim as optim

# Generate data: two Gaussian clusters centered at (2, 2) and (-2, -2)
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)    # cluster around (2, 2)
y0 = torch.zeros(100)               # label 0
x1 = torch.normal(-2 * n_data, 1)   # cluster around (-2, -2)
y1 = torch.ones(100)                # label 1

# x holds the features, y holds the labels
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)   # shape (200, 2)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)    # shape (200,)
x, y = Variable(x), Variable(y)   # Variable is a no-op since PyTorch 0.4 and could be dropped

# Plot the raw data
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0)
# plt.show()

# Define the network
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)   # raw logits, no softmax here
        return x

# The input has two features (the two coordinates of each point); the output has 2 classes, 0 and 1
net = Net(2, 10, 2)
# print(net)
# An output like [0, 1] means the sample is class 1; [1, 0] means class 0. This is binary classification.
# With three classes, [0, 1, 0] means class 1, [1, 0, 0] means class 0, and [0, 0, 1] means class 2.

# Optimizer and loss
optimizer = optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()   # expects raw logits; applies log-softmax internally

# Interactive plotting
plt.ion()

for t in range(100):
    out = net(x)                 # logits, shape (200, 2)
    loss = loss_func(out, y)     # compare predictions with the true labels

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Visualize every other step
    if t % 2 == 0:
        plt.cla()
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = (pred_y == target_y).sum() / 200
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
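One point worth spelling out: torch.nn.CrossEntropyLoss takes raw logits, which is why forward() ends with a plain Linear layer and softmax only appears later for visualization. The following minimal, self-contained sketch (the tensors here are made-up stand-ins, not the tutorial's data) verifies that CrossEntropyLoss on logits matches log-softmax followed by NLLLoss, and that argmax over logits already gives the predicted class.

import torch
import torch.nn.functional as F

# Stand-in logits and labels, purely for illustration
logits = torch.randn(4, 2)
target = torch.tensor([0, 1, 1, 0])

# CrossEntropyLoss(logits, target) == NLLLoss(log_softmax(logits), target)
ce = torch.nn.CrossEntropyLoss()(logits, target)
nll = F.nll_loss(F.log_softmax(logits, dim=1), target)
print(torch.allclose(ce, nll))   # True

# Softmax is monotonic per row, so it does not change the argmax;
# the predicted class can be read directly from the logits.
print(logits.argmax(dim=1))
print(F.softmax(logits, dim=1).argmax(dim=1))   # same result

This is why the training loop's accuracy calculation would work equally well with out.argmax(dim=1) instead of torch.max(F.softmax(out, dim=1), 1)[1].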