import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable


class Net(nn.Module):  # the network must subclass nn.Module
    def __init__(self):
        super(Net, self).__init__()
        # Two convolution layers, self.conv1 and self.conv2.
        # Note: these layers do not include activation functions.
        self.conv1 = nn.Conv2d(1, 6, 5)   # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Three fully connected layers
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # an affine operation: y = Wx + b
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Note: the input to a 2D convolution layer has shape batchsize * channel * height * width
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # If the size is a square you can only specify a single number
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        print(x)
        print('y=--------')
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


net = Net()

# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
num_iterations = 20

input = Variable(torch.randn(2, 1, 32, 32))
print('input=', input)
target = Variable(torch.LongTensor([5, 7]))

# in your training loop:
for i in range(num_iterations):
    optimizer.zero_grad()  # zero the gradient buffers; if you don't, gradients accumulate across iterations
    output = net(input)    # this is where the dynamic graph shows up: you could pass other arguments to change the network structure
    criterion = nn.CrossEntropyLoss()
    loss = criterion(output, target)
    loss.backward()        # compute the gradients, i.e. fill in Variable.grad
    optimizer.step()       # does the update, i.e. Variable.data -= learning_rate * Variable.grad
The code above is the example we will walk through.
__init__ only specifies each conv layer's number of input channels, number of output channels, and kernel size; it builds the layers but does not run anything. The part of the network that actually applies the convolutions (together with the activations and pooling) is forward.
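To make that split concrete, here is a minimal sketch (assuming the Net class defined above) that inspects the parameters created in __init__ and traces a dummy tensor through the same operations forward performs; the shapes in the comments follow from the 5x5 kernels and 2x2 pooling:

import torch
import torch.nn.functional as F

net = Net()

# Layers built in __init__ only hold parameters; nothing has been computed yet.
print(net.conv1.weight.shape)  # torch.Size([6, 1, 5, 5])  -> 6 output channels, 1 input channel, 5x5 kernel
print(net.fc1.weight.shape)    # torch.Size([120, 400])    -> 400 = 16 * 5 * 5 flattened features

# forward is where data actually flows through those layers.
x = torch.randn(2, 1, 32, 32)               # batchsize, channel, height, width
x = F.max_pool2d(F.relu(net.conv1(x)), 2)   # -> (2, 6, 14, 14)
x = F.max_pool2d(F.relu(net.conv2(x)), 2)   # -> (2, 16, 5, 5)
x = x.view(-1, 16 * 5 * 5)                  # -> (2, 400)
x = net.fc3(F.relu(net.fc2(F.relu(net.fc1(x)))))
print(x.shape)                              # -> (2, 10): one 10-class score vector per sample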
input = Variable(torch.randn(2, 1, 32, 32))  # batch size, channels, height, width
target = Variable(torch.LongTensor([5, 7]))  # the desired class for the first sample in the batch is 5 and for the second is 7; just two arbitrary class indices, not a 5x7 matrix
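This matches what nn.CrossEntropyLoss expects: raw, unnormalized scores of shape (batch, num_classes) from fc3, and a vector of class indices of shape (batch,), one index per sample. A minimal sketch of just that loss call, using random stand-in logits with the same shapes as above:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

logits = torch.randn(2, 10)       # stand-in for net(input): 2 samples, 10 class scores each
target = torch.tensor([5, 7])     # class index for each of the 2 samples (must lie in 0..9)

loss = criterion(logits, target)  # softmax + negative log-likelihood, averaged over the batch
print(loss)                       # a scalar tensor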