1. Build a tensor directly from data
x = torch.tensor([5.5,3])
2. Build a tensor from an existing tensor. These factory methods reuse the properties of the original tensor (dtype, device) unless you override them.
x = x.new_ones(5, 3, dtype=torch.double)
x = torch.randn_like(x, dtype=torch.float)  # same shape as x, dtype overridden
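A quick check of the "reuse the properties" claim (the names a and b below are just for illustration):
x = torch.tensor([5.5, 3], dtype=torch.double)
a = x.new_ones(5, 3)                        # no dtype given, so a inherits torch.double from x
b = torch.randn_like(x, dtype=torch.float)  # same shape as x, dtype explicitly overridden
print(a.dtype, b.dtype, b.shape)            # torch.float64 torch.float32 torch.Size([2])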
3. Get the shape of a tensor
x.shape   # attribute, not a method
x.size()  # method form; both return a torch.Size
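torch.Size behaves like a tuple; a small sketch (x here is just an example tensor):
x = torch.rand(5, 3)
print(x.shape)         # torch.Size([5, 3])
print(x.size())        # same value
rows, cols = x.size()  # torch.Size unpacks like a tuple
print(x.size(0))       # size along a single dimension: 5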
4. Tensor operations
x = torch.rand(5, 3)
y = torch.rand(5, 3)
x + y
torch.add(x, y)
result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write the sum into a pre-allocated tensor
y.add_(x)  # in-place: adds x to y and stores the result in y
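The trailing underscore is PyTorch's general convention for in-place operations; a minimal sketch with a few other examples:
x = torch.rand(5, 3)
y = torch.rand(5, 3)
y.add_(x)   # y is modified in place
y.copy_(x)  # overwrite y with the contents of x
y.zero_()   # fill y with zeros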
5. All of numpy's indexing syntax works on tensors
x[:, 1:]
6. Resizing (numpy uses reshape; in torch use view)
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # the -1 is inferred from the other dimensions
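A quick shape check (torch also has a .reshape method that behaves like numpy's):
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)
print(x.size(), y.size(), z.size())  # torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])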
7. For a tensor with a single element, the .item() method returns its value as a plain Python number
x = torch.randn(1)
x.data             # the underlying data tensor
x.grad             # the gradient (None until autograd has filled it in)
x.item()           # the value as a Python float
z.transpose(1, 0)  # swap two dimensions (z is the (2, 8) tensor from above)
8. Converting between numpy arrays and tensors
import numpy as np
a = torch.ones(5)
b = a.numpy()            # a and b share the same memory
a = np.ones(5)
b = torch.from_numpy(a)  # a and b share the same memory
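A minimal check of the shared-memory claim: an in-place change on one side shows up on the other.
a = torch.ones(5)
b = a.numpy()
a.add_(1)            # in-place change on the tensor...
print(b)             # ...is visible in the numpy array: [2. 2. 2. 2. 2.]

c = np.ones(5)
d = torch.from_numpy(c)
np.add(c, 1, out=c)  # in-place change on the array...
print(d)             # ...is visible in the tensor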
9. CUDA tensors
if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)  # create the tensor directly on the GPU
    x = x.to(device)                       # or move an existing tensor with .to
y.cpu().data.numpy()      # move back to the CPU before converting to numpy
y.to("cpu").data.numpy()  # equivalent
model = model.cuda()      # move all of a model's parameters to the GPU
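A common device-agnostic pattern (a sketch; it assumes a model and an input x already exist in scope):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)                   # equivalent to model.cuda() when a GPU is present
x = x.to(device)
y_pred = model(x)
y_pred_np = y_pred.detach().cpu().numpy()  # detach from the graph, move to CPU, then convert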
10. A two-layer neural network implemented with numpy
import numpy as np

N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)

# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass
    h = x.dot(w1)              # (N, H)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    # Compute loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
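For reference, the backward pass above is just the chain rule applied to loss = sum((y_pred - y)^2) with y_pred = relu(x·w1)·w2; shapes are shown in parentheses:
grad_y_pred = 2 * (y_pred - y)                                 (N, D_out)
grad_w2     = h_relu^T · grad_y_pred                           (H, D_out)
grad_h_relu = grad_y_pred · w2^T                               (N, H)
grad_h      = grad_h_relu with entries where h < 0 zeroed out  (N, H)
grad_w1     = x^T · grad_h                                     (D_in, H)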
11. A two-layer neural network implemented with tensors
import torch

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype)
w2 = torch.randn(H, D_out, device=device, dtype=dtype)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

    # Update weights using gradient descent
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
autograd
import torch

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold the input and output.
# requires_grad=False (the default) means we do not need gradients
# with respect to these Tensors during the backward pass.
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# Create random Tensors for the weights.
# requires_grad=True means we want gradients with respect to these
# Tensors during the backward pass.
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y using operations on Tensors. This is
    # the same forward pass as before, but we no longer need to keep references
    # to intermediate values because we are not computing the backward pass by hand.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)

    # Compute the loss from the forward pass.
    # loss is a Tensor of shape (1,); loss.item() returns its value as a Python scalar.
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())

    # PyTorch's autograd handles the backward pass. For every Tensor with
    # requires_grad=True, backward() computes the gradient of the loss with
    # respect to that Tensor. Afterwards, w1.grad and w2.grad hold the
    # gradients of the loss with respect to w1 and w2.
    loss.backward()

    # Manually update the weights with gradient descent (an automatic way is
    # shown later). Wrap the updates in torch.no_grad() because w1 and w2 have
    # requires_grad=True, but we do not want autograd to track the update itself.
    # An alternative is to operate on weight.data and weight.grad.data, which
    # does not affect the recorded gradients: tensor.data gives a tensor that
    # shares storage with the original but does not record computation history.
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # Manually zero the gradients after updating the weights
        w1.grad.zero_()
        w2.grad.zero_()
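For completeness, the weight.data alternative mentioned in the comments would look roughly like this (a sketch; the torch.no_grad() version above is the more idiomatic one):
# instead of the torch.no_grad() block:
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
w1.grad.data.zero_()
w2.grad.data.zero_()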
optim
import torch

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')

# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algorithms. The first argument to the Adam constructor tells the
# optimizer which Tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and print loss.
    loss = loss_fn(y_pred, y)
    print(t, loss.item())

    # Before the backward pass, use the optimizer object to zero all of the
    # gradients for the variables it will update (which are the learnable
    # weights of the model). This is because by default, gradients are
    # accumulated in buffers (i.e., not overwritten) whenever .backward()
    # is called. Check out the docs of torch.autograd.backward for more details.
    optimizer.zero_grad()

    # Backward pass: compute gradient of the loss with respect to model parameters
    loss.backward()

    # Calling the step function on an Optimizer makes an update to its parameters
    optimizer.step()
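Swapping the optimizer only changes the constructor line; for example, plain SGD with momentum (the hyperparameters here are just placeholders):
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)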
Custom nn Modules
import torch
class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and assign them as
        member variables.
        """
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
        """
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct our model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out)
# Construct our loss function and an Optimizer. The call to model.parameters()
# in the SGD constructor will contain the learnable parameters of the two
# nn.Linear modules which are members of the model.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: Compute predicted y by passing x to the model
    y_pred = model(x)

    # Compute and print loss
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
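After training, the usual way to persist the custom module is through its state_dict (the file name below is just an example):
torch.save(model.state_dict(), "two_layer_net.pt")

# later, to reload:
model2 = TwoLayerNet(D_in, H, D_out)
model2.load_state_dict(torch.load("two_layer_net.pt"))
model2.eval()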