Notes on the torch documentation


Study notes on the official documentation: http://pytorch.org/docs/0.3.0/index.html

1. torch.Tensor

from __future__ import print_function
import torch
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
################ Tensors ##################################
x=torch.Tensor(5,3)
torch.is_tensor(x)   # True
torch.is_storage(x)  # False
torch.numel(x)       # 15, an int
torch.eye(2)
torch.eye(2,3)
torch.from_numpy(np.array([1,2,3]))   #torch.IntTensor of size 3 (the dtype follows numpy's default int, which is platform-dependent)
torch.from_numpy(np.array([1.0,2,3])) #torch.DoubleTensor of size 3
print(torch.linspace(0,10,steps=3))   #torch.FloatTensor of size 3: 0 5 10
print(torch.linspace(0,10,3))   #torch.FloatTensor of size 3: 0 5 10; steps is an integer > 1, the number of sample points between start and end
print(torch.logspace(0,1,4)) #torch.FloatTensor of size 4; like linspace but on a log scale with base 10, points between 10^0 and 10^1
print(torch.ones(2,2)) #torch.FloatTensor of size 2x2
print(torch.ones(2,2,1,5,8,2,8,10)) #torch.FloatTensor of size 2x2x1x5x8x2x8x10
print(torch.ones_like(x)) #torch.FloatTensor of size 5x3
print(torch.arange(0,3,1)) #torch.FloatTensor of size 3: 0 1 2; the result is iterable
print(torch.zeros(2))     #torch.FloatTensor of size 2 0 0
print(torch.zeros_like(x)) #torch.FloatTensor of size 5x3


##############Indexing,Slicing,Joining,Mutating Ops##############################
print(torch.cat((x,x,x),0)) #torch.FloatTensor of size 15x3; concat joins Tensors along an existing dim
print(torch.cat((x,x,x),1)) #torch.FloatTensor of size 5x9
print(torch.chunk(x,2))    # splits into chunks; returns a tuple with one entry per chunk
# (
#  torch.FloatTensor of size 3x3,
#  torch.FloatTensor of size 2x3
# )
t=torch.Tensor([[1,2],[3,4]])
# torch.gather(input, dim, index, out=None) → Tensor; dim chooses row-wise vs. column-wise gathering
print(torch.gather(t,0,torch.LongTensor([[0,0],[0,0]])))  #torch.FloatTensor of size 2x2
# 1 2
# 1 2
print(torch.gather(t,1,torch.LongTensor([[0,0],[0,0]])))  #torch.FloatTensor of size 2x2
# 1 1
# 3 3
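
# To make the gather rule concrete: for dim=0, out[i][j] = input[index[i][j]][j].
# A minimal sketch checking that rule by hand (the index values are my own example, not from the docs):
idx = torch.LongTensor([[0,1],[1,0]])
out = torch.gather(t, 0, idx)
for i in range(2):
    for j in range(2):
        assert out[i][j] == t[idx[i][j]][j]   # the dim=0 gather rule
print(out)  #torch.FloatTensor of size 2x2
# 1 4
# 3 2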

# torch.index_select(input, dim, index, out=None) → Tensor
# selects whole rows or columns; to select both rows and columns, do it in two steps
# note: as with gather above, index must be a LongTensor
print(torch.index_select(t,0,torch.LongTensor([0])))    #torch.FloatTensor of size 1x2 1 2
print(torch.index_select(t,1,torch.LongTensor([0])))    #torch.FloatTensor of size 2x1 1 3
print(torch.index_select(t,1,torch.LongTensor([0,1])))  #torch.FloatTensor of size 2x2 # 1 2 # 3 4
print(t.ge(1))    #torch.ByteTensor of size 2x2; tests elementwise whether t >= 1
# 1 1
# 1 1
print(t.ge(0.5))  #torch.ByteTensor of size 2x2: all ones
print(t.ge(2.5))  #torch.ByteTensor of size 2x2
# 0 0
# 1 1
print(t.ge(5))    #torch.ByteTensor of size 2x2: all zeros

#torch.masked_select(input, mask, out=None) → Tensor  masked selection: picks the elements where mask is 1
print(torch.masked_select(t,t.ge(2.5)))  #torch.FloatTensor of size 2: 3 4
for i in torch.masked_select(t,t.ge(1.5)):  # the result is iterable
    print(i)  # 2.0, 3.0, 4.0

# torch.nonzero(input, out=None) → LongTensor  returns the positions of the nonzero elements as an N x ndim tensor, where N is the number of nonzero elements
print(torch.nonzero(torch.Tensor([1,2,3,0,4]))) #torch.LongTensor of size 4x1
# 0
# 1
# 2
# 4
non_zero=torch.nonzero(torch.Tensor([[0.6, 0.0, 0.0, 0.0], [0.0, 0.4, 0.0, 0.0], [0.0, 0.0, 1.2, 0.0], [0.0, 0.0, 0.0,-0.4]]))
print(non_zero)   #torch.LongTensor of size 4x2
# 0 0
# 1 1
# 2 2
# 3 3
non_zero=torch.nonzero(torch.Tensor([[0.6, 0.0, 0.0, 0.0], [2, 0.4, 0.0, 0.0], [3, 0.0, 1.2, 0.0], [4, 0.0, 0.0,-0.4]]))
print(non_zero)   #torch.LongTensor of size 7x2 (seven nonzero elements)
# 0 0
# 1 0
# 1 1
# 2 0
# 2 2
# 3 0
# 3 3
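
# A small sketch (my own example) showing how the index rows from nonzero() can be used to read the values back:
m_nz = torch.Tensor([[0.6, 0.0], [0.0, 0.4]])
idx = torch.nonzero(m_nz)            #torch.LongTensor of size 2x2: rows (0,0) and (1,1)
for k in range(idx.size(0)):
    i, j = idx[k][0], idx[k][1]      # each row is one coordinate
    print(m_nz[i][j])                # 0.6, then 0.4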

#torch.split(tensor, split_size, dim=0)  similar to torch.chunk above, but split_size is the size of each chunk rather than the number of chunks
print(torch.split(x,2))
# (
#  torch.FloatTensor of size 2x3,
#  torch.FloatTensor of size 2x3,
#  torch.FloatTensor of size 1x3
# )

#torch.squeeze(input, dim=None, out=None)
# removes dimensions of size 1. Intuition: if 3-D data (x, y, z) has extent 1 along z, it can be treated as 2-D.
torch_squeeze=torch.zeros(2,1,2,1,2)
print(torch_squeeze)         #torch.FloatTensor of size 2x1x2x1x2
print(torch_squeeze.size())  #torch.Size([2, 1, 2, 1, 2])
squeeze_one=torch.squeeze(torch_squeeze)
print(squeeze_one)           #torch.FloatTensor of size 2x2x2
squeeze_one=torch.squeeze(torch_squeeze,-2) # dim may be negative, any value in [-5, 4] here
print(squeeze_one)           #torch.FloatTensor of size 2x1x2x2
squeeze_one=torch.squeeze(torch_squeeze,1)
print(squeeze_one)           #torch.FloatTensor of size 2x2x1x2

#torch.stack(sequence, dim=0, out=None) # Concatenates sequence of tensors along a new dimension.
print(x)
print(torch.stack((x,x),1))      #torch.FloatTensor of size 5x2x3
print(torch.stack([x,x],dim=2))  #torch.FloatTensor of size 5x3x2
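
# The difference from cat: stack creates a brand-new dimension, while cat joins along an existing one. A quick sketch:
a_s = torch.ones(2,3)
print(torch.cat((a_s,a_s),0).size())    # torch.Size([4, 3])  the existing dim grows
print(torch.stack((a_s,a_s),0).size())  # torch.Size([2, 2, 3]) a new dim is inserted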

#torch.t(input,out=None) → Tensor  transpose of a 2-D tensor
print(torch.t(x))  #torch.FloatTensor of size 3x5
#print(torch.t(torch.Tensor(1,2,3,4))) #RuntimeError: t() expects a 2D tensor, but self is 4D

#torch.take(input,indices) → Tensor  treats input as a 1-D tensor and selects the elements at the given indices
print(torch.take(x,torch.LongTensor([0,2,5])))   #torch.FloatTensor of size 3

#torch.transpose(input,dim0,dim1,out=None) → Tensor
y=torch.Tensor(1,2,3,4)
print(y)                          #torch.FloatTensor of size 1x2x3x4
print(torch.transpose(y,1,3))     #torch.FloatTensor of size 1x4x3x2; swaps dims 1 and 3: 1x2x3x4 -> 1x4x3x2

#torch.unbind(tensor,dim=0)  removes a tensor dimension; returns a tuple of all slices along that dimension
print(torch.unbind(y))     # tuple of one torch.FloatTensor of size 2x3x4 (default dim=0)
print(torch.unbind(y,2))   # tuple of three torch.FloatTensors of size 1x2x4

#torch.unsqueeze(input,dim,out=None)
m=torch.Tensor([1,2,3,4])
print(m)                     #torch.FloatTensor of size 4
m_zero=torch.unsqueeze(m,0)
print(m_zero)                #torch.FloatTensor of size 1x4
m_one=torch.unsqueeze(m,1)
print(m_one)                 #torch.FloatTensor of size 4x1
m_zero_to_m=torch.squeeze(m_zero)
print(m_zero_to_m)           #torch.FloatTensor of size 4
print(m==m_zero_to_m)        #torch.ByteTensor of size 4: 1 1 1 1
print(m.equal(m_zero_to_m))  #True

2. Random sampling

import torch
################ Random sampling ##################################
print(torch.manual_seed(1))  #<torch._C.Generator object at 0x0000023ED56BD470>
print(torch.manual_seed(2))  #<torch._C.Generator object at 0x0000023F7532D470>
print(torch.initial_seed())  #2; returns the seed used to initialize the random number generator
print(torch.get_rng_state()) #torch.ByteTensor of size 5048

#torch.set_rng_state(new_state)
print(torch.set_rng_state(torch.get_rng_state())) # sets the RNG state; returns None
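
# A sketch of why this is useful: saving and restoring the state reproduces the same random draws
# (the actual values will vary with the seed):
state = torch.get_rng_state()
first = torch.rand(3)
torch.set_rng_state(state)      # rewind the generator
second = torch.rand(3)
print(first.equal(second))      # True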

#torch.bernoulli(input,out=None) → Tensor  draws binary (0 or 1) random numbers, using the elements of input as probabilities
a=torch.Tensor(3,3).uniform_(0,1)  #torch.FloatTensor of size 3x3; create a 3x3 Tensor, then fill it in place with uniform values in [0,1)
print(torch.bernoulli(a))  #torch.FloatTensor of size 3x3, e.g.
# 1 1 1
# 0 1 0
# 0 0 1
a=torch.ones(3,3)
print(torch.bernoulli(a))  #torch.FloatTensor of size 3x3, all ones

#torch.multinomial(input, num_samples, replacement=False, out=None) → LongTensor
# Returns a tensor where each row contains num_samples indices sampled from the multinomial
# probability distribution defined by the corresponding row of input. Rows need not sum to
# one (the values are used as weights) but must be non-negative with a nonzero sum.
# This looked fairly involved; revisit when actually needed.
print(torch.multinomial(a,3)) #torch.LongTensor of size 3x3 # 2 0 1 # 2 0 1 # 0 2 1
print(torch.multinomial(torch.Tensor([1,2,3]),3)) #torch.LongTensor of size 3 # 2 1 0
print(torch.multinomial(torch.Tensor([1,2,3,3]),4)) #torch.LongTensor of size 4 # 3 1 2 0

print(torch.multinomial(torch.Tensor([1.0,2,3]),3)) #torch.LongTensor of size 3 # 1 2 0
print(torch.multinomial(torch.Tensor([1,2.0,3,3]),4)) #torch.LongTensor of size 4 # 3 1 2 0
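
# With replacement=False (the default), num_samples cannot exceed the number of categories;
# replacement=True lifts that limit. A minimal sketch (the weights are my own example):
weights = torch.Tensor([1, 10])                          # category 1 is 10x more likely
draws = torch.multinomial(weights, 6, replacement=True)
print(draws)   # e.g. 1 1 0 1 1 1 [torch.LongTensor of size 6]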


#torch.normal(means, std, out=None)
print(torch.normal(means=torch.arange(1,6),std=torch.arange(0.85,0,-0.2))) #torch.FloatTensor of size 5; means and std must have the same size
# 1.2742
# 2.5393
# 3.3374
# 4.2307
# 4.9896
print(torch.normal(mean=0,std=torch.arange(0.85,0,-0.2))) #torch.FloatTensor of size 5; note the keyword is mean (a scalar) here, but means (a Tensor) above
# -0.7768
# -0.1913
# -0.3296
#  0.3402
#  0.0021
x=torch.normal(std=torch.arange(0.85,0,-0.2))
print(x)            #torch.FloatTensor of size 5
print(x.mean())     #0.4534170083701611
x=torch.normal(means=torch.arange(0.85,0,-0.2))
print(x)            #torch.FloatTensor of size 5
print(x.mean())     #0.5901669651269913

#torch.rand(*sizes, out=None) → Tensor  uniform distribution on [0, 1)
print(torch.rand(4))  #torch.FloatTensor of size 4 # 0.6558 # 0.2958 # 0.0541 # 0.6938
print(torch.rand(2,3)) #torch.FloatTensor of size 2x3 # 0.7529 0.6873 0.0716 # 0.9869 0.4623 0.0241

#torch.randperm(n, out=None) → LongTensor  returns a random permutation of the integers from 0 to n-1
print(torch.randperm(4)) #torch.LongTensor of size 4 # 1 # 3 # 2 # 0

# There are also random sampling functions defined directly on Tensors (see the sketch below):
# torch.Tensor.bernoulli_() - in-place version of torch.bernoulli()
# torch.Tensor.cauchy_() - numbers drawn from the Cauchy distribution
# torch.Tensor.exponential_() - numbers drawn from the exponential distribution
# torch.Tensor.geometric_() - elements drawn from the geometric distribution
# torch.Tensor.log_normal_() - samples from the log-normal distribution
# torch.Tensor.normal_() - in-place version of torch.normal()
# torch.Tensor.random_() - numbers sampled from the discrete uniform distribution
# torch.Tensor.uniform_() - numbers sampled from the uniform distribution
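
# A sketch of the in-place samplers above: each fills the tensor it is called on
# (note the trailing underscore) and returns it:
t_r = torch.Tensor(2,3)
t_r.uniform_(0,1)     # uniform on [0,1)
t_r.normal_(0,1)      # standard normal, overwriting the uniform values
t_r.random_(0,10)     # discrete uniform integers in [0,10)
print(t_r)            #torch.FloatTensor of size 2x3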

3. Serialization, Parallelism, and Math operations

from __future__ import print_function
import torch
import numpy as np
import pandas as pd
from pandas import Series,DataFrame

################ Serialization ##################################
#torch.save(the_model,PATH)
#torch.load('tensors.pt')
#torch.load(PATH)
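
# A minimal save/load round trip, using the 'tensors.pt' path from above:
x = torch.rand(2,2)
torch.save(x, 'tensors.pt')      # serialize to disk
y = torch.load('tensors.pt')     # deserialize
print(x.equal(y))                # True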

################ Parallelism ##################################
#torch.get_num_threads()  → int
#torch.set_num_threads(int)
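
# A quick sketch of the thread controls (the count is machine-dependent):
n = torch.get_num_threads()   # number of threads used for parallel CPU ops
print(n)                      # e.g. 4, machine-dependent
torch.set_num_threads(n)      # returns None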

################ Math operations ##################################
#torch.abs(input, out=None) → Tensor  elementwise absolute value
#torch.add(input, value, out=None) out=tensor+value
#torch.add(input, value=1, other, out=None) out=input+(other∗value)
#torch.mul(input,value,out=None)  # elementwise multiply; value may be a number or a Tensor with the same number of elements as input
a_tensor=torch.IntTensor([1,2,3])  #[torch.IntTensor of size 3]
print(torch.dot(a_tensor,a_tensor)) #14; dot requires 1-D tensors of size n, not n x n matrices
print(torch.mul(a_tensor,a_tensor)) #1 4 9 [torch.IntTensor of size 3]
#torch.div(input, value, out=None) # like mul but division; for an IntTensor the fractional part is truncated (not rounded)
#torch.ceil(input, out=None) → Tensor    rounds up; input cannot be an IntTensor, but FloatTensor works
#torch.erfinv(tensor, out=None) → Tensor inverse error function: given y in [-1,1], returns the x in [-inf,inf] with erf(x)=y
#torch.fmod(input, divisor, out=None) → Tensor elementwise remainder; the % operator also works. Similar to torch.remainder below, though they differ on negative operands (see the sketch after this list); floor division // is not supported
# torch.frac(tensor, out=None) → Tensor     fractional part of each element
# torch.exp(tensor, out=None) → Tensor    exponential of each element
#torch.log(input, out=None) → Tensor      natural log of each element
#torch.log1p(input, out=None) → Tensor  yi=log(xi+1)
#torch.neg(input, out=None) → Tensor    out=−1∗input; the unary minus also works, e.g. -a_tensor
#torch.pow(input, exponent, out=None)  out_i = x_i^exponent, or out_i = x_i^exponent_i when exponent is a Tensor, e.g. torch.pow(torch.Tensor([1,3]),torch.Tensor([1,2]))  #1 9; the numpy equivalent is np.power()
#torch.pow(base, input, out=None)  #out_i = base^input_i, with base (float) and input (Tensor), e.g. torch.pow(2,torch.Tensor([1,2]))  #2 4
#torch.reciprocal(input, out=None) → Tensor  1.0/x, the elementwise reciprocal; 1/input also works
#torch.remainder(input, divisor, out=None) → Tensor  remainder of division; input (Tensor) is the dividend, divisor is a Tensor or float
#torch.round(input, out=None) → Tensor rounds to the nearest integer
#torch.sqrt(input, out=None) → Tensor elementwise square root
#torch.rsqrt(input, out=None) → Tensor reciprocal of the square root; negative elements yield nan
#torch.sigmoid(input, out=None) → Tensor elementwise sigmoid, an S-shaped function with values in (0, 1)
#torch.trunc(input, out=None) → Tensor integer part of each element (truncation toward zero)
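
# As noted at fmod above: fmod and remainder agree for positive operands but differ on negatives.
# fmod keeps the sign of the dividend, remainder the sign of the divisor. A sketch:
a_m = torch.Tensor([-3, -1, 1, 3])
print(torch.fmod(a_m, 2))       # -1 -1  1  1  (sign of the dividend)
print(torch.remainder(a_m, 2))  #  1  1  1  1  (sign of the divisor)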

x=torch.Tensor([-1,-2,3])
t=torch.ones(3,2)
t1=torch.ones(1,6)
t2=torch.ones(6,1)
#torch.add(input, value, out=None) out=tensor+value
print(torch.add(x,20))  # adds 20 to every element
# 19
# 18
# 23

#torch.add(input, value=1, other, out=None) out=input+(other∗value)
#input (Tensor) – the first input Tensor
# value (Number) – the scalar multiplier for other
# other (Tensor) – the second input Tensor
print(torch.add(x,1,x))   # x + 1*x; the middle argument is the scalar multiplier for the second tensor
# -2
# -4
#  6

#torch.mul(input,value,out=None)  # elementwise multiply; value may be a number or a Tensor with the same number of elements as input
print(torch.mul(t1,10))   #torch.FloatTensor of size 1x6
print(torch.mul(t1,t2))   #torch.FloatTensor of size 6x6
print(torch.mul(t2,t1))   #torch.FloatTensor of size 6x6
t2=torch.ones(7,1)
print(torch.mul(t1,t2))   #torch.FloatTensor of size 7x6
t2=torch.ones(7,2)        # so shapes cannot be multiplied arbitrarily
#print(torch.mul(t1,t2))   #RuntimeError: inconsistent tensor size
t1=torch.Tensor([1,2,3,4])
t2=torch.Tensor([[0,1],[0,1]])
# the following also raise a broadcasting warning
# print(torch.mul(t1,t2))  #torch.FloatTensor of size 4
# print(torch.mul(t2,t1))  #torch.FloatTensor of size 2x2

#torch.div(input, value, out=None)
#out = tensor/value, or elementwise out_i = input_i / other_i
print(torch.div(t1,2))  #torch.FloatTensor of size 4
 # 0.5000
 # 1.0000
 # 1.5000
 # 2.0000
# the division below also raises: UserWarning: self and other not broadcastable
#print(torch.div(t1,t2)) #torch.FloatTensor of size 4
# inf
#   2
# inf
#   4
#print(torch.div(t2,t1)) #[torch.FloatTensor of size 2x2]
 # 0.0000  0.5000
 # 0.0000  0.2500

#torch.addcdiv(tensor, value=1, tensor1, tensor2, out=None) → Tensor
# tensor (Tensor) – the tensor to be added
# value (Number, optional) – multiplier for tensor1 ./ tensor2
# tensor1 (Tensor) – Numerator tensor
# tensor2 (Tensor) – Denominator tensor
t=torch.ones(3,2)
t1=torch.ones(1,6)
t2=torch.ones(6,1)
# adds value * tensor1/tensor2 (addcdiv) or value * tensor1*tensor2 (addcmul) to the tensor; here it raises a warning:
#UserWarning: self, tensor1, and tensor2 not broadcastable, but have the same number of elements
#print(torch.addcdiv(t,0.1,t1,t1)) #torch.FloatTensor of size 3x2
# 1.1000  1.1000
# 1.1000  1.1000
# 1.1000  1.1000
#print(torch.addcmul(t,0.1,t1,t1)) #torch.FloatTensor of size 3x2
# 1.1000  1.1000
# 1.1000  1.1000
# 1.1000  1.1000
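
# With shapes that actually match, addcdiv computes out = tensor + value * tensor1/tensor2
# with no warning; a sketch (my own example) verifying the formula:
base = torch.ones(2,2)
num  = torch.Tensor([[1,2],[3,4]])
den  = torch.Tensor([[2,2],[2,2]])
print(torch.addcdiv(base, 0.5, num, den))   # 1 + 0.5 * num/den
# 1.2500  1.5000
# 1.7500  2.0000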


# torch.lerp(start, end, weight, out=None)
# outi=starti+weight∗(endi−starti)
t1=torch.Tensor([0.1,0.9,1.6,-2.4])   # redefined here so the printed output below matches
t2=torch.Tensor([[0,1],[0,1]])
print(t1,t2)
print(torch.lerp(t1,t2,0.5))
# 0.1000
# 0.9000
# 1.6000
# -2.4000
# [torch.FloatTensor of size 4]
#
# 0
# 1
# 0
# 1
# [torch.FloatTensor of size 2x2]
#
# 0.0500
# 0.9500
# 0.8000
# -0.7000
# [torch.FloatTensor of size 4]

#torch.floor(input, out=None) → Tensor
# the counterpart of ceil: rounds down to the nearest integer

#torch.clamp(input, min, max, out=None) → Tensor
#       | min, if x_i < min
# y_i = | x_i, if min <= x_i <= max
#       | max, if x_i > max
print(torch.clamp(t1,0,1)) #torch.FloatTensor of size 4
# 0.1000
# 0.9000
# 1.0000
# 0.0000
# at least one of min or max must be supplied
print(torch.clamp(t1,min=0)) #torch.FloatTensor of size 4
# 0.1000
# 0.9000
# 1.6000
# 0.0000
print(torch.clamp(t1,max=1)) #torch.FloatTensor of size 4
#  0.1000
#  0.9000
#  1.0000
# -2.4000
print(torch.clamp(torch.randn(5,5),min=0,max=1)) #torch.FloatTensor of size 5x5
# 0.9985  0.4794  0.0000  0.1223  0.0000
# 0.0000  0.0000  0.0000  0.0000  0.1613
# 0.0527  0.1433  0.6362  0.0000  0.0000
# 0.4906  0.0000  0.0000  0.9332  0.0000
# 0.0000  0.0000  1.0000  0.3525  0.9937

#torch.erf(tensor, out=None) → Tensor
#Computes the error function of each element: an S-shaped function with values between -1 and 1
print(torch.erf(torch.Tensor([-100,-10,-2,-1,0,0.5,1,2,10,100]))) #torch.FloatTensor of size 10
# -1.0000
# -1.0000
# -0.9953
# -0.8427
#  0.0000
#  0.5205
#  0.8427
#  0.9953
#  1.0000
#  1.0000

4. Math operations

import torch
###################################  Reduction operation  ##################################
# note: dim=0 operates down the columns and dim=1 across the rows; in Python data
# processing the column-wise case (dim=0) is the common one, hence 0 before 1.
# See the sketch after this list.
#torch.sum(input) → float returns a single value, the sum of all elements
#torch.sum(input, dim, keepdim=False, out=None) → Tensor sums along a dimension, producing a row or column of totals
#torch.cumprod(input, dim, out=None) → Tensor cumulative product
#torch.cumsum(input, dim, out=None) → Tensor  cumulative sum
#torch.dist(input, other, p=2) → float p-norm of the difference (input - other); input (Tensor), other (Tensor), p (float, optional)
#torch.mean(input) → float mean of all elements, returned as a single value
#torch.mean(input, dim, keepdim=False, out=None) → Tensor
#torch.median(input) → float
#torch.median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
#median, like mean, has a whole-tensor form and a per-dimension form
#torch.mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
#returns a tuple of two tensors: the mode (most frequent value) along each row or column, and its index
#torch.norm(input, p=2) → float returns the p-norm; not to be confused with torch.normal()
#torch.norm(input, p, dim, keepdim=False, out=None) → Tensor  與前面類似
#torch.prod(input) → float returns a single value, the product of all elements
#torch.prod(input, dim, keepdim=False, out=None) → Tensor
#torch.std(input, unbiased=True) → float  unbiased standard deviation
#torch.std(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
#torch.var(input, unbiased=True) → float
#torch.var(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
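
# A sketch of the dim convention from the note at the top of this list
# (dim=0 gives column results, dim=1 row results):
m_r = torch.Tensor([[1,2],[3,4]])
print(torch.sum(m_r))      # 10.0, a plain float
print(torch.sum(m_r, 0))   # 4 6  column sums [torch.FloatTensor of size 2]
print(torch.sum(m_r, 1))   # 3 7  row sums    [torch.FloatTensor of size 2]
print(torch.mean(m_r))     # 2.5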

###################################  Comparison operation  ##################################
#torch.eq(input, other, out=None) → Tensor    other may be a Tensor or float; elementwise equality, returns a 0/1 Tensor
#torch.equal(tensor1, tensor2) → bool  returns True or False
#torch.ge(input, other, out=None) → Tensor like eq: elementwise >= other, returns a 0/1 Tensor
#torch.gt(input, other, out=None) → Tensor elementwise > other, returns a 0/1 Tensor
#torch.le(input, other, out=None) → Tensor elementwise <= other, returns a 0/1 Tensor
#torch.lt(input, other, out=None) → Tensor elementwise < other, returns a 0/1 Tensor
#torch.max(input) → float
#torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
#torch.max(input, other, out=None) → Tensor elementwise maximum of two Tensors
#torch.min(input) → float
#torch.min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
#torch.min(input, other, out=None) → Tensor elementwise minimum of two Tensors
#torch.ne(input, other, out=None) → Tensor  elementwise not-equal; other may be a Tensor or float
#torch.sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor) returns a tuple (sorted values, original indices)
x=torch.randn(3,3)
sorted,indices=torch.sort(x)
print(sorted,indices)
# -1.7012  0.2619  0.3892
# -1.8940 -0.7567  1.2057
# -0.8224  0.7787  1.3752
# [torch.FloatTensor of size 3x3]
# 0  2  1
# 2  1  0
# 2  1  0
# [torch.LongTensor of size 3x3]

#torch.kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)
# k: returns the k-th smallest element (and its index) along a dimension
#torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
#returns the k largest elements; note it is the top k, not just one. With largest=False it returns the k smallest. See the sketch below.
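
# A quick sketch of topk and kthvalue on a 1-D tensor:
v = torch.Tensor([1, 5, 2, 4])
values, indices = torch.topk(v, 2)      # the two largest
print(values)    # 5 4 [torch.FloatTensor of size 2]
print(indices)   # 1 3 [torch.LongTensor of size 2]
kth, kth_idx = torch.kthvalue(v, 1)     # the 1st smallest element
print(kth)       # 1 [torch.FloatTensor of size 1]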

###################################  Other operations  ##################################
#torch.cross(input, other, dim=-1, out=None) → Tensor  vector cross product, as opposed to the elementwise mul
x=torch.Tensor([1,2,3])
y=torch.Tensor([2,5,1])
print(torch.cross(x,y))   #https://baike.baidu.com/item/%E5%90%91%E9%87%8F%E7%A7%AF/4601007?fr=aladdin
# -13   2*1-3*5
#   5   3*2-1*1
#   1   1*5-2*2
# [torch.FloatTensor of size 3]
#torch.trace(input) → float  sum of the main diagonal
#torch.diag(input, diagonal=0, out=None) → Tensor  1-D input -> 2-D diagonal matrix; 2-D input -> 1-D diagonal (the main diagonal by default; the diagonal argument selects an off-diagonal)
a=torch.randn(3)
print(a)
# 2.1126
# -1.4150
# 0.4451  [torch.FloatTensor of size 3]

print(torch.diag(a))
#  2.1126  0.0000  0.0000
#  0.0000 -1.4150  0.0000
#  0.0000  0.0000  0.4451
# [torch.FloatTensor of size 3x3]

a=torch.randn(3,3)
print(a)
#  0.6810 -2.1620 -0.3158
#  0.0545  1.1060  1.3524
#  0.1481 -1.1468 -0.0113
# [torch.FloatTensor of size 3x3]
print(torch.diag(a,0))
#  0.6810
#  1.1060
# -0.0113
# [torch.FloatTensor of size 3]
print(torch.diag(a,1))
# -2.1620
#  1.3524
# [torch.FloatTensor of size 2]

#torch.tril(input, diagonal=0, out=None) → Tensor keeps the lower triangle of the Tensor, zeroing the elements above the diagonal
#torch.triu(input, diagonal=0, out=None) → Tensor upper triangle, analogous to tril
#torch.inverse(input, out=None) → Tensor  matrix inverse
#torch.mm(mat1, mat2, out=None) → Tensor  matrix product of two Tensors
data=[[1,2],[3,4]]
tensor=torch.FloatTensor(data)
print(tensor)
# 1  2
# 3  4
# [torch.FloatTensor of size 2x2]
print(np.matmul(data,data))
# [[ 7 10]
#  [15 22]]
print(torch.mm(tensor,tensor))
#  7  10
# 15  22
# [torch.FloatTensor of size 2x2]
#torch.mv(mat, vec, out=None) → Tensor  matrix-vector product
mat = torch.randn(2, 3)   # torch.FloatTensor of size 2x3
vec = torch.randn(3)      # torch.FloatTensor of size 3
print(torch.mv(mat, vec))
# -2.0939
# -2.2950
# [torch.FloatTensor of size 2]

#torch.qr(input, out=None) -> (Tensor, Tensor)  QR decomposition
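
# A sketch checking the QR factorization: multiplying Q and R back together
# should reproduce the input up to float rounding:
a_qr = torch.randn(3,3)
q, r = torch.qr(a_qr)
print(torch.dist(torch.mm(q, r), a_qr))   # ~0, just float rounding error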

 

