#include <iostream>
#include <torch/torch.h>
#include <torch/script.h>

int main()
{
    torch::Tensor t1 = torch::rand({4, 5});

    // print the shape
    t1.print();
    std::cout << "t1.sizes = " << t1.sizes() << std::endl;
    // print the tensor
    std::cout << "t1 = " << t1 << std::endl << std::endl;
    float a1 = t1[1][2].item().toFloat();
    std::cout << "t1[1][2] = " << a1 << std::endl << std::endl;

    // access a single row (dim 0 indexes rows, dim 1 indexes columns)
    torch::Tensor t2 = t1.select(0, 3);
    std::cout << "t2 = " << t2 << std::endl << std::endl;

    // element-wise comparisons
    torch::Tensor t3 = torch::rand({3, 5});
    std::cout << "t3 = " << t3 << std::endl << std::endl;
    torch::Tensor t4 = t3.ge(0.5);
    std::cout << "t4 = " << t4 << std::endl << std::endl;
    torch::Tensor t5 = (t3 > 0.5); // 0 or 1, as a bool tensor
    std::cout << "t5 = " << t5 << std::endl << std::endl;

    torch::Tensor t6 = torch::masked_select(t3, t5);
    std::cout << "t6 = " << t6 << std::endl << std::endl;

    torch::Tensor t7 = t3.masked_select(t5);
    std::cout << "t7 = " << t7 << std::endl << std::endl;

    // build a mask that filters rows
    // torch::from_blob surprisingly has a bug with torch::kBool (and the
    // buffer below holds ints, not bools, so the element sizes would not
    // match anyway)
    /*std::vector<int> v4{1, 0, 1};
    torch::Tensor mask4 = torch::from_blob(v4.data(), {3, 1}, torch::kBool);
    std::cout << "mask4 = " << mask4 << std::endl;*/
    torch::Tensor mask5 = torch::tensor({ {1}, {0}, {1} }, torch::kBool);
    std::cout << "mask5 = " << mask5 << std::endl << std::endl;
    std::cout << "mask5.sizes() = " << mask5.sizes() << std::endl << std::endl;

    torch::Tensor t5_ = torch::rand({3, 6});
    std::cout << "t5_ = " << t5_ << std::endl << std::endl;
    // masked_select returns a flattened 1-D tensor, so the view is required
    auto t6_ = torch::masked_select(t5_, mask5).view({-1, 6});
    std::cout << "t6_ = " << t6_ << std::endl << std::endl;

    // build a tensor and slice rows [2, 4)
    torch::Tensor t8 = torch::rand({6, 6});
    std::cout << "t8 = " << t8 << std::endl << std::endl;
    std::cout << "t8.slice(0, 2, 4) = " << t8.slice(0, 2, 4) << std::endl << std::endl;

    // per-column maxima
    torch::Tensor t9 = torch::rand({3, 4});
    std::cout << "t9 = " << t9 << std::endl << std::endl;
    // reducing along dim 0 collapses the rows, yielding one maximum per
    // column; in slice, dim 0 indexes rows instead
    std::tuple<torch::Tensor, torch::Tensor> max_info = torch::max(t9, 0);
    // unpack the tuple
    torch::Tensor max_vals = std::get<0>(max_info);
    torch::Tensor idxs = std::get<1>(max_info);
    std::cout << "max_vals = " << max_vals << std::endl << std::endl;
    std::cout << "idxs = " << idxs << std::endl << std::endl;

    // concatenation: t3 is 3x5 and t9 is 3x4, so cat along dim 1 gives 3x9
    torch::Tensor t10 = torch::cat({ t3, t9 }, 1);
    std::cout << "t10 = " << t10 << std::endl << std::endl;
    system("pause"); // keep the console window open (Windows)
    return 0;
}
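The commented-out `from_blob` snippet above points at a real pitfall: wrapping a raw buffer directly as `torch::kBool` misbehaves. A minimal workaround sketch, assuming a libtorch version where casting from `kByte` to `kBool` is supported (the names here are illustrative, not from the original): back the mask with `uint8_t`, wrap it as `kByte`, then cast. `clone()` copies the data out of the borrowed buffer, since `from_blob` does not take ownership.

#include <torch/torch.h>
#include <iostream>
#include <cstdint>
#include <vector>

int main()
{
    // Assumed workaround for the kBool issue noted above: the buffer's
    // element size matches kByte, and the cast to kBool happens afterwards.
    std::vector<uint8_t> v{1, 0, 1};
    torch::Tensor mask = torch::from_blob(v.data(), {3, 1}, torch::kByte)
                             .clone()           // own the memory
                             .to(torch::kBool); // cast after wrapping
    std::cout << "mask = " << mask << std::endl;
    return 0;
}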
LearningPytorch.py

import numpy as np
import torch

if __name__ == "__main__":
    ## e.g. 2.4
    # declare a tensor
    t1 = torch.tensor([[1, 2, 3], [2, 3, 4]])
    print('t1.dtype = ', t1.dtype)
    print('t1.shape = ', t1.shape)
    t1 = torch.tensor(range(10))  # convert an iterable to a tensor
    # numpy -> tensor
    t1 = torch.tensor(np.array([1, 2, 3]))
    t2 = torch.from_numpy(np.array([1, 2, 3]))
    # tensor -> numpy
    t3 = t2.numpy()

    # random tensor
    t1 = torch.randn(3, 3) * 10  # randn: normal distribution, rand: uniform distribution
    t2 = t1.to(torch.int8)

    ## e.g. 2.5 random numbers
    t1 = torch.rand(3, 3)  # 3x3 matrix, elements uniform on [0, 1)
    t1 = torch.randn(2, 3, 4)  # 2x3x4, Gaussian distribution
    t1 = torch.zeros(2, 2, 2)
    t1 = torch.ones(3, 4, 5) * 4
    t1 = torch.eye(4)
    t1 = torch.randint(1, 5, (3, 3))  # 3x3 matrix of integers uniform on [1, 5)

    ## e.g. 2.6 random numbers
    t1 = torch.randn(3, 3)

    # copy the shape of t1
    t2 = torch.zeros_like(t1)  # all-zero tensor with the same shape as t1
    t2 = torch.ones_like(t1)
    t2 = torch.randn_like(t1)  # normal distribution

    # copy the dtype of t1
    t3 = t1.new_tensor([1, 2, 3])  # build from a Python list; inherits t1's dtype (single-precision float here)
    t3 = t1.new_zeros(3, 3)  # all-zero tensor with the same dtype
    t3 = t1.new_ones(3, 3)  # all-one tensor with the same dtype

    # e.g. 2.8 devices
    t1 = torch.randn(3, 3, device="cpu")  # tensor stored on the CPU
    t1 = torch.randn(3, 3, device="cuda:0")  # tensor stored on GPU 0
    t1 = torch.randn(3, 3, device="cuda:0").device  # query the tensor's device
    t1 = torch.randn(3, 3, device="cuda:0").cpu().device  # move the tensor from GPU 0 to the CPU
    t1 = torch.randn(3, 3, device="cuda:0").cuda(0).device  # device is unchanged

    # e.g. 2.9 pointers and dimensions
    t1 = torch.randn(3, 4, 5)
    nd = t1.ndimension()  # number of dimensions: 3
    ne = t1.nelement()  # total number of elements: 3*4*5 = 60
    sz = t1.size()  # torch.Size([3, 4, 5])
    c = t1.size(0)  # size of dimension 0
    t = torch.randn(12)  # vector of length 12
    t1 = t.view(3, 4)  # reshape the vector into a 3x4 matrix
    t1 = t1.view(-1, 4)  # -1 lets PyTorch infer that dimension
    t1.view(4, 3)[0, 0] = 99.0  # writing through a view mutates the shared storage
    ptr = t1.data_ptr()  # raw data pointer of the tensor
    t1.view(3, 4).data_ptr()  # same pointer: views share storage
    t1.view(4, 3).contiguous().data_ptr()  # same pointer: already contiguous, no copy
    t1.view(3, 4).transpose(0, 1).data_ptr()  # transpose swaps two dimensions but still shares storage
    t1.view(3, 4).transpose(0, 1).contiguous().data_ptr()  # strides are no longer contiguous, so memory is reallocated (new pointer)

    # e.g. 2.10 mask
    t1 = torch.randn(2, 3, 4)
    t2 = t1[1, 2, 3]
    t2 = t1[:, 1:, 1:3]
    mask = t1 > 0  # mask of the elements of t1 greater than 0; mask.shape equals t1.shape
    t2 = t1[mask]  # 1-D vector whose length equals the number of True entries in mask

    # e.g. 2.11 sqrt && sum
    t1 = torch.randint(1, 9, (3, 3))
    t1 = t1.to(torch.float)
    t2 = t1.sqrt()  # does not modify t1
    t3 = torch.sqrt(t1)  # does not modify t1
    t1.sqrt_()  # in-place square root; modifies t1
    sum1 = torch.sum(t1)  # sums all elements by default
    sum2 = torch.sum(t1, 0)  # sum over dimension 0
    sum3 = torch.sum(t1, [0, 1])  # sum over dimensions 0 and 1

    mean1 = t1.mean()  # mean of all elements; torch.mean works too
    mean2 = t1.mean(0)  # mean over dimension 0
    mean3 = torch.mean(t1, [0, 1])  # mean over dimensions 0 and 1; the result is a scalar

    # e.g. 2.12 element-wise add/sub/mul/div (the operators are overloaded as well; try them yourself)
    t1 = torch.rand(2, 3)
    t2 = torch.rand(2, 3)
    t3 = t1.add(t2)
    t4 = t1.sub(t2)
    t5 = t1.mul(t2)  # element-wise multiplication, not matrix multiplication
    t6 = t1 * t2
    t1.add_(t2)  # in-place add: modifies t1, not t2

    var = torch.__version__
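The data_ptr checks in e.g. 2.9 have a direct libtorch counterpart, which makes the storage-sharing rules easy to verify. A minimal sketch (illustrative names, not from the original notes): transpose is a zero-copy view, contiguous() on a transposed tensor reallocates, and view on a non-contiguous tensor must go through contiguous() or reshape().

#include <torch/torch.h>
#include <iostream>

int main()
{
    torch::Tensor t = torch::randn({3, 4});
    torch::Tensor tt = t.transpose(0, 1); // strides swapped, storage shared

    // Same buffer: transpose is a view, no copy.
    std::cout << (t.data_ptr() == tt.data_ptr()) << std::endl; // prints 1

    // tt.view({12}) would throw here, because the transposed strides are
    // incompatible with a flat view; contiguous() makes a compacted copy.
    torch::Tensor flat = tt.contiguous().view({12});
    std::cout << (t.data_ptr() == flat.data_ptr()) << std::endl; // prints 0

    // reshape() does the same dance internally: view if possible, copy if not.
    torch::Tensor flat2 = tt.reshape({12});
    return 0;
}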
LearningPytorch.cpp

#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
using namespace std; // not recommended in real projects

void printTitle(const string& title)
{
    cout << endl;
    cout << "******【" << title << "】******" << endl;
}

int main()
{
    // e.g. 2.4
    {
        printTitle("e.g. 2.4");
        // declare a tensor
        torch::Tensor t1 = torch::tensor({ { 1, 2, 3 }, { 2, 3, 4 } }, torch::kByte);
        cout << "t1.dtype() = " << t1.dtype() << endl; // unsigned char (kByte)
        t1.print();
        cout << "t1 = " << t1 << endl;
        t1 = torch::arange(1, 11, torch::kByte); // [1..10]; arange replaces the deprecated torch::range

        // random tensor
        t1 = torch::randn({ 3, 3 }, torch::kFloat) * 10;
        cout << "t1 = " << t1 << endl;
        torch::Tensor t2 = t1.to(torch::kInt8);
        cout << "t2 = " << t2 << endl;
    }
    // e.g. 2.5 random numbers
    {
        printTitle("e.g. 2.5 random numbers");
        torch::Tensor t1 = torch::rand({ 3, 3 }, torch::kFloat32);
        t1 = torch::randn({ 2, 3, 4 });
        t1 = torch::zeros({ 2, 2, 2 }, torch::kUInt8);
        t1 = torch::ones({ 3, 4 }) * 9;
        t1 = torch::eye(3, torch::kFloat);
        t1 = torch::randint(0, 4, { 3, 3 });
        cout << "t1 = " << t1 << endl;
    }
    // e.g. 2.6 random numbers
    {
        printTitle("e.g. 2.6 random numbers");
        torch::Tensor t1 = torch::rand({ 3, 3 }, torch::kFloat32);
        // copy the shape of t1
        torch::Tensor t2 = torch::zeros_like(t1);
        t2 = torch::ones_like(t1);
        t2 = torch::randn_like(t1);

        // copy the dtype of t1
        torch::Tensor t3 = t1.new_zeros({ 3, 3 }); // same dtype, all zeros
        t3 = torch::ones(t1.sizes(), t1.dtype()); // same style as OpenCV
        t3 = torch::zeros(t1.sizes(), t1.dtype());

        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;
    }
    // e.g. 2.8 devices
    {
        printTitle("e.g. 2.8 devices");
        torch::Tensor t1 = torch::randn({ 3, 3 }, torch::Device("cpu"));
        cout << "t1 = " << t1 << endl;
        auto device = torch::Device("cuda:0");
        torch::Tensor t2 = torch::randn({ 3, 3 }, torch::kF32).to(device);
        cout << "t2 = " << t2 << endl;
        cout << "t2.device = " << t2.device() << endl;
    }
    // e.g. 2.9 pointers
    {
        printTitle("e.g. 2.9 pointers");
        torch::Tensor t1 = torch::randn({ 3, 4, 5 });
        cout << t1 << endl;
        int nd = t1.ndimension(); // number of dimensions: 3
        int nc = t1.size(0); // 3
        int nw = t1.size(1); // 4
        int nh = t1.size(2); // 5
        cout << nd << " " << nc << endl;
        auto sz = t1.sizes(); // [3, 4, 5]
        cout << "sz = " << sz << endl;
        t1 = torch::randn({ 12 });
        torch::Tensor t2 = t1.view({ -1, 3 }); // second dim becomes 3; the first is inferred, and view throws if the sizes do not divide evenly
        t2[0][0] = 99; // element access
        cout << "t2 = " << t2 << endl;
        float* t2_ptr = t2.data_ptr<float>(); // typed data pointer
        cout << "t2_ptr = " << t2_ptr << endl;
        void* t22_ptr = t2.data_ptr(); // raw pointer; same address
        cout << "t22_ptr = " << t22_ptr << endl;
        auto t222_ptr = t2.contiguous().data_ptr(); // already contiguous; same address
        cout << "t222_ptr = " << t222_ptr << endl;
        auto t2222_ptr = t2.transpose(0, 1).contiguous().data_ptr(); // strides incompatible with the contiguous layout, so memory is reallocated
        cout << "t2222_ptr = " << t2222_ptr << endl;
    }
    // e.g. 2.10 mask
    {
        printTitle("e.g. 2.10 mask");
        torch::Tensor t1 = torch::randn({ 2, 3, 4 });
        cout << "t1 = " << t1 << endl;
        torch::Tensor ele = t1[1][2][3];
        cout << "ele = " << ele << endl;
        double ele_ = ele.item().toDouble(); // tensor to double
        cout << "ele_ = " << ele_ << endl;
        torch::Tensor mask = t1.ge(0);
        cout << "mask = " << mask << endl;
        torch::Tensor t2 = t1.masked_select(mask); // t2 is a 1-D vector
        cout << "t2 = " << t2 << endl;
    }
    // e.g. 2.11 sqrt && sum
    {
        printTitle("e.g. 2.11 sqrt");
        torch::Tensor t1 = torch::randint(1, 9, { 3, 3 });
        cout << "t1 = " << t1 << endl;
        torch::Tensor t2 = t1.to(torch::kFloat32);
        torch::Tensor t3 = t2.sqrt(); // does not modify t2
        t3 = torch::sqrt(t2); // does not modify t2
        cout << "t3 = " << t3 << endl;
        t2.sqrt_(); // in-place square root; modifies t2
        cout << "t2 = " << t2 << endl;

        // the member function sum() works as well
        cout << "t1 = " << t1 << endl;
        torch::Tensor sum1 = torch::sum(t1); // sums all elements by default
        torch::Tensor sum2 = torch::sum(t1, 0); // sum over dim 0, i.e. column sums
        torch::Tensor sum3 = torch::sum(t1, { 1, 0 }); // note: writing {0, 1} reportedly fails to compile here
        cout << "sum3 = " << sum3.item().toFloat() << endl;

        torch::Tensor mean1 = t1.mean(); // mean of all elements; torch::mean works too
        torch::Tensor mean2 = t1.mean(0); // mean over dim 0
        // {0, 1} reportedly fails to compile, as above
        torch::Tensor mean3 = torch::mean(t1, { 1, 0 }); // mean over dims 0 and 1; the result is a scalar
        cout << "mean1 = " << mean1.item().toFloat() << endl;
        cout << "mean2 = " << mean2 << endl;
        cout << "mean3 = " << mean3 << endl;
    }
    // e.g. 2.12 element-wise add, subtract, multiply, divide (the operators are overloaded as well; try them yourself)
    {
        printTitle("e.g. 2.12");
        torch::Tensor t1 = torch::rand({ 2, 3 });
        torch::Tensor t2 = torch::rand({ 2, 3 });
        torch::Tensor t3 = t1 + t2;
        torch::Tensor t4 = t1.sub(t2);
        torch::Tensor t5 = t1.mul(t2);
        torch::Tensor t6 = t1.div(2);
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;
        cout << "t4 = " << t4 << endl;
        cout << "t5 = " << t5 << endl;
        cout << "t6 = " << t6 << endl;
        t6.add_(1); // modifies t6 in place
        cout << "t6 = " << t6 << endl;
    }
    // e.g. 2.13 min max argmax
    {
        printTitle("e.g. 2.13 min max argmax");
        torch::Tensor t1 = torch::randn({ 3, 4 }, torch::kFloat64);
        cout << "t1 = " << t1 << endl;
        torch::Tensor mask_argmax = torch::argmax(t1, 0); // positions of the maxima along dim 0
        cout << "mask_argmax = " << mask_argmax << endl;
        // max
        std::tuple<torch::Tensor, torch::Tensor> maxVals = torch::max(t1, -1); // free function; returns a tuple of the maxima along the last dim and their positions
        torch::Tensor mask_max = std::get<0>(maxVals); // max values
        torch::Tensor mask_max_idx = std::get<1>(maxVals); // indices of the max values
        cout << "mask_max = " << mask_max << endl;
        cout << "mask_max_idx = " << mask_max_idx << endl;
        // min
        std::tuple<torch::Tensor, torch::Tensor> minVals = t1.min(0); // member function; returns a tuple of the minima along dim 0 and their positions
        torch::Tensor mask_min = std::get<0>(minVals); // min values
        torch::Tensor mask_min_idx = std::get<1>(minVals); // indices of the min values
        cout << "mask_min = " << mask_min << endl;
        cout << "mask_min_idx = " << mask_min_idx << endl;
        // sort
        std::tuple<torch::Tensor, torch::Tensor> sortVals = t1.sort(-1); // sort along the last dim; returns the sorted tensor and the original positions of its elements
        torch::Tensor tensorVal = std::get<0>(sortVals);
        torch::Tensor tensorValIdx = std::get<1>(sortVals);
        cout << "tensorVal = " << tensorVal << endl;
        cout << "tensorValIdx = " << tensorValIdx << endl;
    }
    // e.g. 2.14 matrix multiplication
    {
        printTitle("e.g. 2.14 matrix multiplication");
        torch::Tensor t1 = torch::tensor({ {1, 2}, {3, 4} }, torch::kFloat64); // 2x2
        torch::Tensor t2 = torch::tensor({ {1, 1, 1}, {2, 3, 1} }, torch::kFloat64); // 2x3
        auto t3 = t1.mm(t2); // matrix multiplication; also torch::mm
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;

        t1 = torch::randn({ 2, 3, 4 });
        t2 = torch::randn({ 2, 4, 3 });
        torch::Tensor t4 = t1.bmm(t2); // (mini-)batch matrix multiplication; result is 2x3x3
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t4 = " << t4 << endl;
    }
    // e.g. 2.16 stacking and concatenating tensors
    {
        printTitle("e.g. 2.16 stacking and concatenating tensors");
        auto t1 = torch::randn({ 2, 3 });
        auto t2 = torch::randn({ 2, 3 });
        auto t3 = torch::stack({ t1, t2 }, -1); // stack along a new last dimension; result is 2x3x2
        cout << "t1.sizes() = " << t1.sizes() << endl;
        cout << "t2.sizes() = " << t2.sizes() << endl;
        cout << "t3.sizes() = " << t3.sizes() << endl;
    }
    // e.g. 2.17 2.18 expanding and squeezing dimensions
    {
        printTitle("e.g. 2.17 2.18 expanding and squeezing dimensions");
        torch::Tensor t1 = torch::rand({ 3, 4 });
        cout << "t1.sizes() = " << t1.sizes() << endl;
        auto t11 = t1.unsqueeze(-1); // append a last dimension -> 3x4x1
        cout << "t11.sizes() = " << t11.sizes() << endl;
        auto t12 = t1.unsqueeze(-1).unsqueeze(-1); // append another last dimension -> 3x4x1x1
        cout << "t12.sizes() = " << t12.sizes() << endl;
        auto t13 = t1.unsqueeze(1); // insert a new dimension at position 1 -> 3x1x4
        cout << "t13.sizes() = " << t13.sizes() << endl;

        auto t2 = torch::rand({ 1, 3, 4, 1 });
        cout << "t2.sizes() = " << t2.sizes() << endl;
        auto t21 = t2.squeeze(); // remove all size-1 dimensions -> 3x4
        cout << "t21.sizes() = " << t21.sizes() << endl;
    }
    return 0;
}