paddle常規操作


0,有時間看看源碼還是看看源碼吧,雖然看了也還是菜雞。。。

https://github.com/PaddlePaddle

1,常用方法總結

'''========================================1,資源配置========================================'''
paddle.fluid.is_compiled_with_cuda()
paddle.set_device('gpu:0')  # or paddle.set_device('cpu')
paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')

'''========================================2,tensor========================================'''
#************************************轉為tensor************************************
paddle.to_tensor(data, stop_gradient=False)
#************************************轉變tensor的dtype************************************
paddle.cast(tensor, 'float32')
tensor.astype('float32')
#************************************維度擴展************************************
tensor.unsqueeze([0, 1])
#************************************取隨機************************************
paddle.randn([2,10])

'''========================================3,數據加載========================================'''
paddle.io.Dataset/DataLoader
class myDataset(Dataset):
  """Template for a custom paddle.io.Dataset.

  Subclass must implement __len__ (number of samples) and
  __getitem__ (return one (data, label) pair for a given index).
  """
  def __init__(self, files):
    # load / index the raw files here, e.g. self.files = files
    ...
  def __len__(self):
    # sample count, e.g. len(self.files) or self.data.shape[0]
    return len(self.files)
  def __getitem__(self, index):
    # fetch and preprocess one sample
    ...
    return data, label
trainingData = myDataset(files)
# NOTE: the class is paddle.io.DataLoader (capital L); batch_size needs a concrete value
trainingDataloader = DataLoader(trainingData, batch_size=32, shuffle=True)
#************************************獲取數據和標簽************************************
data, label = next(iter(trainingDataloader))
for (data, label) in trainingDataloader:
for batch, (data, label) in enumerate(trainingDataloader):

'''========================================4,基本模型庫========================================'''
#************************************模型構造方法1  nn基礎模型************************************
myNet = paddle.nn.Linear(10, 10)
myNet.weight
myNet.weight.grad                                                                        
myNet.bias
myNet.bias.grad                                                                          
myNet.parameters()                                                                      
#************************************模型構造方法2.1  nn.Sequential************************************
myNet = paddle.nn.Sequential(           
  nn.Linear(10, 10),
  nn.Tanh(),
  nn.Linear(10, 10)
)
[param.shape for param in myNet.parameters()]
[(name, param.shape) for (name, param) in myNet.named_parameters()]
#************************************模型構造方法2.2 nn.Sequential 直接傳入 (name, layer) 元組為子模塊命名(paddle 無需 collections.OrderedDict)************************************
myNet = nn.Sequential(('hidden_linear',nn.Linear(10,10)),
                      ('hidden_activation', nn.Tanh()),
                      ('output_linear', nn.Linear(10,10)))
[param.shape for param in myNet.parameters()]
[(name, param.shape) for (name, param) in myNet.named_parameters()]
myNet.hidden_linear.weight
myNet.hidden_linear.weight.grad
myNet.hidden_linear.bias
myNet.hidden_linear.bias.grad
#************************************動態添加子模塊************************************
nn.Sequential().add_sublayer()
#************************************模型構造方法3  nn.Module************************************
paddle.nn.Layer
class myModule(nn.Layer):
  """Template for a custom network: subclass paddle.nn.Layer and define forward()."""
  def __init__(self):
    # always call the parent constructor before registering sublayers
    super().__init__()
  def forward(self, inputs):
    # compute and return the network output from `inputs`
    ...
myNet = myModule()
[(name, param.shape) for (name, param) in myNet.named_parameters()]

'''========================================5,優化器========================================'''
#************************************四大類優化器************************************
opt = paddle.optimizer.SGD(learning_rate=lr, parameters=myNet.parameters())
                      paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9, parameters=model.parameters())
                      paddle.optimizer.Adagrad(learning_rate=0.01, parameters=model.parameters())
                      paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters())
#************************************5.1:針對模型不同層設置不同的學習率************************************

#************************************5.2:自定義根據 epoch 改變學習率************************************

#************************************5.3:手動設置學習率衰減區間************************************

#************************************5.4:變學習率API(將 scheduler 傳入 optimizer;一般仍需按 epoch/step 調用 lr.step() 更新學習率)************************************
lr = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.01, decay_steps=total_steps, end_lr=0.001)
opt = paddle.optimizer.Momentum(learning_rate=lr, parameters=model.parameters())

'''========================================6,損失函數========================================'''
paddle.nn.CrossEntropyLoss()
paddle.nn.functional.cross_entropy()

'''========================================7,訓練========================================'''
myNet.train()
loss.backward()
opt.step()
opt.clear_grad()

'''========================================8,保存========================================'''
#************************************方法一************************************
paddle.save(opt.state_dict(), '')
paddle.save(model.state_dict(), '')
#************************************方法二************************************
paddle.save(optimizer, '')           #報錯,不支持
paddle.save(myNet, '')               #報錯,不支持

'''========================================9.加載========================================'''
#保存方法一的對應加載
myNet = myModule()
model_dict = paddle.load()
myNet.set_state_dict(model_dict)
optimizer同理
#保存方法二的對應加載,不支持
myNet = paddle.load()
optimizer = paddle.load()

'''========================================10,測試========================================'''
myNet.eval()

'''========================================11,計算准確率========================================'''
paddle.metric.Accuracy()

'''========================================12,視覺庫========================================'''
paddle.vision
paddle.vision.models
paddle.vision.transforms

'''========================================其他========================================'''
model.parameters()
model.named_parameters()
model.state_dict()
optimizer.state_dict()

2,paddle API文檔

3,paddle 組件


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM