PyTorch loss functions
Reference:
https://blog.csdn.net/zhangxb35/article/details/72464152?utm_source=itdadao&utm_medium=referral
Loss function tests
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
'''
Reference: https://blog.csdn.net/zhangxb35/article/details/72464152?utm_source=itdadao&utm_medium=referral
If reduce = False, the size_average argument is ignored and the loss is returned as a vector (one value per element);
if reduce = True, the loss is reduced to a scalar:
    if size_average = True, loss.mean() is returned;
    if size_average = False, loss.sum() is returned.
'''
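A quick sketch (my addition, not in the referenced post) verifying the flag combinations above with L1Loss; note that in PyTorch >= 0.4 these flags are superseded by reduction='none'|'mean'|'sum'.
if False:
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    vec = torch.nn.L1Loss(reduce=False)(input, target)                       # element-wise loss vector
    mean = torch.nn.L1Loss(reduce=True, size_average=True)(input, target)    # loss.mean()
    total = torch.nn.L1Loss(reduce=True, size_average=False)(input, target)  # loss.sum()
    print(vec.mean(), mean)   # should agree
    print(vec.sum(), total)   # should agree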
# nn.L1Loss: loss(input, target)=|input-target|
if False:
    loss_fn = torch.nn.L1Loss(reduce=True, size_average=False)
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    loss = loss_fn(input, target)
    print(input)
    print(target)
    print(loss)
    print(input.size(), target.size(), loss.size())
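As a sanity check (my addition), the summed L1 loss should equal the element-wise absolute difference summed by hand:
if False:
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    loss = torch.nn.L1Loss(reduce=True, size_average=False)(input, target)
    manual = (input - target).abs().sum()
    print(loss, manual)  # the two values should agree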
# nn.SmoothL1Loss: squared loss (0.5 * d^2) where the difference d = input - target lies in (-1, 1), L1 loss (|d| - 0.5) elsewhere
if False:
    loss_fn = torch.nn.SmoothL1Loss(reduce=False, size_average=False)
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    loss = loss_fn(input, target)
    print(input)
    print(target)
    print(loss)
    print(input.size(), target.size(), loss.size())
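To see both regimes of SmoothL1 at once, a fixed-input check (my addition) with one difference inside (-1, 1) and one outside:
if False:
    input = Variable(torch.Tensor([0.3, 2.0]))
    target = Variable(torch.zeros(2))
    loss = torch.nn.SmoothL1Loss(reduce=False)(input, target)
    # expected: 0.5 * 0.3**2 = 0.045 and 2.0 - 0.5 = 1.5
    print(loss)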
# nn.MSELoss: mean squared error loss
if False:
    loss_fn = torch.nn.MSELoss(reduce=False, size_average=False)
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    loss = loss_fn(input, target)
    print(input)
    print(target)
    print(loss)
    print(input.size(), target.size(), loss.size())
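The element-wise MSE is just the squared difference; a quick hand check (my addition, not in the referenced post):
if False:
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.randn(3, 4))
    loss = torch.nn.MSELoss(reduce=False)(input, target)
    print(loss)
    print((input - target) ** 2)  # should be identical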
# nn.BCELoss: binary cross entropy; expects probabilities in (0, 1), hence the sigmoid below
if False:
    loss_fn = torch.nn.BCELoss(reduce=False, size_average=False)
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.FloatTensor(3, 4).random_(2))  # binary targets in {0, 1}
    loss = loss_fn(F.sigmoid(input), target)
    print(input, input.shape)
    print(F.sigmoid(input))
    print(target, target.shape)
    print(loss, loss.shape)
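For numerical stability, sigmoid + BCELoss is usually folded into a single call, nn.BCEWithLogitsLoss. A sketch of the equivalence (my addition):
if False:
    input = Variable(torch.randn(3, 4))
    target = Variable(torch.FloatTensor(3, 4).random_(2))
    a = torch.nn.BCELoss()(F.sigmoid(input), target)
    b = torch.nn.BCEWithLogitsLoss()(input, target)
    print(a, b)  # should agree up to floating-point error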
# nn.CrossEntropyLoss: LogSoftmax + NLLLoss in one call; input is raw scores of shape (batch_size, C), target holds class indices
if False:
    weight = torch.Tensor([1, 2, 1, 1, 10])  # per-class weights; pass weight=weight instead of None to enable them
    loss_fn = torch.nn.CrossEntropyLoss(reduce=False, size_average=False, weight=None)
    input = Variable(torch.randn(3, 5))  # (batch_size, C)
    target = Variable(torch.LongTensor(3).random_(5))  # class indices in [0, C)
    loss = loss_fn(input, target)
    print(input)
    print(target)
    print(loss)
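A sketch (my addition) of the equivalence named above: CrossEntropyLoss is exactly NLLLoss applied to log-softmaxed scores.
if False:
    input = Variable(torch.randn(3, 5))
    target = Variable(torch.LongTensor(3).random_(5))
    a = torch.nn.CrossEntropyLoss()(input, target)
    b = torch.nn.NLLLoss()(F.log_softmax(input, dim=1), target)
    print(a, b)  # should be identical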
# nn.NLLLoss: negative log likelihood loss, used to train an n-class classifier; expects log-probabilities as input
if False:
    m = nn.LogSoftmax(dim=1)
    loss = nn.NLLLoss()
    # input is of size nBatch x nClasses = 3 x 5
    input = Variable(torch.randn(3, 5), requires_grad=True)
    # each element in target has to have 0 <= value < nClasses
    target = Variable(torch.LongTensor([1, 0, 4]))
    output = loss(m(input), target)
    print(output)
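NLLLoss simply negates the log-probability of the true class and averages; a hand-rolled equivalent via gather (my addition):
if False:
    logp = F.log_softmax(Variable(torch.randn(3, 5)), dim=1)
    target = Variable(torch.LongTensor([1, 0, 4]))
    a = nn.NLLLoss()(logp, target)
    b = -logp.gather(1, target.view(-1, 1)).mean()
    print(a, b)  # should match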
# nn.NLLLoss2d: negative log likelihood loss for images, computed per pixel
if False:
    m = nn.Conv2d(16, 32, (3, 3)).float()  # 32 output channels = 32 classes
    loss = nn.NLLLoss2d()
    # input is of size nBatch x nChannels x height x width
    input = Variable(torch.randn(3, 16, 10, 10))
    # each element in target has to have 0 <= value < nClasses (here < 32)
    target = Variable(torch.LongTensor(3, 8, 8).random_(0, 4))
    # NLLLoss2d expects per-pixel log-probabilities, so normalise over the class dim
    log_probs = F.log_softmax(m(input), dim=1)
    output = loss(log_probs, target)
    print(log_probs)
    print(output)
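In later PyTorch releases NLLLoss2d was deprecated; plain nn.NLLLoss accepts the same 4D input directly. A sketch (my addition, assuming PyTorch >= 0.4):
if False:
    input = Variable(torch.randn(3, 32, 8, 8))
    target = Variable(torch.LongTensor(3, 8, 8).random_(0, 4))
    output = nn.NLLLoss()(F.log_softmax(input, dim=1), target)
    print(output)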
# nn.MultiLabelMarginLoss: input x -> (N, C), y -> (N, C); y is a LongTensor whose entries are class indices, read left to right up to the first -1
if False:
    x = Variable(torch.randn(3, 4))
    y = Variable(torch.LongTensor(3, 4).random_(4))
    loss = torch.nn.MultiLabelMarginLoss()
    output = loss(x, y)
    print(output)
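Each target row lists the positive class indices and is terminated by -1; everything after the first -1 is ignored. A small hand-built example (my addition):
if False:
    x = Variable(torch.randn(2, 4))
    # sample 0 has positive classes {3, 0}; sample 1 has only class {2};
    # the entries after the first -1 are padding and do not matter
    y = Variable(torch.LongTensor([[3, 0, -1, 1],
                                   [2, -1, 0, 0]]))
    print(torch.nn.MultiLabelMarginLoss()(x, y))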
# nn.MultiLabelSoftMarginLoss: same task as MultiLabelMarginLoss, but y is a FloatTensor of 0/1 multi-hot labels rather than class indices
if False:
    x = Variable(torch.randn(3, 10))
    y = Variable(torch.FloatTensor(3, 10).random_(2))  # multi-hot 0/1 labels, one binary flag per class
    loss = torch.nn.MultiLabelSoftMarginLoss()
    output = loss(x, y)
    print(output)
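With the default averaging this is numerically the same as binary cross entropy on the logits, one binary problem per class (my sketch):
if False:
    x = Variable(torch.randn(3, 10))
    y = Variable(torch.FloatTensor(3, 10).random_(2))
    a = torch.nn.MultiLabelSoftMarginLoss()(x, y)
    b = torch.nn.BCEWithLogitsLoss()(x, y)
    print(a, b)  # should agree up to floating-point error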
# nn.MultiMarginLoss: multi-class margin (hinge) loss for multi-class classification
if True:
    x = Variable(torch.randn(3, 10))
    y = Variable(torch.LongTensor(3).random_(10))
    loss = torch.nn.MultiMarginLoss()
    output = loss(x, y)
    print(output)
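With the defaults (margin=1, p=1) the per-sample loss is the hinge summed over the wrong classes and divided by the number of classes C; a hand-rolled check (my addition):
if False:
    x = Variable(torch.randn(3, 10))
    y = Variable(torch.LongTensor(3).random_(10))
    a = torch.nn.MultiMarginLoss()(x, y)
    # for each sample: sum over i != y of max(0, 1 - x[y] + x[i]), divided by C
    margins = (1.0 - x.gather(1, y.view(-1, 1)) + x).clamp(min=0)
    margins.scatter_(1, y.view(-1, 1), 0)  # drop the i == y term
    b = margins.sum(1).mean() / x.size(1)
    print(a, b)  # should match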