import torch
from torch.autograd import Variable
# Synthetic regression data: y = x^2 plus uniform noise in [0, 0.2).
x = torch.linspace(-1, 1, 100).unsqueeze(1)  # column vector, shape (100, 1)
y = x.pow(2) + 0.2 * torch.rand_like(x)
# Variable is a no-op wrapper on modern PyTorch; kept for parity with the tutorial.
x = Variable(x)
y = Variable(y)
# ---------------------------------------------------------------------------
# NOTE(review): this top-level section duplicated the function bodies below
# but was not valid Python: the Chinese headings had no leading '#',
# ``net1`` was referenced before being defined anywhere at module level,
# ``torch.nn.ReLu`` misspelled ``ReLU``, and the load paths
# ('.//pkl//net.pkl', './/pkl/net_params') did not match the paths that
# save() actually writes ('net.pkl', 'net_params.pkl').  It is preserved
# here as commented-out reference notes; use save() / restore_net() /
# restore_params() below instead.
#
# Save the data / model:
#   Save the whole network (pickles the entire module):
#     torch.save(net1, 'net.pkl')
#   Save only the parameters (state_dict; recommended):
#     torch.save(net1.state_dict(), 'net_params.pkl')
#
# Extract (reload) the network:
#   Load the whole pickled module:
#     net2 = torch.load('net.pkl')
#   Rebuild the same architecture, then restore its parameters:
#     net3 = torch.nn.Sequential(
#         torch.nn.Linear(1, 10),
#         torch.nn.ReLU(),
#         torch.nn.Linear(10, 1),
#     )
#     net3.load_state_dict(torch.load('net_params.pkl'))
# ---------------------------------------------------------------------------
def save():
    """Train a small regression net on the module-level (x, y) and save it.

    Writes two files in the current working directory:
      * ``net.pkl``        -- the entire pickled module
      * ``net_params.pkl`` -- only the state_dict (portable, recommended)

    Relies on the module-level globals ``x`` and ``y`` defined above.
    """
    # 1-10-1 MLP: enough capacity to fit y = x^2 + noise.
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()  # clear gradients left over from the previous step
        loss.backward()
        optimizer.step()

    ### Save the whole model (pickle; fragile across code refactors).
    torch.save(net1, 'net.pkl')
    ### Save only the parameters (the recommended form).
    torch.save(net1.state_dict(), 'net_params.pkl')
def restore_net():
    """Reload the full network saved by ``save()`` and return it.

    The original loaded './/pkl//net.pkl', but ``save()`` writes
    ``net.pkl`` into the working directory -- load from the same place.

    Returns:
        torch.nn.Module: the restored network.
    """
    # weights_only=False is required on PyTorch >= 2.6 to unpickle a whole
    # module object.  Unpickling can execute arbitrary code -- only load
    # files you trust.
    net2 = torch.load('net.pkl', weights_only=False)
    return net2
def restore_params():
    """Rebuild the network architecture and load the saved parameters.

    The architecture must match the one trained in ``save()`` exactly,
    otherwise ``load_state_dict`` raises a key/shape mismatch error.

    Returns:
        torch.nn.Module: the restored network.
    """
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),  # original misspelled this as torch.nn.ReLu
        torch.nn.Linear(10, 1),
    )
    # save() writes 'net_params.pkl' to the working directory; the original
    # loaded './/pkl/net_params', a path nothing in this file ever creates.
    net3.load_state_dict(torch.load('net_params.pkl'))
    return net3
# Guard the demo calls so importing this module has no side effects.
if __name__ == "__main__":
    # save()  # run once first to create net.pkl / net_params.pkl
    restore_net()