
Getting Started with PyTorch - 4

Optimizing Model Parameters

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda

# Download the FashionMNIST training and test sets as tensors.
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)

# Wrap the datasets in iterable DataLoaders that yield batches of 64.
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# A simple fully connected network: flatten each 28x28 image, then pass it
# through three Linear layers with ReLU activations in between.
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork()
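
As a quick sanity check (not part of the original tutorial), you can push a dummy batch through the untrained model and confirm the output shape:

# Dummy batch of 3 fake 28x28 images; expect one logit per class in the output.
X = torch.rand(3, 28, 28)
logits = model(X)
print(logits.shape)   # torch.Size([3, 10])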

Hyperparameters

learning_rate = 1e-3   # step size used by the optimizer
batch_size = 64        # samples per gradient update
epochs = 5             # full passes over the training set

The Optimization Loop

Once we have set our hyperparameters, we can train and optimize the model with an optimization loop. Each iteration of the optimization loop is called an epoch.

Each epoch consists of two main parts:

  • The train loop: iterate over the training dataset and try to converge to optimal parameters.

  • The validation/test loop: iterate over the test dataset to check whether model performance is improving.

Loss Functions
  • nn.MSELoss: mean squared error, for regression problems
  • nn.NLLLoss: negative log likelihood, for classification problems
  • nn.CrossEntropyLoss: combines nn.LogSoftmax and nn.NLLLoss
# Initialize the loss function
loss_fn = nn.CrossEntropyLoss()
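
To see the combination mentioned in the last bullet, a small hedged check (with made-up logits and targets, not from the original text) compares nn.CrossEntropyLoss against nn.LogSoftmax followed by nn.NLLLoss:

# Made-up data: CrossEntropyLoss should equal LogSoftmax + NLLLoss.
logits = torch.randn(4, 10)            # dummy raw scores: 4 samples, 10 classes
targets = torch.tensor([1, 0, 4, 9])   # dummy class indices
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(torch.allclose(ce, nll))         # True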
Optimization Algorithms
  • SGD
  • Adam
  • RMSprop
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
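
Swapping in a different optimizer only changes this one line; for illustration (the tutorial itself sticks with SGD):

# Illustrative alternatives, not used in the rest of this post:
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)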

Inside the training loop, optimization happens in three steps:

  • Call optimizer.zero_grad() to reset the gradients of the model parameters. Gradients accumulate by default; to prevent double-counting, we explicitly zero them on each iteration (see the sketch after this list).
  • Backpropagate the prediction loss by calling loss.backward(). PyTorch deposits the gradient of the loss with respect to each parameter.
  • Once we have the gradients, call optimizer.step() to adjust the parameters using the gradients collected in the backward pass.
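
The first point, that gradients accumulate by default, is easy to verify with a toy example (made-up tensors, not part of the original tutorial):

# Toy demonstration that gradients accumulate across backward passes.
w = torch.ones(2, requires_grad=True)
loss = (w * w).sum()
loss.backward(retain_graph=True)   # keep the graph so we can backpropagate again
print(w.grad)                      # tensor([2., 2.])
loss.backward()                    # second backward pass without zeroing first
print(w.grad)                      # tensor([4., 4.]) -- added, not replaced
w.grad.zero_()                     # what optimizer.zero_grad() does for every parameter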

Full Implementation

def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")


def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    with torch.no_grad():  # no gradients are needed for evaluation
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")


loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")


Output:
Epoch 1
-------------------------------
loss: 2.290156 [ 0/60000]
loss: 2.275099 [ 6400/60000]
loss: 2.256799 [12800/60000]
loss: 2.252760 [19200/60000]
loss: 2.235528 [25600/60000]
loss: 2.205756 [32000/60000]
loss: 2.204928 [38400/60000]
loss: 2.172354 [44800/60000]
loss: 2.160271 [51200/60000]
loss: 2.127511 [57600/60000]
Test Error:
Accuracy: 49.9%, Avg loss: 2.116347
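
With the model trained, you can run single-sample predictions. A hedged sketch, using the FashionMNIST class names in label order (the list itself is not shown in the original text):

# Single-sample inference sketch; `classes` follows the FashionMNIST label order.
classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
           "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
model.eval()
x, y = test_data[0]            # one (1, 28, 28) image tensor and its integer label
with torch.no_grad():
    pred = model(x)            # nn.Flatten turns (1, 28, 28) into (1, 784)
print(f'Predicted: "{classes[pred.argmax(1).item()]}", Actual: "{classes[y]}"')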

Saving and Loading Models

import torch
import torchvision.models as models

Saving and Loading Model Weights

PyTorch models store the learned parameters in an internal state dictionary called state_dict.
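
For the small network defined earlier, you can inspect this dictionary directly; an illustrative sketch:

# Each entry maps a parameter name to its weight tensor.
for name, tensor in model.state_dict().items():
    print(name, tuple(tensor.shape))
# e.g. linear_relu_stack.0.weight (512, 784)
#      linear_relu_stack.0.bias   (512,)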

model = models.vgg16(pretrained=True)
torch.save(model.state_dict(), 'model_weights.pth')

To load model weights, you first need to create an instance of the same model, and then load the parameters using the load_state_dict() method.

model = models.vgg16()  # we do not specify pretrained=True, i.e. we do not load the default weights
model.load_state_dict(torch.load('model_weights.pth'))
# Be sure to call model.eval() before inference to set the dropout and batch
# normalization layers to evaluation mode; failing to do so will yield
# inconsistent inference results.
model.eval()
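
If the weights were saved on a GPU and you are loading them on a CPU-only machine, torch.load accepts a map_location argument; a small hedged example:

# Remap tensors saved on a GPU onto the CPU while loading.
state = torch.load('model_weights.pth', map_location=torch.device('cpu'))
model.load_state_dict(state)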

Saving and Loading Models with Shapes

# When loading model weights, we needed to instantiate the model class first,
# because the class defines the structure of the network. We might want to save
# the structure of this class together with the model, in which case we can
# pass model (rather than model.state_dict()) to the save function:
torch.save(model, 'model.pth')

# Note: this approach uses Python's pickle module when serializing the model,
# so the actual class definition must be available when loading it.
model = torch.load('model.pth')

Demo


import torch
import torch.nn as nn
import torch.nn.functional as F   # needed for F.relu in forward()
import torch.optim as optim

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
print(net)

optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Additional information
EPOCH = 5
PATH = "model.pt"
LOSS = 0.4

# Save a general checkpoint: model weights, optimizer state, and any
# extra bookkeeping you want to restore later.
torch.save({
    'epoch': EPOCH,
    'model_state_dict': net.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': LOSS,
}, PATH)

# Restore: rebuild the model and optimizer, then load their saved states.
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)  # tie the optimizer to the restored model

checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']

model.eval()   # for inference
# - or -
model.train()  # to resume training
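
To actually resume training from the restored checkpoint, put the model in training mode and continue the loop. A minimal sketch, assuming a train_loader and loss_fn are defined elsewhere (neither appears in this demo):

# Resume sketch; train_loader and loss_fn are assumed, not defined in this demo.
model.train()
for t in range(epoch, epoch + 5):      # continue from the saved epoch count
    for X, y in train_loader:
        optimizer.zero_grad()
        loss = loss_fn(model(X), y)
        loss.backward()
        optimizer.step()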