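The full training logic is organized into two functions: `train_loop`, which iterates over the training data and runs one optimization step per batch, and `test_loop`, which evaluates the model's performance against the test data.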
```python
# Assumes `import torch` and `from torch import nn` from earlier sections.
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Compute the prediction and its loss for this batch
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation: reset gradients, backprop the loss, take a step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Report progress every 100 batches
        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
```
```python
def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    # No gradients are needed for evaluation, so run the forward passes
    # under torch.no_grad() to save memory and computation
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
```
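Note that the model in this tutorial contains no layers whose behavior differs between training and inference. If it did (dropout or batch normalization, for example), the standard pattern is to also call `model.train()` at the top of `train_loop` and `model.eval()` at the top of `test_loop` so those layers switch into the right mode.

We initialize the loss function and optimizer, which are passed into `train_loop` and `test_loop`: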
```python
# learning_rate (e.g., 1e-3), model, train_dataloader, and test_dataloader
# are defined in earlier sections of the tutorial.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
```
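Both the loss function and the optimizer are pluggable. As an illustration (not part of this tutorial's configuration), `torch.optim.Adam` is a drop-in replacement for SGD here:

```python
# Hypothetical alternative: Adam keeps per-parameter adaptive learning
# rates and often needs less tuning than plain SGD.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```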
```python
epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
```
```
Epoch 1
-------------------------------
loss: 2.290156 [    0/60000]
loss: 2.275099 [ 6400/60000]
loss: 2.256799 [12800/60000]
loss: 2.252760 [19200/60000]
loss: 2.235528 [25600/60000]
loss: 2.205756 [32000/60000]
loss: 2.204928 [38400/60000]
loss: 2.172354 [44800/60000]
loss: 2.160271 [51200/60000]
loss: 2.127511 [57600/60000]
Test Error:
 Accuracy: 49.9%, Avg loss: 2.116347
```
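Once training finishes, a common next step is to persist the learned weights. A minimal sketch using `torch.save` (the filename `model.pth` is an arbitrary choice, not something this section prescribes):

```python
# Save only the learned parameters (the state_dict), the usual PyTorch idiom.
torch.save(model.state_dict(), "model.pth")

# To restore later, rebuild the same architecture and load the weights back:
# model.load_state_dict(torch.load("model.pth"))
```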