[PYTHON] Making performance indicators more visible with PyTorch and scikit-learn

Introduction

I wrote this because, while working on a classification problem, I wanted to keep a record of performance indicators that is easier to read than the code alone.

Code

This is an improved version of the code from the PyTorch Transfer Learning tutorial (1). Please don't be annoyed that the imports are a bit of a mess ...

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from PIL import Image
from sklearn.metrics import recall_score, precision_score
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
import datetime


def train_model(model, criterion, optimizer, scheduler, num_epochs=25, save_model_name="vgg16_transferlearning"):
    writer = SummaryWriter()
    save_model_dir = r"H:\model"  # raw string so the backslash is not treated as an escape
    os.makedirs(save_model_dir, exist_ok=True)
    d = datetime.datetime.now()
    save_day = "{}_{}{}_{}-{}".format(d.year, d.month, d.day, d.hour, d.minute)
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_precision = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            
            # Running totals for the epoch's loss and number of correct predictions
            running_loss = 0.0
            running_corrects = 0
            # Collect labels and predictions over the whole epoch for the sklearn metrics
            all_labels = []
            all_preds = []

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    # take the argmax along the class dimension (dim=1)
                    axis = 1
                    _, preds = torch.max(outputs, axis)
                    # compute the loss with the criterion
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics: accumulate learning evaluation over the epoch
                running_loss += loss.item() * inputs.size(0)  # inputs.size(0) == batch size
                running_corrects += torch.sum(preds == labels.data)
                all_labels.extend(labels.cpu().tolist())
                all_preds.extend(preds.cpu().tolist())
            if phase == 'train':
                scheduler.step()
                if epoch % 10 == 0:
                    torch.save(model.state_dict(), os.path.join(save_model_dir, save_model_name + "_{}_{}.pkl".format(epoch, save_day)))
                    print("saving model epoch :{}".format(epoch))
                    
            # Evaluation metrics (loss, accuracy, recall, precision)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            # recall/precision computed over the full epoch, with class 0 as the positive class
            epoch_recall = recall_score(y_true=all_labels, y_pred=all_preds, pos_label=0)
            epoch_precision = precision_score(y_true=all_labels, y_pred=all_preds, pos_label=0)
            
            writer.add_scalar('Loss/{}'.format(phase), epoch_loss, epoch)
            writer.add_scalar('Accuracy/{}'.format(phase), epoch_acc, epoch)
            writer.add_scalar('Recall/{}'.format(phase), epoch_recall, epoch)
            writer.add_scalar('Precision/{}'.format(phase), epoch_precision, epoch)
            
            print('{} Loss: {:.4f} Acc: {:.4f} Recall: {:.4f} Precision: {:.4f}'.format(
                phase, epoch_loss, epoch_acc, epoch_recall, epoch_precision))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                if epoch_recall == 1 and epoch_precision > best_precision:
                    torch.save(model.state_dict(),
                               os.path.join(save_model_dir, save_model_name + "_{}_{}_recall_1.0.pkl".format(epoch, save_day)))
                    print("saving model recall=1.0 epoch :{}".format(epoch))
                best_precision = epoch_precision
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}, Precision: {:.4f}'.format(best_acc, best_precision))

    # load best model weights
    model.load_state_dict(best_model_wts)
    torch.save(model.state_dict(),
               os.path.join(save_model_dir, save_model_name + "_{}_{}_best.pkl".format(epoch, save_day)))
    writer.close()
    
    return model
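
A note on the metrics above: recall_score and precision_score are called with pos_label=0, which treats class 0 as the positive class. A minimal sketch with made-up toy labels to show what that means:

from sklearn.metrics import recall_score, precision_score

y_true = [0, 0, 1, 1]  # ground truth; class 0 is treated as the positive class
y_pred = [0, 1, 1, 1]  # one class-0 sample was missed

# recall = TP / (TP + FN) for class 0: 1 of the 2 true zeros was found -> 0.5
print(recall_score(y_true, y_pred, pos_label=0))     # 0.5
# precision = TP / (TP + FP) for class 0: the single predicted 0 was correct -> 1.0
print(precision_score(y_true, y_pred, pos_label=0))  # 1.0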

Run it like this:

model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)  # multiply the LR by 0.1 every 7 epochs
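
For reference, StepLR just multiplies the learning rate by gamma every step_size scheduler steps. A minimal sketch that prints the schedule, using a throwaway parameter only to build an optimizer (get_last_lr() is available in recent PyTorch versions):

import torch
import torch.optim as optim
from torch.optim import lr_scheduler

param = torch.nn.Parameter(torch.zeros(1))  # throwaway parameter, just to construct the optimizer
opt = optim.SGD([param], lr=0.001, momentum=0.9)
sched = lr_scheduler.StepLR(opt, step_size=7, gamma=0.1)

for epoch in range(25):
    opt.step()    # optimizer steps first, then the scheduler, once per epoch
    sched.step()
    print(epoch, sched.get_last_lr())  # the LR is multiplied by 0.1 after every 7 scheduler steps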


# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
inputs, _ = next(iter(dataloaders['train']))  # one batch, just to get the input shape
dummy_image = torch.rand(inputs.shape)        # random tensor shaped like a batch
print(dummy_image.shape)
dummy_image = dummy_image.to(device)
writer.add_graph(model_ft, dummy_image)
writer.close()
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)

Output

Epoch 0/24
----------
saving model epoch :0
train Loss: 0.6785 Acc: 0.5913 Recall: 1.0000 Precision: 1.0000
val Loss: 0.6839 Acc: 0.4138 Recall: 0.3012 Precision: 1.0000

Epoch 1/24
----------
train Loss: 0.5544 Acc: 0.7340 Recall: 1.0000 Precision: 1.0000
val Loss: 0.2682 Acc: 0.9475 Recall: 1.0000 Precision: 0.9765

.....

Epoch 24/24
----------
train Loss: 0.0956 Acc: 0.9738 Recall: 1.0000 Precision: 1.0000
val Loss: 0.0232 Acc: 1.0000 Recall: 1.0000 Precision: 1.0000

Training complete in 6m 10s
Best val Acc: 1.000000, Precision: 1.0000
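
As a side note, a saved checkpoint can be restored later like this (a minimal sketch; the filename is hypothetical, following the naming pattern used in train_model):

# hypothetical path, matching the save pattern in train_model above
checkpoint_path = r"H:\model\vgg16_transferlearning_24_2020_1019_12-34_best.pkl"
model_ft.load_state_dict(torch.load(checkpoint_path, map_location=device))
model_ft.eval()  # switch to evaluation mode before running inference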

Performance indicators and tools used

Why I used them

The biggest reason for moving to PyTorch was that TensorBoard can be used with it directly. Since I am mainly dealing with classification problems this time, I wanted to use a confusion matrix to derive recall and precision. In the end I was able to check accuracy, loss, recall, and precision in TensorBoard, so I am happy.
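
For reference, the confusion matrix itself can be computed from the same per-epoch arrays collected in train_model; a minimal sketch (all_labels and all_preds as in the training loop above):

from sklearn.metrics import confusion_matrix

# all_labels / all_preds collected over one validation epoch, as in train_model
cm = confusion_matrix(y_true=all_labels, y_pred=all_preds)
print(cm)  # rows are true classes, columns are predicted classes

The logged curves themselves appear after launching TensorBoard with tensorboard --logdir runs.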

In closing

Being able to visualize things when evaluating performance really is nice. A 2D chart packs in more information than a 1D log. Be careful, though: if you pack in too much, it becomes cluttered and hard to read.

If there is a better way to write this, please point it out.

Reference

(1) Transfer Learning for Computer Vision Tutorial
