[PYTHON] handwritten client script

handwritting.py:




from __future__ import print_function

import argparse
import datetime
import logging as log
import os
import sys
import time

import cv2
import grpc
import numpy as np
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from openvino.inference_engine import IECore
from utils.codec import CTCCodec





def load_image(file_path):
    """Read an image, resize it and lay it out as a (1, 3, H, W) NCHW batch."""
    img = cv2.imread(file_path)
    img = cv2.resize(img, (args['width'], args['height']))
    img = img.transpose(2, 0, 1).reshape(1, 3, args['height'], args['width'])
    return img


def get_characters(char_file):
    """Load the character list used by the CTC decoder."""
    with open(char_file, 'r', encoding='utf-8') as f:
        return ''.join(line.strip('\n') for line in f)



def preprocess_input(src, height, width):
    """Convert to grayscale, resize keeping the aspect ratio and pad to (height, width)."""
    src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    ratio = float(src.shape[1]) / float(src.shape[0])
    tw = int(height * ratio)
    rsz = cv2.resize(src, (tw, height), interpolation=cv2.INTER_CUBIC).astype(np.float32)
    # pad the rest of the canvas with white (assumes tw <= width)
    outimg = np.full((height, width), 255., np.float32)
    rsz_h, rsz_w = rsz.shape
    outimg[:rsz_h, :rsz_w] = rsz
    cv2.imshow('OCR input image', outimg)
    outimg = np.reshape(outimg, (1, height, width))
    return outimg





# Command line arguments
parser = argparse.ArgumentParser(description='Demo for handwritten-japanese-recognition requests via the TFS gRPC API. '
                                             'It analyses input images and saves the results with the detected text. '
                                             'It relies on the model given as a parameter.')

parser.add_argument('--model_name', required=False, help='Name of the model to be used', default="handwritten-japanese-recognition")
parser.add_argument('--input_images_dir', required=False, help='Directory with input images', default="images")
parser.add_argument('--output_dir', required=False, help='Directory for storing images with detection results', default="results")
parser.add_argument('--batch_size', required=False, help='How many images should be grouped in one batch', default=1, type=int)
parser.add_argument('--width', required=False, help='Width in pixels the input images should be resized to', default=456, type=int)
parser.add_argument('--height', required=False, help='Height in pixels the input images should be resized to', default=256, type=int)
parser.add_argument('--grpc_address', required=False, default='localhost', help='Specify url to grpc service. default: localhost')
parser.add_argument('--grpc_port', required=False, default=9000, type=int, help='Specify port to grpc service. default: 9000')
#parser.add_argument("-cl", "--charlist", type=str, default=os.path.join(os.path.dirname(__file__), "data/kondate_nakayosi_char_list.txt"), help="Path to the decoding char list file")

args = vars(parser.parse_args())
log.basicConfig(level=log.INFO, stream=sys.stdout)

channel = grpc.insecure_channel("{}:{}".format(args['grpc_address'],args['grpc_port']))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

files = os.listdir(args['input_images_dir'])
batch_size = args['batch_size']
model_name = args['model_name']
print("Running "+model_name+" on files:" + str(files))

imgs = np.zeros((0,3,args['height'],args['width']), np.dtype('<f'))

# Load every image in the input directory into a single batch array
for i in files:
    img = load_image(os.path.join(args['input_images_dir'], i))
    imgs = np.append(imgs, img, axis=0)

print('Start processing {} iterations with batch size {}'.format(len(files)//batch_size , batch_size))


for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    img = imgs[x:(x + batch_size)]
    print("\nRequest shape", img.shape)
    request.inputs["data"].CopyFrom(make_tensor_proto(img, shape=(img.shape)))
    start_time = datetime.datetime.now()
    result = stub.Predict(request, 10.0) 
    end_time = datetime.datetime.now()
    duration = (end_time - start_time).total_seconds() * 1000

    # Plugin initialization
    ie = IECore()
    # Read IR
    log.info("Loading network")
   
    model = 'handwritten-japanese-recognition-0001'
    model = './intel/'+model+'/FP16/'+model
    net = ie.read_network(model+'.xml', model+'.bin')

    assert len(net.inputs) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"

    log.info("Preparing input/output blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    input_batch_size, input_channel, input_height, input_width = net.inputs[input_blob].shape

    # Read and pre-process the input images (NOTE: only the last image is kept)
    files = os.listdir(args['input_images_dir'])
    for i in files:
        frame = cv2.imread(os.path.join(args['input_images_dir'], i))
        # use the network's own input size for the local model
        input_image = preprocess_input(frame, height=input_height, width=input_width)[None, :, :, :]


    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name="CPU")
    characters = get_characters('data/kondate_nakayosi_char_list.txt')
    codec = CTCCodec(characters)

    # Start sync inference
    number_iter = 1
    log.info("Starting inference ({} iterations)".format(number_iter))
    infer_time = []
    for i in range(number_iter):
        
        t0 = time.time()
        preds = exec_net.infer(inputs={input_blob: input_image})
        preds = preds[out_blob]
        result = codec.decode(preds)
        print(result)
        infer_time.append((time.time() - t0) * 1000)

    log.info("Average throughput: {} ms".format(np.average(np.asarray(infer_time))))

    sys.exit()  # stop after the first batch
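Note that the script above never actually reads the gRPC response: result is discarded and the text is decoded by a second, local OpenVINO inference. A minimal sketch of decoding the server's answer directly, assuming the served model returns the same CTC logits as the local network (the output tensor name is taken from the response rather than hard-coded):

# Sketch: decode the TFS gRPC response directly (assumption: the response
# holds a single output tensor of CTC logits that CTCCodec can decode)
output_name = next(iter(result.outputs))           # actual name comes from the server
preds = make_ndarray(result.outputs[output_name])  # TensorProto -> numpy array
print(codec.decode(preds))                         # same CTCCodec as above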



Commands:

# Download the latest Model Server image
docker pull openvino/ubuntu18_model_server:latest

# Download the model files into a separate directory
curl --create-dirs https://download.01.org/opencv/2020/openvinotoolkit/2020.2/open_model_zoo/models_bin/3/handwritten-japanese-recognition-0001/FP32/handwritten-japanese-recognition-0001.xml https://download.01.org/opencv/2020/openvinotoolkit/2020.2/open_model_zoo/models_bin/3/handwritten-japanese-recognition-0001/FP32/handwritten-japanese-recognition-0001.bin -o model/handwritten-japanese-recognition-0001.xml -o model/handwritten-japanese-recognition-0001.bin
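A quick sanity check that both IR files landed where the docker run command below expects them:

ls -l model/
# both handwritten-japanese-recognition-0001.xml and .bin should be listed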

# Start the container serving gRPC on port 9000
docker run -d -v $(pwd)/model:/models/handwritten-japanese-recognition/1 -e LOG_LEVEL=DEBUG -p 9000:9000 openvino/ubuntu18_model_server /ie-serving-py/start_server.sh ie_serving model --model_path /models/handwritten-japanese-recognition --model_name handwritten-japanese-recognition --port 9000 --shape auto
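Before running the client it is worth confirming that the server came up and loaded the model; this is a generic Docker check, not specific to this image:

docker ps --latest            # the model server container should be Up
docker logs $(docker ps -lq)  # with LOG_LEVEL=DEBUG the logs should show the model being served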

# Download the example client script
curl https://raw.githubusercontent.com/OVaaS/ovaas-api/master/handwritting.py -o handwritting.py

# Download an image to be analyzed
curl --create-dirs https://openvino.jp/wp-content/uploads/2020/04/handwritting2_crop4.jpg -o images/people1.jpeg

# Install client dependencies
pip install -r client_requirements.txt
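The exact dependency list comes from client_requirements.txt in the repository; as a rough guide (an assumption based on the script's imports, not the authoritative list) it needs at least:

# approximate client_requirements.txt contents (assumption; prefer the repo's own file)
grpcio
tensorflow-serving-api
numpy
opencv-python
# openvino.inference_engine and utils.codec come from the OpenVINO toolkit /
# Open Model Zoo demo install, not from pip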

# Create a folder for results
mkdir results

# Run inference and store results in the newly created folder
python handwritting.py
