```python
from openvino.inference_engine import IECore
import cv2 as cv
import numpy as np
import time
import sys
from util import ResizeImage, CropImage, ToTensor, NormalizeImage

model_xml = "IR/handwriting.xml"
model_bin = "IR/handwriting.bin"
onnx_model = "onnx/handwriting.onnx"
image_file = "test/test3.jpg"
top_k = 10

# Image preprocessing: resize, center-crop, normalize and convert to a tensor
def preprocess(img):
    resize_op = ResizeImage(resize_short=256)
    img = resize_op(img)
    crop_op = CropImage(size=(224, 224))
    img = crop_op(img)
    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    img_scale = 1.0 / 255.0
    normalize_op = NormalizeImage(
        scale=img_scale, mean=img_mean, std=img_std)
    img = normalize_op(img)
    tensor_op = ToTensor()
    img = tensor_op(img)
    return img

# Label file
label_file = "labels.txt"

# Read the label file (one class name per line)
def readLabels():
    labels = []
    with open(label_file) as file_obj:
        for line in file_obj:
            labels.append(line.strip())
    return labels

labels = readLabels()

DEVICE = 'CPU'

# Initialize the plugin, list the available devices and print the plugin version
ie = IECore()
devices = ie.available_devices
print(devices)
ver = ie.get_versions(DEVICE)[DEVICE]
print("{descr}: {maj}.{min}.{num}".format(
    descr=ver.description, maj=ver.major, min=ver.minor, num=ver.build_number))

# Read the model (loading the ONNX file directly; the commented line loads the converted IR instead)
# net = ie.read_network(model=model_xml, weights=model_bin)
net = ie.read_network(model=onnx_model)

# Prepare the input and output blobs
print("Preparing input blobs")
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
net.batch_size = 1

# Load the model onto the inference device
print("Loading IR to the plugin...")
exec_net = ie.load_network(network=net, num_requests=1, device_name=DEVICE)

# Read the test image and convert it from BGR to RGB
frame = cv.imread(image_file)[:, :, ::-1]

# Query the input shape expected by the model
n, c, h, w = net.inputs[input_blob].shape
initial_h, initial_w, channels = frame.shape

# Resize the image to the model's input size, then apply the preprocessing pipeline
image = cv.resize(frame, (w, h))
image = preprocess(image)
print("Batch size is {}".format(n))

# Run inference in synchronous mode and time it
print("Starting inference in synchronous mode")
start = time.time()
res = exec_net.infer(inputs={input_blob: image})
end = time.time()
print("Infer Time:{}ms".format((end - start) * 1000))

# Process the output: pair each score with its label and print the top-k labels
print("Processing output blob")
res = res[out_blob]
result = zip(res[0].tolist(), labels)
result2 = sorted(result, reverse=True)
candidate = list(result2[0:top_k])
for c in candidate:
    print(c[1])
```
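The preprocessing helpers (`ResizeImage`, `CropImage`, `NormalizeImage`, `ToTensor`) are imported from a local `util` module that is not shown here. Below is a minimal sketch of what such a module might look like, assuming standard ImageNet-style preprocessing; the implementations are illustrative assumptions, not the actual `util.py`.

```python
# util.py -- hypothetical sketch of the preprocessing helpers used above.
# Assumes ImageNet-style preprocessing; the real util module may differ.
import cv2 as cv
import numpy as np


class ResizeImage:
    """Resize so that the shorter side equals resize_short, keeping the aspect ratio."""
    def __init__(self, resize_short=256):
        self.resize_short = resize_short

    def __call__(self, img):
        h, w = img.shape[:2]
        scale = self.resize_short / min(h, w)
        return cv.resize(img, (int(round(w * scale)), int(round(h * scale))))


class CropImage:
    """Center-crop the image to the given (width, height)."""
    def __init__(self, size=(224, 224)):
        self.size = size

    def __call__(self, img):
        cw, ch = self.size
        h, w = img.shape[:2]
        top = (h - ch) // 2
        left = (w - cw) // 2
        return img[top:top + ch, left:left + cw]


class NormalizeImage:
    """Scale pixel values, then normalize with per-channel mean and std."""
    def __init__(self, scale, mean, std):
        self.scale = scale
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)

    def __call__(self, img):
        img = img.astype(np.float32) * self.scale
        return (img - self.mean) / self.std


class ToTensor:
    """Convert an HWC image to a CHW float32 array, the layout the model expects."""
    def __call__(self, img):
        return img.transpose((2, 0, 1)).astype(np.float32)
```

With definitions along these lines, `preprocess()` turns the RGB image into a normalized CHW float32 array before it is passed to `exec_net.infer()`.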