Python Example
Last updated: 2021-07-16
Python Interface
Python Interface Directory Structure
The EdgeBoard system ships with a Python environment pre-installed, so it can be used directly. The Python interface also provides a paddlemobile Python installation package and a sample project (download link), containing the following files:
File name | Description |
---|---|
paddlemobile-0.0.1.linux-aarch64-py2.tar.gz | Python 2 installation package for paddlemobile |
edgeboard.py | Python-based model prediction example |
api.py | API example that drives edgeboard.py |
configs.classification | Configuration file directory for classification models, same as the configuration files in the C++ example |
configs.detection | Configuration file directory for detection models, same as the configuration files in the C++ example |
models.classification | Model file directory for classification models, same as the model files in the C++ example |
models.detection | Model file directory for detection models, same as the model files in the C++ example |
The edgeboard.py code is as follows:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import errno
import math
import os
import sys
import json
import cv2
import numpy as np
import paddlemobile as pm

predictor = None
labels = []

def init(configs):
    global predictor
    model_dir = configs['model']
    pm_config = pm.PaddleMobileConfig()                         # create the paddlemobile configuration object
    pm_config.precision = pm.PaddleMobileConfig.Precision.FP32  # set the precision to FP32
    pm_config.device = pm.PaddleMobileConfig.Device.kFPGA       # select the FPGA device
    pm_config.prog_file = model_dir + "/model"
    pm_config.param_file = model_dir + '/params'
    pm_config.thread_num = 4

    print('')
    print('configuration for predictor is :')
    print('\tPrecision: ' + str(pm_config.precision))
    print('\t   Device: ' + str(pm_config.device))
    print('\t    Model: ' + str(pm_config.prog_file))
    print('\t   Params: ' + str(pm_config.param_file))
    print('\tThreadNum: ' + str(pm_config.thread_num))
    print('')

    predictor = pm.CreatePaddlePredictor(pm_config)             # create the predictor

# read the label_list.txt file
def read_labels(configs):
    global labels
    if not 'labels' in configs:
        return
    label_path = configs['labels']
    if label_path is None or label_path == '':
        return
    with open(label_path) as label_file:
        line = label_file.readline()
        while line:
            labels.append(line.strip().split(':')[-1])
            line = label_file.readline()

# read a local image
def read_image(configs):
    image_input = cv2.imread(configs['image'], cv2.IMREAD_COLOR)
    return image_input

# image preprocessing
def preprocess_image(image_input, configs):
    # resize the image
    print('image shape input: ' + str(image_input.shape))
    width = configs['input_width']
    height = configs['input_height']
    image_resized = cv2.resize(image_input, (width, height), cv2.INTER_CUBIC)
    print('image shape resized: ' + str(image_resized.shape))

    # convert to float32
    image = image_resized.astype('float32')

    # transpose to channel-first format
    image_transposed = np.transpose(image, (2, 0, 1))
    print('image shape transposed: ' + str(image_transposed.shape))

    # mean and scale preprocessing
    mean = np.array(configs['mean']).reshape((3, 1, 1))
    scale_number = configs['scale']
    scale = np.array([scale_number, scale_number, scale_number]).reshape((3, 1, 1))

    # RGB or BGR formatting
    format = configs['format'].upper()
    if format == 'RGB':
        b = image_transposed[0]
        g = image_transposed[1]
        r = image_transposed[2]
        image_transposed = np.stack([r, g, b])
        print('image shape formatted transposed: ' + str(image_transposed.shape))

    # subtract mean and multiply by scale
    print 'subtract mean', mean.flatten(), ' and multiply with scale', scale.flatten()
    image_transposed -= mean
    image_transposed *= scale

    # transpose back
    image_result = np.transpose(image_transposed, (1, 2, 0))
    print('image shape transposed-back: ' + str(image_result.shape))
    print('')
    return image_result

# draw the detection model's output boxes on the image
def draw_results(image, output, threshold):
    height, width, _ = image.shape
    print 'boxes with scores above the threshold (%f): \n' % threshold
    i = 1
    for box in output:
        if box[1] > threshold:
            print '\t', i, '\t', box[0], '\t', box[1], '\t', box[2], '\t', box[3], '\t', box[4], '\t', box[5]
            x_min = int(box[2] * width )
            y_min = int(box[3] * height)
            x_max = int(box[4] * width )
            y_max = int(box[5] * height)
            cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
            i += 1
    cv2.imwrite('result.jpg', image)
    print('')

# print the classification model's prediction result
def classify(output, configs):
    data = output.flatten()
    max_index = 0
    score = 0.0
    for i in range(len(data)):
        if data[i] > score and not data[i] == float('inf'):
            max_index = i
            score = data[i]
    print 'label: ', labels[max_index]
    print 'index: ', max_index
    print 'score: ', score
    print ''

# output the detection model's prediction result
def detect(output, configs):
    image = read_image(configs)
    draw_results(image, output, configs['threshold'])

# run the model prediction
def predict(configs, detection):
    global predictor
    width = configs['input_width']
    height = configs['input_height']
    image = read_image(configs)
    input = preprocess_image(image, configs)

    tensor = pm.PaddleTensor()
    tensor.dtype = pm.PaddleDType.FLOAT32
    tensor.shape = (1, 3, width, height)
    tensor.data = pm.PaddleBuf(input)
    paddle_data_feeds = [tensor]

    print('prediction is running ...')
    outputs = predictor.Run(paddle_data_feeds)

    assert len(outputs) == 1, 'error numbers of tensor returned from Predictor.Run function !!!'

    output = np.array(outputs[0], copy = False)
    print('\nprediction result :')
    print('\t nDim: ' + str(output.ndim))
    print('\tShape: ' + str(output.shape))
    print('\tDType: ' + str(output.dtype))
    print('')
    # print(output)
    # print('')

    if detection:
        detect(output, configs)
    else:
        classify(output, configs)
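api.py below is the intended entry point, but for reference the edgeboard module can also be driven directly from a few lines of Python. The following is only a minimal sketch; the configuration path is one of the preset configurations shipped with the package and is used purely as an illustration.

#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Minimal sketch of calling the edgeboard module directly, without api.py.
# The configuration path is one of the bundled presets; adapt it to your model.
import json
import edgeboard

with open('configs.classification/Inceptionv3/zebra.json') as json_file:
    configs = json.load(json_file)

edgeboard.init(configs)            # create the paddlemobile predictor on the FPGA
edgeboard.read_labels(configs)     # load label_list (classification models)
edgeboard.predict(configs, False)  # second argument: True for detection, False for classification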
The api.py code is as follows:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import errno
import math
import os
import sys
import argparse
import json
import cv2
import numpy as np
import edgeboard

# parse the command-line arguments
def parse_args():
    parser = argparse.ArgumentParser(description='API implementation for Paddle-Mobile')
    parser.add_argument('-d', '--detection',
                        help='flag indicating detections',
                        action="store_true")
    parser.add_argument('-j', '--json',
                        help='configuration file for the prediction')
    return parser.parse_args()

# print the arguments
def print_args(args):
    print 'Arguments: '
    print '\t', '     detection flag: ', args.detection
    print '\t', 'json configuration: ', args.json

# main function
def main():
    args = parse_args()
    print_args(args)
    if args.json is None or args.json == '':
        print '\nFor usage, please use the -h switch.\n\n'
        sys.exit(0)
    with open(args.json) as json_file:
        configs = json.load(json_file)
    edgeboard.init(configs)
    edgeboard.read_labels(configs)
    edgeboard.predict(configs, args.detection)

if __name__ == '__main__':
    sys.exit(main())
Installation and Usage
1. Copy paddlemobile-0.0.1.linux-aarch64-py2.tar.gz to a user directory, for example /home/root/workspace/.
2. Install the paddlemobile-python SDK by extracting the tar.gz archive from the system root directory:
# change to the system root directory
cd /
# extract the tar.gz archive
tar -xzvf home/root/workspace/paddlemobile-0.0.1.linux-aarch64-py2.tar.gz
# return to the user HOME directory
cd
# check that the paddlemobile (0.0.1) package is installed
pip list
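Beyond pip list, a quick sanity check is to import the package from Python. The sketch below only references names that edgeboard.py itself uses:

#!/usr/bin/python
# Minimal sanity check that the paddlemobile package can be imported.
# Only names that edgeboard.py itself uses are referenced here.
import paddlemobile as pm

config = pm.PaddleMobileConfig()   # construct the configuration object used in edgeboard.py
print('paddlemobile import OK: ' + str(config is not None))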
3. Copy the EdgeBoard paddle-mobile inference library to the /usr/lib directory.
The EdgeBoard soft-core upgrade files (fpgadrv.ko, paddlemobile.so, image.ub & BOOT.BIN) must all be of the same version to run correctly; upgrade the soft-core version if they do not match.
For inference libraries of version 1.3.0 and above, the library file name contains the version number; copy it together with the ln.sh script to the /usr/lib directory and run the script with sh ln.sh.
As shown in the figure below:
4. Check the configuration file, then call the Python interface api.py. The configuration files for the Python interface have the same structure as the C++ example configuration files; when running your own model, replace the model files and the corresponding configuration file. This is not covered again here, but a sketch of the fields such a file contains is given below.
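For orientation only, the snippet below builds a configuration with the fields that edgeboard.py actually reads ('model', 'labels', 'image', 'input_width', 'input_height', 'mean', 'scale', 'format' and, for detection, 'threshold'). All paths and values are illustrative assumptions; take the real ones from the bundled configs.classification / configs.detection files.

#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Sketch of a configuration as consumed by edgeboard.py.
# Every path and value below is a placeholder, not a shipped file.
import json

config = {
    "model": "models.classification/Inceptionv3",                  # directory containing "model" and "params"
    "labels": "models.classification/Inceptionv3/label_list.txt",  # label file read by read_labels()
    "image": "images/zebra.jpg",                                   # local image to predict on
    "input_width": 299,
    "input_height": 299,
    "mean": [128.0, 128.0, 128.0],                                 # per-channel mean, reshaped to (3, 1, 1)
    "scale": 0.0078125,                                            # single scale applied to all channels
    "format": "RGB",                                               # "RGB" or "BGR"
    "threshold": 0.3                                               # detection models only
}

with open("my_model.json", "w") as config_file:
    json.dump(config, config_file, indent=4)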
Local Image Prediction Examples
1. Classification model prediction
python api.py -j {classification model json file}
For example, run the preset Inceptionv3 model:
python api.py -j configs.classification/Inceptionv3/zebra.json
As shown in the figure below:
2. Detection model prediction
python api.py -d -j {detection model json file}
For example, run the preset vgg-ssd model:
python api.py -d -j configs.detection/vgg-ssd/screw.json
As shown in the figure below:
Prediction result image: