Editor: OAK China. First published at: oakchina.cn. If you like this post, please give it a 👍⭐️✍. The content may be updated from time to time; the official site always has the latest version, so please check the link to the original post.

Hello everyone, this is OAK China, and I'm Ashely.

Focused on technology, focused on sharing.

This may be the last blog post I send you from here, as I'm about to leave to chase my dream. Anyway, on to the tutorial.

1. For conversion and usage tutorials covering other YOLO versions, please refer to the other posts in this series.
2. For detection-type YOLO models we recommend trying the online conversion service first; only fall back to the local conversion described in this tutorial if the online conversion fails.

▌ Converting .pt to .onnx
Use the script below (place it in the YOLOv10 root directory) to convert the PyTorch model to an ONNX model.

If blobconverter is installed, the script can also convert the model directly to a blob.

Example usage (the one2one head is used by default):

```bash
python export_onnx.py -w path_to_model.pt -imgsz 640
```

Help output of export_onnx.py:
```
usage: export_onnx.py [-h] -m INPUT_MODEL [-imgsz IMG_SIZE [IMG_SIZE ...]] [-op OPSET] [--max_det MAX_DET] [--one2many] [-n NAME] [-o OUTPUT_DIR] [-b] [-s] [-sh SHAVES]
                      [-t {docker,blobconverter,local}]

Tool for converting Yolov8 models to the blob format used by OAK

optional arguments:
  -h, --help            show this help message and exit
  -m INPUT_MODEL, -i INPUT_MODEL, -w INPUT_MODEL, --input_model INPUT_MODEL
                        weights path (default: None)
  -imgsz IMG_SIZE [IMG_SIZE ...], --img-size IMG_SIZE [IMG_SIZE ...]
                        image size (default: [640, 640])
  -op OPSET, --opset OPSET
                        opset version (default: 12)
  --max_det MAX_DET     maximum number of detections per image (default: 300)
  --one2many            Use the one2many branch as the head output, otherwise one2one (default: False)
  -n NAME, --name NAME  The name of the model to be saved, none means using the same name as the input model (default: None)
  -o OUTPUT_DIR, --output_dir OUTPUT_DIR
                        Directory for saving files, none means using the same path as the input model (default: None)
  -b, --blob            OAK Blob export (default: False)
  -s, --spatial_detection
                        Inference with depth information (default: False)
  -sh SHAVES, --shaves SHAVES
                        Number of SHAVE cores used when compiling the blob (default: None)
  -t {docker,blobconverter,local}, --convert_tool {docker,blobconverter,local}
                        Which tool is used to convert, docker: should already have docker (https://docs.docker.com/get-docker/) and docker-py (pip install docker) installed;
                        blobconverter: uses an online server to convert the model and should already have blobconverter (pip install blobconverter); local: use openvino-dev
                        (pip install openvino-dev) and openvino 2022.1
                        (https://docs.oakchina.cn/en/latest/pages/Advanced/Neural_networks/local_convert_openvino.html#id2) to convert (default: blobconverter)
```

export_onnx.py:

```python
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import argparse
import json
import logging
import math
import sys
import time
import warnings
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile, ZIP_LZMA

import torch
from torch import nn

warnings.filterwarnings("ignore")

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH

from ultralytics.nn.modules import v10Detect, Detect
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.utils.tal import dist2bbox, make_anchors
from ultralytics.utils import ops
from ultralytics.utils.torch_utils import select_device

try:
    from rich import print
    from rich.logging import RichHandler

    logging.basicConfig(
        level="INFO",
        format="%(message)s",
        datefmt="[%X]",
        handlers=[
            RichHandler(
                rich_tracebacks=False,
                show_path=False,
            )
        ],
    )
except ImportError:
    logging.basicConfig(
        level="INFO",
        format="%(asctime)s\t%(levelname)s\t%(message)s",
        datefmt="[%X]",
    )


def v10postprocess(preds, max_det, nc=80):
    """Post-process the raw model predictions.

    Args:
        preds (torch.Tensor): predictions of shape (batch_size, num_boxes, 4 + num_classes).
        max_det (int): maximum number of detections to keep.
        nc (int): number of classes.

    Returns:
        boxes (torch.Tensor): kept boxes, shape (batch_size, max_det, 4).
        scores (torch.Tensor): kept confidences, shape (batch_size, max_det).
        labels (torch.Tensor): kept class labels, shape (batch_size, max_det).

    Notes:
        Assumes the last dimension of preds holds the box coordinates followed by the class scores.
    """
    assert 4 + nc == preds.shape[-1]
    # split the predictions into box coordinates and class scores
    boxes, scores = preds.split([4, nc], dim=-1)
    # keep the highest-scoring candidates of each image
    max_scores = scores.amax(dim=-1)
    max_scores, index = torch.topk(max_scores, max_det, dim=-1)
    index = index.unsqueeze(-1)
    # gather the boxes and scores of those candidates
    boxes = torch.gather(boxes, dim=1, index=torch.cat([index for i in range(boxes.shape[-1])], dim=-1))
    scores = torch.gather(scores, dim=1, index=torch.cat([index for i in range(scores.shape[-1])], dim=-1))
    # take the top-k over all (box, class) pairs
    scores, index = torch.topk(scores.flatten(1), max_det, dim=-1)
    # recover the class labels
    labels = index - (index // nc) * nc
    index = (index // nc).unsqueeze(-1)
    # gather the kept boxes
    boxes = boxes.gather(dim=1, index=torch.cat([index for i in range(boxes.shape[-1])], dim=-1))
    return boxes, scores, labels


class DetectV10(nn.Module):
    """YOLOv10 Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init
    max_det = -1

    def __init__(self, old_detect):
        super().__init__()
        self.nc = old_detect.nc  # number of classes
        self.nl = old_detect.nl  # number of detection layers
        self.reg_max = old_detect.reg_max  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
        self.no = old_detect.no  # number of outputs per anchor
        self.stride = old_detect.stride  # strides computed during build
        self.cv2 = old_detect.cv2
        self.cv3 = old_detect.cv3
        self.dfl = old_detect.dfl
        self.f = old_detect.f
        self.i = old_detect.i
        self.one2one_cv2 = old_detect.one2one_cv2
        self.one2one_cv3 = old_detect.one2one_cv3

    def decode_bboxes(self, bboxes, anchors):
        """Decode bounding boxes."""
        return dist2bbox(bboxes, anchors, xywh=False, dim=1)

    def inference(self, x):
        # Inference path
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
        # dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=False, dim=1) * self.strides
        dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y

    def forward_feat(self, x, cv2, cv3):
        y = []
        for i in range(self.nl):
            y.append(torch.cat((cv2[i](x[i]), cv3[i](x[i])), 1))
        return y

    def forward(self, x):
        one2one = self.forward_feat([xi.detach() for xi in x], self.one2one_cv2, self.one2one_cv3)
        one2one = self.inference(one2one)

        assert self.max_det != -1
        boxes, scores, labels = v10postprocess(one2one.permute(0, 2, 1), self.max_det, self.nc)
        boxes /= torch.Tensor([x[0].shape[2] * 2**3, x[0].shape[3] * 2**3, x[0].shape[2] * 2**3, x[0].shape[3] * 2**3])
        return torch.cat([labels.unsqueeze(-1), labels.unsqueeze(-1), scores.unsqueeze(-1), boxes], dim=-1)
        # return torch.cat([boxes, scores.unsqueeze(-1), labels.unsqueeze(-1)], dim=-1)

    def bias_init(self):
        # Initialize Detect() biases, WARNING: requires stride availability
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.one2one_cv2, m.one2one_cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)


class DetectV8(nn.Module):
    """YOLOv8 Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, old_detect):
        super().__init__()
        self.nc = old_detect.nc  # number of classes
        self.nl = old_detect.nl  # number of detection layers
        self.reg_max = old_detect.reg_max  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
        self.no = old_detect.no  # number of outputs per anchor
        self.stride = old_detect.stride  # strides computed during build
        self.cv2 = old_detect.cv2
        self.cv3 = old_detect.cv3
        self.dfl = old_detect.dfl
        self.f = old_detect.f
        self.i = old_detect.i

    def forward(self, x):
        shape = x[0].shape  # BCHW
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
        box = self.dfl(box)
        cls_output = cls.sigmoid()
        # Get the max
        conf, _ = cls_output.max(1, keepdim=True)
        # Concat
        y = torch.cat([box, conf, cls_output], dim=1)
        # Split to 3 channels
        outputs = []
        start, end = 0, 0
        for i, xi in enumerate(x):
            end += xi.shape[-2] * xi.shape[-1]
            outputs.append(y[:, :, start:end].view(xi.shape[0], -1, xi.shape[-2], xi.shape[-1]))
            start += xi.shape[-2] * xi.shape[-1]
        return outputs

    def bias_init(self):
        # Initialize Detect() biases, WARNING: requires stride availability
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Tool for converting Yolov8 models to the blob format used by OAK",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-m",
        "-i",
        "-w",
        "--input_model",
        type=Path,
        required=True,
        help="weights path",
    )
    parser.add_argument(
        "-imgsz",
        "--img-size",
        nargs="+",
        type=int,
        default=[640, 640],
        help="image size",
    )  # height, width
    parser.add_argument("-op", "--opset", type=int, default=12, help="opset version")
    parser.add_argument("--max_det", default=300, help="maximum number of detections per image")
    parser.add_argument(
        "--one2many",
        action="store_true",
        help="Use the one2many branch as the head output, otherwise one2one",
    )
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        help="The name of the model to be saved, none means using the same name as the input model",
    )
    parser.add_argument(
        "-o",
        "--output_dir",
        type=Path,
        help="Directory for saving files, none means using the same path as the input model",
    )
    parser.add_argument(
        "-b",
        "--blob",
        action="store_true",
        help="OAK Blob export",
    )
    parser.add_argument(
        "-s",
        "--spatial_detection",
        action="store_true",
        help="Inference with depth information",
    )
    parser.add_argument(
        "-sh",
        "--shaves",
        type=int,
        help="Number of SHAVE cores used when compiling the blob",
    )
    parser.add_argument(
        "-t",
        "--convert_tool",
        type=str,
        help="Which tool is used to convert, "
        "docker: should already have docker (https://docs.docker.com/get-docker/) and docker-py (pip install docker) installed; "
        "blobconverter: uses an online server to convert the model and should already have blobconverter (pip install blobconverter); "
        "local: use openvino-dev (pip install openvino-dev) and openvino 2022.1 "
        "(https://docs.oakchina.cn/en/latest/pages/Advanced/Neural_networks/local_convert_openvino.html#id2) to convert",
        default="blobconverter",
        choices=["docker", "blobconverter", "local"],
    )
    args = parser.parse_args()
    args.input_model = args.input_model.resolve().absolute()
    if args.name is None:
        args.name = args.input_model.stem
    if args.output_dir is None:
        args.output_dir = args.input_model.parent
    args.img_size *= 2 if len(args.img_size) == 1 else 1  # expand
    if args.shaves is None:
        args.shaves = 5 if args.spatial_detection else 6
    return args


def export(input_model, img_size, output_model, opset, **kwargs):
    t = time.time()

    # Load PyTorch model
    device = select_device("cpu")
    # load FP32 model
    model = attempt_load_weights(input_model, device=device, inplace=True, fuse=True)

    labels = model.module.names if hasattr(model, "module") else model.names  # get class names
    labels = labels if isinstance(labels, list) else list(labels.values())
    nc = model.nc if hasattr(model, "nc") else model.model[-1].nc

    # check num classes and labels
    assert nc == len(labels), f"Model class count {nc} != len(names) {len(labels)}"

    # Replace with the custom Detection Head
    if kwargs.get("one2many", False):
        if isinstance(model.model[-1], (Detect)):
            model.model[-1] = DetectV8(model.model[-1])
            model.model[-1].export = True
    else:
        if isinstance(model.model[-1], (v10Detect)):
            print("Replacing model.model[-1] with DetectV10")
            model.model[-1] = DetectV10(model.model[-1])
            model.model[-1].export = True
            model.model[-1].max_det = kwargs.get("max_det", 300)

    num_branches = model.model[-1].nl

    # Input
    img = torch.zeros(1, 3, *img_size).to(device)  # image size(1,3,320,320) iDetection

    model.eval()
    model.float()
    model = model.fuse()
    model(img)  # dry runs

    # ONNX export
    try:
        import onnx

        print()
        logging.info("Starting ONNX export with onnx %s..." % onnx.__version__)
        output_list = (
            ["output%s_yolov6r2" % (i + 1) for i in range(num_branches)]
            if kwargs.get("one2many", False)
            else ["output_yolov10"]
        )
        with BytesIO() as f:
            torch.onnx.export(
                model,
                img,
                f,
                verbose=False,
                opset_version=opset,
                input_names=["images"],
                output_names=output_list,
            )

            # Checks
            onnx_model = onnx.load_from_string(f.getvalue())  # load onnx model
            onnx.checker.check_model(onnx_model)  # check onnx model

            try:
                import onnxsim

                logging.info("Starting to simplify ONNX...")
                onnx_model, check = onnxsim.simplify(onnx_model)
                assert check, "assert check failed"
            except ImportError:
                logging.warning(
                    "onnxsim is not found, if you want to simplify the onnx, you should install it:\n\t"
                    "pip install -U onnxsim onnxruntime\n"
                    "then use:\n\t"
                    f"python -m onnxsim {output_model} {output_model}"
                )
            except Exception:
                logging.exception("Simplifier failure")

            onnx.save(onnx_model, output_model)
            logging.info("ONNX export success, saved as:\n\t%s" % output_model)
    except Exception:
        logging.exception("ONNX export failure")

    if kwargs.get("one2many", False):
        # generate anchors and sides
        anchors = []
        # generate masks
        masks = dict()

        logging.info("anchors:\n\t%s" % anchors)
        logging.info("anchor_masks:\n\t%s" % masks)

        jsondata = {
            "nn_config": {
                "output_format": "detection",
                "NN_family": "YOLO",
                "input_size": f"{img_size[0]}x{img_size[1]}",
                "NN_specific_metadata": {
                    "classes": nc,
                    "coordinates": 4,
                    "anchors": anchors,
                    "anchor_masks": masks,
                    "iou_threshold": 0.3,
                    "confidence_threshold": 0.5,
                },
            },
            "mappings": {"labels": labels},
        }
    else:
        jsondata = {
            "nn_config": {
                "output_format": "detection",
                "NN_family": "mobilenet",
                "input_size": f"{img_size[0]}x{img_size[1]}",
                "confidence_threshold": 0.5,
            },
            "mappings": {"labels": labels},
        }

    export_json = output_model.with_suffix(".json")
    export_json.write_text(
        json.dumps(
            jsondata,
            indent=4,
        )
    )
    logging.info("Model data export success, saved as:\n\t%s" % export_json)

    # Finish
    logging.info("Export complete (%.2fs).\n" % (time.time() - t))


def convert(convert_tool, output_model, shaves, output_dir, name, **kwargs):
    t = time.time()

    export_dir: Path = output_dir.joinpath(name + "_openvino")
    export_dir.mkdir(parents=True, exist_ok=True)

    export_xml = export_dir.joinpath(name + ".xml")
    export_blob = export_dir.joinpath(name + ".blob")

    if convert_tool == "blobconverter":
        import blobconverter

        blob_path = blobconverter.from_onnx(
            model=str(output_model),
            data_type="FP16",
            shaves=shaves,
            use_cache=False,
            # version="2021.4",
            version="2022.1",
            output_dir=export_dir,
            optimizer_params=[
                "--scale=255",
                "--reverse_input_channel",
                "--use_new_frontend",
            ],
            download_ir=True,
        )

        with ZipFile(blob_path, "r", ZIP_LZMA) as zip_obj:
            for name in zip_obj.namelist():
                zip_obj.extract(
                    name,
                    export_dir,
                )
        blob_path.unlink()
    elif convert_tool == "docker":
        import docker

        export_dir = Path("/io").joinpath(export_dir.name)
        export_xml = export_dir.joinpath(name + ".xml")
        export_blob = export_dir.joinpath(name + ".blob")

        client = docker.from_env()
        image = client.images.pull("openvino/ubuntu20_dev", tag="2022.3.1")
        docker_output = client.containers.run(
            image=image.tags[0],
            command=f"bash -c \"mo -m {name}.onnx -n {name} -o {export_dir} --static_shape --reverse_input_channels --scale=255 --use_new_frontend "
            f"&& echo 'MYRIAD_ENABLE_MX_BOOT NO' | tee /tmp/myriad.conf > /dev/null "
            f"&& /opt/intel/openvino/tools/compile_tool/compile_tool -m {export_xml} -o {export_blob} -ip U8 "
            f"-VPU_NUMBER_OF_SHAVES {shaves} -VPU_NUMBER_OF_CMX_SLICES {shaves} -d MYRIAD -c /tmp/myriad.conf\"",
            remove=True,
            volumes=[
                f"{output_dir}:/io",
            ],
            working_dir="/io",
        )
        logging.info(docker_output.decode("utf8"))
    else:
        import subprocess as sp

        # OpenVINO export
        logging.info("Starting to export OpenVINO...")
        OpenVINO_cmd = "mo --input_model %s --output_dir %s --data_type FP16 --scale 255 --reverse_input_channel" % (
            output_model,
            export_dir,
        )
        try:
            sp.check_output(OpenVINO_cmd, shell=True)
            logging.info("OpenVINO export success, saved as %s" % export_dir)
        except sp.CalledProcessError:
            logging.exception("")
            logging.warning("OpenVINO export failure!")
            logging.warning("By the way, you can try to export OpenVINO use:\n\t%s" % OpenVINO_cmd)

        # OAK Blob export
        logging.info("Then you can try to export blob use:")
        blob_cmd = (
            "echo 'MYRIAD_ENABLE_MX_BOOT ON' | tee /tmp/myriad.conf && "
            "compile_tool -m %s -o %s -ip U8 -d MYRIAD -VPU_NUMBER_OF_SHAVES %s -VPU_NUMBER_OF_CMX_SLICES %s -c /tmp/myriad.conf"
            % (export_xml, export_blob, shaves, shaves)
        )
        logging.info("%s" % blob_cmd)

        logging.info(
            "compile_tool maybe in the path: /opt/intel/openvino/tools/compile_tool/compile_tool, "
            "if you install openvino 2022.1 with apt"
        )

    logging.info("Convert complete (%.2fs).\n" % (time.time() - t))


if __name__ == "__main__":
    args = parse_args()
    logging.info(args)
    print()
    output_model = args.output_dir / (args.name + ".onnx")

    export(output_model=output_model, **vars(args))
    if args.blob:
        convert(output_model=output_model, **vars(args))
```

You can use Netron to view the structure of the exported model.
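Netron itself is not covered further in this post; if it is not installed yet, one convenient option (assuming the `netron` pip package) is to launch it from Python:

```python
# Sketch: open the exported graph in Netron (assumes `pip install netron`)
import netron

netron.start("yolov10n.onnx")  # serves an interactive graph view in the browser
```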
Netron views of the exported models: one2one and one2many.
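Before converting any further, it can be worth running the exported ONNX once on a dummy input to confirm the output layout. This check is not part of the original tutorial; it is a minimal sketch assuming the default 640x640 one2one export and the output name output_yolov10 used by the script above:

```python
# Sketch: sanity-check the exported ONNX with onnxruntime (assumed extra dependency)
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("yolov10n.onnx", providers=["CPUExecutionProvider"])
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # NCHW dummy input
(out,) = sess.run(["output_yolov10"], {"images": dummy})
# one2one head: shape (1, max_det, 7), each row is
# [label, label, score, xmin, ymin, xmax, ymax] with box coordinates normalized to 0..1
print(out.shape)
```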
▌ Conversion

OpenVINO local conversion

onnx -> openvino: mo is a script shipped with openvino_dev 2022.1 (install it with pip install openvino-dev):

```bash
mo --input_model yolov10n.onnx --scale=255 --reverse_input_channel
```

openvino -> blob: compile_tool is a tool shipped with the OpenVINO Runtime:

```bash
<path>/compile_tool -m yolov10n.xml \
    -ip U8 -d MYRIAD \
    -VPU_NUMBER_OF_SHAVES 6 \
    -VPU_NUMBER_OF_CMX_SLICES 6
```

Online conversion
blobconverter web page: http://blobconverter.luxonis.com/

Open the page and follow the steps below to set the parameters and convert the model:

1. Select the onnx model.
2. Change optimizer_params to --data_type=FP16 --scale=255 --reverse_input_channel.
3. Change shaves to 6.
4. Convert.
blobconverter Python usage:

```python
import blobconverter

blobconverter.from_onnx(
    "yolov10n.onnx",
    optimizer_params=[
        "--scale=255",
        "--reverse_input_channel",
    ],
    shaves=6,
)
```

blobconverter CLI:

```bash
blobconverter --onnx yolov10n.onnx -sh 6 -o . --optimizer-params "--scale=255 --reverse_input_channel"
```

▌ DepthAI example
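Both examples below use a placeholder labelMap. export_onnx.py also writes a JSON sidecar next to the model (e.g. yolov10n.json, file name assumed here) that records the class names and default thresholds, so a small helper like the following sketch can reuse it instead of hand-written labels:

```python
# Sketch: reuse the labels and threshold recorded by export_onnx.py's JSON sidecar
import json
from pathlib import Path

config = json.loads(Path("yolov10n.json").read_text())
labelMap = config["mappings"]["labels"]
# one2one exports store the threshold at the nn_config level, one2many under NN_specific_metadata
nn_config = config["nn_config"]
confidence = nn_config.get("confidence_threshold") or \
    nn_config.get("NN_specific_metadata", {}).get("confidence_threshold", 0.5)
print(len(labelMap), "classes, confidence threshold:", confidence)
```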
one2one: correct decoding requires the following configurable, network-specific parameters:

setConfidenceThreshold – confidence threshold; objects below this threshold are filtered out
```python
# coding=utf-8
import cv2
import depthai as dai
import numpy as np

numClasses = 80
model = dai.OpenVINO.Blob("yolov10n.blob")
dim = next(iter(model.networkInputs.values())).dims
W, H = dim[:2]

labelMap = [
    # "class_1", "class_2", ...
    "class_%s" % i
    for i in range(numClasses)
]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.MobileNetDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("image")
xoutNN.setStreamName("nn")

# Properties
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

# Network specific settings
detectionNetwork.setBlob(model)
detectionNetwork.setConfidenceThreshold(0.5)

# Linking
camRgb.preview.link(detectionNetwork.input)
camRgb.preview.link(xoutRgb.input)
detectionNetwork.out.link(xoutNN.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    imageQueue = device.getOutputQueue(name="image", maxSize=4, blocking=False)
    detectQueue = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    frame = None
    detections = []

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
        cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness + 3, cv2.LINE_AA)
        cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness, cv2.LINE_AA)

    def drawRect(frame, topLeft, bottomRight, color=(255, 255, 255), thickness=1):
        cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), thickness + 3)
        cv2.rectangle(frame, topLeft, bottomRight, color, thickness)

    def displayFrame(name, frame):
        color = (128, 128, 128)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            drawText(
                frame=frame,
                text=labelMap[detection.label],
                org=(bbox[0] + 10, bbox[1] + 20),
            )
            drawText(
                frame=frame,
                text=f"{detection.confidence:.2%}",
                org=(bbox[0] + 10, bbox[1] + 35),
            )
            drawRect(
                frame=frame,
                topLeft=(bbox[0], bbox[1]),
                bottomRight=(bbox[2], bbox[3]),
                color=color,
            )
        # Show the frame
        cv2.imshow(name, frame)

    while True:
        imageQueueData = imageQueue.tryGet()
        detectQueueData = detectQueue.tryGet()

        if imageQueueData is not None:
            frame = imageQueueData.getCvFrame()

        if detectQueueData is not None:
            detections = detectQueueData.detections

        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord("q"):
            break
```

one2many: correct decoding requires the following configurable, network-specific parameters:
setNumClasses – number of YOLO detection classes
setIouThreshold – IoU threshold
setConfidenceThreshold – confidence threshold; objects below this threshold are filtered out
```python
# coding=utf-8
import cv2
import depthai as dai
import numpy as np

numClasses = 80
model = dai.OpenVINO.Blob("yolov10n.blob")
dim = next(iter(model.networkInputs.values())).dims
W, H = dim[:2]

output_name, output_tenser = next(iter(model.networkOutputs.items()))
if "yolov6" in output_name:
    numClasses = output_tenser.dims[2] - 5
else:
    numClasses = output_tenser.dims[2] // 3 - 5

labelMap = [
    # "class_1", "class_2", ...
    "class_%s" % i
    for i in range(numClasses)
]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("image")
xoutNN.setStreamName("nn")

# Properties
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

# Network specific settings
detectionNetwork.setBlob(model)
detectionNetwork.setConfidenceThreshold(0.5)

# Yolo specific parameters
detectionNetwork.setNumClasses(numClasses)
detectionNetwork.setCoordinateSize(4)
detectionNetwork.setAnchors([])
detectionNetwork.setAnchorMasks({})
detectionNetwork.setIouThreshold(0.5)

# Linking
camRgb.preview.link(detectionNetwork.input)
camRgb.preview.link(xoutRgb.input)
detectionNetwork.out.link(xoutNN.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    imageQueue = device.getOutputQueue(name="image", maxSize=4, blocking=False)
    detectQueue = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    frame = None
    detections = []

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
        cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness + 3, cv2.LINE_AA)
        cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness, cv2.LINE_AA)

    def drawRect(frame, topLeft, bottomRight, color=(255, 255, 255), thickness=1):
        cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), thickness + 3)
        cv2.rectangle(frame, topLeft, bottomRight, color, thickness)

    def displayFrame(name, frame):
        color = (128, 128, 128)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            drawText(
                frame=frame,
                text=labelMap[detection.label],
                org=(bbox[0] + 10, bbox[1] + 20),
            )
            drawText(
                frame=frame,
                text=f"{detection.confidence:.2%}",
                org=(bbox[0] + 10, bbox[1] + 35),
            )
            drawRect(
                frame=frame,
                topLeft=(bbox[0], bbox[1]),
                bottomRight=(bbox[2], bbox[3]),
                color=color,
            )
        # Show the frame
        cv2.imshow(name, frame)

    while True:
        imageQueueData = imageQueue.tryGet()
        detectQueueData = detectQueue.tryGet()

        if imageQueueData is not None:
            frame = imageQueueData.getCvFrame()

        if detectQueueData is not None:
            detections = detectQueueData.detections

        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord("q"):
            break
```
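export_onnx.py also exposes a -s/--spatial_detection option for working with depth. The original post stops at the RGB examples, so the following is only a sketch of how the one2many pipeline above could be extended with stereo depth via DepthAI's YoloSpatialDetectionNetwork; the blob name and the depth thresholds are assumptions:

```python
# coding=utf-8
# Sketch only: spatial (RGB + stereo depth) variant of the one2many example above.
import depthai as dai

numClasses = 80
model = dai.OpenVINO.Blob("yolov10n.blob")  # assumed blob name
W, H = next(iter(model.networkInputs.values())).dims[:2]

pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
sdn = pipeline.create(dai.node.YoloSpatialDetectionNetwork)
xoutNN = pipeline.create(dai.node.XLinkOut)
xoutNN.setStreamName("nn")

camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)  # align depth to the RGB preview

# Same decoding parameters as the one2many example above
sdn.setBlob(model)
sdn.setConfidenceThreshold(0.5)
sdn.setNumClasses(numClasses)
sdn.setCoordinateSize(4)
sdn.setAnchors([])
sdn.setAnchorMasks({})
sdn.setIouThreshold(0.5)
# Depth-related settings (assumed values, in millimetres)
sdn.setBoundingBoxScaleFactor(0.5)
sdn.setDepthLowerThreshold(100)
sdn.setDepthUpperThreshold(5000)

monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
camRgb.preview.link(sdn.input)
stereo.depth.link(sdn.inputDepth)
sdn.out.link(xoutNN.input)

with dai.Device(pipeline) as device:
    detQueue = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    while True:
        for det in detQueue.get().detections:
            # spatialCoordinates are millimetres relative to the camera
            print(det.label, f"{det.confidence:.2%}",
                  det.spatialCoordinates.x, det.spatialCoordinates.y, det.spatialCoordinates.z)
```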
▌ References

https://docs.oakchina.cn/en/latest/
https://www.oakchina.cn/selection-guide/