If this is an original article, please credit the original source when reposting.
This article follows the 野火 (Embedfire) LubanCat rk3568 tutorial and records the deployment on the 正点原子 (ALIENTEK) ATK-DLK3568 board.
1. Introduction

ResNet18 is a convolutional neural network that is 18 layers deep, counting only the layers that carry weights (convolutional and fully connected layers). It is a variant of the ResNet family and uses residual connections to address the degradation problem of deep networks.

ResNet (Residual Neural Network) was proposed by Kaiming He et al. of Microsoft Research in 2015. The residual structure greatly accelerates the training of deep networks and also brings a considerable improvement in accuracy. A ResNet can be understood as a sub-network that, when stacked, forms a very deep network. The family has several variants, such as ResNet18, ResNet34, ResNet50, ResNet101, and ResNet152 (the comparison table of their structures is an image in the original post and is not reproduced here).

Here we focus on ResNet18: the base architecture is ResNet, and the depth is 18 weighted layers, excluding BN and pooling layers. ResNet18 is built from basic residual units, each consisting of two 3x3 convolutional layers with a BN layer and a ReLU activation in between.
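As a quick sanity check of that layer count, one can inspect torchvision's reference implementation (linked below). This is a minimal sketch; weights=None requires a reasonably recent torchvision (0.13+), older versions use pretrained=False instead:

import torch
import torchvision.models as models

# Load torchvision's reference ResNet18 (structure only, no pretrained weights)
model = models.resnet18(weights=None)

# The "18" counts the weighted layers on the main path:
# 1 stem conv + 8 basic blocks x 2 convs + 1 fc = 18
# (the 1x1 downsample convs on the shortcuts are excluded)
convs = [n for n, m in model.named_modules()
         if isinstance(m, torch.nn.Conv2d) and 'downsample' not in n]
fcs = [m for m in model.modules() if isinstance(m, torch.nn.Linear)]
print(len(convs) + len(fcs))  # 18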
The ResNet18 reference implementation in PyTorch: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py

2. Environment Setup
The environment has two parts: the training environment and the RKNN environment.
Setting up the RKNN environment was covered in an earlier post, so install it on your own. Training is done on a Windows PC without a GPU, so everything runs on the CPU.
1. Create a virtual environment
conda create -n ResNet18_env python=3.8 -y
2. Activate the environment
conda activate ResNet18_env
3. Install the dependencies
pip install torchvision
pip install onnxruntime
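A quick way to confirm the CPU-only environment is usable (a minimal sketch; the printed version numbers will vary):

import torch
import torchvision
import onnxruntime

print(torch.__version__, torchvision.__version__, onnxruntime.__version__)
print(torch.cuda.is_available())  # expected: False on this CPU-only machine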
3. Training

We define a custom ResNet18-style network and run a simple test on the CIFAR-10 dataset. CIFAR-10 consists of 60000 32x32 color images in 10 classes, 6000 images per class, split into 50000 training images and 10000 test images.
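Before training, the dataset itself can be inspected with a few lines (a minimal sketch; this downloads CIFAR-10 into ./data if it is not already there):

import torchvision

ds = torchvision.datasets.CIFAR10('./data', train=True, download=True)
print(len(ds))      # 50000 training images
print(ds.classes)   # ['airplane', 'automobile', 'bird', ...] -- 10 classes
img, label = ds[0]
print(img.size, ds.classes[label])  # (32, 32) PIL image and its label

The full training script is below.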
resnet18.py
import os
import torchvision
import torch
import torch.nn as nn

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")

# Transform configuration and data augmentation
transform_train = torchvision.transforms.Compose([
    torchvision.transforms.Pad(4),
    torchvision.transforms.RandomHorizontalFlip(),  # flip horizontally with probability 0.5
    torchvision.transforms.RandomCrop(32),          # randomly crop back to 32x32
    # torchvision.transforms.RandomVerticalFlip(),
    # torchvision.transforms.RandomRotation(15),
    torchvision.transforms.ToTensor(),              # convert to tensor, scale to [0, 1]
    torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    # torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

transform_test = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    # torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
# The augmentations above are applied to the dataset on the fly, each epoch

num_classes = 10
batch_size = 128
learning_rate = 0.001
num_epoches = 100
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Load the (pre-downloaded) dataset
train_dataset = torchvision.datasets.CIFAR10('./data', download=True, train=True, transform=transform_train)
test_dataset = torchvision.datasets.CIFAR10('./data', download=True, train=False, transform=transform_test)

# Data loaders
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Define a 3x3 convolution
def conv3x3(in_channels, out_channels, stride=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)

class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out

# Custom network based on nn.Module: each layer is created in __init__,
# and the data flow is wired together in forward()
class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes):
        super(ResNet, self).__init__()
        self.in_channels = 16
        self.conv = conv3x3(3, 16)
        self.bn = torch.nn.BatchNorm2d(16)
        self.relu = torch.nn.ReLU(inplace=True)
        self.layer1 = self._make_layers(block, 16, layers[0])
        self.layer2 = self._make_layers(block, 32, layers[1], 2)
        self.layer3 = self._make_layers(block, 64, layers[2], 2)
        self.layer4 = self._make_layers(block, 128, layers[3], 2)
        self.avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.fc = torch.nn.Linear(128, num_classes)

    def _make_layers(self, block, out_channels, blocks, stride=1):
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            downsample = torch.nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                torch.nn.BatchNorm2d(out_channels))
        layers = []
        layers.append(block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for i in range(1, blocks):
            layers.append(block(out_channels, out_channels))
        return torch.nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

# Build the model (on CPU)
model = ResNet(ResidualBlock, [2, 2, 2, 2], num_classes).to(device=device)
# Print the model structure
# print(f"Model structure: {model}\n\n")

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()                                   # cross-entropy loss
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # Adam optimizer

if __name__ == '__main__':
    # Train the model
    total_step = len(train_loader)
    for epoch in range(0, num_epoches):
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device=device)
            labels = labels.to(device=device)
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()
            # Backpropagation
            loss.backward()
            # Update the parameters
            optimizer.step()
            # sum_loss += loss.item()
            # _, predicted = torch.max(outputs.data, dim=1)
            # total += labels.size(0)
            # correct += predicted.eq(labels.data).cpu().sum()
            if (i + 1) % total_step == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, num_epoches, i + 1, total_step, loss.item()))
    print('Finished Training')

    # Save the weights if needed
    # torch.save(model.state_dict(), 'model_weights.pth')
    # torch.save(model, 'model.pt')

    print('\nTest the model')
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.to(device=device)
            labels = labels.to(device=device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on the 10000 test images: {:.4f} %'.format(100 * correct / total))

    # Export the ONNX model
    x = torch.randn((1, 3, 32, 32))
    torch.onnx.export(model, x, './resnet18.onnx', opset_version=12,
                      input_names=['input'], output_names=['output'])
One thing to note: the dataset was downloaded ahead of time, so no online download takes place here. The download is performed by the following code, which places the dataset in the ./data directory:
# load downloaded dataset
train_dataset = torchvision.datasets.CIFAR10('./data', download=True, train=True, transform=transform_train)
test_dataset = torchvision.datasets.CIFAR10('./data', download=True, train=False, transform=transform_test)

The images are divided into 10 classes:
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
Training takes roughly an hour. When it finishes, the resnet18.onnx model is generated in the current directory.
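To confirm the export, the model's input and output can be inspected directly with onnxruntime (a minimal sketch; the names and shapes follow from the torch.onnx.export call above):

import onnxruntime

sess = onnxruntime.InferenceSession('./resnet18.onnx')
print([(i.name, i.shape) for i in sess.get_inputs()])   # [('input', [1, 3, 32, 32])]
print([(o.name, o.shape) for o in sess.get_outputs()])  # [('output', [1, 10])]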
4. Testing the ONNX Model
The test code is as follows:
test_resnet18_onnx.py
import os, sys
sys.path.append(os.getcwd())
import onnxruntime
import torchvision.transforms as transforms
from PIL import Image

def to_numpy(tensor):
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

# Custom preprocessing for inference
def get_test_transform():
    return transforms.Compose([
        transforms.Resize([32, 32]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

# Path of the image to run inference on
image = Image.open('./horse5.jpg').convert('RGB')
img = get_test_transform()(image)
img = img.unsqueeze_(0)  # -> NCHW, (1, 3, 32, 32)

# Load the model
onnx_model_path = 'resnet18.onnx'
resnet_session = onnxruntime.InferenceSession(onnx_model_path)
inputs = {resnet_session.get_inputs()[0].name: to_numpy(img)}
outs = resnet_session.run(None, inputs)[0]

print('onnx weights', outs)
print('onnx prediction', outs.argmax(axis=1)[0])

Running the test, the predicted result is 7, which corresponds to horse.
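For readability, the raw index can be mapped back to a label using the same class order as in training (a small sketch, continuing from the script above):

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
pred = outs.argmax(axis=1)[0]
print('predicted: {} ({})'.format(pred, classes[pred]))  # e.g. 7 -> horse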
5. RKNN Model Conversion
Open the virtual machine set up for the ATK board and activate the rknn2_env environment; make sure the RKNN environment is properly installed.
Reference code for the conversion can also be found under examples/pytorch in the rknn-toolkit2 directory. The conversion code used here is as follows:
rknn_transfer.py
import numpy as np
import cv2
from rknn.api import RKNN
import torchvision.models as models
import torch
import os

def softmax(x):
    return np.exp(x) / sum(np.exp(x))

def torch_version():
    import torch
    torch_ver = torch.__version__.split('.')
    torch_ver[2] = torch_ver[2].split('+')[0]
    return [int(v) for v in torch_ver]

if __name__ == '__main__':
    if torch_version() < [1, 9, 0]:
        import torch
        print('Your torch version is {}, in order to better support the Quantization Aware Training (QAT) model,\n'
              'Please update the torch version to 1.9.0 or higher!'.format(torch.__version__))
        exit(0)

    MODEL = './resnet18.onnx'

    # Create RKNN object
    rknn = RKNN(verbose=True)

    # Pre-process config
    print('--> Config model')
    rknn.config(mean_values=[127.5, 127.5, 127.5], std_values=[255, 255, 255], target_platform='rk3568')
    # rknn.config(mean_values=[123.675, 116.28, 103.53], std_values=[58.395, 58.395, 58.395], target_platform='rk3568')
    # rknn.config(mean_values=[125.307, 122.961, 113.8575], std_values=[51.5865, 50.847, 51.255], target_platform='rk3568')
    print('done')

    # Load model
    print('--> Loading model')
    # ret = rknn.load_pytorch(model=model, input_size_list=input_size_list)
    ret = rknn.load_onnx(model=MODEL)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export rknn model')
    ret = rknn.export_rknn('./resnet_18_100.rknn')
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')

    # Set inputs
    img = cv2.imread('./0_125.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (32, 32))
    # img = np.expand_dims(img, 0)

    # Init runtime environment (simulator on the PC)
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    np.save('./pytorch_resnet18_qat_0.npy', outputs[0])
    # show_outputs(softmax(np.array(outputs[0][0])))
    print(outputs)
    print('done')

    rknn.release()

Run python rknn_transfer.py; if it completes normally, the RKNN model (resnet_18_100.rknn) is generated in the current directory.

6. Deployment
After exporting, push the .rknn model and the test images to the board over adb. Note that the conversion script above exports resnet_18_100.rknn while the inference script below loads ./resnet18.rknn, so rename the file accordingly.
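For reference, the upload can look like the following; the /userdata target directory is an assumption, adjust it to your board's filesystem (the rename to resnet18.rknn happens during the push):

adb push resnet_18_100.rknn /userdata/resnet18.rknn
adb push 2_67.jpg /userdata/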
rknnlite_inference0.py
import numpy as np
import cv2
import os
from rknnlite.api import RKNNLite

IMG_PATH = '2_67.jpg'
RKNN_MODEL = './resnet18.rknn'
img_height = 32
img_width = 32
class_names = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

# Create RKNNLite object
rknn_lite = RKNNLite()

# Load RKNN model
print('--> Load RKNN model')
ret = rknn_lite.load_rknn(RKNN_MODEL)
if ret != 0:
    print('Load RKNN model failed')
    exit(ret)
print('done')

# Init runtime environment
print('--> Init runtime environment')
ret = rknn_lite.init_runtime()
if ret != 0:
    print('Init runtime environment failed!')
    exit(ret)
print('done')

# Load image
img = cv2.imread(IMG_PATH)
img = cv2.resize(img, (32, 32))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.expand_dims(img, 0)

# Run the model
print('--> Running model')
outputs = rknn_lite.inference(inputs=[img])
print('result: ', outputs)
print('This image most likely belongs to {}.'.format(class_names[np.argmax(outputs)]))

rknn_lite.release()
Running this on the board, the image is recognized as horse, confirming that the deployment works. If there is any infringement, or you need the complete code, please contact the blogger.