C# Onnx Yolov8 Pose 姿态识别

本文详细介绍了如何在 .NET Framework 4.8 环境中使用 Ultralytics YOLOv8 模型进行人体姿态检测,涉及图像预处理、模型输入输出、OpenCvSharp 和 Microsoft.ML.OnnxRuntime 的整合,以及实际代码示例。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

目录

效果

模型信息

项目

代码

下载 


效果

模型信息

Model Properties
-------------------------
date:2023-09-07T17:11:43.091306
description:Ultralytics YOLOv8n-pose model trained on /usr/src/app/ultralytics/datasets/coco-pose.yaml
author:Ultralytics
kpt_shape:[17, 3]
task:pose
license:AGPL-3.0 https://ultralytics.com/license
version:8.0.172
stride:32
batch:1
imgsz:[640, 640]
names:{0: 'person'}
---------------------------------------------------------------

Inputs
-------------------------
name:images
tensor:Float[1, 3, 640, 640]
---------------------------------------------------------------

Outputs
-------------------------
name:output0
tensor:Float[1, 56, 8400]
---------------------------------------------------------------

项目

VS2022

.NET Framework 4.8

OpenCvSharp 4.8

Microsoft.ML.OnnxRuntime 1.16.2

代码

// Prepare the image data: pad the input into the top-left corner of a
// square canvas so the 640x640 resize below preserves aspect ratio.
image = new Mat(image_path);
int max_image_length = image.Cols > image.Rows ? image.Cols : image.Rows;
Mat max_image = Mat.Zeros(new OpenCvSharp.Size(max_image_length, max_image_length), MatType.CV_8UC3);
Rect roi = new Rect(0, 0, image.Cols, image.Rows);
image.CopyTo(new Mat(max_image, roi));

// Output buffer matching the model's output0 shape [1, 56, 8400]:
// 8400 candidates x 56 values (4 box + 1 score + 17 keypoints * 3, per kpt_shape).
float[] result_array = new float[8400 * 56];
// Scale factor mapping 640x640 model coordinates back to the padded image.
float[] factors = new float[2];
factors[0] = factors[1] = (float)(max_image_length / 640.0);

// Convert BGR (OpenCV's default channel order) to RGB for the model.
Mat image_rgb = new Mat();
Cv2.CvtColor(max_image, image_rgb, ColorConversionCodes.BGR2RGB);
Mat resize_image = new Mat();
Cv2.Resize(image_rgb, resize_image, new OpenCvSharp.Size(640, 640));

// Fill the NCHW input tensor [1, 3, 640, 640], normalising pixels to [0, 1].
// input_tensor = new DenseTensor<float>(new[] { 1, 3, 640, 640 });
for (int y = 0; y < resize_image.Height; y++)
{
    for (int x = 0; x < resize_image.Width; x++)
    {
        input_tensor[0, 0, y, x] = resize_image.At<Vec3b>(y, x)[0] / 255f;
        input_tensor[0, 1, y, x] = resize_image.At<Vec3b>(y, x)[1] / 255f;
        input_tensor[0, 2, y, x] = resize_image.At<Vec3b>(y, x)[2] / 255f;
    }
}

// Wrap the tensor as a named input; "images" matches the model's input name.
input_ontainer.Add(NamedOnnxValue.CreateFromTensor("images", input_tensor));

dt1 = DateTime.Now;
// Run inference and capture the outputs.
result_infer = onnx_session.Run(input_ontainer);

dt2 = DateTime.Now;

// Materialise the outputs as a DisposableNamedOnnxValue array.
results_onnxvalue = result_infer.ToArray();

// Read the first output node ("output0") as a float tensor.
result_tensors = results_onnxvalue[0].AsTensor<float>();

using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using static System.Net.Mime.MediaTypeNames;

namespace Onnx_Yolov8_Demo
{
    /// <summary>
    /// WinForms demo that runs the Ultralytics YOLOv8n-pose ONNX model
    /// (input "images" Float[1,3,640,640], output "output0" Float[1,56,8400])
    /// on a user-selected image and displays the drawn pose result.
    /// </summary>
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();
        }

        // File-dialog filter for supported image formats.
        // FIX: was "*.tiff;*.tiff" (duplicate); now covers both .tif and .tiff.
        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tif;*.tiff;*.png";
        string image_path = "";
        string startupPath;
        string classer_path;
        DateTime dt1 = DateTime.Now;   // inference start timestamp
        DateTime dt2 = DateTime.Now;   // inference end timestamp
        string model_path;
        Mat image;                     // currently loaded input image (BGR)
        PoseResult result_pro;         // post-processing / drawing helper
        Mat result_image;              // image with the pose skeleton drawn on it

        SessionOptions options;
        InferenceSession onnx_session;
        Tensor<float> input_tensor;    // reusable NCHW input buffer [1,3,640,640]
        List<NamedOnnxValue> input_ontainer;   // named inputs passed to Run()
        IDisposableReadOnlyCollection<DisposableNamedOnnxValue> result_infer;
        DisposableNamedOnnxValue[] results_onnxvalue;

        Tensor<float> result_tensors;

        /// <summary>Lets the user pick an image file and previews it.</summary>
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;
            pictureBox1.Image = null;
            image_path = ofd.FileName;
            pictureBox1.Image = new Bitmap(image_path);
            textBox1.Text = "";
            image = new Mat(image_path);
            pictureBox2.Image = null;
        }

        /// <summary>Runs pose inference on the selected image and shows the result.</summary>
        private void button2_Click(object sender, EventArgs e)
        {
            if (image_path == "")
            {
                return;
            }

            // Pad the image into the top-left of a square canvas so the
            // 640x640 resize below preserves aspect ratio.
            image = new Mat(image_path);
            int max_image_length = image.Cols > image.Rows ? image.Cols : image.Rows;
            Mat max_image = Mat.Zeros(new OpenCvSharp.Size(max_image_length, max_image_length), MatType.CV_8UC3);
            Rect roi = new Rect(0, 0, image.Cols, image.Rows);
            // FIX: dispose the temporary ROI view (was leaked).
            using (Mat roi_view = new Mat(max_image, roi))
            {
                image.CopyTo(roi_view);
            }

            // output0 is [1, 56, 8400]: 8400 candidates x (4 box + 1 score + 17 kpts * 3).
            float[] result_array = new float[8400 * 56];
            // Scale factor from 640x640 model space back to the padded image.
            float[] factors = new float[2];
            factors[0] = factors[1] = (float)(max_image_length / 640.0);

            // OpenCV loads BGR; the model expects RGB.
            Mat image_rgb = new Mat();
            Cv2.CvtColor(max_image, image_rgb, ColorConversionCodes.BGR2RGB);
            Mat resize_image = new Mat();
            Cv2.Resize(image_rgb, resize_image, new OpenCvSharp.Size(640, 640));

            // Fill the NCHW input tensor, normalising pixels to [0, 1].
            for (int y = 0; y < resize_image.Height; y++)
            {
                for (int x = 0; x < resize_image.Width; x++)
                {
                    input_tensor[0, 0, y, x] = resize_image.At<Vec3b>(y, x)[0] / 255f;
                    input_tensor[0, 1, y, x] = resize_image.At<Vec3b>(y, x)[1] / 255f;
                    input_tensor[0, 2, y, x] = resize_image.At<Vec3b>(y, x)[2] / 255f;
                }
            }

            // FIX: clear previous inputs first. Add() alone accumulates a
            // duplicate "images" entry on every click, so the second
            // inference fails with a duplicate-input-name error.
            input_ontainer.Clear();
            input_ontainer.Add(NamedOnnxValue.CreateFromTensor("images", input_tensor));

            dt1 = DateTime.Now;
            // Run inference.
            result_infer = onnx_session.Run(input_ontainer);
            dt2 = DateTime.Now;

            // Materialise outputs and copy the first node ("output0") out.
            results_onnxvalue = result_infer.ToArray();
            result_tensors = results_onnxvalue[0].AsTensor<float>();
            result_array = result_tensors.ToArray();

            // FIX: release native/ORT resources (previously leaked each run).
            result_infer.Dispose();
            resize_image.Dispose();
            image_rgb.Dispose();
            max_image.Dispose();

            // FIX: assign the field instead of shadowing it with a local
            // `PoseResult result_pro` declaration.
            result_pro = new PoseResult(factors);
            result_image = result_pro.draw_result(result_pro.process_result(result_array), image.Clone());

            if (!result_image.Empty())
            {
                pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
                textBox1.Text = "推理耗时:" + (dt2 - dt1).TotalMilliseconds + "ms";
            }
            else
            {
                textBox1.Text = "无信息";
            }
        }

        /// <summary>Creates the ONNX Runtime session and input buffers once at startup.</summary>
        private void Form1_Load(object sender, EventArgs e)
        {
            startupPath = System.Windows.Forms.Application.StartupPath;
            model_path = startupPath + "\\yolov8n-pose.onnx";
            classer_path = startupPath + "\\yolov8-detect-lable.txt";

            // Session options: INFO-level logging, CPU execution provider.
            options = new SessionOptions();
            options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
            options.AppendExecutionProvider_CPU(0);

            // Load the local ONNX model file.
            onnx_session = new InferenceSession(model_path, options);

            // Reusable input tensor [1, 3, 640, 640].
            input_tensor = new DenseTensor<float>(new[] { 1, 3, 640, 640 });

            // Container for the named inputs passed to Run().
            input_ontainer = new List<NamedOnnxValue>();
        }
    }
}

下载 

Demo下载

### 加载并运行YOLOv8模型用于水果识别 为了在C#环境中使用ONNX Runtime加载并运行YOLOv8模型来实现水果识别,需遵循一系列配置与编程实践。 #### 安装依赖库 确保项目中已经安装了`Microsoft.ML.OnnxRuntime` NuGet包。这可以通过NuGet Package Manager或命令行完成: ```shell dotnet add package Microsoft.ML.OnnxRuntime ``` #### 准备模型文件 首先,需要准备一个适用于水果识别YOLOv8 ONNX模型文件。如果已有预训练好的`.pt`格式模型,则可通过Python脚本将其转换为ONNX格式[^1]。对于特定类别(如水果),可能还需要重新训练模型以适应新的数据集需求。 #### 编写C#代码 以下是利用ONNX Runtime执行推理过程的一个简单例子,在此之前假设已经有了名为`model.onnx`的ONNX模型以及相应的标签列表`label.txt`: ```csharp using System; using System.IO; using Microsoft.ML.OnnxRuntime; class Program { static void Main(string[] args) { var sessionOptions = new SessionOptions(); using (var sess = new InferenceSession("path/to/model.onnx", sessionOptions)) { // Prepare input data as a tensor. var inputData = File.ReadAllBytes("path/to/image.jpg"); // Convert image bytes into an appropriate format expected by the model. // This step depends on how your model expects its inputs formatted. // Create dictionary with named parameters matching those defined when exporting the model from PyTorch. var inputs = new List<NamedOnnxValue>(); inputs.Add(NamedOnnxValue.CreateFromTensor<float>("input_name", /* processed_image_tensor */)); // Run inference and get outputs. IDisposableReadOnlyCollection<IDisposable> dispColl = null; try { using (IDisposableEnumerable<DisposableNamedOnnxValue> results = sess.Run(inputs)) foreach (var result in results) Console.WriteLine($"Output name={result.Name}, shape={string.Join(", ", result.Value.AsEnumerable<object>().Select(o => o.ToString()))}"); dispColl?.Dispose(); } catch(Exception e){ Console.Error.WriteLine(e.Message); } } } } ``` 注意上述代码中的输入处理部分取决于具体的模型要求;通常情况下,图像会被调整大小、归一化,并转化为浮点数数组的形式传递给模型。 此外,由于YOLO系列算法输出的是边界框坐标和置信度分数,因此还需解析这些信息并与预先定义好的分类名称关联起来显示最终的结果。 关于NMS(Non-Maximum Suppression)操作,现代版本的ONNX OpSet支持直接将该功能集成到模型内部[^2]。这意味着只要正确设置了导出参数,就无需再单独编写额外的过滤逻辑。
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

天天代码码天天

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值