Dataset & YOLO keypoint model - keypoint series - hand keypoint dataset (handpose keypoints) >> DataBall

This example was trained on 3k+ images using the yolo11n architecture. For relatively simple scenes it handles left/right hand detection and 21-keypoint localization, with low compute cost and good efficiency.

Models based on the yolo11s and yolo11m architectures (or other YOLO variants) will be released later.
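
For reference, training such a model with the Ultralytics API can be sketched as follows. This is a minimal, illustrative sketch only: the dataset YAML name hand-keypoints.yaml and the hyperparameters (epochs, batch size) are assumptions, not the exact settings used for the released weights.

# -*-coding:utf-8-*-
# Minimal yolo11n pose-training sketch (illustrative; paths and hyperparameters are assumptions)
from ultralytics import YOLO

if __name__ == '__main__':
    model = YOLO("yolo11n-pose.pt")       # start from the official yolo11n pose weights
    model.train(
        data="hand-keypoints.yaml",       # hypothetical dataset config (21 keypoints, 2 classes)
        epochs=300,                       # assumed training length
        imgsz=640,
        batch=16,
        project="runs/pose",
        name="train_handpose",
    )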

1. Model Inference

Pretrained model download: yolo11 keypoint model - hand keypoint dataset handpose keypoints >> DataBall (resource on CSDN 文库)

Model weights: yolo11n hand keypoint model

Detects 21 hand keypoints per hand. Classes: left hand [0], right hand [1].
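
A matching Ultralytics pose dataset config would declare the 21-point keypoint shape and the two classes. This is a sketch: the file name hand-keypoints.yaml, the split folders, and the class names are assumptions inferred from the paths used in the inference script below.

# hand-keypoints.yaml (hypothetical file name)
path: datasets/yolo_hand_pose_data-DataBall   # dataset root, as used in the inference script below
train: images/train
val: images/val
test: images/test

kpt_shape: [21, 3]     # 21 keypoints, each stored as (x, y, visibility)
names:
  0: left_hand
  1: right_hand
# flip_idx can be added if horizontal-flip augmentation is used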

The inference code is as follows:

# -*-coding:utf-8-*-
# date:2024-10
# Author: DataBall - XIAN
# Function: ultralytics yolo handpose - hand keypoint inference

from ultralytics import YOLO
import cv2
import os
import supervision as sv
import time

# Skeleton connections of the 21-keypoint hand model, grouped per finger
HAND_CONNECTIONS = [
    [(0, 1), (1, 2), (2, 3), (3, 4)],        # thumb
    [(0, 5), (5, 6), (6, 7), (7, 8)],        # index finger
    [(0, 9), (9, 10), (10, 11), (11, 12)],   # middle finger
    [(0, 13), (13, 14), (14, 15), (15, 16)], # ring finger
    [(0, 17), (17, 18), (18, 19), (19, 20)], # little finger
]

def draw_joints(img_, hand_, x, y):
    '''
    function: draw the 2D keypoint connections (hand skeleton)
    '''
    thick = 2
    colors = [(0,215,255),(255,115,55),(5,255,55),(25,15,255),(225,15,55)]
    for color, finger in zip(colors, HAND_CONNECTIONS):
        for a, b in finger:
            pt_a = (int(hand_[str(a)]['x'] + x), int(hand_[str(a)]['y'] + y))
            pt_b = (int(hand_[str(b)]['x'] + x), int(hand_[str(b)]['y'] + y))
            cv2.line(img_, pt_a, pt_b, color, thick)

if __name__ == '__main__':
    # Load the trained handpose model
    model = YOLO(r"runs\pose\train5\weights\yolo11n-handpose-epoch-3k.pt")  # trained handpose weights
    path_root_ = "datasets/yolo_hand_pose_data-DataBall/images/test/"       # test-image directory

    # Color palette for the bounding-box annotator
    color_palette = sv.ColorPalette(colors=[
        sv.Color(r=225, g=230, b=128),   # light yellow
        sv.Color(r=0, g=255, b=0),       # green
        sv.Color(r=0, g=0, b=255),       # blue
        # more colors can be appended here
    ])
    # Bounding-box annotator; color lookup is by class (CLASS)
    box_annotator = sv.BoxAnnotator(color=color_palette, thickness=2)

    for f_ in os.listdir(path_root_):
        # img_path = "image/6.jpg"  # single-image path (alternative)
        img_path = path_root_ + f_
        # Predict with the model
        results = model(
                source = img_path,
                project='./',       # root directory for saved predictions
                name='exp',         # sub-directory name for saved predictions
                exist_ok=True,
                save=True,
                imgsz=640,          # inference input image size
                iou = 0.3,          # NMS IoU threshold
                conf=0.25           # confidence threshold
            )  # predict on an image

        img = cv2.imread(img_path)  # re-read the image for visualizing the inference results

        #--------------------------------------------------------------------
        # Iterate over the results and visualize them
        try:
            for result in results:
                boxes = result.boxes.xyxy.detach().cpu().numpy()         # detection bounding boxes
                confidences = result.boxes.conf.detach().cpu().numpy()   # detection confidences
                cls = result.boxes.cls.detach().cpu().numpy()            # detection class ids
                conf = result.keypoints.conf.detach().cpu().numpy()      # keypoint confidences
                key_points = result.keypoints.data.detach().cpu().numpy()

                for i in range(boxes.shape[0]):
                    box_ = boxes[i].reshape(-1,4)
                    det_ = sv.Detections(xyxy=box_)
                    conf_ = confidences[i]
                    print("conf_", conf_)
                    cls_ = int(cls[i])
                    img = box_annotator.annotate(scene=img, detections=det_, labels=["[{}] {:.2f}".format(cls_,conf_)])

                RGB_ = [(255,0,255),(0,255,0),(0,255,255),(0,0,255),(255,255,0)]
                for i in range(key_points.shape[0]):
                    for j in range(key_points.shape[1]):
                        conf_ = key_points[i][j][2]
                        x,y = int(key_points[i][j][0]),int(key_points[i][j][1])
                        if j == 0:
                            cv2.circle(img, (x,y), 3, (128,128,128), -1)  # keypoint 0 (wrist): filled gray circle
                        else:
                            cv2.circle(img, (x,y), 3, RGB_[(j-1)%5], -1)  # finger joints: filled colored circles

                    #----------------------------------------------------------------------
                    pts2d_list = {}
                    for j in range(key_points.shape[1]):
                        x_,y_ = int(key_points[i][j][0]),int(key_points[i][j][1])
                        pts2d_list[str(j)] = {"x": x_, "y": y_}

                    draw_joints(img, pts2d_list, 0, 0)  # draw the hand skeleton lines

            cv2.namedWindow("img",1)
            cv2.imshow("img",img)

            key_id = cv2.waitKey(1)
            if key_id == 27:    # press ESC to exit
                break
        except Exception:
            continue
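
Because yolo11n is light, the same weights can also be run on a live camera stream. Below is a minimal webcam sketch (illustrative: the camera index is an assumption, and Ultralytics' built-in result.plot() is used instead of the custom drawing code above):

# -*-coding:utf-8-*-
# Minimal real-time webcam inference sketch (illustrative)
import cv2
from ultralytics import YOLO

if __name__ == '__main__':
    model = YOLO(r"runs\pose\train5\weights\yolo11n-handpose-epoch-3k.pt")
    cap = cv2.VideoCapture(0)                  # default camera (assumed index)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        results = model(frame, imgsz=640, conf=0.25, iou=0.3, verbose=False)
        annotated = results[0].plot()          # built-in box + keypoint rendering
        cv2.imshow("handpose", annotated)
        if cv2.waitKey(1) == 27:               # press ESC to exit
            break
    cap.release()
    cv2.destroyAllWindows()
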
Helping you quickly get to know the dataset and how to use it.

Data can be this wonderful!

If life hits a low point, face it with a smile; after the rain, the rainbow will come!

Example inference results:

2. Training Data

Training set size: 3k+
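
Each image has a label file in the standard Ultralytics YOLO pose format: one text line per hand, containing the class id, the normalized bounding box, and then 21 normalized (x, y, visibility) triplets, i.e. 5 + 21*3 = 68 values per line. An illustrative (made-up) example line:

# <class> <x_center> <y_center> <width> <height> <x1> <y1> <v1> ... <x21> <y21> <v21>   (coordinates normalized to 0-1)
1 0.512 0.430 0.180 0.260 0.471 0.552 2 0.468 0.509 2 ... 0.565 0.331 2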

For more details, please get in touch.

DataBall helps you quickly get to know datasets and how to use them; members have access to hundreds of datasets, with more being added continuously.

Sample data project link:

Sample data size:

* Related projects

The dataset is currently being updated at: https://blog.csdn.net/weixin_42140236/article/details/142447120?spm=1001.2014.3001.5501