Air Gesture Control: Playing Chinese Chess Without Touching the Mouse

Demo video:

https://gitee.com/dfytensor/air_operating_system/raw/master/%E4%B8%AD%E5%9B%BD%E8%B1%A1%E6%A3%8B/chiese_chess_vedio.mp4
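
The approach: MediaPipe Hands tracks 21 landmarks on one hand from the webcam; the apparent area of the triangle formed by the wrist and the two base knuckles (index and pinky) serves as a rough proxy for how close and how open the palm is; that area picks one of three modes (move, click, drag), and pyautogui drives the real system mouse, so any on-screen chess program works. The full script:
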
import cv2
import mediapipe as mp
import pyautogui
import numpy as np

screenWidth, screenHeight = pyautogui.size()

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    static_image_mode=False,
    max_num_hands=1,
    min_detection_confidence=0.65,
    min_tracking_confidence=0.65)
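# static_image_mode=False treats the input as a video stream: a full detection
# runs until a hand is found, then the lighter tracker follows it across frames;
# results under the 0.65 confidence thresholds are discarded.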


def shibie_wuzhi_zhuangtai(hand_dict):
    # Pinyin for "recognize the state of the five fingers". For each finger we
    # take [base, middle, tip] landmark indices and call the finger extended
    # when its joints rise monotonically (smaller y = higher in the image).
    # Defined for reference; the main loop below relies on palm area instead.
    finger_list = []
    for finger_points in [[2, 3, 4], [6, 7, 8], [10, 11, 12], [14, 15, 16], [18, 19, 20]]:
        x, y, z = finger_points
        finger_one = hand_dict[x].y > hand_dict[y].y > hand_dict[z].y
        finger_list.append(finger_one)
    return finger_list
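
# MediaPipe Hands landmark reference used below: 0 = wrist, fingertips are
# 4 / 8 / 12 / 16 / 20, 5 = index-finger MCP (base knuckle), 17 = pinky MCP.
# All landmark coordinates are normalized to [0, 1] over the image size.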


cap = cv2.VideoCapture(0)
state_list = [1]  # mode history: 1 = move, 2 = drag, 3 = click

while True:
    ret, frame = cap.read()
    if not ret:
        break  # camera frame not available
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # The webcam image is mirrored, so flip it horizontally;
    # if your camera is not mirrored, you can skip the flip.
    frame = cv2.flip(frame, 1)
    results = hands.process(frame)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV drawing/display


    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            # Index the 21 landmarks by id for convenient lookup.
            hand_dict = {i: v for i, v in enumerate(hand_landmarks.landmark)}
            # finger_table = shibie_wuzhi_zhuangtai(hand_dict)  # optional finger-state helper
            if hand_dict:

                # Squared side lengths of the wrist / pinky-MCP / index-MCP triangle.
                a = (hand_dict[17].x - hand_dict[0].x) ** 2 + (hand_dict[17].y - hand_dict[0].y) ** 2
                b = (hand_dict[5].x - hand_dict[0].x) ** 2 + (hand_dict[5].y - hand_dict[0].y) ** 2
                c = (hand_dict[5].x - hand_dict[17].x) ** 2 + (hand_dict[5].y - hand_dict[17].y) ** 2
                cosc = (a + b - c) / 2 / np.sqrt(a * b)
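                # Law of cosines with squared lengths a = |P0-P17|^2, b = |P0-P5|^2,
                # c = |P5-P17|^2: cos(C) = (a + b - c) / (2 * sqrt(a*b)), where C is
                # the angle at the wrist. The triangle area used below is then
                # sqrt(a) * sqrt(b) * sin(C) / 2 = sqrt(a * b * (1 - cos(C)^2)) / 2.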

                # Keep only a short history of recent modes.
                if len(state_list) > 8:
                    state_list = state_list[-3:]


                if np.arccos(cosc) > 1.0:  # wrist angle over ~1 rad: roughly, hand open toward the camera
                    # "shou zhang s" = palm area: apparent area of the landmark triangle.
                    shou_zhang_s = np.sqrt(b * (1 - cosc ** 2) * a) / 2
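                    # The 0.016 / 0.030 cut-offs below look empirically tuned; the
                    # area is in normalized image units, so it mostly encodes how
                    # close (and how open) the palm is: small = move, medium =
                    # click, large = drag.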

                    # Map the normalized wrist position to absolute screen
                    # coordinates, clamped to stay inside the screen.
                    currentMouseX = min(np.floor(hand_dict[0].x * screenWidth), screenWidth - 1)
                    currentMouseY = min(np.floor(hand_dict[0].y * screenHeight), screenHeight - 1)

                    if shou_zhang_s < 0.016:
                        # Small palm area: simulate cursor movement.
                        print("move mode")
                        current_state = 1
                        state_list.append(current_state)
                        pyautogui.moveTo(currentMouseX, currentMouseY, duration=0, tween=pyautogui.linear)

                    elif 0.016 <= shou_zhang_s < 0.030:
                        # Medium palm area: simulate a single click.
                        current_state = 3
                        print("click mode", state_list[-1], current_state)
                        if state_list[-1] != current_state:
                            # Click only on the transition into click mode,
                            # not on every frame spent in it.
                            pyautogui.click()
                        state_list.append(current_state)

                    else:
                        # Large palm area: simulate a drag.
                        print("drag mode")
                        current_state = 2
                        pyautogui.dragTo(currentMouseX, currentMouseY, duration=0, tween=pyautogui.linear)
                        state_list.append(current_state)
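                        # Note: pyautogui.dragTo presses the left button, moves,
                        # and releases, so staying in drag mode produces a series
                        # of short press-move-release drags, one per frame.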





                # For visualization, replace the camera image with a solid
                # background whose color indicates the current operation mode.
                font = cv2.FONT_HERSHEY_SIMPLEX
                frame = np.zeros(frame.shape, dtype=np.uint8)

                if state_list[-1] == 1:
                    frame[:, :, 0] = 255  # blue background (BGR) = move mode
                    frame = cv2.putText(frame, '001_move', (0, 100), font, 1.2, (255, 255, 255), 2)
                elif state_list[-1] == 2:
                    frame[:, :, 1] = 255  # green background = drag mode
                    frame = cv2.putText(frame, '002_drag', (0, 100), font, 1.2, (255, 255, 255), 2)
                elif state_list[-1] == 3:
                    frame[:, :, 2] = 255  # red background = click mode
                    frame = cv2.putText(frame, '003_click', (0, 100), font, 1.2, (255, 255, 255), 2)





                mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            else:
                # Landmark dict empty: show a blank frame.
                frame = np.zeros(frame.shape, dtype=np.uint8)
    else:
        # No hand detected: show a blank frame.
        frame = np.zeros(frame.shape, dtype=np.uint8)


    cv2.imshow('MediaPipe Hands', frame)
    if cv2.waitKey(1) & 0xFF == 27:  # press Esc to quit
        break

cap.release()
cv2.destroyAllWindows()
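
To run it you need a webcam plus, roughly, pip install opencv-python mediapipe pyautogui numpy; the script uses the legacy mp.solutions.hands API, so pick a MediaPipe build that still ships it. One caveat: pyautogui's fail-safe is on by default, so if the tracked wrist drives the pointer into a screen corner the script aborts with a FailSafeException; that conveniently doubles as an emergency stop.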


Reposted from blog.csdn.net/weixin_32759777/article/details/123508818