Dlib Facial Expression Recognition

Copyright notice: I am 南七小僧, WeChat: to_my_love. I am looking for AI-related work; exchanges of ideas are welcome. https://blog.csdn.net/qq_25439417/article/details/84556838

 

The user interface is built with Tkinter. The script runs three worker threads that communicate through queues: one captures frames from the camera stream, one detects faces and landmarks and draws them into the Tkinter window, and one turns the landmark distances into a rough emotion label.

# created at 2017-11-27
# updated at 2018-09-06

# Author:   coneypo
# Dlib:     http://dlib.net/
# Blog:     http://www.cnblogs.com/AdaminXie/
# Github:   https://github.com/coneypo/Dlib_examples

import dlib
import time
import threading
import queue
import cv2

from tkinter import *
from PIL import Image, ImageTk

# Build the Tkinter UI: a Label for the annotated video frame and a Text box for the emotion result
root = Tk()
root.title('AI Interview')
panel = Label(root)
text = Text(root)
text.pack(padx=10, pady=20)
panel.pack(padx=10, pady=10)


# Frame and landmark queues shared by the capture, detection, and emotion threads
imglist = queue.Queue(maxsize=1000)
facelist = queue.Queue(maxsize=1000)

# IP-camera MJPEG stream (username:password@host:port)
url = 'http://admin:[email protected]:8081'
# Dlib's frontal face detector
detector = dlib.get_frontal_face_detector()

# Face-box size, updated by get_face() and used by get_emotion() to normalise distances
face_width = 1
face_height = 1

# Dlib's 68-point facial landmark model
predictor = dlib.shape_predictor("../face_lib/shape_predictor_68_face_landmarks.dat")
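# NOTE: shape_predictor_68_face_landmarks.dat is not bundled with dlib; it can be
# downloaded from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
# and unpacked into ../face_lib/.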

# Alternative: read a single image and display it in a Dlib image window
#img = cv2.imread("../face_img/3.jpg")
#win = dlib.image_window()
#win.set_image(img)

# Open the video stream
cap = cv2.VideoCapture(url)
print('Stream opened:', cap.isOpened())
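# NOTE (not in the original): if the network stream is unavailable,
# cv2.VideoCapture(0) would read from the local webcam instead.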

def get_img():
    """Capture thread: read frames from the stream, downscale them, and queue them."""
    while cap.isOpened():
        iss, frame = cap.read()
        if not iss:
            continue
        frame = cv2.resize(frame, (480, 300))
        imglist.put(frame)

def get_face():
    """Detection thread: detect faces, draw boxes and landmarks, update the UI, and queue landmarks."""
    global face_width, face_height
    while True:
        if not imglist.empty():
            img = imglist.get()
            # Detect faces (upsample the image once)
            faces = detector(img, 1)
            for i, d in enumerate(faces):
                face_width = d.right() - d.left()
                face_height = d.bottom() - d.top()
                cv2.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()), (165, 23, 255), 2)

                # 68-point landmarks for this face
                shape = predictor(img, d)
                facelist.put(shape)
                # Draw every second landmark as a small green dot
                for j in range(0, 68, 2):
                    cv2.circle(img, (shape.part(j).x, shape.part(j).y), 2, (0, 255, 0), -1, 3)

                # Show the annotated frame in the Tkinter Label
                curimg = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                imgtk = ImageTk.PhotoImage(image=curimg)
                panel.imgtk = imgtk
                panel.config(image=imgtk)


def get_emotion():
    """Emotion thread: turn landmark distances into a rough emotion label."""
    while True:
        shape = facelist.get()

        # Mouth: width (smile) and opening, normalised by the face-box size
        mouth_width = (shape.part(54).x - shape.part(48).x) / face_width
        mouth_height = (shape.part(66).y - shape.part(62).y) / face_height

        # An eyebrow-based analysis (brow height, brow spacing, and brow slope fitted
        # with np.polyfit) is disabled in this version; see the sketch after the script.

        # Eye opening: average of the four upper/lower eyelid distances, normalised by face width
        eye_sum = (shape.part(41).y - shape.part(37).y + shape.part(40).y - shape.part(38).y +
                   shape.part(47).y - shape.part(43).y + shape.part(46).y - shape.part(44).y)
        eye_height = (eye_sum / 4) / face_width

        text.delete('1.0', END)  # clear the previous label

        # Open mouth: surprise or happiness
        if mouth_height >= 0.03:
            if eye_height >= 0.056:
                text.insert(END, 'Interviewee emotion: surprised')
            else:
                text.insert(END, 'Interviewee emotion: relaxed')
        # Closed mouth: neutral (the disabled eyebrow slope would flag anger)
        else:
            text.insert(END, 'Interviewee emotion: neutral')
ts = threading.Thread(target=get_img,name='getimg')
td = threading.Thread(target=get_face,name='disimg')
te = threading.Thread(target=get_emotion,name='emotion')
ts.start()
td.start()
te.start()
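
# NOTE (not in the original): Tkinter is not thread-safe, so updating panel and text
# from worker threads can misbehave; a more robust variant would push results into a
# queue and have the main thread poll it with root.after().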

root.mainloop()
# Alternative (Dlib image window): draw the face rectangles and keep the window open
#win.add_overlay(faces)
#dlib.hit_enter_to_continue()
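
The eyebrow analysis that is disabled in get_emotion() fits a straight line through the left-brow landmarks with np.polyfit and uses the negated slope as an anger indicator. Below is a minimal, standalone sketch of that idea, reconstructed from the commented-out code in the original; the helper name brow_features and its parameters are mine, while the -0.3 threshold comes from the original.

import numpy as np

def brow_features(shape, d, face_width):
    # Reconstructed sketch of the disabled eyebrow analysis, not the author's exact code.
    brow_sum = 0    # summed vertical distance of brow points from the top of the face box
    frown_sum = 0   # summed horizontal gap between the two brows
    line_brow_x, line_brow_y = [], []
    for j in range(17, 21):
        brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
        frown_sum += shape.part(j + 5).x - shape.part(j).x
        line_brow_x.append(shape.part(j).x)
        line_brow_y.append(shape.part(j).y)

    # Fit a line through the left brow; the image y-axis points down, so the sign is flipped.
    z1 = np.polyfit(np.array(line_brow_x), np.array(line_brow_y), 1)
    brow_k = -round(z1[0], 3)

    brow_height = (brow_sum / 10) / face_width   # brow height relative to face width
    brow_width = (frown_sum / 5) / face_width    # brow spacing relative to face width
    return brow_k, brow_height, brow_width

# With a closed mouth, brow_k <= -0.3 would be reported as "angry" instead of "neutral".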
