python dlib学习(十二):面部表情跟踪
生活随笔
收集整理的這篇文章主要介紹了
python dlib学习(十二):面部表情跟踪
小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
前言
面部表情跟蹤的原理就是檢測人臉特征點,根據特定的特征點可以對應到特定的器官,比如眼睛、鼻子、嘴巴、耳朵等等,以此來跟蹤各個面部器官的動作。
程序實現
原理很簡單,下面直接上程序了:
# *_*coding:utf-8 *_*
# author: 许鸿斌
#
# Facial-expression tracking with dlib: detect the 68 face landmarks in each
# webcam frame, draw the outlines of the facial organs (eyebrows, eyes, nose,
# mouth, jaw) on a blank single-channel canvas, crop the canvas to the
# landmark bounding box, and show that crop in a per-face window.

import sys
import cv2
import dlib
import os
import logging
import datetime
import numpy as np


def cal_face_boundary(img, shape):
    """Crop ``img`` to the bounding box of all landmark points in ``shape``.

    Negative coordinates (landmarks that fall outside the frame) are clamped
    to 0.  Returns ``None`` when there are no landmarks or the box is
    degenerate (zero width or height) — callers must check for that.
    """
    pts = shape.parts()
    if not pts:
        # Original code raised NameError here (x_min never bound).
        return None
    x_min = min(pt.x for pt in pts)
    x_max = max(pt.x for pt in pts)
    y_min = min(pt.y for pt in pts)
    y_max = max(pt.y for pt in pts)
    # Ignore any part of the face lying above/left of the image frame.
    x_min = max(x_min, 0)
    y_min = max(y_min, 0)
    if x_min == x_max or y_min == y_max:
        return None
    return img[y_min:y_max, x_min:x_max]


def _draw_polyline(img, shape, first, last, closed=False):
    """Connect landmarks ``first``..``last`` (inclusive) with white lines.

    When ``closed`` is true the last point is also joined back to the first.
    """
    pt_pos = [(pt.x, pt.y) for pt in shape.parts()[first:last + 1]]
    for num in range(len(pt_pos) - 1):
        cv2.line(img, pt_pos[num], pt_pos[num + 1], 255, 2)
    if closed and pt_pos:
        cv2.line(img, pt_pos[0], pt_pos[-1], 255, 2)


def draw_left_eyebrow(img, shape):
    # Landmarks 17-21 (open polyline).
    _draw_polyline(img, shape, 17, 21)


def draw_right_eyebrow(img, shape):
    # Landmarks 22-26 (open polyline).
    _draw_polyline(img, shape, 22, 26)


def draw_left_eye(img, shape):
    # Landmarks 36-41 (closed contour).
    _draw_polyline(img, shape, 36, 41, closed=True)


def draw_right_eye(img, shape):
    # Landmarks 42-47 (closed contour).
    _draw_polyline(img, shape, 42, 47, closed=True)


def draw_nose(img, shape):
    # Landmarks 27-35: bridge (27-30) plus nostril base (31-35).
    _draw_polyline(img, shape, 27, 35)
    pt_pos = [(pt.x, pt.y) for pt in shape.parts()[27:35 + 1]]
    # Close the nose outline: top of bridge to both wings, tip to the base.
    cv2.line(img, pt_pos[0], pt_pos[4], 255, 2)
    cv2.line(img, pt_pos[0], pt_pos[-1], 255, 2)
    cv2.line(img, pt_pos[3], pt_pos[-1], 255, 2)


def draw_mouth(img, shape):
    # Outer lip: landmarks 48-59 (closed contour).
    _draw_polyline(img, shape, 48, 59, closed=True)
    # Inner lip: landmarks 60-67 (closed contour).
    _draw_polyline(img, shape, 60, 67, closed=True)


def draw_jaw(img, shape):
    # Landmarks 0-16 (open polyline along the jaw line).
    _draw_polyline(img, shape, 0, 16)


# Get a named logger and send INFO-level output to stdout.
logger = logging.getLogger("PedestranDetect")
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)

# The 68-landmark model file is expected in the current working directory.
pwd = os.getcwd()
predictor_path = os.path.join(pwd, 'shape_predictor_68_face_landmarks.dat')

logger.info(u'导入人脸检测器')
detector = dlib.get_frontal_face_detector()
logger.info(u'导入人脸特征点检测器')
predictor = dlib.shape_predictor(predictor_path)

cap = cv2.VideoCapture(0)
cnt = 0          # frame counter for the FPS estimate
total_time = 0
start_time = 0
while 1:
    ret, frame = cap.read()
    if not ret:
        # Camera read failed; stop instead of crashing in cvtColor(None).
        logger.warning('Failed to grab frame')
        break
    # Press 'q' to quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    dets = detector(img, 1)
    if dets:
        logger.info('Face detected')
    else:
        logger.info('No face detected')
    for index, face in enumerate(dets):
        shape = predictor(img, face)
        # Blank single-channel canvas with the frame's height and width.
        features = np.zeros(img.shape[0:-1], dtype=np.uint8)
        for index_, pt in enumerate(shape.parts()):
            cv2.circle(features, (pt.x, pt.y), 2, 255, 1)
        draw_left_eyebrow(features, shape)
        draw_right_eyebrow(features, shape)
        draw_left_eye(features, shape)
        draw_right_eye(features, shape)
        draw_nose(features, shape)
        draw_mouth(features, shape)
        draw_jaw(features, shape)
        logger.info('face shape: {} {}'.format(face.right() - face.left(),
                                               face.bottom() - face.top()))
        faceROI = cal_face_boundary(features, shape)
        if faceROI is None:
            # Degenerate landmark box — nothing to show for this face.
            continue
        logger.info('ROI shape: {}'.format(faceROI.shape))
        faceROI = cv2.resize(faceROI, (500, 500),
                             interpolation=cv2.INTER_LINEAR)
        cv2.imshow('face {}'.format(index), faceROI)
    # Estimate the frame rate over every 100 frames.
    if cnt == 0:
        start_time = datetime.datetime.now()
        cnt += 1
    elif cnt == 100:
        end_time = datetime.datetime.now()
        # .seconds truncates to whole seconds and can be 0 (ZeroDivisionError);
        # total_seconds() gives the full fractional span.
        elapsed = (end_time - start_time).total_seconds()
        if elapsed > 0:
            logger.info(u'帧率:{:.2f}fps'.format(100.0 / elapsed))
        cnt = 0
    else:
        cnt += 1

cap.release()
cv2.destroyAllWindows()

# The program is straightforward, so only the key points are explained.
運(yùn)行結(jié)果
按q鍵退出;如果攝像頭中檢測到人臉會自動提取信息,并在如下窗口中重新繪制面部器官輪廓。
總結(jié)
以上是生活随笔為你收集整理的python dlib学习(十二):面部表情跟踪的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 基于Python编写的简易翻译器
- 下一篇: OpenFace学习(2):FaceNe