Dies ist eine Gesichtsorganerkennung (Facial-Landmark-Erkennung), die derzeit in dlib, aber nicht in OpenCV enthalten ist.
Vorerst habe ich es gewaltsam gefärbt, aber es muss einen klügeren Weg geben. Oder besser gesagt, ich denke, dass die Referenz einfach nicht richtig geladen ist.
Das Video ist unten. Es ist peinlich, mein Gesicht zu zeigen, deshalb benutze ich die Gesichtserkennung, um es zu verbergen. [https://www.youtube.com/watch?v=s2YtXqcBuPY](https://www.youtube.com/watch?v=s2YtXqcBuPY)
Der Quellcode ist unten. Damit es funktioniert, legen Sie die trainierten Daten der Gesichtsorgane in dasselbe Verzeichnis wie die py-Datei. Muss platziert werden. →shape_predictor_68_face_landmarks.dat(http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
face_landmark_detector.py.
Usage:
face_landmark_detector.py [<video source>] [<resize rate>] [<privacy mask>]
'''
import sys
import dlib
import cv2
import time
import copy
# Parse the optional command-line arguments:
#   argv[1] video source  — device index (digit string) or file/URL path; default 0
#   argv[2] resize rate   — integer downscale factor for detection; default 1
#   argv[3] privacy mask  — 1 to blank out detected faces; default 0
# Only the expected failures are caught: a missing argument (IndexError)
# or a non-numeric value (ValueError). Bare `except:` would also swallow
# KeyboardInterrupt/SystemExit, which we must not do.
try:
    fn = sys.argv[1]
    if fn.isdigit():
        fn = int(fn)  # numeric string -> camera device index for VideoCapture
except IndexError:
    fn = 0  # default: first camera
try:
    resize_rate = int(sys.argv[2])
except (IndexError, ValueError):
    resize_rate = 1  # default: no downscaling
try:
    privacy_mask = int(sys.argv[3])
except (IndexError, ValueError):
    privacy_mask = 0  # default: mask disabled
predictor_path = "./shape_predictor_68_face_landmarks.dat"

# Landmark-index upper bounds -> BGR draw color for the dlib 68-point model.
# Each landmark index is drawn with the color of the first bound it is below.
LANDMARK_COLORS = [
    (17, (0, 0, 255)),    # [0-16]  jaw contour
    (22, (0, 255, 0)),    # [17-21] right eyebrow
    (27, (255, 0, 0)),    # [22-26] left eyebrow
    (31, (0, 255, 255)),  # [27-30] nose bridge
    (36, (255, 255, 0)),  # [31-35] nostrils / nose tip
    (42, (255, 0, 255)),  # [36-41] right eye
    (48, (0, 0, 128)),    # [42-47] left eye
    (55, (0, 128, 0)),    # [48-54] upper lip (outer contour)
    (60, (128, 0, 0)),    # [54-59] lower lip (outer contour)
    (65, (0, 128, 255)),  # [60-64] upper lip (inner contour)
    (68, (128, 255, 0)),  # [65-67] lower lip (inner contour)
]

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
video_input = cv2.VideoCapture(fn)

while video_input.isOpened():
    ret, frame = video_input.read()
    if not ret:
        # End of stream or read failure: frame is None, so stop instead of
        # crashing on frame.shape below.
        break

    # Downscale the detection frame to reduce processing load
    # (only when a resize rate argument was given).
    height, width = frame.shape[:2]
    temp_frame = cv2.resize(
        frame, (int(width / resize_rate), int(height / resize_rate)))

    # Face detection (timed). NOTE: the original code did
    # `print(...) + "[sec]"`, which raises TypeError on Python 3
    # (None + str); the unit now lives inside the format string.
    start = time.time()
    dets = detector(temp_frame, 1)
    elapsed_time = time.time() - start
    print("detector processing time:{0}[sec]".format(elapsed_time))

    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

        # Facial landmark detection (timed), on the downscaled frame.
        start = time.time()
        shape = predictor(temp_frame, d)
        elapsed_time = time.time() - start
        print("predictor processing time:{0}[sec]".format(elapsed_time))

        # Drawing happens on the full-size frame, so every coordinate from
        # the downscaled detection is scaled back up by resize_rate.
        rect_offset = 20
        if privacy_mask == 1:
            # Hide the face: filled white rectangle slightly larger than
            # the detected box.
            cv2.rectangle(
                frame,
                (int(d.left() * resize_rate) - rect_offset,
                 int(d.top() * resize_rate) - rect_offset),
                (int(d.right() * resize_rate) + rect_offset,
                 int(d.bottom() * resize_rate) + rect_offset),
                (255, 255, 255), -1)

        for shape_point_count in range(shape.num_parts):
            shape_point = shape.part(shape_point_count)
            # Pick the color of the facial-feature group this index falls in.
            for upper_bound, color in LANDMARK_COLORS:
                if shape_point_count < upper_bound:
                    cv2.circle(
                        frame,
                        (int(shape_point.x * resize_rate),
                         int(shape_point.y * resize_rate)),
                        2, color, -1)
                    break

    cv2.imshow('face landmark detector', frame)
    c = cv2.waitKey(50) & 0xFF
    if c == 27:  # ESC quits
        break

video_input.release()
cv2.destroyAllWindows()
Das ist alles.
Recommended Posts