facedetector.py
import numpy as np
import cv2
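# Tracking state used by the functions below: oldFaces maps a detected face tuple
# (x, y, w, h) to a small dict of counters. Illustrative entry (values are made up,
# not taken from any real run):
#   oldFaces[(120, 80, 64, 64)] = {
#       "frameCount": 12,   # frames this face has been seen so far
#       "miaFrames": 0,     # consecutive frames it has gone undetected
#       "wasFound": True,   # set while the face was matched in the current frame
#   }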
def checkIfOldFace(face):
    # If we are already tracking faces, try to match this detection against them
    if oldFaces:
        # Exact match: update the frame count and mark the face as found this frame
        if tuple(face) in oldFaces:
            oldFaces[tuple(face)]["frameCount"] += 1
            oldFaces[tuple(face)]["wasFound"] = True
            return
        # Otherwise look for a nearby face that has not been matched yet; if one is found,
        # move its entry to the new coordinates, update its counters, and drop the old key
        for oldFaceCoord, value in oldFaces.items():
            if isFaceClose(face, oldFaceCoord) and "wasFound" not in value:
                oldFaces[tuple(face)] = oldFaces[oldFaceCoord]
                oldFaces[tuple(face)]["frameCount"] += 1
                oldFaces[tuple(face)]["wasFound"] = True
                del oldFaces[oldFaceCoord]
                break
        # If no match was found, assume it is a new face
        else:
            oldFaces[tuple(face)] = {
                "frameCount": 1,
                "wasFound": True
            }
    # If there are no entries yet, add this face as the first one
    else:
        oldFaces[tuple(face)] = {
            "frameCount": 1,
            "wasFound": True
        }
def cleanupFaceDict():
    for key in oldFaces.keys():
        # If the face was found this frame, fold any missing-in-action frames back into the
        # frame count, reset the mia counter, and clear wasFound ready for the next frame
        if "wasFound" in oldFaces[key]:
            if "miaFrames" in oldFaces[key]:
                oldFaces[key]["frameCount"] += oldFaces[key]["miaFrames"]
            oldFaces[key]["miaFrames"] = 0
            del oldFaces[key]["wasFound"]
        # Otherwise, if it has been missing for fewer than miaFrames frames, bump the counter
        elif oldFaces[key]["miaFrames"] < miaFrames:
            oldFaces[key]["miaFrames"] = oldFaces[key]["miaFrames"] + 1
        # If it has been missing for miaFrames frames or more, stop tracking it
        else:
            del oldFaces[key]
def isFaceClose(face, oldFace):
    # Two detections count as the same face when x, y, width, and height each differ by
    # less than maxPixelDist pixels
    if (abs(face[0] - oldFace[0]) < maxPixelDist and
            abs(face[1] - oldFace[1]) < maxPixelDist and
            abs(face[2] - oldFace[2]) < maxPixelDist and
            abs(face[3] - oldFace[3]) < maxPixelDist):
        return True
    return False
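# Illustrative example (coordinates are made up, not from the video): with maxPixelDist = 30,
# (100, 120, 48, 48) and (110, 128, 50, 50) are treated as the same face because every
# component differs by less than 30 pixels, while (100, 120, 48, 48) and (150, 120, 48, 48)
# are not, because the x coordinates differ by 50.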
# Haar cascade classifiers shipped with OpenCV (the XML files must sit in the working directory)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
profileface_cascade = cv2.CascadeClassifier('haarcascade_profileface.xml')

# Tracking parameters: drop a face after it has been missing for miaFrames consecutive frames,
# and treat two detections as the same face when each component differs by less than maxPixelDist pixels
miaFrames = 160
maxPixelDist = 30
cap = cv2.VideoCapture("newvid.mp4")
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
oldFaces = {}
# XVID-encoded output writer; the frame size passed here must match the frames being written,
# so take it from the capture instead of hard-coding 640x480
fourcc = cv2.cv.CV_FOURCC(*'XVID')
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
videowriter = cv2.VideoWriter("test1", fourcc, fps, (width, height))
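# Per-frame loop: read a frame, detect faces, draw a box and the running frame count on each
# detection, update the tracking dict, and write the annotated frame to the output file.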
while True:
    ret, frame = cap.read()
    # Stop cleanly when the video runs out of frames
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    cv2.putText(frame, str(fps), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
    for i in range(len(faces)):
        (x, y, w, h) = faces[i]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        checkIfOldFace(tuple(faces[i]))
        # Label each detection with the number of frames this face has been tracked
        cv2.putText(frame, str(oldFaces[tuple(faces[i])]["frameCount"]), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
    print oldFaces
    # Clear the wasFound flags and age out faces that have gone missing
    cleanupFaceDict()
    k = cv2.waitKey(30) & 0xff
    # cv2.imshow('frame', frame)
    videowriter.write(frame)
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
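
# Rough usage sketch (assumes a Python 2 environment with OpenCV 2.4-style cv2.cv bindings):
# place haarcascade_frontalface_default.xml, haarcascade_profileface.xml, and newvid.mp4
# next to this script and run `python facedetector.py`; the annotated video is written to
# the file "test1" and the script stops at the end of the input video.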