-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFaceDetector.py
More file actions
121 lines (101 loc) · 3.96 KB
/
FaceDetector.py
File metadata and controls
121 lines (101 loc) · 3.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import cv2
from DataLoader import LoadFilesData, DataLoader
import numpy as np
from Visualization import visualizeImages
import glob
from math import ceil
import NeuralNet
from Sampler import randomSample
def detectedFace(image, cascadePath="./cascades"):
    """
    Attempts to detect a face in an image using the OpenCV Haar Cascades.

    Params
        image: the BGR image (numpy uint8 array) to detect a face in
        cascadePath: the directory containing cascade xml files to match against
    Returns
        0: a bool indicating whether a face was found by any cascade
    """
    # grayscale conversion is loop-invariant: do it once instead of once per cascade file
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for thisCascade in glob.glob(cascadePath + "/*.xml"):
        # perform opencv face detection with this cascade
        faceCascade = cv2.CascadeClassifier(thisCascade)
        faces = faceCascade.detectMultiScale(
            grayImage,
            scaleFactor=1.1,
            # NOTE(review): minNeighbors=0 accepts every candidate rectangle
            # (no clustering threshold) — very lenient; confirm intended
            minNeighbors=0,
            minSize=(32, 32),
            maxSize=(64, 64),
        )
        # stop at the first cascade that reports any face
        if len(faces) > 0:
            return True
    return False
def detectErrorRate(imageMat, printResults=True):
    """
    Detects the percentage of images in which faces can't be identified.

    Params
        imageMat: a numpy array of images (float values in [-1, 1],
                  shape [count, height, width, channels]) to search for faces in
        printResults: if true, will print results to the console for display
    Returns
        0: the fraction of images in which faces couldn't be found
        1: a numpy array holding the images whose faces couldn't be recognized
    """
    # rescale from [-1, 1] floats to [0, 255] 8-bit ints for OpenCV
    imageSet = ((imageMat + 1) * (255 / 2)).astype(np.uint8)
    numImages = imageSet.shape[0]
    # guard against an empty sample to avoid dividing by zero below
    if numImages == 0:
        return 0.0, imageSet
    numFound = 0
    errMat = np.zeros_like(imageSet)
    for i in range(numImages):
        thisImage = imageSet[i, :, :, :]
        if not detectedFace(thisImage):
            # pack undetected images at the front of errMat
            errMat[numFound, :, :, :] = thisImage
            numFound = numFound + 1
    if printResults:
        print ("Error Rate: " + str(float(numFound*100)/numImages) + "% (" + str(numFound) + "/" + str(numImages) +")")
    return float(numFound)/numImages, errMat[:numFound, :, :, :]
def errorInDataset(imageCount, printResults=True,
                   datasetDir="/home/sanche/Datasets/IMDB-WIKI",
                   csvPath="./dataset.csv", indicesPath="./indices.p"):
    """
    Detects the percentage of images in the IMDB-WIKI dataset where the faces couldn't be detected.

    Params
        imageCount: the number of images to use in the sample
        printResults: if true, will print results to the console for display
        datasetDir: root directory of the IMDB-WIKI dataset
        csvPath: path to the dataset csv index file
        indicesPath: path to the pickled indices file
    Returns
        0: the fraction of images in which faces couldn't be found
        1: a numpy array holding the faces that couldn't be recognized
    """
    csvdata, indices = LoadFilesData(datasetDir, csvPath, indicesPath)
    # request enough images per age/gender bin to cover imageCount total
    numPerBin = int(ceil(imageCount/16.0))
    loader = DataLoader(indices, csvdata, numPerBin=numPerBin, imageSize=64, numWorkerThreads=10, bufferMax=20,
                        debugLogs=False, useCached=False)
    loader.start()
    batchDict = loader.getData()
    imageSet = batchDict["image"]
    #shuffle, so if some are trimmed, we are randomly from all bins
    np.random.shuffle(imageSet)
    return detectErrorRate(imageSet[:imageCount,:,:,:], printResults=printResults)
def errorInGenerated(imageCount, printResults=True):
    """
    Detects the percentage of images generated by the neural net with undetectable faces.

    Params
        imageCount: the number of images to use in the sample
        printResults: if true, will print results to the console for display
    Returns
        0: the fraction of images in which faces couldn't be found
        1: a numpy array holding the faces that couldn't be recognized
    """
    # network hyperparameters — must match how the model was trained
    image_size, batch_size, noise_size = 64, 64, 100
    # build the network and draw a random sample of generated face images
    network = NeuralNet.NeuralNet(batch_size=batch_size, image_size=image_size,
                                  noise_size=noise_size, learningRate=5e-4)
    generated = randomSample(network, imageCount)
    return detectErrorRate(generated, printResults=printResults)
if __name__ == "__main__":
    # compare face-detection failure rates on real dataset images
    # versus images produced by the generator, using equal sample sizes
    SAMPLE_SIZE = 10000
    errorInDataset(SAMPLE_SIZE)
    errorInGenerated(SAMPLE_SIZE)