-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdrone.py
More file actions
159 lines (123 loc) · 4.87 KB
/
drone.py
File metadata and controls
159 lines (123 loc) · 4.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import cv2
import time
from PIL import Image
import face_recognition
import os
import numpy as np
import sys
from config import ip_address
from config import number_of_times_to_upsample
from config import num_jitters
from config import tolerance
subjects = []  # subject names read from each s<label>/name.txt (filled by prepare_training_data)
status = []  # subject statuses ("vip"/"blacklisted"/other), parallel to `subjects`
face_encoding_list = []  # face_recognition encodings of the training faces (filled in show_result)
def colour_f(status1):
    """Map a subject status string to a BGR colour tuple.

    "vip" -> green, "blacklisted" -> red, anything else -> white.
    """
    colours = {
        "vip": (0, 255, 0),          # green
        "blacklisted": (0, 0, 255),  # red
    }
    return colours.get(status1, (255, 255, 255))  # default: white
def draw_rectangle2(img, rect, colour):
    """Draw a 2px rectangle on ``img``.

    ``rect`` is (left, top, right, bottom) corner coordinates — NOT
    (x, y, width, height) as the previous local names ``(x, y, w, h)``
    misleadingly suggested; both tuples are passed to cv2.rectangle as
    opposite corners.
    """
    (left, top, right, bottom) = rect
    cv2.rectangle(img, (left, top), (right, bottom), colour, 2)
def draw_text(img, text, x, y):
    """Render ``text`` in green on ``img`` with its origin at (x, y)."""
    origin = (x, y)
    green = (0, 255, 0)
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_PLAIN, 1.5, green, 2)
def prepare_training_data(data_folder_path):
    """Load training images and subject metadata from ``data_folder_path``.

    Expects one sub-directory per subject named "s<label>" containing
    image files plus optional "name.txt" / "status.txt" metadata files.
    Side effects: appends each subject's lower-cased name and status to
    the module-level ``subjects`` and ``status`` lists.

    Returns the list of loaded face images (arrays produced by
    face_recognition.load_image_file).

    Changes vs. the original: paths built with os.path.join, metadata
    files opened read-only ("r+" was unnecessary), the never-used
    ``labels`` list and the no-op cv2.waitKey/destroyAllWindows calls
    (no window is ever created here) were removed.
    """
    global subjects
    global status
    faces = []
    for dir_name in os.listdir(data_folder_path):
        # Only "s<label>" directories hold training data.
        if not dir_name.startswith("s"):
            continue
        subject_dir_path = os.path.join(data_folder_path, dir_name)
        for image_name in os.listdir(subject_dir_path):
            if image_name.startswith("."):
                continue  # skip hidden files such as .DS_Store
            file_path = os.path.join(subject_dir_path, image_name)
            if image_name == "name.txt":
                with open(file_path, "r") as fh:
                    subjects.append(fh.read().lower())
            elif image_name == "status.txt":
                with open(file_path, "r") as fh:
                    status.append(fh.read().lower())
            else:
                faces.append(face_recognition.load_image_file(file_path))
                print("Faces Scanned: ", len(faces))
    return faces
faces = prepare_training_data("train-images")
def show_result(frame):
    """Detect faces in ``frame``, match them against the training faces,
    annotate a copy of the frame, save before/after snapshots and display
    the result until a key is pressed.

    Fixes vs. the original:
    - Training-face encodings are computed only on the first call. The
      original appended them to the global ``face_encoding_list`` on
      EVERY call, duplicating entries and skewing the match-index ->
      subjects/status mapping after the first invocation.
    - The original called an undefined ``save()`` helper, which raised
      NameError at runtime; snapshots are now written with cv2.imwrite.
    """
    global faces
    global subjects
    global status
    global face_encoding_list
    face_locations = face_recognition.face_locations(
        frame, number_of_times_to_upsample=number_of_times_to_upsample)
    # Encode the training faces once and cache them in the global list.
    if not face_encoding_list:
        try:
            for face in faces:
                encoding = face_recognition.face_encodings(face, num_jitters=num_jitters)[0]
                face_encoding_list.append(encoding)
        except IndexError:
            print("I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting...")
            quit()
    print("I found {} face(s) in this photograph.".format(len(face_locations)))
    img = frame.copy()
    for top, right, bottom, left in face_locations:
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
        # Crop the detected face out of the frame and encode it.
        face_image = frame[top:bottom, left:right]
        encodings = face_recognition.face_encodings(face_image, num_jitters=num_jitters)
        if len(encodings) > 0:
            results = face_recognition.compare_faces(
                face_encoding_list, encodings[0], tolerance=tolerance)
            if True in results:
                # NOTE(review): indexing subjects/status with the first True
                # match assumes exactly one training image per subject —
                # confirm the train-images layout guarantees this.
                index1 = results.index(True)
                draw_rectangle2(img, (left, top, right, bottom), colour_f(status[index1]))
                draw_text(img, subjects[index1], left, top - 5)
                print(subjects[index1])
                print(status[index1])
            else:
                print("not found")
                draw_rectangle2(img, (left, top, right, bottom), (255, 255, 255))
                draw_text(img, "No Match", left, top - 5)
        else:
            # Encoding failed on the crop (e.g. face too small / occluded).
            draw_rectangle2(img, (left, top, right, bottom), (255, 255, 255))
            draw_text(img, "Error 1", left, top - 5)
    # Persist raw and annotated snapshots (replaces the undefined save()).
    stamp = int(time.time())
    cv2.imwrite("before_{}.jpg".format(stamp), frame)
    cv2.imwrite("after_{}.jpg".format(stamp), img)
    cv2.imshow("face_detected", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Main capture loop: stream from the IP camera, preview frames, and run
# recognition on demand. Keys: 'c' = analyse the current frame, 'q' = quit.
vid = cv2.VideoCapture(ip_address)  # one-step open instead of VideoCapture() + .open()
while True:
    check, frame = vid.read()
    if not check:
        # The original ignored the success flag, so a dropped stream
        # crashed cv2.imshow with a None frame; exit cleanly instead.
        print("Failed to grab frame from stream; exiting.")
        break
    cv2.imshow("face detect", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
    if key == ord('c'):
        show_result(frame)
vid.release()
cv2.destroyAllWindows()