Commit some documentation and PPT slides

This commit is contained in:
Tang1705
2020-08-02 11:29:42 +08:00
parent 88561f34d7
commit c5c560855a
18 changed files with 64 additions and 38 deletions

View File

@@ -321,7 +321,7 @@ class Calibration:
position.append([i, j])
except:
pass
cv2.imwrite('testt3.png',frame)
cv2.imwrite('testt3.png', frame)
# cv2.imshow('lab', frame)
# cv2.waitKey(0) # 按0退出
return points, position
@@ -338,21 +338,24 @@ class Calibration:
feature_points, position = self.decode(frame=img_rd, feature_point=featurepoints_position)
index = len(feature_points) // 2
pixel_distance = math.sqrt((position[index + 1][0] - position[index][0]) ** 2 +
(position[index + 1][1] - position[index][1]) ** 2)
world_distance = math.sqrt((feature_points[index + 1][0] * 2 - feature_points[index][0] * 2) ** 2 +
(feature_points[index + 1][1] * 2 - feature_points[index][1] * 2) ** 2)
try:
pixel_distance = math.sqrt((position[index + 1][0] - position[index][0]) ** 2 +
(position[index + 1][1] - position[index][1]) ** 2)
world_distance = math.sqrt((feature_points[index + 1][0] * 2 - feature_points[index][0] * 2) ** 2 +
(feature_points[index + 1][1] * 2 - feature_points[index][1] * 2) ** 2)
self.scale = world_distance / pixel_distance
self.scale = world_distance / pixel_distance
except:
self.scale = 0.11
# 绘制特征点
point_size = 1
point_color = (0, 0, 255)
thickness = 0 # 可以为 0 、4、8
for point in featurepoints_position:
cv2.circle(img_rd, (int(point[1]), int(point[0])), point_size, point_color, thickness)
cv2.imwrite('testt2.png', img_rd)
# point_size = 1
# point_color = (0, 0, 255)
# thickness = 0 # 可以为 0 、4、8
#
# for point in featurepoints_position:
# cv2.circle(img_rd, (int(point[1]), int(point[0])), point_size, point_color, thickness)
# cv2.imwrite('testt2.png', img_rd)
self.draw_note(img_rd)
self.update_fps()

View File

@@ -35,6 +35,7 @@ class Face_Recognizer:
self.name_known_cnt = 0
self.name_known_list = []
self.type_known_list = []
self.id_known_list = []
self.metadata = []
self.embedded = []
@@ -43,6 +44,7 @@ class Face_Recognizer:
self.pos_camera_list = []
self.name_camera_list = []
self.type_camera_list = []
self.id_camera_list = []
# 存储当前摄像头中捕获到的人脸数
self.faces_cnt = 0
# 存储当前摄像头中捕获到的人脸特征
@@ -84,8 +86,9 @@ class Face_Recognizer:
type = 'old'
elif self.type_known_list[i] == 'volunteer':
type = 'employee'
self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text
self.id_known_list.append(
requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text)
self.loaded = True
return 1
else:
@@ -156,6 +159,7 @@ class Face_Recognizer:
self.pos_camera_list = []
self.name_camera_list = []
self.type_camera_list = []
self.id_camera_list = []
(h, w) = img_rd.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
@@ -181,6 +185,7 @@ class Face_Recognizer:
# 先默认所有人不认识,是 unknown
self.name_camera_list.append("unknown")
self.type_camera_list.append('unknown')
self.id_camera_list.append('unknown')
# 每个捕获人脸的名字坐标
box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
@@ -210,8 +215,10 @@ class Face_Recognizer:
similar_person_num = e_distance_list.index(min(e_distance_list))
# print(min(e_distance_list))
if min(e_distance_list) < 0.58:
self.name_camera_list[k] = self.name_known_list[similar_person_num % 8]
self.name_camera_list[k] = self.id_known_list[similar_person_num % 8]
self.type_camera_list[k] = self.type_known_list[similar_person_num % 8]
self.id_camera_list[k] = self.name_known_list[similar_person_num % 8]
data_type_three[api_transfer[self.type_camera_list[k]]] += 1
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 255, 0), 2)
@@ -221,15 +228,18 @@ class Face_Recognizer:
if self.type_camera_list[k] == 'elder':
mode = smile_detection.smile_detect(img_blank)
if mode == 'happy':
# print("happy")
cv2.rectangle(img_with_name, tuple([startX, startY - 70]),
tuple([endX, startY - 35]),
(0, 215, 255), cv2.FILLED)
cv2.putText(img_with_name, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
(255, 255, 255), 1, cv2.LINE_AA)
cv2.imwrite('smile_detection.jpg', img_with_name)
time_snap = datetime.now()
cv2.imwrite('smile_detection' + str(time_snap).replace(':','') + '.jpg', img_with_name)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(elder_id=self.name_camera_list[k], event=0,
imagePath='smile_detection.jpg'))
t = threading.Thread(target=post(elder_id=self.id_camera_list[k], event=0,
imagePath='smile_detection' + str(
time_snap).replace(':','') + '.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()
@@ -242,9 +252,11 @@ class Face_Recognizer:
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 0, 255), cv2.FILLED)
img_with_name = self.draw_name(img_rd)
cv2.imwrite('stranger_detection.jpg', img_with_name)
time_snap = datetime.now()
cv2.imwrite('stranger_detection' + str(time_snap).replace(':','') + '.jpg', img_with_name)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
t = threading.Thread(
target=post(event=2, imagePath='stranger_detection' + str(time_snap).replace(':','') + '.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()

View File

@@ -196,9 +196,11 @@ class Intrusion_Detection():
event_location = '院子'
print('[EVENT] %s, 院子, 有人闯入禁止区域!!!'
% (current_time))
cv2.imwrite('intrusion.jpg', frame)
time_snap = datetime.now()
cv2.imwrite('intrusion' + str(time_snap).replace(':', '') + '.jpg', frame)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(event=4, imagePath='intrusion.jpg'))
t = threading.Thread(
target=post(event=4, imagePath='intrusion' + str(time_snap).replace(':', '') + '.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()

View File

@@ -34,6 +34,7 @@ class Interaction_Detection:
self.name_known_cnt = 0
self.name_known_list = []
self.type_known_list = []
self.id_known_list = []
self.metadata = []
self.embedded = []
@@ -42,6 +43,7 @@ class Interaction_Detection:
self.pos_camera_list = []
self.name_camera_list = []
self.type_camera_list = []
self.id_camera_list = []
# 存储当前摄像头中捕获到的人脸数
self.faces_cnt = 0
# 存储当前摄像头中捕获到的人脸特征
@@ -61,7 +63,9 @@ class Interaction_Detection:
self.name_known_cnt = 0
for i in range(0, len(self.metadata)):
for j in range(0, len(self.metadata[i])):
self.name_known_cnt += 1
for k in range(0,len(self.metadata[i][j])):
self.name_known_cnt += 1
print(self.name_known_cnt)
self.embedded = np.zeros((self.name_known_cnt * 8, 128))
for i, m in enumerate(self.metadata):
@@ -83,8 +87,8 @@ class Interaction_Detection:
type = 'old'
elif self.type_known_list[i] == 'volunteer':
type = 'employee'
self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text
self.id_known_list.append(requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text)
self.loaded = True
return 1
else:
@@ -216,18 +220,19 @@ class Interaction_Detection:
except:
continue
print(self.type_camera_list)
img_with_name = self.draw_name(img_rd)
if 'volunteer' in self.type_camera_list and len(self.type_camera_list) > 1:
index = self.type_camera_list.index('volunteer')
if 'unknown' in self.type_camera_list and len(self.type_camera_list) > 1:
index = self.type_camera_list.index('unknown')
pos_vol = np.array(self.pos_camera_list[index])
for i in range(0, len(self.type_camera_list)):
if self.type_camera_list[i] == "elder":
d = scale * np.sqrt(facenet.distance(pos_vol, np.array(self.pos_camera_list[i])))
if d < 50:
if (datetime.now() - self.pre).total_seconds() > 5:
cv2.imwrite("interaction.jpg", img_with_name)
t = threading.Thread(target=post(event=1, imagePath='interaction.jpg'))
time_snap = datetime.now()
cv2.imwrite("interaction"+str(time_snap).replace(':','')+".jpg", img_with_name)
t = threading.Thread(target=post(event=1, imagePath='interaction'+str(time_snap).replace(':','')+'.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()

View File

@@ -65,7 +65,7 @@ class Live(object):
self.out = cv2.VideoWriter('output.avi', self.fourcc, 20, (640, 480))
# Get video information
self.fps = 15 # 设置帧速率
self.fps = 30 # 设置帧速率
width = 640 # 宽
height = 480 # 高
@@ -211,7 +211,7 @@ class Live(object):
frame = self.Face_Register_on.process(frame)
elif self.recieve["data"]["fuc"] == 'standard':
self.scale = self.Calibration_on.run(frame)
self.recieve = {"todo": "change", 'data': {'fuc': '14'}}
self.recieve = {"todo": "change", 'data': {'fuc': '2'}}
elif self.recieve["todo"] == 'change':
fuc = self.recieve["data"]["fuc"] # 更改的功能 0:无 1微笑检测 2交互检测 3摔倒检测 4禁区入侵
if fuc == '0':

View File

@@ -101,9 +101,11 @@ def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, cente
draw.text((10, 10), text="Fall Detected", font=font,
fill=(255, 0, 0))
frame = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
cv2.imwrite('fall_detection.jpg', frame)
time_snap = datetime.datetime.now()
cv2.imwrite('fall_detection' + str(time_snap).replace(':', '') + '.jpg', frame)
if (datetime.datetime.now() - pre).total_seconds() > 5:
t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
t = threading.Thread(
target=post(event=3, imagePath='fall_detection' + str(time_snap).replace(':', '') + '.jpg'))
t.setDaemon(False)
t.start()
pre = datetime.datetime.now()

View File

@@ -177,11 +177,13 @@ try:
draw.text((10, 10), text="Fall Detected", font=font,
fill=(255, 0, 0))
img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
cv2.imwrite('fall_detection.jpg', frame)
time_snap = datetime.datetime.now()
cv2.imwrite('fall_detection' + str(time_snap).replace(':','') + '.jpg', frame)
if (datetime.datetime.now() - pre).total_seconds() > 5:
t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
t = threading.Thread(
target=post(event=3, imagePath='fall_detection' + str(time_snap).replace(':','') + '.jpg'))
t.start()
status = post(event=3, imagePath='fall_detection.jpg')
# status = post(event=3, imagePath='fall_detection.jpg')
# print("fall")
pre = datetime.datetime.now()
# print(pre)