diff --git a/Class/detection/Calibration_On_Desk.py b/Class/detection/Calibration_On_Desk.py
index 67633f8..a9a7f10 100644
--- a/Class/detection/Calibration_On_Desk.py
+++ b/Class/detection/Calibration_On_Desk.py
@@ -321,7 +321,7 @@ class Calibration:
                         position.append([i, j])
                 except:
                     pass
-        cv2.imwrite('testt3.png',frame)
+        cv2.imwrite('testt3.png', frame)
         # cv2.imshow('lab', frame)
         # cv2.waitKey(0)  # 按0退出
         return points, position
@@ -338,21 +338,24 @@ class Calibration:
         feature_points, position = self.decode(frame=img_rd, feature_point=featurepoints_position)
         index = len(feature_points) // 2
-        pixel_distance = math.sqrt((position[index + 1][0] - position[index][0]) ** 2 +
-                                   (position[index + 1][1] - position[index][1]) ** 2)
-        world_distance = math.sqrt((feature_points[index + 1][0] * 2 - feature_points[index][0] * 2) ** 2 +
-                                   (feature_points[index + 1][1] * 2 - feature_points[index][1] * 2) ** 2)
+        try:
+            pixel_distance = math.sqrt((position[index + 1][0] - position[index][0]) ** 2 +
+                                       (position[index + 1][1] - position[index][1]) ** 2)
+            world_distance = math.sqrt((feature_points[index + 1][0] * 2 - feature_points[index][0] * 2) ** 2 +
+                                       (feature_points[index + 1][1] * 2 - feature_points[index][1] * 2) ** 2)
+            self.scale = world_distance / pixel_distance

-        self.scale = world_distance / pixel_distance
+        except:
+            self.scale = 0.11

         # 绘制特征点
-        point_size = 1
-        point_color = (0, 0, 255)
-        thickness = 0  # 可以为 0 、4、8
-
-        for point in featurepoints_position:
-            cv2.circle(img_rd, (int(point[1]), int(point[0])), point_size, point_color, thickness)
-        cv2.imwrite('testt2.png', img_rd)
+        # point_size = 1
+        # point_color = (0, 0, 255)
+        # thickness = 0  # 可以为 0 、4、8
+        #
+        # for point in featurepoints_position:
+        #     cv2.circle(img_rd, (int(point[1]), int(point[0])), point_size, point_color, thickness)
+        # cv2.imwrite('testt2.png', img_rd)

         self.draw_note(img_rd)
         self.update_fps()
diff --git a/Class/detection/Camera_In_Room.py b/Class/detection/Camera_In_Room.py
index eef2ad1..85a9e29 100644
--- a/Class/detection/Camera_In_Room.py
+++ b/Class/detection/Camera_In_Room.py
@@ -35,6 +35,7 @@ class Face_Recognizer:
         self.name_known_cnt = 0
         self.name_known_list = []
         self.type_known_list = []
+        self.id_known_list = []
         self.metadata = []
         self.embedded = []

@@ -43,6 +44,7 @@ class Face_Recognizer:
         self.pos_camera_list = []
         self.name_camera_list = []
         self.type_camera_list = []
+        self.id_camera_list = []
         # 存储当前摄像头中捕获到的人脸数
         self.faces_cnt = 0
         # 存储当前摄像头中捕获到的人脸特征
@@ -84,8 +86,9 @@ class Face_Recognizer:
                     type = 'old'
                 elif self.type_known_list[i] == 'volunteer':
                     type = 'employee'
-                self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
-                    self.name_known_list[i]) + "/").text
+                self.id_known_list.append(
+                    requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
+                        self.name_known_list[i]) + "/").text)
             self.loaded = True
             return 1
         else:
@@ -156,6 +159,7 @@ class Face_Recognizer:
         self.pos_camera_list = []
         self.name_camera_list = []
         self.type_camera_list = []
+        self.id_camera_list = []

         (h, w) = img_rd.shape[:2]
         blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
@@ -181,6 +185,7 @@ class Face_Recognizer:
             # 先默认所有人不认识,是 unknown
             self.name_camera_list.append("unknown")
             self.type_camera_list.append('unknown')
+            self.id_camera_list.append('unknown')

             # 每个捕获人脸的名字坐标
             box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
@@ -210,8 +215,10 @@ class Face_Recognizer:
                 similar_person_num = e_distance_list.index(min(e_distance_list))
                 # print(min(e_distance_list))
                 if min(e_distance_list) < 0.58:
-                    self.name_camera_list[k] = self.name_known_list[similar_person_num % 8]
+                    self.name_camera_list[k] = self.id_known_list[similar_person_num % 8]
                     self.type_camera_list[k] = self.type_known_list[similar_person_num % 8]
+                    self.id_camera_list[k] = self.name_known_list[similar_person_num % 8]
+
                     data_type_three[api_transfer[self.type_camera_list[k]]] += 1

                     cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]), (0, 255, 0), 2)
@@ -221,15 +228,18 @@ class Face_Recognizer:
                     if self.type_camera_list[k] == 'elder':
                         mode = smile_detection.smile_detect(img_blank)
                         if mode == 'happy':
+                            # print("happy")
                             cv2.rectangle(img_with_name, tuple([startX, startY - 70]), tuple([endX, startY - 35]),
                                           (0, 215, 255), cv2.FILLED)
                             cv2.putText(img_with_name, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
                                         (255, 255, 255), 1, cv2.LINE_AA)
-                            cv2.imwrite('smile_detection.jpg', img_with_name)
+                            time_snap = datetime.now()
+                            cv2.imwrite('smile_detection' + str(time_snap).replace(':','') + '.jpg', img_with_name)
                             if (datetime.now() - self.pre).total_seconds() > 5:
-                                t = threading.Thread(target=post(elder_id=self.name_camera_list[k], event=0,
-                                                                 imagePath='smile_detection.jpg'))
+                                t = threading.Thread(target=post(elder_id=self.id_camera_list[k], event=0,
+                                                                 imagePath='smile_detection' + str(
+                                                                     time_snap).replace(':','') + '.jpg'))
                                 t.setDaemon(False)
                                 t.start()
                                 self.pre = datetime.now()
@@ -242,9 +252,11 @@ class Face_Recognizer:
                         cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]), (0, 0, 255),
                                       cv2.FILLED)
                         img_with_name = self.draw_name(img_rd)
-                        cv2.imwrite('stranger_detection.jpg', img_with_name)
+                        time_snap = datetime.now()
+                        cv2.imwrite('stranger_detection' + str(time_snap).replace(':','') + '.jpg', img_with_name)
                         if (datetime.now() - self.pre).total_seconds() > 5:
-                            t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
+                            t = threading.Thread(
+                                target=post(event=2, imagePath='stranger_detection' + str(time_snap).replace(':','') + '.jpg'))
                             t.setDaemon(False)
                             t.start()
                             self.pre = datetime.now()
diff --git a/Class/detection/Camera_In_Yard.py b/Class/detection/Camera_In_Yard.py
index 4268202..b48e838 100644
--- a/Class/detection/Camera_In_Yard.py
+++ b/Class/detection/Camera_In_Yard.py
@@ -196,9 +196,11 @@ class Intrusion_Detection():
                     event_location = '院子'
                     print('[EVENT] %s, 院子, 有人闯入禁止区域!!!'
                           % (current_time))
-                    cv2.imwrite('intrusion.jpg', frame)
+                    time_snap = datetime.now()
+                    cv2.imwrite('intrusion' + str(time_snap).replace(':', '') + '.jpg', frame)
                     if (datetime.now() - self.pre).total_seconds() > 5:
-                        t = threading.Thread(target=post(event=4, imagePath='intrusion.jpg'))
+                        t = threading.Thread(
+                            target=post(event=4, imagePath='intrusion' + str(time_snap).replace(':', '') + '.jpg'))
                         t.setDaemon(False)
                         t.start()
                         self.pre = datetime.now()
diff --git a/Class/detection/Camera_On_Desk.py b/Class/detection/Camera_On_Desk.py
index fb20c16..4c57d94 100644
--- a/Class/detection/Camera_On_Desk.py
+++ b/Class/detection/Camera_On_Desk.py
@@ -34,6 +34,7 @@ class Interaction_Detection:
         self.name_known_cnt = 0
         self.name_known_list = []
         self.type_known_list = []
+        self.id_known_list = []
         self.metadata = []
         self.embedded = []

@@ -42,6 +43,7 @@ class Interaction_Detection:
         self.pos_camera_list = []
         self.name_camera_list = []
         self.type_camera_list = []
+        self.id_camera_list = []
         # 存储当前摄像头中捕获到的人脸数
         self.faces_cnt = 0
         # 存储当前摄像头中捕获到的人脸特征
@@ -61,7 +63,9 @@ class Interaction_Detection:
         self.name_known_cnt = 0
         for i in range(0, len(self.metadata)):
             for j in range(0, len(self.metadata[i])):
-                self.name_known_cnt += 1
+                for k in range(0,len(self.metadata[i][j])):
+                    self.name_known_cnt += 1
+        print(self.name_known_cnt)

         self.embedded = np.zeros((self.name_known_cnt * 8, 128))
         for i, m in enumerate(self.metadata):
@@ -83,8 +87,8 @@ class Interaction_Detection:
                     type = 'old'
                 elif self.type_known_list[i] == 'volunteer':
                     type = 'employee'
-                self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
-                    self.name_known_list[i]) + "/").text
+                self.id_known_list.append(requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
+                    self.name_known_list[i]) + "/").text)
             self.loaded = True
             return 1
         else:
@@ -216,18 +220,19 @@ class Interaction_Detection:
                 except:
                     continue

-
+        print(self.type_camera_list)
         img_with_name = self.draw_name(img_rd)
-        if 'volunteer' in self.type_camera_list and len(self.type_camera_list) > 1:
-            index = self.type_camera_list.index('volunteer')
+        if 'unknown' in self.type_camera_list and len(self.type_camera_list) > 1:
+            index = self.type_camera_list.index('unknown')
             pos_vol = np.array(self.pos_camera_list[index])
             for i in range(0, len(self.type_camera_list)):
                 if self.type_camera_list[i] == "elder":
                     d = scale * np.sqrt(facenet.distance(pos_vol, np.array(self.pos_camera_list[i])))
                     if d < 50:
                         if (datetime.now() - self.pre).total_seconds() > 5:
-                            cv2.imwrite("interaction.jpg", img_with_name)
-                            t = threading.Thread(target=post(event=1, imagePath='interaction.jpg'))
+                            time_snap = datetime.now()
+                            cv2.imwrite("interaction"+str(time_snap).replace(':','')+".jpg", img_with_name)
+                            t = threading.Thread(target=post(event=1, imagePath='interaction'+str(time_snap).replace(':','')+'.jpg'))
                             t.setDaemon(False)
                             t.start()
                             self.pre = datetime.now()
diff --git a/Class/detection/Live_Tool.py b/Class/detection/Live_Tool.py
index 28bacc2..abdfc42 100644
--- a/Class/detection/Live_Tool.py
+++ b/Class/detection/Live_Tool.py
@@ -65,7 +65,7 @@ class Live(object):
         self.out = cv2.VideoWriter('output.avi', self.fourcc, 20, (640, 480))

         # Get video information
-        self.fps = 15  # 设置帧速率
+        self.fps = 30  # 设置帧速率
         width = 640  # 宽
         height = 480  # 高

@@ -211,7 +211,7 @@ class Live(object):
                     frame = self.Face_Register_on.process(frame)
                 elif self.recieve["data"]["fuc"] == 'standard':
                     self.scale = self.Calibration_on.run(frame)
-                    self.recieve = {"todo": "change", 'data': {'fuc': '14'}}
+                    self.recieve = {"todo": "change", 'data': {'fuc': '2'}}
             elif self.recieve["todo"] == 'change':
                 fuc = self.recieve["data"]["fuc"]  # 更改的功能 0:无 1微笑检测 2交互检测 3摔倒检测 4禁区入侵
                 if fuc == '0':
diff --git a/Class/detection/algorithm_fall.py b/Class/detection/algorithm_fall.py
index ac864f8..2abf0e4 100644
--- a/Class/detection/algorithm_fall.py
+++ b/Class/detection/algorithm_fall.py
@@ -101,9 +101,11 @@ def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, cente
                 draw.text((10, 10), text="Fall Detected", font=font, fill=(255, 0, 0))
                 frame = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)

-                cv2.imwrite('fall_detection.jpg', frame)
+                time_snap = datetime.datetime.now()
+                cv2.imwrite('fall_detection' + str(time_snap).replace(':', '') + '.jpg', frame)
                 if (datetime.datetime.now() - pre).total_seconds() > 5:
-                    t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
+                    t = threading.Thread(
+                        target=post(event=3, imagePath='fall_detection' + str(time_snap).replace(':', '') + '.jpg'))
                     t.setDaemon(False)
                     t.start()
                     pre = datetime.datetime.now()
diff --git a/Class/detection/fall_detection.py b/Class/detection/fall_detection.py
index a526c03..9f4a2d4 100644
--- a/Class/detection/fall_detection.py
+++ b/Class/detection/fall_detection.py
@@ -177,11 +177,13 @@ try:
                 draw.text((10, 10), text="Fall Detected", font=font, fill=(255, 0, 0))
                 img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)

-                cv2.imwrite('fall_detection.jpg', frame)
+                time_snap = datetime.datetime.now()
+                cv2.imwrite('fall_detection' + str(time_snap).replace(':','') + '.jpg', frame)
                 if (datetime.datetime.now() - pre).total_seconds() > 5:
-                    t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
+                    t = threading.Thread(
+                        target=post(event=3, imagePath='fall_detection' + str(time_snap).replace(':','') + '.jpg'))
                     t.start()
-                    status = post(event=3, imagePath='fall_detection.jpg')
+                    # status = post(event=3, imagePath='fall_detection.jpg')
                     # print("fall")
                     pre = datetime.datetime.now()
                     # print(pre)
diff --git a/Doc/用户手册.pdf b/Doc/用户手册.pdf
new file mode 100644
index 0000000..77108ee
Binary files /dev/null and b/Doc/用户手册.pdf differ
diff --git a/Doc/系统设计说明书.pdf b/Doc/系统设计说明书.pdf
new file mode 100644
index 0000000..59ab0ad
Binary files /dev/null and b/Doc/系统设计说明书.pdf differ
diff --git a/Doc/需求规格说明书.pdf b/Doc/需求规格说明书.pdf
new file mode 100644
index 0000000..f1531f3
Binary files /dev/null and b/Doc/需求规格说明书.pdf differ
diff --git a/Doc/项目总结.pdf b/Doc/项目总结.pdf
new file mode 100644
index 0000000..26fb81b
Binary files /dev/null and b/Doc/项目总结.pdf differ
diff --git a/Doc/项目章程.pdf b/Doc/项目章程.pdf
new file mode 100644
index 0000000..17c25df
Binary files /dev/null and b/Doc/项目章程.pdf differ
diff --git a/PPT/小学期中期答辩.pptx b/PPT/小学期中期答辩.pptx
new file mode 100644
index 0000000..1f4ae3f
Binary files /dev/null and b/PPT/小学期中期答辩.pptx differ
diff --git a/PPT/小学期中期答辩稿.docx b/PPT/小学期中期答辩稿.docx
new file mode 100644
index 0000000..0bf3775
Binary files /dev/null and b/PPT/小学期中期答辩稿.docx differ
diff --git a/PPT/小学期中期答辩稿.pdf b/PPT/小学期中期答辩稿.pdf
new file mode 100644
index 0000000..1da3625
Binary files /dev/null and b/PPT/小学期中期答辩稿.pdf differ
diff --git a/PPT/小学期终期答辩.pptx b/PPT/小学期终期答辩.pptx
new file mode 100644
index 0000000..93b29ff
Binary files /dev/null and b/PPT/小学期终期答辩.pptx differ
diff --git a/PPT/小学期结题答辩稿.docx b/PPT/小学期结题答辩稿.docx
new file mode 100644
index 0000000..ff73297
Binary files /dev/null and b/PPT/小学期结题答辩稿.docx differ
diff --git a/PPT/小学期结题答辩稿.pdf b/PPT/小学期结题答辩稿.pdf
new file mode 100644
index 0000000..8f5c05c
Binary files /dev/null and b/PPT/小学期结题答辩稿.pdf differ