final version

This commit is contained in:
Tang1705
2020-07-13 10:14:06 +08:00
parent b79ae2e115
commit 5fd2fd9d54
20 changed files with 499 additions and 331 deletions

View File

@@ -1,14 +1,17 @@
# Real-time face recognition from the camera
import threading
import requests
import dlib
import numpy as np
import cv2
import pandas as pd
import os
import time
import facenet
from PIL import Image, ImageDraw, ImageFont
from sklearn.neighbors import KNeighborsClassifier
from Post import post
from model import create_model
@@ -23,7 +26,7 @@ start_time = 0
detector = cv2.dnn.readNetFromCaffe("data/data_opencv/deploy.prototxt.txt",
"data/data_opencv/res10_300x300_ssd_iter_140000.caffemodel")
# 2. Dlib facial landmark predictor
# predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib ResNet face recognition model, extracts a 128D feature vector
@@ -42,6 +45,7 @@ class Face_Recognizer:
self.loaded = False
self.name_known_cnt = 0
self.name_known_list = []
self.type_known_list = []
self.metadata = []
self.embedded = []
@@ -66,24 +70,38 @@ class Face_Recognizer:
else:
if os.path.exists("data/data_faces_from_camera/"):
self.metadata = facenet.load_metadata("data/data_faces_from_camera/")
self.name_known_cnt = self.metadata.shape[0]
self.embedded = np.zeros((self.metadata.shape[0], 128))
self.name_known_cnt = 0
for i in range(0, len(self.metadata)):
for j in range(0, len(self.metadata[i])):
self.name_known_cnt += 1
self.embedded = np.zeros((self.name_known_cnt * 8, 128))
for i, m in enumerate(self.metadata):
for j, n in enumerate(m):
for k, p in enumerate(n):
img = facenet.load_image(p.image_path())
img = facenet.load_image(p.image_path().replace("\\", "/"))
# img = align_image(img)
img = cv2.resize(img, (96, 96))
# scale RGB values to interval [0,1]
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
self.embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
# self.embedded[i] = self.embedded[i] / len(m)
path = p.image_path().replace("\\", "/")
self.name_known_list.append(path.split('/')[-2])
self.type_camera_list.append(path.split('/')[-3])
self.loaded = True
self.type_known_list.append(path.split('/')[-3])
# print(self.embedded.shape)
for i in range(len(self.name_known_list)):
if self.type_known_list[i] == 'elder':
type = 'old'
elif self.type_known_list[i] == 'volunteer':
type = 'employee'
self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text
self.loaded = True
# print(self.name_known_list)
return 1
else:
print('##### Warning #####', '\n')
@@ -99,7 +117,7 @@ class Face_Recognizer:
# def return_euclidean_distance(feature_1, feature_2):
# feature_1 = np.array(feature_1)
# feature_2 = np.array(feature_2)
# dist = np.sqrt(np.sum((feature_1 - feature_2) ** 2))
# return dist
# Update FPS
@@ -133,10 +151,10 @@ class Face_Recognizer:
# Modify the displayed names
def modify_name_camera_list(self):
# TODO: database ID
# Default known name: 1, 2, person_3
self.name_known_list[0] = '1'.encode('utf-8').decode()
self.name_known_list[1] = 'Tony Blair'.encode('utf-8').decode()
# self.name_known_list[2] = '唐保生'.encode('utf-8').decode()
# self.name_known_list[3] = '1'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
@@ -157,6 +175,7 @@ class Face_Recognizer:
self.faces_cnt = 0
self.pos_camera_list = []
self.name_camera_list = []
self.type_camera_list = []
(h, w) = img_rd.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
@@ -164,11 +183,11 @@ class Face_Recognizer:
detector.setInput(blob)
faces = detector.forward()
# 2. Faces detected
if faces.shape[2] != 0:
# 3. Compute the features of every face in the current frame and store them in self.features_camera_list
# for i in range(0, faces.shape[2]):
# confidence = faces[0, 0, i, 2]
#
# # filter out weak detections by ensuring the `confidence` is
# # greater than the minimum confidence
@@ -196,6 +215,7 @@ class Face_Recognizer:
# Set the default names of faces to "unknown"
self.name_camera_list.append("unknown")
self.type_camera_list.append('unknown')
# Name-label coordinates for each captured face
box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
@@ -208,6 +228,7 @@ class Face_Recognizer:
# img_blank = np.zeros((height, width, 3), np.uint8)
img_blank = img_rd[startY:endY, startX:endX]
img_blank = img_blank[..., ::-1]
try:
# for ii in range(height):
# for jj in range(width):
@@ -233,57 +254,57 @@ class Face_Recognizer:
# # empty data person_X
# e_distance_list.append(999999999)
# # 6. Find the smallest Euclidean distance match
# print(self.neigh.predict([img]))
similar_person_num = e_distance_list.index(min(e_distance_list))
# print("Minimum e distance with person", self.name_known_list[similar_person_num])
# print(min(e_distance_list))
print(min(e_distance_list))
if min(e_distance_list) < 0.58:
self.name_camera_list[k] = self.name_known_list[similar_person_num % 8]
if self.type_camera_list[similar_person_num % 8] == 'elder':
self.type_camera_list[k] = self.type_known_list[similar_person_num % 8]
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 255, 0), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 255, 0), cv2.FILLED)
img_with_name = self.draw_name(img_rd)
if self.type_camera_list[k] == 'elder':
mode = smile_detection.smile_detect(img_blank)
if mode == 'happy':
cv2.imwrite('smile_detection.jpg', img_rd)
cv2.rectangle(img_rd, tuple([startX, startY - 70]),
cv2.rectangle(img_with_name, tuple([startX, startY - 70]),
tuple([endX, startY - 35]),
(0, 215, 255), cv2.FILLED)
cv2.putText(img_rd, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
cv2.putText(img_with_name, 'happy', (startX + 5, startY - 45),
cv2.FONT_ITALIC, 1,
(255, 255, 255), 1, cv2.LINE_AA)
cv2.imwrite('smile_detection.jpg', img_with_name)
# t = threading.Thread(target=post(elder_id=self.name_camera_list[k], event=0,
# imagePath='smile_detection.jpg'))
# t.start()
# print("May be person " + str(self.name_known_list[similar_person_num]))
elif min(e_distance_list) > 0.75:
self.name_camera_list[k] = '陌生人'
cv2.imwrite('stranger_detection.jpg', img_rd)
# t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 0, 255), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 0, 255), cv2.FILLED)
img_with_name = self.draw_name(img_rd)
cv2.imwrite('stranger_detection.jpg', img_with_name)
# t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
# t.start()
else:
pass
# print("Unknown person")
# Bounding boxes
for kk, d in enumerate(faces):
# Draw the bounding box
if self.name_camera_list[k] == '陌生人':
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 0, 255), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 0, 255), cv2.FILLED)
elif self.name_camera_list[k] != 'unknown':
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 255, 0), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 255, 0), cv2.FILLED)
except:
continue
# print('\n')
# self.faces_cnt = faces.shape[2]
# if len(self.name_camera_list) > 0:
# 7. Update the displayed names here
# self.modify_name_camera_list()
# 8. Draw the names
# self.draw_name(img_rd)
img_with_name = self.draw_name(img_rd)
else:
img_with_name = img_rd
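For context, the recognition path in this file reduces to: embed each resized 96x96 face crop with the OpenFace-style nn4.small2 network, take the minimum squared Euclidean distance against the enrolled embeddings, and apply the two thresholds used in this commit (match below 0.58, stranger above 0.75). A minimal sketch of that decision rule, assuming known_embs is the (N, 128) matrix built in get_face_database and names is aligned with it one entry per image (the diff's own lookup additionally applies % 8, since eight shots are enrolled per person):

import numpy as np

def match_face(query_emb, known_embs, names, match_t=0.58, stranger_t=0.75):
    # squared Euclidean distance to every enrolled embedding
    dists = np.sum(np.square(known_embs - query_emb), axis=1)
    best = int(np.argmin(dists))
    if dists[best] < match_t:
        return names[best]   # confident match
    if dists[best] > stranger_t:
        return 'stranger'    # confidently unknown face
    return 'unknown'         # ambiguous band between the two thresholds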

View File

@@ -17,7 +17,7 @@ action_map = {'look_ahead': '请看前方', 'blink': '请眨眼', 'open_mouth':
'smile': '请笑一笑', 'rise_head': '请抬头',
'bow_head': '请低头', 'look_left': '请看左边',
'look_right': '请看右边', 'over': '录入完成'}
people_type_dict = {'0': 'elder', '1': 'worker', '2': 'volunteer'}
people_type_dict = {'0': 'elder', '1': 'employee', '2': 'volunteer'}
# Dlib frontal face detector
# detector = dlib.get_frontal_face_detector()
@@ -131,7 +131,7 @@ class Face_Register:
# 1. Create the directory for storing face images
# self.pre_work_mkdir()
# 2. Delete existing face images under "/data/data_faces_from_camera"
# self.pre_work_del_old_face_folders()
# 3. Check existing face files under "/data/data_faces_from_camera"
@@ -183,8 +183,8 @@ class Face_Register:
height = (endY - startY)
width = (endX - startX)
# hh = int(height / 2)
# ww = int(width / 2)
# 6. Check whether the face bounding box goes beyond 480x640
if endX > 640 or endY > 480 or startX < 0 or startY < 0:
@@ -205,7 +205,7 @@ class Face_Register:
color_rectangle, 2)
# 7. Create an empty image sized to the face
# img_blank = np.zeros((int(height * 2), width * 2, 3), np.uint8)
img_blank = np.zeros((height, width, 3), np.uint8)
if save_flag:
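Every detector in this commit repeats the same OpenCV DNN preamble: readNetFromCaffe, a 300x300 blob with the Caffe mean values, a forward pass, then a confidence filter. A self-contained sketch of that shared pattern, using the model paths and the 0.5 cutoff from the diff:

import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe("data/data_opencv/deploy.prototxt.txt",
                               "data/data_opencv/res10_300x300_ssd_iter_140000.caffemodel")

def detect_faces(img, conf_threshold=0.5):
    (h, w) = img.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    for i in range(detections.shape[2]):
        if detections[0, 0, i, 2] < conf_threshold:
            continue  # filter out weak detections
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        boxes.append(box.astype("int"))  # (startX, startY, endX, endY)
    return boxes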

View File

@@ -181,7 +181,7 @@ class Calibration:
gridpoints[i][j] = 0
j = j + 1
i = i + 1
# cv2.imshow('2', frame)
# cv2.waitKey(0) # press 0 to exit
return gridpoints
@@ -279,7 +279,7 @@ class Calibration:
# cv2.imshow('lab', frame)
# cv2.waitKey(0) # press 0 to exit
data = GF(1, 1, 1, 1, 1, 1)
# map = np.zeros((4, 4, 4, 4, 4, 4, 2))
map = np.zeros((4096, 2))
@@ -342,7 +342,7 @@ class Calibration:
cv2.imwrite('testt.png', img_rd)
# img_rd = cv2.resize(img_rd, (img_rd.shape[0]//2, img_rd.shape[1]//2))
# 1024*1280: -1 candidate point, -3 non-candidate point
candidate = self.get_candidate_points(frame=img_rd)
# 1024*1280: 1 corner point, 0 non-corner point
@@ -364,8 +364,8 @@ class Calibration:
# distance = []
# for i in range(0, len(featurepoints_position) - 1):
# distance.append(
# math.sqrt((featurepoints_position[i + 1][0] - featurepoints_position[i][0]) ** 2 +
# (featurepoints_position[i + 1][1] - featurepoints_position[i][1]) ** 2))
# print(distance)
# distance = sorted(distance)
@@ -380,7 +380,7 @@ class Calibration:
# print(feature_points[index + 1][0] - feature_points[index][0],
# feature_points[index + 1][1] - feature_points[index][1])
# print(distance)
# for i in range(index - 1, index + 2):
# print(distance[i])
# Draw the feature points
point_size = 1

View File

@@ -180,7 +180,7 @@ class Calibration:
gridpoints[i][j] = 0
j = j + 1
i = i + 1
# cv2.imshow('2', frame)
# cv2.waitKey(0) # press 0 to exit
return gridpoints
@@ -260,7 +260,7 @@ class Calibration:
if l[i][j] < 100:
color_map[i][j] = 3
frame[i][j] = np.array([0, 0, 0])
elif l[i][j] > 220:
elif l[i][j] > 200:
frame[i][j] = np.array([255, 255, 255])
color_map[i][j] = 255
else:
@@ -278,7 +278,7 @@ class Calibration:
# cv2.imshow('lab', frame)
# cv2.waitKey(0) # press 0 to exit
data = GF(1, 1, 1, 1, 1, 1)
# map = np.zeros((4, 4, 4, 4, 4, 4, 2))
map = np.zeros((4096, 2))
@@ -321,6 +321,7 @@ class Calibration:
position.append([i, j])
except:
pass
cv2.imwrite('testt3.png', frame)
# cv2.imshow('lab', frame)
# cv2.waitKey(0) # press 0 to exit
return points, position
@@ -351,6 +352,7 @@ class Calibration:
for point in featurepoints_position:
cv2.circle(img_rd, (int(point[1]), int(point[0])), point_size, point_color, thickness)
cv2.imwrite('testt2.png', img_rd)
self.draw_note(img_rd)
self.update_fps()
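The per-pixel loop above buckets each pixel by its L (lightness) channel in Lab space, forcing dark pixels to black and bright ones to white. A vectorized sketch of the same thresholding, assuming the 100/200 cutoffs from this commit; mid-range pixels are left to the else branch that this hunk does not show:

import cv2
import numpy as np

def classify_lightness(frame, low=100, high=200):
    l = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)[..., 0]
    color_map = np.zeros(l.shape, dtype=np.uint8)
    out = frame.copy()
    dark, bright = l < low, l > high
    out[dark] = (0, 0, 0)          # force dark pixels to black
    color_map[dark] = 3
    out[bright] = (255, 255, 255)  # force bright pixels to white
    color_map[bright] = 255
    return out, color_map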

View File

@@ -1,8 +1,5 @@
# 进行人脸录入 / face register
# 录入多张人脸 / support multi-faces
import datetime
import dlib
import numpy as np
import cv2
import os
@@ -18,7 +15,7 @@ action_map = {'look_ahead': '请看前方', 'blink': '请眨眼', 'open_mouth':
'smile': '请笑一笑', 'rise_head': '请抬头',
'bow_head': '请低头', 'look_left': '请看左边',
'look_right': '请看右边', 'over': '录入完成'}
people_type_dict = {'0': 'elder', '1': 'worker', '2': 'volunteer'}
people_type_dict = {'0': 'elder', '1': 'employee', '2': 'volunteer'}
# Dlib frontal face detector
# detector = dlib.get_frontal_face_detector()
@@ -50,7 +47,7 @@ class Face_Register:
self.fps = 0
self.people_type = people_type_dict[str(people_type)]
self.id = id
self.id = str(id)
def speak(self):
text = action_map[action_list[self.index]]
@@ -111,9 +108,9 @@ class Face_Register:
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 140), self.font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "N: Create face folder", (20, 350), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "N: Create face folder", (20, 350), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "S: Save current face", (20, 400), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
font = ImageFont.truetype("simsun.ttc", 30, index=1)
img_rd = Image.fromarray(cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB))
@@ -137,18 +134,68 @@ class Face_Register:
detector.setInput(blob)
faces = detector.forward()
# # Create the folder for storing faces
# if self.index == 0:
# # self.existing_faces_cnt += 1
# current_face_dir = self.path_photos_from_camera + '/' + self.people_type + '/' + self.id
# # current_face_dir = self.path_photos_from_camera + "person_" + str(self.existing_faces_cnt)
# os.makedirs(current_face_dir)
# # print('\n')
# # print("新建的人脸文件夹 / Create folders: ", current_face_dir)
#
# self.ss_cnt = 0 # reset the face counter
# self.index = 0
# self.press_n_flag = 1 # 'n' has been pressed
# Faces detected
if faces.shape[2] != 0:
# Bounding boxes
for i in range(0, faces.shape[2]):
# Compute the bounding-box size
confidence = faces[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence < 0.5:
continue
self.faces_cnt += 1
# compute the (x, y)-coordinates of the bounding box for the
# object
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# Check whether the face bounding box goes beyond 480x640
if endX > 640 or endY > 480 or startX < 0 or startY < 0:
# if (endX + ww) > 640 or (endY + hh > 480) or (startX - ww < 0) or (
# startY - hh < 0):
cv2.putText(img_rd, "OUT OF RANGE", (20, 300), self.font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
color_rectangle = (0, 0, 255)
save_flag = 0
print("请调整位置 / Please adjust your position")
else:
color_rectangle = (0, 255, 0)
save_flag = 1
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]), color_rectangle, 2)
# Add caption text to the output window
img_rd = self.draw_note(img_rd)
self.update_fps()
if not self.init:
self.speak()
self.init = True
return img_rd
def take_photo(self, img_rd):
(h, w) = img_rd.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
detector.setInput(blob)
faces = detector.forward()
current_face_dir = self.path_photos_from_camera + self.people_type + '/' + self.id
# Create the folder for storing faces
if not os.path.exists(current_face_dir):
# self.existing_faces_cnt += 1
# current_face_dir = self.path_photos_from_camera + "person_" + str(self.existing_faces_cnt)
os.makedirs(current_face_dir)
# print('\n')
# print("新建的人脸文件夹 / Create folders: ", current_face_dir)
#
self.ss_cnt = 0 # reset the face counter
# self.index = 0
self.press_n_flag = 1 # 'n' has been pressed
# Faces detected
if faces.shape[2] != 0:
@@ -169,10 +216,10 @@ class Face_Register:
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
height = (endY - startY)
width = (endX - startX)
# hh = int(height / 2)
# ww = int(width / 2)
# height = (endY - startY)
# width = (endX - startX)
# # hh = int(height / 2)
# # ww = int(width / 2)
# Check whether the face bounding box goes beyond 480x640
if endX > 640 or endY > 480 or startX < 0 or startY < 0:
@@ -188,29 +235,26 @@ class Face_Register:
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]), color_rectangle, 2)
# Create an empty image sized to the face
# img_blank = np.zeros((int(height * 2), width * 2, 3), np.uint8)
img_blank = np.zeros((height, width, 3), np.uint8)
if save_flag:
# Save the face from the camera locally
# Check whether 'n' was pressed first to create the folder
if self.press_n_flag:
self.ss_cnt += 1
# if save_flag:
# # Save the face from the camera locally
# # Check whether 'n' was pressed first to create the folder
# if self.press_n_flag:
# self.ss_cnt += 1
#
# if self.index <= 7:
# for ii in range(height):
# for jj in range(width):
# img_blank[ii][jj] = img_rd[startY + ii][startX + jj]
# cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
# print("写入本地 / Save into",
# str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
# if self.index < len(action_list) - 1:
# self.index += 1
# self.speak()
# else:
# print("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
# # self.faces_cnt = len(faces)
if self.index <= 7:
img_blank = img_rd[startY:endY, startX:endX]
cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
print("写入本地 / Save into",
str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
if self.index < len(action_list) - 1:
self.index += 1
else:
self.init = False
self.speak()
else:
print("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
# self.faces_cnt = len(faces)
# Add caption text to the output window
img_rd = self.draw_note(img_rd)
@@ -218,13 +262,11 @@ class Face_Register:
self.update_fps()
if not self.init:
self.index = 0
self.speak()
self.init = True
return img_rd
def take_photo(self, frame):
pass
def run(self, frame):
return self.process(frame)
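The enrollment flow above saves eight prompted shots per person into data/data_faces_from_camera/<people_type>/<id>/img_face_<n>.jpg, which is the layout the recognizers' load_metadata walk depends on. A hedged sketch of the save step under that assumption (save_face_crop is a hypothetical helper, not a function in this repo):

import os
import cv2

def save_face_crop(img_rd, box, people_type, person_id, shot_idx,
                   root='data/data_faces_from_camera/'):
    (startX, startY, endX, endY) = box
    face_dir = os.path.join(root, people_type, str(person_id))
    os.makedirs(face_dir, exist_ok=True)  # create <type>/<id>/ on the first shot
    img_blank = img_rd[startY:endY, startX:endX]
    path = os.path.join(face_dir, 'img_face_{}.jpg'.format(shot_idx))
    cv2.imwrite(path, img_blank)
    return path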

View File

@@ -7,6 +7,7 @@ import os
from sys import platform
import argparse
import numpy as np
from Post import post
from PIL import ImageDraw, ImageFont
from PIL import Image
@@ -158,8 +159,9 @@ class Fall_Detection:
fill=(255, 0, 0))
img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
cv2.imwrite('fall_detection.jpg', frame)
# t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
# t.start()
t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
t.setDaemon(False)
t.start()
# status = post(event=3, imagePath='fall_detection.jpg')
# print("fall")
@@ -192,3 +194,4 @@ class Fall_Detection:
frame = cv2.resize(img_rd, (640, 480))
# cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", img_rd)
return frame
# http://zhuooyu.cn:8000/api/person/old/10
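All the event reports in this commit share one shape: write the annotated frame to disk, then call post on a thread, debounced to at most one report every five seconds. A sketch of that pattern; note that the diff's Thread(target=post(...)) form actually calls post eagerly on the current thread, whereas passing target and kwargs separately, as below, is what defers it:

import threading
from datetime import datetime

from Post import post

_last_report = datetime.min

def report_event(event, image_path, min_interval=5.0):
    global _last_report
    now = datetime.now()
    if (now - _last_report).total_seconds() > min_interval:
        t = threading.Thread(target=post,
                             kwargs={'event': event, 'imagePath': image_path})
        t.daemon = False  # non-daemon: let the report finish before interpreter exit
        t.start()
        _last_report = now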

View File

@@ -1,37 +1,22 @@
# Real-time face recognition from the camera
import threading
from datetime import datetime
import dlib
import numpy as np
import cv2
import pandas as pd
import os
import time
import requests
import facenet
from model import create_model
from PIL import Image, ImageDraw, ImageFont
from Post import post
from Post import post, post_person
import smile_detection
start_time = 0
# 1. Dlib frontal face detector
# detector = dlib.get_frontal_face_detector()
# OpenCV DNN face detector
# detector = cv2.dnn.readNetFromCaffe("data/data_opencv/deploy.prototxt.txt",
# "data/data_opencv/res10_300x300_ssd_iter_140000.caffemodel")
# 2. Dlib facial landmark predictor
# predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib ResNet face recognition model, extracts a 128D feature vector
# face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
# nn4_small2 = create_model()
# nn4_small2.load_weights('weights/nn4.small2.v1.h5')
api_transfer = {'elder': 'old', 'employee': 'employee', 'volunteer': 'volunteer'}
class Face_Recognizer:
@@ -40,6 +25,8 @@ class Face_Recognizer:
self.detector = detector
self.nn4_small2 = nn4_small2
self.pre = datetime.now()
# Array holding the features of all enrolled faces
self.features_known_list = []
@@ -47,6 +34,7 @@ class Face_Recognizer:
self.loaded = False
self.name_known_cnt = 0
self.name_known_list = []
self.type_known_list = []
self.metadata = []
self.embedded = []
@@ -71,24 +59,34 @@ class Face_Recognizer:
else:
if os.path.exists("data/data_faces_from_camera/"):
self.metadata = facenet.load_metadata("data/data_faces_from_camera/")
self.name_known_cnt = self.metadata.shape[0]
self.embedded = np.zeros((self.metadata.shape[0], 128))
self.name_known_cnt = 0
for i in range(0, len(self.metadata)):
for j in range(0, len(self.metadata[i])):
self.name_known_cnt += 1
self.embedded = np.zeros((self.name_known_cnt * 8, 128))
for i, m in enumerate(self.metadata):
for j, n in enumerate(m):
for k, p in enumerate(n):
img = facenet.load_image(p.image_path())
img = facenet.load_image(p.image_path().replace("\\", "/"))
# img = align_image(img)
img = cv2.resize(img, (96, 96))
# scale RGB values to interval [0,1]
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
self.embedded[i] = self.nn4_small2.predict(np.expand_dims(img, axis=0))[0]
# self.embedded[i] = self.embedded[i] / len(m)
path = p.image_path().replace("\\", "/")
self.name_known_list.append(path.split('/')[-2])
self.type_camera_list.append(path.split('/')[-3])
self.loaded = True
self.type_known_list.append(path.split('/')[-3])
for i in range(len(self.name_known_list)):
if self.type_known_list[i] == 'elder':
type = 'old'
elif self.type_known_list[i] == 'volunteer':
type = 'employee'
self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text
self.loaded = True
return 1
else:
print('##### Warning #####', '\n')
@@ -110,7 +108,7 @@ class Face_Recognizer:
font = cv2.FONT_ITALIC
# cv2.putText(img_rd, "Face Recognizer", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(14)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 40), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
@@ -130,16 +128,23 @@ class Face_Recognizer:
# Modify the displayed names
def modify_name_camera_list(self):
# TODO: database ID
# Default known name: 1, 2, person_3
self.name_known_list[0] = '1'.encode('utf-8').decode()
self.name_known_list[1] = 'Tony Blair'.encode('utf-8').decode()
# self.name_known_list[2] = '唐保生'.encode('utf-8').decode()
# self.name_known_list[3] = '1'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
# Perform face recognition and smile detection
def process(self, img_rd):
img_with_name = img_rd
data_type_three = {
'old': 0,
'employee': 0,
'volunteer': 0,
'stranger': 0
}
# Read all enrolled faces
if self.get_face_database():
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 40), cv2.FONT_ITALIC, 0.8, (0, 255, 0), 1,
@@ -150,6 +155,7 @@ class Face_Recognizer:
self.faces_cnt = 0
self.pos_camera_list = []
self.name_camera_list = []
self.type_camera_list = []
(h, w) = img_rd.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
@@ -174,6 +180,7 @@ class Face_Recognizer:
# Determine the coordinates for the name label
# Default everyone to unknown first
self.name_camera_list.append("unknown")
self.type_camera_list.append('unknown')
# Name-label coordinates for each captured face
box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
@@ -185,6 +192,7 @@ class Face_Recognizer:
# width = (endX - startX)
img_blank = img_rd[startY:endY, startX:endX]
img_blank = img_blank[..., ::-1]
try:
# for ii in range(height):
# for jj in range(width):
@@ -200,59 +208,57 @@ class Face_Recognizer:
e_distance_list.append(facenet.distance(self.embedded[i], img))
similar_person_num = e_distance_list.index(min(e_distance_list))
# print(min(e_distance_list))
if min(e_distance_list) < 0.58:
self.name_camera_list[k] = self.name_known_list[similar_person_num % 8]
if self.type_camera_list[similar_person_num % 8] == 'elder':
mode = smile_detection.smile_detect(img_blank)
if mode == 'happy':
cv2.imwrite('smile_detection.jpg', img_rd)
cv2.rectangle(img_rd, tuple([startX, startY - 70]),
tuple([endX, startY - 35]),
(0, 215, 255), cv2.FILLED)
cv2.putText(img_rd, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
(255, 255, 255), 1, cv2.LINE_AA)
# t = threading.Thread(target=post(elder_id=self.name_camera_list[k], event=0,
# imagePath='smile_detection.jpg'))
# t.start()
# print("May be person " + str(self.name_known_list[similar_person_num]))
elif min(e_distance_list) > 0.75:
self.name_camera_list[k] = '陌生人'
cv2.imwrite('stranger_detection.jpg', img_rd)
# t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
# t.start()
else:
pass
if self.name_camera_list[k] == '陌生人':
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 0, 255), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 0, 255), cv2.FILLED)
elif self.name_camera_list[k] != 'unknown':
self.type_camera_list[k] = self.type_known_list[similar_person_num % 8]
data_type_three[api_transfer[self.type_camera_list[k]]] += 1
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 255, 0), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 255, 0), cv2.FILLED)
img_with_name = self.draw_name(img_rd)
if self.type_camera_list[k] == 'elder':
mode = smile_detection.smile_detect(img_blank)
if mode == 'happy':
cv2.rectangle(img_with_name, tuple([startX, startY - 70]),
tuple([endX, startY - 35]),
(0, 215, 255), cv2.FILLED)
cv2.putText(img_with_name, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
(255, 255, 255), 1, cv2.LINE_AA)
cv2.imwrite('smile_detection.jpg', img_with_name)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(elder_id=self.name_camera_list[k], event=0,
imagePath='smile_detection.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()
# print("May be person " + str(self.name_known_list[similar_person_num]))
elif min(e_distance_list) > 0.75:
data_type_three['stranger'] += 1
self.name_camera_list[k] = '陌生人'
cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
(0, 0, 255), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 0, 255), cv2.FILLED)
img_with_name = self.draw_name(img_rd)
cv2.imwrite('stranger_detection.jpg', img_with_name)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(event=2, imagePath='stranger_detection.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()
else:
pass
except:
continue
img_with_name = self.draw_name(img_rd)
else:
img_with_name = img_rd
# 更新 FPS / Update stream FPS
# self.update_fps()
if (datetime.now() - self.pre).total_seconds() > 5:
post_person(data_type_three)
self.pre = datetime.now()
return img_with_name
# Use OpenCV to read the camera and run process
def run(self, frame):
# cap = cv2.VideoCapture(0)
# cap.set(3, 480)
img_with_name = self.process(frame)
return img_with_name
# cap.release()
# cv2.destroyAllWindows()

View File

@@ -1,7 +1,11 @@
import threading
from datetime import datetime
from oldcare.track.centroidtracker import CentroidTracker
from oldcare.track.trackableobject import TrackableObject
from imutils.video import FPS
import numpy as np
from Post import post
import imutils
import argparse
import time
@@ -14,11 +18,7 @@ import cv2
# # Well be using a MobileNet Single Shot Detector (SSD),
# # “Single Shot Detectors for object detection”.
# model_file_path = 'data/data_opencv/MobileNetSSD_deploy.caffemodel'
skip_frames = 30 # of skip frames between detections
# Hyperparameters
# minimum probability to filter weak detections
minimum_confidence = 0.80
# The 21 object classes the detection model can recognize
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
@@ -27,6 +27,7 @@ CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"person", "pottedplant", "sheep", "sofa", "train",
"tvmonitor"]
# Load the object detection model
# net = cv2.dnn.readNetFromCaffe(prototxt_file_path, model_file_path)
@@ -38,8 +39,7 @@ class Intrusion_Detection():
# the first frame from the video)
self.W = None
self.H = None
self.pre = datetime.now()
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
@@ -56,9 +56,8 @@ class Intrusion_Detection():
# start the frames per second throughput estimator
self.fps = FPS().start()
# loop over frames from the video stream
def process(self,frame):
def process(self, frame):
# grab the next frame and handle if we are reading from either
# VideoCapture or VideoStream
@@ -70,23 +69,22 @@ class Intrusion_Detection():
# initialize the current status along with our list of bounding
# box rectangles returned by either (1) our object detector or
# (2) the correlation trackers
status = "Waiting"
rects = []
# check to see if we should run a more computationally expensive
# object detection method to aid our tracker
if self.totalFrames % skip_frames == 0:
if self.totalFrames % 20 == 0:
# set the status and initialize our new set of object trackers
status = "Detecting"
trackers = []
self.trackers = []
# convert the frame to a blob and pass the blob through the
# network and obtain the detections
blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H), 127.5)
self.net.setInput(blob)
detections = self.net.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated
@@ -95,7 +93,8 @@ class Intrusion_Detection():
# filter out weak detections by requiring a minimum
# confidence
if confidence > minimum_confidence:
if confidence > 0.5:
# extract the index of the class label from the
# detections list
idx = int(detections[0, 0, i, 1])
@@ -118,8 +117,7 @@ class Intrusion_Detection():
# add the tracker to our list of trackers so we can
# utilize it during skip frames
trackers.append(tracker)
self.trackers.append(tracker)
# otherwise, we should utilize our object *trackers* rather than
# object *detectors* to obtain a higher frame processing throughput
else:
@@ -149,10 +147,10 @@ class Intrusion_Detection():
# draw a horizontal line in the center of the frame -- once an
# object crosses this line we will determine whether they were
# moving 'up' or 'down'
# cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
# use the centroid tracker to associate the (1) old object
# centroids with (2) the newly computed object centroids
objects = self.ct.update(rects)
# loop over the tracked objects
@@ -199,6 +197,11 @@ class Intrusion_Detection():
print('[EVENT] %s, 院子, 有人闯入禁止区域!!!'
% (current_time))
cv2.imwrite('intrusion.jpg', frame)
if (datetime.now() - self.pre).total_seconds() > 5:
t = threading.Thread(target=post(event=4, imagePath='intrusion.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()
# todo insert into database
# command = '%s inserting.py --event_desc %s--event_type4 - -event_location % s' % \
@@ -236,3 +239,4 @@ class Intrusion_Detection():
# increment the total number of frames processed thus far and
# then update the FPS counter
self.totalFrames += 1
return frame

View File

@@ -1,5 +1,6 @@
# Real-time face recognition from the camera
import threading
from datetime import datetime
import dlib
import numpy as np
@@ -7,34 +8,15 @@ import cv2
import pandas as pd
import os
import time
import requests
import facenet
from PIL import Image, ImageDraw, ImageFont
from Calibration import Calibration
from Post import post
from model import create_model
import smile_detection
from Post import post, post_person
start_time = 0
# 1. Dlib frontal face detector
# detector = dlib.get_frontal_face_detector()
# OpenCV DNN face detector
# detector = cv2.dnn.readNetFromCaffe("data/data_opencv/deploy.prototxt.txt",
# "data/data_opencv/res10_300x300_ssd_iter_140000.caffemodel")
# 2. Dlib facial landmark predictor
# predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib ResNet face recognition model, extracts a 128D feature vector
# face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
# nn4_small2 = create_model()
# nn4_small2.load_weights('weights/nn4.small2.v1.h5')
api_transfer = {'elder': 'old', 'employee': 'employee', 'volunteer': 'volunteer'}
class Interaction_Detection:
@@ -42,6 +24,7 @@ class Interaction_Detection:
# 模型
self.detector = detector
self.nn4_small2 = nn4_small2
self.pre = datetime.now()
# Array holding the features of all enrolled faces
self.features_known_list = []
@@ -75,24 +58,34 @@ class Interaction_Detection:
else:
if os.path.exists("data/data_faces_from_camera/"):
self.metadata = facenet.load_metadata("data/data_faces_from_camera/")
self.name_known_cnt = self.metadata.shape[0]
self.embedded = np.zeros((self.metadata.shape[0], 128))
self.name_known_cnt = 0
for i in range(0, len(self.metadata)):
for j in range(0, len(self.metadata[i])):
self.name_known_cnt += 1
self.embedded = np.zeros((self.name_known_cnt * 8, 128))
for i, m in enumerate(self.metadata):
for j, n in enumerate(m):
for k, p in enumerate(n):
img = facenet.load_image(p.image_path())
img = facenet.load_image(p.image_path().replace("\\", "/"))
# img = align_image(img)
img = cv2.resize(img, (96, 96))
# scale RGB values to interval [0,1]
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
self.embedded[i] = self.nn4_small2.predict(np.expand_dims(img, axis=0))[0]
# self.embedded[i] = self.embedded[i] / len(m)
path = p.image_path().replace("\\", "/")
self.name_known_list.append(path.split('/')[-2])
self.type_known_list.append(path.split('/')[-3])
self.loaded = True
for i in range(len(self.name_known_list)):
if self.type_known_list[i] == 'elder':
type = 'old'
elif self.type_known_list[i] == 'volunteer':
type = 'employee'
self.name_known_list[i] = requests.get("http://zhuooyu.cn:8000/api/person/" + str(type) + "/" + str(
self.name_known_list[i]) + "/").text
self.loaded = True
return 1
else:
print('##### Warning #####', '\n')
@@ -114,7 +107,7 @@ class Interaction_Detection:
font = cv2.FONT_ITALIC
# cv2.putText(img_rd, "Face Recognizer", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(14)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 40), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
# cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
@@ -134,16 +127,22 @@ class Interaction_Detection:
# Modify the displayed names
def modify_name_camera_list(self):
# TODO: database ID
# Default known name: 1, 2, person_3
self.name_known_list[0] = '1'.encode('utf-8').decode()
self.name_known_list[1] = 'Tony Blair'.encode('utf-8').decode()
# self.name_known_list[2] = '唐保生'.encode('utf-8').decode()
# self.name_known_list[3] = '1'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
# Perform face recognition and smile detection
# Perform face recognition
def process(self, img_rd, scale):
img_with_name = img_rd
data_type_three = {
'old': 0,
'employee': 0,
'volunteer': 0,
'stranger': 0
}
# Read all enrolled faces
if self.get_face_database():
self.draw_note(img_rd)
@@ -176,6 +175,7 @@ class Interaction_Detection:
# Determine the coordinates for the name label
# Default everyone to unknown first
self.name_camera_list.append("unknown")
self.type_camera_list.append("unknown")
# Name-label coordinates for each captured face
box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
@@ -184,6 +184,7 @@ class Interaction_Detection:
[int(startX + 5), int(startY - 30)]))
img_blank = img_rd[startY:endY, startX:endX]
img_blank = img_blank[..., ::-1]
try:
# for ii in range(height):
# for jj in range(width):
@@ -204,6 +205,7 @@ class Interaction_Detection:
if min(e_distance_list) < 0.58:
self.name_camera_list[k] = self.name_known_list[similar_person_num % 8]
self.type_camera_list[k] = self.type_known_list[similar_person_num % 8]
data_type_three[api_transfer[self.type_camera_list[k]]] += 1
# Draw the bounding box
if self.name_camera_list[k] != 'unknown':
@@ -216,14 +218,19 @@ class Interaction_Detection:
continue
img_with_name = self.draw_name(img_rd)
if 'volunteer' in self.type_camera_list:
if 'volunteer' in self.type_camera_list and len(self.type_camera_list) > 1:
index = self.type_camera_list.index('volunteer')
pos_vol = self.pos_camera_list[index]
pos_vol = np.array(self.pos_camera_list[index])
for i in range(0, len(self.type_camera_list)):
if i != index:
d = scale * np.sqrt(facenet.distance(pos_vol, self.type_camera_list[i]))
if self.type_camera_list[i] == "elder":
d = scale * np.sqrt(facenet.distance(pos_vol, np.array(self.pos_camera_list[i])))
if d < 50:
pass
if (datetime.now() - self.pre).total_seconds() > 5:
cv2.imwrite("interaction.jpg", img_with_name)
t = threading.Thread(target=post(event=1, imagePath='interaction.jpg'))
t.setDaemon(False)
t.start()
self.pre = datetime.now()
else:
img_with_name = img_rd
@@ -232,7 +239,9 @@ class Interaction_Detection:
# 更新 FPS / Update stream FPS
self.update_fps()
# Distance check
if (datetime.now() - self.pre).total_seconds() > 5:
post_person(data_type_three)
self.pre = datetime.now()
return img_with_name
# Use OpenCV to read the camera and run process

View File

@@ -1,6 +1,8 @@
import os
import queue
import threading
import time
from numba import cuda
import cv2
import sys
import platform
@@ -16,27 +18,6 @@ from Camera_In_Hall import Fall_Detection
from Camera_On_Desk import Interaction_Detection
from Camera_In_Yard import Intrusion_Detection
# Import Openpose (Windows/Ubuntu/OSX)
# dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = 'D:\\BJTU\\Python\\openpose-master\\build-2017'
try:
# Windows Import
if platform == "win32":
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '\\python\\openpose\\Release')
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '\\x64\\Release;' + dir_path + '\\bin;'
import pyopenpose as op
else:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '\\python\\openpose\\Release')
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import pyopenpose as op
except ImportError as e:
print(
'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
class Live(object):
def __init__(self):
@@ -71,7 +52,8 @@ class Live(object):
# Fall detection
self.transfer_flag = False
self.Fall_Detection_on = Fall_Detection()
self.trigger = False
self.Fall_Detection_on = None
# Intrusion detection
self.net = cv2.dnn.readNetFromCaffe('data/data_opencv/MobileNetSSD_deploy.prototxt',
@@ -83,7 +65,9 @@ class Live(object):
self.out = cv2.VideoWriter('output.avi', self.fourcc, 20, (640, 480))
# Get video information
self.fps = 20 # set the frame rate
self.fps = 15 # set the frame rate
width = 640 # width
height = 480 # height
# ffmpeg command
self.command = ['ffmpeg',
@@ -91,7 +75,7 @@ class Live(object):
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
'-s', "{}x{}".format(480, 640),
'-s', "{}x{}".format(width, height),
'-r', str(self.fps),
'-i', '-',
'-c:v', 'libx264',
@@ -99,19 +83,24 @@ class Live(object):
'-preset', 'slow',
'-f', 'flv',
self.rtmpUrl]
self.recieve = None
self.pretodo = ''
self.recieve = {"todo": "reboot"}
# self.recieve = {"todo": "change", 'data': {'fuc': '1'}}
self.transfer_flag = False
self.take = False
# if trigger:
# cuda.select_device(0) # select the GPU device
# cuda.close() # release GPU resources
def read_frame(self):
# Choose which camera to read based on the operating system
if platform.system() == 'Linux': # on Linux
if platform == 'Linux': # on Linux
cap = cv2.VideoCapture(10) # bind camera number 10
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap.set(3, 640) # set the camera frame width
cap.set(4, 480) # set the camera frame height
elif platform.system() == 'Darwin': # on macOS
elif platform == 'Darwin': # on macOS
cap = cv2.VideoCapture(0) # bind camera number 0
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap.set(3, 640)
@@ -119,8 +108,8 @@ class Live(object):
else: # Windows
cap = cv2.VideoCapture(0) # bind camera number 0
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap.set(3, 640)
cap.set(4, 480)
# cap.set(3, 640)
# cap.set(4, 480)
# read webcamera
counter = 0
@@ -134,13 +123,60 @@ class Live(object):
self.cap = cv2.VideoCapture(0)
ret, frame = self.cap.read()
self.transfer_flag = False
recieve = self.ws.recv()
self.recieve = eval(recieve)
# put frame into queue
now = datetime.now()
if (now - pre).total_seconds() > 0.15:
self.frame_queue.put(frame)
pre = now
self.frame_queue.put(frame)
# if self.recieve["todo"] == "change":
# if self.recieve["data"]["fuc"] == "1":
# # Get video information
# self.fps = 5 # set the frame rate
# width = 640 # width
# height = 480 # height
#
# # ffmpeg command
# self.command = ['ffmpeg',
# '-y',
# '-f', 'rawvideo',
# '-vcodec', 'rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', "{}x{}".format(width, height),
# '-r', str(self.fps),
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'slow',
# '-f', 'flv',
# self.rtmpUrl]
#
# if (now - pre).total_seconds() > 0.14:
# self.frame_queue.put(frame)
# pre = now
# else:
# # Get video information
# self.fps = 20 # set the frame rate
# width = 640 # width
# height = 480 # height
#
# # ffmpeg command
# self.command = ['ffmpeg',
# '-y',
# '-f', 'rawvideo',
# '-vcodec', 'rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', "{}x{}".format(width, height),
# '-r', str(self.fps),
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'slow',
# '-f', 'flv',
# self.rtmpUrl]
#
# self.frame_queue.put(frame)
# pre = now
# else:
# self.frame_queue.put(frame)
# pre = now
def push_frame(self):
# Guard against command not being set when multithreading
@@ -152,47 +188,71 @@ class Live(object):
while True:
if not self.frame_queue.empty():
if self.frame_queue.qsize() > 100:
self.frame_queue.queue.clear()
continue
frame = self.frame_queue.get()
if self.recieve.todo == 'reboot':
if self.recieve["todo"] == 'reboot':
pass
elif self.recieve.todo == 'entering':
data = self.recieve.data
people_type = data.type # 0 = elder, 1 = employee, 2 = volunteer
id = data.id
elif self.recieve["todo"] == 'entering':
people_type = self.recieve["data"]["type"] # 0代表老人,1代表员工,2代表义工
id = self.recieve["data"]["id"]
self.pretodo = 'entering'
self.Face_Register_on = Face_Register(people_type, id)
elif self.recieve.todo == 'takePhoto':
if self.pretodo == 'entering':
frame = self.Face_Register_on.take_photo(frame)
elif self.pretodo == '2':
self.scale = self.Calibration_on.run(frame)
elif self.recieve.todo == 'change':
fuc = self.recieve.data.fuc # function to switch: 0 none, 1 smile detection, 2 interaction detection, 3 fall detection, 4 restricted-area intrusion
if fuc == 0:
self.out.write(frame)
elif fuc == 1:
frame = self.Face_Recognizer_on.run(frame)
elif fuc == 2:
self.pretodo = '2'
if self.scale == -1:
frame = self.Calibration_on.run(frame)
if not self.take:
self.Face_Register_on = Face_Register(people_type, id)
self.take = True
frame = self.Face_Register_on.process(frame)
elif self.recieve["todo"] == 'takePhoto':
if self.recieve["data"]["fuc"] == 'shutter':
if self.take:
frame = self.Face_Register_on.take_photo(frame)
self.take = False
else:
self.Interaction_on.process(frame)
elif fuc == 3:
frame = self.Face_Register_on.process(frame)
elif self.recieve["data"]["fuc"] == 'standard':
self.scale = self.Calibration_on.run(frame)
self.recieve = {"todo": "change", 'data': {'fuc': '14'}}
elif self.recieve["todo"] == 'change':
fuc = self.recieve["data"]["fuc"] # 更改的功能 0:无 1微笑检测 2交互检测 3摔倒检测 4禁区入侵
if fuc == '0':
self.out.write(frame)
elif fuc == '1':
frame = self.Face_Recognizer_on.process(frame)
elif fuc == '2':
if self.scale == -1:
pass
else:
frame = self.Interaction_on.process(frame, self.scale)
elif fuc == '3' and self.trigger:
if not self.transfer_flag:
self.cap = cv2.VideoCapture("test4.mp4")
ret, frame = self.cap.read()
self.transfer_flag = True
self.Fall_Detection_on.re_init()
self.Fall_Detection_on = Fall_Detection()
# self.Fall_Detection_on.re_init()
frame = self.Fall_Detection_on.run(frame)
elif fuc == 4:
elif fuc == '4':
frame = self.Intrusion_Detection_on.process(frame)
# frame = Fall_Detection_on.run(frame=frame)
p.stdin.write(frame.tobytes())
def get_result(self):
self.ws.send("Hello, World")
while True:
recieve = self.ws.recv()
print(eval(recieve))
self.recieve = eval(recieve)
if self.recieve['todo'] == 'takePhoto' and self.recieve['data']['fuc'] == 'shutter':
self.take = True
def release_gpu(self):
cuda.select_device(0) # select the GPU device
cuda.close() # release GPU resources
def run(self):
threads = [
threading.Thread(target=Live.read_frame, args=(self,)),
threading.Thread(target=Live.push_frame, args=(self,))
threading.Thread(target=Live.push_frame, args=(self,)),
threading.Thread(target=Live.get_result, args=(self,))
]
[thread.setDaemon(False) for thread in threads]
[thread.start() for thread in threads]
@@ -200,4 +260,4 @@ class Live(object):
if __name__ == "__main__":
live = Live()
live.run()
live.run()
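For reference, the push side of Live boils down to piping raw BGR frames into ffmpeg's stdin with the command list built in __init__. A standalone sketch with the flags from this diff; the RTMP URL below is a placeholder:

import subprocess
import cv2

fps, width, height = 15, 640, 480
command = ['ffmpeg', '-y',
           '-f', 'rawvideo', '-vcodec', 'rawvideo', '-pix_fmt', 'bgr24',
           '-s', '{}x{}'.format(width, height), '-r', str(fps),
           '-i', '-',
           '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-preset', 'slow',
           '-f', 'flv', 'rtmp://example.com/live/stream']  # placeholder URL

p = subprocess.Popen(command, stdin=subprocess.PIPE)
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, (width, height))  # must match the -s argument
    p.stdin.write(frame.tobytes())              # one raw bgr24 frame per write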

View File

@@ -3,6 +3,8 @@ import datetime
url = "http://192.144.229.49:8000/api/event/list"
api = "http://zhuooyu.cn:8000/api/websocket/total"
data_type_one = {
"oldperson_id": 1,
"event_type": 3, # 0代表情感检测1代表义工交互检测2代表陌生人检测3代表摔倒检测4代表禁止区域入侵检测
@@ -19,6 +21,13 @@ data_type_two = {
"event_desc": "" # 必填,事件描述
}
data_type_three = {
'old': 0,
'employee': 0,
'volunteer': 0,
'stranger': 0
}
def post(elder_id='None', event=-1, imagePath='None', volunteer='None'):
flag = 0
@@ -61,4 +70,7 @@ def post(elder_id='None', event=-1, imagePath='None', volunteer='None'):
status = requests.post(url, files=imageFile, data=data_type_one)
else:
status = requests.post(url, files=imageFile, data=data_type_two)
print(status)
def post_person(data_type_three):
status = requests.post(api, data=data_type_three)
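A short usage sketch of these two helpers, following the event-code convention documented above:

from Post import post, post_person

# report a fall (event_type 3) with the saved annotated frame
post(event=3, imagePath='fall_detection.jpg')

# report a smile (event_type 0) for a known elder
post(elder_id='1', event=0, imagePath='smile_detection.jpg')

# push per-frame counts of each person type to the websocket API
post_person({'old': 2, 'employee': 1, 'volunteer': 0, 'stranger': 0})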

View File

@@ -1,16 +1,15 @@
from imutils.video import VideoStream
import imutils
import time
import datetime
import threading
import cv2
import numpy as np
import statistics
import queue
import math
from Post import post
from PIL import ImageDraw, ImageFont
from PIL import Image
def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, centerV, alert):
def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, centerV, alert, pre):
for c in cnts:
# exclusion
if cv2.contourArea(c) < defined_min_area:
@@ -85,16 +84,16 @@ def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, cente
# print("P_FALL2: ", P_FALL)
# status display
# cv2.putText(frame, "Status : ", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# cv2.putText(frame, "Status : ", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 14)
# cv2.putText(frame, "Fall Confidence: {0:.2f} ".format(P_FALL), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
# (0, 128, 255), 2)
# cv2.putText(frame, "Angle: {0:.2f}".format(angle), (10, 220),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
# cv2.putText(frame, "AR: {0:.2f}".format(AR), (10, 237),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
# cv2.putText(frame, "Center Speed: {0:.2f}".format(centerV), (10, 256),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
# fall
if P_FALL > 0.88:
if alert >3:
if alert > 3:
# print("fall")
font = ImageFont.truetype("simsun.ttc", 30, index=1)
img_rd = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
@@ -102,10 +101,16 @@ def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, cente
draw.text((10, 10), text="Fall Detected", font=font,
fill=(255, 0, 0))
frame = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
cv2.imwrite('fall_detection.jpg', frame)
if (datetime.datetime.now() - pre).total_seconds() > 5:
t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
t.setDaemon(False)
t.start()
pre = datetime.datetime.now()
# cv2.imwrite("report.jpg", frame)
# send_alert.SendMail("report.jpg")
alert = alert + 1
else:
alert = alert + 1
return frame,alert
return frame, alert, pre

View File

@@ -51,7 +51,7 @@ width = 400
# Hyperparameters
# minimum probability to filter weak detections
minimum_confidence = 0.80
minimum_confidence = 0.50
# The 21 object classes the detection model can recognize
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
@@ -120,7 +120,7 @@ while True:
# initialize the current status along with our list of bounding
# box rectangles returned by either (1) our object detector or
# (2) the correlation trackers
status = "Waiting"
rects = []
@@ -199,10 +199,10 @@ while True:
# draw a horizontal line in the center of the frame -- once an
# object crosses this line we will determine whether they were
# moving 'up' or 'down'
# cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
# use the centroid tracker to associate the (1) old object
# centroids with (2) the newly computed object centroids
objects = ct.update(rects)
# loop over the tracked objects

View File

@@ -57,7 +57,7 @@ def load_metadata(path):
def load_image(path):
img = cv2.imread(path, 1)
img = cv2.imread(path)
# OpenCV loads images with color channels
# in BGR order. So we need to reverse them
return img[..., ::-1]
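For orientation, the facenet helpers the recognizers lean on (load_image above, plus the distance used for matching) amount to the following; distance is not shown in this diff, so the squared-L2 form here is an assumption consistent with the 0.58/0.75 thresholds:

import cv2
import numpy as np

def load_image(path):
    # OpenCV loads images in BGR order; reverse to RGB for the embedding model
    img = cv2.imread(path)
    return img[..., ::-1]

def distance(emb1, emb2):
    # assumed: squared Euclidean distance between two 128D embeddings
    return np.sum(np.square(emb1 - emb2))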

View File

@@ -55,8 +55,8 @@ def conv2d_bn(
tensor = ZeroPadding2D(padding=padding)(tensor)
if cv2_out == None:
return tensor
tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, name=layer+'_conv'+'2')(tensor)
tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)
tensor = Activation('relu')(tensor)
return tensor

View File

@@ -22,6 +22,7 @@ import queue
import frame_process
import algorithm_fall
pre = datetime.datetime.now()
try:
# Import Openpose (Windows/Ubuntu/OSX)
# dir_path = os.path.dirname(os.path.realpath(__file__))
@@ -153,7 +154,9 @@ try:
# print(v, abs(a))
if (abs(a) > 0.2) and \
(np.subtract(np.array(width), np.array(height)) > np.subtract(np.array(width0),
np.array(height0)) and np.subtract(np.array(width), np.array(height)) > 0):
np.array(
height0)) and np.subtract(
np.array(width), np.array(height)) > 0):
couter += 1
# print(np.subtract(np.array(width), np.array(height)))
# print("alarm by v and a")
@@ -175,10 +178,13 @@ try:
fill=(255, 0, 0))
img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
cv2.imwrite('fall_detection.jpg', frame)
# t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
# t.start()
# status = post(event=3, imagePath='fall_detection.jpg')
# print("fall")
if (datetime.datetime.now() - pre).total_seconds() > 5:
t = threading.Thread(target=post(event=3, imagePath='fall_detection.jpg'))
t.start()
status = post(event=3, imagePath='fall_detection.jpg')
# print("fall")
pre = datetime.datetime.now()
# print(pre)
# update variables
frame_start_time = now
@@ -203,8 +209,9 @@ try:
cnts = frame_process.get_contours(firstFrame, gray)
defined_min_area = 3000
frame, alert = algorithm_fall.fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList,
centerV, alert)
frame, alert, pre = algorithm_fall.fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList,
yList,
centerV, alert, pre)
# cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
# (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1)

View File

@@ -1,7 +1,3 @@
# -----------------------------------------------------------------------------------------
# Code taken from https://github.com/iwantooxxoox/Keras-OpenFace (with minor modifications)
# -----------------------------------------------------------------------------------------
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.layers.normalization import BatchNormalization

View File

@@ -5,9 +5,6 @@ from keras.models import load_model
import numpy as np
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input

View File

@@ -32,7 +32,8 @@ emotion_window = []
# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
video_capture = cv2.VideoCapture("testdemo.mp4")
counter = 0
while True:
bgr_image = video_capture.read()[1]
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
@@ -54,7 +55,7 @@ while True:
y1 = startY - y_off
y2 = endY + y_off
face_coordinates=np.array([startX, startY, endX-startX, endY-startY])
face_coordinates = np.array([startX, startY, endX - startX, endY - startY])
# x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
@@ -80,13 +81,13 @@ while True:
continue
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
color = emotion_probability * np.asarray((0, 255, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
color = emotion_probability * np.asarray((0, 255, 0))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
color = emotion_probability * np.asarray((0, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
color = emotion_probability * np.asarray((0, 255, 0))
else:
color = emotion_probability * np.asarray((0, 255, 0))
@@ -96,9 +97,12 @@ while True:
draw_bounding_box(face_coordinates, rgb_image, color)
draw_text(face_coordinates, rgb_image, emotion_mode,
color, 0, -45, 1, 1)
print(emotion_text)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('window_frame', bgr_image)
cv2.imwrite('smile/mine/' + str(counter) + '.png', bgr_image)
counter += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()

Binary file not shown.