refine function

This commit is contained in:
Tang1705
2020-07-06 17:05:50 +08:00
parent 80bb3c7f70
commit 1a089c878b
6 changed files with 251 additions and 89 deletions

BIN
Class.zip Normal file

Binary file not shown.

View File

@@ -8,6 +8,7 @@ import os
import time
from PIL import Image, ImageDraw, ImageFont
start_time = 0
# 1. Dlib 正向人脸检测器
# detector = dlib.get_frontal_face_detector()
@@ -75,11 +76,9 @@ class Face_Recognizer:
def return_euclidean_distance(feature_1, feature_2):
    """Return the Euclidean (L2) distance between two feature vectors.

    :param feature_1: array-like feature vector (e.g. a 128-D face descriptor).
    :param feature_2: array-like feature vector of the same length.
    :return: non-negative float distance.
    """
    # The diff artifact kept two equivalent back-to-back computations of
    # `dist`; np.linalg.norm is the idiomatic single-call equivalent of
    # sqrt(sum((a - b) ** 2)).
    dist = np.linalg.norm(np.asarray(feature_1) - np.asarray(feature_2))
    return dist
# TODO th
# 更新 FPS
def update_fps(self):
now = time.time()
@@ -111,7 +110,7 @@ class Face_Recognizer:
# TODO 数据库 ID
# Default known name: person_1, person_2, person_3
self.name_known_list[0] = '唐麒'.encode('utf-8').decode()
# self.name_known_list[1] ='李四'.encode('utf-8').decode()
self.name_known_list[1] = '段海燕'.encode('utf-8').decode()
# self.name_known_list[2] ='xx'.encode('utf-8').decode()
# self.name_known_list[3] ='xx'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
@@ -122,6 +121,7 @@ class Face_Recognizer:
if self.get_face_database():
while stream.isOpened():
flag, img_rd = stream.read()
img_with_name = img_rd
kk = cv2.waitKey(1)
# 按下 q 键退出
if kk == ord('q'):
@@ -164,7 +164,8 @@ class Face_Recognizer:
# greater than the minimum confidence
if confidence < 0.5:
continue
print("##### camera person", k + 1, "#####")
self.faces_cnt+=1
# print("##### camera person", k + 1, "#####")
# 让人名跟随在矩形框的上方
# 确定人名的位置坐标
# 先默认所有人不认识,是 unknown
@@ -175,30 +176,31 @@ class Face_Recognizer:
box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
self.pos_camera_list.append(tuple(
[int(startX+5), int(startY - 30)]))
[int(startX + 5), int(startY - 30)]))
# 5. 对于某张人脸,遍历所有存储的人脸特征
e_distance_list = []
for i in range(len(self.features_known_list)):
# 如果 person_X 数据不为空
if str(self.features_known_list[i][0]) != '0.0':
print("with person", str(i + 1), "the e distance: ", end='')
# print("with person", str(i + 1), "the e distance: ", end='')
e_distance_tmp = self.return_euclidean_distance(self.features_camera_list[k],
self.features_known_list[i])
print(e_distance_tmp)
# print(e_distance_tmp)
e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X
e_distance_list.append(999999999)
# 6. 寻找出最小的欧式距离匹配
similar_person_num = e_distance_list.index(min(e_distance_list))
print("Minimum e distance with person", self.name_known_list[similar_person_num])
# print("Minimum e distance with person", self.name_known_list[similar_person_num])
if min(e_distance_list) < 0.4:
if min(e_distance_list) < 1:
self.name_camera_list[k] = self.name_known_list[similar_person_num]
print("May be person " + str(self.name_known_list[similar_person_num]))
# print("May be person " + str(self.name_known_list[similar_person_num]))
else:
print("Unknown person")
pass
# print("Unknown person")
# 矩形框
for kk, d in enumerate(faces):
@@ -207,23 +209,24 @@ class Face_Recognizer:
(0, 255, 0), 2)
cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
(0, 255, 0), cv2.FILLED)
print('\n')
self.faces_cnt = len(faces)
# 7. 在这里更改显示的人名
self.modify_name_camera_list()
# 8. 写名字
# self.draw_name(img_rd)
img_with_name = self.draw_name(img_rd)
# print('\n')
# self.faces_cnt = faces.shape[2]
# if len(self.name_camera_list) > 0:
# 7. 在这里更改显示的人名
self.modify_name_camera_list()
# 8. 写名字
# self.draw_name(img_rd)
img_with_name = self.draw_name(img_rd)
else:
img_with_name = img_rd
print("Faces in camera now:", self.name_camera_list, "\n")
# print("Faces in camera now:", self.name_camera_list, "\n")
cv2.imshow("camera", img_with_name)
if len(img_with_name):
cv2.imshow("camera", img_with_name)
# 9. 更新 FPS / Update stream FPS
self.update_fps()
# self.update_fps()
# OpenCV 调用摄像头并进行 process
def run(self):

View File

@@ -7,7 +7,15 @@ import cv2
import os
import shutil # 读写文件
import time
import face_recognition
from PIL import ImageDraw, ImageFont
from PIL import Image
action_list = ['look_ahead', 'look_left', 'look_right', 'rise_head', 'bow_head', 'blink', 'open_mouth', 'smile', 'over']
action_map = {'look_ahead': '请看前方', 'blink': '请眨眼', 'open_mouth': '请张嘴',
'smile': '请笑一笑', 'rise_head': '请抬头',
'bow_head': '请低头', 'look_left': '请看左边',
'look_right': '请看右边', 'over': '录入完成'}
# Dlib 正向人脸检测器
# detector = dlib.get_frontal_face_detector()
@@ -30,6 +38,8 @@ class Face_Register:
self.save_flag = 1
# 之后用来检查是否先按 'n' 再按 's',即先新建文件夹再保存
self.press_n_flag = 0
# 之后用来提示动作的计数器
self.index = 0
self.frame_time = 0
self.frame_start_time = 0
@@ -76,6 +86,8 @@ class Face_Register:
# 生成的 cv2 window 上面添加说明文字
def draw_note(self, img_rd):
# 添加说明
# cv2.putText(img_rd, action_map[action_list[index]].encode('utf-8').decode(), (20, 250), self.font, 1,
# (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Face Register", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
@@ -84,6 +96,18 @@ class Face_Register:
cv2.putText(img_rd, "S: Save current face", (20, 400), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
font = ImageFont.truetype("simsun.ttc", 30, index=1)
img_rd = Image.fromarray(cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img_rd)
if self.index <= 7:
draw.text((20, 230), text=action_map[action_list[self.index]].encode('utf-8').decode(), font=font,
fill=(0, 255, 0))
else:
draw.text((20, 230), text=action_map[action_list[8]].encode('utf-8').decode(), font=font,
fill=(0, 255, 0))
img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
return img_rd
# 获取人脸
def process(self, stream):
# 1. 新建储存人脸图像文件目录
@@ -96,6 +120,7 @@ class Face_Register:
self.check_existing_faces_cnt()
while stream.isOpened():
self.faces_cnt = 0
flag, img_rd = stream.read() # Get camera video stream
kk = cv2.waitKey(1)
@@ -115,6 +140,7 @@ class Face_Register:
print("新建的人脸文件夹 / Create folders: ", current_face_dir)
self.ss_cnt = 0 # 将人脸计数器清零
self.index = 0
self.press_n_flag = 1 # 已经按下 'n'
# 5. 检测到人脸
@@ -129,6 +155,8 @@ class Face_Register:
if confidence < 0.5:
continue
self.faces_cnt += 1
# compute the (x, y)-coordinates of the bounding box for the
# object
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
@@ -165,18 +193,20 @@ class Face_Register:
# 检查有没有先按'n'新建文件夹
if self.press_n_flag:
self.ss_cnt += 1
for ii in range(height * 2):
for jj in range(width * 2):
img_blank[ii][jj] = img_rd[startY - hh + ii][startX - ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
print("写入本地 / Save into",
str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
if self.index<=7:
for ii in range(height * 2):
for jj in range(width * 2):
img_blank[ii][jj] = img_rd[startY - hh + ii][startX - ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
print("写入本地 / Save into",
str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
self.index += 1
else:
print("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
self.faces_cnt = len(faces)
# self.faces_cnt = len(faces)
# 9. 生成的窗口添加说明文字
self.draw_note(img_rd)
img_rd = self.draw_note(img_rd)
# 10. 按下 'q' 键退出
if kk == ord('q'):
@@ -200,4 +230,4 @@ def main():
if __name__ == '__main__':
main()
main()

View File

@@ -6,44 +6,45 @@ import numpy as np
import statistics
import queue
import math
from PIL import ImageDraw, ImageFont
from PIL import Image
def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, centerV, alert):
for c in cnts:
#exclusion
# exclusion
if cv2.contourArea(c) < defined_min_area:
continue
# outer bounding box
(x_b, y_b, w_b, h_b) = cv2.boundingRect(c)
cv2.rectangle(frame, (x_b, y_b), (x_b + w_b, y_b + h_b), (0, 255, 255), 2) # 黄色矩形
cv2.rectangle(frame, (x_b, y_b), (x_b + w_b, y_b + h_b), (0, 255, 255), 2) # 黄色矩形
#rotating bounding box
rect = cv2.minAreaRect(c) # 得到最小外接矩形的(中心(x,y), (宽,高), 旋转角度)
box = cv2.boxPoints(rect) # 获取最小外接矩形的4个顶点坐标
# rotating bounding box
rect = cv2.minAreaRect(c) # 得到最小外接矩形的(中心(x,y), (宽,高), 旋转角度)
box = cv2.boxPoints(rect) # 获取最小外接矩形的4个顶点坐标
box = np.int0(box)
cv2.drawContours(frame,[box],0,(0,0,255),2)
cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
#averaging line
rows,cols = frame.shape[:2]
[vx,vy,x,y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
lefty = (-x * vy/vx) + y
righty =((cols-x) * vy/vx)+y
cv2.line(frame,(cols-1,righty),(0,lefty),(255,0,0),2)
# averaging line
rows, cols = frame.shape[:2]
[vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
lefty = (-x * vy / vx) + y
righty = ((cols - x) * vy / vx) + y
cv2.line(frame, (cols - 1, righty), (0, lefty), (255, 0, 0), 2)
#ellipse
# ellipse
elps = cv2.fitEllipse(c)
(x, y), (MA, ma), angle = cv2.fitEllipse(c)
cv2.ellipse(frame, elps,(255,0,0),3) #red
cv2.ellipse(frame, elps, (255, 0, 0), 3) # red
#Aspect Ratio
AR = MA/ma
# Aspect Ratio
AR = MA / ma
#Center Speed - acceleration
# Center Speed - acceleration
prevX = 0.0
prevY = 0.0
centerSpeed =0
centerSpeed = 0
if xList.full():
prevX = statistics.median(list(xList.queue))
prevY = statistics.median(list(yList.queue))
@@ -54,49 +55,57 @@ def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList, cente
yList.put(elps[0][1])
X = statistics.median(list(xList.queue))
Y = statistics.median(list(yList.queue))
if xList.full():
dx = abs(prevX-X)
dy = abs(prevY-Y)
centerV = math.sqrt(dx**2+dy**2)
# calculate probabilities for the 4 features
pAngle = (abs(angle-90)-50)/10
pAngle = 1 / (math.exp(pAngle)+1)
pAR = 10*AR - 5
pAR = 1 / (math.exp(pAR) + 1)
ACS = centerV - 9
ACS = 1 / (math.exp(ACS) + 1)
if xList.full():
dx = abs(prevX - X)
dy = abs(prevY - Y)
centerV = math.sqrt(dx ** 2 + dy ** 2)
# calculate probabilities for the 4 features
pAngle = (abs(angle - 90) - 50) / 10
pAngle = 1 / (math.exp(pAngle) + 1)
pAR = 10 * AR - 5
pAR = 1 / (math.exp(pAR) + 1)
ACS = centerV - 9
try:
ACS = 1 / (math.exp(ACS) + 1)
except:
ACS = 1 / (float('inf') + 1)
# print("pAngle : ", pAngle)
# print("pAR : ", pAR)
# print("ACS : ", ACS)
#confidence
# confidence
P_FALL = pAngle * pAR * ACS + 0.5
# print("P_FALL1 : ", P_FALL)
P_FALL = 1/ (math.exp(-(P_FALL-0.65)*10)+1)
P_FALL = 1 / (math.exp(-(P_FALL - 0.65) * 10) + 1)
# print("P_FALL2: ", P_FALL)
#status display
cv2.putText(frame, "Status : ", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.putText(frame, "Fall Confidence: {0:.2f} ".format(P_FALL), (10,50),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 128, 255), 2)
# status display
# cv2.putText(frame, "Status : ", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# cv2.putText(frame, "Fall Confidence: {0:.2f} ".format(P_FALL), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
# (0, 128, 255), 2)
# cv2.putText(frame, "Angle: {0:.2f}".format(angle), (10, 220),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
# cv2.putText(frame, "AR: {0:.2f}".format(AR), (10, 237),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
# cv2.putText(frame, "Center Speed: {0:.2f}".format(centerV), (10, 256),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
#fall
# fall
if P_FALL > 0.88:
if alert >= 8:
print("fall")
cv2.putText(frame, " Fall Detected", (82, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
if alert >3:
# print("fall")
font = ImageFont.truetype("simsun.ttc", 30, index=1)
img_rd = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img_rd)
draw.text((10, 10), text="Fall Detected", font=font,
fill=(255, 0, 0))
frame = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
# cv2.imwrite("report.jpg", frame)
# send_alert.SendMail("report.jpg")
alert = alert + 1
else:
alert = alert + 1
return alert
return frame,alert

View File

@@ -0,0 +1,110 @@
from sklearn.externals import joblib
import ML_ways_sklearn
import dlib
import numpy as np
import cv2
detector = dlib.get_frontal_face_detector()
# OpenCV DNN face detector
# detector = cv2.dnn.readNetFromCaffe("data/data_opencv/deploy.prototxt.txt",
# "data/data_opencv/res10_300x300_ssd_iter_140000.caffemodel")
predictor = dlib.shape_predictor('data/data_dlib_model/shape_predictor_68_face_landmarks.dat')
# OpenCV 调用摄像头
cap = cv2.VideoCapture(0)
# 设置视频参数
cap.set(3, 480)
def get_features(img_rd):
    """Extract the 40-D lip feature from the first face in an image.

    Collects the (x, y) coordinates of dlib landmark points 49-68
    (indices 48-67), i.e. 20 points / 40 values in all.

    :param img_rd: camera frame as delivered by ``cv2.VideoCapture``.
    :return: flat list ``[x49, y49, ..., x68, y68]``; empty list when the
             detector finds no face (the original raised IndexError here).
    """
    # Grayscale copy for the dlib face detector.
    # NOTE(review): OpenCV frames are BGR, so COLOR_RGB2GRAY swaps the
    # channel weights — kept as-is to preserve behavior; confirm intended.
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
    faces = detector(img_gray, 0)
    if len(faces) == 0:
        # Robustness: no face -> no feature, instead of crashing on faces[0].
        return []
    # 68 landmark (x, y) pairs of the first detected face; plain tuples
    # replace the deprecated np.matrix round-trip of the original.
    positions_68_arr = [(p.x, p.y) for p in predictor(img_rd, faces[0]).parts()]
    # Flatten lip points 49-68 into [x, y, x, y, ...].
    positions_lip_arr = []
    for x, y in positions_68_arr[48:68]:
        positions_lip_arr.append(x)
        positions_lip_arr.append(y)
    return positions_lip_arr
# Path of the pre-trained classifier models.
path_models = "data/data_models/"


def _label(model, scaler, features):
    # Scale the 40-D lip feature and map the 0/1 prediction to a
    # human-readable string, exactly as the original inline code did.
    x_test = scaler.transform([features])
    return str(model.predict(x_test)[0]).replace('0', "no smile").replace('1', "with smile")


# Load every classifier and build its scaler ONCE, before the capture loop.
# The original re-read all four model files from disk (joblib.load) and
# rebuilt all four scalers on EVERY camera frame.
_models = [
    ("LR", joblib.load(path_models + "model_LR.m"), ML_ways_sklearn.model_LR()),
    ("LSVC", joblib.load(path_models + "model_LSVC.m"), ML_ways_sklearn.model_LSVC()),
    ("MLPC", joblib.load(path_models + "model_MLPC.m"), ML_ways_sklearn.model_MLPC()),
    ("SGDC", joblib.load(path_models + "model_SGDC.m"), ML_ways_sklearn.model_SGDC()),
]

while cap.isOpened():
    # 480 height * 640 width
    flag, img_rd = cap.read()
    kk = cv2.waitKey(1)
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
    # Faces detected in the current frame.
    faces = detector(img_gray, 0)
    if len(faces) != 0:
        # Extract the single 40-D lip feature and run it through each model.
        positions_lip_test = get_features(img_rd)
        for name, model, scaler in _models:
            print(name + ":", _label(model, scaler, positions_lip_test))
        print('\n')
    # Press 'q' to quit.
    if kk == ord('q'):
        break
    # Window display.
    # cv2.namedWindow("camera", 0)  # uncomment for a resizable window
    cv2.imshow("camera", img_rd)

# Release the camera.
cap.release()
# Destroy the created windows.
cv2.destroyAllWindows()

View File

@@ -9,6 +9,9 @@ from sys import platform
import argparse
import numpy as np
from PIL import ImageDraw, ImageFont
from PIL import Image
from imutils.video import VideoStream
import datetime
import imutils
@@ -39,7 +42,7 @@ try:
raise e
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("demo1.mp4")
cap = cv2.VideoCapture("test4.mp4")
_, frame = cap.read()
cv2.imwrite('fall_detection.jpg', frame)
@@ -113,6 +116,7 @@ try:
imageToProcess = cv2.imread(args[0].image_path)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop([datum])
img_rd = datum.cvOutputData
# Display Image
# print("Body keypoints: \n" + str(datum.poseKeypoints))
@@ -147,7 +151,7 @@ try:
np.array(height0))):
couter += 1
# print("alarm by v and a")
elif (width > height and (x[8] != 0 or x[9] != 0 or x[12] != 0)):
elif (width > height and (x[8] != 0 or x[9] != 0 or x[12] != 0) and v < 0.41):
couter += 1
# print("alarm by w and h")
else:
@@ -157,8 +161,15 @@ try:
couter = 0
error = 0
if couter > 2:
print("alarm")
if couter > 3:
font = ImageFont.truetype("simsun.ttc", 30, index=1)
img_rd = Image.fromarray(cv2.cvtColor(datum.cvOutputData, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img_rd)
draw.text((10, 10), text="Fall Detected", font=font,
fill=(255, 0, 0))
img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
# print("fall")
# update variables
frame_start_time = now
@@ -183,8 +194,8 @@ try:
cnts = frame_process.get_contours(firstFrame, gray)
defined_min_area = 3000
alert = algorithm_fall.fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList,
centerV, alert)
frame, alert = algorithm_fall.fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList,
centerV, alert)
# cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
# (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1)
@@ -192,9 +203,8 @@ try:
cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", frame)
continue
cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", datum.cvOutputData)
cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", img_rd)
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print(e)
sys.exit(-1)