Continuing from the previous article: now that we have a trained model, we can use it for face recognition. Of course, recognition is limited to the few people whose photos I collected.

In addition, I built a simple interface. This system runs on Windows — unlike the previous training project, which was built on Ubuntu — so I created a new project. It uses Keras 2.2.4, OpenCV 4.1, Python 3.6.7 and PyQt5.

OK, let's get to work.

1. Drawing the interface

There is not much to say about this step. I drew the interface with Qt Designer; it looks like the figure below — a QLabel on the left displays the image captured by the camera, and there are three buttons on the right.

The code generated automatically by Qt Designer is as follows:
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'D:\exceise\main_window.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    """Auto-generated UI: a QLabel for the camera feed plus three buttons."""

    def setupUi(self, MainWindow):
        """Build the widget tree and geometry for the main window."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1024, 768)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Three action buttons stacked on the right-hand side.
        self.FaceCollect = QtWidgets.QPushButton(self.centralwidget)
        self.FaceCollect.setGeometry(QtCore.QRect(790, 130, 151, 51))
        self.FaceCollect.setObjectName("FaceCollect")
        self.OpenCamera = QtWidgets.QPushButton(self.centralwidget)
        self.OpenCamera.setGeometry(QtCore.QRect(790, 40, 151, 51))
        self.OpenCamera.setObjectName("OpenCamera")
        self.FaceIdentify = QtWidgets.QPushButton(self.centralwidget)
        self.FaceIdentify.setGeometry(QtCore.QRect(790, 220, 151, 51))
        self.FaceIdentify.setObjectName("FaceIdentify")
        # Large label on the left that displays the captured frames.
        self.ImageView = QtWidgets.QLabel(self.centralwidget)
        self.ImageView.setGeometry(QtCore.QRect(30, 20, 711, 681))
        self.ImageView.setObjectName("ImageView")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1024, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install the translated display texts on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", " Face recognition "))
        self.FaceCollect.setText(_translate("MainWindow", " Face collection "))
        self.OpenCamera.setText(_translate("MainWindow", " Turn on the camera "))
        self.FaceIdentify.setText(_translate("MainWindow", " Turn on the camera "))
        self.ImageView.setText(_translate("MainWindow", ""))


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
I did not want to modify the Qt Designer-generated code directly, so I wrote a window class that inherits from the auto-generated window class.

In the code below, the center_window function centers the window on the screen automatically, closeEvent handles the window-close event, and slot_init initializes the slot connections.

 
import sys

import cv2
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import *
from keras.models import load_model

from windows.main_window import Ui_MainWindow  # import the generated GUI class


class Window1(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main application window: camera display plus a close confirmation."""

    color = (0, 255, 0)  # colour of the rectangle drawn around detected faces
    # Raw string: "\C" is an invalid escape sequence and raises a
    # DeprecationWarning on Python 3.6+ (will become an error later).
    model = load_model(r"model\CNN.model")

    def __init__(self):
        super(Window1, self).__init__()
        self.setupUi(self)
        self.center_window()          # center the window on screen (defined in the full version)
        self.timer_camera = QTimer()  # drives periodic frame grabbing
        self.cap = cv2.VideoCapture()
        self.CAM_NUM = 0              # default camera device index
        self.slot_init()              # connect signals to slots (defined in the full version)

    def closeEvent(self, event):
        """Ask for confirmation; release the camera and timer before closing.

        BUG FIX: ``QMessageBox.exec_()`` returns an opaque button code, never
        a ``ButtonRole``, so the original comparison against ``RejectRole``
        could never be true and "cancel" never ignored the event.  Compare
        the clicked button object via ``clickedButton()`` instead.
        """
        ok = QtWidgets.QPushButton()
        cancel = QtWidgets.QPushButton()
        msg = QtWidgets.QMessageBox(
            QtWidgets.QMessageBox.Warning, u" close ", u" Close or not !")
        msg.addButton(ok, QtWidgets.QMessageBox.ActionRole)
        msg.addButton(cancel, QtWidgets.QMessageBox.RejectRole)
        ok.setText(u' determine ')
        cancel.setText(u' cancel ')
        msg.exec_()
        if msg.clickedButton() is cancel:
            event.ignore()
        else:
            # Release hardware resources before the window goes away.
            if self.cap.isOpened():
                self.cap.release()
            if self.timer_camera.isActive():
                self.timer_camera.stop()
            event.accept()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = Window1()
    window.show()
    sys.exit(app.exec_())
Next, let me explain the principle of displaying the camera feed in the Qt window — once you understand it, writing the code is straightforward. OpenCV opens the camera and reads frames one by one, which are then rendered onto the QLabel. A QTimer fires at a fixed interval; on each tick we read one frame from the camera and display it on the QLabel. The code is as follows:
def show_camera(self):
    """Grab one frame, detect faces, classify each face, display the frame.

    Runs on every QTimer tick (roughly every 30 ms), so per-frame work
    is kept to a minimum.
    """
    # Load the Haar cascade once and cache it on the instance instead of
    # re-reading the XML file on every timer tick.
    if not hasattr(self, '_classifier'):
        self._classifier = cv2.CascadeClassifier(
            r"..\haarcascade_frontalface_alt2.xml")
    # flag is the read status; self.image is the BGR frame from the camera.
    flag, self.image = self.cap.read()
    if not flag:
        # Camera not ready or disconnected: skip this frame instead of
        # crashing in cvtColor on a None image.
        return
    # QImage expects RGB while OpenCV delivers BGR.
    show = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
    # Detect on a grayscale copy (as the original comment intended):
    # it reduces the computation and matches how Haar cascades are used.
    gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    # detectMultiScale returns the detected faces as Rect(x, y, w, h),
    # where (x, y) is the top-left corner.  scaleFactor is how much the
    # search scale shrinks per pass; minNeighbors/minSize filter noise.
    rects = self._classifier.detectMultiScale(
        gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    for (x, y, w, h) in rects:
        # Classify the face REGION (not the whole frame, as the original
        # code did) — the model expects a cropped 128x128 face image.
        # Crop before drawing so the green box is not part of the input.
        face = cv2.resize(show[y:y + h, x:x + w], (128, 128))
        face = face.reshape(1, 128, 128, 3).astype('float32')
        predicted = self.model.predict(face)
        # Draw a green rectangle around the detected face.
        cv2.rectangle(show, (x, y), (x + w, y + h), self.color, 2)
    # QLabel cannot display a numpy image directly; wrap it in a QImage.
    showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                             QtGui.QImage.Format_RGB888)
    self.ImageView.setPixmap(QtGui.QPixmap.fromImage(showImage))

The code above performs the face detection — yes, that is all there is to it. If you step through it in a debugger you will find that predicted[4] = 1. That is correct: as mentioned in the previous article, I am number 4. You might ask why I did not pop up a QMessageBox showing the recognized name once label 4 is found. The answer is that I was lazy and did not feel like it — not a good excuse, I know!
# Send the detected face to the trained model for recognition
img = cv2.resize(show, (128, 128))
img = img.reshape(1, 128, 128, 3).astype('float32')
predicted = self.model.predict(img)
You also need to implement the button-click event:
def OpenCameraButtonClick(self):
    """Toggle the camera: start capture and the timer, or stop and clear.

    Bound to the OpenCamera button; the button text reflects the state.
    """
    if not self.timer_camera.isActive():
        if not self.cap.open(self.CAM_NUM):
            # open() failed: device missing or already in use.
            QtWidgets.QMessageBox.warning(
                self, u"Warning",
                u" Please check whether the camera is connected to the computer correctly ",
                buttons=QtWidgets.QMessageBox.Ok,
                defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            self.timer_camera.start(30)  # one frame roughly every 30 ms
            self.OpenCamera.setText(u' Turn off the camera ')
    else:
        self.timer_camera.stop()
        self.cap.release()
        self.ImageView.clear()
        self.OpenCamera.setText(u' Turn on the camera ')
Overall code :
import sys

import cv2
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import *
from keras.models import load_model

from windows.main_window import Ui_MainWindow  # import the generated GUI class


class Window1(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main window: camera feed with face detection and recognition."""

    color = (0, 255, 0)  # colour of the rectangle drawn around detected faces
    # Raw strings: "\C" and "\h" are invalid/fragile escape sequences in
    # regular string literals (DeprecationWarning on Python 3.6+).
    model = load_model(r"..\model\CNN.model")
    # Load the Haar cascade once at class creation instead of re-reading
    # the XML file on every timer tick in show_camera().
    classfier = cv2.CascadeClassifier(r"..\haarcascade_frontalface_alt2.xml")

    def __init__(self):
        super(Window1, self).__init__()
        self.setupUi(self)
        self.center_window()
        self.timer_camera = QTimer()  # drives periodic frame grabbing
        self.cap = cv2.VideoCapture()
        self.CAM_NUM = 0              # default camera device index
        self.slot_init()

    def center_window(self):
        """Center the window on the desktop."""
        desktop_geometry = QtWidgets.QApplication.desktop()  # screen geometry
        main_window_width = desktop_geometry.width()    # screen width
        main_window_height = desktop_geometry.height()  # screen height
        rect = self.geometry()                          # current window geometry
        window_width = rect.width()
        window_height = rect.height()
        # Top-left corner that centers the window on the screen.
        x = (main_window_width - window_width) // 2
        y = (main_window_height - window_height) // 2
        self.setGeometry(x, y, window_width, window_height)

    def slot_init(self):
        """Connect signals to slots: button click and timer tick."""
        self.OpenCamera.clicked.connect(self.OpenCameraButtonClick)
        self.timer_camera.timeout.connect(self.show_camera)

    def OpenCameraButtonClick(self):
        """Toggle the camera: start capture and the timer, or stop and clear."""
        if not self.timer_camera.isActive():
            if not self.cap.open(self.CAM_NUM):
                # open() failed: device missing or already in use.
                QtWidgets.QMessageBox.warning(
                    self, u"Warning",
                    u" Please check whether the camera is connected to the computer correctly ",
                    buttons=QtWidgets.QMessageBox.Ok,
                    defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(30)  # one frame roughly every 30 ms
                self.OpenCamera.setText(u' Turn off the camera ')
        else:
            self.timer_camera.stop()
            self.cap.release()
            self.ImageView.clear()
            self.OpenCamera.setText(u' Turn on the camera ')

    def show_camera(self):
        """Grab one frame, detect faces, classify each face, display it."""
        # flag is the read status; self.image is the BGR frame (Mat type).
        flag, self.image = self.cap.read()
        if not flag:
            # Camera not ready or disconnected: skip this frame instead of
            # crashing in cvtColor on a None image.
            return
        # QImage expects RGB while OpenCV delivers BGR.
        show = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        # Detect on a grayscale copy (as the original comment intended):
        # cheaper, and what Haar cascades are designed for.
        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        # detectMultiScale returns detected faces as Rect(x, y, w, h) with
        # (x, y) the top-left corner.  scaleFactor is the per-pass scale
        # step; minNeighbors/minSize reject spurious small detections.
        rects = self.classfier.detectMultiScale(
            gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
        for (x, y, w, h) in rects:
            # Classify the face REGION, not the whole frame — the model
            # expects a cropped 128x128 face image.  Crop before drawing
            # so the green box pixels are not part of the model input.
            face = cv2.resize(show[y:y + h, x:x + w], (128, 128))
            face = face.reshape(1, 128, 128, 3).astype('float32')
            predicted = self.model.predict(face)
            # Draw a green rectangle around the detected face.
            cv2.rectangle(show, (x, y), (x + w, y + h), self.color, 2)
        # QLabel cannot display a numpy image directly; wrap it in a QImage.
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.ImageView.setPixmap(QtGui.QPixmap.fromImage(showImage))

    def closeEvent(self, event):
        """Ask for confirmation; release the camera and timer before closing.

        BUG FIX: ``QMessageBox.exec_()`` returns an opaque button code,
        never a ``ButtonRole``, so the original comparison against
        ``RejectRole`` could never match and "cancel" never ignored the
        event.  Compare the clicked button object instead.
        """
        ok = QtWidgets.QPushButton()
        cancel = QtWidgets.QPushButton()
        msg = QtWidgets.QMessageBox(
            QtWidgets.QMessageBox.Warning, u" close ", u" Close or not !")
        msg.addButton(ok, QtWidgets.QMessageBox.ActionRole)
        msg.addButton(cancel, QtWidgets.QMessageBox.RejectRole)
        ok.setText(u' determine ')
        cancel.setText(u' cancel ')
        msg.exec_()
        if msg.clickedButton() is cancel:
            event.ignore()
        else:
            # Release hardware resources before the window goes away.
            if self.cap.isOpened():
                self.cap.release()
            if self.timer_camera.isActive():
                self.timer_camera.stop()
            event.accept()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = Window1()
    window.show()
    sys.exit(app.exec_())
 

Technology