Triggering Events with Facial Recognition

Greetings!!

In this article, I'll be using the LBPH face recognizer to train a model on my face. Once the model recognizes an individual, we can trigger all sorts of events, depending on the technology we integrate. Here,

📌 When it recognizes your face —
👉 It sends a mail to your mail id saying "this is the face of your_name".
👉 It also sends a WhatsApp message to your friend; it can be anything.

📌 When it recognizes a second face (it can be your friend's or a family member's face) —
👉 Create an EC2 instance in AWS using the CLI.
👉 Create a 5GB EBS volume and attach it to the instance.

For the AWS provisioning I've used Terraform.
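
Before diving in, here is a minimal sketch (not the article's final code) of the idea: the recognizer returns a numeric label, and each trained label can be mapped to its own set of events. The handler names below are placeholders; the real triggers are implemented later in this article.

# Sketch only: map each trained label to the events it should trigger.
def send_mail_and_whatsapp():      # events for label 0 (my face)
    pass

def provision_aws():               # events for label 1 (friend's face)
    pass

event_map = {0: send_mail_and_whatsapp, 1: provision_aws}

def trigger(label):
    handler = event_map.get(label)
    if handler is not None:
        handler()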

First Step:~

Collecting the dataset: capturing face images from the webcam using the cv2 module.

import cv2
import numpy as np

# Load the Haar face classifier
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_extractor(img):
    # Detects a face and returns the cropped face;
    # if no face is detected, it returns None
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    if len(faces) == 0:
        return None

    # Crop the detected face
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

# Initialize the webcam
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from the webcam input
while True:
    ret, frame = cap.read()
    face = face_extractor(frame)
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        # Save the file in the dataset directory with a unique name
        file_name_path = 'E://ARTH//Summer//workspace//CV//faceRecog//faces//kunal' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)

        # Put the count on the image and display the live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")

    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")

This will collect the dataset: 100 cropped, grayscale face images saved to the faces directory.
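
As a quick sanity check (assuming the same data path as in the script above), you can confirm that all 100 cropped samples actually landed on disk:

from os import listdir
from os.path import isfile, join

data_path = 'E://ARTH//Summer//workspace//CV//faceRecog//faces//'
samples = [f for f in listdir(data_path) if isfile(join(data_path, f))]
print(len(samples), "face samples collected")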

Second Step:~

Now, from the dataset we've just collected, we'll train a model using the LBPH face recognizer.

import cv2
import numpy as np
from os import listdir
from os.path import isfile, join

print(cv2.__version__)

# Path to the training data we collected in the previous step
data_path = 'E://ARTH//Summer//workspace//CV//faceRecog//faces//'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]

# Create arrays for the training data and labels
Training_Data, Labels = [], []

# Open each training image in the data path and append it with its label
for i, files in enumerate(onlyfiles):
    image_path = data_path + onlyfiles[i]
    images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    Training_Data.append(np.asarray(images, dtype=np.uint8))
    Labels.append(i)

# Convert the labels to a numpy array
Labels = np.asarray(Labels, dtype=np.int32)

# Initialize the LBPH facial recognizer (requires opencv-contrib-python)
# NOTE: For OpenCV 3.0 use cv2.face.createLBPHFaceRecognizer()
kunal_model = cv2.face.LBPHFaceRecognizer_create()

# Train the model
kunal_model.train(np.asarray(Training_Data), np.asarray(Labels))
print("Model trained successfully")

Third Step:~

Setting up the SMTP connection details, credentials, and the mail message (the mail itself is sent from the trigger code in the last step).

import smtplib, ssl, getpass

port = 465  # For SSL
password = getpass.getpass("Type your password and press enter: ")
smtp_server = "smtp.gmail.com"
sender_email = "ghostadmi00@gmail.com"
receiver_email = "maheshwarikunal8@gmail.com"
message = """\
Subject: Hi there

Hello Kunal"""

# Create a secure SSL context and verify that the login works
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
    server.login(sender_email, password)
    # The mail itself is sent from the trigger code below, once a face is recognized
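
One way to keep the recognition loop in the next step tidy would be to wrap the send in a small helper that reuses the variables defined above; this is just a sketch, the article's final code opens the SMTP connection inline instead.

def send_alert_mail():
    # Reuses smtp_server, port, sender_email, receiver_email, password, message
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)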

Now, using the trained model, I'll trigger the events described in the task above.

import cv2
import numpy as np
import smtplib, ssl
import pywhatkit, os

# smtp_server, port, sender_email, receiver_email, password and message
# are the variables defined in the previous step

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_detector(img, size=0.5):
    # Convert the image to grayscale and detect faces
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi = img[y:y+h, x:x+w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi

# Open the webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)

    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        # Pass the face to the prediction model;
        # "results" is a tuple containing the label and the confidence value
        results = kunal_model.predict(face)
        print(results)

        if results[1] < 500:
            confidence = int(100 * (1 - (results[1]) / 400))
            display_string = str(confidence) + '% Confident it is User'
            cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)

            if confidence > 85:
                cv2.putText(image, "Hey Kunal", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Recognition', image)

                # Sending the email
                context = ssl.create_default_context()
                with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
                    server.login(sender_email, password)
                    server.sendmail(sender_email, receiver_email, message)

                # Sending the WhatsApp message
                pywhatkit.sendwhatmsg_instantly('+919829862121', 'Your Welcome')

                # Provisioning the EC2 instance and EBS volume using Terraform
                os.system(r'terraform -chdir=E:\ARTH\Summer\workspace\CV\faceRecog\terr init')
                os.system(r'terraform -chdir=E:\ARTH\Summer\workspace\CV\faceRecog\terr apply --auto-approve')

                # The same provisioning can be done with AWS CLI commands instead:
                # os.system('aws ec2 run-instances --image-id ami-0ad704c126371a549 --instance-type t2.micro --subnet-id subnet-36edd75e')
                # os.system('aws ec2 create-volume --availability-zone ap-south-1a --size 5')
                # os.system('aws ec2 attach-volume --device /dev/sdh --instance-id i-01254d7a21ee38bae --volume-id vol-07efaf4c26997a2d6')
                break
            else:
                cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                cv2.imshow('Face Recognition', image)

    except:
        cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)
        pass

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Using smtplib it sends the email, and using pywhatkit it sends the WhatsApp message.

And using Terraform it provisions the infrastructure described above.
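
If you'd rather not shell out to the AWS CLI or Terraform from Python, the same two resources could also be created with boto3. This is only a sketch reusing the AMI, subnet, and availability zone from the commented-out CLI commands above, and it assumes your AWS credentials are already configured:

import boto3

ec2 = boto3.client('ec2', region_name='ap-south-1')

# Launch the t2.micro instance (same AMI and subnet as the CLI commands above)
reservation = ec2.run_instances(
    ImageId='ami-0ad704c126371a549',
    InstanceType='t2.micro',
    SubnetId='subnet-36edd75e',
    MinCount=1,
    MaxCount=1,
)
instance_id = reservation['Instances'][0]['InstanceId']
ec2.get_waiter('instance_running').wait(InstanceIds=[instance_id])

# Create a 5GB EBS volume in the same availability zone and attach it
volume = ec2.create_volume(AvailabilityZone='ap-south-1a', Size=5)
ec2.get_waiter('volume_available').wait(VolumeIds=[volume['VolumeId']])
ec2.attach_volume(Device='/dev/sdh', InstanceId=instance_id, VolumeId=volume['VolumeId'])
print("Instance", instance_id, "is running with a 5GB volume attached")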

So, as soon as it detects my face, it'll trigger:

The email sent from the code:

The WhatsApp message:

The EC2 instance, with 5GB of storage attached to it:~

That's it for now. Thank you!
