Objective: Build a system that can recognize emotions from facial expressions using AI.
Problem Statement: Understanding human emotions is crucial in many fields, including mental health care, customer service, and human-computer interaction. However, detecting emotions in real time through traditional means, such as verbal communication or self-reporting, is often inefficient and subjective. This project aims to create an AI-powered emotion recognition system that uses facial expression analysis to automatically detect and classify emotions from live video input. By leveraging deep learning and computer vision, the system will enhance emotional awareness in applications like customer support systems, virtual learning environments, and therapeutic monitoring, providing a more responsive and empathetic interaction experience.
Key Tools:
- OpenCV (cv2) for video capture, Haar-cascade face detection, and on-screen display
- Keras for loading and running a pre-trained emotion classification model (e.g. one trained on FER2013)
- face_recognition or dlib as an alternative for detecting facial features (a minimal sketch follows this list)
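
The main script below uses OpenCV's bundled Haar cascade for face detection because it is fast enough for real-time video. If face_recognition is installed, it can be swapped in for more robust detection. The following is a minimal sketch, assuming a BGR image loaded with OpenCV; the 'sample.jpg' filename is only a placeholder:

import cv2
import face_recognition

frame = cv2.imread('sample.jpg')  # placeholder for a webcam frame
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB

# face_locations returns one (top, right, bottom, left) box per detected face
for (top, right, bottom, left) in face_recognition.face_locations(rgb_frame):
    cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)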
Python Code:
import cv2
import numpy as np
from keras.models import load_model
# Load a pre-trained emotion recognition model (e.g. one trained on FER2013)
model = load_model('emotion_model.hdf5')
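# Note: 'emotion_model.hdf5' is an assumed filename; any Keras model trained on
# FER2013 that takes 48x48x1 grayscale input and outputs seven class
# probabilities will work. Adjust the path and emotion_classes to match yours.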
# Emotion classes (can vary depending on the model)
emotion_classes = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Initialize webcam
cap = cv2.VideoCapture(0)
# Load face detector (Haar cascade)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
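# cv2.data.haarcascades points to the cascade XML files bundled with the
# opencv-python package, so no separate download is needed.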
while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Convert frame to grayscale (required by the Haar cascade detector)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        # Extract the face region of interest (ROI) and prepare it for the model
        face_roi = gray_frame[y:y+h, x:x+w]
        face_roi = cv2.resize(face_roi, (48, 48))  # Model expects 48x48 input
        face_roi = face_roi.astype('float32') / 255.0
        face_roi = np.expand_dims(face_roi, axis=-1)  # Add channel dim: (48, 48, 1)
        face_roi = np.expand_dims(face_roi, axis=0)   # Add batch dim: (1, 48, 48, 1)

        # Predict emotion
        prediction = model.predict(face_roi, verbose=0)[0]
        max_index = np.argmax(prediction)
        emotion = emotion_classes[max_index]

        # Draw bounding box and emotion label
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

    # Show the frame
    cv2.imshow('Emotion Recognition', frame)

    # Press 'q' to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
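
Because the model classifies each frame independently, the on-screen label can flicker between adjacent emotions. A common remedy is to take a majority vote over the last few frames. The sketch below is illustrative only; the EmotionSmoother class and its window size are assumptions, not part of the original project:

from collections import Counter, deque

class EmotionSmoother:
    """Majority vote over the last `window` per-frame predictions."""

    def __init__(self, window=10):
        self.history = deque(maxlen=window)

    def update(self, emotion):
        self.history.append(emotion)
        return Counter(self.history).most_common(1)[0][0]

# Usage inside the main loop (create the smoother once, before the loop):
#     smoother = EmotionSmoother(window=10)
#     emotion = smoother.update(emotion_classes[max_index])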