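# Dependencies (a likely requirements set for this app; an assumption, not
# taken from the original Space):
#   pip install gradio fer opencv-python
# Note that fer pulls in heavier dependencies of its own (e.g. TensorFlow).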
import cv2
import gradio as gr

from fer import FER

# Initialize the pre-trained detector once so it isn't reinitialized on every call.
# mtcnn=True uses the MTCNN face detector; set it to False to fall back to a
# faster (but less accurate) OpenCV Haar cascade.
detector = FER(mtcnn=True)
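# For reference, detect_emotions() returns one entry per detected face, shaped
# roughly like (a sketch of the fer output format, scores are illustrative):
#   {"box": [x, y, w, h],
#    "emotions": {"angry": 0.02, "happy": 0.91, "neutral": 0.04, ...}}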
def emotion_recognition(image):
    """
    Detect faces in the input image and annotate each one with a bounding
    box and its dominant emotion label.

    Parameters:
        image (numpy.ndarray): Input image (RGB, as provided by Gradio).

    Returns:
        numpy.ndarray: Annotated copy of the image.
    """
    # Gradio passes images as RGB arrays, which fer accepts directly.
    results = detector.detect_emotions(image)
    annotated_image = image.copy()

    # Loop over each detected face
    for face in results:
        (x, y, w, h) = face["box"]
        # Pick the emotion with the highest score for this face
        dominant_emotion = max(face["emotions"].items(), key=lambda item: item[1])[0]
        # Draw a bounding box around the face
        cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Put the emotion label above the box, clamped so it stays in frame
        cv2.putText(annotated_image, dominant_emotion, (x, max(y - 10, 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
    return annotated_image
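
# A quick local sanity check, independent of the Gradio UI. This is a sketch:
# "test_face.jpg" is a hypothetical path standing in for any local test image.
#
#   img_bgr = cv2.imread("test_face.jpg")               # OpenCV loads BGR
#   img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # match what Gradio sends
#   out = emotion_recognition(img_rgb)
#   cv2.imwrite("annotated.jpg", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))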
# Create a Gradio Interface that wires the function to image input/output components
interface = gr.Interface(
    fn=emotion_recognition,
    inputs=gr.Image(type="numpy", label="Input Image"),
    outputs=gr.Image(type="numpy", label="Annotated Image"),
    title="Facial Emotion Recognition",
    description="Upload an image and let the app detect and annotate facial emotions."
)
# Run the app locally
if __name__ == "__main__":
    interface.launch()
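
# launch() serves on localhost by default; passing share=True generates a
# temporary public link for quick demos. On hosted platforms such as Hugging
# Face Spaces, the plain launch() call above is typically all that's needed.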