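"""Emotion Decoder: a Gradio app that runs text sentiment analysis with
TextBlob and facial emotion detection on images and videos with DeepFace."""
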
import gradio as gr
from textblob import TextBlob
from deepface import DeepFace
import cv2
import moviepy.editor as mp
import tempfile
import os
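# Note: `moviepy.editor` is the MoviePy 1.x import path; MoviePy 2.x removed it
# (there you import `from moviepy import VideoFileClip`), so pin moviepy<2.0.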

# Analyze text sentiment with TextBlob
def analyze_text(text):
    blob = TextBlob(text)
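    # TextBlob polarity ranges from -1.0 (most negative) to 1.0 (most positive).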
    polarity = blob.sentiment.polarity
    emotion = "Positive" if polarity > 0 else "Negative" if polarity < 0 else "Neutral"
    return f"Sentiment: {emotion} (Score: {polarity:.2f})"

# Detect the dominant facial emotion in an image
def analyze_face(image):
    try:
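        # enforce_detection=False stops DeepFace from raising when no face is
        # found; the whole frame is analyzed instead. In recent DeepFace
        # versions analyze() returns a list with one dict per detected face.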
        result = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
        dominant_emotion = result[0]['dominant_emotion']
        return f"Dominant Emotion: {dominant_emotion}"
    except Exception as e:
        return f"Error analyzing face: {str(e)}"

# Detect the dominant facial emotion in a video via a single mid-clip frame
def analyze_video(video_path):
    try:
        temp_folder = tempfile.mkdtemp()
        clip = mp.VideoFileClip(video_path)
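        # Sample one frame from the midpoint of the clip as a cheap proxy
        # for the video's overall emotion.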
        frame = clip.get_frame(clip.duration / 2)
        frame_path = os.path.join(temp_folder, "frame.jpg")
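        # MoviePy yields RGB frames; OpenCV expects BGR, so convert before writing.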
        cv2.imwrite(frame_path, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        clip.close()  # release the underlying video reader once the frame is saved
        result = DeepFace.analyze(frame_path, actions=['emotion'], enforce_detection=False)
        dominant_emotion = result[0]['dominant_emotion']
        return f"Dominant Emotion in Video: {dominant_emotion}"
    except Exception as e:
        return f"Error analyzing video: {str(e)}"

# Build Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 Emotion Decoder - Sentiment & Emotion Analysis")
    
    with gr.Tab("Text Analysis"):
        text_input = gr.Textbox(label="Enter text")
        text_output = gr.Textbox(label="Sentiment Result")
        text_button = gr.Button("Analyze Text")
        text_button.click(analyze_text, inputs=text_input, outputs=text_output)
    
    with gr.Tab("Face Emotion Detection"):
        img_input = gr.Image(type="filepath", label="Upload an Image")
        img_output = gr.Textbox(label="Emotion Result")
        img_button = gr.Button("Analyze Face Emotion")
        img_button.click(analyze_face, inputs=img_input, outputs=img_output)
    
    with gr.Tab("Video Emotion Detection"):
        video_input = gr.Video(label="Upload a Video")
        video_output = gr.Textbox(label="Emotion Result")
        video_button = gr.Button("Analyze Video Emotion")
        video_button.click(analyze_video, inputs=video_input, outputs=video_output)

if __name__ == "__main__":
    demo.launch()