import gradio as gr
from textblob import TextBlob
from deepface import DeepFace
import tempfile
import os
import cv2
import moviepy.editor as mp

# Sentiment Analysis for Text
def analyze_text(text):
    """Classify the sentiment of *text* with TextBlob polarity scoring.

    Returns a string "Sentiment: <label> (Polarity: <p>)" where the label
    is Positive for polarity > 0, Negative for polarity < 0, and Neutral
    for exactly 0. Polarity is reported to two decimal places.
    """
    polarity = TextBlob(text).sentiment.polarity
    if polarity > 0:
        label = "Positive"
    elif polarity < 0:
        label = "Negative"
    else:
        label = "Neutral"
    return f"Sentiment: {label} (Polarity: {polarity:.2f})"

# Emotion Analysis for Image (Face Recognition)
def analyze_image(image):
    """Report the dominant facial emotion DeepFace detects in *image*.

    ``enforce_detection=False`` keeps DeepFace from raising when no face is
    found in the frame. Returns "Detected Emotion: <emotion>" on success,
    or "Error: <message>" if analysis fails for any reason.
    """
    try:
        analysis = DeepFace.analyze(
            image, actions=['emotion'], enforce_detection=False
        )
        top_emotion = analysis[0]['dominant_emotion']
    except Exception as exc:
        return f"Error: {str(exc)}"
    return f"Detected Emotion: {top_emotion}"

# Emotion Analysis for Video (Face Recognition)
# Emotion Analysis for Video (Face Recognition)
def analyze_video(video):
    """Detect the dominant facial emotion at the midpoint of *video*.

    Samples the frame at duration/2, writes it to a temporary JPEG, and
    runs DeepFace emotion analysis on that file.

    Fixes over the original: the temp directory created per call is now
    cleaned up (``mkdtemp`` leaked it every invocation), and the
    ``VideoFileClip`` is closed so its underlying ffmpeg reader / file
    handle is released even if frame extraction fails.

    Returns "Video Emotion: <emotion>" on success, or "Error: <message>"
    if any step fails.
    """
    try:
        # TemporaryDirectory removes itself (and the frame file) on exit.
        with tempfile.TemporaryDirectory() as tmpdir:
            clip = mp.VideoFileClip(video)
            try:
                # moviepy returns frames in RGB channel order.
                frame = clip.get_frame(clip.duration / 2)
            finally:
                clip.close()
            frame_path = os.path.join(tmpdir, "frame.jpg")
            # cv2.imwrite expects BGR, so convert before writing.
            cv2.imwrite(frame_path, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            result = DeepFace.analyze(frame_path, actions=['emotion'], enforce_detection=False)
            dominant_emotion = result[0]['dominant_emotion']
        return f"Video Emotion: {dominant_emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio Blocks UI
# Gradio Blocks UI
# Wires the three analyzers above into a tabbed web interface: one tab each
# for text sentiment, image emotion, and video emotion.
# NOTE(review): theme="huggingface" is a legacy string theme identifier —
# confirm the installed Gradio version still accepts it.
with gr.Blocks(theme="huggingface") as demo:
    gr.Markdown("# 🎭 Sentiment & Emotion Decoder", elem_id="header")
    gr.Markdown("Upload your text, face image, or video to decode emotions and sentiments!")

    with gr.Tabs():
        # Text Sentiment Analysis Tab — free text in, sentiment label out.
        with gr.TabItem("📜 Text Sentiment"):
            text_input = gr.Textbox(label="Enter Text Here", placeholder="Type your social media post here...")
            text_button = gr.Button("🔍 Analyze Sentiment")
            text_output = gr.Label(label="Sentiment Result")
            text_button.click(analyze_text, inputs=text_input, outputs=text_output)

        # Image Emotion Analysis Tab — type="filepath" hands analyze_image a
        # path string rather than a decoded array.
        with gr.TabItem("📸 Face Emotion Image"):
            img_input = gr.Image(type="filepath", label="Upload Face Image")
            img_output = gr.Label(label="Emotion Result")
            img_button = gr.Button("🔍 Analyze Image")
            img_button.click(analyze_image, inputs=img_input, outputs=img_output)

        # Video Emotion Analysis Tab — gr.Video passes a filepath, which
        # analyze_video opens with moviepy.
        with gr.TabItem("🎥 Face Emotion Video"):
            video_input = gr.Video(label="Upload Face Video")
            video_output = gr.Label(label="Emotion Result")
            video_button = gr.Button("🔍 Analyze Video")
            video_button.click(analyze_video, inputs=video_input, outputs=video_output)

# Launch the Interface (blocks and serves the app when run as a script).
demo.launch()