logu29 committed on
Commit 37aa19b · verified · 1 Parent(s): dc14ea8

Update app.py

Files changed (1):
  1. app.py +70 -10
app.py CHANGED
@@ -1,17 +1,77 @@
  import gradio as gr

- from transforme import pipeline

- sentiment = pipeline("emoition-analysis")

- def get_emotion( input_text):
-     return emotion(input_text)

- iface = gr.Inface(in = get_emotion,
-                   input = "text",
-                   outputs = ["text"],
-                   title = 'emotion analysis',
-                   description='get emotion negative/positive for the given input')

- iface.lanch(inline = false)
+ # In Google Colab
+
+ app_code = '''
  import gradio as gr
+ from transformers import pipeline
+ from deepface import DeepFace
+ import cv2
+ import numpy as np
+ import tempfile
+ import moviepy.editor as mp
+
+ # Load Text Sentiment Model
+ sentiment_pipeline = pipeline("sentiment-analysis")
+
+ # 1. Text Sentiment Analysis
+ def analyze_text(text):
+     result = sentiment_pipeline(text)[0]
+     return f"{result['label']} ({result['score']*100:.2f}%)"
+
+ # 2. Face Emotion Detection
+ def analyze_face(image):
+     try:
+         analysis = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
+         emotion = analysis[0]['dominant_emotion']
+         return f"Detected Emotion: {emotion}"
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # 3. Video Emotion Detection
+ def analyze_video(video_file):
+     temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
+     with open(temp_video_path, "wb") as f:
+         f.write(video_file.read())
+
+     clip = mp.VideoFileClip(temp_video_path)
+     frame = clip.get_frame(clip.duration / 2)  # Take middle frame
+     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+     try:
+         analysis = DeepFace.analyze(frame_rgb, actions=['emotion'], enforce_detection=False)
+         emotion = analysis[0]['dominant_emotion']
+         return f"Detected Emotion in Video: {emotion}"
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # Gradio Interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🎯 Deep Learning Sentiment & Emotion Analyzer")
+     gr.Markdown("Analyze **Text**, **Face Image**, or **Video**!")
+
+     with gr.Tabs():
+         with gr.TabItem("Text Sentiment"):
+             text_input = gr.Textbox(label="Enter Text")
+             text_output = gr.Label()
+             text_button = gr.Button("Analyze Text")
+             text_button.click(analyze_text, inputs=text_input, outputs=text_output)
+
+         with gr.TabItem("Face Emotion (Image)"):
+             image_input = gr.Image(type="numpy", label="Upload Face Image")
+             image_output = gr.Label()
+             image_button = gr.Button("Analyze Face Emotion")
+             image_button.click(analyze_face, inputs=image_input, outputs=image_output)
+
+         with gr.TabItem("Video Emotion"):
+             video_input = gr.File(label="Upload Video (.mp4)")
+             video_output = gr.Label()
+             video_button = gr.Button("Analyze Video Emotion")
+             video_button.click(analyze_video, inputs=video_input, outputs=video_output)
+
+ demo.launch()
+ '''
+
+ # Save to app.py
+ with open("app.py", "w") as f:
+     f.write(app_code)
+
+ print("✅ app.py saved!")
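Note on dependencies: the generated app.py imports transformers, deepface, OpenCV, and MoviePy on top of gradio, so the Colab session (or a Space's requirements.txt) has to provide them. A minimal install cell, assuming the standard PyPI package names and leaving versions unpinned:

# Colab cell: install the dependencies before running the generated app.py.
# Package names are the standard PyPI ones (assumption: unpinned latest versions);
# note that deepface pulls in TensorFlow as a dependency.
!pip install gradio transformers deepface opencv-python-headless moviepy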
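For context on the result['label'] / result['score'] fields read in analyze_text: a transformers pipeline("sentiment-analysis") call returns a list with one dict per input, and the default checkpoint labels text POSITIVE or NEGATIVE. A quick sketch of that contract:

from transformers import pipeline

# Downloads a default sentiment model on first use.
sentiment_pipeline = pipeline("sentiment-analysis")

# One dict per input string, each with a 'label' and a 'score' in [0, 1].
for r in sentiment_pipeline(["I love this!", "This is terrible."]):
    print(f"{r['label']} ({r['score']*100:.2f}%)")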
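Two caveats in analyze_video are worth flagging. Depending on the Gradio version, gr.File may hand the callback a temp-file path (or a wrapper exposing .name) rather than an open file object, in which case video_file.read() raises; and MoviePy's get_frame already returns an RGB array, so the COLOR_BGR2RGB call actually yields BGR (which happens to be what DeepFace's OpenCV backend expects, despite the frame_rgb name). A defensive sketch under those assumptions, with a hypothetical name so it is not mistaken for the committed code:

import cv2
import moviepy.editor as mp
from deepface import DeepFace

def analyze_video_safe(video_file):
    # Accept a plain path string, a tempfile wrapper with .name, or neither.
    path = video_file if isinstance(video_file, str) else getattr(video_file, "name", None)
    if path is None:
        return "Error: unsupported file input"

    clip = mp.VideoFileClip(path)
    frame = clip.get_frame(clip.duration / 2)  # middle frame, RGB uint8
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # DeepFace expects BGR

    try:
        analysis = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
        return f"Detected Emotion in Video: {analysis[0]['dominant_emotion']}"
    except Exception as e:
        return f"Error: {e}"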