notrey committed
Commit d900808 · 1 Parent(s): e33102a

updating app

Files changed (1):
  1. app.py +374 -257

app.py CHANGED
@@ -7,6 +7,8 @@ from transformers import pipeline
 from PIL import Image
 import torch
 from collections import deque
+import os
+import tempfile
 
 # Set page config
 st.set_page_config(
@@ -18,8 +20,8 @@ st.set_page_config(
 # --- App Title and Description ---
 st.title("Advanced Real-Time Emotion Detection")
 st.write("""
-This app detects emotions in real-time using your webcam. It tracks facial expressions continuously
-and provides visual feedback on detected emotions.
+This app detects emotions in real-time using webcam, video files, or images.
+If your webcam isn't working, try the simulation mode or upload a video file.
 """)
 
 # --- Load Models ---
@@ -51,10 +53,10 @@ selected_model = st.sidebar.selectbox(
     list(model_options.keys())
 )
 
-# Input method selection
+# Input method selection with addition of video upload and simulation
 input_method = st.sidebar.radio(
     "Choose Input Method",
-    ["Real-time Webcam", "Upload an Image", "Capture Image"]
+    ["Real-time Webcam", "Upload Video", "Simulation Mode", "Upload an Image", "Capture Image"]
 )
 
 # Confidence threshold
@@ -69,8 +71,19 @@ confidence_threshold = st.sidebar.slider(
 # Face detection toggle
 use_face_detection = st.sidebar.checkbox("Enable Face Detection", value=True)
 
+# Processing speed for video/simulation
+if input_method in ["Upload Video", "Simulation Mode"]:
+    processing_speed = st.sidebar.slider(
+        "Processing Speed",
+        min_value=0.1,
+        max_value=2.0,
+        value=1.0,
+        step=0.1,
+        help="Adjust the speed of video processing (higher is faster)"
+    )
+
 # History length for real-time tracking
-if input_method == "Real-time Webcam":
+if input_method in ["Real-time Webcam", "Upload Video", "Simulation Mode"]:
     history_length = st.sidebar.slider(
         "Emotion History Length (seconds)",
         min_value=5,
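
Note: the new processing_speed value feeds the time.sleep(0.03 / processing_speed) call added further down in this diff. As rough arithmetic that ignores per-frame inference time, the default of 1.0 sleeps about 33 ms per frame (a ~30 fps ceiling), 2.0 sleeps about 17 ms (~60 fps), and the minimum of 0.1 sleeps 300 ms (~3 fps).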
@@ -161,117 +174,79 @@ def draw_faces_with_emotions(image, faces, emotions):
 
     return img
 
-# --- Main App Logic ---
-if input_method == "Upload an Image":
-    uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
-
-    if uploaded_file is not None:
-        # Load and display image
-        image = Image.open(uploaded_file).convert("RGB")
-
-        col1, col2 = st.columns(2)
-        with col1:
-            st.image(image, caption="Uploaded Image", use_column_width=True)
-
-        # Process image
-        if use_face_detection:
-            faces, opencv_image = detect_faces(image)
-
-            if len(faces) > 0:
-                emotions = []
-                for face in faces:
-                    face_img = process_image_for_emotion(image, face)
-                    emotions.append(predict_emotion(face_img))
-
-                # Draw faces with emotions
-                result_image = draw_faces_with_emotions(opencv_image, faces, emotions)
-
-                with col2:
-                    st.image(result_image, caption="Detected Emotions", channels="BGR", use_column_width=True)
-
-                # Display predictions
-                st.subheader("Detected Emotions:")
-                for i, (emotion, face) in enumerate(zip(emotions, faces)):
-                    if emotion["score"] >= confidence_threshold:
-                        st.write(f"Face {i+1}: **{emotion['label']}** (Confidence: {emotion['score']:.2f})")
-
-                        # Show confidence bars
-                        top_emotions = classifier(process_image_for_emotion(image, face))
-                        labels = [item["label"] for item in top_emotions]
-                        scores = [item["score"] for item in top_emotions]
-
-                        fig = go.Figure(go.Bar(
-                            x=scores,
-                            y=labels,
-                            orientation='h'
-                        ))
-                        fig.update_layout(
-                            title=f"Emotion Confidence - Face {i+1}",
-                            xaxis_title="Confidence",
-                            yaxis_title="Emotion",
-                            height=300
-                        )
-                        st.plotly_chart(fig, use_container_width=True)
-            else:
-                st.warning("No faces detected in the image. Try another image or disable face detection.")
-        else:
-            # Process the whole image
-            prediction = predict_emotion(image)
-            st.subheader("Prediction:")
-            st.write(f"**Emotion:** {prediction['label']}")
-            st.write(f"**Confidence:** {prediction['score']:.2f}")
-
-elif input_method == "Capture Image":
-    picture = st.camera_input("Capture an Image")
-
-    if picture is not None:
-        image = Image.open(picture).convert("RGB")
-
-        col1, col2 = st.columns(2)
-        with col1:
-            st.image(image, caption="Captured Image", use_column_width=True)
-
-        # Process image
-        if use_face_detection:
-            faces, opencv_image = detect_faces(image)
-
-            if len(faces) > 0:
-                emotions = []
-                for face in faces:
-                    face_img = process_image_for_emotion(image, face)
-                    emotions.append(predict_emotion(face_img))
-
-                # Draw faces with emotions
-                result_image = draw_faces_with_emotions(opencv_image, faces, emotions)
-
-                with col2:
-                    st.image(result_image, caption="Detected Emotions", channels="BGR", use_column_width=True)
-
-                # Display predictions
-                st.subheader("Detected Emotions:")
-                for i, (emotion, face) in enumerate(zip(emotions, faces)):
-                    if emotion["score"] >= confidence_threshold:
-                        st.write(f"Face {i+1}: **{emotion['label']}** (Confidence: {emotion['score']:.2f})")
-            else:
-                st.warning("No faces detected in the image. Try another image or disable face detection.")
-        else:
-            # Process the whole image
-            prediction = predict_emotion(image)
-            st.subheader("Prediction:")
-            st.write(f"**Emotion:** {prediction['label']}")
-            st.write(f"**Confidence:** {prediction['score']:.2f}")
-
-elif input_method == "Real-time Webcam":
-    st.subheader("Real-time Emotion Detection")
-    st.write("Click 'Start' to begin real-time emotion detection using your webcam.")
-
-    # Create a placeholder for the webcam feed
-    video_placeholder = st.empty()
-
-    # Create a placeholder for metrics
-    metrics_placeholder = st.empty()
-
-    # Create a placeholder for emotion history chart
+def generate_simulated_face(frame_num, canvas_size=(640, 480)):
+    """Generate a simulated face with changing expressions."""
+    # Create a blank canvas
+    canvas = np.ones((canvas_size[1], canvas_size[0], 3), dtype=np.uint8) * 230
+
+    # Calculate center position and face size
+    center_x, center_y = canvas_size[0] // 2, canvas_size[1] // 2
+    face_radius = min(canvas_size) // 4
+
+    # Face movement based on frame number
+    movement_x = int(np.sin(frame_num * 0.02) * 50)
+    movement_y = int(np.cos(frame_num * 0.03) * 30)
+
+    face_x = center_x + movement_x
+    face_y = center_y + movement_y
+
+    # Draw face circle
+    cv2.circle(canvas, (face_x, face_y), face_radius, (220, 210, 180), -1)
+
+    # Draw eyes
+    eye_y = face_y - int(face_radius * 0.2)
+    left_eye_x = face_x - int(face_radius * 0.5)
+    right_eye_x = face_x + int(face_radius * 0.5)
+    eye_size = max(5, face_radius // 8)
+
+    # Blink occasionally
+    if frame_num % 50 > 45:  # Blink every 50 frames for 5 frames
+        cv2.ellipse(canvas, (left_eye_x, eye_y), (eye_size, 1), 0, 0, 360, (30, 30, 30), -1)
+        cv2.ellipse(canvas, (right_eye_x, eye_y), (eye_size, 1), 0, 0, 360, (30, 30, 30), -1)
+    else:
+        cv2.circle(canvas, (left_eye_x, eye_y), eye_size, (255, 255, 255), -1)
+        cv2.circle(canvas, (right_eye_x, eye_y), eye_size, (255, 255, 255), -1)
+        cv2.circle(canvas, (left_eye_x, eye_y), eye_size-2, (70, 70, 70), -1)
+        cv2.circle(canvas, (right_eye_x, eye_y), eye_size-2, (70, 70, 70), -1)
+
+    # Draw mouth - change shape based on frame number to simulate different emotions
+    mouth_y = face_y + int(face_radius * 0.3)
+    mouth_width = int(face_radius * 0.6)
+    mouth_height = int(face_radius * 0.2)
+
+    # Cycle through different emotions based on frame number
+    emotion_cycle = (frame_num // 100) % 4
+
+    if emotion_cycle == 0:  # Happy
+        # Smile
+        cv2.ellipse(canvas, (face_x, mouth_y), (mouth_width, mouth_height),
+                    0, 0, 180, (50, 50, 50), 2)
+    elif emotion_cycle == 1:  # Sad
+        # Frown
+        cv2.ellipse(canvas, (face_x, mouth_y + mouth_height),
+                    (mouth_width, mouth_height), 0, 180, 360, (50, 50, 50), 2)
+    elif emotion_cycle == 2:  # Surprised
+        # O mouth
+        cv2.circle(canvas, (face_x, mouth_y), mouth_height, (50, 50, 50), 2)
+    else:  # Neutral
+        # Straight line
+        cv2.line(canvas, (face_x - mouth_width//2, mouth_y),
+                 (face_x + mouth_width//2, mouth_y), (50, 50, 50), 2)
+
+    # Add some text showing what emotion is being simulated
+    emotions = ["Happy", "Sad", "Surprised", "Neutral"]
+    cv2.putText(canvas, f"Simulating: {emotions[emotion_cycle]}",
+                (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (50, 50, 50), 2)
+    cv2.putText(canvas, "Simulation Mode - No webcam required",
+                (20, canvas_size[1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (100, 100, 100), 1)
+
+    return canvas
+
+def process_video_feed(feed_source, is_simulation=False):
+    """Process video feed (webcam, video file, or simulation)."""
+    # Create placeholders
+    video_placeholder = st.empty()
+    metrics_placeholder = st.empty()
     chart_placeholder = st.empty()
 
     # Initialize session state for tracking emotions over time
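
Note: generate_simulated_face returns a plain BGR uint8 canvas, so it can be exercised without Streamlit at all. A minimal sketch, not part of the commit: it assumes the function has been copied into a scratch module (importing app.py directly would execute the whole Streamlit script) and that numpy is available as np, which the function's use of np.ones implies.

    import cv2

    from scratch_sim import generate_simulated_face  # hypothetical scratch module

    # emotion_cycle = (frame_num // 100) % 4, so these four frames cover
    # the Happy, Sad, Surprised, and Neutral phases in turn
    for n in (0, 100, 200, 300):
        frame = generate_simulated_face(n)        # 640x480 BGR uint8 canvas
        cv2.imwrite(f"sim_frame_{n}.png", frame)  # write one PNG per phase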
@@ -279,6 +254,7 @@ elif input_method == "Real-time Webcam":
         st.session_state.emotion_history = {}
         st.session_state.last_update_time = time.time()
         st.session_state.frame_count = 0
+        st.session_state.simulation_frame = 0
 
     # Start/Stop button
     start_button = st.button("Start" if 'running' not in st.session_state or not st.session_state.running else "Stop")
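
Note: these assignments run under Streamlit's usual run-once guard; the guard line itself falls just outside the hunk, so the condition below is an assumption consistent with the keys being seeded. A minimal sketch of the pattern:

    import time

    import streamlit as st

    # Streamlit reruns the script top-to-bottom on every interaction, so
    # per-session state must only be seeded once.
    if 'emotion_history' not in st.session_state:  # assumed guard condition
        st.session_state.emotion_history = {}
        st.session_state.last_update_time = time.time()
        st.session_state.frame_count = 0
        st.session_state.simulation_frame = 0      # field added by this commit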
@@ -286,171 +262,196 @@ elif input_method == "Real-time Webcam":
     if start_button:
         st.session_state.running = not st.session_state.get('running', False)
 
-    # If running, capture and process webcam feed
+    # If running, capture and process video feed
     if st.session_state.get('running', False):
         try:
-            # Open the webcam
-            cap = cv2.VideoCapture(0)
-
-            # Check if webcam opened successfully
-            if not cap.isOpened():
-                st.error("Could not open webcam. Please check your camera settings.")
-                st.session_state.running = False
+            # Initialize video source
+            if is_simulation:
+                # No need to open a video source for simulation
+                pass
             else:
-                # Create deques for tracking emotions
-                emotion_deques = {}
-                timestamp_deque = deque(maxlen=30*history_length)  # Store timestamps for X seconds at 30fps
+                cap = feed_source
 
-                while st.session_state.get('running', False):
-                    # Read frame
+                # Check if video source opened successfully
+                if not cap.isOpened():
+                    st.error("Could not open video source. Please check your settings.")
+                    st.session_state.running = False
+                    return
+
+            # Create deques for tracking emotions
+            emotion_deques = {}
+            timestamp_deque = deque(maxlen=30*history_length)  # Store timestamps for X seconds at 30fps
+
+            while st.session_state.get('running', False):
+                # Get frame
+                if is_simulation:
+                    # Generate a simulated frame
+                    frame = generate_simulated_face(st.session_state.simulation_frame)
+                    st.session_state.simulation_frame += 1
+                    ret = True
+                else:
+                    # Read from video source
                     ret, frame = cap.read()
-
-                    if not ret:
-                        st.error("Failed to capture frame from webcam")
-                        break
-
-                    # Flip the frame horizontally for a more natural view
+
+                if not ret:
+                    if is_simulation:
+                        st.error("Simulation error")
+                    elif input_method == "Upload Video":
+                        # For video files, loop back to the beginning
+                        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
+                        continue
+                    else:
+                        st.error("Failed to capture frame from video source")
+                        break
+
+                # For webcam, flip horizontally for a more natural view
+                if input_method == "Real-time Webcam" and not is_simulation:
                     frame = cv2.flip(frame, 1)
+
+                # Increment frame count for FPS calculation
+                st.session_state.frame_count += 1
+
+                # Detect faces
+                if use_face_detection:
+                    faces, _ = detect_faces(frame)
 
-                    # Increment frame count for FPS calculation
-                    st.session_state.frame_count += 1
-
-                    # Detect faces
-                    if use_face_detection:
-                        faces, _ = detect_faces(frame)
+                    if len(faces) > 0:
+                        # Process each face
+                        emotions = []
+                        for face in faces:
+                            face_img = process_image_for_emotion(frame, face)
+                            emotions.append(predict_emotion(face_img))
 
-                        if len(faces) > 0:
-                            # Process each face
-                            emotions = []
-                            for face in faces:
-                                face_img = process_image_for_emotion(frame, face)
-                                emotions.append(predict_emotion(face_img))
-
-                            # Draw faces with emotions
-                            frame = draw_faces_with_emotions(frame, faces, emotions)
-
-                            # Update emotion history
-                            current_time = time.time()
-                            timestamp_deque.append(current_time)
-
-                            for i, emotion in enumerate(emotions):
-                                if emotion["score"] >= confidence_threshold:
-                                    face_id = f"Face {i+1}"
-                                    if face_id not in emotion_deques:
-                                        emotion_deques[face_id] = deque(maxlen=30*history_length)
-
-                                    emotion_deques[face_id].append({
-                                        "emotion": emotion["label"],
-                                        "confidence": emotion["score"],
-                                        "time": current_time
-                                    })
-                        else:
-                            # No faces detected
-                            pass
-                    else:
-                        # Process the whole frame
-                        pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-                        emotion = predict_emotion(pil_image)
-
-                        # Display emotion on frame
-                        cv2.putText(
-                            frame,
-                            f"{emotion['label']} ({emotion['score']:.2f})",
-                            (10, 30),
-                            cv2.FONT_HERSHEY_SIMPLEX,
-                            1,
-                            (0, 255, 0),
-                            2
-                        )
+                        # Draw faces with emotions
+                        frame = draw_faces_with_emotions(frame, faces, emotions)
 
                         # Update emotion history
                         current_time = time.time()
                         timestamp_deque.append(current_time)
 
-                        if "Frame" not in emotion_deques:
-                            emotion_deques["Frame"] = deque(maxlen=30*history_length)
-
-                        emotion_deques["Frame"].append({
-                            "emotion": emotion["label"],
-                            "confidence": emotion["score"],
-                            "time": current_time
-                        })
+                        for i, emotion in enumerate(emotions):
+                            if emotion["score"] >= confidence_threshold:
+                                face_id = f"Face {i+1}"
+                                if face_id not in emotion_deques:
+                                    emotion_deques[face_id] = deque(maxlen=30*history_length)
+
+                                emotion_deques[face_id].append({
+                                    "emotion": emotion["label"],
+                                    "confidence": emotion["score"],
+                                    "time": current_time
+                                })
+                    else:
+                        # No faces detected
+                        pass
+                else:
+                    # Process the whole frame
+                    pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+                    emotion = predict_emotion(pil_image)
 
-                    # Calculate FPS
+                    # Display emotion on frame
+                    cv2.putText(
+                        frame,
+                        f"{emotion['label']} ({emotion['score']:.2f})",
+                        (10, 30),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        1,
+                        (0, 255, 0),
+                        2
+                    )
+
+                    # Update emotion history
                     current_time = time.time()
-                    time_diff = current_time - st.session_state.last_update_time
-                    if time_diff >= 1.0:  # Update every second
-                        fps = st.session_state.frame_count / time_diff
-                        st.session_state.last_update_time = current_time
-                        st.session_state.frame_count = 0
-
-                        # Update metrics
-                        with metrics_placeholder.container():
-                            cols = st.columns(3)
-                            cols[0].metric("FPS", f"{fps:.1f}")
-                            cols[1].metric("Faces Detected", len(faces) if use_face_detection else "N/A")
+                    timestamp_deque.append(current_time)
 
-                    # Display the frame
-                    video_placeholder.image(frame, channels="BGR", use_column_width=True)
+                    if "Frame" not in emotion_deques:
+                        emotion_deques["Frame"] = deque(maxlen=30*history_length)
 
-                    # Update emotion history chart periodically
-                    if len(timestamp_deque) > 0 and time_diff >= 0.5:  # Update chart every 0.5 seconds
-                        with chart_placeholder.container():
-                            # Create tabs for each face
-                            if len(emotion_deques) > 0:
-                                tabs = st.tabs(list(emotion_deques.keys()))
-
-                                for i, (face_id, emotion_data) in enumerate(emotion_deques.items()):
-                                    with tabs[i]:
-                                        if len(emotion_data) > 0:
-                                            # Count occurrences of each emotion
-                                            emotion_counts = {}
-                                            for entry in emotion_data:
-                                                emotion = entry["emotion"]
-                                                if emotion not in emotion_counts:
-                                                    emotion_counts[emotion] = 0
-                                                emotion_counts[emotion] += 1
-
-                                            # Create pie chart for emotion distribution
-                                            fig = go.Figure(data=[go.Pie(
-                                                labels=list(emotion_counts.keys()),
-                                                values=list(emotion_counts.values()),
-                                                hole=.3
-                                            )])
-                                            fig.update_layout(title=f"Emotion Distribution - {face_id}")
-                                            st.plotly_chart(fig, use_container_width=True)
-
-                                            # Create line chart for emotion confidence over time
-                                            emotions = list(emotion_data)[-20:]  # Get the last 20 entries
-                                            times = [(e["time"] - emotions[0]["time"]) for e in emotions]
-                                            confidences = [e["confidence"] for e in emotions]
-                                            emotion_labels = [e["emotion"] for e in emotions]
-
-                                            fig = go.Figure()
-                                            fig.add_trace(go.Scatter(
-                                                x=times,
-                                                y=confidences,
-                                                mode='lines+markers',
-                                                text=emotion_labels,
-                                                hoverinfo='text+y'
-                                            ))
-                                            fig.update_layout(
-                                                title=f"Emotion Confidence Over Time - {face_id}",
-                                                xaxis_title="Time (seconds)",
-                                                yaxis_title="Confidence",
-                                                yaxis=dict(range=[0, 1])
-                                            )
-                                            st.plotly_chart(fig, use_container_width=True)
-                                        else:
-                                            st.info(f"No emotion data available for {face_id} yet.")
-                            else:
-                                st.info("No emotion data available yet.")
+                    emotion_deques["Frame"].append({
+                        "emotion": emotion["label"],
+                        "confidence": emotion["score"],
+                        "time": current_time
+                    })
+
+                # Calculate FPS
+                current_time = time.time()
+                time_diff = current_time - st.session_state.last_update_time
+                if time_diff >= 1.0:  # Update every second
+                    fps = st.session_state.frame_count / time_diff
+                    st.session_state.last_update_time = current_time
+                    st.session_state.frame_count = 0
+
+                    # Update metrics
+                    with metrics_placeholder.container():
+                        cols = st.columns(3)
+                        cols[0].metric("FPS", f"{fps:.1f}")
+                        cols[1].metric("Faces Detected", len(faces) if use_face_detection else "N/A")
 
-            # Release the webcam when done
+                # Display the frame
+                video_placeholder.image(frame, channels="BGR", use_column_width=True)
+
+                # Update emotion history chart periodically
+                if len(timestamp_deque) > 0 and time_diff >= 0.5:  # Update chart every 0.5 seconds
+                    with chart_placeholder.container():
+                        # Create tabs for each face
+                        if len(emotion_deques) > 0:
+                            tabs = st.tabs(list(emotion_deques.keys()))
+
+                            for i, (face_id, emotion_data) in enumerate(emotion_deques.items()):
+                                with tabs[i]:
+                                    if len(emotion_data) > 0:
+                                        # Count occurrences of each emotion
+                                        emotion_counts = {}
+                                        for entry in emotion_data:
+                                            emotion = entry["emotion"]
+                                            if emotion not in emotion_counts:
+                                                emotion_counts[emotion] = 0
+                                            emotion_counts[emotion] += 1
+
+                                        # Create pie chart for emotion distribution
+                                        fig = go.Figure(data=[go.Pie(
+                                            labels=list(emotion_counts.keys()),
+                                            values=list(emotion_counts.values()),
+                                            hole=.3
+                                        )])
+                                        fig.update_layout(title=f"Emotion Distribution - {face_id}")
+                                        st.plotly_chart(fig, use_container_width=True)
+
+                                        # Create line chart for emotion confidence over time
+                                        emotions = list(emotion_data)[-20:]  # Get the last 20 entries
+                                        times = [(e["time"] - emotions[0]["time"]) for e in emotions]
+                                        confidences = [e["confidence"] for e in emotions]
+                                        emotion_labels = [e["emotion"] for e in emotions]
+
+                                        fig = go.Figure()
+                                        fig.add_trace(go.Scatter(
+                                            x=times,
+                                            y=confidences,
+                                            mode='lines+markers',
+                                            text=emotion_labels,
+                                            hoverinfo='text+y'
+                                        ))
+                                        fig.update_layout(
+                                            title=f"Emotion Confidence Over Time - {face_id}",
+                                            xaxis_title="Time (seconds)",
+                                            yaxis_title="Confidence",
+                                            yaxis=dict(range=[0, 1])
+                                        )
+                                        st.plotly_chart(fig, use_container_width=True)
+                                    else:
+                                        st.info(f"No emotion data available for {face_id} yet.")
+                        else:
+                            st.info("No emotion data available yet.")
+
+                # Control processing speed for videos and simulation
+                if input_method in ["Upload Video", "Simulation Mode"]:
+                    time.sleep(0.03 / processing_speed)  # Adjust delay based on processing_speed
+
+            # Release resources when done
+            if not is_simulation and cap.isOpened():
                 cap.release()
 
         except Exception as e:
-            st.error(f"Error during webcam processing: {str(e)}")
+            st.error(f"Error during processing: {str(e)}")
             st.session_state.running = False
     else:
         # Display a placeholder image when not running
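
Note: two details in this hunk are easy to miss. First, deque(maxlen=30*history_length) assumes roughly 30 fps, so a 10-second history keeps about 300 entries per face. Second, when a video file reaches end-of-file (cap.read() returns ret=False), the loop now rewinds with CAP_PROP_POS_FRAMES instead of stopping. A standalone sketch of that rewind behavior, with a placeholder path:

    import cv2

    cap = cv2.VideoCapture("example.mp4")  # placeholder path
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of file: rewind to frame 0 and keep playing, as the app now does
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            continue
        cv2.imshow("loop", frame)
        if cv2.waitKey(30) & 0xFF == ord('q'):  # ~30 fps pacing; press q to quit
            break
    cap.release()
    cv2.destroyAllWindows()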
@@ -466,12 +467,128 @@ elif input_method == "Real-time Webcam":
         )
         video_placeholder.image(placeholder_img, channels="BGR", use_column_width=True)
 
+# --- Process uploaded image ---
+def process_static_image(image):
+    col1, col2 = st.columns(2)
+    with col1:
+        st.image(image, caption="Image", use_column_width=True)
+
+    # Process image
+    if use_face_detection:
+        faces, opencv_image = detect_faces(image)
+
+        if len(faces) > 0:
+            emotions = []
+            for face in faces:
+                face_img = process_image_for_emotion(image, face)
+                emotions.append(predict_emotion(face_img))
+
+            # Draw faces with emotions
+            result_image = draw_faces_with_emotions(opencv_image, faces, emotions)
+
+            with col2:
+                st.image(result_image, caption="Detected Emotions", channels="BGR", use_column_width=True)
+
+            # Display predictions
+            st.subheader("Detected Emotions:")
+            for i, (emotion, face) in enumerate(zip(emotions, faces)):
+                if emotion["score"] >= confidence_threshold:
+                    st.write(f"Face {i+1}: **{emotion['label']}** (Confidence: {emotion['score']:.2f})")
+
+                    # Show confidence bars
+                    top_emotions = classifier(process_image_for_emotion(image, face))
+                    labels = [item["label"] for item in top_emotions]
+                    scores = [item["score"] for item in top_emotions]
+
+                    fig = go.Figure(go.Bar(
+                        x=scores,
+                        y=labels,
+                        orientation='h'
+                    ))
+                    fig.update_layout(
+                        title=f"Emotion Confidence - Face {i+1}",
+                        xaxis_title="Confidence",
+                        yaxis_title="Emotion",
+                        height=300
+                    )
+                    st.plotly_chart(fig, use_container_width=True)
+        else:
+            st.warning("No faces detected in the image. Try another image or disable face detection.")
+    else:
+        # Process the whole image
+        prediction = predict_emotion(image)
+        st.subheader("Prediction:")
+        st.write(f"**Emotion:** {prediction['label']}")
+        st.write(f"**Confidence:** {prediction['score']:.2f}")
+
+# --- Main App Logic ---
+if input_method == "Upload an Image":
+    uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
+
+    if uploaded_file is not None:
+        # Load and display image
+        image = Image.open(uploaded_file).convert("RGB")
+        process_static_image(image)
+
+elif input_method == "Capture Image":
+    picture = st.camera_input("Capture an Image")
+
+    if picture is not None:
+        image = Image.open(picture).convert("RGB")
+        process_static_image(image)
+
+elif input_method == "Upload Video":
+    uploaded_video = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov", "mkv"])
+
+    if uploaded_video is not None:
+        # Save the uploaded video to a temporary file
+        tfile = tempfile.NamedTemporaryFile(delete=False)
+        tfile.write(uploaded_video.read())
+
+        # Open the video file
+        cap = cv2.VideoCapture(tfile.name)
+
+        # Process the video
+        process_video_feed(cap)
+
+        # Clean up the temporary file
+        os.unlink(tfile.name)
+
+elif input_method == "Simulation Mode":
+    st.info("Simulation mode uses a generated animated face. No webcam required!")
+    process_video_feed(None, is_simulation=True)
+
+elif input_method == "Real-time Webcam":
+    try:
+        # First check if we can access the webcam
+        cap = cv2.VideoCapture(0)
+        if not cap.isOpened():
+            st.error("Could not open webcam. Please try the Simulation Mode instead.")
+            st.info("If you're using Streamlit in a browser, make sure you've granted camera permissions.")
+
+            # Show troubleshooting tips
+            with st.expander("Webcam Troubleshooting Tips"):
+                st.markdown("""
+                1. **Check Browser Permissions**: Make sure your browser has permission to access your camera.
+                2. **Close Other Applications**: Other applications might be using your webcam.
+                3. **Refresh the Page**: Sometimes simply refreshing can resolve the issue.
+                4. **Try a Different Browser**: Some browsers handle webcam access better than others.
+                5. **Use Simulation Mode**: If you cannot get the webcam working, use the Simulation Mode.
+                """)
+        else:
+            # Webcam available, process it
+            process_video_feed(cap)
+    except Exception as e:
+        st.error(f"Error accessing webcam: {str(e)}")
+        st.info("Please try the Simulation Mode instead, which doesn't require webcam access.")
+
 # --- Footer ---
 st.markdown("---")
 st.markdown("""
 **Tips for Best Results:**
+- If webcam doesn't work, try "Simulation Mode" or "Upload Video" options
 - Ensure good lighting for accurate face detection
-- Position your face clearly in the frame
+- Position faces clearly in the frame
 - Try different emotion models for comparison
-- Adjust the confidence threshold if emotions aren't being detected
+- Adjust the confidence threshold if emotions aren't being detected correctly
 """)