wooj0216 committed on
Commit be70a4d · 1 Parent(s): c96cf7e

ADD: example images

__pycache__/detection.cpython-310.pyc CHANGED
Binary files a/__pycache__/detection.cpython-310.pyc and b/__pycache__/detection.cpython-310.pyc differ
 
app.py CHANGED
@@ -3,8 +3,9 @@ import cv2
 from PIL import Image
 import torch
 import numpy as np
+import os
 
-from transformers import AutoImageProcessor, AutoProcessor, AutoModel, CLIPVisionModel
+from transformers import AutoProcessor, CLIPVisionModel
 from detection import detect_image, detect_video
 from model import LinearClassifier
 
@@ -65,6 +66,14 @@ def change_input(input_type):
     else:
         return None
 
+def determine_model_type(image_path):
+    if "facial" in image_path.lower():
+        return "Facial"
+    elif "general" in image_path.lower():
+        return "General"
+    else:
+        return "Facial"  # default
+
 
 def process_input(input_type, model_type, image, video):
     detection_type = "facial" if model_type == "Facial" else "general"
@@ -77,6 +86,18 @@ def process_input(input_type, model_type, image, video):
         return None, None
 
 
+def process_example(image_path):
+    model_type = determine_model_type(image_path)
+    return Image.open(image_path), model_type
+
+
+example_images = [
+    "examples/fake/facial.jpg",
+    "examples/fake/general.jpg",
+    "examples/real/facial.jpg",
+    "examples/real/general.jpg",
+]
+
 with gr.Blocks() as demo:
 
     gr.Markdown("## Deepfake Detection : Facial / General")
@@ -92,6 +113,15 @@ with gr.Blocks() as demo:
 
     pred_score_output = gr.Textbox(label="Prediction Score")
     attn_map_output = gr.Image(type="pil", label="Attention Map")
+
+    # Add example images
+    gr.Examples(
+        examples=example_images,
+        inputs=[image_input],
+        outputs=[image_input, model_type],
+        fn=process_example,
+        cache_examples=False
+    )
 
     input_type.change(fn=change_input, inputs=[input_type], outputs=[image_input, video_input])
 
attn.jpg ADDED
examples/fake/facial.jpg ADDED
examples/fake/general.jpg ADDED
examples/real/facial.jpg ADDED
examples/real/general.jpg ADDED
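
For reference, a minimal, self-contained sketch of the filename-based model-type inference this commit wires into the example gallery. determine_model_type and process_example are reproduced from the app.py diff above; the asserted paths are the four example images added in this commit, and the last two lines assume those files exist on disk with Pillow installed.

from PIL import Image

# Reproduced from the app.py diff above: infer which detector an example
# image should use from keywords in its file path.
def determine_model_type(image_path):
    if "facial" in image_path.lower():
        return "Facial"
    elif "general" in image_path.lower():
        return "General"
    else:
        return "Facial"  # default when neither keyword appears

# process_example, also from the diff, returns the (image, model_type) pair
# that gr.Examples routes to its [image_input, model_type] outputs.
def process_example(image_path):
    model_type = determine_model_type(image_path)
    return Image.open(image_path), model_type

# Each committed example path resolves to the intended model type, and
# unrecognized names fall back to "Facial".
assert determine_model_type("examples/fake/facial.jpg") == "Facial"
assert determine_model_type("examples/fake/general.jpg") == "General"
assert determine_model_type("examples/real/facial.jpg") == "Facial"
assert determine_model_type("examples/real/general.jpg") == "General"
assert determine_model_type("examples/unknown.jpg") == "Facial"

# Opening an example requires the committed files on disk:
image, inferred = process_example("examples/real/general.jpg")
print(inferred)  # -> "General"

Encoding the model type in the example filename keeps the gr.Examples wiring free of extra metadata, at the cost of silently defaulting to "Facial" if an example is renamed without either keyword.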