Update app.py
app.py CHANGED
@@ -1,20 +1,30 @@
 import gradio as gr
-from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
+from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation, AutoImageProcessor, AutoModelForDepthEstimation
 from PIL import Image, ImageFilter
 import numpy as np
 import torch
 from scipy.ndimage import gaussian_filter
+import cv2
 
-# Load the OneFormer processor and model globally
-…
-…
+# Load the OneFormer processor and model globally
+oneformer_processor = None
+oneformer_model = None
 try:
-…
-…
+    oneformer_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_coco_swin_large")
+    oneformer_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_coco_swin_large")
 except Exception as e:
     print(f"Error loading OneFormer model: {e}")
 
-def apply_gaussian_blur(image, mask, radius):
+# Load the Depth Estimation processor and model globally
+depth_processor = None
+depth_model = None
+try:
+    depth_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
+    depth_model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
+except Exception as e:
+    print(f"Error loading Depth Anything model: {e}")
+
+def apply_gaussian_blur_background(image, mask, radius):
     """Applies Gaussian blur to the background of the image."""
     blurred_background = image.filter(ImageFilter.GaussianBlur(radius=radius))
     img_array = np.array(image)
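Editor's aside, not part of this commit: both models are loaded once at import time and stay on CPU as written. If a GPU is available, a minimal sketch of moving them over (assuming the globals above loaded successfully):

# Editor's sketch (not in the diff): optional GPU placement for both models.
device = "cuda" if torch.cuda.is_available() else "cpu"
if oneformer_model is not None:
    oneformer_model = oneformer_model.to(device).eval()
if depth_model is not None:
    depth_model = depth_model.to(device).eval()
# Tensors returned by the processors would then need .to(device) before each forward pass.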
@@ -24,20 +34,66 @@ def apply_gaussian_blur(image, mask, radius):
     final_image_array = np.where(foreground_mask_3d, img_array, blurred_array)
     return Image.fromarray(final_image_array.astype(np.uint8))
 
-def …
-    """…
-…
-…
-    mask_array = np.array(mask) / 255.0  # Normalize mask to 0-1
+def apply_depth_based_blur_background(image, mask, strength):
+    """Applies lens blur to the background of the image based on depth estimation."""
+    resized_image = image.resize((512, 512))
+    image_np = np.array(resized_image)
 
-…
-…
+    if depth_processor is None or depth_model is None:
+        return "Error: Depth Anything model not loaded."
+
+    # Prepare image for the depth estimation model
+    inputs = depth_processor(images=resized_image, return_tensors="pt")
+
+    with torch.no_grad():
+        outputs = depth_model(**inputs)
+        predicted_depth = outputs.predicted_depth
+
+    # Interpolate depth map to the resized image size
+    prediction = torch.nn.functional.interpolate(
+        predicted_depth.unsqueeze(1),
+        size=resized_image.size[::-1],
+        mode="bicubic",
+        align_corners=False,
+    ).squeeze().cpu().numpy()
+
+    # Normalize the depth map to the range 0-1
+    depth_norm = (prediction - np.min(prediction)) / (np.max(prediction) - np.min(prediction))
+
+    num_blur_levels = 5
+    blurred_layers = []
+    for i in range(num_blur_levels):
+        sigma = i * (strength / 5)  # Adjust sigma based on strength
+        if sigma == 0:
+            blurred = image_np
+        else:
+            blurred = cv2.GaussianBlur(image_np, (15, 15), sigmaX=sigma, sigmaY=sigma, borderType=cv2.BORDER_REPLICATE)
+        blurred_layers.append(blurred)
+
+    depth_indices = ((1 - depth_norm) * (num_blur_levels - 1)).astype(np.uint8)
+
+    final_blurred_image_resized = np.zeros_like(image_np)
+    for y in range(image_np.shape[0]):
+        for x in range(image_np.shape[1]):
+            depth_index = depth_indices[y, x]
+            final_blurred_image_resized[y, x] = blurred_layers[depth_index][y, x]
+
+    final_blurred_pil_resized = Image.fromarray(final_blurred_image_resized.astype(np.uint8))
+    final_blurred_pil = final_blurred_pil_resized.resize(image.size)
+    final_blurred_array = np.array(final_blurred_pil)
+    original_array = np.array(image)
+    mask_resized = mask.resize(image.size)
+    mask_array = np.array(mask_resized) > 0
+    mask_array_3d = np.stack([mask_array] * 3, axis=-1)
+
+    # Apply the mask to combine the original foreground with the blurred background
+    final_output_array = np.where(mask_array_3d, original_array, final_blurred_array)
+    return Image.fromarray(final_output_array.astype(np.uint8))
 
-    return Image.fromarray(blurred_image.astype(np.uint8))
 
 def segment_and_blur(input_image, blur_type, gaussian_radius=15, lens_strength=5):
     """Segments the input image and applies the selected blur."""
-    if …
+    if oneformer_processor is None or oneformer_model is None:
         return "Error: OneFormer model not loaded."
 
     image = input_image.convert("RGB")
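Editor's note: the nested y/x loop added above gathers one pixel at a time in Python, which is slow even at 512x512. A vectorized NumPy equivalent (a sketch, not part of the commit; gather_blur_levels is a hypothetical helper name) produces the same result:

# Editor's sketch (not in the diff): a vectorized equivalent of the
# per-pixel gather loop in apply_depth_based_blur_background.
import numpy as np

def gather_blur_levels(blurred_layers, depth_indices):
    """Select, for each pixel, the blur layer named by depth_indices."""
    stacked = np.stack(blurred_layers, axis=0)   # (levels, H, W, 3)
    ys, xs = np.indices(depth_indices.shape)     # (H, W) coordinate grids
    return stacked[depth_indices, ys, xs]        # (H, W, 3) gathered image

# Usage: final_blurred_image_resized = gather_blur_levels(blurred_layers, depth_indices)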
@@ -45,18 +101,18 @@ def segment_and_blur(input_image, blur_type, gaussian_radius=15, lens_strength=5
     image = image.rotate(-90, expand=True)
 
     # Prepare input for semantic segmentation
-    inputs = …
+    inputs = oneformer_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
 
     # Semantic segmentation
     with torch.no_grad():
-        outputs = …
+        outputs = oneformer_model(**inputs)
 
     # Processing semantic segmentation output
-    predicted_semantic_map = …
+    predicted_semantic_map = oneformer_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
     segmentation_mask = predicted_semantic_map.cpu().numpy()
 
     # Get the mapping of class IDs to labels
-    id2label = …
+    id2label = oneformer_model.config.id2label
 
     # Set foreground label to person
     foreground_label = 'person'
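Editor's note: the next hunk uses foreground_class_id, which is resolved from id2label in new lines 119-130, a region this diff does not show. A hypothetical reconstruction of that lookup, for reading context only:

# Hypothetical sketch (the actual elided lines may differ): map the
# foreground_label string to its numeric class id in id2label.
foreground_class_id = None
for class_id, label in id2label.items():
    if label == foreground_label:
        foreground_class_id = class_id
        break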
@@ -75,14 +131,13 @@ def segment_and_blur(input_image, blur_type, gaussian_radius=15, lens_strength=5
     # Set the pixels corresponding to the foreground object to white (255)
     output_mask_array[segmentation_mask == foreground_class_id] = 255
 
-    # Convert the NumPy array to a PIL Image
-    mask_pil = Image.fromarray(output_mask_array, mode='L')
-    mask_array = np.array(mask_pil)
+    # Convert the NumPy array to a PIL Image
+    mask_pil = Image.fromarray(output_mask_array, mode='L')
 
     if blur_type == "Gaussian":
-        blurred_image = …
+        blurred_image = apply_gaussian_blur_background(image, mask_pil, gaussian_radius)
     elif blur_type == "Lens":
-        blurred_image = …
+        blurred_image = apply_depth_based_blur_background(image, mask_pil, lens_strength)
     else:
         return "Error: Invalid blur type selected."
 
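Editor's note: the diff ends at line 143, before the Gradio UI that app.py presumably builds around segment_and_blur. A hypothetical wiring, using only standard Gradio components, to show how the function's parameters would line up with the controls:

# Hypothetical usage (the UI code is outside this diff): expose
# segment_and_blur through a simple Gradio interface.
demo = gr.Interface(
    fn=segment_and_blur,
    inputs=[
        gr.Image(type="pil", label="Input image"),
        gr.Radio(["Gaussian", "Lens"], value="Gaussian", label="Blur type"),
        gr.Slider(1, 50, value=15, step=1, label="Gaussian radius"),
        gr.Slider(1, 10, value=5, step=1, label="Lens strength"),
    ],
    outputs=gr.Image(type="pil", label="Blurred result"),
)

if __name__ == "__main__":
    demo.launch()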