File size: 3,765 Bytes
2aa301a
 
 
 
 
478fbc7
2aa301a
478fbc7
2aa301a
478fbc7
 
2aa301a
5752949
7b7deb0
 
 
 
 
 
 
 
 
 
2aa301a
 
 
 
478fbc7
 
5752949
 
 
478fbc7
 
2aa301a
7b7deb0
 
 
 
2aa301a
7b7deb0
2aa301a
7b7deb0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2aa301a
7fc5388
5752949
 
7b7deb0
 
 
 
 
 
 
 
 
 
 
 
 
2aa301a
7b7deb0
 
 
 
 
 
 
 
 
 
 
2aa301a
7b7deb0
 
 
2aa301a
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import gradio as gr
import numpy as np
import torch
from PIL import Image
import cv2
import requests
# NOTE(review): `torch` and `requests` are imported but never referenced in this
# file — confirm they are not needed (e.g. by transformers device placement)
# before removing.

from transformers import pipeline

# Load the depth estimation pipeline once at import time.
# First run downloads the Depth Anything V2 Small checkpoint from the HF Hub.
pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")


def _masked_blur(image, mask, kernel_size, sigma):
    """Gaussian-blur `image` and keep blurred pixels only where `mask` == 255.

    Returns `image` unchanged when `kernel_size` is not a positive odd number,
    since cv2.GaussianBlur requires positive odd kernel dimensions.
    """
    if kernel_size <= 0 or kernel_size % 2 == 0:
        return image
    blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), sigma)
    # mask is (H, W); broadcast against the (H, W, 3) image.
    return np.where(mask[..., None] == 255, blurred, image)


def apply_depth_aware_blur_inverse(
    image,
    foreground_blur,
    midground_blur,
    background_blur,
    foreground_threshold,
    midground_lower,
    midground_upper,
    background_threshold,
):
    """Apply an inverse depth-of-field blur: closer regions are blurred more.

    The input is resized to 512x512, a depth map is estimated with the
    Depth Anything V2 pipeline, and three depth bands (foreground / midground /
    background) each receive their own Gaussian blur. "Inverse" means the
    foreground (high normalized depth in Depth Anything maps) gets the blur
    that a normal lens would apply to the background.

    Parameters:
        image: HxWx3 uint8 RGB array (as delivered by gr.Image).
        foreground_blur, midground_blur, background_blur: odd Gaussian kernel
            sizes; a band is left sharp if its kernel is not a positive odd int.
        foreground_threshold: normalized depth >= this is foreground.
        midground_lower, midground_upper: normalized depth in
            [midground_upper, midground_lower) is midground.
        background_threshold: normalized depth < this is background.

    Returns:
        PIL.Image.Image: the 512x512 blurred RGB image.
    """
    original_image = Image.fromarray(image).convert("RGB")
    original_image = original_image.resize((512, 512))

    # Inference: pipeline returns a PIL depth image; resize to match the RGB.
    depth = pipe(original_image)["depth"]
    depth = np.array(depth)
    depth = cv2.resize(depth, (512, 512), interpolation=cv2.INTER_CUBIC)

    # Normalize the depth map to [0, 1].
    # BUG FIX: guard against a constant depth map (max == min), which
    # previously caused a division by zero.
    depth_min = np.min(depth)
    depth_range = np.max(depth) - depth_min
    if depth_range == 0:
        normalized_depth_map = np.zeros(depth.shape, dtype=np.float64)
    else:
        normalized_depth_map = (depth - depth_min) / depth_range

    # Create masks (inverted logic: high depth value = near = foreground).
    # BUG FIX: the midground band previously ignored midground_lower /
    # midground_upper and reused foreground_threshold / background_threshold.
    # With the documented defaults (0.6 / 0.2) behavior is unchanged.
    foreground_mask = (normalized_depth_map >= foreground_threshold).astype(np.uint8) * 255
    midground_mask = (
        (normalized_depth_map < midground_lower)
        & (normalized_depth_map >= midground_upper)
    ).astype(np.uint8) * 255
    background_mask = (normalized_depth_map < background_threshold).astype(np.uint8) * 255

    # Apply each band's blur; sigmas mirror the original per-band settings.
    blurred_image = np.copy(np.array(original_image))
    blurred_image = _masked_blur(blurred_image, foreground_mask, foreground_blur, 10)
    blurred_image = _masked_blur(blurred_image, midground_mask, midground_blur, 8)
    blurred_image = _masked_blur(blurred_image, background_mask, background_blur, 20)

    return Image.fromarray(blurred_image.astype(np.uint8))


# Example inputs for the Gradio examples row (values match the documented defaults).
example_image = np.zeros((512, 512, 3), dtype=np.uint8)  # all-black placeholder image

# Kernel sizes for foreground / midground / background blur.
_example_blurs = [35, 7, 15]
# Thresholds: foreground, midground lower, midground upper, background.
_example_thresholds = [0.6, 0.6, 0.2, 0.2]

example_inputs = [example_image, *_example_blurs, *_example_thresholds]

# Build the slider controls programmatically: three blur-kernel sliders
# (odd sizes only, via step=2) followed by four [0, 1] threshold sliders.
_blur_sliders = [
    gr.Slider(1, 51, step=2, label=f"{region} Blur Kernel Size")
    for region in ("Foreground", "Midground", "Background")
]
_threshold_sliders = [
    gr.Slider(0, 1, label=f"{region} Threshold")
    for region in ("Foreground", "Midground Lower", "Midground Upper", "Background")
]

# Web UI wiring: one image in, one image out, with a pre-filled example row.
iface = gr.Interface(
    apply_depth_aware_blur_inverse,
    inputs=[gr.Image(label="Input Image"), *_blur_sliders, *_threshold_sliders],
    outputs=gr.Image(label="Blurred Image"),
    title="Inverse Depth-Aware Lens Blur App",
    description=(
        "Apply inverse depth-based blur to uploaded images using Depth Anything V2. "
        "Closer objects are blurred, farther objects are sharper."
    ),
    examples=[example_inputs],
)

# Launch the Gradio server only when this file is run as a script,
# not when imported as a module.
if __name__ == "__main__":
    iface.launch()