 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import os

# Import from our modules
from constants import class_names
from segmentation import segment_image
from utils import process_url, process_person_and_garment

# Define fixed size for consistent image display
FIXED_IMAGE_SIZE = (400, 400)

def create_interface():
    """Build and return the Gradio Blocks app for garment-based segmentation.

    Layout: the left column collects a person photo, a garment photo and
    three display toggles, plus the process button; the right column holds
    the results gallery and a status textbox. Custom CSS is injected at the
    end for consistent image sizing and larger controls.

    Returns:
        The assembled (not yet launched) ``gr.Blocks`` demo.
    """
    with gr.Blocks(title="Garment-based Segmentation") as demo:
        gr.Markdown("""
        # Garment-based Segmentation with SegFormer and Fashion-CLIP
        
        This application uses AI models to segment specific clothing items in images by matching a garment to a person.
        """)
        
        with gr.Row():
            # --- Left column: image inputs and controls ---
            with gr.Column(scale=1):
                gr.Markdown("### Person Image")
                person_input = gr.Image(
                    type="pil",
                    label="Upload a person wearing clothes",
                    height=300,
                    sources=["upload", "webcam", "clipboard"],
                    elem_id="person-image-upload",
                )
                
                gr.Markdown("### Garment Image")
                garment_input = gr.Image(
                    type="pil",
                    label="Upload a garment to detect",
                    height=300,
                    sources=["upload", "webcam", "clipboard"],
                    elem_id="garment-image-upload",
                )
                
                # Toggles selecting which views appear in the gallery.
                with gr.Row():
                    show_original = gr.Checkbox(label="Show Original", value=True)
                    show_segmentation = gr.Checkbox(label="Show Segmentation", value=True)
                    show_overlay = gr.Checkbox(label="Show Overlay", value=True)
                
                run_button = gr.Button(
                    "Process Images",
                    variant="primary",
                    size="lg",
                    elem_id="process-button",
                )
            
            # --- Right column: results ---
            with gr.Column(scale=2):
                results_gallery = gr.Gallery(
                    label="Results",
                    columns=3,
                    height=450,
                    object_fit="contain",
                    elem_id="dual_gallery",
                )
                status_box = gr.Textbox(label="Result", interactive=False, lines=4)
        
        def _handle_process(person_img, garment_img, orig, seg, over):
            # Forward to the processing pipeline, pinning the fixed display size.
            return process_person_and_garment(
                person_img, garment_img, orig, seg, over, FIXED_IMAGE_SIZE
            )
        
        # Wire the button to the dual-image processing handler.
        run_button.click(
            fn=_handle_process,
            inputs=[person_input, garment_input, show_original, show_segmentation, show_overlay],
            outputs=[results_gallery, status_box],
        )
        
        # Custom CSS for consistent image sizes and improved UI.
        gr.HTML("""
        <style>
        .gradio-container img {
            max-height: 400px !important;
            object-fit: contain !important;
        }
        #dual_gallery {
            min-height: 450px;
        }
        /* Larger upload buttons */
        #person-image-upload .upload-button, 
        #garment-image-upload .upload-button {
            font-size: 1.2em !important;
            padding: 12px 20px !important;
            border-radius: 8px !important;
            margin: 10px auto !important;
            display: block !important;
            width: 80% !important;
            text-align: center !important;
            background-color: #4CAF50 !important;
            color: white !important;
            border: none !important;
            cursor: pointer !important;
            transition: background-color 0.3s ease !important;
        }
        #person-image-upload .upload-button:hover, 
        #garment-image-upload .upload-button:hover {
            background-color: #45a049 !important;
        }
        /* Larger process button */
        #process-button {
            font-size: 1.3em !important;
            padding: 15px 25px !important;
            margin: 15px auto !important;
            display: block !important;
            width: 90% !important;
        }
        /* Better section headers */
        h3 {
            font-size: 1.5em !important;
            margin-top: 20px !important;
            margin-bottom: 15px !important;
            color: #2c3e50 !important;
            border-bottom: 2px solid #3498db !important;
            padding-bottom: 8px !important;
        }
        /* Better main heading */
        h1 {
            color: #2c3e50 !important;
            text-align: center !important;
            margin-bottom: 30px !important;
            font-size: 2.5em !important;
        }
        /* Better checkbox layout */
        .gradio-checkbox {
            margin: 10px 5px !important;
        }
        </style>
        """)
    
    return demo

# Script entry point: build the interface and start the Gradio server.
if __name__ == "__main__":
    create_interface().launch()