dezzman committed on
Commit
c6ed7d9
·
verified ·
1 Parent(s): 8a52ce7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -34
app.py CHANGED
@@ -64,12 +64,6 @@ def infer(
64
  lora_scale=0.5
65
  ):
66
  generator = torch.Generator(device).manual_seed(seed)
67
-
68
- print(prompt)
69
- print(type(prompt))
70
-
71
- print(negative_prompt)
72
- print(type(negative_prompt))
73
 
74
  if model_id != model_id_default:
75
  pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
@@ -95,37 +89,111 @@ def infer(
95
 
96
  return pipe(**params).images[0]
97
 
98
- with gr.Blocks() as demo:
99
- with gr.Column():
100
- gr.Markdown("# DEMO Text-to-Image")
101
- model_id = gr.Textbox(label="Model ID", value=model_id_default)
102
- prompt = gr.Textbox(label="Prompt")
103
- negative_prompt = gr.Textbox(label="Negative prompt")
104
- seed = gr.Number(label="Seed", value=42)
105
- guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, value=7.0)
106
- lora_scale = gr.Slider(label="LoRA scale", minimum=0.0, maximum=1.0, value=0.5)
107
- num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, value=20)
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  with gr.Accordion("Optional Settings", open=False):
110
- width = gr.Slider(label="Width", minimum=256, maximum=1024, value=512, step=32)
111
- height = gr.Slider(label="Height", minimum=256, maximum=1024, value=512, step=32)
112
-
113
- run_button = gr.Button("Run")
114
- result = gr.Image(label="Result")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
- run_button.click(
117
- fn=infer,
 
118
  inputs=[
119
- prompt,
120
- negative_prompt,
121
- width,
122
- height,
123
- num_inference_steps,
124
- model_id, seed,
125
- guidance_scale,
126
- lora_scale
127
- ],
128
- outputs=result)
 
 
129
 
130
  if __name__ == "__main__":
131
- demo.launch()
 
64
  lora_scale=0.5
65
  ):
66
  generator = torch.Generator(device).manual_seed(seed)
 
 
 
 
 
 
67
 
68
  if model_id != model_id_default:
69
  pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
 
89
 
90
  return pipe(**params).images[0]
91
 
92
+ css = """
93
+ #col-container {
94
+ margin: 0 auto;
95
+ max-width: 640px;
96
+ }
97
+ """
98
+
99
+ with gr.Blocks(css=css) as demo:
100
+ with gr.Column(elem_id="col-container"):
101
+ gr.Markdown(" # DEMO Text-to-Image")
102
 
103
+ with gr.Row():
104
+ model_id = gr.Textbox(
105
+ label="Model ID",
106
+ max_lines=1,
107
+ placeholder="Enter model id like 'CompVis/stable-diffusion-v1-4'",
108
+ value=model_id_default
109
+ )
110
+
111
+ prompt = gr.Textbox(
112
+ label="Prompt",
113
+ max_lines=1,
114
+ placeholder="Enter your prompt",
115
+ )
116
+
117
+ negative_prompt = gr.Textbox(
118
+ label="Negative prompt",
119
+ max_lines=1,
120
+ placeholder="Enter a negative prompt",
121
+ )
122
+
123
+ with gr.Row():
124
+ seed = gr.Number(
125
+ label="Seed",
126
+ minimum=0,
127
+ maximum=MAX_SEED,
128
+ step=1,
129
+ value=42,
130
+ )
131
+
132
+ with gr.Row():
133
+ guidance_scale = gr.Slider(
134
+ label="Guidance scale",
135
+ minimum=0.0,
136
+ maximum=10.0,
137
+ step=0.1,
138
+ value=7.0,
139
+ )
140
+
141
+ with gr.Row():
142
+ lora_scale = gr.Slider(
143
+ label="LoRA scale",
144
+ minimum=0.0,
145
+ maximum=1.0,
146
+ step=0.1,
147
+ value=0.5,
148
+ )
149
+
150
+ with gr.Row():
151
+ num_inference_steps = gr.Slider(
152
+ label="Number of inference steps",
153
+ minimum=1,
154
+ maximum=50,
155
+ step=1,
156
+ value=20,
157
+ )
158
+
159
  with gr.Accordion("Optional Settings", open=False):
160
+ with gr.Row():
161
+ width = gr.Slider(
162
+ label="Width",
163
+ minimum=256,
164
+ maximum=MAX_IMAGE_SIZE,
165
+ step=32,
166
+ value=512,
167
+ )
168
+
169
+ with gr.Row():
170
+ height = gr.Slider(
171
+ label="Height",
172
+ minimum=256,
173
+ maximum=MAX_IMAGE_SIZE,
174
+ step=32,
175
+ value=512,
176
+ )
177
+
178
+ run_button = gr.Button("Run", scale=1, variant="primary")
179
+ result = gr.Image(label="Result", show_label=False)
180
 
181
+ gr.on(
182
+ triggers=[run_button.click, prompt.submit],
183
+ fn=infer,
184
  inputs=[
185
+ prompt,
186
+ negative_prompt,
187
+ width,
188
+ height,
189
+ num_inference_steps,
190
+ model_id,
191
+ seed,
192
+ guidance_scale,
193
+ lora_scale,
194
+ ],
195
+ outputs=[result],
196
+ )
197
 
198
  if __name__ == "__main__":
199
+ demo.launch()