gif
- __pycache__/sde.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- app.py +10 -92
- configs/__pycache__/t2i_512px_clip_dimr.cpython-310.pyc +0 -0
- diffusion/__pycache__/base_solver.cpython-310.pyc +0 -0
- diffusion/__pycache__/flow_matching.cpython-310.pyc +0 -0
- libs/__pycache__/__init__.cpython-310.pyc +0 -0
- libs/__pycache__/autoencoder.cpython-310.pyc +0 -0
- libs/__pycache__/clip.cpython-310.pyc +0 -0
- libs/model/__pycache__/axial_rope.cpython-310.pyc +0 -0
- libs/model/__pycache__/common_layers.cpython-310.pyc +0 -0
- libs/model/__pycache__/dimr_t2i.cpython-310.pyc +0 -0
- libs/model/__pycache__/flags.cpython-310.pyc +0 -0
- libs/model/__pycache__/trans_autoencoder.cpython-310.pyc +0 -0
- libs/model/sigmoid/__pycache__/kernel.cpython-310.pyc +0 -0
- libs/model/sigmoid/__pycache__/module.cpython-310.pyc +0 -0
__pycache__/sde.cpython-310.pyc
CHANGED
Binary files a/__pycache__/sde.cpython-310.pyc and b/__pycache__/sde.cpython-310.pyc differ
__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
app.py
CHANGED
@@ -237,19 +237,16 @@ def infer(
     to_pil = ToPILImage()
     pil_images = [to_pil(img) for img in samples]
 
-    # Get the first and last images
     first_image = pil_images[0]
     last_image = pil_images[-1]
 
-
-
-
-
-    # gif_buffer.seek(0)
-    # gif_bytes = gif_buffer.read()
+    gif_buffer = io.BytesIO()
+    pil_images[0].save(gif_buffer, format="GIF", save_all=True, append_images=pil_images[1:], duration=10, loop=0)
+    gif_buffer.seek(0)
+    gif_bytes = gif_buffer.read()
 
-
-    return first_image, last_image, seed
+    return first_image, last_image, gif_bytes, seed
+    # return first_image, last_image, seed
 
 
 # examples = [
@@ -297,7 +294,7 @@ with gr.Blocks(css=css) as demo:
         # Create separate outputs for the first image, last image, and the animated GIF
         first_image_output = gr.Image(label="Image if the first prompt", show_label=True)
         last_image_output = gr.Image(label="Image if the second prompt", show_label=True)
-
+        gif_output = gr.Image(label="Linear interpolation", show_label=True)
 
         with gr.Accordion("Advanced Settings", open=False):
             seed = gr.Slider(
@@ -324,7 +321,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=1,
                 maximum=50,
                 step=1,
-                value=
+                value=50, # Replace with defaults that work for your model
             )
             with gr.Row():
                 num_of_interpolation = gr.Slider(
@@ -348,89 +345,10 @@ with gr.Blocks(css=css) as demo:
             num_inference_steps,
             num_of_interpolation,
         ],
-
-        outputs=[first_image_output, last_image_output, seed],
+        outputs=[first_image_output, last_image_output, gif_output, seed],
+        # outputs=[first_image_output, last_image_output, seed],
     )
 
-# with gr.Blocks(css=css) as demo:
-#     with gr.Column(elem_id="col-container"):
-#         gr.Markdown(" # CrossFlow")
-#         gr.Markdown(" CrossFlow directly transforms text representations into images for text-to-image generation, enabling interpolation in the input text latent space.")
-
-#         with gr.Row():
-#             prompt1 = gr.Text(
-#                 label="Prompt_1",
-#                 show_label=False,
-#                 max_lines=1,
-#                 placeholder="Enter your prompt for the first image",
-#                 container=False,
-#             )
-
-#         with gr.Row():
-#             prompt2 = gr.Text(
-#                 label="Prompt_2",
-#                 show_label=False,
-#                 max_lines=1,
-#                 placeholder="Enter your prompt for the second image",
-#                 container=False,
-#             )
-
-#         with gr.Row():
-#             run_button = gr.Button("Run", scale=0, variant="primary")
-
-#         result = gr.Image(label="Result", show_label=False)
-
-#         with gr.Accordion("Advanced Settings", open=False):
-#             seed = gr.Slider(
-#                 label="Seed",
-#                 minimum=0,
-#                 maximum=MAX_SEED,
-#                 step=1,
-#                 value=0,
-#             )
-
-#             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-#             with gr.Row():
-#                 guidance_scale = gr.Slider(
-#                     label="Guidance scale",
-#                     minimum=0.0,
-#                     maximum=10.0,
-#                     step=0.1,
-#                     value=7.0, # Replace with defaults that work for your model
-#                 )
-#             with gr.Row():
-#                 num_inference_steps = gr.Slider(
-#                     label="Number of inference steps - 50 inference steps are recommended; but you can reduce to 20 if the demo fails.",
-#                     minimum=1,
-#                     maximum=50,
-#                     step=1,
-#                     value=50, # Replace with defaults that work for your model
-#                 )
-#             with gr.Row():
-#                 num_of_interpolation = gr.Slider(
-#                     label="Number of images for interpolation - More images yield smoother transitions but require more resources and may fail.",
-#                     minimum=5,
-#                     maximum=50,
-#                     step=1,
-#                     value=10, # Replace with defaults that work for your model
-#                 )
-
-#     gr.Examples(examples=examples, inputs=[prompt1, prompt2])
-#     gr.on(
-#         triggers=[run_button.click, prompt1.submit, prompt2.submit],
-#         fn=infer,
-#         inputs=[
-#             prompt1,
-#             prompt2,
-#             seed,
-#             randomize_seed,
-#             guidance_scale,
-#             num_inference_steps,
-#             num_of_interpolation,
-#         ],
-#         outputs=[result, seed],
-#     )
 
 if __name__ == "__main__":
     demo.launch()
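For reference, the lines added to `infer` build the animated GIF entirely in memory with Pillow and return the raw bytes alongside the first and last frames. A minimal standalone sketch of the same pattern, assuming a list of PIL frames (the helper name `frames_to_gif_bytes` is ours, not part of the app):

import io
from PIL import Image

def frames_to_gif_bytes(frames: "list[Image.Image]", ms_per_frame: int = 10) -> bytes:
    """Encode a sequence of PIL images as an animated GIF and return the raw bytes."""
    buffer = io.BytesIO()
    frames[0].save(
        buffer,
        format="GIF",
        save_all=True,             # write every frame, not just the first
        append_images=frames[1:],  # remaining frames, in order
        duration=ms_per_frame,     # Pillow's duration is milliseconds per frame
        loop=0,                    # 0 = loop forever
    )
    buffer.seek(0)
    return buffer.read()

Two caveats: since Pillow's `duration` is milliseconds per frame, the committed `duration=10` targets roughly 100 fps, and most GIF viewers clamp such short frame delays; and `gr.Image` expects a filepath, PIL image, or numpy array, so depending on the Gradio version the returned raw `gif_bytes` may need to be written to a temporary `.gif` file before it displays as an animation.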
configs/__pycache__/t2i_512px_clip_dimr.cpython-310.pyc
CHANGED
Binary files a/configs/__pycache__/t2i_512px_clip_dimr.cpython-310.pyc and b/configs/__pycache__/t2i_512px_clip_dimr.cpython-310.pyc differ
diffusion/__pycache__/base_solver.cpython-310.pyc
CHANGED
Binary files a/diffusion/__pycache__/base_solver.cpython-310.pyc and b/diffusion/__pycache__/base_solver.cpython-310.pyc differ
diffusion/__pycache__/flow_matching.cpython-310.pyc
CHANGED
Binary files a/diffusion/__pycache__/flow_matching.cpython-310.pyc and b/diffusion/__pycache__/flow_matching.cpython-310.pyc differ
libs/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/libs/__pycache__/__init__.cpython-310.pyc and b/libs/__pycache__/__init__.cpython-310.pyc differ
libs/__pycache__/autoencoder.cpython-310.pyc
CHANGED
Binary files a/libs/__pycache__/autoencoder.cpython-310.pyc and b/libs/__pycache__/autoencoder.cpython-310.pyc differ
libs/__pycache__/clip.cpython-310.pyc
CHANGED
Binary files a/libs/__pycache__/clip.cpython-310.pyc and b/libs/__pycache__/clip.cpython-310.pyc differ
libs/model/__pycache__/axial_rope.cpython-310.pyc
CHANGED
Binary files a/libs/model/__pycache__/axial_rope.cpython-310.pyc and b/libs/model/__pycache__/axial_rope.cpython-310.pyc differ
libs/model/__pycache__/common_layers.cpython-310.pyc
CHANGED
Binary files a/libs/model/__pycache__/common_layers.cpython-310.pyc and b/libs/model/__pycache__/common_layers.cpython-310.pyc differ
libs/model/__pycache__/dimr_t2i.cpython-310.pyc
CHANGED
Binary files a/libs/model/__pycache__/dimr_t2i.cpython-310.pyc and b/libs/model/__pycache__/dimr_t2i.cpython-310.pyc differ
libs/model/__pycache__/flags.cpython-310.pyc
CHANGED
Binary files a/libs/model/__pycache__/flags.cpython-310.pyc and b/libs/model/__pycache__/flags.cpython-310.pyc differ
libs/model/__pycache__/trans_autoencoder.cpython-310.pyc
CHANGED
Binary files a/libs/model/__pycache__/trans_autoencoder.cpython-310.pyc and b/libs/model/__pycache__/trans_autoencoder.cpython-310.pyc differ
libs/model/sigmoid/__pycache__/kernel.cpython-310.pyc
CHANGED
Binary files a/libs/model/sigmoid/__pycache__/kernel.cpython-310.pyc and b/libs/model/sigmoid/__pycache__/kernel.cpython-310.pyc differ
libs/model/sigmoid/__pycache__/module.cpython-310.pyc
CHANGED
Binary files a/libs/model/sigmoid/__pycache__/module.cpython-310.pyc and b/libs/model/sigmoid/__pycache__/module.cpython-310.pyc differ