import io
import os
import random
import tempfile

import gradio as gr
import ml_collections
import numpy as np
import spaces
import torch
import torch.nn.functional as F
from absl import app, flags, logging
from huggingface_hub import hf_hub_download
from ml_collections import config_flags
from torchvision.transforms import ToPILImage
from torchvision.utils import save_image

import libs.autoencoder
import utils
from configs import t2i_256px_clip_dimr, t2i_512px_clip_dimr
from diffusion.flow_matching import ODEEulerFlowMatchingSolver
from libs.clip import FrozenCLIPEmbedder

def unpreprocess(x: torch.Tensor) -> torch.Tensor:
    """Map images from [-1, 1] back to [0, 1] for visualization."""
    x = 0.5 * (x + 1.0)
    x.clamp_(0.0, 1.0)
    return x

def cosine_similarity_torch(latent1: torch.Tensor, latent2: torch.Tensor) -> torch.Tensor:
    """Cosine similarity between two latents, computed on the flattened tensors."""
    latent1_flat = latent1.view(-1)
    latent2_flat = latent2.view(-1)
    cosine_similarity = F.cosine_similarity(
        latent1_flat.unsqueeze(0), latent2_flat.unsqueeze(0), dim=1
    )
    return cosine_similarity

def kl_divergence(latent1: torch.Tensor, latent2: torch.Tensor) -> torch.Tensor:
    """KL divergence between the softmax distributions of two latents."""
    latent1_prob = F.softmax(latent1, dim=-1)
    latent2_prob = F.softmax(latent2, dim=-1)
    latent1_log_prob = torch.log(latent1_prob)
    kl_div = F.kl_div(latent1_log_prob, latent2_prob, reduction="batchmean")
    return kl_div

def batch_decode(_z: torch.Tensor, decode, batch_size: int = 5) -> torch.Tensor:
    """Decode latents in small batches to limit peak GPU memory usage."""
    num_samples = _z.size(0)
    decoded_batches = []

    for i in range(0, num_samples, batch_size):
        batch = _z[i : i + batch_size]
        decoded_batch = decode(batch)
        decoded_batches.append(decoded_batch)

    return torch.cat(decoded_batches, dim=0)

def get_caption(llm: str, text_model, prompt_dict: dict, batch_size: int):
    """Build the text-embedding batch for the requested sampling mode.

    For batch sizes 3 and 4 (arithmetic), the given prompts are padded with an
    empty prompt; for batch sizes >= 5 (linear interpolation), the two prompts
    are placed at the ends and the middle slots are filled with empty prompts.
    """
    if batch_size == 3:
        assert len(prompt_dict) == 2, "Expected 2 prompts for batch_size 3."
        batch_prompts = list(prompt_dict.values()) + [" "]
    elif batch_size == 4:
        assert len(prompt_dict) == 3, "Expected 3 prompts for batch_size 4."
        batch_prompts = list(prompt_dict.values()) + [" "]
    elif batch_size >= 5:
        assert len(prompt_dict) == 2, "Expected 2 prompts for linear interpolation."
        batch_prompts = [prompt_dict["prompt_1"]] + [" "] * (batch_size - 2) + [prompt_dict["prompt_2"]]
    else:
        raise ValueError(f"Unsupported batch_size: {batch_size}")

    if llm == "clip":
        latent, latent_and_others = text_model.encode(batch_prompts)
        context = latent_and_others["token_embedding"].detach()
    elif llm == "t5":
        latent, latent_and_others = text_model.get_text_embeddings(batch_prompts)
        context = (latent_and_others["token_embedding"] * 10.0).detach()
    else:
        raise NotImplementedError(f"Language model {llm} not supported.")

    token_mask = latent_and_others["token_mask"].detach()
    tokens = latent_and_others["tokens"].detach()
    captions = batch_prompts

    return context, token_mask, tokens, captions
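

# For reference, with batch_size=5 and prompts "A" and "B", `get_caption` encodes the
# batch ["A", " ", " ", " ", "B"]: the two real prompts sit at the ends, and the latents
# of the placeholder slots are overwritten by the interpolation logic in `infer` below.
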
# Build the model configurations: config_1 drives the 256px model, config_2 the 512px model.
config_1 = ml_collections.ConfigDict(t2i_256px_clip_dimr.get_config())
config_2 = ml_collections.ConfigDict(t2i_512px_clip_dimr.get_config())

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Using device: {device}")

config_1 = ml_collections.FrozenConfigDict(config_1)
config_2 = ml_collections.FrozenConfigDict(config_2)

torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# Download the pretrained CrossFlow checkpoints from the Hugging Face Hub and load them.
repo_id = "QHL067/CrossFlow"

# 256px model, used for linear interpolation.
checkpoint_path = hf_hub_download(repo_id=repo_id, filename="pretrained_models/t2i_256px_clip_dimr.pth")
nnet_1 = utils.get_nnet(**config_1.nnet)
nnet_1 = nnet_1.to(device)
nnet_1.load_state_dict(torch.load(checkpoint_path, map_location=device))
nnet_1.eval()

# 512px model, used for arithmetic operations.
checkpoint_path = hf_hub_download(repo_id=repo_id, filename="pretrained_models/t2i_512px_clip_dimr.pth")
nnet_2 = utils.get_nnet(**config_2.nnet)
nnet_2 = nnet_2.to(device)
nnet_2.load_state_dict(torch.load(checkpoint_path, map_location=device))
nnet_2.eval()

# Frozen CLIP text encoder.
llm = "clip"
clip = FrozenCLIPEmbedder()
clip.eval()
clip.to(device)

# Latent-space autoencoder, used to decode latents for both models.
autoencoder = libs.autoencoder.get_model(**config_1.autoencoder)
autoencoder.to(device)
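

# Note: all weights are loaded once at module import time, so the Gradio worker keeps the
# models resident between requests; `hf_hub_download` caches the checkpoint files locally,
# so repeated launches do not re-download them.
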
@torch.cuda.amp.autocast()
def encode(_batch: torch.Tensor) -> torch.Tensor:
    """Encode a batch of images using the autoencoder."""
    return autoencoder.encode(_batch)


@torch.cuda.amp.autocast()
def decode(_batch: torch.Tensor) -> torch.Tensor:
    """Decode a batch of latent vectors using the autoencoder."""
    return autoencoder.decode(_batch)
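

# Minimal usage sketch (hypothetical): given an image batch `img` in [-1, 1] of shape
# (B, 3, H, W), one could round-trip it through the autoencoder, assuming `encode`
# returns the latent tensor expected by `decode`:
#
#   z = encode(img)
#   recon = unpreprocess(decode(z))  # back to [0, 1] for visualization
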
@spaces.GPU
def infer(
    prompt1,
    prompt2,
    seed,
    randomize_seed,
    guidance_scale,
    num_inference_steps,
    num_of_interpolation,
    operation_mode,
    save_gpu_memory=True,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    torch.manual_seed(seed)
    if device.type == "cuda":
        torch.cuda.manual_seed_all(seed)

    prompt_dict = {"prompt_1": prompt1, "prompt_2": prompt2}
    for key, value in prompt_dict.items():
        assert value is not None, f"{key} must not be None."

    if operation_mode == "Interpolation":
        assert num_of_interpolation >= 5, "For linear interpolation, please sample at least five images."
    else:
        assert num_of_interpolation == 3, "For arithmetic operations, please sample exactly three images."

    # Arithmetic (3 samples) uses the 512px model; interpolation uses the 256px model.
    if num_of_interpolation == 3:
        nnet = nnet_2
        config = config_2
    else:
        nnet = nnet_1
        config = config_1

    _context, _token_mask, _token, _caption = get_caption(
        llm, clip, prompt_dict=prompt_dict, batch_size=num_of_interpolation
    )

    with torch.no_grad():
        # Map the text embeddings to initial latents via the model's text encoder head.
        _z_gaussian = torch.randn(num_of_interpolation, *config.z_shape, device=device)
        _z_x0, _mu, _log_var = nnet(
            _context, text_encoder=True, shape=_z_gaussian.shape, mask=_token_mask
        )
        _z_init = _z_x0.reshape(_z_gaussian.shape)

        if num_of_interpolation == 3:
            # Arithmetic in the text latent space: combine the two prompt latents,
            # then re-normalize the result to zero mean and unit variance.
            if operation_mode == "Addition":
                z_init_temp = _z_init[0] + _z_init[1]
            elif operation_mode == "Subtraction":
                z_init_temp = _z_init[0] - _z_init[1]
            else:
                raise ValueError(
                    f"Unsupported operation mode for 3-sample arithmetic: {operation_mode}. "
                    "Choose 'Addition' or 'Subtraction'."
                )
            mean = z_init_temp.mean()
            std = z_init_temp.std()
            _z_init[2] = (z_init_temp - mean) / std

        elif num_of_interpolation == 4:
            raise ValueError("Unsupported number of interpolations.")

        elif num_of_interpolation >= 5:
            # Linear interpolation between the two prompt latents for the intermediate frames.
            tensor_a = _z_init[0]
            tensor_b = _z_init[-1]
            num_interpolations = num_of_interpolation - 2
            interpolations = [
                tensor_a + (tensor_b - tensor_a) * (i / (num_interpolations + 1))
                for i in range(1, num_interpolations + 1)
            ]
            _z_init = torch.stack([tensor_a] + interpolations + [tensor_b], dim=0)

        else:
            raise ValueError("Unsupported number of interpolations.")

        assert guidance_scale > 1, "Guidance scale must be greater than 1."

        # Integrate the flow-matching ODE from the text-derived latents to image latents.
        has_null_indicator = hasattr(config.nnet.model_args, "cfg_indicator")
        ode_solver = ODEEulerFlowMatchingSolver(
            nnet,
            bdv_model_fn=None,
            step_size_type="step_in_dsigma",
            guidance_scale=guidance_scale,
        )
        _z, _ = ode_solver.sample(
            x_T=_z_init,
            batch_size=num_of_interpolation,
            sample_steps=num_inference_steps,
            unconditional_guidance_scale=guidance_scale,
            has_null_indicator=has_null_indicator,
        )

        logging.info("Sampling finished; decoding images.")

        if save_gpu_memory:
            image_unprocessed = batch_decode(_z, decode)
        else:
            image_unprocessed = decode(_z)

        samples = unpreprocess(image_unprocessed).contiguous()

    to_pil = ToPILImage()
    pil_images = [to_pil(img) for img in samples]

    if num_of_interpolation == 3:
        return pil_images[0], pil_images[1], pil_images[2], seed

    first_image = pil_images[0]
    last_image = pil_images[-1]

    # Assemble all frames into an animated GIF and write it to a temporary file.
    gif_buffer = io.BytesIO()
    pil_images[0].save(
        gif_buffer, format="GIF", save_all=True, append_images=pil_images[1:], duration=200, loop=0
    )
    gif_buffer.seek(0)

    temp_gif = tempfile.NamedTemporaryFile(delete=False, suffix=".gif")
    temp_gif.write(gif_buffer.read())
    temp_gif.close()
    gif_path = temp_gif.name

    return first_image, last_image, gif_path, seed
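

# Hypothetical programmatic call, bypassing the Gradio UI (the argument values below are
# illustrative only and mirror the defaults of the interpolation tab):
#
#   first_img, last_img, gif_path, used_seed = infer(
#       "A robot cooking dinner in the kitchen",
#       "An orange cat wearing sunglasses on a ship",
#       seed=0,
#       randomize_seed=False,
#       guidance_scale=7.0,
#       num_inference_steps=25,
#       num_of_interpolation=10,
#       operation_mode="Interpolation",
#   )
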
def infer_tab1(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_inference_steps, num_of_interpolation):
    """Linear-interpolation tab: the operation mode is fixed to 'Interpolation'."""
    return infer(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_inference_steps, num_of_interpolation, "Interpolation")


def infer_tab2(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_inference_steps, operation_mode):
    """Arithmetic tab: the number of sampled images is fixed to 3."""
    return infer(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_inference_steps, 3, operation_mode)

examples_1 = [
    ["A robot cooking dinner in the kitchen", "An orange cat wearing sunglasses on a ship"],
]

examples_2 = [
    ["A dog wearing sunglasses", "a hat"],
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("# CrossFlow")
    gr.Markdown("[CVPR 2025] Flowing from Words to Pixels: A Framework for Cross-Modality Evolution")
    gr.Markdown("[CrossFlow](https://cross-flow.github.io/) achieves text-to-image generation by directly mapping text representations (source distribution) to images (target distribution), eliminating the need for a noise distribution or conditioning mechanism.")
    gr.Markdown("This direct mapping enables meaningful 'Linear Interpolation' and 'Arithmetic Operations' in the text latent space, as demonstrated here.")

    with gr.Tabs():

        with gr.Tab("[Linear Interpolation]"):
            gr.Markdown("This demo uses 256px images, 25 sampling steps (instead of 50), and 10 interpolations (instead of 50) to conserve GPU memory.")
            gr.Markdown("**You will get much better results with the original [code](https://github.com/qihao067/CrossFlow)**. (You may also adjust the sampling steps and interpolations in Advanced Settings, but doing so may trigger OOM errors.)")

            prompt1_tab1 = gr.Text(placeholder="Prompt for first image", label="Prompt 1")
            prompt2_tab1 = gr.Text(placeholder="Prompt for second image", label="Prompt 2")
            seed_tab1 = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, value=0, label="Seed")
            randomize_seed_tab1 = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Accordion("Advanced Settings", open=False):
                guidance_scale_tab1 = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=7.0, label="Guidance Scale")
                num_inference_steps_tab1 = gr.Slider(minimum=1, maximum=50, step=1, value=25, label="Number of Inference Steps")
                num_of_interpolation_tab1 = gr.Slider(minimum=5, maximum=50, step=1, value=10, label="Number of Images for Interpolation")
            run_button_tab1 = gr.Button("Run")

            first_image_output_tab1 = gr.Image(label="Image of the first prompt")
            last_image_output_tab1 = gr.Image(label="Image of the second prompt")
            gif_output_tab1 = gr.Image(label="Linear interpolation")

            run_button_tab1.click(
                fn=infer_tab1,
                inputs=[
                    prompt1_tab1,
                    prompt2_tab1,
                    seed_tab1,
                    randomize_seed_tab1,
                    guidance_scale_tab1,
                    num_inference_steps_tab1,
                    num_of_interpolation_tab1,
                ],
                outputs=[first_image_output_tab1, last_image_output_tab1, gif_output_tab1, seed_tab1],
            )

            gr.Examples(examples=examples_1, inputs=[prompt1_tab1, prompt2_tab1])

with gr.Tab("[Arithmetic Operations]"): |
|
gr.Markdown("This demo only supports addition or subtraction between two text latents, i.e., 'VE(Prompt_1) + VE(Prompt_2)' or 'VE(Prompt_1) - VE(Prompt_2)'. For the other arithmetic operations, see the original [code](https://github.com/qihao067/CrossFlow).") |
|
|
|
prompt1_tab2 = gr.Text(placeholder="Prompt for first image", label="Prompt 1") |
|
prompt2_tab2 = gr.Text(placeholder="Prompt for second image", label="Prompt 2") |
|
seed_tab2 = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, value=0, label="Seed") |
|
randomize_seed_tab2 = gr.Checkbox(label="Randomize seed", value=True) |
|
guidance_scale_tab2 = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=7.0, label="Guidance Scale") |
|
num_inference_steps_tab2 = gr.Slider(minimum=1, maximum=50, step=1, value=50, label="Number of Inference Steps") |
|
operation_mode_tab2 = gr.Radio(choices=["Addition", "Subtraction"], label="Operation Mode", value="Addition") |
|
run_button_tab2 = gr.Button("Run") |
|
|
|
first_image_output_tab2 = gr.Image(label="Image of the first prompt") |
|
last_image_output_tab2 = gr.Image(label="Image of the second prompt") |
|
gif_output_tab2 = gr.Image(label="Resulting image produced by the arithmetic operations.") |
|
|
|
run_button_tab2.click( |
|
fn=infer_tab2, |
|
inputs=[ |
|
prompt1_tab2, |
|
prompt2_tab2, |
|
seed_tab2, |
|
randomize_seed_tab2, |
|
guidance_scale_tab2, |
|
num_inference_steps_tab2, |
|
operation_mode_tab2 |
|
], |
|
outputs=[first_image_output_tab2, last_image_output_tab2, gif_output_tab2, seed_tab2] |
|
) |
|
|
|
gr.Examples(examples=examples_2, inputs=[prompt1_tab2, prompt2_tab2]) |
|
|
|
|
|
if __name__ == "__main__":
    demo.launch()
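    # When running outside a Hugging Face Space, options such as demo.queue() or
    # demo.launch(share=True) could be used to handle long-running GPU jobs or to
    # expose a public link; the defaults are kept here.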