import os
import json
import torch
import gc
import numpy as np
import gradio as gr
from PIL import Image
from diffusers import StableDiffusionXLPipeline
import open_clip
from huggingface_hub import hf_hub_download
from IP_Composer.IP_Adapter.ip_adapter import IPAdapterXL
from IP_Composer.perform_swap import compute_dataset_embeds_svd, get_modified_images_embeds_composition
import spaces

device = "cuda" if torch.cuda.is_available() else "cpu"
# Initialize the SDXL pipeline
base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    add_watermarker=False,
)
# Initialize the IP-Adapter
image_encoder_repo = 'h94/IP-Adapter'
image_encoder_subfolder = 'models/image_encoder'
ip_ckpt = hf_hub_download('h94/IP-Adapter', subfolder="sdxl_models", filename='ip-adapter_sdxl_vit-h.bin')
ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckpt, device)

# Initialize the CLIP model used for image and concept embeddings
clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
clip_model.to(device)
# Maps UI labels to the file-name stems of the precomputed text embeddings.
# Values must match the .npy file names on disk, so misspellings there are kept.
CONCEPTS_MAP = {
    'age': 'age',
    'animal fur': 'animal_fur',
    'deterioration': 'deterioration',
    'dogs': 'dog',
    'emotion': 'emotion',
    'floor': 'floor',
    'flowers': 'flower',
    'fruit/vegetable': 'fruit_vegtebale',
    'fur': 'fur',
    'furniture': 'furniture',
    'lens': 'lens',
    'outfit': 'outfit',
    'outfit color': 'outfit_color',
    'pattern': 'pattern',
    'texture': 'texture',
    'times of day': 'times_of_day',
    'tree': 'tree',
    'vehicle': 'vehicle',
    'vehicle color': 'vehicle_color',
}
concept_options = list(CONCEPTS_MAP.keys())
def get_image_embeds(pil_image, model=clip_model, preproc=preprocess, dev=device):
    """Get CLIP image embeddings for a given PIL image."""
    image = preproc(pil_image)[np.newaxis, :, :, :]
    with torch.no_grad():
        embeds = model.encode_image(image.to(dev))
    return embeds.cpu().detach().numpy()
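# Example usage (hypothetical file path, shown for illustration only):
#   embeds = get_image_embeds(Image.open("example.jpg").convert("RGB"))
#   embeds.shape  # should be (1, 1024) for ViT-H/14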
# Request a ZeroGPU slot for the generation call (the Space runs on Zero,
# and `spaces` is imported for exactly this purpose)
@spaces.GPU
def process_images(
    base_image,
    concept_image1, concept_name1,
    concept_image2=None, concept_name2=None,
    concept_image3=None, concept_name3=None,
    rank1=10, rank2=10, rank3=10,
    prompt=None,
    scale=1.0,
    seed=420
):
    """Process the base image and concept images to generate modified images."""
    # Process the base image
    base_image_pil = Image.fromarray(base_image).convert("RGB")
    base_embed = get_image_embeds(base_image_pil, clip_model, preprocess, device)
    # Process the concept images
    concept_images = []
    concept_descriptions = []

    # For demo purposes we allow up to 3 different concepts, each with one concept image
    if concept_image1 is not None:
        if concept_name1 is None:
            raise gr.Error("Please select a concept type for concept image 1")
        concept_images.append(concept_image1)
        concept_descriptions.append(CONCEPTS_MAP[concept_name1])
    else:
        raise gr.Error("Please upload at least one concept image")

    # Add the second concept (optional)
    if concept_image2 is not None:
        if concept_name2 is None:
            raise gr.Error("Please select a concept type for concept image 2")
        concept_images.append(concept_image2)
        concept_descriptions.append(CONCEPTS_MAP[concept_name2])

    # Add the third concept (optional)
    if concept_image3 is not None:
        if concept_name3 is None:
            raise gr.Error("Please select a concept type for concept image 3")
        concept_images.append(concept_image3)
        concept_descriptions.append(CONCEPTS_MAP[concept_name3])

    # Collect the rank used for each active concept
    ranks = [rank1]
    if concept_image2 is not None:
        ranks.append(rank2)
    if concept_image3 is not None:
        ranks.append(rank3)
    concept_embeds = []
    projection_matrices = []
    # For the demo, we assume one concept image per concept.
    # For each concept image, compute its CLIP image embedding and load the
    # concept's text embeddings to compute the projection matrix over them.
    for i, concept_name in enumerate(concept_descriptions):
        img_pil = Image.fromarray(concept_images[i]).convert("RGB")
        concept_embeds.append(get_image_embeds(img_pil, clip_model, preprocess, device))
        embeds_path = f"./IP_Composer/text_embeddings/{concept_name}_descriptions.npy"
        with open(embeds_path, "rb") as f:
            all_embeds_in = np.load(f)
        projection_matrix = compute_dataset_embeds_svd(all_embeds_in, ranks[i])
        projection_matrices.append(projection_matrix)
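    # For reference, a rough sketch of what the projection step computes (the
    # real implementation lives in IP_Composer.perform_swap; the lines below
    # are an illustrative assumption, not the actual code): SVD the stacked
    # text embeddings, keep the top-`rank` right singular vectors v_r, and
    # form the projection v_r.T @ v_r onto the concept subspace:
    #
    #     _, _, v = np.linalg.svd(all_embeds_in, full_matrices=False)
    #     v_r = v[:rank]                     # (rank, d)
    #     projection_matrix = v_r.T @ v_r    # (d, d)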
    # Create the projection data structure for the composition
    projections_data = [
        {
            "embed": embed,
            "projection_matrix": proj_matrix
        }
        for embed, proj_matrix in zip(concept_embeds, projection_matrices)
    ]

    # Generate the modified images
    modified_images = get_modified_images_embeds_composition(
        base_embed,
        projections_data,
        ip_model,
        prompt=prompt,
        scale=scale,
        num_samples=1,
        seed=seed
    )
    return modified_images[0]
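# Conceptually (an illustrative sketch; the exact arithmetic lives in
# get_modified_images_embeds_composition), the composition swaps the base
# image's component inside each concept subspace for the concept image's
# component:
#
#     e = base_embed
#     for p in projections_data:
#         e = e - e @ p["projection_matrix"] + p["embed"] @ p["projection_matrix"]
#
# The IP-Adapter then conditions SDXL on e to generate the output image.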
def process_and_display(
    base_image,
    concept_image1, concept_name1="age",
    concept_image2=None, concept_name2=None,
    concept_image3=None, concept_name3=None,
    rank1=30, rank2=30, rank3=30,
    prompt=None, scale=1.0, seed=420
):
    """Wrapper around process_images that handles UI validation."""
    if base_image is None:
        raise gr.Error("Please upload a base image")
    if concept_image1 is None:
        raise gr.Error("Please upload at least one concept image")
    modified_images = process_images(
        base_image,
        concept_image1, concept_name1,
        concept_image2, concept_name2,
        concept_image3, concept_name3,
        rank1, rank2, rank3,
        prompt, scale, seed
    )
    # Optional cleanup between runs (disabled by default):
    # torch.cuda.empty_cache()
    # gc.collect()
    return modified_images
with gr.Blocks(title="Image Concept Composition") as demo:
    gr.Markdown("# IP Composer")
    with gr.Row():
        with gr.Column():
            base_image = gr.Image(label="Base Image (Required)", type="numpy")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image1 = gr.Image(label="Concept Image 1 (Required)", type="numpy")
                    with gr.Row():
                        concept_name1 = gr.Dropdown(concept_options, label="Concept 1", value=None, info="Concept type")
                        rank1 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 1")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image2 = gr.Image(label="Concept Image 2 (Optional)", type="numpy")
                    with gr.Row():
                        concept_name2 = gr.Dropdown(concept_options, label="Concept 2", value=None, info="Concept type")
                        rank2 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 2")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image3 = gr.Image(label="Concept Image 3 (Optional)", type="numpy")
                    with gr.Row():
                        concept_name3 = gr.Dropdown(concept_options, label="Concept 3", value=None, info="Concept type")
                        rank3 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 3")

            prompt = gr.Textbox(label="Guidance Prompt (Optional)", placeholder="Optional text prompt to guide generation")

            with gr.Row():
                scale = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Scale")
                seed = gr.Number(value=420, label="Seed", precision=0)

            submit_btn = gr.Button("Generate")

        with gr.Column():
            output_image = gr.Image(label="Composed Output", show_label=True)
    submit_btn.click(
        fn=process_and_display,
        inputs=[
            base_image,
            concept_image1, concept_name1,
            concept_image2, concept_name2,
            concept_image3, concept_name3,
            rank1, rank2, rank3,
            prompt, scale, seed
        ],
        outputs=[output_image]
    )

demo.launch()