import os
import torch
import gradio as gr
from diffusers import DiffusionPipeline
from PIL import Image
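# Suggested requirements.txt for this Space. This is an assumption inferred from the
# imports and the install hints printed by the error handlers below, not a verified,
# pinned set; adjust versions to your environment:
#
#   torch
#   diffusers
#   transformers
#   accelerate
#   sentencepiece
#   peft
#   gradio
#   Pillow
#   xformers   # optional, CUDA only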
# --- Configuration ---
SPACE_TITLE = "🎨 Enhanced Studio Ghibli AI Art Generator (LoRA)"
SPACE_DESCRIPTION = "Upload a portrait or a photo and transform it into a breathtaking Studio Ghibli-style masterpiece using a LoRA for fine-tuned results."
BASE_MODEL_ID = "black-forest-labs/FLUX.1-dev"
LORA_REPO_ID = "strangerzonehf/Flux-Ghibli-Art-LoRA"
TRIGGER_WORD = "Ghibli Art"
STRENGTH = 0.60  # Adjust for better balance between input and style
GUIDANCE_SCALE = 7.5  # Increased for better prompt adherence
NUM_INFERENCE_STEPS = 30  # Increased for potentially higher quality
INPUT_IMAGE_SIZE = (512, 512)
PROMPT_PREFIX = ""  # No need for a separate prefix; the LoRA trigger word carries the style
NEGATIVE_PROMPT = "ugly, deformed, blurry, low quality, bad anatomy, bad proportions, disfigured, poorly drawn face, mutation, mutated, extra limbs, extra fingers, body horror, glitchy, tiling"
# --- Device Setup ---
# Use CUDA if available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"🚀 Using device: {device}")

# Sanity-check that Torch can place a tensor on the chosen device.
try:
    torch.zeros(1).to(device)
    print("✅ Torch initialized successfully on", device)
except Exception as e:
    print(f"⚠️ Torch initialization error: {e}")
# --- Model Loading ---
try:
    pipe = DiffusionPipeline.from_pretrained(BASE_MODEL_ID, torch_dtype=torch.bfloat16)
except ValueError as e:
    if "sentencepiece" in str(e):
        print("⚠️ Error: sentencepiece is not installed. Please install it with: pip install sentencepiece")
    raise
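# Note: black-forest-labs/FLUX.1-dev is a gated model on the Hugging Face Hub. If the
# download above fails with an authorization error, the Space (or local environment)
# likely needs a token for an account that has accepted the model license, e.g.:
#
#   from huggingface_hub import login
#   login(token=os.environ["HF_TOKEN"])  # assumes the token is exposed via an HF_TOKEN secret/env var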
try:
    pipe.load_lora_weights(LORA_REPO_ID)
    print(f"✅ LoRA weights loaded from {LORA_REPO_ID}")
except Exception as e:
    print(f"⚠️ Error loading LoRA weights: {e}")
    print("⚠️ If this reports a missing PEFT backend, install it with: pip install peft")
    print("⚠️ Continuing without LoRA; results may vary.")

pipe.to(device)
# --- Optimization (Conditional for CUDA) ---
if device == "cuda":
    try:
        pipe.enable_xformers_memory_efficient_attention()
        print("✅ xFormers enabled!")
    except Exception as e:
        print(f"⚠️ xFormers not available: {e}")
    # enable_model_cpu_offload() manages device placement itself and largely
    # supersedes the pipe.to(device) call above.
    pipe.enable_model_cpu_offload()
    pipe.enable_vae_slicing()
    pipe.enable_attention_slicing()
# --- Image Transformation Function ---
def transform_image(input_image):
    if input_image is None:
        return None
    try:
        input_image = input_image.resize(INPUT_IMAGE_SIZE)
        # This text-to-image FLUX.1-dev pipeline does not accept an 'image' argument, so
        # the uploaded photo is never fed into the model directly. As a workaround, the
        # prompt describes the input (a photo of a person) and relies on the LoRA trigger
        # word for the Ghibli style. For true image-to-image, see the sketch after this function.
        prompt = f"{PROMPT_PREFIX} {TRIGGER_WORD}, portrait of a person, photo of a person".strip()
        # Note: whether negative_prompt has any effect here depends on the diffusers
        # version and the underlying pipeline.
        output = pipe(
            prompt=prompt,
            guidance_scale=GUIDANCE_SCALE,
            num_inference_steps=NUM_INFERENCE_STEPS,
            negative_prompt=NEGATIVE_PROMPT,
        ).images[0]
        return output
    except Exception as e:
        print(f"❌ Error during image transformation: {e}")
        return None
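# --- Optional: true image-to-image path (untested sketch) ---
# The workaround above ignores the uploaded pixels. Recent diffusers releases ship a
# FluxImg2ImgPipeline; if it is available in your environment, a genuine image-to-image
# variant could look roughly like the helper below. This is a sketch, not part of the
# Space's tested code path: the function name and the reuse of STRENGTH are assumptions,
# and for clarity the pipeline is created inside the function (a real Space would load
# it once at startup instead).
def transform_image_img2img(input_image):
    from diffusers import FluxImg2ImgPipeline  # lazy import so the main path runs without it

    img2img_pipe = FluxImg2ImgPipeline.from_pretrained(BASE_MODEL_ID, torch_dtype=torch.bfloat16).to(device)
    img2img_pipe.load_lora_weights(LORA_REPO_ID)
    init_image = input_image.convert("RGB").resize(INPUT_IMAGE_SIZE)
    return img2img_pipe(
        prompt=f"{TRIGGER_WORD}, portrait of a person",
        image=init_image,
        strength=STRENGTH,  # how far the output may drift from the input photo
        guidance_scale=GUIDANCE_SCALE,
        num_inference_steps=NUM_INFERENCE_STEPS,
    ).images[0]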
# --- Gradio UI ---
iface = gr.Interface(
    fn=transform_image,
    inputs=gr.Image(type="pil", label="Upload a Portrait/Photo"),
    outputs=gr.Image(type="pil", label="Studio Ghibli-Style Output"),
    title=SPACE_TITLE,
    description=SPACE_DESCRIPTION,
    # gr.Interface expects example *input values* (here: image file paths),
    # not gr.Image components.
    examples=[
        "examples/portrait1.jpg",
        "examples/photo1.jpg",
        "examples/landscape1.jpg",
    ],
)
# --- Main Execution ---
if __name__ == "__main__":
    # Create an 'examples' directory if it doesn't exist and add some sample images.
    if not os.path.exists("examples"):
        os.makedirs("examples")
        # You'll need to download or create these example images
        # and place them in the 'examples' folder. Example:
        # from urllib.request import urlretrieve
        # urlretrieve("URL_TO_YOUR_EXAMPLE_IMAGE_1", "examples/portrait1.jpg")
        # urlretrieve("URL_TO_YOUR_EXAMPLE_IMAGE_2", "examples/photo1.jpg")
        # urlretrieve("URL_TO_YOUR_EXAMPLE_IMAGE_3", "examples/landscape1.jpg")
        print("ℹ️ Created 'examples' directory. Please add sample images.")
    iface.launch()