|
import json |
|
from argparse import ArgumentParser |
|
|
|
from PIL import Image |
|
|
|
import constants |
|
from backend.controlnet import controlnet_settings_from_dict |
|
from backend.device import get_device_name |
|
from backend.models.gen_images import ImageFormat |
|
from backend.models.lcmdiffusion_setting import DiffusionTask |
|
from backend.upscale.tiled_upscale import generate_upscaled_image |
|
from constants import APP_VERSION, DEVICE |
|
from frontend.webui.image_variations_ui import generate_image_variations |
|
from models.interface_types import InterfaceType |
|
from paths import FastStableDiffusionPaths, ensure_path |
|
from state import get_context, get_settings |
|
from utils import show_system_info |
|
|
|
# Command-line interface for FastSD CPU.
parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
parser.add_argument(
    "-s",
    "--share",
    action="store_true",
    required=False,
    help="Create sharable link(Web UI)",
)

# Launch modes are mutually exclusive: at most one of these flags may be
# given on a single invocation.
group = parser.add_mutually_exclusive_group(required=False)
_MODE_FLAGS = (
    ("-g", "--gui", "Start desktop GUI"),
    ("-w", "--webui", "Start Web UI"),
    ("-a", "--api", "Start Web API server"),
    ("-m", "--mcp", "Start MCP(Model Context Protocol) server"),
    ("-r", "--realtime", "Start realtime inference UI(experimental)"),
    ("-v", "--version", "Version"),
)
for _short, _long, _help_text in _MODE_FLAGS:
    group.add_argument(_short, _long, action="store_true", help=_help_text)
|
|
|
# --- Generation / model-selection options -------------------------------
parser.add_argument("-b", "--benchmark", action="store_true", help="Run inference benchmark on the selected device")
parser.add_argument("--lcm_model_id", type=str, default="stabilityai/sd-turbo", help="Model ID or path,Default stabilityai/sd-turbo")
parser.add_argument("--openvino_lcm_model_id", type=str, default="rupeshs/sd-turbo-openvino", help="OpenVINO Model ID or path,Default rupeshs/sd-turbo-openvino")
parser.add_argument("--prompt", type=str, default="", help="Describe the image you want to generate")
parser.add_argument("--negative_prompt", type=str, default="", help="Describe what you want to exclude from the generation")
parser.add_argument("--image_height", type=int, default=512, help="Height of the image")
parser.add_argument("--image_width", type=int, default=512, help="Width of the image")
parser.add_argument("--inference_steps", type=int, default=1, help="Number of steps,default : 1")
parser.add_argument("--guidance_scale", type=float, default=1.0, help="Guidance scale,default : 1.0")
parser.add_argument("--number_of_images", type=int, default=1, help="Number of images to generate ,default : 1")
parser.add_argument("--seed", type=int, default=-1, help="Seed,default : -1 (disabled) ")
parser.add_argument("--use_openvino", action="store_true", help="Use OpenVINO model")
parser.add_argument("--use_offline_model", action="store_true", help="Use offline model")
parser.add_argument("--clip_skip", type=int, default=1, help="CLIP Skip (1-12), default : 1 (disabled) ")
parser.add_argument("--token_merging", type=float, default=0.0, help="Token merging scale, 0.0 - 1.0, default : 0.0")
parser.add_argument("--use_safety_checker", action="store_true", help="Use safety checker")

# --- LCM-LoRA options ---------------------------------------------------
parser.add_argument("--use_lcm_lora", action="store_true", help="Use LCM-LoRA")
parser.add_argument("--base_model_id", type=str, default="Lykon/dreamshaper-8", help="LCM LoRA base model ID,Default Lykon/dreamshaper-8")
parser.add_argument("--lcm_lora_id", type=str, default="latent-consistency/lcm-lora-sdv1-5", help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5")

# --- CLI / img2img / upscale options ------------------------------------
parser.add_argument("-i", "--interactive", action="store_true", help="Interactive CLI mode")
parser.add_argument("-t", "--use_tiny_auto_encoder", action="store_true", help="Use tiny auto encoder for SD (TAESD)")
parser.add_argument("-f", "--file", type=str, default="", help="Input image for img2img mode")
parser.add_argument("--img2img", action="store_true", help="img2img mode; requires input file via -f argument")
parser.add_argument("--batch_count", type=int, default=1, help="Number of sequential generations")
parser.add_argument("--strength", type=float, default=0.3, help="Denoising strength for img2img and Image variations")
parser.add_argument("--sdupscale", action="store_true", help="Tiled SD upscale,works only for the resolution 512x512,(2x upscale)")
parser.add_argument("--upscale", action="store_true", help="EDSR SD upscale ")

# --- Output / misc options ----------------------------------------------
parser.add_argument("--custom_settings", type=str, default=None, help="JSON file containing custom generation settings")
parser.add_argument("--usejpeg", action="store_true", help="Images will be saved as JPEG format")
parser.add_argument("--noimagesave", action="store_true", help="Disable image saving")
parser.add_argument("--imagequality", type=int, default=90, help="Output image quality [0 to 100]")
|
# BUG FIX: the help text previously used a plain string containing
# backslashes ("D:\lora_models\..."); "\l" and "\C" are invalid escape
# sequences, which Python flags (W605, DeprecationWarning and later
# SyntaxWarning). A raw string yields the byte-identical help text
# without the warning.
parser.add_argument(
    "--lora",
    type=str,
    help=r"LoRA model full path e.g D:\lora_models\CuteCartoon15V-LiberteRedmodModel-Cartoon-CuteCartoonAF.safetensors",
    default=None,
)
|
# LoRA blending strength and the web server listen port.
parser.add_argument("--lora_weight", type=float, default=0.5, help="LoRA adapter weight [0 to 1.0]")
parser.add_argument("--port", type=int, default=8000, help="Web server port")
|
|
|
# Parse the command line once; everything below keys off `args`.
args = parser.parse_args()

# A version query prints the version string and stops before any setup.
if args.version:
    print(APP_VERSION)
    raise SystemExit

# Startup banner and environment report.
print("FastSD CPU - ", APP_VERSION)
show_system_info()
print(f"Using device : {constants.DEVICE}")
|
|
|
|
|
# BUG FIX: the original branched on `args.webui` but executed the
# identical statement (`app_settings = get_settings()`) in both
# branches — the conditional was dead code, so it is removed.
app_settings = get_settings()

# Report and make sure the image output directory exists.
print(f"Output path : {app_settings.settings.generated_images.path}")
ensure_path(app_settings.settings.generated_images.path)
|
|
|
# Report how many models of each kind were discovered in the config files.
# NOTE: `stable_diffsuion_models` (sic) is the attribute's actual name on
# the settings object, so the typo is preserved here.
_model_inventory = (
    (app_settings.lcm_models, "LCM models", "lcm-models.txt"),
    (app_settings.stable_diffsuion_models, "stable diffusion models", "stable-diffusion-models.txt"),
    (app_settings.lcm_lora_models, "LCM-LoRA models", "lcm-lora-models.txt"),
    (app_settings.openvino_lcm_models, "OpenVINO LCM models", "openvino-lcm-models.txt"),
)
for _models, _label, _config_file in _model_inventory:
    print(f"Found {len(_models)} {_label} in config/{_config_file}")
|
|
|
# Deferred import: the web server module is imported only at launch time.
from backend.api.web import start_web_server

# NOTE(review): the port is hard-coded to 7860 while the parsed --port
# argument (default 8000) is never used here — confirm which is intended
# (this may be a truncated chunk of the full launcher).
start_web_server(7860)