Spaces: Running on L40S
Rishi Desai committed
Commit · 99745bb · 1 Parent(s): 2fd1616
cleanup; readme
Files changed:
- FaceEnhancementProd.py +19 -20
- README.md +24 -12
- demo.py +12 -28
- install.py +20 -43
- run_comfy.py +9 -8
- main.py → test.py +19 -25
FaceEnhancementProd.py
CHANGED
@@ -4,13 +4,12 @@ import sys
 from typing import Sequence, Mapping, Any, Union
 import torch
 
-
-COMFYUI_PATH = os.path.join(BASE_PATH, "ComfyUI")
+COMFYUI_PATH = "./ComfyUI"
 
 """
 To avoid loading the models each time, we store them in a global variable.
 """
-
+COMFY_MODELS = None
 
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     """Returns the value at the given index of a sequence or mapping.
@@ -74,7 +73,7 @@ def add_extra_model_paths() -> None:
     Parse the optional extra_model_paths.yaml file and add the parsed paths to the sys.path.
     """
     try:
-        from
+        from test import load_extra_path_config
     except ImportError:
         print(
             "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead."
@@ -177,10 +176,10 @@ def load_models():
     }
 
 def initialize_models():
-    global
-    if
+    global COMFY_MODELS
+    if COMFY_MODELS is None:
         import_custom_nodes()  # Ensure NODE_CLASS_MAPPINGS is initialized
-
+        COMFY_MODELS = load_models()
 
 initialize_models()
 
@@ -192,17 +191,17 @@ def main(
     positive_prompt: str = "",
     id_weight: float = 0.75,
 ):
-    global
-    if
+    global COMFY_MODELS
+    if COMFY_MODELS is None:
         raise ValueError("Models must be initialized before calling main(). Call initialize_models() first.")
     with torch.inference_mode():
-        dualcliploader_94 =
-        vaeloader_95 =
-        pulidfluxmodelloader_44 =
-        pulidfluxevacliploader_45 =
-        pulidfluxinsightfaceloader_46 =
-        controlnetloader_49 =
-        unetloader_93 =
+        dualcliploader_94 = COMFY_MODELS["dualcliploader_94"]
+        vaeloader_95 = COMFY_MODELS["vaeloader_95"]
+        pulidfluxmodelloader_44 = COMFY_MODELS["pulidfluxmodelloader_44"]
+        pulidfluxevacliploader_45 = COMFY_MODELS["pulidfluxevacliploader_45"]
+        pulidfluxinsightfaceloader_46 = COMFY_MODELS["pulidfluxinsightfaceloader_46"]
+        controlnetloader_49 = COMFY_MODELS["controlnetloader_49"]
+        unetloader_93 = COMFY_MODELS["unetloader_93"]
 
         cliptextencode = CLIPTextEncode()
         cliptextencode_23 = cliptextencode.encode(
@@ -237,10 +236,10 @@ def main(
         basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
         samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
         vaedecode = VAEDecode()
-        faceembeddistance = NODE_CLASS_MAPPINGS["FaceEmbedDistance"]()
-        display_any_rgthree = NODE_CLASS_MAPPINGS["Display Any (rgthree)"]()
-        image_comparer_rgthree = NODE_CLASS_MAPPINGS["Image Comparer (rgthree)"]()
-        saveimage = SaveImage()
+        # faceembeddistance = NODE_CLASS_MAPPINGS["FaceEmbedDistance"]()
+        # display_any_rgthree = NODE_CLASS_MAPPINGS["Display Any (rgthree)"]()
+        # image_comparer_rgthree = NODE_CLASS_MAPPINGS["Image Comparer (rgthree)"]()
+        # saveimage = SaveImage()
 
         applypulidflux_133 = applypulidflux.apply_pulid_flux(
             weight=id_weight,
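The change above replaces per-call model loading with a module-level cache (`COMFY_MODELS`) that is filled once and reused on every request. A minimal, self-contained sketch of that load-once pattern, using placeholder loader and model names rather than the repository's actual ComfyUI calls:

```python
# Sketch of the load-once pattern; load_checkpoint and the model names are
# placeholders, not the real ComfyUI node loaders used in FaceEnhancementProd.py.
MODELS = None  # module-level cache, populated exactly once


def load_checkpoint(name):
    """Stand-in for an expensive loader (e.g. reading a .safetensors file)."""
    print(f"loading {name} ...")
    return object()


def initialize_models():
    global MODELS
    if MODELS is None:  # skip the expensive work on every call after the first
        MODELS = {
            "unet": load_checkpoint("unet"),
            "vae": load_checkpoint("vae"),
        }


def enhance(prompt):
    if MODELS is None:
        raise ValueError("Call initialize_models() first.")
    unet, vae = MODELS["unet"], MODELS["vae"]  # reuse the cached objects
    return prompt, unet, vae


initialize_models()
enhance("a portrait photo")  # later calls never reload the models
```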
README.md
CHANGED
@@ -27,7 +27,7 @@ A tool for improving facial consistency and quality in AI-generated images. Dram
 
 1. Set up your Hugging Face token:
    - Create a token at [Hugging Face](https://huggingface.co/settings/tokens)
-   - Log into Hugging Face and accept their terms of service to download Flux
+   - Log into Hugging Face and accept their terms of service to download [Flux.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
    - Set the following environment variables:
    ```
    export HUGGINGFACE_TOKEN=your_token_here
@@ -48,21 +48,26 @@ A tool for improving facial consistency and quality in AI-generated images. Dram
    ```
 
 This will
-   - Install ComfyUI, custom nodes, and
+   - Install ComfyUI, custom nodes, and remaining dependencies to your venv
    - Download all required models (Flux.1-dev, ControlNet, text encoders, PuLID, and more)
 
 4. Run inference on one example:
 
    ```
-   python
+   python test.py --input examples/dany_gpt_1.png --ref examples/dany_face.jpg --out examples/dany_enhanced.png
    ```
 
-
-
-Using the ComfyUI workflows is the fastest way to get started. Run `python run_comfy.py`
-- `./workflows/FaceEnhancementProd.json` for face enhancement
-- `./workflows/FaceEmbedDist.json` for computing the face embedding distance
+<details>
+<summary>Arguments</summary>
 
+- `--input` (str): Path to the input image.
+- `--ref` (str): Path to the reference face image.
+- `--output` (str): Path to save the output image.
+- `--crop` (store_true): Flag to face crop the reference image. Default: False.
+- `--upscale` (store_true): Flag to upscale the (cropped) reference image. Default: False.
+- `--caption` (store_true): Flag to caption the input image. Default: False.
+- `--id_weight` (float): Face ID weight. Default: 0.75.
+</details>
 
 ## Gradio Demo
 
@@ -72,15 +77,22 @@ A simple web interface for the face enhancement workflow.
 
 2. Go to http://localhost:7860. You may need to enable port forwarding.
 
+## Running on ComfyUI
+
+Using the ComfyUI workflows is the fastest way to get started. Run `python run_comfy.py`. There are two workflows:
+- `./workflows/FaceEnhancementProd.json` for face enhancement
+- `./workflows/FaceEmbedDist.json` for computing the [face embedding distance](https://github.com/cubiq/ComfyUI_FaceAnalysis)
+
+
 ### Notes
 - The script and demo run a ComfyUI server ephemerally
-- Gradio demo is faster than the script because models remain loaded in memory
+- Gradio demo is faster than the script because the models remain loaded in memory and ComfyUI server is booted up.
 - All images are saved in `./ComfyUI/input/scratch/`
-
-- Face cropping and
+- `FaceEnhancementProd.py` was created with the [ComfyUI-to-Python-Extension](https://github.com/pydn/ComfyUI-to-Python-Extension) and re-engineered for efficiency and function.
+- Face cropping, upscaling, and captioning are unavailable; these will be added in an update.
 
 ### Troubleshooting
 
-- **Out of memory errors**: If your GPU has less than 48 GB VRAM, install [Flux.1-dev at
+- **Out of memory errors**: If your GPU has less than 48 GB VRAM, install [Flux.1-dev at fp8 precision](https://huggingface.co/Comfy-Org/flux1-dev).
 - **Face detection issues**: This method works for photorealistic images of people. It may not work on cartoons, anime characters, or non-human subjects.
 - **Downloading models fails**: Check your Hugging Face token has proper permissions.
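The new troubleshooting entry points smaller GPUs at an fp8 build of Flux.1-dev. A hedged sketch of fetching such a checkpoint with `huggingface_hub`; the exact filename inside `Comfy-Org/flux1-dev` is an assumption here, so check the repository's file listing before relying on it:

```python
import os

from huggingface_hub import hf_hub_download  # pip install huggingface_hub

# Filename is assumed, not confirmed by this commit; verify it on the repo page.
local_path = hf_hub_download(
    repo_id="Comfy-Org/flux1-dev",
    filename="flux1-dev-fp8.safetensors",
    token=os.getenv("HUGGINGFACE_TOKEN"),
)
print(local_path)  # cached file; copy it under ComfyUI/models/ where your workflow expects it
```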
demo.py
CHANGED
@@ -1,37 +1,27 @@
+import os
 from install import install
 
-
-
-
-
-
+if "HF_DEMO" in os.environ:
+    # Global variable to track if install() has been run; only for deploying on HF space
+    INSTALLED = False
+    if not INSTALLED:
+        install()
+        INSTALLED = True
 
 import gradio as gr
-import os
 import tempfile
 import hashlib
 import io
 import pickle
-import pathlib
 import sys
-from
+from test import process_face
 from PIL import Image
 
-
-
-# Ensure cache directory exists
-os.makedirs(CACHE_DIR, exist_ok=True)
+INPUT_CACHE_DIR = "./cache"
+os.makedirs(INPUT_CACHE_DIR, exist_ok=True)
 
 def get_image_hash(img):
-    """
-    Generate a hash of the image content.
-
-    Args:
-        img: PIL Image
-
-    Returns:
-        str: Hash of the image
-    """
+    """Generate a hash of the image content."""
     img_bytes = io.BytesIO()
     img.save(img_bytes, format='PNG')
     return hashlib.md5(img_bytes.getvalue()).hexdigest()
@@ -51,7 +41,7 @@ def enhance_face_gradio(input_image, ref_image):
     input_hash = get_image_hash(input_image)
     ref_hash = get_image_hash(ref_image)
     combined_hash = f"{input_hash}_{ref_hash}"
-    cache_path = os.path.join(
+    cache_path = os.path.join(INPUT_CACHE_DIR, f"{combined_hash}.pkl")
 
     # Check if result exists in cache
     if os.path.exists(cache_path):
@@ -78,7 +68,6 @@ def enhance_face_gradio(input_image, ref_image):
     ref_image.save(ref_path)
 
     try:
-        # Process the face
         process_face(
             input_path=input_path,
             ref_path=ref_path,
@@ -109,9 +98,7 @@ def enhance_face_gradio(input_image, ref_image):
     return result_img
 
 def create_gradio_interface():
-    # Create the Gradio interface
     with gr.Blocks(title="Face Enhancement Demo") as demo:
-        # Add instructions at the top
         gr.Markdown("""
         # Face Enhancement
         ### Instructions
@@ -124,10 +111,8 @@ def create_gradio_interface():
         For more information, check out my [blog post](https://rishidesai.github.io/posts/face-enhancement-techniques/).
         """, elem_id="instructions")
 
-        # Add a horizontal line for separation
         gr.Markdown("---")
 
-        # Main interface layout
         with gr.Row():
             with gr.Column():
                 input_image = gr.Image(label="Target Image", type="pil")
@@ -144,7 +129,6 @@ def create_gradio_interface():
             queue=True  # Enable queue for sequential processing
         )
 
-        # Add examples using gr.Examples
         gr.Markdown("## Examples\nClick on an example to load the images into the interface.")
         example_inps = [
             ["examples/dany_gpt_1.png", "examples/dany_face.jpg"],
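`demo.py` keys its result cache on the content of both uploaded images: each image is encoded to PNG bytes, hashed, and the pair of hashes names a pickle file. A small stand-alone sketch of that pattern (the directory name and the `compute` callback are placeholders):

```python
import hashlib
import io
import os
import pickle

from PIL import Image

CACHE_DIR = "./cache"  # demo.py uses INPUT_CACHE_DIR = "./cache"
os.makedirs(CACHE_DIR, exist_ok=True)


def image_hash(img: Image.Image) -> str:
    """Hash the encoded PNG bytes so identical uploads map to the same key."""
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return hashlib.md5(buf.getvalue()).hexdigest()


def cached_result(input_img, ref_img, compute):
    """Return a cached result for this image pair, computing and storing it if missing."""
    key = f"{image_hash(input_img)}_{image_hash(ref_img)}"
    path = os.path.join(CACHE_DIR, f"{key}.pkl")
    if os.path.exists(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    result = compute(input_img, ref_img)
    with open(path, "wb") as f:
        pickle.dump(result, f)
    return result
```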
install.py
CHANGED
@@ -1,62 +1,47 @@
 import os
 
-
-BASE_PATH = "./"
-COMFYUI_PATH = os.path.join(BASE_PATH, "ComfyUI")
-MODEL_PATH = os.path.join(COMFYUI_PATH, "models")
+COMFYUI_PATH = "./ComfyUI"
 
 CACHE_PATH = os.getenv('HF_HOME')
 os.makedirs(CACHE_PATH, exist_ok=True)
 
 
-def
-    """Run a shell command
+def run_cmd(command):
+    """Run a shell command"""
     print(f"🔄 Running: {command}")
     exit_code = os.system(command)
-
     if exit_code != 0:
         print(f"❌ Command failed: {command} (Exit Code: {exit_code})")
-        exit(1)
-
+        exit(1)
 
-def manage_git_repo(repo_url, install_path, requirements=False, submodules=False):
-    """Clone or update a git repository and handle its dependencies.
-
-
-        install_path: Where to install/update the repository
-        requirements: Whether to install requirements.txt
-        submodules: Whether to update git submodules
-    """
-    # Save the original directory
+def install_git_repo(repo_url, install_path, requirements=False, submodules=False):
+    """Clone or update a git repository and handle its dependencies"""
     original_dir = os.getcwd()
 
     if not os.path.exists(install_path) or not os.path.isdir(install_path) or not os.path.exists(
             os.path.join(install_path, ".git")):
         print(f"📂 Cloning {os.path.basename(install_path)}...")
-
+        run_cmd(f"git clone {repo_url} {install_path}")
     else:
         print(f"🔄 {os.path.basename(install_path)} exists. Checking for updates...")
 
     # Change to repo directory and update
     os.chdir(install_path)
-
+    run_cmd("git pull")
 
     if submodules:
-
-
+        run_cmd("git submodule update --init --recursive")
     if requirements:
-
+        run_cmd("python -m pip install -r requirements.txt")
 
     print(f"✅ {os.path.basename(install_path)} installed and updated.")
-
-    # Change back to the original directory
     os.chdir(original_dir)
 
 
 def install_comfyui():
     """Clone and set up ComfyUI if not already installed."""
-
+    install_git_repo(
         "https://github.com/comfyanonymous/ComfyUI.git",
         COMFYUI_PATH,
         requirements=True
@@ -77,10 +62,7 @@ def download_huggingface_models():
         {"repo_id": "comfyanonymous/flux_text_encoders", "filename": "clip_l.safetensors", "folder": "text_encoders"},
     ]
 
-    #
-    huggingface_token = os.getenv('HUGGINGFACE_TOKEN')
-
-    # Dictionary mapping repo_ids to specific filenames
+    # More specific filenames
     filename_mappings = {
         "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro": "Flux_Dev_ControlNet_Union_Pro_ShakkerLabs.safetensors",
     }
@@ -92,9 +74,9 @@ def download_huggingface_models():
             filename=model["filename"],
             cache_dir=CACHE_PATH,
             repo_type=model.get("repo_type", "model"),
-            token=
+            token=os.getenv('HUGGINGFACE_TOKEN')
        )
-        target_dir = os.path.join(
+        target_dir = os.path.join(COMFYUI_PATH, "models", model["folder"])
         os.makedirs(target_dir, exist_ok=True)
 
         # Use mapping if it exists, otherwise use original filename
@@ -114,7 +96,7 @@ def download_and_extract_antelopev2():
     """Download and extract AntelopeV2 model for insightface."""
     import zipfile, requests, shutil
 
-    base_path = os.path.join(
+    base_path = os.path.join(COMFYUI_PATH, "models", "insightface/models")
     model_target_path = os.path.join(base_path, "antelopev2")
     download_url = "https://huggingface.co/MonsterMMORPG/tools/resolve/main/antelopev2.zip"
     zip_path = os.path.join(base_path, "antelopev2.zip")
@@ -174,12 +156,7 @@ def download_and_extract_antelopev2():
 def install_custom_nodes():
     """Install all custom nodes for ComfyUI."""
 
-
-        {
-            "repo": "https://github.com/ltdrdata/ComfyUI-Manager",
-            "name": "ComfyUI-Manager",
-            "requirements": True
-        },
+    custom_nodes = [
         {
             "repo": "https://github.com/sipie800/ComfyUI-PuLID-Flux-Enhanced",
             "name": "ComfyUI-PuLID-Flux-Enhanced",
@@ -190,7 +167,7 @@ def install_custom_nodes():
             "name": "rgthree-comfy",
             "requirements": True
         },
-        {  # we already have insightface so don't need requirements (
+        {  # we already have insightface so don't need requirements (no dlib)
             "repo": "https://github.com/cubiq/ComfyUI_FaceAnalysis",
             "name": "ComfyUI_FaceAnalysis",
             "requirements": False
@@ -202,10 +179,10 @@ def install_custom_nodes():
         # },
     ]
 
-    for node in
+    for node in custom_nodes:
         repo_name = node["name"]
         repo_path = os.path.join(COMFYUI_PATH, "custom_nodes", repo_name)
-
+        install_git_repo(
             node["repo"],
             repo_path,
             requirements=node.get("requirements", False),
@@ -221,7 +198,7 @@ def install():
     install_custom_nodes()
     download_huggingface_models()
     download_and_extract_antelopev2()
-    print("🎉 Setup Complete!
+    print("🎉 Setup Complete!")
 
 if __name__ == "__main__":
     install()
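`download_huggingface_models()` pulls each file into the Hugging Face cache and then places it under `ComfyUI/models/<folder>`. The tail of that loop is not visible in the hunk, so the copy step below is an assumption; the model entry is one real item from the list above:

```python
import os
import shutil

from huggingface_hub import hf_hub_download  # pip install huggingface_hub

COMFYUI_PATH = "./ComfyUI"
model = {"repo_id": "comfyanonymous/flux_text_encoders",
         "filename": "clip_l.safetensors", "folder": "text_encoders"}

cached_file = hf_hub_download(
    repo_id=model["repo_id"],
    filename=model["filename"],
    token=os.getenv("HUGGINGFACE_TOKEN"),
)
target_dir = os.path.join(COMFYUI_PATH, "models", model["folder"])
os.makedirs(target_dir, exist_ok=True)
# Assumed final step: copy the cached file to where ComfyUI looks for it.
shutil.copy(cached_file, os.path.join(target_dir, model["filename"]))
```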
run_comfy.py
CHANGED
@@ -1,18 +1,19 @@
 import os
 import subprocess
+import argparse
 
 COMFYUI_PATH = "./ComfyUI"
-PORT = 8000
 
-
-def run_comfyui():
+def run_comfyui(port):
     """Launch ComfyUI with external access."""
     os.chdir(COMFYUI_PATH)
-    print(f"🚀 Launching ComfyUI on port {
-
-    subprocess.run(f"python main.py --listen 0.0.0.0 --port {PORT} --disable-auto-launch", shell=True)
+    print(f"🚀 Launching ComfyUI on port {port}...")
 
+    subprocess.run(f"python main.py --listen 0.0.0.0 --port {port} --disable-auto-launch", shell=True)
 
 if __name__ == "__main__":
-
-
+    parser = argparse.ArgumentParser(description="Run ComfyUI")
+    parser.add_argument('--port', type=int, default=8000, help='Port to run ComfyUI on')
+    args = parser.parse_args()
+
+    run_comfyui(args.port)
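With the argparse change the port is chosen at launch time, e.g. `python run_comfy.py --port 8188`; leaving the flag off keeps the previous default of 8000.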
main.py → test.py
RENAMED
@@ -12,88 +12,82 @@ def parse_args():
     parser.add_argument('--output', type=str, required=True, help='Path to save the output image')
     parser.add_argument('--id_weight', type=float, default=0.75, help='face ID weight')
     args = parser.parse_args()
-
-    # Validate input file exists
+
     if not os.path.exists(args.input):
         parser.error(f"Input file does not exist: {args.input}")
-
-    # Validate reference file exists
     if not os.path.exists(args.ref):
         parser.error(f"Reference file does not exist: {args.ref}")
-
-    # Validate output directory exists
     output_dir = os.path.dirname(args.output)
     if output_dir and not os.path.exists(output_dir):
         parser.error(f"Output directory does not exist: {output_dir}")
-
     return args
 
 def create_scratch_dir():
     """Create a new numbered directory in ./ComfyUI/input/scratch"""
     base_dir = "./ComfyUI/input/scratch"
-
+
     # Create base directory if it doesn't exist
     os.makedirs(base_dir, exist_ok=True)
-
+
     # Get existing directories and find the next number
     existing_dirs = [d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d)) and d.isdigit()]
     next_dir_num = 1
     if existing_dirs:
         next_dir_num = max([int(d) for d in existing_dirs]) + 1
-
+
     # Create the new directory
     new_dir = os.path.join(base_dir, str(next_dir_num))
     os.makedirs(new_dir, exist_ok=True)
-
+
     return new_dir
 
 def process_face(input_path, ref_path, crop=False, upscale=False, output_path=None, id_weight=0.75):
     """
     Process a face image using the given parameters.
-
+
     Returns:
         str: Path to the scratch directory used for processing
     """
     print(f"Processing image: {input_path}")
     print(f"Reference image: {ref_path}")
     print(f"Output will be saved to: {output_path}")
-
+
     # Create a new scratch directory for this run
     scratch_dir = create_scratch_dir()
     print(f"Created scratch directory: {scratch_dir}")
-
+
     # Copy input and reference images to scratch directory
     input_filename = os.path.basename(input_path)
     ref_filename = os.path.basename(ref_path)
-
+
     scratch_input = os.path.join(scratch_dir, input_filename)
     scratch_ref = os.path.join(scratch_dir, ref_filename)
-
+
     shutil.copy(input_path, scratch_input)
     shutil.copy(ref_path, scratch_ref)
-
+
     # Convert paths to ComfyUI format (relative to ComfyUI/input/)
     # For example: "./ComfyUI/input/scratch/1/image.png" becomes "scratch/1/image.png"
     comfy_ref_path = os.path.relpath(scratch_ref, "./ComfyUI/input")
     comfy_input_path = os.path.relpath(scratch_input, "./ComfyUI/input")
-
+
     enhance_face(comfy_ref_path, comfy_input_path, output_path, dist_image=f"{output_path}_dist.png", id_weight=id_weight)
-
+
     print(f"Enhanced image saved to: {output_path}")
     print(f"Working files are in: {scratch_dir}")
-
+
     return scratch_dir
 
 def main():
     args = parse_args()
     return process_face(
-        input_path=args.input,
-        ref_path=args.ref,
-        crop=args.crop,
-        upscale=args.upscale,
+        input_path=args.input,
+        ref_path=args.ref,
+        crop=args.crop,
+        upscale=args.upscale,
         output_path=args.output,
         id_weight=args.id_weight
     )
 
 if __name__ == "__main__":
-    main()
+    main()
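Because `demo.py` now imports `process_face` from `test.py`, the same entry point can be driven from Python as well as from the CLI. A minimal sketch, assuming `install()` has already set up ComfyUI and the models as in `install.py` (the example paths come from the README):

```python
# Programmatic use of the renamed module; assumes ComfyUI and the models are
# already installed, as demo.py ensures before importing this.
from test import process_face

scratch_dir = process_face(
    input_path="examples/dany_gpt_1.png",
    ref_path="examples/dany_face.jpg",
    output_path="examples/dany_enhanced.png",
    id_weight=0.75,
)
print(scratch_dir)  # numbered working directory under ./ComfyUI/input/scratch/
```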