reach-vb (HF Staff) committed
Commit db155ea · verified · 1 parent: 71b079d

Upload 4 files

Files changed (5)
  1. .gitattributes +1 -0
  2. app.py +380 -1
  3. flux_lora.png +3 -0
  4. loras.json +234 -0
  5. requirements.txt +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ flux_lora.png filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1 +1,380 @@
- yest
+ import os
+ import gradio as gr
+ import json
+ import logging
+ from PIL import Image
+
+ from huggingface_hub import ModelCard, HfFileSystem # Keep ModelCard, HfFileSystem for add_custom_lora
+ from huggingface_hub import InferenceClient # Added for API inference
+ import copy
+ import random
+ import time
+ import re # Keep for add_custom_lora URL parsing and potentially trigger word finding
+
+ # --- Inference Client Setup ---
+ # It's recommended to load the API key from environment variables or Gradio secrets
+ HF_API_KEY = os.getenv("HF_API_KEY")
+ if not HF_API_KEY:
+     # Try to get from Gradio secrets if running in a Space
+     try:
+         HF_API_KEY = gr.secrets.get("HF_API_KEY")
+     except (AttributeError, KeyError):
+         HF_API_KEY = None # Set to None if not found
+
+ if not HF_API_KEY:
+     logging.warning("HF_API_KEY not found in environment variables or Gradio secrets. Inference API calls will likely fail.")
+     # Optionally, raise an error or provide a default behavior
+     # raise ValueError("Missing Hugging Face API Key (HF_API_KEY) for InferenceClient")
+     client = None # Initialize client as None if no key
+ else:
+     client = InferenceClient(provider="fal-ai", token=HF_API_KEY)
+     # Note: Provider choice depends on where the target models are hosted/supported for inference.
+
+ # Load LoRAs from JSON file
+ with open('loras.json', 'r') as f:
+     loras = json.load(f)
+
+ # Removed diffusers model initialization block
+
+ MAX_SEED = 2**32-1
+
+ class calculateDuration:
+     def __init__(self, activity_name=""):
+         self.activity_name = activity_name
+
+     def __enter__(self):
+         self.start_time = time.time()
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.end_time = time.time()
+         self.elapsed_time = self.end_time - self.start_time
+         if self.activity_name:
+             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+         else:
+             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
+ # Updated function signature: Removed width, height inputs
+ def update_selection(evt: gr.SelectData):
+     selected_lora = loras[evt.index]
+     new_placeholder = f"Type a prompt for {selected_lora['title']}"
+     lora_repo = selected_lora["repo"]
+     # Use the repo ID directly as the model identifier for the API call
+     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨ (Model ID: `{lora_repo}`)"
+
+     # Default width/height
+     width = 1024
+     height = 1024
+     # Update width/height based on aspect ratio if defined
+     if "aspect" in selected_lora:
+         if selected_lora["aspect"] == "portrait":
+             width = 768
+             height = 1024
+         elif selected_lora["aspect"] == "landscape":
+             width = 1024
+             height = 768
+         # else keep 1024x1024
+
+     # Return updates for prompt, selection info, index, and width/height states
+     return (
+         gr.update(placeholder=new_placeholder),
+         updated_text,
+         evt.index,
+         gr.update(value=width),
+         gr.update(value=height),
+     )
+
+ def run_lora(prompt, selected_index, current_seed, current_width, current_height):
+     global client # Access the global client
+
+     if client is None:
+         raise gr.Error("InferenceClient could not be initialized. Missing HF_API_KEY.")
+
+     if selected_index is None:
+         raise gr.Error("You must select a LoRA/Model before proceeding.")
+
+     # --- Hardcoded Defaults (Removed from UI) ---
+     cfg_scale = 7.0
+     steps = 30
+     # lora_scale = 0.95 # Might not be applicable/used by API
+     randomize_seed = True # Always randomize in this simplified version
+     # Removed image_input_path, image_strength - No img2img in this version
+
+     selected_lora = loras[selected_index]
+     # The 'repo' field now directly serves as the model identifier for the API
+     model_id = selected_lora["repo"]
+     trigger_word = selected_lora.get("trigger_word", "") # Use .get for safety
+
+     # --- Prompt Construction ---
+     if trigger_word:
+         trigger_position = selected_lora.get("trigger_position", "prepend") # Default prepend
+         if trigger_position == "prepend":
+             prompt_mash = f"{trigger_word} {prompt}"
+         else: # Append
+             prompt_mash = f"{prompt} {trigger_word}"
+     else:
+         prompt_mash = prompt
+
+     # --- Seed Handling ---
+     seed_to_use = current_seed # Use the state value by default
+     if randomize_seed:
+         seed_to_use = random.randint(0, MAX_SEED)
+         # Optional: Keep timer if desired
+         # with calculateDuration("Randomizing seed"):
+         #     pass
+
+     # --- API Call (Text-to-Image only) ---
+     final_image = None
+     try:
+         with calculateDuration(f"API Inference (txt2img) for {model_id}"):
+             print(f"Running Text-to-Image for Model: {model_id}")
+             final_image = client.text_to_image(
+                 prompt=prompt_mash,
+                 model=model_id,
+                 guidance_scale=cfg_scale,
+                 num_inference_steps=steps,
+                 seed=seed_to_use,
+                 width=current_width, # Use width from state
+                 height=current_height, # Use height from state
+                 # lora_scale might need to be passed via 'parameters' if supported
+                 # parameters={"lora_scale": lora_scale}
+             )
+
+     except Exception as e:
+         print(f"Error during API call: {e}")
+         # Improved error message for common API key issues
+         if "authorization" in str(e).lower() or "401" in str(e):
+             raise gr.Error(f"Authorization error calling the Inference API. Please ensure your HF_API_KEY is valid and has the necessary permissions. Error: {e}")
+         elif "model is currently loading" in str(e).lower() or "503" in str(e):
+             raise gr.Error(f"Model '{model_id}' is currently loading or unavailable. Please try again in a few moments. Error: {e}")
+         else:
+             raise gr.Error(f"Failed to generate image using the API. Model: {model_id}. Error: {e}")
+
+     # Return final image, the seed used, and hide progress bar
+     return final_image, seed_to_use, gr.update(visible=False)
+
+
+ # Removed get_huggingface_safetensors function as we don't download safetensors
+
+ def parse_hf_link(link):
+     """Parses a Hugging Face link or repo ID string."""
+     if link.startswith("https://huggingface.co/"):
+         link = link.replace("https://huggingface.co/", "")
+     elif link.startswith("www.huggingface.co/"):
+         link = link.replace("www.huggingface.co/", "")
+     # Basic validation for "user/model" format
+     if "/" not in link or len(link.split("/")) != 2:
+         raise ValueError("Invalid Hugging Face repository ID format. Expected 'user/model'.")
+     return link.strip()
+
+ def get_model_details(repo_id):
+     """Fetches model card details (image, trigger word) if possible."""
+     try:
+         model_card = ModelCard.load(repo_id)
+         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+         trigger_word = model_card.data.get("instance_prompt", "") # Common key for trigger words
+         # Try another common key if the first fails
+         if not trigger_word:
+             trigger_word = model_card.data.get("trigger_words", [""])[0]
+
+         image_url = f"https://huggingface.co/{repo_id}/resolve/main/{image_path}" if image_path else None
+
+         # Fallback: Check repo files for an image if not in card widget data
+         if not image_url:
+             fs = HfFileSystem()
+             files = fs.ls(repo_id, detail=False)
+             image_extensions = (".jpg", ".jpeg", ".png", ".webp")
+             for file in files:
+                 filename = file.split("/")[-1]
+                 if filename.lower().endswith(image_extensions):
+                     image_url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
+                     break # Take the first image found
+
+         # Use repo name as title if not specified elsewhere
+         title = model_card.data.get("model_display_name", repo_id.split('/')[-1]) # Example key, might vary
+
+         return title, trigger_word, image_url
+     except Exception as e:
+         print(f"Could not fetch model card details for {repo_id}: {e}")
+         # Fallback values
+         return repo_id.split('/')[-1], "", None # Use repo name part as title
+
+
+ def add_custom_lora(custom_lora_input):
+     global loras
+     if not custom_lora_input:
+         # Clear the custom LoRA section if input is empty
+         return gr.update(visible=False, value=""), gr.update(visible=False), gr.update(), "", None, ""
+
+     try:
+         repo_id = parse_hf_link(custom_lora_input)
+         print(f"Attempting to add custom model: {repo_id}")
+
+         # Check if model already exists in the list
+         existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo_id), None)
+
+         if existing_item_index is not None:
+             print(f"Model {repo_id} already exists in the list.")
+             # Optionally re-select the existing one or just show info
+             selected_lora = loras[existing_item_index]
+             title = selected_lora.get('title', repo_id.split('/')[-1])
+             image = selected_lora.get('image', None) # Use existing image if available
+             trigger_word = selected_lora.get('trigger_word', '')
+         else:
+             # Fetch details for the new model
+             title, trigger_word, image = get_model_details(repo_id)
+             print(f"Adding new model: {repo_id}, Title: {title}, Trigger: '{trigger_word}', Image: {image}")
+             new_item = {
+                 "image": image, # Store image URL (can be None)
+                 "title": title,
+                 "repo": repo_id, # Store the repo ID used for API calls
+                 # "weights": path, # No longer needed
+                 "trigger_word": trigger_word # Store trigger word if found
+             }
+             loras.append(new_item)
+             existing_item_index = len(loras) - 1 # Index of the newly added item
+
+         # Generate HTML card for display
+         card = f'''
+         <div class="custom_lora_card">
+             <span>Loaded custom model:</span>
+             <div class="card_internal">
+                 {f'<img src="{image}" alt="{title} preview"/>' if image else '<div class="no-image">No Image</div>'}
+                 <div>
+                     <h3>{title}</h3>
+                     <small>Model ID: <code>{repo_id}</code><br></small>
+                     <small>{"Using trigger word: <code><b>"+trigger_word+"</b></code>" if trigger_word else "No specific trigger word found in card. Include it if needed."}<br></small>
+                 </div>
+             </div>
+         </div>
+         '''
+
+         # Update the gallery to include the new item (or reflect potential changes if re-added)
+         updated_gallery_items = [(item.get("image"), item.get("title", item["repo"].split('/')[-1])) for item in loras]
+
+         # Update UI elements: show info card, show remove button, update gallery, clear selection info, set selected index, update prompt placeholder
+         return (
+             gr.update(visible=True, value=card),
+             gr.update(visible=True),
+             gr.Gallery(value=updated_gallery_items, selected_index=existing_item_index), # Select the added/found item
+             f"### Selected: [{repo_id}](https://huggingface.co/{repo_id}) ✨ (Model ID: `{repo_id}`)", # Update selection info
+             existing_item_index,
+             gr.update(placeholder=f"Type a prompt for {title}") # Update prompt placeholder
+         )
+
+     except ValueError as e: # Catch parsing errors
+         gr.Warning(f"Invalid Input: {e}")
+         return gr.update(visible=True, value=f"Invalid input: {e}"), gr.update(visible=False), gr.update(), "", None, ""
+     except Exception as e: # Catch other errors (e.g., network issues during card fetch)
+         gr.Warning(f"Error adding custom model: {e}")
+         # Show error in the info box, hide remove button, don't change gallery/selection
+         return gr.update(visible=True, value=f"Error adding custom model: {e}"), gr.update(visible=False), gr.update(), "", None, ""
+
+
+ def remove_custom_lora():
+     # This function might need adjustment if we want to remove the *last added* custom lora
+     # For now, it just clears the display and selection related to custom loras.
+     # It doesn't remove the item from the global `loras` list.
+     return gr.update(visible=False, value=""), gr.update(visible=False), gr.update(selected_index=None), "", None, gr.update(value="") # Clear custom_lora textbox too
+
+ # run_lora.zerogpu = True # Removed as inference is remote
+
+ css = '''
+ #gen_btn{height: 100%}
+ #gen_column{align-self: stretch}
+ #title{text-align: center}
+ #title h1{font-size: 3em; display:inline-flex; align-items:center}
+ #title img{width: 100px; margin-right: 0.5em}
+ #gallery .grid-wrap{height: 10vh}
+ #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+ .card_internal{display: flex;height: 100px;margin-top: .5em; align-items: center;}
+ .card_internal img{margin-right: 1em; height: 100%; width: auto; object-fit: cover;}
+ .card_internal .no-image { width: 100px; height: 100px; background-color: #eee; display: flex; align-items: center; justify-content: center; color: #aaa; margin-right: 1em; font-size: small;}
+ .styler{--form-gap-width: 0px !important}
+ #progress{height:30px}
+ #progress .generating{display:none}
+ /* Keep progress bar CSS for potential future use or remove if definitely not needed */
+ .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+ .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+ '''
+
+ font = [gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
+ with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
+     title = gr.HTML(
+         """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> FLUX LoRA the Explorer</h1>""",
+         elem_id="title",
+     )
+     # --- States for parameters previously in Advanced Settings ---
+     selected_index = gr.State(None)
+     width = gr.State(1024) # Default width
+     height = gr.State(1024) # Default height
+     seed = gr.State(0) # Default seed (will be randomized by run_lora)
+     # input_image = gr.State(None) # State for input image if img2img was kept
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA/Model")
+         with gr.Column(scale=1, elem_id="gen_column"):
+             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
+     with gr.Row():
+         with gr.Column():
+             selected_info = gr.Markdown("Select a base model or add a custom one below.") # Updated initial text
+             gallery = gr.Gallery(
+                 # Ensure items have 'image' and 'title' keys, provide fallbacks if needed
+                 [(item.get("image"), item.get("title", item["repo"].split('/')[-1])) for item in loras],
+                 label="Model Gallery", # Changed label
+                 allow_preview=False,
+                 columns=3,
+                 elem_id="gallery",
+                 show_share_button=False
+             )
+             with gr.Group():
+                 custom_lora = gr.Textbox(label="Custom Model", info="Hugging Face model ID (e.g., user/model-name) or URL", placeholder="stabilityai/stable-diffusion-xl-base-1.0") # Updated label/placeholder
+                 gr.Markdown("[Check Hugging Face Models](https://huggingface.co/models?pipeline_tag=text-to-image&sort=trending)", elem_id="lora_list") # Updated link/text
+             custom_lora_info = gr.HTML(visible=False)
+             custom_lora_button = gr.Button("Clear custom model info", visible=False) # Changed button text
+         with gr.Column():
+             # Keep progress bar element, but it will only be shown briefly if API is slow, then hidden by run_lora return
+             progress_bar = gr.Markdown(elem_id="progress", visible=False, value="Generating...")
+             result = gr.Image(label="Generated Image")
+             # Display the seed used for the generation
+             used_seed_display = gr.Textbox(label="Seed Used", value=0, interactive=False) # Display seed used
+
+     # --- Removed Advanced Settings Accordion ---
+     # with gr.Row():
+     #     with gr.Accordion("Advanced Settings", open=False):
+     #         ... (Removed content) ...
+
+     gallery.select(
+         update_selection,
+         inputs=[], # No direct inputs needed, uses evt
+         # Update prompt placeholder, selection text, selected index state, and width/height states
+         outputs=[prompt, selected_info, selected_index, width, height]
+     )
+     # Use submit event for Textbox to trigger add_custom_lora
+     custom_lora.submit(
+         add_custom_lora,
+         inputs=[custom_lora],
+         # Outputs: info card, remove button, gallery, selection text, selected index state, prompt placeholder
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
+     )
+     custom_lora_button.click(
+         remove_custom_lora,
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora] # Clear textbox too
+     )
+     gr.on(
+         triggers=[generate_button.click, prompt.submit],
+         fn=run_lora,
+         # Inputs now use state variables for width, height, seed
+         inputs=[prompt, selected_index, seed, width, height],
+         # Outputs: result image, seed state (updated with used seed), progress bar update
+         outputs=[result, seed, progress_bar]
+     ).then(
+         # Update the displayed seed value after run_lora completes
+         lambda s: gr.update(value=s),
+         inputs=[seed],
+         outputs=[used_seed_display]
+     )
+
+
+ app.queue()
+ app.launch()
flux_lora.png ADDED

Git LFS Details

  • SHA256: 6bfe4778565dc61ae2d3a1d514d7496cca410343e586583bd842334c234b0843
  • Pointer size: 131 Bytes
  • Size of remote file: 199 kB
loras.json ADDED
@@ -0,0 +1,234 @@
+ [
+   {
+     "repo": "Purz/choose-your-own-adventure",
+     "image": "https://huggingface.co/Purz/choose-your-own-adventure/resolve/main/34584570.jpeg",
+     "trigger_word": "cy04,",
+     "trigger_position": "prepend",
+     "title": "choose your own adventure",
+     "aspect": "portrait"
+   },
+   {
+     "image": "https://huggingface.co/renderartist/retrocomicflux/resolve/main/images/ComfyUI_temp_ipugi_00131_.png",
+     "repo": "renderartist/retrocomicflux",
+     "trigger_word": "c0m1c style vintage 1930s style comic strip panel of",
+     "title": "Retro Comic",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/glif/l0w-r3z/resolve/main/images/a19d658b-5d4c-45bc-9df6-f2bec54462a5.png",
+     "repo": "glif/l0w-r3z",
+     "trigger_word": ", l0w-r3z",
+     "title": "Low Res 3D"
+   },
+   {
+     "repo": "Purz/vhs-box",
+     "image": "https://huggingface.co/Purz/vhs-box/resolve/main/33726559.jpeg",
+     "trigger_word": ", vhs_box",
+     "title": "VHS Box"
+   },
+   {
+     "image": "https://huggingface.co/renderartist/simplevectorflux/resolve/main/images/ComfyUI_09477_.jpeg",
+     "title": "Simple Vector",
+     "repo": "renderartist/simplevectorflux",
+     "trigger_word": "v3ct0r style, simple flat vector art, isolated on white bg,",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/glif/anime-blockprint-style/resolve/main/images/glif-block-print-anime-flux-dev-araminta-k-lora-araminta-k-kbde06qyovrmvsv65ubfyhn1.jpg",
+     "repo": "glif/anime-blockprint-style",
+     "trigger_word": ", blockprint style",
+     "title": "Blockprint Style"
+   },
+   {
+     "image": "https://huggingface.co/Purz/face-projection/resolve/main/34031841.jpeg",
+     "repo": "Purz/face-projection",
+     "trigger_word": "f4c3_p40j3ct10n,",
+     "trigger_position": "prepend",
+     "title": "face projection"
+   },
+   {
+     "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
+     "title": "Tarot v1",
+     "repo": "multimodalart/flux-tarot-v1",
+     "trigger_word": "in the style of TOK a trtcrd, tarot style",
+     "aspect": "portrait"
+   },
+   {
+     "repo": "alvdansen/pola-photo-flux",
+     "image": "https://huggingface.co/alvdansen/pola-photo-flux/resolve/main/images/out-2%20(83).webp",
+     "trigger_word": ", polaroid style",
+     "title": "Polaroid Style"
+   },
+   {
+     "image": "https://huggingface.co/dvyio/flux-lora-the-sims/resolve/main/images/dunBAVBsALOepaE_dsWFI_6b0fef6b0fc4472aa07d00edea7c75b3.jpg",
+     "repo": "dvyio/flux-lora-the-sims",
+     "trigger_word": ", video game screenshot in the style of THSMS",
+     "title": "The Sims style"
+   },
+   {
+     "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
+     "title": "SoftPasty",
+     "repo": "alvdansen/softpasty-flux-dev",
+     "trigger_word": "araminta_illus illustration style"
+   },
+   {
+     "image": "https://huggingface.co/dvyio/flux-lora-film-noir/resolve/main/images/S8iWMa0GamEcFkanHHmI8_a232d8b83bb043808742d661dac257f7.jpg",
+     "title": "Film Noir",
+     "repo": "dvyio/flux-lora-film-noir",
+     "trigger_word": "in the style of FLMNR"
+   },
+   {
+     "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
+     "title": "1920s cartoon",
+     "repo": "AIWarper/RubberCore1920sCartoonStyle",
+     "trigger_word": "RU883R style",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/Norod78/JojosoStyle-flux-lora/resolve/main/samples/1725244218477__000004255_1.jpg",
+     "title": "JoJo Style",
+     "repo": "Norod78/JojosoStyle-flux-lora",
+     "trigger_word": "JojosoStyle",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
+     "title": "flux-Realism",
+     "repo": "XLabs-AI/flux-RealismLora",
+     "trigger_word": ""
+   },
+   {
+     "image": "https://huggingface.co/multimodalart/vintage-ads-flux/resolve/main/samples/j_XNU6Oe0mgttyvf9uPb3_dc244dd3d6c246b4aff8351444868d66.png",
+     "title": "Vintage Ads",
+     "repo": "multimodalart/vintage-ads-flux",
+     "trigger_word": "a vintage ad of",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/glif/how2draw/resolve/main/images/glif-how2draw-araminta-k-vbnvy94npt8m338r2vm02m50.jpg",
+     "repo": "glif/how2draw",
+     "trigger_word": ", How2Draw",
+     "title": "How2Draw"
+   },
+   {
+     "image": "https://huggingface.co/mgwr/Cine-Aesthetic/resolve/main/images/00030-1333633802.png",
+     "title": "Cine Aesthetic",
+     "repo": "mgwr/Cine-Aesthetic",
+     "trigger_word": "mgwr/cine",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/sWizad/pokemon-trainer-sprites-pixelart-flux/resolve/main/26578915.jpeg",
+     "repo": "sWizad/pokemon-trainer-sprites-pixelart-flux",
+     "title": "Pokemon Trainer Sprites",
+     "trigger_word": "white background, a pixel image of",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
+     "title": "animation2k",
+     "repo": "nerijs/animation2k-flux",
+     "trigger_word": ""
+   },
+   {
+     "image": "https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
+     "title": "SoftServe Anime",
+     "repo": "alvdansen/softserve_anime",
+     "trigger_word": ""
+   },
+   {
+     "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
+     "title": "PS1 style",
+     "repo": "veryVANYA/ps1-style-flux",
+     "trigger_word": "ps1 game screenshot,",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
+     "title": "flux koda",
+     "repo": "alvdansen/flux-koda",
+     "trigger_word": "flmft style"
+   },
+   {
+     "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
+     "title": "Frosting Lane Flux",
+     "repo": "alvdansen/frosting_lane_flux",
+     "trigger_word": ""
+   },
+   {
+     "image": "https://huggingface.co/davisbro/half_illustration/resolve/main/images/example3.webp",
+     "title": "Half Illustration",
+     "repo": "davisbro/half_illustration",
+     "trigger_word": "in the style of TOK"
+   },
+   {
+     "image": "https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
+     "title": "wrong",
+     "repo": "fofr/flux-wrong",
+     "trigger_word": "WRNG"
+   },
+   {
+     "image": "https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
+     "title": "Yarn Art",
+     "repo": "linoyts/yarn_art_Flux_LoRA",
+     "trigger_word": ", yarn art style"
+   },
+   {
+     "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
+     "title": "Paper Cutout",
+     "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
+     "trigger_word": ", Paper Cutout Style"
+   },
+   {
+     "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
+     "title": "Aquarell Watercolor",
+     "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
+     "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
+   },
+   {
+     "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
+     "title": "SyntheticAnime",
+     "repo": "dataautogpt3/FLUX-SyntheticAnime",
+     "trigger_word": "1980s anime screengrab, VHS quality"
+   },
+   {
+     "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
+     "title": "flux-anime",
+     "repo": "XLabs-AI/flux-lora-collection",
+     "weights": "anime_lora.safetensors",
+     "trigger_word": ", anime"
+   },
+   {
+     "image": "https://replicate.delivery/yhqm/QD8Ioy5NExqSCtBS8hG04XIRQZFaC9pxJemINT1bibyjZfSTA/out-0.webp",
+     "title": "80s Cyberpunk",
+     "repo": "fofr/flux-80s-cyberpunk",
+     "trigger_word": "style of 80s cyberpunk",
+     "trigger_position": "prepend"
+   },
+   {
+     "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
+     "title": "Boreal",
+     "repo": "kudzueye/boreal-flux-dev-v2",
+     "trigger_word": "phone photo"
+   },
+   {
+     "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
+     "title": "flux-disney",
+     "repo": "XLabs-AI/flux-lora-collection",
+     "weights": "disney_lora.safetensors",
+     "trigger_word": ", disney style"
+   },
+   {
+     "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
+     "title": "flux-art",
+     "repo": "XLabs-AI/flux-lora-collection",
+     "weights": "art_lora.safetensors",
+     "trigger_word": ", art"
+   },
+   {
+     "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
+     "title": "Retrofuturism Flux",
+     "repo": "martintomov/retrofuturism-flux",
+     "trigger_word": ", retrofuturism"
+   }
+ ]
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ git+https://github.com/huggingface/huggingface_hub.git
+ pillow
+ gradio