Seokjin
Remove unsupported 'interactive' argument from gr.JSON
520d0cf
# -*- coding: utf-8 -*-
# --- Import required modules ---
import gradio as gr
from transformers import DonutProcessor, VisionEncoderDecoderModel
from PIL import Image
import torch
import re
import json
import os
import warnings
# --- Suppress warning messages ---
# The PyTorch warning "UserWarning: TypedStorage is deprecated" is safe to ignore.
warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
# Silence FutureWarnings as well.
warnings.filterwarnings("ignore", category=FutureWarning)
# --- Model and processor paths ---
# The fine-tuned model is loaded directly from the Hugging Face Hub
# (repo: greene6517/finetuned_donut_sroie), not from files bundled in this Space.
model_path_finetuned = "greene6517/finetuned_donut_sroie"
model_name_base = "naver-clova-ix/donut-base"  # the base model is also loaded directly from the Hub
# --- Load the fine-tuned processor and model ---
print(f"Loading fine-tuned processor from Hub: {model_path_finetuned}")
try:
    # Do not pass local_files_only=True here: the processor must come from the Hub.
processor = DonutProcessor.from_pretrained(model_path_finetuned)
print("Successfully loaded fine-tuned processor from Hub.")
except Exception as e:
print(f"FATAL: Could not load fine-tuned processor from Hub: {e}")
exit()
print(f"Loading Fine-tuned model from Hub: {model_path_finetuned}") # ๋กœ๊ทธ ๋ฉ”์‹œ์ง€๋„ ํ™•์ธ
try:
    # Again, no local_files_only=True: the model weights come from the Hub.
model_finetuned = VisionEncoderDecoderModel.from_pretrained(model_path_finetuned)
print("Successfully loaded fine-tuned model from Hub.")
except Exception as e:
print(f"FATAL: Could not load fine-tuned model from Hub: {e}")
exit()
print(f"Loading Fine-tuned model from: {model_path_finetuned}")
try:
# local_files_only=True ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ Spaces ์ €์žฅ์†Œ ๋‚ด ํŒŒ์ผ๋งŒ ์‚ฌ์šฉํ•˜๋„๋ก ๊ฐ•์ œ
model_finetuned = VisionEncoderDecoderModel.from_pretrained(model_path_finetuned, local_files_only=True)
print("Successfully loaded fine-tuned model locally from Space repo.")
except Exception as e:
print(f"Error loading fine-tuned model locally: {e}. Check if model files exist at the path.")
# ํ•„์š”์‹œ Hub์—์„œ ๋กœ๋“œ ์‹œ๋„ํ•˜๋Š” ๋กœ์ง ์ถ”๊ฐ€ ๊ฐ€๋Šฅ (๋‹จ, ๋ชจ๋ธ์ด Hub์— ์—…๋กœ๋“œ ๋˜์–ด ์žˆ์–ด์•ผ ํ•จ)
# try:
# model_finetuned = VisionEncoderDecoderModel.from_pretrained("your-hf-username/your-model-repo-name") # Hub ๊ฒฝ๋กœ ์˜ˆ์‹œ
# print("Loaded fine-tuned model from Hub as fallback.")
# except Exception as e2:
# print(f"FATAL: Could not load fine-tuned model locally or from Hub: {e2}")
# exit()
# ์—ฌ๊ธฐ์„œ๋Š” ๋กœ์ปฌ ๋กœ๋”ฉ ์‹คํŒจ ์‹œ ์ผ๋‹จ ์ข…๋ฃŒํ•˜๋„๋ก ํ•จ (์ˆ˜์ • ํ•„์š”์‹œ ์ฃผ์„ ํ•ด์ œ)
print(f"FATAL: Could not load fine-tuned model locally: {e}")
exit()
# --- Load the base processor and model (directly from the Hub) ---
print(f"Loading Base processor from: {model_name_base}")
try:
processor_base = DonutProcessor.from_pretrained(model_name_base)
print("Successfully loaded base processor.")
except Exception as e:
print(f"FATAL: Could not load base processor: {e}")
exit()
print(f"Loading Base model from: {model_name_base}")
try:
model_base = VisionEncoderDecoderModel.from_pretrained(model_name_base)
print("Successfully loaded base model.")
except Exception as e:
print(f"FATAL: Could not load base model: {e}")
exit()
# --- Device setup and model placement ---
# On Spaces, the app runs on CPU or on an allocated GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"\nUsing device: {device}")
# Move the models to the selected device
try:
model_finetuned.to(device)
model_base.to(device)
print("Models moved to device.")
    # Set evaluation mode (required for inference)
model_finetuned.eval()
model_base.eval()
print("Models set to evaluation mode.")
except Exception as e:
print(f"Error moving models to device or setting eval mode: {e}")
exit()
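# eval() disables dropout and puts normalization layers in inference mode;
# together with the @torch.no_grad() decorator used below, this is the
# standard inference-only setup for PyTorch models.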
# --- Helper function to clean a generated sequence (mainly for the fine-tuned model) ---
def clean_sequence(sequence, processor_to_use, prompt_token_str=None):
"""Removes prompt, EOS, PAD tokens from a generated sequence."""
cleaned = sequence
try:
# Standard tokens first
eos_token = processor_to_use.tokenizer.eos_token if processor_to_use.tokenizer.eos_token else "</s>" # Default EOS
pad_token = processor_to_use.tokenizer.pad_token if processor_to_use.tokenizer.pad_token else "<pad>" # Default PAD
cleaned = cleaned.replace(eos_token, "").replace(pad_token, "").strip()
# Add BOS token removal if it exists and appears
if hasattr(processor_to_use.tokenizer, 'bos_token') and processor_to_use.tokenizer.bos_token:
cleaned = cleaned.replace(processor_to_use.tokenizer.bos_token, "").strip()
# Specific prompt removal (case-insensitive start check can be robust)
if prompt_token_str:
# Simple startswith check might be enough if prompt is always at the beginning
if cleaned.startswith(prompt_token_str):
cleaned = cleaned[len(prompt_token_str):].strip()
# Regex version (more robust but slightly slower)
# cleaned = re.sub(f"^{re.escape(prompt_token_str)}", "", cleaned, flags=re.IGNORECASE).strip()
except Exception as e:
print(f"Warning: Error during sequence cleaning: {e}")
return sequence # Return original if cleaning fails
return cleaned
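# Illustrative example (hypothetical strings; assumes "</s>" is the EOS token):
#   clean_sequence("<s_sroie><s_total>$10.00</s_total></s>", processor, "<s_sroie>")
#   -> "<s_total>$10.00</s_total>"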
# --- Helper function to parse SROIE format ---
def token2json_simple(text):
"""Parses <s_key>value</s_key> format into a dictionary."""
output = {}
# Regex to find <s_...>...</s_...> patterns, handling potential spaces and newlines in value
# It captures the key name (e.g., "company") and the value between the tags.
parts = re.findall(r"<s_(.*?)>([\s\S]*?)</s_\1>", text)
for key, value in parts:
# Strip leading/trailing whitespace from key and value
output[key.strip()] = value.strip()
# Add info if parsing failed but text was present
if not output and text and not text.isspace():
output["parsing_info"] = "Could not parse SROIE key-value pairs from the cleaned sequence."
output["cleaned_sequence_preview"] = text[:200] + "..." # Show preview
elif not text or text.isspace():
output["parsing_info"] = "Empty sequence after cleaning, nothing to parse."
return output
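# Illustrative example of the expected SROIE-style format (hypothetical values):
#   token2json_simple("<s_company>ACME MART</s_company><s_total>$10.00</s_total>")
#   -> {"company": "ACME MART", "total": "$10.00"}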
# --- Combined image processing and inference function ---
# Disable gradient tracking during inference (saves memory and speeds things up)
@torch.no_grad()
def process_image_comparison(image_input):
    if image_input is None:
        no_image_msg = {"error": "Please upload an image."}
        # Return JSON strings for both Gradio output components
        return json.dumps(no_image_msg, indent=2, ensure_ascii=False), json.dumps(no_image_msg, indent=2, ensure_ascii=False)
    try:
        # Gradio delivers the image as a numpy array; convert it to a PIL RGB image
        image = Image.fromarray(image_input).convert("RGB")
    except Exception as e:
        error_msg = {"error": f"Image conversion error: {e}"}
        error_json_str = json.dumps(error_msg, indent=2, ensure_ascii=False)
        return error_json_str, error_json_str
results_ft_json_str = "{}"
results_base_json_str = "{}"
sequence_ft_raw = "N/A"
sequence_base_raw = "N/A"
    # === Fine-tuned model inference ===
    try:
        pixel_values_ft = processor(image, return_tensors="pt").pixel_values.to(device)
        task_prompt_ft = "<s_sroie>"  # start prompt for the fine-tuned model
        decoder_input_ids_ft = processor.tokenizer(
            task_prompt_ft, add_special_tokens=False, return_tensors="pt"
        ).input_ids.to(device)
        # Generation parameters
        generation_config_ft = {
            "max_length": model_finetuned.config.decoder.max_position_embeddings,
            "pad_token_id": processor.tokenizer.pad_token_id,
            "eos_token_id": processor.tokenizer.eos_token_id,
            "use_cache": True,
            # Compare against None explicitly: unk_token_id may be 0, which is falsy
            "bad_words_ids": [[processor.tokenizer.unk_token_id]] if processor.tokenizer.unk_token_id is not None else None,
            "return_dict_in_generate": True,
            "decoder_input_ids": decoder_input_ids_ft  # supply the start prompt
        }
        outputs_ft = model_finetuned.generate(pixel_values_ft, **generation_config_ft)
        sequence_ft_raw = processor.batch_decode(outputs_ft.sequences)[0]
        # print(f"\nFine-tuned raw output: {sequence_ft_raw}")  # server-side debug log
        # Clean the fine-tuned model output
        sequence_ft_cleaned = clean_sequence(sequence_ft_raw, processor, prompt_token_str=task_prompt_ft)
        # print(f"Fine-tuned cleaned output: {sequence_ft_cleaned}")  # server-side debug log
        # Parse the cleaned sequence
        result_json_ft = token2json_simple(sequence_ft_cleaned)
        result_json_ft["raw_decoded_sequence_preview"] = sequence_ft_raw[:200] + "..."  # preview of the raw output
        # Serialize to the final JSON string
        results_ft_json_str = json.dumps(result_json_ft, indent=2, ensure_ascii=False, sort_keys=False)
    except Exception as e:
        print(f"Error during fine-tuned model inference: {e}")
        import traceback
        traceback.print_exc()  # detailed error log on the server
        results_ft_json_str = json.dumps({
            "error": f"Fine-tuned model inference error: {e}",
            "raw_decoded_sequence_before_error": sequence_ft_raw
        }, indent=2, ensure_ascii=False)
    # === Base model inference ===
    try:
        pixel_values_base = processor_base(image, return_tensors="pt").pixel_values.to(device)
        # Prompt for the base model (e.g. <s_iitcdip> or another generic document prompt);
        # kept the same as in the previous version of this code.
        task_prompt_base = "<s_iitcdip>"
        # The base model may not have this prompt token, so it should be verified
        # (or a different prompt used); we proceed with it for now.
        try:
            decoder_input_ids_base = processor_base.tokenizer(
                task_prompt_base,
                add_special_tokens=False,
                return_tensors="pt",
            ).input_ids.to(device)
        except Exception as tokenizer_e:
            print(f"Warning: Base processor cannot tokenize prompt '{task_prompt_base}'. Using default generation. Error: {tokenizer_e}")
            decoder_input_ids_base = None  # generate without a prompt
        # Generation parameters
        generation_config_base = {
            "max_length": model_base.config.decoder.max_position_embeddings,
            "early_stopping": True,
            "pad_token_id": processor_base.tokenizer.pad_token_id,
            "eos_token_id": processor_base.tokenizer.eos_token_id,
            "use_cache": True,
            "num_beams": 1,  # greedy decoding
            "bad_words_ids": [[processor_base.tokenizer.unk_token_id]] if processor_base.tokenizer.unk_token_id is not None else None,
            "return_dict_in_generate": True,
        }
        # Attach the prompt only if it was tokenized successfully
        if decoder_input_ids_base is not None:
            generation_config_base["decoder_input_ids"] = decoder_input_ids_base
        outputs_base = model_base.generate(pixel_values_base, **generation_config_base)
        sequence_base_raw = processor_base.batch_decode(outputs_base.sequences)[0]
        # print(f"\nBase raw output: {sequence_base_raw}")  # server-side debug log
        # Clean the base model output via skip_special_tokens
        sequence_base_cleaned = processor_base.batch_decode(outputs_base.sequences, skip_special_tokens=True)[0]
        # print(f"Base cleaned output (skip_special_tokens): {sequence_base_cleaned}")  # server-side debug log
        # Build the result dictionary
        result_json_base = {
            "raw_decoded_sequence_preview": sequence_base_raw[:200] + "...",  # preview of the raw output
            "output_skip_special_tokens": sequence_base_cleaned  # cleaned output
        }
        # Serialize to the final JSON string
        results_base_json_str = json.dumps(result_json_base, indent=2, ensure_ascii=False, sort_keys=False)
    except Exception as e:
        print(f"Error during base model inference: {e}")
        import traceback
        traceback.print_exc()  # detailed error log on the server
        results_base_json_str = json.dumps({
            "error": f"Base model inference error: {e}",
            "raw_decoded_sequence_before_error": sequence_base_raw  # include the raw output if available
        }, indent=2, ensure_ascii=False)
    # Return both model results as JSON strings
    return results_ft_json_str, results_base_json_str
# --- Gradio interface definition ---
# CSS styles
custom_css = """
body { background-color: #f0f4f8; font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
#main_title { text-align: center; color: #1a5276; font-size: 2.3em; font-weight: 600; margin-top: 20px; margin-bottom: 5px; }
#sub_description { text-align: center; color: #566573; font-size: 1.0em; margin-bottom: 25px; }
.gradio-container { border-radius: 10px !important; box-shadow: 0 3px 10px rgba(0,0,0,0.08); padding: 25px !important; }
footer { display: none !important; } /* Hide Gradio footer */
#output-title-ft, #output-title-base { color: #1a5276; font-weight: 600; margin-bottom: 8px; font-size: 1.2em; border-bottom: 2px solid #aed6f1; padding-bottom: 4px; }
#output_row > div.gradio-column { border: 1px solid #d5dbdb; padding: 15px !important; border-radius: 8px; background-color: #ffffff; margin: 0 8px !important; box-shadow: 0 1px 3px rgba(0,0,0,0.04); }
#json_output_ft > div:nth-child(2), #json_output_base > div:nth-child(2) { max-height: 600px; overflow-y: auto !important; } /* JSON output scroll */
"""
# Build the Gradio Blocks interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky")) as demo:
gr.Markdown("# Donut ๋ชจ๋ธ ๋น„๊ต: Fine-tuned vs Base", elem_id="main_title")
gr.Markdown("์˜์ˆ˜์ฆ ์ด๋ฏธ์ง€๋ฅผ ์—…๋กœ๋“œํ•˜๋ฉด Fine-tuned ๋ชจ๋ธ(SROIE ํŒŒ์‹ฑ)๊ณผ Base ๋ชจ๋ธ์˜ ์ถ”์ถœ ๊ฒฐ๊ณผ๋ฅผ ๋น„๊ตํ•ฉ๋‹ˆ๋‹ค.", elem_id="sub_description")
with gr.Row():
with gr.Column(scale=1):
image_input = gr.Image(type="numpy", label="๐Ÿงพ ์˜์ˆ˜์ฆ ์ด๋ฏธ์ง€ ์—…๋กœ๋“œ")
submit_btn = gr.Button("๐Ÿš€ ๊ฒฐ๊ณผ ๋น„๊ต ์‹œ์ž‘", variant="primary", scale=0)
            # --- Example images can hit path issues on Spaces, so only list the ones that exist ---
            # Upload example images with the Space repo at matching paths for them to appear below.
            example_img_dir = "example"  # the 'example' folder at the Space repo root
            # Keep only the files that actually exist
            example_paths = [os.path.join(example_img_dir, f) for f in ["1.jpg", "2.jpg"] if os.path.exists(os.path.join(example_img_dir, f))]
            if example_paths:
                gr.Examples(examples=example_paths, inputs=image_input, label="Click an example image (then press 'Compare results')")
            else:
                gr.Markdown("_(No example images found. Check the 'example' folder.)_")
        with gr.Column(scale=2):
            with gr.Row(elem_id="output_row"):
                with gr.Column(scale=1):
                    gr.Markdown("### ✨ Fine-tuned Model (SROIE parsing)", elem_id="output-title-ft")
                    json_output_ft = gr.JSON(label="Fine-tuned output (JSON)", elem_id="json_output_ft")
                with gr.Column(scale=1):
                    gr.Markdown("### 💡 Base Model (Raw + Cleaned)", elem_id="output-title-base")
                    json_output_base = gr.JSON(label="Base model output (JSON)", elem_id="json_output_base")
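                    # Note: gr.JSON accepts the JSON-formatted strings returned by
                    # process_image_comparison and renders them as structured JSON.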
    # Wire the button click to the inference function and its inputs/outputs
    submit_btn.click(
        fn=process_image_comparison,
        inputs=image_input,
        outputs=[json_output_ft, json_output_base]  # components in the order the function returns them
    )
# --- Launch the Gradio app ---
# This is what runs when the app starts on Hugging Face Spaces.
if __name__ == "__main__":
    # share=True is not needed in the Spaces environment.
    demo.launch()
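# Quick local smoke test (illustrative; assumes a receipt image exists at "example/1.jpg"):
#   import numpy as np
#   ft_json, base_json = process_image_comparison(np.array(Image.open("example/1.jpg")))
#   print(ft_json)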