hackerbyhobby
committed on
updated to translate the final message into the originally detected language
- app.py +29 -18
- app.py.bestoftues +175 -16
app.py
CHANGED
@@ -8,7 +8,7 @@ from deep_translator import GoogleTranslator
 import openai
 import os
 
-# Set
+# Set your OpenAI API key
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # Translator instance
@@ -120,7 +120,6 @@ def query_llm_for_classification(raw_message: str) -> dict:
         raw_reply = response["choices"][0]["message"]["content"].strip()
 
         import json
-        # Expect something like {"label": "...", "reason": "..."}
         llm_result = json.loads(raw_reply)
         if "label" not in llm_result or "reason" not in llm_result:
             return {"label": "Unknown", "reason": f"Unexpected format: {raw_reply}"}
@@ -170,19 +169,31 @@ def query_llm_for_explanation(
     llm_reason: str,
     found_smishing: list,
     found_other_scam: list,
-    found_urls: list
+    found_urls: list,
+    detected_lang: str
 ) -> str:
     """
-    Second LLM call: provides a holistic explanation of the final classification.
-    We include the local classification info, the LLM's own classification, and
-    relevant details (keywords, URLs).
+    Second LLM call: provides a holistic explanation of the final classification
+    in the same language as detected_lang (English or Spanish).
     """
-    system_prompt = (
-        "You are a cybersecurity expert providing a final explanation to the user. "
-        "Combine the local classification, the LLM classification, and the final label "
-        "into one concise explanation. Do not reveal internal code or raw JSON; just give "
-        "a short, user-friendly explanation. End with a final statement of the final label."
-    )
+    # Decide the language for final explanation
+    if detected_lang == "es":
+        # Spanish
+        system_prompt = (
+            "Eres un experto en ciberseguridad. Proporciona una explicación final al usuario en español. "
+            "Combina la clasificación local, la clasificación LLM y la etiqueta final en una sola explicación breve. "
+            "No reveles el código interno ni el JSON bruto; simplemente da una breve explicación fácil de entender. "
+            "Termina con la etiqueta final. "
+        )
+    else:
+        # Default to English
+        system_prompt = (
+            "You are a cybersecurity expert providing a final explanation to the user in English. "
+            "Combine the local classification, the LLM classification, and the final label "
+            "into one concise explanation. Do not reveal internal code or raw JSON. "
+            "End with a final statement of the final label."
+        )
+
     user_context = f"""
 User Message:
 {text}
@@ -195,7 +206,7 @@ Suspicious SMiShing Keywords => {found_smishing}
 Suspicious Other Scam Keywords => {found_other_scam}
 URLs => {found_urls}
 """
-    # The LLM can combine these facts into a short paragraph.
+
     try:
         response = openai.ChatCompletion.create(
             model="gpt-3.5-turbo",
@@ -215,7 +226,7 @@ def smishing_detector(input_type, text, image):
     Main detection function combining text (if 'Text') & OCR (if 'Screenshot'),
     plus two LLM calls:
       1) classification to adjust final probabilities,
-      2) a final explanation summarizing the outcome.
+      2) a final explanation summarizing the outcome in the detected language.
     """
     if input_type == "Text":
         combined_text = text.strip() if text else ""
@@ -275,7 +286,7 @@ def smishing_detector(input_type, text, image):
     found_smishing = [kw for kw in smishing_keys if kw in lower_text]
     found_other_scam = [kw for kw in scam_keys if kw in lower_text]
 
-    # 6. Final LLM explanation
+    # 6. Final LLM explanation (in detected_lang)
     final_explanation = query_llm_for_explanation(
         text=combined_text,
         final_label=final_label,
@@ -286,7 +297,8 @@ def smishing_detector(input_type, text, image):
         llm_reason=llm_reason,
         found_smishing=found_smishing,
         found_other_scam=found_other_scam,
-        found_urls=found_urls
+        found_urls=found_urls,
+        detected_lang=detected_lang
     )
 
     return {
@@ -321,7 +333,7 @@ def toggle_inputs(choice):
     return gr.update(visible=False), gr.update(visible=True)
 
 with gr.Blocks() as demo:
-    gr.Markdown("## SMiShing & Scam Detector with LLM-Enhanced Logic")
+    gr.Markdown("## SMiShing & Scam Detector with LLM-Enhanced Logic (Multilingual Explanation)")
 
     with gr.Row():
         input_type = gr.Radio(
@@ -363,7 +375,6 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    # Warn if openai.api_key not set
    if not openai.api_key:
        print("WARNING: OPENAI_API_KEY not set. LLM calls will fail or be skipped.")
    demo.launch()
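Note: the new detected_lang argument carries the language code that langdetect already produces elsewhere in the app (via boost_probabilities / get_keywords_by_language). The snippet below is a minimal, standalone sketch of that routing, not code from app.py: pick_system_prompt is a hypothetical helper that mirrors the new detected_lang == "es" branch, so the prompt selection can be checked without an OpenAI call.

from langdetect import detect

def pick_system_prompt(detected_lang: str) -> str:
    # Hypothetical helper mirroring the detected_lang branch added above (prompts abridged).
    if detected_lang == "es":
        return "Eres un experto en ciberseguridad. Proporciona una explicación final al usuario en español."
    return "You are a cybersecurity expert providing a final explanation to the user in English."

sample = "Su paquete está retenido; pague la tarifa en el siguiente enlace."
detected_lang = detect(sample[:200])   # langdetect returns a language code such as "es"
print(detected_lang, "->", pick_system_prompt(detected_lang))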
app.py.bestoftues
CHANGED
@@ -5,6 +5,11 @@ from transformers import pipeline
 import re
 from langdetect import detect
 from deep_translator import GoogleTranslator
+import openai
+import os
+
+# Set up your OpenAI API key
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # Translator instance
 translator = GoogleTranslator(source="auto", target="es")
@@ -23,7 +28,7 @@ CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
 
 def get_keywords_by_language(text: str):
     """
-    Detect language using
+    Detect language using langdetect and translate keywords if needed.
     """
     snippet = text[:200]
     try:
@@ -88,9 +93,129 @@ def boost_probabilities(probabilities: dict, text: str):
         "detected_lang": detected_lang
     }
 
+def query_llm_for_classification(raw_message: str) -> dict:
+    """
+    First LLM call: asks for a classification (SMiShing, Other Scam, or Legitimate)
+    acting as a cybersecurity expert. Returns label and short reason.
+    """
+    if not raw_message.strip():
+        return {"label": "Unknown", "reason": "No message provided to the LLM."}
+
+    system_prompt = (
+        "You are a cybersecurity expert. You will classify the user's message "
+        "as one of: SMiShing, Other Scam, or Legitimate. Provide a short reason. "
+        "Return only JSON with keys: label, reason."
+    )
+    user_prompt = f"Message: {raw_message}\nClassify it as SMiShing, Other Scam, or Legitimate."
+
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt}
+            ],
+            temperature=0.2
+        )
+        raw_reply = response["choices"][0]["message"]["content"].strip()
+
+        import json
+        # Expect something like {"label": "...", "reason": "..."}
+        llm_result = json.loads(raw_reply)
+        if "label" not in llm_result or "reason" not in llm_result:
+            return {"label": "Unknown", "reason": f"Unexpected format: {raw_reply}"}
+
+        return llm_result
+
+    except Exception as e:
+        return {"label": "Unknown", "reason": f"LLM error: {e}"}
+
+def incorporate_llm_label(boosted: dict, llm_label: str) -> dict:
+    """
+    Adjust the final probabilities based on the LLM's classification.
+    If LLM says SMiShing, add +0.2 to SMiShing, etc. Then clamp & re-normalize.
+    """
+    if llm_label == "SMiShing":
+        boosted["SMiShing"] += 0.2
+    elif llm_label == "Other Scam":
+        boosted["Other Scam"] += 0.2
+    elif llm_label == "Legitimate":
+        boosted["Legitimate"] += 0.2
+    # else "Unknown" => do nothing
+
+    # clamp
+    for k in boosted:
+        if boosted[k] < 0:
+            boosted[k] = 0.0
+
+    total = sum(boosted.values())
+    if total > 0:
+        for k in boosted:
+            boosted[k] /= total
+    else:
+        # fallback
+        boosted["Legitimate"] = 1.0
+        boosted["SMiShing"] = 0.0
+        boosted["Other Scam"] = 0.0
+
+    return boosted
+
+def query_llm_for_explanation(
+    text: str,
+    final_label: str,
+    final_conf: float,
+    local_label: str,
+    local_conf: float,
+    llm_label: str,
+    llm_reason: str,
+    found_smishing: list,
+    found_other_scam: list,
+    found_urls: list
+) -> str:
+    """
+    Second LLM call: provides a holistic explanation of the final classification.
+    We include the local classification info, the LLM's own classification, and
+    relevant details (keywords, URLs).
+    """
+    system_prompt = (
+        "You are a cybersecurity expert providing a final explanation to the user. "
+        "Combine the local classification, the LLM classification, and the final label "
+        "into one concise explanation. Do not reveal internal code or raw JSON; just give "
+        "a short, user-friendly explanation. End with a final statement of the final label."
+    )
+    user_context = f"""
+User Message:
+{text}
+
+Local Classification => Label: {local_label}, Confidence: {local_conf}
+LLM Classification => Label: {llm_label}, Reason: {llm_reason}
+Final Overall Label => {final_label} (confidence {final_conf})
+
+Suspicious SMiShing Keywords => {found_smishing}
+Suspicious Other Scam Keywords => {found_other_scam}
+URLs => {found_urls}
+"""
+    # The LLM can combine these facts into a short paragraph.
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_context}
+            ],
+            temperature=0.2
+        )
+        final_explanation = response["choices"][0]["message"]["content"].strip()
+        return final_explanation
+    except Exception as e:
+        return f"Could not generate final explanation due to error: {e}"
+
 def smishing_detector(input_type, text, image):
     """
-    Main detection function combining text (if 'Text')
+    Main detection function combining text (if 'Text') & OCR (if 'Screenshot'),
+    plus two LLM calls:
+      1) classification to adjust final probabilities,
+      2) a final explanation summarizing the outcome.
     """
     if input_type == "Text":
         combined_text = text.strip() if text else ""
@@ -105,31 +230,44 @@ def smishing_detector(input_type, text, image):
             "label": "No text provided",
             "confidence": 0.0,
             "keywords_found": [],
-            "urls_found": []
+            "urls_found": [],
+            "llm_label": "Unknown",
+            "llm_reason": "No text to analyze",
+            "final_explanation": "No text provided"
         }
 
-
+    # 1. Local zero-shot classification
+    local_result = classifier(
         sequences=combined_text,
         candidate_labels=CANDIDATE_LABELS,
        hypothesis_template="This message is {}."
     )
-    original_probs = {k: float(v) for k, v in zip(
+    original_probs = {k: float(v) for k, v in zip(local_result["labels"], local_result["scores"])}
 
+    # 2. Basic boosting from keywords & URLs
     boosted = boost_probabilities(original_probs, combined_text)
+    detected_lang = boosted.pop("detected_lang", "en")
+
+    # Convert to float only
+    for k in boosted:
+        boosted[k] = float(boosted[k])
+
+    local_label = max(boosted, key=boosted.get)
+    local_conf = round(boosted[local_label], 3)
 
-    #
-
-
-
-    boosted.pop("detected_lang", None)
-    # 3. Convert numeric values to float
-    for k, v in boosted.items():
-        boosted[k] = float(v)
-    # Patched snippet ends
+    # 3. LLM Classification
+    llm_classification = query_llm_for_classification(combined_text)
+    llm_label = llm_classification.get("label", "Unknown")
+    llm_reason = llm_classification.get("reason", "No reason provided")
 
+    # 4. Incorporate LLM’s label into final probabilities
+    boosted = incorporate_llm_label(boosted, llm_label)
+
+    # Now we have updated probabilities
     final_label = max(boosted, key=boosted.get)
     final_confidence = round(boosted[final_label], 3)
 
+    # 5. Gather found keywords & URLs
     lower_text = combined_text.lower()
     smishing_keys, scam_keys, _ = get_keywords_by_language(combined_text)
 
@@ -137,16 +275,34 @@ def smishing_detector(input_type, text, image):
     found_smishing = [kw for kw in smishing_keys if kw in lower_text]
     found_other_scam = [kw for kw in scam_keys if kw in lower_text]
 
+    # 6. Final LLM explanation
+    final_explanation = query_llm_for_explanation(
+        text=combined_text,
+        final_label=final_label,
+        final_conf=final_confidence,
+        local_label=local_label,
+        local_conf=local_conf,
+        llm_label=llm_label,
+        llm_reason=llm_reason,
+        found_smishing=found_smishing,
+        found_other_scam=found_other_scam,
+        found_urls=found_urls
+    )
+
     return {
         "detected_language": detected_lang,
         "text_used_for_classification": combined_text,
         "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
-        "
+        "boosted_probabilities_before_llm": {local_label: local_conf},
+        "llm_label": llm_label,
+        "llm_reason": llm_reason,
+        "boosted_probabilities_after_llm": {k: round(v, 3) for k, v in boosted.items()},
         "label": final_label,
         "confidence": final_confidence,
         "smishing_keywords_found": found_smishing,
         "other_scam_keywords_found": found_other_scam,
         "urls_found": found_urls,
+        "final_explanation": final_explanation,
     }
 
 #
@@ -165,7 +321,7 @@ def toggle_inputs(choice):
     return gr.update(visible=False), gr.update(visible=True)
 
 with gr.Blocks() as demo:
-    gr.Markdown("## SMiShing & Scam Detector
+    gr.Markdown("## SMiShing & Scam Detector with LLM-Enhanced Logic")
 
     with gr.Row():
         input_type = gr.Radio(
@@ -207,4 +363,7 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
+    # Warn if openai.api_key not set
+    if not openai.api_key:
+        print("WARNING: OPENAI_API_KEY not set. LLM calls will fail or be skipped.")
     demo.launch()
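As a quick sanity check on the clamp-and-renormalize step inside incorporate_llm_label (added above), here is a worked example with assumed probabilities; the numbers are illustrative only, not output from the app.

# Assumed probabilities after keyword/URL boosting (illustrative values).
boosted = {"SMiShing": 0.5, "Other Scam": 0.2, "Legitimate": 0.3}

# The LLM classifies the message as SMiShing, so that class gets +0.2.
boosted["SMiShing"] += 0.2           # {"SMiShing": 0.7, "Other Scam": 0.2, "Legitimate": 0.3}

# Re-normalize so the values sum to 1 again.
total = sum(boosted.values())        # 1.2
boosted = {k: v / total for k, v in boosted.items()}
# => {"SMiShing": 0.583, "Other Scam": 0.167, "Legitimate": 0.25} (rounded)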