|
import gradio as gr |
|
import pytesseract |
|
from PIL import Image |
|
from transformers import pipeline |
|
import re |
|
from langdetect import detect |
|
from deep_translator import GoogleTranslator |
|
import openai |
|
import os |
|
import requests |
|
import json |
|
|
|
|
|
# OpenAI credentials (optional; only checked/warned about under __main__).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Google Safe Browsing v4 configuration. When the key is unset,
# check_urls_with_google_safebrowsing() reports every URL as safe.
SAFE_BROWSING_API_KEY = os.getenv("GOOGLE_SAFE_BROWSING_API_KEY")
SAFE_BROWSING_URL = "https://safebrowsing.googleapis.com/v4/threatMatches:find"

# Translator used to render the English keyword lists in Spanish when
# the input message is detected as Spanish (see get_keywords_by_language).
translator = GoogleTranslator(source="auto", target="es")

# Keyword lists are loaded once at import time: one keyword per line,
# blank lines skipped, lowercased for case-insensitive substring matching.
# NOTE(review): a missing file raises FileNotFoundError at import time --
# confirm both .txt files ship alongside the app.
with open("smishing_keywords.txt", "r", encoding="utf-8") as f:
    SMISHING_KEYWORDS = [line.strip().lower() for line in f if line.strip()]

with open("other_scam_keywords.txt", "r", encoding="utf-8") as f:
    OTHER_SCAM_KEYWORDS = [line.strip().lower() for line in f if line.strip()]

# Multilingual zero-shot classifier (XNLI-tuned XLM-RoBERTa); the model is
# downloaded on first use by transformers.
model_name = "joeddav/xlm-roberta-large-xnli"
classifier = pipeline("zero-shot-classification", model=model_name)
CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
|
|
|
def check_urls_with_google_safebrowsing(urls):
    """
    Check URLs against the Google Safe Browsing v4 threatMatches API.

    Debug-enabled: prints the request payload and response to help
    troubleshoot issues. The API key itself is redacted in the output --
    secrets must never be written to logs.

    Args:
        urls: iterable of URL strings to check.

    Returns:
        dict mapping each url to a bool (True = flagged as malicious).
        If the API key is missing or any error occurs, every URL is
        reported as safe (False) -- the check fails open by design.
    """
    result = {}
    if not SAFE_BROWSING_API_KEY:
        print("No GOOGLE_SAFE_BROWSING_API_KEY found. Returning all URLs as safe.")
        for u in urls:
            result[u] = False
        return result

    payload = {
        "client": {
            "clientId": "my-smishing-detector",
            "clientVersion": "1.0"
        },
        "threatInfo": {
            "threatTypes": [
                "MALWARE",
                "SOCIAL_ENGINEERING",
                "UNWANTED_SOFTWARE",
                "POTENTIALLY_HARMFUL_APPLICATION"
            ],
            "platformTypes": ["ANY_PLATFORM"],
            "threatEntryTypes": ["URL"],
            "threatEntries": [{"url": u} for u in urls]
        }
    }

    print("---- Safe Browsing Debug ----")
    print("REQUEST Endpoint:", SAFE_BROWSING_URL)
    # Fix: the previous version printed the raw API key here, leaking a
    # secret into logs. Show only a redacted hint for troubleshooting.
    redacted = f"...{SAFE_BROWSING_API_KEY[-4:]}" if len(SAFE_BROWSING_API_KEY) > 4 else "(redacted)"
    print("API Key:", redacted)
    print("REQUEST Payload (JSON):")
    print(json.dumps(payload, indent=2))

    try:
        resp = requests.post(
            SAFE_BROWSING_URL,
            params={"key": SAFE_BROWSING_API_KEY},
            json=payload,
            timeout=10
        )
        print("RESPONSE Status Code:", resp.status_code)
        try:
            data = resp.json()
            print("RESPONSE JSON:")
            print(json.dumps(data, indent=2))
        except Exception as parse_err:
            # Non-JSON body (e.g. HTML error page); treat as no matches.
            print("Error parsing response as JSON:", parse_err)
            data = {}

        # Collect the URLs the API flagged; anything not listed is safe.
        malicious_urls = {
            match.get("threat", {}).get("url")
            for match in data.get("matches", [])
        }
        malicious_urls.discard(None)

        for u in urls:
            result[u] = u in malicious_urls

    except Exception as e:
        # Network failure, timeout, DNS error, etc. -- fail open.
        print(f"Error contacting Safe Browsing API: {e}")
        for u in urls:
            result[u] = False

    print("RESULTS (url -> malicious):", result)
    print("---- End Debug ----\n")
    return result
|
|
|
def get_keywords_by_language(text: str):
    """
    Detect the language of *text* and return keyword lists in that language.

    Language detection uses langdetect on the first 200 characters; any
    detection failure (empty/ambiguous input raises inside langdetect)
    falls back to English.

    Fix: the Spanish branch previously re-translated every keyword on
    every call -- one GoogleTranslator network round-trip per keyword per
    message. The translated lists are now computed once per process and
    cached on the function object.

    Args:
        text: the message being classified.

    Returns:
        (smishing_keywords, other_scam_keywords, lang_code) where
        lang_code is "es" or "en".
    """
    snippet = text[:200]
    try:
        detected_lang = detect(snippet)
    except Exception:
        detected_lang = "en"

    if detected_lang != "es":
        return SMISHING_KEYWORDS, OTHER_SCAM_KEYWORDS, "en"

    # Translate once; subsequent Spanish messages reuse the cached lists.
    cache = getattr(get_keywords_by_language, "_es_cache", None)
    if cache is None:
        cache = (
            [translator.translate(kw).lower() for kw in SMISHING_KEYWORDS],
            [translator.translate(kw).lower() for kw in OTHER_SCAM_KEYWORDS],
        )
        get_keywords_by_language._es_cache = cache
    return cache[0], cache[1], "es"
|
|
|
def boost_probabilities(probabilities: dict, text: str):
    """
    Adjust zero-shot class probabilities with heuristic signals.

    Signals, in order:
      1. +0.30 to SMiShing / Other Scam per matching keyword in the text.
      2. +0.35 to SMiShing if any URL is present.
      3. Clamp to >= 0 and renormalize so the three classes sum to 1.
      4. If Google Safe Browsing flags any found URL, force SMiShing = 1.0.

    Fix: URLs were previously extracted from the lowercased text, which
    mangles case-sensitive URL paths before the Safe Browsing lookup and
    could miss real threats. URLs are now taken from the original text
    (matched case-insensitively), preserving their exact form.

    Args:
        probabilities: dict with "SMiShing", "Other Scam", "Legitimate" scores.
        text: the message being classified.

    Returns:
        dict with boosted probabilities plus "detected_lang",
        "found_urls" and "safe_browsing_results".
    """
    lower_text = text.lower()
    smishing_keywords, other_scam_keywords, detected_lang = get_keywords_by_language(text)

    # Keyword matching is case-insensitive (keyword lists are lowercased).
    smishing_count = sum(1 for kw in smishing_keywords if kw in lower_text)
    other_scam_count = sum(1 for kw in other_scam_keywords if kw in lower_text)

    smishing_boost = 0.30 * smishing_count
    other_scam_boost = 0.30 * other_scam_count

    # Match against the ORIGINAL text so URL case is preserved for the
    # Safe Browsing lookup; IGNORECASE keeps the same match set as before.
    found_urls = re.findall(
        r"(https?://[^\s]+|\b[a-zA-Z0-9.-]+\.(?:com|net|org|edu|gov|mil|io|ai|co|info|biz|us|uk|de|fr|es|ru|jp|cn|in|au|ca|br|mx|it|nl|se|no|fi|ch|pl|kr|vn|id|tw|sg|hk)\b)",
        text,
        flags=re.IGNORECASE
    )
    if found_urls:
        smishing_boost += 0.35

    p_smishing = probabilities.get("SMiShing", 0.0)
    p_other_scam = probabilities.get("Other Scam", 0.0)
    p_legit = probabilities.get("Legitimate", 1.0)

    # Apply boosts; whatever is added to the scam classes is taken from
    # "Legitimate" so the mass shift is zero-sum before clamping.
    p_smishing += smishing_boost
    p_other_scam += other_scam_boost
    p_legit -= (smishing_boost + other_scam_boost)

    p_smishing = max(p_smishing, 0.0)
    p_other_scam = max(p_other_scam, 0.0)
    p_legit = max(p_legit, 0.0)

    total = p_smishing + p_other_scam + p_legit
    if total > 0:
        p_smishing /= total
        p_other_scam /= total
        p_legit /= total
    else:
        # Degenerate case: everything clamped to zero; default to legit.
        p_smishing, p_other_scam, p_legit = 0.0, 0.0, 1.0

    sb_results = {}
    if found_urls:
        sb_results = check_urls_with_google_safebrowsing(found_urls)

        # Any confirmed-malicious URL overrides the heuristics entirely.
        if any(sb_results[u] for u in sb_results):
            p_smishing = 1.0
            p_other_scam = 0.0
            p_legit = 0.0

    return {
        "SMiShing": p_smishing,
        "Other Scam": p_other_scam,
        "Legitimate": p_legit,
        "detected_lang": detected_lang,
        "found_urls": found_urls,
        "safe_browsing_results": sb_results
    }
|
|
|
def smishing_detector(input_type, text, image):
    """
    Classify a suspicious message as SMiShing / Other Scam / Legitimate.

    Text comes either directly from the textbox ('Text' mode) or from OCR
    on an uploaded screenshot (any other mode). The zero-shot classifier
    output is then boosted with keyword/URL heuristics and Google Safe
    Browsing results (Safe Browsing debug logs print to stdout).
    """
    if input_type == "Text":
        combined_text = (text or "").strip()
    else:
        combined_text = ""
        if image is not None:
            # OCR with both Spanish and English language packs.
            combined_text = pytesseract.image_to_string(image, lang="spa+eng").strip()

    # Nothing to classify -- return a placeholder result.
    if not combined_text:
        return {
            "text_used_for_classification": "(none)",
            "label": "No text provided",
            "confidence": 0.0,
            "keywords_found": [],
            "urls_found": [],
            "safe_browsing_results": {},
        }

    zero_shot = classifier(
        sequences=combined_text,
        candidate_labels=CANDIDATE_LABELS,
        hypothesis_template="This message is {}."
    )
    original_probs = dict(zip(zero_shot["labels"], map(float, zero_shot["scores"])))

    # Heuristic boosting; pop the metadata keys so only class
    # probabilities remain in the dict.
    boosted = boost_probabilities(original_probs, combined_text)
    detected_lang = boosted.pop("detected_lang", "en")
    sb_results = boosted.pop("safe_browsing_results", {})
    found_urls = boosted.pop("found_urls", [])

    boosted = {label: float(score) for label, score in boosted.items()}

    final_label, final_score = max(boosted.items(), key=lambda kv: kv[1])

    return {
        "detected_language": detected_lang,
        "text_used_for_classification": combined_text,
        "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
        "boosted_probabilities": {k: round(v, 3) for k, v in boosted.items()},
        "label": final_label,
        "confidence": round(final_score, 3),
        "urls_found": found_urls,
        "safe_browsing_results": sb_results
    }
|
|
|
|
|
|
|
|
|
def toggle_inputs(choice):
    """Show the textbox for 'Text' input; show the image uploader otherwise."""
    is_text = choice == "Text"
    return gr.update(visible=is_text), gr.update(visible=not is_text)
|
|
|
# Gradio UI: radio toggles between text and screenshot input; the button
# runs smishing_detector and renders its dict result as JSON.
with gr.Blocks() as demo:
    gr.Markdown("## SMiShing & Scam Detector with Debug-Enabled Safe Browsing")

    with gr.Row():
        input_type = gr.Radio(
            choices=["Text", "Screenshot"],
            value="Text",
            label="Choose Input Type"
        )

    # Exactly one of these two inputs is visible at a time (see toggle_inputs).
    text_input = gr.Textbox(
        lines=3,
        label="Paste Suspicious SMS Text",
        placeholder="Type or paste the message here...",
        visible=True
    )
    image_input = gr.Image(
        type="pil",
        label="Upload Screenshot",
        visible=False
    )

    # Swap input visibility immediately on radio change; queue=False so the
    # UI toggle is not delayed behind queued classification jobs.
    input_type.change(
        fn=toggle_inputs,
        inputs=input_type,
        outputs=[text_input, image_input],
        queue=False
    )

    analyze_btn = gr.Button("Classify")
    output_json = gr.JSON(label="Result")

    analyze_btn.click(
        fn=smishing_detector,
        inputs=[input_type, text_input, image_input],
        outputs=output_json
    )
|
|
|
if __name__ == "__main__":
    # Surface missing credentials early; the app still launches without them.
    credential_warnings = (
        (openai.api_key, "WARNING: OPENAI_API_KEY not set. LLM calls may fail."),
        (SAFE_BROWSING_API_KEY, "WARNING: GOOGLE_SAFE_BROWSING_API_KEY not set. All URLs returned as safe."),
    )
    for credential, message in credential_warnings:
        if not credential:
            print(message)
    demo.launch()