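"""
SMiShing & Scam Detector (Gradio app).

Classifies pasted SMS text or an uploaded screenshot (via Tesseract OCR) as
"SMiShing", "Other Scam", or "Legitimate" using a zero-shot XLM-RoBERTa XNLI
model, then boosts the scores with keyword matches, URL detection, and
Google Safe Browsing lookups.
"""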
import gradio as gr
import pytesseract
from PIL import Image
from transformers import pipeline
import re
from langdetect import detect
from deep_translator import GoogleTranslator
import openai
import os
import requests
import json

# Set your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

# Retrieve Google Safe Browsing API key from environment
SAFE_BROWSING_API_KEY = os.getenv("GOOGLE_SAFE_BROWSING_API_KEY")
SAFE_BROWSING_URL = "https://safebrowsing.googleapis.com/v4/threatMatches:find"

# Translator used to render the English keyword lists in Spanish
translator = GoogleTranslator(source="auto", target="es")

# 1. Load separate keywords for SMiShing and Other Scam (assumed in English)
with open("smishing_keywords.txt", "r", encoding="utf-8") as f:
    SMISHING_KEYWORDS = [line.strip().lower() for line in f if line.strip()]

with open("other_scam_keywords.txt", "r", encoding="utf-8") as f:
    OTHER_SCAM_KEYWORDS = [line.strip().lower() for line in f if line.strip()]
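
# Note: each keywords file is expected to hold one English keyword or phrase
# per line; blank lines are skipped and matching is case-insensitive.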

# 2. Zero-Shot Classification Pipeline
model_name = "joeddav/xlm-roberta-large-xnli"
classifier = pipeline("zero-shot-classification", model=model_name)
CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
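
# The zero-shot pipeline scores each candidate label against the hypothesis
# template and returns parallel "labels"/"scores" lists sorted by score
# (highest first); smishing_detector() below zips them into a dict.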

def check_urls_with_google_safebrowsing(urls):
    """
    Debug-enabled version of Google Safe Browsing check:
    - Prints payload and response to help troubleshoot issues.
    Returns a dict {url: bool is_malicious}.
    If the API key is missing or error occurs, returns {url: False}.
    """
    result = {}
    if not SAFE_BROWSING_API_KEY:
        print("No GOOGLE_SAFE_BROWSING_API_KEY found. Returning all URLs as safe.")
        for u in urls:
            result[u] = False
        return result

    threat_entries = [{"url": u} for u in urls]
    payload = {
        "client": {
            "clientId": "my-smishing-detector",
            "clientVersion": "1.0"
        },
        "threatInfo": {
            "threatTypes": [
                "MALWARE",
                "SOCIAL_ENGINEERING",
                "UNWANTED_SOFTWARE",
                "POTENTIALLY_HARMFUL_APPLICATION"
            ],
            "platformTypes": ["ANY_PLATFORM"],
            "threatEntryTypes": ["URL"],
            "threatEntries": threat_entries
        }
    }

    print("---- Safe Browsing Debug ----")
    print("REQUEST Endpoint:", SAFE_BROWSING_URL)
    print("API Key:", SAFE_BROWSING_API_KEY)
    print("REQUEST Payload (JSON):")
    print(json.dumps(payload, indent=2))

    try:
        resp = requests.post(
            SAFE_BROWSING_URL,
            params={"key": SAFE_BROWSING_API_KEY},
            json=payload,
            timeout=10
        )
        print("RESPONSE Status Code:", resp.status_code)
        try:
            data = resp.json()
            print("RESPONSE JSON:")
            print(json.dumps(data, indent=2))
        except Exception as parse_err:
            print("Error parsing response as JSON:", parse_err)
            data = {}

        malicious_urls = set()
        if "matches" in data:
            for match in data["matches"]:
                threat_url = match.get("threat", {}).get("url")
                if threat_url:
                    malicious_urls.add(threat_url)

        for u in urls:
            result[u] = (u in malicious_urls)

    except Exception as e:
        print(f"Error contacting Safe Browsing API: {e}")
        for u in urls:
            result[u] = False

    print("RESULTS (url -> malicious):", result)
    print("---- End Debug ----\n")
    return result
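
# Illustrative usage (URL hypothetical): the function returns a flat url -> bool map:
#
#   flags = check_urls_with_google_safebrowsing(["http://malicious.example"])
#   if flags.get("http://malicious.example"):
#       ...  # treat the message as high risk
#
# With no GOOGLE_SAFE_BROWSING_API_KEY set, every URL comes back as False (safe).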

def get_keywords_by_language(text: str):
    """
    Detect language using langdetect and translate keywords if needed.
    """
    snippet = text[:200]
    try:
        detected_lang = detect(snippet)
    except Exception:
        detected_lang = "en"

    if detected_lang == "es":
        smishing_in_spanish = [
            translator.translate(kw).lower() for kw in SMISHING_KEYWORDS
        ]
        other_scam_in_spanish = [
            translator.translate(kw).lower() for kw in OTHER_SCAM_KEYWORDS
        ]
        return smishing_in_spanish, other_scam_in_spanish, "es"
    else:
        return SMISHING_KEYWORDS, OTHER_SCAM_KEYWORDS, "en"
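
# Illustrative usage (sample text hypothetical):
#
#   smishing_kws, other_kws, lang = get_keywords_by_language("Su paquete está retenido")
#   # lang == "es" and both lists are on-the-fly Spanish translations of the
#   # English .txt keywords; any other detected language returns the originals.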

def boost_probabilities(probabilities: dict, text: str):
    """
    Boost probabilities based on keyword matches, presence of URLs,
    and Google Safe Browsing checks.
    """
    lower_text = text.lower()
    smishing_keywords, other_scam_keywords, detected_lang = get_keywords_by_language(text)

    smishing_count = sum(1 for kw in smishing_keywords if kw in lower_text)
    other_scam_count = sum(1 for kw in other_scam_keywords if kw in lower_text)

    smishing_boost = 0.30 * smishing_count
    other_scam_boost = 0.30 * other_scam_count

    # URL pattern: matches explicit http(s) links and bare domains with common TLDs
    found_urls = re.findall(
        r"(https?://[^\s]+|\b[a-zA-Z0-9.-]+\.(?:com|net|org|edu|gov|mil|io|ai|co|info|biz|us|uk|de|fr|es|ru|jp|cn|in|au|ca|br|mx|it|nl|se|no|fi|ch|pl|kr|vn|id|tw|sg|hk)\b)",
        lower_text
    )
    if found_urls:
        smishing_boost += 0.35

    p_smishing = probabilities.get("SMiShing", 0.0)
    p_other_scam = probabilities.get("Other Scam", 0.0)
    p_legit = probabilities.get("Legitimate", 1.0)

    p_smishing += smishing_boost
    p_other_scam += other_scam_boost
    p_legit -= (smishing_boost + other_scam_boost)

    # Preliminary clamp & normalization
    p_smishing = max(p_smishing, 0.0)
    p_other_scam = max(p_other_scam, 0.0)
    p_legit = max(p_legit, 0.0)

    total = p_smishing + p_other_scam + p_legit
    if total > 0:
        p_smishing /= total
        p_other_scam /= total
        p_legit /= total
    else:
        p_smishing, p_other_scam, p_legit = 0.0, 0.0, 1.0

    # Now check Safe Browsing (with debug prints)
    sb_results = {}
    if found_urls:
        sb_results = check_urls_with_google_safebrowsing(found_urls)
        # If any malicious => set p_smishing=1.0
        if any(sb_results[u] for u in sb_results):
            p_smishing = 1.0
            p_other_scam = 0.0
            p_legit = 0.0

    return {
        "SMiShing": p_smishing,
        "Other Scam": p_other_scam,
        "Legitimate": p_legit,
        "detected_lang": detected_lang,
        "found_urls": found_urls,
        "safe_browsing_results": sb_results
    }
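
# Worked example (illustrative numbers): with model probabilities
# {"SMiShing": 0.20, "Other Scam": 0.10, "Legitimate": 0.70}, one SMiShing
# keyword hit (+0.30) and a URL in the text (+0.35):
#   p_smishing = 0.20 + 0.65 = 0.85, p_other_scam = 0.10, p_legit = 0.70 - 0.65 = 0.05
# The boosted values already sum to 1.0, so normalization leaves them unchanged;
# if Safe Browsing then flags any URL, the result is forced to SMiShing = 1.0.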

def smishing_detector(input_type, text, image):
    """
    Main detection function combining text (if 'Text') & OCR (if 'Screenshot'),
    and debugging logs for Safe Browsing calls.
    """
    if input_type == "Text":
        combined_text = text.strip() if text else ""
    else:
        combined_text = ""
        if image is not None:
            combined_text = pytesseract.image_to_string(image, lang="spa+eng").strip()

    if not combined_text:
        return {
            "text_used_for_classification": "(none)",
            "label": "No text provided",
            "confidence": 0.0,
            "keywords_found": [],
            "urls_found": [],
            "safe_browsing_results": {},
        }

    # 1. Local zero-shot classification
    local_result = classifier(
        sequences=combined_text,
        candidate_labels=CANDIDATE_LABELS,
        hypothesis_template="This message is {}."
    )
    original_probs = {k: float(v) for k, v in zip(local_result["labels"], local_result["scores"])}

    # 2. Boost with keywords, URLs, and Safe Browsing checks
    boosted_dict = boost_probabilities(original_probs, combined_text)
    detected_lang = boosted_dict.pop("detected_lang", "en")
    sb_results = boosted_dict.pop("safe_browsing_results", {})
    found_urls = boosted_dict.pop("found_urls", [])

    for k in boosted_dict:
        boosted_dict[k] = float(boosted_dict[k])

    final_label = max(boosted_dict, key=boosted_dict.get)
    final_confidence = round(boosted_dict[final_label], 3)

    return {
        "detected_language": detected_lang,
        "text_used_for_classification": combined_text,
        "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
        "boosted_probabilities": {k: round(v, 3) for k, v in boosted_dict.items()},
        "label": final_label,
        "confidence": final_confidence,
        "urls_found": found_urls,
        "safe_browsing_results": sb_results
    }
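
# Illustrative call outside the Gradio UI (message text hypothetical):
#
#   result = smishing_detector("Text", "Your package is on hold: http://bit.ly/xyz", None)
#   print(result["label"], result["confidence"], result["urls_found"])
#
# The dict also carries the original and boosted per-label probabilities and the
# Safe Browsing verdict for each detected URL.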

#
# Gradio interface with dynamic visibility
#
def toggle_inputs(choice):
    if choice == "Text":
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

with gr.Blocks() as demo:
    gr.Markdown("## SMiShing & Scam Detector with Debug-Enabled Safe Browsing")

    with gr.Row():
        input_type = gr.Radio(
            choices=["Text", "Screenshot"],
            value="Text",
            label="Choose Input Type"
        )

    text_input = gr.Textbox(
        lines=3,
        label="Paste Suspicious SMS Text",
        placeholder="Type or paste the message here...",
        visible=True
    )
    image_input = gr.Image(
        type="pil",
        label="Upload Screenshot",
        visible=False
    )

    input_type.change(
        fn=toggle_inputs,
        inputs=input_type,
        outputs=[text_input, image_input],
        queue=False
    )

    analyze_btn = gr.Button("Classify")
    output_json = gr.JSON(label="Result")

    analyze_btn.click(
        fn=smishing_detector,
        inputs=[input_type, text_input, image_input],
        outputs=output_json
    )

if __name__ == "__main__":
    if not openai.api_key:
        print("WARNING: OPENAI_API_KEY not set. LLM calls may fail.")
    if not SAFE_BROWSING_API_KEY:
        print("WARNING: GOOGLE_SAFE_BROWSING_API_KEY not set. All URLs returned as safe.")
    demo.launch()