hackerbyhobby committed: safebrowsing debugging

app.py CHANGED
@@ -1,46 +1,19 @@
-import gradio as gr
-import pytesseract
-from PIL import Image
-from transformers import pipeline
-import re
-from langdetect import detect
-from deep_translator import GoogleTranslator
-import openai
-import os
 import requests
 import json
+import os
 
-# Set your OpenAI API key
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-# Retrieve Google Safe Browsing API key from environment
 SAFE_BROWSING_API_KEY = os.getenv("GOOGLE_SAFE_BROWSING_API_KEY")
 SAFE_BROWSING_URL = "https://safebrowsing.googleapis.com/v4/threatMatches:find"
 
-# Translator instance
-translator = GoogleTranslator(source="auto", target="es")
-
-# 1. Load separate keywords for SMiShing and Other Scam (assumed in English)
-with open("smishing_keywords.txt", "r", encoding="utf-8") as f:
-    SMISHING_KEYWORDS = [line.strip().lower() for line in f if line.strip()]
-
-with open("other_scam_keywords.txt", "r", encoding="utf-8") as f:
-    OTHER_SCAM_KEYWORDS = [line.strip().lower() for line in f if line.strip()]
-
-# 2. Zero-Shot Classification Pipeline
-model_name = "joeddav/xlm-roberta-large-xnli"
-classifier = pipeline("zero-shot-classification", model=model_name)
-CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
-
 def check_urls_with_google_safebrowsing(urls):
     """
-
+    Debug-enabled version:
+    - Prints payload and raw response to help troubleshoot Safe Browsing issues.
     Returns a dict {url: bool is_malicious}.
-    If the API key is missing or an error occurs, returns {url: False} for all.
     """
     result = {}
     if not SAFE_BROWSING_API_KEY:
-
+        print("No GOOGLE_SAFE_BROWSING_API_KEY found. Returning all URLs as safe.")
         for u in urls:
             result[u] = False
         return result
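For reference, the short-circuit path above is easy to verify: with no key configured, the helper prints the new warning and marks every URL safe. A quick interactive check (hypothetical session, behavior read directly off the diff):

    >>> check_urls_with_google_safebrowsing(["https://example.com"])
    No GOOGLE_SAFE_BROWSING_API_KEY found. Returning all URLs as safe.
    {'https://example.com': False}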
@@ -66,6 +39,12 @@ def check_urls_with_google_safebrowsing(urls):
         }
     }
 
+    print("---- Safe Browsing Debug ----")
+    print("REQUEST Payload (JSON):")
+    print(json.dumps(payload, indent=2))
+    print("REQUEST Endpoint:", SAFE_BROWSING_URL, "Key:", SAFE_BROWSING_API_KEY)
+    print("URLs being checked:", urls)
+
     try:
         resp = requests.post(
             SAFE_BROWSING_URL,
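Note: the payload these prints dump is assembled in unchanged context (new-file lines 20-38) that the diff does not show. A minimal sketch of what it presumably contains, following the Safe Browsing v4 threatMatches:find request schema (clientId and clientVersion are placeholder values, not taken from this repo):

    payload = {
        "client": {"clientId": "smishing-detector", "clientVersion": "1.0"},  # placeholders
        "threatInfo": {
            "threatTypes": ["MALWARE", "SOCIAL_ENGINEERING", "UNWANTED_SOFTWARE"],
            "platformTypes": ["ANY_PLATFORM"],
            "threatEntryTypes": ["URL"],
            "threatEntries": [{"url": u} for u in urls],  # one entry per URL to check
        },
    }

One caveat with the new debug line: it prints SAFE_BROWSING_API_KEY in clear text, so the key lands in the Space logs; masking all but a short prefix (e.g. key[:6] + "...") would be safer once debugging is done.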
@@ -73,9 +52,17 @@ def check_urls_with_google_safebrowsing(urls):
             json=payload,
             timeout=10
         )
-        data = resp.json()
+
+        print("RESPONSE Status Code:", resp.status_code)
+        try:
+            data = resp.json()
+            print("RESPONSE JSON:")
+            print(json.dumps(data, indent=2))
+        except Exception as parse_error:
+            print("Error parsing response as JSON:", parse_error)
+            data = {}
+
         # If "matches" is present, some URL is flagged
-        # Each match has "threat": {"url": "..."}
         malicious_urls = set()
         if "matches" in data:
             for match in data["matches"]:
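Wrapping resp.json() in its own try/except is what makes this debuggable: on a failed call (bad key, malformed payload) the API can return a non-JSON error body, and a bare resp.json() could raise before the status code is ever logged. A slightly stricter sketch using requests' built-in status check, assuming the elided context line passes the key via params:

    resp = requests.post(
        SAFE_BROWSING_URL,
        params={"key": SAFE_BROWSING_API_KEY},  # assumed from the hidden context line
        json=payload,
        timeout=10,
    )
    resp.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
    data = resp.json()       # Safe Browsing returns JSON ({} when nothing matches)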
@@ -92,369 +79,7 @@ def check_urls_with_google_safebrowsing(urls):
         for u in urls:
             result[u] = False
 
+    print("RESULTS (url -> malicious):", result)
+    print("---- End Debug ----\n")
+
+    return result
-
-
-def get_keywords_by_language(text: str):
-    """
-    Detect language using langdetect and translate keywords if needed.
-    """
-    snippet = text[:200]
-    try:
-        detected_lang = detect(snippet)
-    except Exception:
-        detected_lang = "en"
-
-    if detected_lang == "es":
-        smishing_in_spanish = [
-            translator.translate(kw).lower() for kw in SMISHING_KEYWORDS
-        ]
-        other_scam_in_spanish = [
-            translator.translate(kw).lower() for kw in OTHER_SCAM_KEYWORDS
-        ]
-        return smishing_in_spanish, other_scam_in_spanish, "es"
-    else:
-        return SMISHING_KEYWORDS, OTHER_SCAM_KEYWORDS, "en"
-
-def boost_probabilities(probabilities: dict, text: str):
-    """
-    Boost probabilities based on keyword matches, presence of URLs,
-    and Google Safe Browsing checks.
-    """
-    lower_text = text.lower()
-    smishing_keywords, other_scam_keywords, detected_lang = get_keywords_by_language(text)
-
-    smishing_count = sum(1 for kw in smishing_keywords if kw in lower_text)
-    other_scam_count = sum(1 for kw in other_scam_keywords if kw in lower_text)
-
-    smishing_boost = 0.30 * smishing_count
-    other_scam_boost = 0.30 * other_scam_count
-
-    # More robust URL pattern
-    found_urls = re.findall(
-        r"(https?://[^\s]+|\b[a-zA-Z0-9.-]+\.(?:com|net|org|edu|gov|mil|io|ai|co|info|biz|us|uk|de|fr|es|ru|jp|cn|in|au|ca|br|mx|it|nl|se|no|fi|ch|pl|kr|vn|id|tw|sg|hk)\b)",
-        lower_text
-    )
-    # If any URL is found, add 0.35 to smishing
-    if found_urls:
-        smishing_boost += 0.35
-
-    p_smishing = probabilities.get("SMiShing", 0.0)
-    p_other_scam = probabilities.get("Other Scam", 0.0)
-    p_legit = probabilities.get("Legitimate", 1.0)
-
-    p_smishing += smishing_boost
-    p_other_scam += other_scam_boost
-    p_legit -= (smishing_boost + other_scam_boost)
-
-    # Preliminary clamp & normalization
-    p_smishing = max(p_smishing, 0.0)
-    p_other_scam = max(p_other_scam, 0.0)
-    p_legit = max(p_legit, 0.0)
-
-    total = p_smishing + p_other_scam + p_legit
-    if total > 0:
-        p_smishing /= total
-        p_other_scam /= total
-        p_legit /= total
-    else:
-        p_smishing, p_other_scam, p_legit = 0.0, 0.0, 1.0
-
-    # **Now** check Safe Browsing
-    sb_results = {}
-    if found_urls:
-        sb_results = check_urls_with_google_safebrowsing(found_urls)
-        # If any malicious => set p_smishing=1.0
-        if any(sb_results[u] for u in sb_results):
-            p_smishing = 1.0
-            p_other_scam = 0.0
-            p_legit = 0.0
-
-    return {
-        "SMiShing": p_smishing,
-        "Other Scam": p_other_scam,
-        "Legitimate": p_legit,
-        "detected_lang": detected_lang,
-        "found_urls": found_urls,
-        "safe_browsing_results": sb_results  # <--- show which are malicious
-    }
-
-def query_llm_for_classification(raw_message: str) -> dict:
-    """
-    First LLM call: asks for a classification (SMiShing, Other Scam, or Legitimate)
-    acting as a cybersecurity expert. Returns label and short reason.
-    """
-    if not raw_message.strip():
-        return {"label": "Unknown", "reason": "No message provided to the LLM."}
-
-    system_prompt = (
-        "You are a cybersecurity expert. You will classify the user's message "
-        "as one of: SMiShing, Other Scam, or Legitimate. Provide a short reason. "
-        "Return only JSON with keys: label, reason."
-    )
-    user_prompt = f"Message: {raw_message}\nClassify it as SMiShing, Other Scam, or Legitimate."
-
-    try:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt}
-            ],
-            temperature=0.2
-        )
-        raw_reply = response["choices"][0]["message"]["content"].strip()
-
-        llm_result = json.loads(raw_reply)
-        if "label" not in llm_result or "reason" not in llm_result:
-            return {"label": "Unknown", "reason": f"Unexpected format: {raw_reply}"}
-
-        return llm_result
-
-    except Exception as e:
-        return {"label": "Unknown", "reason": f"LLM error: {e}"}
-
-def incorporate_llm_label(boosted: dict, llm_label: str) -> dict:
-    """
-    Adjust the final probabilities based on the LLM's classification.
-    If LLM says SMiShing, add +0.2 to SMiShing, etc. Then clamp & re-normalize.
-    """
-    if llm_label == "SMiShing":
-        boosted["SMiShing"] += 0.2
-    elif llm_label == "Other Scam":
-        boosted["Other Scam"] += 0.2
-    elif llm_label == "Legitimate":
-        boosted["Legitimate"] += 0.2
-    # else "Unknown" => do nothing
-
-    # clamp
-    for k in boosted:
-        if boosted[k] < 0:
-            boosted[k] = 0.0
-    total = sum(boosted.values())
-
-    if total > 0:
-        for k in boosted:
-            boosted[k] /= total
-    else:
-        # fallback
-        boosted["Legitimate"] = 1.0
-        boosted["SMiShing"] = 0.0
-        boosted["Other Scam"] = 0.0
-
-    return boosted
-
-def query_llm_for_explanation(
-    text: str,
-    final_label: str,
-    final_conf: float,
-    local_label: str,
-    local_conf: float,
-    llm_label: str,
-    llm_reason: str,
-    found_smishing: list,
-    found_other_scam: list,
-    found_urls: list,
-    detected_lang: str
-) -> str:
-    """
-    Second LLM call: provides a holistic explanation of the final classification
-    in the same language as detected_lang (English or Spanish).
-    """
-    if detected_lang == "es":
-        system_prompt = (
-            "Eres un experto en ciberseguridad. Proporciona una explicación final al usuario en español. "
-            "Combina la clasificación local, la clasificación LLM y la etiqueta final en una sola explicación breve. "
-            "No reveles el código interno ni el JSON bruto; simplemente da una breve explicación fácil de entender. "
-            "Termina con la etiqueta final."
-        )
-    else:
-        system_prompt = (
-            "You are a cybersecurity expert providing a final explanation to the user in English. "
-            "Combine the local classification, the LLM classification, and the final label "
-            "into one concise explanation. Do not reveal internal code or raw JSON. "
-            "End with a final statement of the final label."
-        )
-
-    user_context = f"""
-User Message:
-{text}
-
-Local Classification => Label: {local_label}, Confidence: {local_conf}
-LLM Classification => Label: {llm_label}, Reason: {llm_reason}
-Final Overall Label => {final_label} (confidence {final_conf})
-
-Suspicious SMiShing Keywords => {found_smishing}
-Suspicious Other Scam Keywords => {found_other_scam}
-URLs => {found_urls}
-"""
-
-    try:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_context}
-            ],
-            temperature=0.2
-        )
-        final_explanation = response["choices"][0]["message"]["content"].strip()
-        return final_explanation
-    except Exception as e:
-        return f"Could not generate final explanation due to error: {e}"
-
-def smishing_detector(input_type, text, image):
-    """
-    Main detection function combining text (if 'Text') & OCR (if 'Screenshot'),
-    plus two LLM calls:
-      1) classification to adjust final probabilities,
-      2) a final explanation summarizing the outcome in the detected language.
-    """
-    if input_type == "Text":
-        combined_text = text.strip() if text else ""
-    else:
-        combined_text = ""
-        if image is not None:
-            combined_text = pytesseract.image_to_string(image, lang="spa+eng").strip()
-
-    if not combined_text:
-        return {
-            "text_used_for_classification": "(none)",
-            "label": "No text provided",
-            "confidence": 0.0,
-            "keywords_found": [],
-            "urls_found": [],
-            "llm_label": "Unknown",
-            "llm_reason": "No text to analyze",
-            "final_explanation": "No text provided"
-        }
-
-    # 1. Local zero-shot classification
-    local_result = classifier(
-        sequences=combined_text,
-        candidate_labels=CANDIDATE_LABELS,
-        hypothesis_template="This message is {}."
-    )
-    original_probs = {k: float(v) for k, v in zip(local_result["labels"], local_result["scores"])}
-
-    # 2. Basic boosting from keywords & URLs (plus Safe Browsing)
-    boosted_dict = boost_probabilities(original_probs, combined_text)
-    detected_lang = boosted_dict.pop("detected_lang", "en")
-
-    # Also retrieve the safe_browsing_results
-    sb_results = boosted_dict.pop("safe_browsing_results", {})
-    found_urls = boosted_dict.pop("found_urls", [])
-
-    for k in boosted_dict:
-        boosted_dict[k] = float(boosted_dict[k])
-
-    local_label = max(boosted_dict, key=boosted_dict.get)
-    local_conf = round(boosted_dict[local_label], 3)
-
-    # 3. LLM Classification
-    llm_classification = query_llm_for_classification(combined_text)
-    llm_label = llm_classification.get("label", "Unknown")
-    llm_reason = llm_classification.get("reason", "No reason provided")
-
-    # 4. Incorporate LLM’s label into final probabilities
-    boosted_dict = incorporate_llm_label(boosted_dict, llm_label)
-
-    final_label = max(boosted_dict, key=boosted_dict.get)
-    final_confidence = round(boosted_dict[final_label], 3)
-
-    # 5. Gather found keywords & URLs for the final explanation
-    lower_text = combined_text.lower()
-    smishing_keys, scam_keys, _ = get_keywords_by_language(combined_text)
-
-    found_smishing = [kw for kw in smishing_keys if kw in lower_text]
-    found_other_scam = [kw for kw in scam_keys if kw in lower_text]
-
-    # 6. Final explanation in user's language
-    final_explanation = query_llm_for_explanation(
-        text=combined_text,
-        final_label=final_label,
-        final_conf=final_confidence,
-        local_label=local_label,
-        local_conf=local_conf,
-        llm_label=llm_label,
-        llm_reason=llm_reason,
-        found_smishing=found_smishing,
-        found_other_scam=found_other_scam,
-        found_urls=found_urls,
-        detected_lang=detected_lang
-    )
-
-    # Combine results in final output
-    return {
-        "detected_language": detected_lang,
-        "text_used_for_classification": combined_text,
-        "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
-        "boosted_probabilities_before_llm": {local_label: local_conf},
-        "llm_label": llm_label,
-        "llm_reason": llm_reason,
-        "boosted_probabilities_after_llm": {k: round(v, 3) for k, v in boosted_dict.items()},
-        "label": final_label,
-        "confidence": final_confidence,
-        "smishing_keywords_found": found_smishing,
-        "other_scam_keywords_found": found_other_scam,
-        "urls_found": found_urls,
-        "safe_browsing_results": sb_results,  # <--- show if malicious
-        "final_explanation": final_explanation,
-    }
-
-#
-# Gradio interface with dynamic visibility
-#
-def toggle_inputs(choice):
-    """
-    Return updates for (text_input, image_input) based on the radio selection.
-    """
-    if choice == "Text":
-        return gr.update(visible=True), gr.update(visible=False)
-    else:
-        return gr.update(visible=False), gr.update(visible=True)
-
-with gr.Blocks() as demo:
-    gr.Markdown("## SMiShing & Scam Detector with Google Safe Browsing + LLM")
-
-    with gr.Row():
-        input_type = gr.Radio(
-            choices=["Text", "Screenshot"],
-            value="Text",
-            label="Choose Input Type"
-        )
-
-    text_input = gr.Textbox(
-        lines=3,
-        label="Paste Suspicious SMS Text",
-        placeholder="Type or paste the message here...",
-        visible=True
-    )
-
-    image_input = gr.Image(
-        type="pil",
-        label="Upload Screenshot",
-        visible=False
-    )
-
-    input_type.change(
-        fn=toggle_inputs,
-        inputs=input_type,
-        outputs=[text_input, image_input],
-        queue=False
-    )
-
-    analyze_btn = gr.Button("Classify")
-    output_json = gr.JSON(label="Result")
-
-    analyze_btn.click(
-        fn=smishing_detector,
-        inputs=[input_type, text_input, image_input],
-        outputs=output_json
-    )
-
-if __name__ == "__main__":
-    if not openai.api_key:
-        print("WARNING: OPENAI_API_KEY not set. LLM calls will fail or be skipped.")
-    if not SAFE_BROWSING_API_KEY:
-        print("WARNING: GOOGLE_SAFE_BROWSING_API_KEY not set. URL checks will be skipped.")
-    demo.launch()
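With the Gradio app and LLM plumbing stripped away, the trimmed module is easy to exercise directly. A hypothetical smoke test from a Python shell in the Space (testsafebrowsing.appspot.com is Google's published Safe Browsing test site; whether its pages are flagged can vary by threat list):

    import app  # the module under debug; assumes app.py is importable as-is

    urls = [
        "http://testsafebrowsing.appspot.com/s/malware.html",  # Google's test page
        "https://example.com",
    ]
    # Prints the full request/response debug trail, then the verdicts,
    # e.g. {'http://testsafebrowsing.appspot.com/s/malware.html': True, 'https://example.com': False}
    print(app.check_urls_with_google_safebrowsing(urls))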