Update app.py
app.py CHANGED
@@ -7,41 +7,33 @@ import requests
 HUGGINGFACE_API_KEY = "your_huggingface_api_key"  # Replace with your API key from https://huggingface.co/settings/tokens
 HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
 
-# Hugging Face Model Endpoints (
-CLASSIFIER_API_URL = "https://api-inference.huggingface.co/models/
-GENERATOR_API_URL = "https://api-inference.huggingface.co/models/
+# Hugging Face Model Endpoints (Using Pretrained Models)
+CLASSIFIER_API_URL = "https://api-inference.huggingface.co/models/Hate-speech-CNERG/bert-base-uncased-hatexplain"
+GENERATOR_API_URL = "https://api-inference.huggingface.co/models/KAXY/GPT2-against-hate"
 
 # -----------------------------------------------------------
 # FUNCTION DEFINITIONS
 # -----------------------------------------------------------
 def detect_harmful_content(text):
-    """
-    Detect harmful content in the input text using Hugging Face API.
-    """
+    """Detects harmful content using a Hugging Face model."""
     payload = {"inputs": text}
     response = requests.post(CLASSIFIER_API_URL, headers=HEADERS, json=payload)
-
+
     if response.status_code != 200:
         return [{"category": "Error", "score": 0, "message": "Failed to fetch response"}]
 
     results = response.json()
-
-    if not isinstance(results, list):  # Handle unexpected response format
-        return [{"category": "Error", "score": 0, "message": "Invalid response format"}]
-
+
     detected = []
-    threshold = 0.
+    threshold = 0.5  # Adjust threshold for sensitivity
     for result in results:
-        if
-
-            detected.append({"category": result['label'], "score": result['score']})
+        if result.get('score', 0) >= threshold:
+            detected.append({"category": result.get('label', 'Unknown'), "score": result.get('score', 0)})
 
     return detected
 
 def generate_mitigation_response(text, detected_categories):
-    """
-    Generate a moderation response based on detected harmful categories.
-    """
+    """Generates a mitigation response based on detected harmful speech."""
     if not detected_categories:
         return "✅ Content appears safe. No harmful content detected."
 
@@ -54,23 +46,21 @@ def generate_mitigation_response(text, detected_categories):
     response = requests.post(GENERATOR_API_URL, headers=HEADERS, json=payload)
 
     if response.status_code != 200:
-        return "⚠️ Error: Could not generate a response.
+        return "⚠️ Error: Could not generate a response."
 
     generated = response.json()
-
-    # Ensure response contains the generated text
-    if isinstance(generated, list) and len(generated) > 0 and 'generated_text' in generated[0]:
-        return generated[0]['generated_text']
-    else:
-        return "⚠️ No valid response generated. Please try again."
+    return generated[0].get('generated_text', "No response generated.")
 
 # -----------------------------------------------------------
 # STREAMLIT USER INTERFACE
 # -----------------------------------------------------------
+st.set_page_config(page_title="Hate Speech Detector", layout="centered")
+
 st.title("🛡️ AI-Powered Hate Speech Detection & Mitigation")
-st.markdown("Detects hate speech, misinformation, and cyberbullying in social media posts.")
+st.markdown("Detects **hate speech, misinformation, and cyberbullying** in social media posts.")
 
-
+# User Input
+user_input = st.text_area("✍️ Enter the text to analyze:", height=150)
 
 if st.button("Analyze"):
     if user_input.strip() == "":
@@ -78,7 +68,7 @@ if st.button("Analyze"):
     else:
         st.markdown("### 🔍 Analysis Results")
         detected = detect_harmful_content(user_input)
-
+
         if detected and detected[0].get("category") != "Error":
             for d in detected:
                 st.write(f"**Category:** {d['category']} | **Confidence:** {d['score']:.2f}")
@@ -89,3 +79,4 @@ if st.button("Analyze"):
             mitigation_response = generate_mitigation_response(user_input, detected)
             st.write(mitigation_response)
 
+
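A note for anyone adapting this commit: for text-classification models, the hosted Inference API often wraps the scores for a single input in a nested list ([[{"label": ..., "score": ...}]]) rather than the flat list that detect_harmful_content now iterates over; the isinstance check this commit removes was guarding against exactly that kind of shape mismatch. A minimal sketch of a normalization step, assuming the response is either a flat or singly nested list of label/score dicts (verify against the actual model output; normalize_classifier_output is a hypothetical helper, not part of this commit):

# Sketch only: unwrap the batch dimension the Inference API may add
# when the payload is a single input string.
def normalize_classifier_output(results):
    if isinstance(results, list) and results and isinstance(results[0], list):
        return results[0]  # [[{...}, ...]] -> [{...}, ...]
    return results if isinstance(results, list) else []

# Usage inside detect_harmful_content, before the threshold loop:
#     results = normalize_classifier_output(response.json())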
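Relatedly, the status_code != 200 checks treat a cold start as a hard failure: the hosted Inference API commonly answers HTTP 503 while a model is still loading, with an estimated wait hinted in the JSON body. A small retry wrapper, sketched under that assumption (post_with_retry and the estimated_time field are assumptions to confirm against the current API, not part of this commit):

import time
import requests

def post_with_retry(url, headers, payload, retries=3, default_wait=5.0):
    # Sketch: retry on 503 "model loading" responses instead of failing outright.
    response = requests.post(url, headers=headers, json=payload, timeout=30)
    for _ in range(retries):
        if response.status_code != 503:
            break
        wait = default_wait
        try:
            # Assumed field: the API often reports {"estimated_time": seconds}.
            wait = float(response.json().get("estimated_time", default_wait))
        except (ValueError, AttributeError):
            pass
        time.sleep(min(wait, 30.0))
        response = requests.post(url, headers=headers, json=payload, timeout=30)
    return response

Both requests.post calls in app.py could go through this helper without changing the rest of the logic.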