Spaces:
Sleeping
Sleeping
hackerbyhobby
committed on
app
Browse files- app.py +88 -43
- trained_model.pkl +3 -0
app.py
CHANGED
@@ -1,51 +1,96 @@
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
import joblib
|
3 |
-
|
4 |
|
5 |
-
|
6 |
-
model = joblib.load("tuned_model.pkl")
|
7 |
|
8 |
-
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
# Return prediction, probability, and user inputs
|
27 |
-
return prediction, round(probability, 4), input_data
|
28 |
-
except Exception as e:
|
29 |
-
return "Error", 0, {"error": str(e)}
|
30 |
-
|
31 |
-
# Gradio Interface
|
32 |
-
inputs = [gr.Textbox(label=feature, placeholder=f"Enter value for {feature}") for feature in features]
|
33 |
-
|
34 |
-
interface = gr.Interface(
|
35 |
-
fn=predict_heart_failure,
|
36 |
-
inputs=inputs,
|
37 |
-
outputs=[
|
38 |
-
gr.Text(label="Prediction"),
|
39 |
-
gr.Number(label="Risk Probability"),
|
40 |
-
gr.JSON(label="User Inputs")
|
41 |
-
],
|
42 |
-
title="Heart Failure Prediction Model",
|
43 |
-
description=(
|
44 |
-
"Predicts the likelihood of heart failure based on health features. "
|
45 |
-
"Enter the values for the features below and receive the prediction."
|
46 |
)
|
47 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
48 |
|
49 |
-
#
|
50 |
if __name__ == "__main__":
|
51 |
-
interface
|
|
|
|
1 |
+
"""
|
2 |
+
Webapp Front End
|
3 |
+
"""
|
4 |
+
|
5 |
import gradio as gr
import joblib
import pandas as pd

from data.clean_data import fetch_check
from data.value_maps import category_maps, binary_maps
|
|
|
10 |
|
11 |
+
# Path to the pickled scikit-learn model bundled with the Space.
MODEL_PATH = "Random_Foresttest_model.pkl"
# Numeric sentinel used when a user input cannot be mapped to a known code.
DEFAULT_VALUE = 99

try:
    rf_model = joblib.load(MODEL_PATH)
except FileNotFoundError as e:
    raise FileNotFoundError(
        f"Model file not found at {MODEL_PATH}. Please check the path."
    ) from e
# NOTE(review): removed a redundant `joblib.dump(rf_model, MODEL_PATH)` that
# rewrote the just-loaded model file on every startup — pure overhead, and a
# corruption risk if startup is interrupted mid-write.

# Cleaned source dataframe; presumably drives the UI widgets — TODO confirm
# it is still needed, nothing below references it.
og_df = fetch_check(to_fetch=True, to_fillna=True, to_dropna=True)
|
22 |
+
|
23 |
+
# One radio widget per binary feature (features with empty maps are skipped).
binary_inputs = {}
for _feat, _map in binary_maps.items():
    if not _map:
        continue
    binary_inputs[_feat] = gr.Radio(
        choices=list(_map),
        label=_feat.replace("_", " "),
    )
|
31 |
+
|
32 |
+
# One dropdown widget per multi-valued categorical feature (skip empty maps).
categorical_inputs = {}
for _feat, _map in category_maps.items():
    if _map:
        categorical_inputs[_feat] = gr.Dropdown(
            choices=list(_map),
            label=_feat.replace("_", " "),
        )

# Widget order matters: it must match the feature order reconstructed inside
# predict_outcome — categorical widgets first, then the binary ones.
input_types = [*categorical_inputs.values(), *binary_inputs.values()]
|
42 |
+
|
43 |
+
# NOTE(review): removed three leftover debug loops that printed every
# categorical key, binary key, and widget object to stdout at import time
# (all under the same misleading "input_types:" label). They were startup
# noise with no runtime effect on the app.
|
49 |
+
|
50 |
+
|
51 |
+
def predict_outcome(*user_inputs):
    """
    Convert raw widget values into the model's numeric feature encoding,
    run the classifier, and return a human-readable risk label.

    Args:
        *user_inputs: Widget values in the same order as ``input_types``
            (categorical features first, then binary features).

    Returns:
        str: "High Risk" if the model predicts class 1, else "Low Risk".

    Raises:
        ValueError: If the model rejects the assembled feature frame.
    """
    # Feature order must mirror the widget order built at module load time.
    expected_features = list(categorical_inputs.keys()) + list(binary_inputs.keys())

    # Map each user-facing choice to the numeric code the model was trained
    # on; unrecognized values fall back to DEFAULT_VALUE.
    # NOTE(review): the original also had a dead `input_data = dict(zip(...))`
    # that was immediately overwritten by this loop — removed.
    input_data = {}
    for feature, user_input in zip(expected_features, user_inputs):
        if feature in binary_maps:
            # Convert 'Yes'/'No' to 1/0
            input_data[feature] = binary_maps[feature].get(user_input, DEFAULT_VALUE)
        elif feature in category_maps:
            # Convert categorical values
            input_data[feature] = category_maps[feature].get(user_input, DEFAULT_VALUE)
        else:
            # Default value for unexpected inputs
            input_data[feature] = DEFAULT_VALUE

    # Single-row frame with columns ordered exactly as the model expects.
    # (Requires `import pandas as pd` at the top of the file — the original
    # new version used `pd` without importing it.)
    input_df = pd.DataFrame([input_data])[expected_features]

    # Keep the try body minimal: only the call that can raise.
    try:
        prediction = rf_model.predict(input_df)[0]
    except ValueError as e:
        raise ValueError(f"Error during prediction: {e}") from e
    return "High Risk" if prediction == 1 else "Low Risk"
|
83 |
+
|
84 |
+
|
85 |
+
def build_interface():
    """Assemble the Gradio interface from the module-level widget list."""
    return gr.Interface(
        fn=predict_outcome,
        inputs=input_types,
        outputs=gr.Label(label="Prediction"),
    )
|
91 |
+
|
92 |
|
93 |
+
# Entry point: build and launch the web app when executed as a script.
if __name__ == "__main__":
    build_interface().launch()
|
trained_model.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:aa5ce972c499796eb8c3a61154ebdcf0373d13d4eb48d41db47d3e0cbb4e7d96
|
3 |
+
size 7804281
|