import gradio as gr
import numpy as np
import joblib

# Load the trained random forest model from disk
rf_model = joblib.load("rf_model.pkl")

# Feature name -> [minimum, maximum, step] for the corresponding input widget
features = {
    "CD4": [0, 500, 1],
    "AST/ALT": [0, 100, 1],
    "ALT": [0, 2000, 1],
    "Hb": [1, 150, 1],
    "CRP": [1, 500, 1],
    "ALB": [10, 50, 1],
    "POAL": [0, 1, 1],
    "ALC": [0, 5, 1],
    "Age (years)": [12, 100, 1],
    "WBC": [0, 20, 1],
    "PLT": [1, 800, 1],
    "AST": [0, 2000, 1],
}


# Define the inference function
def predict(*args):
    try:
        # Convert input values to floats
        input_values = [float(arg) for arg in args]
        # Reshape to (1, n_features)
        input_array = np.array(input_values).reshape(1, -1)
        # Use the model for inference
        prediction_proba = rf_model.predict_proba(input_array)
        prediction = rf_model.predict(input_array)
        # Probability that the sample belongs to class 1
        confidence = prediction_proba[0][1]
        # Return the predicted class and the class-1 probability
        return f"Prediction: {int(prediction[0])}\nConfidence: {confidence:.2f}"
    except Exception as e:
        return f"Inference error: {e}"


# Dynamically generate one numeric input component per feature
inputs = [
    gr.Number(value=v[0], label=k, minimum=v[0], maximum=v[1], step=v[2])
    for k, v in features.items()
]
outputs = gr.Textbox(label="Inference Result")  # Output component

# Create the Gradio interface
interface = gr.Interface(
    fn=predict,        # Inference function
    inputs=inputs,     # Dynamically generated input components
    outputs=outputs,   # Output component
    title="Random Forest Model Inference",
    description="Input feature values to get the model's inference result.",
    live=False,        # Update only on button click, not in real time
    flagging_mode="never",
)

# Enable request queueing
interface.queue()

# Launch the app with a public share link
interface.launch(share=True)
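
# ----------------------------------------------------------------------------
# Note: "rf_model.pkl" is assumed to be a scikit-learn RandomForestClassifier
# serialized with joblib and trained on columns in the same order as the
# `features` dict above. A minimal, hypothetical training sketch (commented
# out; X_train / y_train and the hyperparameters are placeholders, not part
# of this app) might look like:
#
#     from sklearn.ensemble import RandomForestClassifier
#     import joblib
#
#     rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
#     rf_model.fit(X_train, y_train)         # columns ordered as in `features`
#     joblib.dump(rf_model, "rf_model.pkl")  # file loaded by this app
# ----------------------------------------------------------------------------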