import io
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Get the API token from environment variables
api_token = os.getenv("HUGGINGFACE_API_TOKEN")

# Initialize the Inference Client for the segmentation model.
# Assumes the model is served with the image-segmentation task on the Inference API.
client = InferenceClient(
    model="SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net",
    token=api_token,
)


def predict(image):
    """
    Process the uploaded image and return the segmentation result.

    Args:
        image: PIL Image object from the Gradio input

    Returns:
        The segmentation mask as a PIL Image
    """
    try:
        # TODO: Add any necessary preprocessing here (e.g., resizing, normalization)

        # The Inference API expects raw image bytes, so serialize the PIL image first.
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")

        # Call the image-segmentation task helper. It returns a list of
        # segments, each with a label, a confidence score, and a mask.
        segments = client.image_segmentation(buffer.getvalue())

        # TODO: Add any necessary postprocessing here (e.g., merging masks,
        # overlaying them on the original image).
        # For now, return the mask of the first segment. Recent versions of
        # huggingface_hub decode the mask to a PIL image; older versions
        # return a base64 string that would need decoding first.
        return segments[0].mask
    except Exception as e:
        # Surface failures in the UI instead of returning a string to an Image output
        raise gr.Error(f"Error: {e}")


# Create the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="Upload Panoramic X-ray Image"),
    outputs=gr.Image(type="pil", label="Segmentation Result"),
    title="Teeth Segmentation in Panoramic X-rays",
    description="Upload an X-ray image to see the segmented teeth.",
)

# Launch the interface
iface.launch()
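
# ---------------------------------------------------------------------------
# Illustrative sketch (not wired into the app above): one way the
# "overlaying on original" post-processing TODO could be handled, assuming the
# mask comes back as a single-channel ("L" mode) PIL image. The helper name,
# highlight colour, and blend strength are hypothetical choices.
# ---------------------------------------------------------------------------
# from PIL import Image
#
# def overlay_mask(original: Image.Image, mask: Image.Image, alpha: float = 0.4) -> Image.Image:
#     """Blend a red highlight of the segmentation mask onto the original X-ray."""
#     original_rgb = original.convert("RGB")
#     mask_l = mask.convert("L").resize(original_rgb.size)
#     red_layer = Image.new("RGB", original_rgb.size, (255, 0, 0))
#     # Paint red where the mask is set, then blend with the original for transparency.
#     highlighted = Image.composite(red_layer, original_rgb, mask_l)
#     return Image.blend(original_rgb, highlighted, alpha)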