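"""FastAPI entry point: registers ML models, exposes the models API, and mounts the Gradio UI."""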
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import gradio as gr
from config import get_settings
from models.text_classification import TextClassificationModel
from api.models import router as models_router, registry
app = FastAPI(
    title=get_settings().app_name,
    description="API for managing and running ML models",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)
# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Modify this in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Register models
text_classifier = TextClassificationModel()
registry.register_model(
"text-classification",
text_classifier,
"/gradio/text-classification"
)
# Mount the models API router
app.include_router(
    models_router,
    prefix="/api/models",
    tags=["models"]
)
# Mount Gradio interface
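# gr.mount_gradio_app returns the FastAPI app with the Gradio UI mounted at the given path,
# so the result is assigned back to `app`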
app = gr.mount_gradio_app(
    app,
    text_classifier.create_interface(),
    path="/gradio/text-classification"
)
@app.get("/")
async def root():
"""Root endpoint returning basic API information."""
return {
"name": get_settings().app_name,
"version": "1.0.0",
"status": "running"
}
if __name__ == "__main__":
    # Initialize settings
    settings = get_settings()
    uvicorn.run(
        "main:app",
        host=settings.host,
        port=settings.port,
        reload=settings.debug
    )