from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from ollama import chat
import subprocess

app = FastAPI()

# Jinja2 templates are loaded from the local "templates" directory, which is
# expected to contain index.html for the chat front end.
templates = Jinja2Templates(directory="templates")

def pull_model():
    """Download the model from the Ollama registry so it is available locally."""
    try:
        subprocess.run(["ollama", "pull", "mohamedo/bignova"], check=True)
        print("Model pulled successfully!")
    except subprocess.CalledProcessError:
        print("Error pulling model.")


# Pull the model once at import time; the server will not begin serving
# requests until the download finishes (or fails).
pull_model()

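# A possible alternative to shelling out to the CLI: recent ollama-python
# releases also expose pull() alongside chat(). A minimal sketch, unused here
# (pull_model_via_client is a hypothetical name; check that your installed
# version of the ollama package provides pull()):
def pull_model_via_client():
    from ollama import pull
    try:
        pull("mohamedo/bignova")
        print("Model pulled successfully!")
    except Exception as exc:
        print(f"Error pulling model: {exc}")
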
@app.get("/", response_class=HTMLResponse) |
|
async def home(request: Request): |
|
return templates.TemplateResponse("index.html", {"request": request}) |
|
|
|
|
|
@app.get("/chat/{message}") |
|
async def chat_with_ai(message: str): |
|
""" |
|
Endpoint to interact with the AI model. |
|
It accepts a message from the user and returns the model's response. |
|
""" |
|
|
|
response = chat( |
|
model="mohamedo/bignova", |
|
messages=[{ |
|
'role': 'user', |
|
'content': message |
|
}] |
|
) |
|
|
|
|
|
return {"response": response['message']['content']} |
|
|
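if __name__ == "__main__":
    # Convenience entry point: a sketch assuming uvicorn (FastAPI's usual
    # ASGI server) is installed and this module is saved as main.py.
    # Once the server is up, the endpoint can be exercised with e.g.:
    #     curl http://127.0.0.1:8000/chat/hello
    # Note the message rides in the URL path, so it must be URL-encoded
    # (spaces as %20, and so on).
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)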