from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from ollama import chat
import subprocess

# Initialize FastAPI app
app = FastAPI()

# Set up template directory
templates = Jinja2Templates(directory="templates")

# Pull the model if it's not already downloaded
def pull_model():
    try:
        subprocess.run(["ollama", "pull", "mohamedo/bignova"], check=True)
        print("Model pulled successfully!")
    except subprocess.CalledProcessError:
        print("Error pulling model.")

pull_model()
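# Note: pull_model() runs at import time, so the pull blocks app startup.
# If shelling out is undesirable, the ollama Python client also exposes a
# pull function, e.g. ollama.pull("mohamedo/bignova"), assuming a recent
# version of the ollama package.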
# Root route to display the chat UI
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})

# Route to interact with Ollama's chatbot
@app.get("/chat/{message}")
async def chat_with_ai(message: str):
    """
    Endpoint to interact with the AI model.
    It accepts a message from the user and returns the model's response.
    """
    # Send a message to the model and get the response
    response = chat(
        model="mohamedo/bignova",  # Specify the model to use
        messages=[{
            'role': 'user',
            'content': message
        }]
    )
    # Return the AI's response
    return {"response": response['message']['content']}
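The home route expects a Jinja2 template at templates/index.html; with that template in place and the file saved as, say, main.py, the app can be served with uvicorn main:app --reload. As a quick sanity check, the chat endpoint can then be queried directly. Below is a minimal client sketch, assuming the server is running on uvicorn's default address, 127.0.0.1:8000:

import json
import urllib.parse
import urllib.request

# URL-encode the message so it is safe to embed in the path
message = urllib.parse.quote("Hello, who are you?")

# Call the /chat/{message} endpoint and decode the JSON response
with urllib.request.urlopen(f"http://127.0.0.1:8000/chat/{message}") as resp:
    data = json.load(resp)

print(data["response"])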