# FluentQ — app/agent.py
# Conversation agent: routes user text through the local LLM while keeping
# a short rolling conversation history in module-level memory.
from models.local_llm import run_llm
import logging
# Configure logging
# NOTE(review): calling basicConfig() in an importable module configures the
# root logger as a side effect of import; conventionally this belongs in the
# application entry point — confirm no other module also configures logging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Global conversation memory (in a real app, this should be session-based)
# Each entry is a one-key dict: {"user": text} or {"assistant": text}.
conversation_memory = []
def process_text(input_text: str) -> str:
    """
    Process user input through the LLM with rolling conversation memory.

    The user message is appended to the module-level ``conversation_memory``,
    the most recent 10 entries are rendered into a prompt, and the LLM's
    response is appended to memory before being returned.

    Args:
        input_text: Raw user message.

    Returns:
        The LLM-generated response string.
    """
    # Record the user turn first so it is part of the prompt context.
    conversation_memory.append({"user": input_text})
    # Keep only the most recent 10 entries for brevity; slicing already
    # handles histories shorter than 10, so no length check is needed.
    recent_memory = conversation_memory[-10:]
    context = "\n".join(
        f"User: {m['user']}" if 'user' in m else f"Assistant: {m['assistant']}"
        for m in recent_memory
    )
    # Create prompt with context
    prompt = f"""You are a telecom AI assistant. You help users with questions about telecommunications,
networks, phones, and related technologies. Be concise and helpful.
Conversation history:
{context}
Please provide your next response:"""
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Sending prompt to LLM with context length: %d", len(context))
    response = run_llm(prompt)
    # Record the assistant turn so it is available to the next call.
    conversation_memory.append({"assistant": response})
    return response
def clear_memory():
    """Reset the module-level conversation history.

    Rebinds ``conversation_memory`` to a fresh empty list (rather than
    mutating the existing one) and reports success.

    Returns:
        True, always.
    """
    global conversation_memory
    conversation_memory = list()
    return True