import logging

from models.local_llm import run_llm

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Global conversation memory (in a real app, this should be session-based)
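# NOTE: a module-level list is shared by every caller and is not thread-safe,
# so concurrent users would see each other's conversations.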
conversation_memory = []


def process_text(input_text: str) -> str:
    """
    Process user input through the LLM, with conversation memory.

    Args:
        input_text: The user's input text.

    Returns:
        The generated response.
    """
    # Add the user input to memory
    conversation_memory.append({"user": input_text})

    # Build context from recent memory: the last 10 entries, i.e. roughly the
    # last 5 user/assistant exchanges, for brevity. Slicing already handles
    # shorter histories, so no explicit length check is needed.
    recent_memory = conversation_memory[-10:]
    context = "\n".join(
        f"User: {m['user']}" if "user" in m else f"Assistant: {m['assistant']}"
        for m in recent_memory
    )
    # Create the prompt with context
    prompt = f"""You are a telecom AI assistant. You help users with questions about telecommunications,
networks, phones, and related technologies. Be concise and helpful.

Conversation history:
{context}

Please provide your next response:"""
    # Get a response from the LLM
    logger.info(f"Sending prompt to LLM with context length: {len(context)}")
    response = run_llm(prompt)

    # Add the response to memory and return it
    conversation_memory.append({"assistant": response})
    return response


def clear_memory():
    """Clear the conversation memory."""
    global conversation_memory
    conversation_memory = []
    return True
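

# Minimal usage sketch of the memory round trip (assumption: run_llm from
# models.local_llm takes a prompt string and returns a string, as the import
# above implies; the example questions are hypothetical).
if __name__ == "__main__":
    print(process_text("What does 5G NR stand for?"))
    # The second turn sees the first exchange in its context window.
    print(process_text("How is it different from LTE?"))
    clear_memory()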