tommytracx committed on
Commit
1e14c01
·
verified ·
1 Parent(s): 46f013b

Update app/agent.py

Browse files
Files changed (1) hide show
  1. app/agent.py +44 -2
app/agent.py CHANGED
@@ -1,11 +1,53 @@
1
  from models.local_llm import run_llm
 
2
 
 
 
 
 
 
3
  conversation_memory = []
4
 
5
def process_text(input_text: str) -> str:
    """
    Run user input through the LLM with simple in-module conversation memory.

    Args:
        input_text: The user's message.

    Returns:
        The LLM-generated reply string.
    """
    conversation_memory.append({"user": input_text})

    # Build context from BOTH roles (the original joined only the user turns,
    # so assistant replies stored in memory never reached later prompts), and
    # cap it to the last 10 entries so the prompt does not grow without bound.
    recent = conversation_memory[-10:]
    turns = []
    for entry in recent:
        if "user" in entry:
            turns.append(f"User: {entry['user']}")
        else:
            turns.append(f"Assistant: {entry['assistant']}")
    context = "\n".join(turns)

    prompt = f"You are a telecom AI assistant. Context:\n{context}\nRespond:"
    response = run_llm(prompt)

    conversation_memory.append({"assistant": response})
    return response
 
 
 
 
 
 
 
1
  from models.local_llm import run_llm
2
+ import logging
3
 
4
# Configure logging once at import time.
# NOTE(review): calling basicConfig() inside a library module reconfigures the
# whole process's root logger — consider leaving this to the application entry
# point and keeping only getLogger(__name__) here. Confirm no other module
# depends on this side effect before moving it.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Global conversation memory (in a real app, this should be session-based).
# Entries are single-key dicts: {"user": text} or {"assistant": text}.
conversation_memory = []
10
 
11
def process_text(input_text: str) -> str:
    """
    Process user input through the LLM, with conversation memory.

    Args:
        input_text: Raw user input text.

    Returns:
        The generated assistant response.
    """
    # Record the user turn first so it becomes part of this prompt's context.
    conversation_memory.append({"user": input_text})

    # Keep only the last 10 memory entries (~5 user/assistant exchanges) to
    # bound prompt size. A plain slice already handles short histories, so the
    # original's `if len(...) > 10` conditional was redundant; the old comment
    # also said "5 exchanges" while slicing 10 entries — clarified here.
    recent_memory = conversation_memory[-10:]
    context = "\n".join(
        f"User: {m['user']}" if 'user' in m else f"Assistant: {m['assistant']}"
        for m in recent_memory
    )

    # Create the prompt with conversation context.
    prompt = f"""You are a telecom AI assistant. You help users with questions about telecommunications,
networks, phones, and related technologies. Be concise and helpful.

Conversation history:
{context}

Please provide your next response:"""

    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Sending prompt to LLM with context length: %s", len(context))
    response = run_llm(prompt)

    # Record the assistant turn so future prompts can include it.
    conversation_memory.append({"assistant": response})
    return response
48
+
49
def clear_memory():
    """Reset the module-level conversation memory to an empty list.

    Returns:
        True, always — callers may use this as a success flag.
    """
    global conversation_memory
    conversation_memory = list()
    return True