# Weather Assistant — Gradio ChatInterface demo driven by a LangGraph ReAct agent.
import os
from typing import Dict, Generator, List

import gradio as gr
import requests
from gradio import ChatMessage
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
# Weather and location tools
@tool
def get_lat_lng(location_description: str) -> dict[str, float]:
    """Get the latitude and longitude of a location."""
    # Stub geocoder: ignores the description and always answers with a
    # fixed coordinate pair (roughly London) so the demo needs no API key.
    coordinates = {"lat": 51.1, "lng": -0.1}
    return coordinates
@tool
def get_weather(lat: float, lng: float) -> dict[str, str]:
    """Get the weather at a location."""
    # Stub forecast: the coordinates are ignored and a canned sunny
    # reading is returned, keeping the demo fully offline.
    report = {"temperature": "21°C", "description": "Sunny"}
    return report
def stream_from_agent(
    message: str, history: List[Dict[str, str]]
) -> Generator[List[ChatMessage], None, None]:
    """Process messages through the LangChain agent with visible reasoning.

    Args:
        message: The user's newest chat message.
        history: Prior turns as Gradio ``{"role", "content"}`` dicts.

    Yields:
        A growing list of ``gr.ChatMessage`` objects: one metadata-tagged
        entry per tool call (pending -> done, with the tool's result
        appended) followed by the agent's final text answer.
    """
    # Build the agent fresh per call; MemorySaver therefore only lives for
    # this request, so the full conversation is replayed below instead.
    llm = ChatOpenAI(temperature=0, model="gpt-4")
    memory = MemorySaver()
    tools = [get_lat_lng, get_weather]
    agent_executor = create_react_agent(llm, tools, checkpointer=memory)

    # Replay history in chronological order (both sides of the dialogue),
    # then append the new user message LAST — the original put it first,
    # which presented turns to the model out of order.
    past_messages: List = []
    for turn in history:
        content = turn.get("content")
        if not isinstance(content, str):
            # Skip non-text entries (e.g. tool-step ChatMessages).
            continue
        if turn["role"] == "user":
            past_messages.append(HumanMessage(content=content))
        elif turn["role"] == "assistant":
            past_messages.append(AIMessage(content=content))
    past_messages.append(HumanMessage(content=message))

    messages_to_display: List[ChatMessage] = []
    final_response = None

    for chunk in agent_executor.stream(
        {"messages": past_messages},
        config={"configurable": {"thread_id": "abc123"}},
    ):
        # Handle the agent's reasoning steps and tool invocations.
        if chunk.get("agent"):
            for msg in chunk["agent"]["messages"]:
                if msg.content:
                    # Remember the latest text content as the final answer.
                    final_response = msg.content
                for tool_call in msg.tool_calls:
                    tool_message = ChatMessage(
                        content=f"Parameters: {tool_call['args']}",
                        metadata={
                            "title": f"🛠️ Using {tool_call['name']}",
                            "id": tool_call["id"],
                            "status": "pending",
                        },
                    )
                    messages_to_display.append(tool_message)
                    yield messages_to_display
        # Handle tool responses: attach each result to its pending call and
        # only then mark that call "done" (the original flipped the status
        # before the tool had actually returned).
        if chunk.get("tools"):
            for tool_response in chunk["tools"]["messages"]:
                for msg in messages_to_display:
                    if msg.metadata.get("id") == tool_response.tool_call_id:
                        msg.content += f"\nResult: {tool_response.content}"
                        msg.metadata["status"] = "done"
                yield messages_to_display

    # Emit the agent's final text answer as a regular chat message.
    if final_response:
        messages_to_display.append(ChatMessage(content=final_response))
        yield messages_to_display
# Create the Gradio interface.
# Prompts and their matching icons are named up front for readability.
_EXAMPLE_PROMPTS = [
    "What's the weather like in Tokyo?",
    "Is it sunny in Paris right now?",
    "Should I bring an umbrella in New York today?"
]
_EXAMPLE_ICONS = [
    "https://cdn3.iconfinder.com/data/icons/landmark-outline/432/japan_tower_tokyo_landmark_travel_architecture_tourism_view-256.png",
    "https://cdn2.iconfinder.com/data/icons/city-building-1/200/ArcdeTriomphe-256.png",
    "https://cdn2.iconfinder.com/data/icons/city-icons-for-offscreen-magazine/80/new-york-256.png"
]

demo = gr.ChatInterface(
    fn=stream_from_agent,
    type="messages",
    title="🌤️ Weather Assistant",
    description="Ask about the weather anywhere! Watch as I gather the information step by step.",
    examples=_EXAMPLE_PROMPTS,
    example_icons=_EXAMPLE_ICONS,
    save_history=True,
    editable=True
)
if __name__ == "__main__":
    # Load environment variables (e.g. OPENAI_API_KEY) from a local .env.
    # python-dotenv is optional: if it isn't installed, silently continue
    # and rely on the environment being configured some other way.
    try:
        from dotenv import load_dotenv
        load_dotenv()
    except ImportError:
        pass
    # Fixed defect: the original line ended with a stray " |" (scrape
    # residue), which made the file a SyntaxError.
    demo.launch(debug=True)