from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
from duckduckgo_search import DDGS
# ----- WeatherTool -----
class WeatherTool:
    """Fetches current weather for a city from the OpenWeatherMap API (metric units)."""

    def __init__(self, api_key: str):
        self.api_key = api_key

    def __call__(self, city: str):
        try:
            url = f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={self.api_key}&units=metric"
            response = requests.get(url)
            data = response.json()
            if response.status_code != 200:
                return f"Error: {data.get('message', 'Unable to fetch weather data')}"
            weather = data["weather"][0]["description"]
            temp = data["main"]["temp"]
            feels_like = data["main"]["feels_like"]
            humidity = data["main"]["humidity"]
            wind_speed = data["wind"]["speed"]
            return (f"Weather in {city.title()}:\n"
                    f"- Condition: {weather}\n"
                    f"- Temperature: {temp}°C (Feels like {feels_like}°C)\n"
                    f"- Humidity: {humidity}%\n"
                    f"- Wind Speed: {wind_speed} m/s")
        except Exception as e:
            return f"Exception during weather fetch: {str(e)}"
# ----- VisitWebpageTool -----
class VisitWebpageTool:
    """Stub tool: does not fetch anything, it only echoes the requested URL."""

    def __call__(self, url: str):
        return f"Pretending to visit webpage: {url}"
# ----- CustomDuckDuckGoSearchTool -----
class CustomDuckDuckGoSearchTool:
    """Runs a DuckDuckGo text search and returns up to max_results raw hits."""

    def __call__(self, query: str, max_results: int = 3):
        try:
            with DDGS() as ddgs:
                results = []
                for r in ddgs.text(query):
                    results.append(r)
                    if len(results) >= max_results:
                        break
                return results
        except Exception as e:
            return f"Search error: {str(e)}"
# ----- SimpleFinalAnswerTool -----
# Lightweight formatter used only by the demo ToolCallingAgent below. Renamed so it
# does not shadow the FinalAnswerTool imported from tools.final_answer, which the
# smolagents CodeAgent at the bottom of this file needs.
class SimpleFinalAnswerTool:
    def __call__(self, result: str):
        return f"\n Final Answer:\n{result}"
# ----- DummyModel (if needed) -----
class DummyModel:
    """Placeholder model; the demo ToolCallingAgent stores it but never calls it."""

    def call(self, input_text):
        return f"Model processed: {input_text}"
# ----- ToolCallingAgent -----
class ToolCallingAgent:
    """Tries each tool in order and returns the first non-empty result via final_tool."""

    def __init__(self, tools, final_tool, model, max_steps=10):
        self.tools = tools
        self.final_tool = final_tool
        self.model = model
        self.max_steps = max_steps

    def run(self, input_text):
        print(f"\nAgent received: {input_text}")
        for tool in self.tools:
            try:
                result = tool(input_text)
                if result:
                    final_output = self.final_tool(str(result))
                    print(final_output)
                    return final_output
            except Exception as e:
                print(f"\n[{tool.__class__.__name__} Error]: {str(e)}")
        return None  # no tool produced a usable result
# Initialize tools
weather_tool = WeatherTool(api_key="HF_Token")
visit_webpage = VisitWebpageTool()
duckduckgo_tool = CustomDuckDuckGoSearchTool()
final_answer = SimpleFinalAnswerTool()
model = DummyModel()
# Initialize and run the agent
web_agent = ToolCallingAgent(
    tools=[duckduckgo_tool, visit_webpage, weather_tool],
    final_tool=final_answer,
    model=model,
    max_steps=10,
)
# Example query
web_agent.run("Tokyo") # Can try a city or any other search term
# If the agent does not answer, the model may be overloaded; use another model or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
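# Note: image_generation_tool is loaded but not yet wired into the agent. One possible
# wiring (sketch) is to append it to the CodeAgent's tools list below, e.g.
# tools=[FinalAnswerTool(), image_generation_tool].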
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[FinalAnswerTool()],  # the imported smolagents tool; add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
GradioUI(agent).launch()