# Search_tool / app.py
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
#!pip install duckduckgo-search --upgrade
from duckduckgo_search import DDGS
# Custom search tool: queries DuckDuckGo via duckduckgo_search and returns up to
# `max_results` raw result entries (or an error string if the search fails).
class CustomDuckDuckGoSearchTool:
    def __call__(self, query: str, max_results: int = 5):
        try:
            with DDGS() as ddgs:
                results = []
                for r in ddgs.text(query):
                    results.append(r)
                    if len(results) >= max_results:
                        break
                return results
        except Exception as e:
            return f"Search error: {str(e)}"
# Dummy placeholder for `visit_webpage` tool
class VisitWebpageTool:
def __call__(self, url: str):
return f"Pretending to visit: {url}"
# Simple final-answer formatter used by the demo ToolCallingAgent below. It is named
# differently from the FinalAnswerTool imported from tools.final_answer so that the
# smolagents tool required by the CodeAgent at the bottom of the file is not shadowed.
class SimpleFinalAnswerTool:
    def __call__(self, results):
        formatted_answer = "Final Answer:\n"
        for result in results:
            formatted_answer += f"- {str(result)}\n"
        return formatted_answer
# Dummy model
class DummyModel:
def call(self, input_text):
return f"Model processing: {input_text}"
# Simplified ToolCallingAgent: runs every tool on the query once and formats the
# collected outputs with the final-answer tool (model and max_steps are accepted
# for interface compatibility but are not used in this demo).
class ToolCallingAgent:
def __init__(self, tools, model, final_answer_tool, max_steps=10):
self.tools = tools
self.model = model
self.final_answer_tool = final_answer_tool
self.max_steps = max_steps
def run(self, query):
print(f"Running agent with query: {query}")
tool_outputs = []
for tool in self.tools:
output = tool(query)
print("Tool output:", output)
tool_outputs.append(output)
# Use the final answer tool to format the collected outputs
final_result = self.final_answer_tool(tool_outputs)
print(final_result)
return final_result
# Initialize tools and model
model = DummyModel()
search_tool = CustomDuckDuckGoSearchTool()
visit_webpage = VisitWebpageTool()
final_answer = SimpleFinalAnswerTool()
# Initialize the agent
web_agent = ToolCallingAgent(
tools=[search_tool, visit_webpage],
model=model,
final_answer_tool=final_answer,
max_steps=10
)
# Example usage
web_agent.run("Latest AI tools")
# Re-bind final_answer to the smolagents FinalAnswerTool (imported from tools.final_answer),
# which is the tool the CodeAgent below expects.
final_answer = FinalAnswerTool()
# If the agent does not answer, the model may be overloaded; use another model or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)
# Import the text-to-image tool from the Hub (it must also be added to the agent's tools list below to be usable)
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
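# A minimal sketch (not part of the original Space) of how the DuckDuckGo search could be
# exposed to the CodeAgent via the smolagents @tool decorator; the function name `web_search`
# and its output format are illustrative assumptions. To use it, add it to the CodeAgent's
# tools list below.
@tool
def web_search(query: str) -> str:
    """Search DuckDuckGo and return the top results as plain text.

    Args:
        query: The search query to look up on the web.
    """
    results = CustomDuckDuckGoSearchTool()(query, max_results=5)
    if isinstance(results, str):  # the custom tool returns an error string on failure
        return results
    return "\n".join(f"{r.get('title', '')}: {r.get('href', '')}" for r in results)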
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
model=model,
tools=[final_answer], ## add your tools here (don't remove final answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
GradioUI(agent).launch()