# First_Agent/app.py
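# Entry point for this Space: build a smolagents CodeAgent with a few custom tools
# and launch it behind a Gradio UI.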
from smolagents import (
CodeAgent,
HfApiModel,
TransformersModel,
OpenAIServerModel,
load_tool,
tool,
)
import os
import datetime
import pytz
import yaml
from tools import (
FinalAnswerTool,
VisitWebpageTool,
    DuckDuckGoSearchTool,  # This one is also built into smolagents
)
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(
    arg1: str, arg2: int
) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description but feel free to modify the tool
    """A tool that does nothing yet.

    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""A tool that fetches the current local time in a specified timezone.
Args:
timezone: A string representing a valid timezone (e.g., 'America/New_York').
"""
try:
# Create timezone object
tz = pytz.timezone(timezone)
# Get current time in that timezone
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"The current local time in {timezone} is: {local_time}"
except Exception as e:
return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
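# SPACE_ID is set automatically inside a Hugging Face Space, so it is a reliable signal
# for whether we are running on the Space or locally.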
if IS_IN_HF_SPACE := os.getenv("SPACE_ID"):
# model_id = (
    # # BUG: It seems we fail to call this model => bad request
# "https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud" # it is possible that this model may be overloaded
# if True
# # BUG: Model not loaded on the server: https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct/v1/chat/completions. Please retry with a higher timeout (current: 120)
# # BUG: TooManyRequests: Please log in or use a HF access token
# else "Qwen/Qwen2.5-Coder-32B-Instruct" # The default value of HfApiModel
# )
# https://discord.com/channels/879548962464493619/1336751588391391283/1338901490651103414
model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
print("Using HfApiModel with model_id:", model_id)
model = HfApiModel(
        max_tokens=2096,  # this branch only runs on the Space, so the cap always applies
temperature=0.5,
model_id=model_id,
custom_role_conversions=None,
)
else:
from dotenv import load_dotenv
# NOTE: to load the Hugging Face API Key
curr_dir = os.path.dirname(os.path.abspath(__file__))
load_dotenv(os.path.join(curr_dir, "../../.env"))
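    # Illustrative .env contents (names other than OPENAI_API_KEY are assumptions; adjust to your setup):
    #   OPENAI_API_KEY=sk-...
    #   HF_TOKEN=hf_...   # picked up by huggingface_hub for gated models such as Llama-3.2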
if OPENAI_API_KEY := os.getenv("OPENAI_API_KEY"):
print("Using OpenAIServerModel with model_id: gpt-3.5-turbo")
model = OpenAIServerModel(model_id="gpt-3.5-turbo", api_key=OPENAI_API_KEY)
else:
        # NOTE: this model is not good enough for agent use (and it might still be too heavy to run on a typical machine)
# print(
# "Using TransformersModel with model_id: HuggingFaceTB/SmolLM2-1.7B-Instruct"
# )
#
# model = TransformersModel(
# model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", trust_remote_code=True
# )
        # NOTE: this model is good enough to use simple tools (but it needs authentication)
# https://huggingface.co./meta-llama/Llama-3.2-11B-Vision-Instruct
# https://huggingface.co./settings/gated-repos
print(
"Using HfApiModel with model_id: meta-llama/Llama-3.2-11B-Vision-Instruct"
)
        # NOTE: this uses InferenceClient under the hood
model = HfApiModel(
temperature=0.5,
model_id="meta-llama/Llama-3.2-11B-Vision-Instruct",
custom_role_conversions=None,
)
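# At this point `model` is either an HfApiModel (Space default or the Llama fallback)
# or an OpenAIServerModel (local run with OPENAI_API_KEY set).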
# Load the image-generation tool from the Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
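# NOTE: trust_remote_code=True executes code shipped with the Hub tool, so only enable it for tools you trust.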
duckduckgo_tool = DuckDuckGoSearchTool()
visit_webpage_tool = VisitWebpageTool()
with open("prompts.yaml", "r") as stream:
prompt_templates = yaml.safe_load(stream)
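# prompts.yaml holds the prompt templates (system prompt, etc.) that CodeAgent will use in place of its defaults.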
agent = CodeAgent(
model=model,
tools=[
final_answer,
my_custom_tool,
get_current_time_in_timezone,
image_generation_tool,
duckduckgo_tool,
visit_webpage_tool,
], # Add your tools here (don't remove final answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates,
)
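# Quick sanity check without the UI (illustrative):
#   agent.run("What time is it in America/New_York right now?")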
if __name__ == "__main__":
from Gradio_UI import GradioUI
GradioUI(agent).launch()
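# Run locally with `python app.py`; on a Hugging Face Space, app.py is launched automatically as the entry point.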