import os

from dotenv import load_dotenv

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from typing import TypedDict, Annotated, Literal
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
import gradio as gr

load_dotenv()

# Tuning knobs for the generate-review loop
max_tokens = 2000
num_iterations = 2       # maximum number of recommendation attempts
quality_threshold = 8    # minimum review score (1-10) to accept a recommendation

travel_database = {
    "paris": {"destination": "Paris", "price": 1500, "features": ["romantic", "cultural", "historic"]},
    "bali": {"destination": "Bali", "price": 1200, "features": ["beach", "relaxing", "adventurous"]},
    "new_york": {"destination": "New York", "price": 2000, "features": ["urban", "shopping", "nightlife"]},
    "tokyo": {"destination": "Tokyo", "price": 1800, "features": ["modern", "cultural", "tech-savvy"]},
}
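
# The catalog above is interpolated verbatim into the recommender prompt; the
# model is asked to pick one of these four entries.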


class GenerateRecommendation(BaseModel):
    destination: str = Field(description="The recommended travel destination")
    explanation: str = Field(description="A brief explanation of the recommendation")


class RecommendationQualityScore(BaseModel):
    score: int = Field(description="Quality score for the recommendation, from 1 to 10")
    comment: str = Field(description="Comment on the quality of the recommendation")
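

# The two schemas above define the structured contract between the agent roles:
# the recommender node parses its answer into GenerateRecommendation, and the
# reviewer node parses its verdict into RecommendationQualityScore.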


class GraphState(TypedDict):
    # `add_messages` appends new messages to the running conversation instead of
    # overwriting it; the remaining fields keep the default last-value behaviour.
    messages: Annotated[list, add_messages]
    quality: int
    iterations: int
    final_recommendation: str


builder = StateGraph(GraphState)

# ChatOpenAI targets the OpenAI API here, so the key is read from OPENAI_API_KEY
# (loaded from .env by load_dotenv above).
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
    max_tokens=max_tokens,
    api_key=os.getenv("OPENAI_API_KEY"),
)

developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
reviewer_structure_llm = llm.with_structured_output(RecommendationQualityScore, method="json_mode")
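
# Note: with method="json_mode" the Pydantic schema is only used to parse the
# reply; it is not sent to the model. Each prompt therefore has to mention JSON
# explicitly and spell out the expected keys, which the prompts below do.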


def travel_recommender(state):
    # Use the latest human message as the user's preferences; on a retry the last
    # message in the state is the reviewer's comment, not the original request.
    user_requirements = next(
        msg.content for msg in reversed(state["messages"]) if isinstance(msg, HumanMessage)
    )
    system_prompt = f"""
    You are an expert travel advisor.
    Based on the following user preferences: {user_requirements},
    select the best destination from this database: {travel_database}.
    Respond in JSON with the keys `destination` and `explanation`.
    """
    messages = [SystemMessage(content=system_prompt)]

    recommendation_obj = developer_structure_llm.invoke(messages)

    text_output = (
        f"Recommended destination: {recommendation_obj.destination}\n"
        f"Reason: {recommendation_obj.explanation}"
    )

    # Return a partial update; the `add_messages` reducer appends the new message.
    return {
        "messages": [AIMessage(content=text_output)],
        "iterations": state["iterations"] + 1,
    }


def recommendation_review(state):
    system_prompt = """
    You are a recommendation reviewer with high standards.
    Review the provided recommendation and assign a quality score between 1 and 10.
    Evaluate its relevance, accuracy, and alignment with the customer's needs.
    Respond in JSON with the keys `score` and `comment`.
    """
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    system_messages = [SystemMessage(content=system_prompt)]

    messages = system_messages + human_messages + ai_messages
    review = reviewer_structure_llm.invoke(messages)

    review_comment = f"Review Score: {review.score}\nComment: {review.comment}"
    return {
        "messages": [AIMessage(content=review_comment)],
        "quality": review.score,
    }


def final_recommendation(state):
    system_prompt = "Review the final recommendation and provide a final answer for the user."
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    system_messages = [SystemMessage(content=system_prompt)]

    messages = system_messages + human_messages + ai_messages
    final_message = llm.invoke(messages)

    return {
        "messages": [AIMessage(content=f"Final Recommendation: {final_message.content}")],
        "final_recommendation": final_message.content,
    }


def quality_gate_condition(state) -> Literal["travel_recommender", "final_recommendation"]:
    # Stop once the iteration budget is spent; otherwise retry while the reviewer's
    # score is below the quality threshold.
    if state["iterations"] >= num_iterations:
        return "final_recommendation"
    if state["quality"] < quality_threshold:
        return "travel_recommender"
    return "final_recommendation"
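

# The wiring below implements a simple generate-review loop:
#   travel_recommender -> recommendation_review -> (retry or finish)
# With no path map given, the conditional edge relies on the Literal return type
# of quality_gate_condition to discover the possible target nodes.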

builder.add_node("travel_recommender", travel_recommender)
builder.add_node("recommendation_review", recommendation_review)
builder.add_node("final_recommendation", final_recommendation)

builder.add_edge(START, "travel_recommender")
builder.add_edge("travel_recommender", "recommendation_review")
builder.add_edge("final_recommendation", END)

builder.add_conditional_edges("recommendation_review", quality_gate_condition)

graph = builder.compile()
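
# Optional sanity check (a minimal sketch, independent of the Gradio app below):
# stream the compiled graph directly to watch each node's partial state update.
#
#     demo_state = {"messages": [HumanMessage(content="beach and relaxation on a budget")],
#                   "quality": 0, "iterations": 0}
#     for update in graph.stream(demo_state):
#         print(update)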


def run_graph(user_input: str) -> str:
    initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
    final_state = graph.invoke(initial_state)

    final_messages = [msg for msg in final_state["messages"] if isinstance(msg, AIMessage)]
    if not final_messages:
        return "No final recommendation was generated."

    return final_messages[-1].content


iface = gr.Interface(
    fn=run_graph,
    inputs=gr.Textbox(label="Enter your travel preferences"),
    outputs=gr.Textbox(label="Final Recommendation"),
    title="Travel Recommendation System",
)

if __name__ == "__main__":
    iface.launch()