import os
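# The Cohere key is stored under the 'cohere_ai' secret and copied into the
# COHERE_API_KEY variable that langchain-cohere reads.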
os.environ['COHERE_API_KEY'] = os.getenv('cohere_ai')
# Create the Cohere chat model
from langchain_cohere.chat_models import ChatCohere
chat = ChatCohere(model="command-r-plus", temperature=0.3)
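# Hedged sanity check (illustrative only, not part of the pipeline): ChatCohere follows
# the standard LangChain chat-model interface, so it can be invoked directly, e.g.
#   print(chat.invoke("In one sentence, what is a ReAct agent?").content)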
from langchain_community.tools.tavily_search import TavilySearchResults

os.environ['TAVILY_API_KEY'] = os.getenv('tavily_ai')

internet_search = TavilySearchResults()
internet_search.name = "internet_search"
internet_search.description = "Returns a list of relevant document snippets for a textual query retrieved from the internet."


from pydantic import BaseModel, Field
class TavilySearchInput(BaseModel):
    query: str = Field(description="Query to search the internet with")
internet_search.args_schema = TavilySearchInput
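# Hedged sketch of calling the search tool on its own (illustrative query, not from the app):
#   results = internet_search.invoke({"query": "Cohere Command R+ release notes"})
#   # 'results' is a list of snippet dicts, typically with 'url' and 'content' keys.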
from langchain.agents import Tool
from langchain_experimental.utilities import PythonREPL

python_repl = PythonREPL()
repl_tool = Tool(
    name="python_repl",
    description="Executes python code and returns the result. The code runs in a static sandbox without interactive mode, so print output or save output to a file.",
    func=python_repl.run,
)
repl_tool.name = "python_interpreter"

# from langchain_core.pydantic_v1 import BaseModel, Field
class ToolInput(BaseModel):
    code: str = Field(description="Python code to execute.")
repl_tool.args_schema = ToolInput
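# Hedged sketch of exercising the Python tool directly; PythonREPL.run executes the code
# string and returns whatever it printed, e.g.
#   print(python_repl.run("import math\nprint(math.sqrt(2))"))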
from langchain.agents import AgentExecutor
from langchain_cohere.react_multi_hop.agent import create_cohere_react_agent
from langchain_core.prompts import ChatPromptTemplate
# Create the prompt
prompt = ChatPromptTemplate.from_template("{input}")


# Create the ReAct agent
agent = create_cohere_react_agent(
    llm=chat,
    tools=[internet_search, repl_tool],
    prompt=prompt,
)
agent_executor = AgentExecutor(agent=agent, tools=[internet_search, repl_tool], verbose=True)
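# Hedged sketch of a direct call to the executor (the same call process_data makes below).
# The result dict carries the answer under 'output' and, for the Cohere ReAct agent,
# the supporting sources under 'citations':
#   result = agent_executor.invoke({"input": "Summarise this week's biggest AI announcement."})
#   print(result['output'])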
from typing import List, Mapping, Any
from langchain_cohere.common import CohereCitation
def process_data(problem):
    output = 'Gemini agent rewriting your query \n\n'
    yield output
    # Query rewriting is currently disabled; the question goes to the Cohere agent as-is.
    #rewrite = get_completion(f"Rewrite the user question: {problem} ")
    #output += f"Here is your rewritten query: {rewrite} \n\n"
    #yield output
    output += "Cohere agent gathering the data from public sources and doing analysis \n\n"
    yield output
    coh_output = agent_executor.invoke({"input": f"{problem}"})
    print("Output is", coh_output['output'])
    output += "Final Output: \n\n" + coh_output['output'] + "\n\n"
    yield output
    # 'citations' is a list of CohereCitation objects; guard against runs that return none.
    citations = coh_output.get('citations', [])
    print(citations)
    # Collect the unique source URLs from each citation's supporting documents.
    urls = []
    for item in citations:
        if isinstance(item, CohereCitation) and item.documents:
            for doc in item.documents:
                if 'url' in doc:
                    urls.append(doc['url'])
    final_urls = list(set(urls))
    output += "Citations: \n\n" + '\n'.join(final_urls)
    yield output

    return output
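# Hedged usage sketch: process_data is a generator that yields progressively longer status
# text, which suits a streaming front-end (for instance a Gradio app wired to this function;
# that is an assumption about how it is consumed). A plain-Python consumer would be:
#   for partial in process_data("Compare the 2023 revenues of two major cloud providers"):
#       print(partial)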