{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from modules.llm_in_use import get_llm\n",
    "from langchain_ollama import ChatOllama\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = get_llm()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.messages import AIMessage\n",
    "\n",
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
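  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The same call can also be streamed. The cell below is a minimal sketch (not part of the original example) that assumes the `llm` and `messages` objects defined above; `stream()` yields message chunks whose `content` can be printed as they arrive."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: stream the translation chunk by chunk instead of waiting for the full reply\n",
    "for chunk in llm.stream(messages):\n",
    "    print(chunk.content, end=\"\", flush=True)"
   ]
  },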
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
    "        ),\n",
    "        (\"human\", \"{input}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "chain = prompt | llm\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
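  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A chain often ends with an output parser so the result is a plain string rather than an `AIMessage`. The cell below is a small illustrative extension of the chain above (assuming the `prompt` and `llm` objects already defined) using `StrOutputParser` from `langchain_core`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.output_parsers import StrOutputParser\n",
    "\n",
    "# Append a parser so the chain returns the message text directly\n",
    "str_chain = prompt | llm | StrOutputParser()\n",
    "str_chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"Spanish\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },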
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "\n",
    "from langchain_core.tools import tool\n",
    "from langchain_ollama import ChatOllama\n",
    "\n",
    "\n",
    "@tool\n",
    "def validate_user(user_id: int, addresses: List[str]) -> bool:\n",
    "    \"\"\"Validate user using historical addresses.\n",
    "\n",
    "    Args:\n",
    "        user_id (int): the user ID.\n",
    "        addresses (List[str]): Previous addresses as a list of strings.\n",
    "    \"\"\"\n",
    "    return True\n",
    "\n",
    "\n",
    "llm = ChatOllama(\n",
    "    model=\"llama3.2\",\n",
    "    temperature=0,\n",
    "    base_url=\"http://141.211.127.171\"\n",
    ").bind_tools([validate_user])\n",
    "\n",
    "result = llm.invoke(\n",
    "    \"Could you validate user 123? They previously lived at \"\n",
    "    \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
    "    \"Houston TX.\"\n",
    ")\n",
    "result.tool_calls"
   ]
  },
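  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`tool_calls` is only a list of structured requests; nothing has been executed yet. The cell below is an illustrative sketch (not part of the original example) that runs each requested call locally with the arguments the model produced, using the `result` and `validate_user` objects from the previous cell."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: execute each tool call the model requested and show its return value\n",
    "for tool_call in result.tool_calls:\n",
    "    args = tool_call[\"args\"]\n",
    "    print(tool_call[\"name\"], args, \"->\", validate_user.invoke(args))"
   ]
  },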
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "paintrek",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}