Create app_v2.py
app_v2.py
ADDED
@@ -0,0 +1,185 @@
import streamlit as st
from io import BytesIO
import ibm_watsonx_ai
import secretsload
import genparam
import requests
import time
import re
from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai import Credentials, APIClient
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
from ibm_watsonx_ai.metanames import GenTextReturnOptMetaNames as RetParams
from secretsload import load_stsecrets

credentials = load_stsecrets()

st.set_page_config(
    page_title="Jimmy",
    page_icon="π",
    initial_sidebar_state="collapsed"
)
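
# The app reads its configuration from Streamlit secrets. A hypothetical
# .streamlit/secrets.toml covering every key used in this file (app_password,
# url, api_key, project_id) -- the values below are placeholders, not the
# author's settings:
#
#     app_password = "..."
#     url = "https://us-south.ml.cloud.ibm.com"   # a watsonx.ai endpoint, assumed region
#     api_key = "..."
#     project_id = "..."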

# Password protection
def check_password():
    def password_entered():
        if st.session_state["password"] == st.secrets["app_password"]:
            st.session_state["password_correct"] = True
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    if "password_correct" not in st.session_state:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Developed by Milan Mrdenovic © IBM Norway 2025")
        return False
    elif not st.session_state["password_correct"]:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Developed by Milan Mrdenovic © IBM Norway 2025")
        st.error("π Password incorrect")
        return False
    else:
        return True

if not check_password():
    st.stop()


# Initialize session state
if 'current_page' not in st.session_state:
    st.session_state.current_page = 0

def initialize_session_state():
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

def setup_client():
    credentials = Credentials(
        url=st.secrets["url"],
        api_key=st.secrets["api_key"]
    )
    return APIClient(credentials, project_id=st.secrets["project_id"])
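
# Note: setup_client() is re-run on every user turn in chat_interface(). A
# minimal sketch of creating the client once per process with Streamlit's
# resource cache -- an optional optimization, not part of the original design:
#
#     @st.cache_resource
#     def setup_client_cached():
#         creds = Credentials(url=st.secrets["url"], api_key=st.secrets["api_key"])
#         return APIClient(creds, project_id=st.secrets["project_id"])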

def prepare_prompt(prompt, chat_history):
    if genparam.TYPE == "chat" and chat_history:
        chats = "\n".join([f"{message['role']}: \"{message['content']}\"" for message in chat_history])
        return f"Conversation History:\n{chats}\n\nNew User Prompt: {prompt}"
    return prompt
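
# Example: with genparam.TYPE == "chat" and one prior exchange stored in
# history, prepare_prompt("How tall is it?", history) returns a string like:
#
#     Conversation History:
#     user: "What is the Eiffel Tower?"
#     Jimmy: "A wrought-iron tower in Paris."
#
#     New User Prompt: How tall is it?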

def apply_prompt_syntax(prompt, system_prompt, prompt_template, bake_in_prompt_syntax):
    model_family_syntax = {
        ### Llama Models
        "llama3_1_instruct - system": """<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "llama3_3_instruct - system": """<|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",

        "llama3_1_instruct - user": """<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "llama3_3_instruct - user": """<|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        ### Granite Models
        "granite_3 - system": """<|start_of_role|>system<|end_of_role|>{system_prompt}<|end_of_text|>\n<|start_of_role|>user<|end_of_role|>{prompt}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>""",
        "granite_3 - user": """<|start_of_role|>user<|end_of_role|>{prompt}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>""",
        ### Granite Code Only
        "granite_code - with system": """System:\n{system_prompt}\n\nQuestion:\n{prompt}\n\nAnswer:\n""",
        "granite_code - instruction only": """Question:\n{prompt}\n\nAnswer:\n""",
        ### Mistral Models
        "mistral_large - sys": """[INST] {system_prompt}\n\n{prompt}[/INST]""",  ### mistral-large-2407
        "mistral_large - user": """[INST] {prompt}[/INST]""",  ### mistral-large-2407

        "mistral_large_2411 - sys": """[SYSTEM_PROMPT] {system_prompt}[/SYSTEM_PROMPT][INST] {prompt}[/INST]""",  ### Only deploy on demand on watsonx.ai

        "mistral_ai_small - sys": """[INST] {system_prompt}\n\n{prompt}[/INST]""",  ### mistral-small-24b-2507 seems to have tokenization issues
        ### No Syntax
        "no syntax - system": """{system_prompt}\n\n{prompt}""",
        "no syntax - user": """{prompt}""",
    }

    if bake_in_prompt_syntax:
        template = model_family_syntax[prompt_template]
        if system_prompt:
            return template.format(system_prompt=system_prompt, prompt=prompt)
        # "- user" style templates carry no {system_prompt} slot and can be
        # applied even when no system prompt is configured.
        return template.format(prompt=prompt)
    return prompt
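
# Example: apply_prompt_syntax("Hi", "Be brief.", "granite_3 - system", True)
# produces the Granite-formatted prompt:
#
#     <|start_of_role|>system<|end_of_role|>Be brief.<|end_of_text|>
#     <|start_of_role|>user<|end_of_role|>Hi<|end_of_text|>
#     <|start_of_role|>assistant<|end_of_role|>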

def generate_response(watsonx_llm, prompt_data, params):
    # generate_text_stream returns an iterator of text chunks; yielding them
    # one by one lets st.write_stream render the reply incrementally.
    generated_response = watsonx_llm.generate_text_stream(prompt=prompt_data, params=params)
    for chunk in generated_response:
        yield chunk

def chat_interface():
    st.subheader("Jimmy")

    # User input
    user_input = st.chat_input("You:", key="user_input")

    if user_input:
        # Add user message to chat history
        st.session_state.chat_history.append({"role": "user", "content": user_input})

        # Prepare the prompt
        prompt = prepare_prompt(user_input, st.session_state.chat_history)

        # Apply prompt syntax
        prompt_data = apply_prompt_syntax(
            prompt,
            genparam.SYSTEM_PROMPT,
            genparam.PROMPT_TEMPLATE,
            genparam.BAKE_IN_PROMPT_SYNTAX
        )

        # Setup client and model
        client = setup_client()
        watsonx_llm = ModelInference(
            api_client=client,
            model_id=genparam.SELECTED_MODEL,
            verify=genparam.VERIFY
        )

        # Prepare parameters
        params = {
            GenParams.DECODING_METHOD: genparam.DECODING_METHOD,
            GenParams.MAX_NEW_TOKENS: genparam.MAX_NEW_TOKENS,
            GenParams.MIN_NEW_TOKENS: genparam.MIN_NEW_TOKENS,
            GenParams.REPETITION_PENALTY: genparam.REPETITION_PENALTY,
            GenParams.STOP_SEQUENCES: genparam.STOP_SEQUENCES
        }

        # Generate and stream response
        with st.chat_message("Jimmy", avatar="π"):
            stream = generate_response(watsonx_llm, prompt_data, params)
            response = st.write_stream(stream)

        # Add AI response to chat history
        st.session_state.chat_history.append({"role": "Jimmy", "content": response})
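
# Note: chat_history is only appended to, never replayed, so each rerun shows
# just the newest streamed answer. A minimal sketch of redrawing past turns at
# the top of chat_interface() -- an assumed extension, not part of this file:
#
#     for message in st.session_state.chat_history:
#         with st.chat_message(message["role"]):
#             st.markdown(message["content"])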

def main():
    initialize_session_state()
    chat_interface()

if __name__ == "__main__":
    main()
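
# For reference, a hypothetical sketch of the genparam module this app imports.
# Every attribute name below is referenced somewhere above; the values are
# illustrative assumptions only:
#
#     TYPE = "chat"                            # enables history flattening in prepare_prompt
#     SYSTEM_PROMPT = "You are Jimmy, a helpful assistant."
#     PROMPT_TEMPLATE = "granite_3 - system"   # must be a key of model_family_syntax
#     BAKE_IN_PROMPT_SYNTAX = True
#     SELECTED_MODEL = "ibm/granite-3-8b-instruct"  # assumed model id
#     VERIFY = False
#     DECODING_METHOD = "greedy"
#     MAX_NEW_TOKENS = 500
#     MIN_NEW_TOKENS = 1
#     REPETITION_PENALTY = 1.0
#     STOP_SEQUENCES = []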