# ego_jimmy / app.py
import streamlit as st
from io import BytesIO
import ibm_watsonx_ai
import secretsload
import anton_ego_jimmy
import requests
import time
import regex
import re
from datetime import datetime
from jinja2 import Template
from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai import Credentials, APIClient
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
from ibm_watsonx_ai.metanames import GenTextReturnOptMetaNames as RetParams
from secretsload import load_stsecrets # script to load credentials from HuggingFace secrets section
# New imports for ReportLab
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from reportlab.lib.enums import TA_LEFT, TA_RIGHT
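
# NOTE: anton_ego_jimmy is assumed to be a local configuration module shipped alongside this
# app; the code below reads constants such as TYPE, SYSTEM_PROMPT, PROMPT_TEMPLATE,
# BAKE_IN_PROMPT_SYNTAX, SELECTED_MODEL, VERIFY, DISPLAY_CHAT_HISTORY and the generation
# parameters from it. A minimal sketch of what it might contain (illustrative values only):
#
#   TYPE = "chat"
#   SELECTED_MODEL = "meta-llama/llama-3-1-70b-instruct"   # hypothetical model id
#   PROMPT_TEMPLATE = "llama3-instruct (llama-3 & 3.1) - system"
#   BAKE_IN_PROMPT_SYNTAX = True
#   SYSTEM_PROMPT = "You are Jimmy, a discerning critic."  # placeholder persona
#   VERIFY = False
#   DISPLAY_CHAT_HISTORY = 1
#   DECODING_METHOD = "greedy"
#   MAX_NEW_TOKENS = 500
#   MIN_NEW_TOKENS = 1
#   REPETITION_PENALTY = 1.0
#   STOP_SEQUENCES = []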
credentials = load_stsecrets()
st.set_page_config(
    page_title="Jimmy",
    page_icon="🧐",
    initial_sidebar_state="collapsed",
    layout="centered"
)
# Password protection
def check_password():
    def password_entered():
        if st.session_state["password"] == st.secrets["app_password"]:
            st.session_state["password_correct"] = True
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    if "password_correct" not in st.session_state:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Designed and developed by Milan Mrdenovic © IBM Norway 2025")
        return False
    elif not st.session_state["password_correct"]:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Designed and developed by Milan Mrdenovic © IBM Norway 2025")
        st.error("😕 Password incorrect")
        return False
    else:
        return True

if not check_password():
    st.stop()
# Initialize session state
if 'current_page' not in st.session_state:
    st.session_state.current_page = 0

def initialize_session_state():
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
def setup_client():
    credentials = Credentials(
        url=st.secrets["url"],
        api_key=st.secrets["api_key"]
    )
    client = APIClient(credentials, project_id=st.secrets["project_id"])
    return client

client = setup_client()
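
# The client relies on four entries in Streamlit secrets (st.secrets): "url", "api_key" and
# "project_id" for watsonx.ai, plus "app_password" for the password gate above. On Hugging Face
# Spaces these are typically provided through the repository's Secrets settings.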
def prepare_prompt(prompt, chat_history):
    if anton_ego_jimmy.TYPE == "chat" and chat_history:
        chats = "\n".join([f"{message['role']}: \"{message['content']}\"" for message in chat_history])
        return f"Conversation History:\n{chats}\n\nNew Message: {prompt}"
    return prompt
def apply_prompt_syntax(prompt, system_prompt, prompt_template, bake_in_prompt_syntax):
    model_family_syntax = {
        "llama3-instruct (llama-3 & 3.1) - system": """\n<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "llama3-instruct (llama-3 & 3.1) - user": """\n<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "granite-13b-chat & instruct - system": """\n<|system|>\n{system_prompt}\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
        "granite-13b-chat & instruct - user": """\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
        "llama2-chat - system": """\n[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt} [/INST]\n""",
        "llama2-chat - user": """\n[INST] {prompt} [/INST] """,
        "mistral & mixtral v2 tokenizer - system": """\n<s>[INST]System Prompt:[{system_prompt}]\n\n{prompt} [/INST]\n""",
        "mistral & mixtral v2 tokenizer - system segmented": """\n<s>[INST]System Prompt:{system_prompt}[/INST][INST]{prompt} [/INST]\n""",
        "mistral & mixtral v2 tokenizer - user": """\n<s>[INST]{prompt} [/INST]\n"""
    }

    if bake_in_prompt_syntax:
        template = model_family_syntax[prompt_template]
        if system_prompt:
            return template.format(system_prompt=system_prompt, prompt=prompt)
        if "{system_prompt}" not in template:
            # "user"-style templates have no system slot, so the prompt can still be wrapped
            return template.format(prompt=prompt)
    return prompt
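
# For illustration, with the "llama3-instruct (llama-3 & 3.1) - system" template the baked-in
# prompt sent to the model looks roughly like this (system prompt and user text are placeholders):
#
#   <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
#   {system prompt text}<|eot_id|><|start_header_id|>user<|end_header_id|>
#
#   {user message, possibly prefixed with the serialized conversation history}<|eot_id|><|start_header_id|>assistant<|end_header_id|>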
def generate_response(watsonx_llm, prompt_data, params):
    generated_response = watsonx_llm.generate_text_stream(prompt=prompt_data, params=params)
    for chunk in generated_response:
        yield chunk
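
# generate_text_stream returns a generator of partial text chunks from watsonx.ai; yielding
# them one by one lets st.write_stream (used in get_response below) render the reply
# incrementally instead of waiting for the full completion.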
emoji_pattern = regex.compile(r'\p{Emoji}', flags=regex.UNICODE)
def remove_emojis(text):
    return emoji_pattern.sub(r'', text)
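
# Emojis are stripped before building the PDF, presumably because the standard fonts used by
# ReportLab's default stylesheet cannot render emoji glyphs, which would otherwise appear as
# black boxes in the exported document (assumption about the original intent).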
def create_pdf_from_chat(chat_history):
    buffer = BytesIO()
    doc = SimpleDocTemplate(buffer, pagesize=letter, topMargin=30, bottomMargin=30)
    styles = getSampleStyleSheet()
    flowables = []

    title_style = ParagraphStyle('Title', parent=styles['Heading1'], fontSize=18, spaceAfter=20)
    flowables.append(Paragraph(f"Chat History - Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", title_style))

    user_style = ParagraphStyle('UserStyle', parent=styles['Normal'],
                                backColor=colors.lightblue, borderPadding=10,
                                alignment=TA_RIGHT)
    jimmy_style = ParagraphStyle('JimmyStyle', parent=styles['Normal'],
                                 backColor=colors.lavender, borderPadding=10)

    for message in chat_history:
        role = message["role"]
        content = remove_emojis(message["content"])
        style = user_style if role == "user" else jimmy_style
        flowables.append(Paragraph(f"<b>{role.capitalize()}:</b> {content}", style))
        flowables.append(Spacer(1, 12))

    doc.build(flowables)
    buffer.seek(0)
    return buffer
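
# The returned BytesIO buffer is handed directly to st.download_button in main(), which accepts
# file-like objects as its data argument.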
def get_response(user_input):
    # Prepare the prompt
    prompt = prepare_prompt(user_input, st.session_state.chat_history)

    # Apply prompt syntax
    prompt_data = apply_prompt_syntax(
        prompt,
        anton_ego_jimmy.SYSTEM_PROMPT,
        anton_ego_jimmy.PROMPT_TEMPLATE,
        anton_ego_jimmy.BAKE_IN_PROMPT_SYNTAX
    )

    watsonx_llm = ModelInference(
        api_client=client,
        model_id=anton_ego_jimmy.SELECTED_MODEL,
        verify=anton_ego_jimmy.VERIFY
    )

    # Prepare parameters
    params = {
        GenParams.DECODING_METHOD: anton_ego_jimmy.DECODING_METHOD,
        GenParams.MAX_NEW_TOKENS: anton_ego_jimmy.MAX_NEW_TOKENS,
        GenParams.MIN_NEW_TOKENS: anton_ego_jimmy.MIN_NEW_TOKENS,
        GenParams.REPETITION_PENALTY: anton_ego_jimmy.REPETITION_PENALTY,
        GenParams.STOP_SEQUENCES: anton_ego_jimmy.STOP_SEQUENCES
    }

    print(prompt_data, params)

    # Generate and stream response
    with st.chat_message("Jimmy", avatar="🧐"):
        stream = generate_response(watsonx_llm, prompt_data, params)
        response = st.write_stream(stream)

    # Add AI response to chat history
    st.session_state.chat_history.append({"role": "Jimmy", "content": response})
    return response
def main():
    initialize_session_state()
    st.subheader("Jimmy 🧐")

    if anton_ego_jimmy.DISPLAY_CHAT_HISTORY == 1:
        for message in st.session_state.chat_history:
            with st.chat_message(message["role"], avatar="🥷🏻" if message["role"] == "user" else "🧐"):
                st.markdown(message["content"])

    user_input = st.chat_input("You:", key="user_input")

    if user_input:
        # Add user message to chat history
        st.session_state.chat_history.append({"role": "user", "content": user_input})
        with st.chat_message("user", avatar="🥷🏻"):
            st.markdown(user_input)

        # Get response
        get_response(user_input)

    if st.session_state.chat_history:
        now = datetime.now()
        date_str = now.strftime("%Y-%m-%d")
        try:
            pdf_buffer = create_pdf_from_chat(st.session_state.chat_history)
            st.download_button(
                label="Download Chat History as PDF",
                data=pdf_buffer,
                file_name=f"chat_history_{date_str}.pdf",
                mime="application/pdf"
            )
        except Exception as e:
            st.error(f"An error occurred while generating the PDF: {str(e)}")
            st.error("If this persists, please contact support.")
if __name__ == "__main__":
    main()
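
# To run locally (assuming the dependencies above are installed and the required secrets are
# configured in .streamlit/secrets.toml): streamlit run app.py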