# NOTE(review): the lines originally here ("Spaces:", "Sleeping", "Sleeping")
# were Hugging Face Spaces page-status artifacts captured during extraction,
# not Python source; they are replaced by this comment.
# Standard library
import json
import time

# Third-party
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Page configuration must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Portfolio Chatbot Test",
    page_icon="🤖",
    layout="wide",
)

# Chat history is kept in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
def load_knowledge_base():
    """Load the chatbot knowledge base from ``knowledge_base.json``.

    Returns:
        dict: Parsed JSON contents, or an empty dict when the file is
        missing or malformed (the error is surfaced in the Streamlit UI
        instead of crashing the app).
    """
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    # Catch only what file access / parsing can actually raise: OSError for
    # I/O failures, ValueError for json.JSONDecodeError and decode errors.
    # The original blanket `except Exception` also hid unrelated bugs.
    except (OSError, ValueError) as e:
        st.error(f"Error loading knowledge base: {str(e)}")
        return {}
def get_context(query: str, knowledge_base: dict) -> str:
    """Select knowledge-base snippets relevant to *query*.

    Simple keyword routing: "project" pulls project descriptions;
    skill-related words pull the skills map; anything else — or a keyword
    match with no matching data — falls back to a short personal summary.

    Args:
        query: Raw user question.
        knowledge_base: Parsed contents of knowledge_base.json.

    Returns:
        Newline-joined context lines for prompt construction.
    """
    query_lower = query.lower()
    contexts = []

    # Project context
    if "project" in query_lower:
        if "projects" in knowledge_base:
            contexts.extend(
                f"{name}: {desc}"
                for name, desc in knowledge_base["projects"].items()
            )
    # Skills context
    elif any(keyword in query_lower for keyword in ["skill", "experience", "capability"]):
        skills = knowledge_base.get("personal_details", {}).get("skills", {})
        contexts.extend(f"{skill}: {desc}" for skill, desc in skills.items())

    # Fall back to the default summary both for unmatched queries and when a
    # keyword matched but the knowledge base had no data for it. The original
    # returned an empty string in that second case, starving the prompt of
    # any context at all.
    if not contexts:
        contexts = [
            f"Name: {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            "Summary: I am an aspiring AI/ML engineer with experience in Python, Machine Learning, and Data Analysis.",
        ]
    return "\n".join(contexts)
def initialize_model():
    """Load the chat model and its tokenizer.

    Returns:
        tuple: ``(tokenizer, model)`` on success, ``(None, None)`` on
        failure, with the error shown in the Streamlit UI.
    """
    # For testing, use a smaller model
    model_name = "meta-llama/Llama-2-7b-chat-hf"  # You'll need to adjust this
    try:
        tok = AutoTokenizer.from_pretrained(model_name)
        lm = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,  # half precision to reduce memory
            device_map="auto",          # let the library pick device placement
        )
    except Exception as e:
        st.error(f"Error initializing model: {str(e)}")
        return None, None
    return tok, lm
def main():
    """Render the two-column testing UI: chat on the left, tools on the right."""
    st.title("Portfolio Chatbot Testing Interface")
    st.write("Test the chatbot's responses and interaction patterns")

    # Load knowledge base
    knowledge_base = load_knowledge_base()

    # Two-column layout: chat (2/3) and testing tools (1/3).
    col1, col2 = st.columns([2, 1])

    with col1:
        st.subheader("Chat Interface")

        # Replay the conversation stored in session state.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # Accept user input
        if prompt := st.chat_input("What would you like to know?"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            # Bug fix: render the user's message immediately. The history loop
            # above has already run, so without this the assistant's reply was
            # displayed before the question it answers ever appeared.
            with st.chat_message("user"):
                st.markdown(prompt)

            # Context is computed here so the real model call can use it later.
            context = get_context(prompt, knowledge_base)

            # Placeholder reply — replace with an actual model response that
            # consumes `context` once the model wiring is in place.
            response = "Test Response: Let me tell you about that based on my experience..."
            with st.chat_message("assistant"):
                st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})

    with col2:
        st.subheader("Testing Tools")
        if st.button("Clear Chat History"):
            st.session_state.messages = []
            st.experimental_rerun()

        st.subheader("Sample Questions")
        # NOTE(review): these buttons only queue a user message; no assistant
        # reply is generated on the rerun, because responses are produced
        # solely in the chat_input branch above — confirm this is intended.
        if st.button("Tell me about your ML projects"):
            st.session_state.messages.append({
                "role": "user",
                "content": "Tell me about your ML projects"
            })
            st.experimental_rerun()
        if st.button("What are your Python skills?"):
            st.session_state.messages.append({
                "role": "user",
                "content": "What are your Python skills?"
            })
            st.experimental_rerun()


if __name__ == "__main__":
    main()