import os

import requests
import streamlit as st
from llama_cpp import Llama

# Debug: show the working directory the model file will be downloaded into.
print(os.getcwd())
#===================Download Model==============================
url = "https://huggingface.co./ngxson/DeepSeek-R1-Distill-Qwen-7B-abliterated-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf?download=true"
file_name = "DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"

# Streamlit reruns this whole script on every interaction, so skip the
# download (a multi-GB file) when it is already on disk.
if not os.path.exists(file_name):
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        # Stream the response in chunks so the file never has to fit in memory.
        with open(file_name, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        print("Download complete:", file_name)
    else:
        print("Failed to download. Status code:", response.status_code)
#==================================Streamlit App====================================
# Set Streamlit page config
st.set_page_config(page_title="Llama Chatbot", page_icon="πŸ€–")
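# Note: st.set_page_config must be the first Streamlit command executed in
# the script, which is why it sits ahead of all other st.* calls here.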
# Model path (Ensure the model is in the correct directory)
MODEL_PATH = file_name
# Stop if the model is still missing (e.g., the download above failed)
if not os.path.exists(MODEL_PATH):
    st.warning("Model not found! Please download it first.")
    st.stop()
# Load the Llama model once per session; re-instantiating a 7B GGUF model
# on every Streamlit rerun would add a long delay to each message.
if "llm" not in st.session_state:
    # n_ctx=2048 raises the context window above llama_cpp's default of 512
    # so multi-turn conversations fit.
    st.session_state["llm"] = Llama(model_path=MODEL_PATH, n_ctx=2048)
# Streamlit UI
st.title("πŸ€– Llama GGUF Chatbot")
if "messages" not in st.session_state:
st.session_state["messages"] = []
# Display chat history
for message in st.session_state["messages"]:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# User input
user_input = st.chat_input("Type your message here...")
if user_input:
    # Display user message
    st.session_state["messages"].append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Generate response. create_chat_completion applies the model's chat
    # template and sees the full history; calling the model directly as a
    # raw completion would use llama_cpp's default max_tokens of 16 and
    # truncate almost every reply.
    with st.spinner("Thinking..."):
        output = st.session_state["llm"].create_chat_completion(
            messages=st.session_state["messages"],
            max_tokens=512,
        )
        response = output["choices"][0]["message"]["content"]

    # Display bot response
    st.session_state["messages"].append({"role": "assistant", "content": response})
    with st.chat_message("assistant"):
        st.markdown(response)
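# Run locally with: streamlit run app.py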