import requests
import streamlit as st
from llama_cpp import Llama
import os
print(os.getcwd())  # Debug: show the current working directory
#===================Download Model==============================
url = "https://huggingface.co./ngxson/DeepSeek-R1-Distill-Qwen-7B-abliterated-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf?download=true"
file_name = "DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"
# Download the GGUF file only if it is not already present; Streamlit re-runs
# this script on every interaction, so an unconditional download would repeat.
if not os.path.exists(file_name):
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(file_name, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        print("Download complete:", file_name)
    else:
        print("Failed to download. Status code:", response.status_code)
#==================================Streamlit App====================================
# Set Streamlit page config
st.set_page_config(page_title="Llama Chatbot", page_icon="🤖")
# Model path (the GGUF file downloaded above)
MODEL_PATH = file_name

# Stop with a warning if the model file is still missing
if not os.path.exists(MODEL_PATH):
    st.warning("Model not found! Please download it first.")
    st.stop()

# Load the Llama model once and keep it in session state so it is not
# re-initialized on every Streamlit rerun
if "llm" not in st.session_state:
    st.session_state["llm"] = Llama(model_path=MODEL_PATH)
# Streamlit UI
st.title("🤖 Llama GGUF Chatbot")
if "messages" not in st.session_state:
st.session_state["messages"] = []
# Display chat history
for message in st.session_state["messages"]:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# User input
user_input = st.chat_input("Type your message here...")
if user_input:
    # Display user message
    st.session_state["messages"].append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Generate response
    with st.spinner("Thinking..."):
        response = st.session_state["llm"](user_input)["choices"][0]["text"]

    # Display bot response
    st.session_state["messages"].append({"role": "assistant", "content": response})
    with st.chat_message("assistant"):
        st.markdown(response)
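
#==================Optional: chat-style generation (sketch)====================
# Not part of the original app: llama-cpp-python also offers a chat-style API,
# create_chat_completion, which applies the model's chat template and accepts
# the same role/content history kept in st.session_state["messages"]. The
# helper below is illustrative and never called; max_tokens=512 is an assumed
# cap on reply length, not a tuned setting.
def generate_reply(llm, history):
    output = llm.create_chat_completion(
        messages=history,   # e.g. st.session_state["messages"]
        max_tokens=512,     # assumed limit on reply length
    )
    return output["choices"][0]["message"]["content"]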