MoiMoi-01 committed on
Commit
cd5057b
·
verified ·
1 Parent(s): 92113b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -9
app.py CHANGED
@@ -1,8 +1,10 @@
1
  import requests
 
 
2
  import os
3
 
4
  print(os.getcwd())
5
-
6
  url = "https://huggingface.co/ngxson/DeepSeek-R1-Distill-Qwen-7B-abliterated-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf?download=true"
7
  file_name = "DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"
8
 
@@ -16,14 +18,47 @@ if response.status_code == 200:
16
  else:
17
  print("Failed to download. Status code:", response.status_code)
18
 
19
- from llama_cpp import Llama
20
- # url = "https://huggingface.co/ngxson/DeepSeek-R1-Distill-Qwen-7B-abliterated-GGUF/DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"
21
- model_path = "DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- # Load the model directly from URL
24
- llm = Llama(model_path=model_path)
 
25
 
26
- # Generate a response
27
- output = llm("What is AI?")
28
- print(output)
 
29
 
 
1
  import requests
2
+ import streamlit as st
3
+ from llama_cpp import Llama
4
  import os
5
 
6
  print(os.getcwd())
7
+ #===================Download Model==============================
8
  url = "https://huggingface.co/ngxson/DeepSeek-R1-Distill-Qwen-7B-abliterated-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf?download=true"
9
  file_name = "DeepSeek-R1-Distill-Qwen-7B-abliterated-Q4_K_M.gguf"
10
 
 
18
  else:
19
  print("Failed to download. Status code:", response.status_code)
20
 
21
+ #==================================Streamlit App====================================
22
+ # Set Streamlit page config
23
+ st.set_page_config(page_title="Llama Chatbot", page_icon="🤖")
24
+
25
+ # Model path (Ensure the model is in the correct directory)
26
+ MODEL_PATH = file_name
27
+
28
+ # Check if model exists, else download it
29
+ if not os.path.exists(MODEL_PATH):
30
+ st.warning("Model not found! Please download it first.")
31
+ st.stop()
32
+
33
+ # Load Llama model
34
+ st.session_state["llm"] = Llama(model_path=MODEL_PATH)
35
+
36
+ # Streamlit UI
37
+ st.title("🤖 Llama GGUF Chatbot")
38
+
39
+ if "messages" not in st.session_state:
40
+ st.session_state["messages"] = []
41
+
42
+ # Display chat history
43
+ for message in st.session_state["messages"]:
44
+ with st.chat_message(message["role"]):
45
+ st.markdown(message["content"])
46
+
47
+ # User input
48
+ user_input = st.chat_input("Type your message here...")
49
+
50
+ if user_input:
51
+ # Display user message
52
+ st.session_state["messages"].append({"role": "user", "content": user_input})
53
+ with st.chat_message("user"):
54
+ st.markdown(user_input)
55
 
56
+ # Generate response
57
+ with st.spinner("Thinking..."):
58
+ response = st.session_state["llm"](user_input)["choices"][0]["text"]
59
 
60
+ # Display bot response
61
+ st.session_state["messages"].append({"role": "assistant", "content": response})
62
+ with st.chat_message("assistant"):
63
+ st.markdown(response)
64