Issue fix
app.py +25 -9
requirements.txt +2 -1
app.py
CHANGED
@@ -15,6 +15,7 @@ from huggingface_hub import hf_hub_download
 from langchain_openai import OpenAIEmbeddings, ChatOpenAI
 from SkinCancerDiagnosis import initialize_classifier
 from rag_pipeline import invoke_rag_chain
+from langchain_core.messages import HumanMessage, AIMessage
 
 nest_asyncio.apply()
 device='cuda' if torch.cuda.is_available() else 'cpu'
@@ -135,15 +136,30 @@ if prompt := st.chat_input("Ask a follow-up question..."):
 
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
-            if len(st.session_state.messages) > 1:
-                response = llm.invoke([{"role": m["role"], "content": m["content"]} for m in st.session_state.messages])
-            else:
-                response = rag_chain.invoke(prompt)
-                response = response['result']
-
-            st.markdown(response)
-            st.session_state.messages.append({"role": "assistant", "content": response})
-
+            # Convert messages to LangChain format
+            chat_history = []
+            for msg in st.session_state.messages[:-1]:  # Exclude the current prompt
+                if msg["role"] == "user":
+                    chat_history.append(HumanMessage(content=msg["content"]))
+                else:
+                    chat_history.append(AIMessage(content=msg["content"]))
+
+            # Get response
+            response = llm.invoke([HumanMessage(content=prompt)] + chat_history)
+            assistant_response = response.content
+
+            st.markdown(assistant_response)
+            st.session_state.messages.append({"role": "assistant", "content": assistant_response})
+            # with st.spinner("Thinking..."):
+            #     if len(st.session_state.messages) > 1:
+            #         response = llm.invoke([{"role": m["role"], "content": m["content"]} for m in st.session_state.messages])
+            #     else:
+            #         response = rag_chain.invoke(prompt)
+            #         response = response['result']
+            #
+            #     st.markdown(response)
+            #     st.session_state.messages.append({"role": "assistant", "content": response})
+            #
 
 if st.session_state.messages and st.button("📄 Download Chat as PDF"):
     pdf_file = export_chat_to_pdf(st.session_state.messages)
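For reference, a minimal standalone sketch of the new code path, assuming an OpenAI-backed ChatOpenAI model (the model name below is illustrative, not taken from the Space): it converts the role/content dicts that Streamlit keeps in st.session_state.messages into LangChain HumanMessage/AIMessage objects and passes them to the chat model. One design note: the committed code calls llm.invoke([HumanMessage(content=prompt)] + chat_history), placing the current prompt before the history; the conventional ordering, used in this sketch, is prior history first and the current question last.

from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice

# History in the same shape Streamlit stores it: role/content dicts.
messages = [
    {"role": "user", "content": "What does a stage II melanoma diagnosis mean?"},
    {"role": "assistant", "content": "Stage II means a thicker tumor with no detected spread."},
]
prompt = "What are the usual treatment options?"

# Convert the stored dicts to LangChain message objects.
chat_history = []
for msg in messages:
    if msg["role"] == "user":
        chat_history.append(HumanMessage(content=msg["content"]))
    else:
        chat_history.append(AIMessage(content=msg["content"]))

# Prior turns first, current question last.
response = llm.invoke(chat_history + [HumanMessage(content=prompt)])
print(response.content)  # plain string reply from the model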
requirements.txt
CHANGED
@@ -16,4 +16,5 @@ langchain_openai
 nest_asyncio
 sentence_transformers
 langchain-qdrant
-huggingface_hub
+huggingface_hub
+langchain_core
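The new langchain_core requirement backs the HumanMessage/AIMessage import added in app.py. A quick sanity check, assuming the environment was rebuilt with pip install -r requirements.txt:

# Verify the dependency added alongside huggingface_hub resolves.
from langchain_core.messages import AIMessage, HumanMessage

print(HumanMessage(content="ping").content, AIMessage(content="pong").content)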