"""Conversational RAG chatbot entry point.

Wires the Gemini LLM, the vector-store retriever, conversation memory,
and the chat prompt into a single ConversationalRetrievalChain, and
exposes `get_chat_response` for callers.
"""

from chatbot.llm import gemini_llm
from chatbot.retrieval import get_vector_db
from chatbot.memory import memory
from chatbot.prompts import chat_prompt
from langchain.chains import ConversationalRetrievalChain

# Retriever over the project vector store; k=20 documents per query.
vector_db = get_vector_db()
retriever = vector_db.as_retriever(search_kwargs={"k": 20})

# The chain owns `memory`: it reads chat history before each run and
# saves the new (question, answer) turn afterwards — callers must NOT
# save to memory again themselves.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=gemini_llm,
    retriever=retriever,
    memory=memory,
    return_source_documents=False,
    combine_docs_chain_kwargs={"prompt": chat_prompt},
    output_key="result",
)


def get_chat_response(user_input: str) -> str:
    """Answer `user_input` using retrieval-augmented generation.

    Args:
        user_input: The user's message for this conversation turn.

    Returns:
        The model's answer (the chain's "result" output).
    """
    # "question" is the chain's declared input key; passing it explicitly
    # avoids relying on the implicit single-string -> single-key mapping.
    response = qa_chain({"question": user_input})
    # NOTE: no manual memory.save_context() here — the chain was built
    # with memory=memory and saves the turn itself. The previous explicit
    # save duplicated every turn in the conversation history.
    return response["result"]