joey1101 committed (verified)
Commit e3ec85b · Parent: 7691064

Update app.py

Files changed (1):
  1. app.py +93 -0
app.py CHANGED
@@ -8,6 +8,99 @@ from models.sum_model import Sum
 from models.whisper_model import AudioTranslator
 from models.llm_model import LlmReasoner
 
+import os
+from langchain.chains import ConversationalRetrievalChain, StuffDocumentsChain
+from langchain.prompts import PromptTemplate
+from ipex_llm.langchain.llms import TransformersLLM
+from langchain.vectorstores import FAISS
+from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+from ipex_llm.langchain.embeddings import TransformersEmbeddings
+from langchain import LLMChain
+from utils.utils import new_cd
+
+parent_dir = os.path.dirname(__file__)
+
23
+condense_template = """
+Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.
+You can assume the discussion is about the video content.
+REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \
+not sure." Don't try to make up an answer. \
+Chat History:
+{chat_history}
+Follow-up Question: {question}
+Standalone question:
+"""
33
+
+qa_template = """
+You are an AI assistant designed for answering questions about a meeting.
+You are given a word-for-word record of this meeting.
+Try to comprehend the dialogue and provide an answer based on it.
+=========
+{context}
+=========
+Question: {question}
+Answer:
+"""
44
+# CONDENSE_QUESTION_PROMPT condenses the chat history and the follow-up question into a single standalone question
+CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(condense_template)
+# QA_PROMPT sets the tone and purpose of the assistant
+QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["question", "context"])
+# DOC_PROMPT = PromptTemplate.from_template("Video Clip {video_clip}: {page_content}")
+DOC_PROMPT = PromptTemplate.from_template("{page_content}")
+
51
+
+class LlmReasoner:
+    def __init__(self, args):
+        self.history = []
+        self.llm_version = args.llm_version
+        self.embed_version = args.embed_version
+        self.qa_chain = None
+        self.vectorstore = None
+        self.top_k = args.top_k
+        self.qa_max_new_tokens = args.qa_max_new_tokens
+        self.init_model()
+
63
+    def init_model(self):
+        # new_cd temporarily switches the working directory so the relative
+        # checkpoint paths below resolve against this file's location
+        with new_cd(parent_dir):
+            self.llm = TransformersLLM.from_model_id_low_bit(
+                f"..\\checkpoints\\{self.llm_version}")
+            self.llm.streaming = False
+            self.embeddings = TransformersEmbeddings.from_model_id(
+                model_id=f"..\\checkpoints\\{self.embed_version}")
+
71
+    def create_qa_chain(self, args, input_log):
+        self.top_k = args.top_k
+        self.qa_max_new_tokens = args.qa_max_new_tokens
+        self.question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT)
+        self.answer_generator = LLMChain(llm=self.llm, prompt=QA_PROMPT,
+                                         llm_kwargs={"max_new_tokens": self.qa_max_new_tokens})
+        self.doc_chain = StuffDocumentsChain(llm_chain=self.answer_generator, document_prompt=DOC_PROMPT,
+                                             document_variable_name='context')
+        # Split the transcript into character-based chunks for embedding
+        # self.text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0, keep_separator=True)
+        self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=2048, chunk_overlap=0)
+        texts = self.text_splitter.split_text(input_log)
+        self.vectorstore = FAISS.from_texts(texts, self.embeddings,
+                                            metadatas=[{"video_clip": str(i)} for i in range(len(texts))])
+        retriever = self.vectorstore.as_retriever(search_kwargs={"k": self.top_k})
+        self.qa_chain = ConversationalRetrievalChain(retriever=retriever,
+                                                     question_generator=self.question_generator,
+                                                     combine_docs_chain=self.doc_chain,
+                                                     return_generated_question=True,
+                                                     return_source_documents=True,
+                                                     rephrase_question=False)
+
93
+    def __call__(self, question):
+        response = self.qa_chain({"question": question, "chat_history": self.history})
+        answer = response["answer"]
+        generated_question = response["generated_question"]
+        source_documents = response["source_documents"]
+        self.history.append([question, answer])
+        return self.history, generated_question, source_documents
+
+    def clean_history(self):
+        self.history = []
+
 
 class Chat:
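
For readers who want to exercise the new class, here is a minimal usage sketch (not part of the commit). It assumes the LlmReasoner class above is in scope, that low-bit checkpoints have already been exported under checkpoints\, and that args carries the fields read in __init__; the checkpoint folder names and sample transcript below are hypothetical.

    # Minimal sketch, not part of the commit; values below are hypothetical.
    from types import SimpleNamespace

    args = SimpleNamespace(
        llm_version="chatglm3-6b-int4",       # hypothetical low-bit LLM checkpoint folder
        embed_version="all-MiniLM-L6-v2",     # hypothetical embedding checkpoint folder
        top_k=4,
        qa_max_new_tokens=128,
    )

    reasoner = LlmReasoner(args)
    # Index a transcript, then ask a question against it
    reasoner.create_qa_chain(args, input_log="Alice: Let's ship the demo Friday. Bob: Agreed.")
    history, generated_question, sources = reasoner("What did Alice and Bob agree on?")
    print(history[-1][1])   # the answer to the latest question
    reasoner.clean_history()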
106