ramwar committed on
Commit
5ced1cf
·
1 Parent(s): 6316373

latest version

Browse files
Files changed (1) hide show
  1. app.py +20 -18
app.py CHANGED
@@ -15,7 +15,7 @@ from langchain.callbacks.manager import CallbackManager
15
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
16
  import gradio as gr
17
 
18
-
19
  question_examples = [
20
  "What is the name of the captain of the Nautilus?",
21
  "What are the names of the crew members of the Nautilus?",
@@ -27,17 +27,20 @@ question_examples = [
27
  "Why doesn't Captain Nemo hate the society?"
28
  ]
29
 
 
30
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = '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'
 
 
31
  REPO_ID = "declare-lab/flan-alpaca-large"
32
 
33
  transformers.utils.move_cache()
34
 
35
- #llm = HuggingFaceHub(
36
- # repo_id=REPO_ID,
37
- # model_kwargs={"temperature":0, "max_length":512}
38
- #)
39
 
40
- #embeddings = HuggingFaceEmbeddings()
41
  callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
42
 
43
  def read_textfile(File):
@@ -45,20 +48,19 @@ def read_textfile(File):
45
  documents = loader.load()
46
  text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
47
  docs = text_splitter.split_documents(documents)
48
- #db = FAISS.from_documents(docs, embeddings)
49
- #db.save_local(folder_path=".", index_name="faiss_index")
50
 
51
  def ask_question(question, chat_history):
52
- #chain = load_qa_chain(llm, chain_type="stuff", verbose=False)
53
- #db = FAISS.load_local(folder_path=".", embeddings=embeddings, index_name="faiss_index")
54
- #relevant_docs = db.similarity_search(question)
55
 
56
- answer = ""
57
- #answer = chain.run(
58
- # input_documents=relevant_docs,
59
- # question=question,
60
- # callbacks=callback_manager
61
- #)
62
 
63
  chat_history.append((question, answer))
64
  time.sleep(1)
@@ -84,5 +86,5 @@ with gr.Blocks() as demo:
84
  inputs=[ask_textbox, chatbot],
85
  outputs=[ask_textbox, chatbot]
86
  )
87
- clear_button.click(lambda: None, None, chatbot, queue=False)
88
 
 
15
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
16
  import gradio as gr
17
 
18
+ # example questions
19
  question_examples = [
20
  "What is the name of the captain of the Nautilus?",
21
  "What are the names of the crew members of the Nautilus?",
 
27
  "Why doesn't Captain Nemo hate the society?"
28
  ]
29
 
30
+ # define the API token
31
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = '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'
32
+
33
+ # used repository for llm
34
  REPO_ID = "declare-lab/flan-alpaca-large"
35
 
36
  transformers.utils.move_cache()
37
 
38
+ llm = HuggingFaceHub(
39
+ repo_id=REPO_ID,
40
+ model_kwargs={"temperature":0, "max_length":512}
41
+ )
42
 
43
+ embeddings = HuggingFaceEmbeddings()
44
  callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
45
 
46
  def read_textfile(File):
 
48
  documents = loader.load()
49
  text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
50
  docs = text_splitter.split_documents(documents)
51
+ db = FAISS.from_documents(docs, embeddings)
52
+ db.save_local(folder_path=".", index_name="faiss_index")
53
 
54
  def ask_question(question, chat_history):
55
+ chain = load_qa_chain(llm, chain_type="stuff", verbose=False)
56
+ db = FAISS.load_local(folder_path=".", embeddings=embeddings, index_name="faiss_index")
57
+ relevant_docs = db.similarity_search(question)
58
 
59
+ answer = chain.run(
60
+ input_documents=relevant_docs,
61
+ question=question,
62
+ callbacks=callback_manager
63
+ )
 
64
 
65
  chat_history.append((question, answer))
66
  time.sleep(1)
 
86
  inputs=[ask_textbox, chatbot],
87
  outputs=[ask_textbox, chatbot]
88
  )
89
+ clear_button.click(lambda: None, None, chatbot, queue=False)
90