avreymi committed on
Commit dedbd51 · 1 Parent(s): f72c31a
Files changed (2)
  1. app.py +1 -2
  2. pipline.py +13 -7
app.py CHANGED
@@ -16,9 +16,8 @@ out_area = st.container()
 
 def __run_pipline():
     out_area.markdown(":green[Running pipline]")
-    out_area.text(pipline.chain(state.input_text))
+    out_area.text(pipline.chain_TI(state.input_text))
 
 
 in_area.text_area("input_text", key="input_text")
-in_area.text_input("word", key="word")
 in_area.button("run", on_click=__run_pipline)
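For context, pipline.chain_TI is the LLMChain defined in pipline.py below. A minimal sketch of the surrounding Streamlit page, assuming state is an alias for st.session_state and that in_area is a second st.container() created next to the out_area named in the hunk header (none of those lines are part of this diff):

# Sketch only: reconstructs the parts of app.py that this hunk does not show.
import streamlit as st
import pipline  # module name as spelled in this repo

state = st.session_state      # assumed alias; the hunk refers to state.input_text
in_area = st.container()      # assumed input container
out_area = st.container()     # output container named in the hunk header


def __run_pipline():
    out_area.markdown(":green[Running pipline]")
    out_area.text(pipline.chain_TI(state.input_text))


in_area.text_area("input_text", key="input_text")
in_area.button("run", on_click=__run_pipline)

Note that in the legacy LangChain API imported here, calling an LLMChain directly, as __run_pipline does, typically returns a dict of inputs and outputs, whereas chain_TI.run(state.input_text) returns only the generated text.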
pipline.py CHANGED
@@ -3,22 +3,28 @@ from langchain import PromptTemplate, OpenAI, LLMChain
 from langchain.prompts import load_prompt
 import wikipedia
 import os
+
 llm = OpenAI()
 # save templates to a file
-template = """Question:
+try_imply_template = """Question:
 The user wrote me the following text, what is he trying to imply to me?
 {user_input}
 
 Answer: Let's think step by step."""
 # An example prompt with multiple input variables
-input_prompt = PromptTemplate(
+TI_prompt = PromptTemplate(
     input_variables=["user_input"],
-    template=template,
+    template=try_imply_template,
 )
-input_prompt.save("awesome_prompt.json")  # Save to JSON file
 
 
-prompt = load_prompt("awesome_prompt.json")
+connection_between_terms_template = PromptTemplate(
+    template="""Question:
+    What is the connection between {term1} and {term2}?
+    Answer: Let's think step by step.""",
+    input_variables=["term1", "term2"],
+)
+
 
-prompt = PromptTemplate(template=template, input_variables=["user_input"])
-chain = LLMChain(prompt=prompt, llm=llm)
+chain_TI = LLMChain(prompt=TI_prompt, llm=llm)
+chain_CC = LLMChain(prompt=connection_between_terms_template, llm=llm)
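A short usage sketch of the two exported chains, with invented example inputs and assuming OPENAI_API_KEY is set in the environment so that OpenAI() can authenticate: chain_TI has the single user_input variable, so a bare string can be passed to run, while chain_CC has two variables and needs keyword arguments.

# Hypothetical usage of the chains defined in pipline.py; example inputs are invented.
import pipline

# Single input variable: a positional string maps onto user_input.
print(pipline.chain_TI.run("Well, I suppose someone has to clean this up."))

# Two input variables: pass them as keyword arguments.
print(pipline.chain_CC.run(term1="entropy", term2="information"))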