inoid committed on
Commit
a6e1323
1 Parent(s): 5a99733

Update local changes

Files changed (2)
  1. app.py +10 -5
  2. seminar_edition_ai.py +10 -4
app.py CHANGED
@@ -6,6 +6,7 @@ from seminar_edition_ai import upload_file_ex, predictContemplando, predictProcl
 
 HISTORY_ANSWER = ''
 
+llmModel = None
 def activeSermonGuideZone(KEY):
     #print("Button show " + buttonEvent.label)
     return {text_button: gr.update (interactive = True), question_option: [KEY]}
@@ -22,23 +23,26 @@ def showMessage(questionAnswer, KEY):
 with gr.Blocks() as demo:
 
     gr.Markdown("SermonLab AI Demo.")
+    global llmModel
+
+    llmModelState = gr.State([llmModel])
     with gr.Tab("Preparando mi Sermón"):
         text_input = gr.Textbox(label="Tópico del sermón")
 
-        text_output = gr.Textbox(label="Respuesta", lines=10)
+        text_output = gr.Textbox(label="Respuesta", lines = 10)
 
         text_button = gr.Button("Crear")
 
         text_download = gr.DownloadButton(
             label="Descargar",
-            value=fileAddresToDownload,
+            value = fileAddresToDownload,
             every=10
         )
 
         text_button.click(
             fn = predictFromInit,
-            inputs=text_input,
-            outputs=text_output
+            inputs = [text_input, llmModelState],
+            outputs = text_output
         )
 
         text_download.click(
@@ -227,6 +231,7 @@ if __name__ == "__main__":
     llmBuilder = GeminiLLM()
 
     embed_model = llmBuilder.getEmbeddingsModel()
-    llm = llmBuilder.getLLM()
+    global llmModel
+    llmModel = llmBuilder.getLLM()
 
     demo.launch(share=True)
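
The app.py side of this commit replaces the module-global `llm` with a `gr.State` wrapper so the click handler receives the model explicitly. A minimal, self-contained sketch of that pattern follows — `FakeLLM` and `predict` are illustrative stand-ins, not code from this repo, and it assumes the stored object tolerates the per-session copy Gradio makes of a State's initial value:

import gradio as gr

class FakeLLM:
    # stand-in for the object returned by GeminiLLM().getLLM()
    def invoke(self, prompt):
        return f"answer for: {prompt}"

llmModel = FakeLLM()

def predict(topic, llm_list):
    # unpack the one-element list, mirroring predictFromInit after this commit
    llm = llm_list[0] if llm_list else None
    return llm.invoke(topic) if llm else "no model loaded"

with gr.Blocks() as demo:
    llm_state = gr.State([llmModel])  # session state, passed to handlers as an input
    topic = gr.Textbox(label="Topic")
    answer = gr.Textbox(label="Answer")
    gr.Button("Run").click(fn=predict, inputs=[topic, llm_state], outputs=answer)

demo.launch()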
seminar_edition_ai.py CHANGED
@@ -51,11 +51,15 @@ def getCurrentFileName():
 fileAddresToDownload = f"{DIRECTORY_PATH_TO_DOWNLOAD}{os.sep}{getCurrentFileName()}"
 FILE_PATH_NAME = fileAddresToDownload
 
-def updatePromptTemplate(promptTemplate, inputVariablesTemplate):
+def updatePromptTemplate(
+    llmModel,
+    promptTemplate,
+    inputVariablesTemplate
+):
     prompt = PromptTemplate(template = promptTemplate,
                             input_variables = inputVariablesTemplate)
     chain = load_qa_chain(
-        llm,
+        llmModel,
         chain_type = "stuff",
         prompt = prompt
     )
@@ -99,14 +103,17 @@ def predictProclamando(queryKey):
 ####
 #
 ####
-def predictFromInit(sermonTopic):
+def predictFromInit(sermonTopic, llmModelList):
     global HISTORY_ANSWER
     keyStr = 'SERMON_TOPIC'
 
     templates = SermonGeminiPromptTemplate()
 
+    llm = llmModelList[0] if len(llmModelList) > 0 else None
+
     if HISTORY_ANSWER == '':
         chain = updatePromptTemplate(
+            llm,
             templates.getSermonPromptTemplates()['BUILD_INIT'],
             [keyStr, 'CANT_VERSICULOS', 'context']
         )
@@ -209,7 +216,6 @@ def predictArgumentQuestionBuild(questionAnswer):
 
     return answer
 
-
 # A utility function for answer generation
 def askQuestion(
     question,
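
The matching change in seminar_edition_ai.py threads the model through updatePromptTemplate instead of reading a global llm. A minimal sketch of the resulting call shape, assuming LangChain's legacy load_qa_chain API; the template string in the usage comment is illustrative, not from the repo:

from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain

def updatePromptTemplate(llmModel, promptTemplate, inputVariablesTemplate):
    # build the prompt, then bind the explicitly passed model to a "stuff" QA chain
    prompt = PromptTemplate(template = promptTemplate,
                            input_variables = inputVariablesTemplate)
    return load_qa_chain(llmModel, chain_type = "stuff", prompt = prompt)

# Hypothetical usage mirroring predictFromInit:
# chain = updatePromptTemplate(llm,
#                              "Sermón sobre {SERMON_TOPIC} con {CANT_VERSICULOS} versículos. {context}",
#                              ['SERMON_TOPIC', 'CANT_VERSICULOS', 'context'])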