awacke1 committed
Commit a275740 · 1 Parent(s): e9cd015

Update app.py

Files changed (1)
  1. app.py +9 -11
app.py CHANGED
@@ -188,20 +188,18 @@ def chat_with_model(prompt, document_section, model_choice='Llama-2-7b-chat-hf'):
 
        except:
            st.write('Stream llm issue')
-           SpeechSynthesis(result)
+
+       full_reply_content = result
+       st.write("Elapsed time:")
+       st.write(time.time() - start_time)
+
+       filename = generate_filename(full_reply_content, prompt)
+       create_file(filename, prompt, full_reply_content, should_save)
+       readitaloud(full_reply_content)
        return result
+
    except:
        st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
-
-   full_reply_content = result
-   st.write("Elapsed time:")
-   st.write(time.time() - start_time)
-
-   filename = generate_filename(full_reply_content, prompt)
-   create_file(filename, prompt, full_reply_content, should_save)
-   readitaloud(full_reply_content)
-   return result
-
 
 # Chat and Chat with files
 def chat_with_model2(prompt, document_section, model_choice='gpt-3.5-turbo'):
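For reference, the control flow of chat_with_model after this commit looks roughly like the sketch below. This is a minimal reconstruction from the hunk above, not the full app.py: the real streaming call is replaced by a placeholder loop, and generate_filename, create_file, readitaloud, should_save, and the exact indentation are assumptions stubbed in so the sketch runs on its own. The net effect of the commit is that the elapsed-time report, file save, and read-aloud now run on the success path before return, and the SpeechSynthesis(result) call in the stream error handler is removed.

```python
import time
import streamlit as st

# Stubs standing in for helpers defined elsewhere in app.py (assumptions, not the real code).
should_save = True

def generate_filename(reply, prompt):
    return "reply.md"

def create_file(filename, prompt, reply, should_save):
    if should_save:
        with open(filename, "w") as f:
            f.write(f"# Prompt\n{prompt}\n\n# Reply\n{reply}\n")

def readitaloud(text):
    st.write(f"(read aloud) {text[:80]}")

def chat_with_model(prompt, document_section, model_choice='Llama-2-7b-chat-hf'):
    start_time = time.time()
    result = ''
    try:
        # Placeholder for the real token-streaming loop against the Llama endpoint.
        for token in ["Hello", ",", " ", "world"]:
            try:
                result += token
            except:
                st.write('Stream llm issue')

        # Moved by this commit: runs before return on the success path,
        # instead of after the outer except block.
        full_reply_content = result
        st.write("Elapsed time:")
        st.write(time.time() - start_time)

        filename = generate_filename(full_reply_content, prompt)
        create_file(filename, prompt, full_reply_content, should_save)
        readitaloud(full_reply_content)
        return result

    except:
        st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
```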