bleysg committed
Commit 380e40f · 1 Parent(s): 8378e8f

Update app.py

Files changed (1): app.py (+5, -5)
app.py CHANGED
@@ -6,12 +6,12 @@ import openai
 
 print(os.environ)
 openai.api_base = os.environ.get("OPENAI_API_BASE")
-openai.api_key = os.environ.get("OPENAI_API_KEY")
+#openai.api_key = os.environ.get("OPENAI_API_KEY")
 
 BASE_SYSTEM_MESSAGE = """### System Prompt\n"""
 
 def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
-    completion = openai.Completion.create(model="Phind/Phind-CodeLlama-34B-v2", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
+    completion = openai.Completion.create(model="/workspace/text-generation-webui/models/wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
     for chunk in completion:
         yield chunk["choices"][0]["text"]
 
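For context, this hunk points the OpenAI client at a local OpenAI-compatible endpoint and streams completions from a local GGUF model file. Below is a minimal sketch of driving the same kind of streaming call outside Gradio; it assumes the pre-1.0 openai Python package (as app.py uses), that OPENAI_API_BASE points at a server such as text-generation-webui's OpenAI-compatible API, and the prompt and parameter values are illustrative only.

# Minimal sketch, not part of this commit: stream a completion the way
# make_prediction does and join the streamed chunks into one string.
import os
import openai

openai.api_base = os.environ.get("OPENAI_API_BASE")
# Local OpenAI-compatible servers typically ignore the key, but the 0.x
# client still wants one; fall back to a dummy value for illustration.
openai.api_key = os.environ.get("OPENAI_API_KEY", "none")

def stream_completion(prompt, max_tokens=256, temperature=0.7, top_p=0.9):
    completion = openai.Completion.create(
        model="/workspace/text-generation-webui/models/wizardcoder-python-34b-v1.0.Q5_K_M.gguf",
        prompt=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
        stop=["</s>", "<|im_end|>"],
    )
    for chunk in completion:
        # Each streamed chunk carries the next slice of generated text.
        yield chunk["choices"][0]["text"]

if __name__ == "__main__":
    prompt = "### System Prompt\nYou are a helpful coding assistant.\n###Instruction\nWrite a Python hello world.\n\n###Response\n"
    print("".join(stream_completion(prompt)))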
@@ -33,7 +33,7 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetit
     history = history or []
 
     messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
-        "\n".join(["\n".join(["### User Message\n"+item[0]+"\n\n", "### Assistant\n"+item[1]+"\n\n"])
+        "\n".join(["\n".join(["###Instruction\n"+item[0]+"\n\n", "###Response\n"+item[1]+"\n\n"])
                    for item in history])
     # strip the last `<|end_of_turn|>` from the messages
     #messages = messages.rstrip("<|end_of_turn|>")
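The second hunk switches the chat-history flattening from ### User Message / ### Assistant headers to the ###Instruction / ###Response headers used to prompt the new backend model. A small self-contained sketch of what that join produces follows; the helper name build_prompt and the sample history are illustrative, not part of the commit.

# Illustrative helper, not in app.py: flatten a Gradio-style history of
# [user, assistant] pairs into the prompt layout built in the hunk above.
BASE_SYSTEM_MESSAGE = """### System Prompt\n"""

def build_prompt(history, system_message=""):
    return BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
        "\n".join(["\n".join(["###Instruction\n" + user + "\n\n",
                              "###Response\n" + assistant + "\n\n"])
                   for user, assistant in history])

print(build_prompt([["Reverse a string in Python.", "Use s[::-1]."]],
                   "You are a helpful coding assistant."))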
@@ -75,11 +75,11 @@ with gr.Blocks(css=CSS) as demo:
     with gr.Row():
         with gr.Column():
             gr.Markdown(f"""
-                    ## This demo is an unquantized GPU chatbot of [Phind-CodeLlama-34B-v2](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)
+                    ## This demo is an unquantized GPU chatbot of [WizardCoder-Python-34B-V1.0-GGUF](https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF)
                     Brought to you by your friends at Alignment Lab AI, garage-bAInd, Open Access AI Collective, and OpenChat!
                     """)
     with gr.Row():
-        gr.Markdown("# 🔍 Phind CodeLlama 34B Playground Space! 🔎")
+        gr.Markdown("# 🔍 WizardCoder-Python-34B-V1.0-GGUF Playground Space! 🔎")
     with gr.Row():
         #chatbot = gr.Chatbot().style(height=500)
         chatbot = gr.Chatbot(elem_id="chatbot")
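For orientation, a stripped-down sketch of the Blocks layout the third hunk edits: two Markdown banner rows followed by the Chatbot component. The CSS, generation controls, and event wiring of the real app.py are omitted, and a recent Gradio release is assumed.

# Stripped-down layout sketch, not the full app.py.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                "## This demo is a chatbot of "
                "[WizardCoder-Python-34B-V1.0-GGUF](https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF)"
            )
    with gr.Row():
        gr.Markdown("# 🔍 WizardCoder-Python-34B-V1.0-GGUF Playground Space! 🔎")
    with gr.Row():
        chatbot = gr.Chatbot(elem_id="chatbot")

if __name__ == "__main__":
    demo.queue().launch()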
 