Spaces: bleysg / WizardCoder-Python-34b-v1.0
Update app.py

app.py CHANGED
@@ -1,33 +1,3 @@
-
-Hugging Face's logo Hugging Face
-
-Models
-Datasets
-Spaces
-Docs
-Pricing
-
-Spaces:
-bleysg
-/
-WizardCoder-Python-34b-v1.0
-private
-App
-Files
-Community
-Settings
-WizardCoder-Python-34b-v1.0
-/ app.py
-bleysg's picture
-bleysg
-Update app.py
-7a84795
-about 1 hour ago
-raw
-history
-blame
-No virus
-5.52 kB
 import os
 import re
 import logging
@@ -35,13 +5,20 @@ import gradio as gr
 import openai
 
 print(os.environ)
-openai.api_base = os.environ.get("OPENAI_API_BASE")
+openai.api_base1 = os.environ.get("OPENAI_API_BASE")
+openai.api_base2 = os.environ.get("OPENAI_API_BASE2")
 openai.api_key = os.environ.get("OPENAI_API_KEY")
 
 BASE_SYSTEM_MESSAGE = """"""
 
-def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
-    completion = openai.Completion.create(model="wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
+
+def make_prediction1(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
+    completion = openai.Completion.create(api_base=openai.api_base1, model="wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
+    for chunk in completion:
+        yield chunk["choices"][0]["text"]
+
+def make_prediction2(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
+    completion = openai.Completion.create(api_base=openai.api_base2, model="wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
     for chunk in completion:
         yield chunk["choices"][0]["text"]
 
@@ -59,7 +36,7 @@ def user(message, history):
     return "", history
 
 
-def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+def chat1(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
     history = history or []
 
     messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
@@ -70,7 +47,38 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
     # remove last space from assistant, some models output a ZWSP if you leave a space
     messages = messages.rstrip()
 
-    prediction = make_prediction(
+    prediction = make_prediction1(
+        messages,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        repetition_penalty=repetition_penalty,
+    )
+    for tokens in prediction:
+        tokens = re.findall(r'(.*?)(\s|$)', tokens)
+        for subtoken in tokens:
+            subtoken = "".join(subtoken)
+            # Remove "Response\n" if it's at the beginning of the assistant's output
+            if subtoken.startswith("Response"):
+                subtoken = subtoken[len("Response"):]
+            answer = subtoken
+            history[-1][1] += answer
+            # stream the response
+            yield history, history, ""
+
+def chat2(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+    history = history or []
+
+    messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
+        "\n".join(["\n".join(["###Instruction\n"+item[0]+"\n\n", "###Response\n"+item[1]+"\n\n"])
+                   for item in history])
+    # strip the last `<|end_of_turn|>` from the messages
+    #messages = messages.rstrip("<|end_of_turn|>")
+    # remove last space from assistant, some models output a ZWSP if you leave a space
+    messages = messages.rstrip()
+
+    prediction = make_prediction2(
         messages,
         max_tokens=max_tokens,
         temperature=temperature,
@@ -141,15 +149,20 @@ with gr.Blocks(css=CSS) as demo:
 
     chat_history_state = gr.State()
     clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
-    clear.click(lambda: None, None, chatbot, queue=False)
-    clear.click(lambda: None, None, chatbot, queue=False)
+    clear.click(lambda: None, None, chatbot1, queue=False)
+    clear.click(lambda: None, None, chatbot2, queue=False)
 
-    submit_click_event = submit.click(
+    submit_click_event1 = submit.click(
+        fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+    ).then(
+        fn=chat1, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot1, chat_history_state, message], queue=True
+    )
+    submit_click_event2 = submit.click(
         fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
     ).then(
-        fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True
+        fn=chat2, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot2, chat_history_state, message], queue=True
     )
-    stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
+    stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event1, submit_click_event2], queue=False)
 
     demo.queue(max_size=48, concurrency_count=8).launch(debug=True, server_name="0.0.0.0", server_port=7860)
 
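Note: make_prediction1 and make_prediction2 differ only in which endpoint they call. A minimal sketch of how the duplicated call could be factored into one helper, assuming (as the committed code does) the legacy openai-python 0.x Completion API, which accepts a per-request api_base and forwards non-standard sampling kwargs such as top_k and repetition_penalty to the OpenAI-compatible backend:

import os
import openai

MODEL = "wizardcoder-python-34b-v1.0.Q5_K_M.gguf"

def make_prediction(api_base, prompt, max_tokens=None, temperature=None,
                    top_p=None, top_k=None, repetition_penalty=None):
    # One streaming completion against whichever endpoint is passed in.
    completion = openai.Completion.create(
        api_base=api_base,
        model=MODEL,
        prompt=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        stream=True,
        stop=["</s>", "<|im_end|>"],
    )
    for chunk in completion:
        yield chunk["choices"][0]["text"]

# The two endpoints then become data rather than duplicated functions:
# make_prediction(os.environ.get("OPENAI_API_BASE"), prompt, ...)
# make_prediction(os.environ.get("OPENAI_API_BASE2"), prompt, ...)

chat1 and chat2 could be collapsed the same way by taking the endpoint as a parameter.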
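The prompt the chat functions assemble is a flat Alpaca-style string, not a chat-message list. A worked example of what the committed expression produces for a single in-progress turn (plain Python, empty system message assumed):

BASE_SYSTEM_MESSAGE = ""
system_message = ""
history = [["Write a hello world program in Python", ""]]

messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
    "\n".join(["\n".join(["###Instruction\n"+item[0]+"\n\n", "###Response\n"+item[1]+"\n\n"])
               for item in history])
messages = messages.rstrip()

print(repr(messages))
# '\n###Instruction\nWrite a hello world program in Python\n\n\n###Response'

The rstrip leaves the prompt ending at "###Response", so the model continues from there and the generated text streams straight into the empty assistant slot.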
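The re.findall(r'(.*?)(\s|$)', tokens) step in chat1/chat2 re-splits each streamed chunk into (text, whitespace) pairs so output is appended in word-sized pieces. An illustration:

import re

chunk = "def hello():\n"
pairs = re.findall(r'(.*?)(\s|$)', chunk)
print(pairs)
# [('def', ' '), ('hello():', '\n'), ('', '')]
print(["".join(p) for p in pairs])
# ['def ', 'hello():\n', '']

The trailing ('', '') joins to an empty string, so it appends nothing visible to the history.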
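Finally, the stop button works through Gradio's event cancellation: passing both click events to cancels= aborts the in-flight generator chains. A self-contained sketch of the same pattern (hypothetical minimal UI, Gradio 3.x as used by this Space):

import time
import gradio as gr

def slow_stream():
    # Stand-in for the chat generators: yields partial output until done
    # or until the event is cancelled.
    text = ""
    for ch in "streaming response...":
        text += ch
        time.sleep(0.1)
        yield text

with gr.Blocks() as demo:
    out = gr.Textbox()
    submit = gr.Button("Submit")
    stop = gr.Button("Stop")
    click_event = submit.click(fn=slow_stream, inputs=None, outputs=[out])
    # cancels= is what lets Stop interrupt a queued or running event.
    stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event], queue=False)

demo.queue(concurrency_count=2).launch()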