Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -3,19 +3,27 @@ from openai import OpenAI
 from datetime import datetime
 import gradio as gr
 import time
+import openai # Already imported OpenAI above, this line is redundant

 # --- Constants ---
+# Use a model available in the dropdown as the default
+DEFAULT_MODEL = "gpt-4o-mini-2024-07-18"
+DEFAULT_TEMPERATURE = 1.0
+DEFAULT_TOP_P = 1.0
+DEFAULT_FREQ_PENALTY = 0
+DEFAULT_PRES_PENALTY = 0
+MAX_TOKENS = 2048 # This is often controlled by the model, but can be a limit
 MAX_HISTORY_LENGTH = 5

 # --- API Key and Client Initialization ---
+# Ensure the API key is set in your Hugging Face Space secrets
 API_KEY = os.getenv("OPENAI_API_KEY")
+if not API_KEY:
+    # Provide a clear error message if the key is missing
+    # In a real HF Space, you might raise an exception or disable the UI
+    print("Error: OPENAI_API_KEY environment variable not set.")
+    # Consider adding a gr.Markdown warning in the UI as well if API_KEY is None
+    # For now, we'll let it proceed, but OpenAI() will likely raise an error later.
 client = OpenAI(api_key=API_KEY)

 # --- Helper Functions ---
@@ -24,137 +32,199 @@ def get_openai_response(prompt, model=DEFAULT_MODEL, temperature=DEFAULT_TEMPERA
     max_tokens=MAX_TOKENS, system_prompt="", chat_history=None):
     """Gets a response from the OpenAI API, handling errors and streaming."""
     today_day = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    messages = []
+    # Add system prompt if provided
+    effective_system_prompt = f"Today's date is: {today_day}. {system_prompt}".strip()
+    if effective_system_prompt:
+        messages.append({"role": "system", "content": effective_system_prompt})
+
+    # Add chat history
+    if chat_history:
+        for turn in chat_history:
+            # Ensure turn has two elements before trying to access them
+            if len(turn) == 2 and turn[0] is not None and turn[1] is not None:
+                messages.append({"role": "user", "content": str(turn[0])}) # Ensure content is string
+                messages.append({"role": "assistant", "content": str(turn[1])}) # Ensure content is string
+            # else: # Optional: Handle malformed history entries
+            #     print(f"Skipping malformed history entry: {turn}")
+
+    # Add the current user prompt
+    messages.append({"role": "user", "content": prompt})

+    try:
+        # *** This is the correct, modern API call for chat models ***
         response = client.chat.completions.create(
             model=model,
             messages=messages,
             temperature=temperature,
-            max_tokens=max_tokens, #
+            max_tokens=max_tokens, # Correct parameter name for this call
             top_p=top_p,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
-            response_format={"type": "text"},
-            stream=True # Enable streaming
+            # response_format={"type": "text"}, # Usually not needed unless forcing JSON etc. Let model decide default.
+            stream=True # Enable streaming
         )

         collected_messages = []
+        full_reply_content = "" # Initialize before loop
         for chunk in response:
+            # Check if delta and content exist before accessing
+            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
+                chunk_message = chunk.choices[0].delta.content
                 collected_messages.append(chunk_message)
+                full_reply_content = ''.join(collected_messages)
+                yield full_reply_content # Yield the accumulated message

+    # Use specific exceptions from the openai library
     except openai.APIConnectionError as e:
+        print(f"OpenAI API request failed: {e}")
+        yield f"Error: Could not connect to OpenAI API. {e}"
     except openai.RateLimitError as e:
+        print(f"OpenAI API request failed: {e}")
+        yield f"Error: Rate limit exceeded. Please try again later. {e}"
+    except openai.AuthenticationError as e:
+        print(f"OpenAI API request failed: {e}")
+        yield f"Error: Authentication failed. Check your API key. {e}"
     except openai.APIStatusError as e:
+        print(f"OpenAI API request failed: {e}")
+        yield f"Error: OpenAI API returned an error (Status: {e.status_code}). {e}"
     except Exception as e:
+        print(f"An unexpected error occurred: {e}")
+        yield f"An unexpected error occurred: {e}"

 def update_ui(message, chat_history, model, temperature, top_p, frequency_penalty, presence_penalty, system_prompt, history_length):
     """Updates the Gradio UI; handles streaming response."""
+    if not message: # Don't send empty messages
+        yield "", chat_history
+        return
+
+    # Trim history before sending to API if it's longer than needed for context
+    # (Optional optimization, the API call does include full history passed here)
+    # history_for_api = chat_history[-(MAX_HISTORY_LENGTH*2):] # Keep pairs
+
     bot_message_gen = get_openai_response(
         prompt=message, model=model, temperature=temperature, top_p=top_p,
         frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
-        system_prompt=system_prompt, chat_history=chat_history
+        system_prompt=system_prompt, chat_history=chat_history # Pass full history for context
     )
+
+    chat_history.append((message, "")) # Add user message and placeholder for bot response
+
+    # Stream the response
+    for bot_message_chunk in bot_message_gen:
+        chat_history[-1] = (message, bot_message_chunk) # Update the last entry with the streamed chunk
+        # Control visibility based on the slider
+        visible_history = chat_history[-int(history_length):] if history_length > 0 else []
+        # time.sleep(0.02) # Slightly shorter delay might feel more responsive
         yield "", visible_history

 # --- Gradio Interface ---
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    # Keep your informative Markdown sections
+    gr.Markdown("# Chat with OpenAI Models") # Updated Title
+    gr.Markdown("β GPT-4.5 experiment details from Feb 27, 2025...") # Keep context
     gr.Markdown("β [Buy me a Coffee](https://buymeacoffee.com/diegocp01m)")
     gr.Markdown("---")
     gr.Markdown("""
-    **Here's how the experiment went:**
+    π **GPT-4.5 EXPERIMENT RECAP:** GPT-4.5 was briefly accessible via API on Feb 27, 2025.
+    This space allowed free access during that window.
+
     π **Chat Completions Metrics (Feb 27, 2025):**
     - 111 requests
     - 64,764 Total tokens processed
     - Total spend: $10.99
+
     This space went live at 4:23 PM ET, Feb 27, 2025 until 8:53 PM ET. [Read More](https://x.com/diegocabezas01/status/1895291365376041045)
     Results from OpenAI platform: π
     """)
     gr.Image("https://pbs.twimg.com/media/Gk1tVnRXkAASa2U?format=jpg&name=4096x4096", elem_id="gpt4_5_image")
+    gr.Markdown("Chat with available models like GPT-4o mini below: π")
+
     with gr.Row():
         with gr.Column(scale=4):
             chatbot = gr.Chatbot(
+                label="Chat Window", # Added label for clarity
                 show_label=False,
                 avatar_images=(
+                    # Using generic user icon
+                    "https://cdn-icons-png.flaticon.com/512/1077/1077114.png", # User
+                    # Using generic AI icon
+                    "https://cdn-icons-png.flaticon.com/512/8649/8649540.png" # AI
                 ),
                 render_markdown=True,
-                height=500
+                height=500,
+                bubble_full_width=False # Optional: makes bubbles look nicer
+            )
-            msg = gr.Textbox(placeholder="Type your message here...", scale=4, show_label=False)
+            msg = gr.Textbox(
+                label="Your Message", # Added label
+                placeholder="Type your message here and press Enter...",
+                scale=4,
+                show_label=False,
+                container=False # Makes it sit closer to the button
             )

     with gr.Accordion("Advanced Options", open=False):
         model_select = gr.Dropdown(
             label="Model",
+            # Ensure these models are available to your API key
+            choices=["gpt-4o-mini-2024-07-18", "gpt-3.5-turbo-0125", "gpt-4o"],
+            value=DEFAULT_MODEL, # Use the constant defined above
             interactive=True
         )
-        temperature_slider = gr.Slider(label="Temperature", minimum=0.0, maximum=2.0, value=DEFAULT_TEMPERATURE, step=0.1, interactive=True)
-        top_p_slider = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=DEFAULT_TOP_P, step=0.05, interactive=True)
-        frequency_penalty_slider = gr.Slider(label="Frequency Penalty", minimum=-2.0, maximum=2.0, value=DEFAULT_FREQ_PENALTY, step=0.1, interactive=True)
-        presence_penalty_slider = gr.Slider(label="Presence Penalty", minimum=-2.0, maximum=2.0, value=DEFAULT_PRES_PENALTY, step=0.1, interactive=True)
-        history_length_slider = gr.Slider(label="Chat History Length", minimum=1, maximum=20, value=MAX_HISTORY_LENGTH, step=1, interactive=True)
+        temperature_slider = gr.Slider(label="Temperature (Randomness)", minimum=0.0, maximum=2.0, value=DEFAULT_TEMPERATURE, step=0.1, interactive=True)
+        top_p_slider = gr.Slider(label="Top P (Nucleus Sampling)", minimum=0.0, maximum=1.0, value=DEFAULT_TOP_P, step=0.05, interactive=True)
+        frequency_penalty_slider = gr.Slider(label="Frequency Penalty (Discourage repetition)", minimum=-2.0, maximum=2.0, value=DEFAULT_FREQ_PENALTY, step=0.1, interactive=True)
+        presence_penalty_slider = gr.Slider(label="Presence Penalty (Discourage repeating topics)", minimum=-2.0, maximum=2.0, value=DEFAULT_PRES_PENALTY, step=0.1, interactive=True)
+        system_prompt_textbox = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3, interactive=True)
+        history_length_slider = gr.Slider(label="Chat History Display Length", minimum=1, maximum=20, value=MAX_HISTORY_LENGTH, step=1, interactive=True)

     with gr.Row():
+        # Place clear button first maybe?
+        clear = gr.Button("Clear Chat")
+        send = gr.Button("Send Message", variant="primary") # Make send more prominent

     # --- Event Handlers ---
+    # Define reusable inputs list
+    inputs = [
+        msg, chatbot, model_select, temperature_slider, top_p_slider,
+        frequency_penalty_slider, presence_penalty_slider, system_prompt_textbox,
+        history_length_slider
+    ]
+    # Define reusable outputs list
+    outputs = [msg, chatbot]
+
+    # Connect send button click
+    send.click(
         update_ui,
+        inputs=inputs,
+        outputs=outputs,
+        queue=True # Use queue for handling multiple users potentially
     )
+
+    # Connect textbox submit (Enter key)
     msg.submit(
         update_ui,
+        inputs=inputs,
+        outputs=outputs,
+        queue=True
     )
+
+    # Connect clear button
+    # Clears the message box and the chatbot history
+    clear.click(lambda: (None, []), None, outputs=[msg, chatbot], queue=False)

     gr.Examples(
-        inputs=msg
+        examples=["Tell me about the latest AI developments", "Write a short story about a friendly robot", "Explain black holes simply"],
+        inputs=msg,
+        label="Example Prompts" # Add label
     )
-    msg.focus()
+    # msg.focus() # Autoselect msg box - Sometimes causes issues, use if needed

 # --- Launch ---
 if __name__ == "__main__":
+    # Add share=True for a public link if running locally and want to share
+    # Add debug=True for more verbose logging during development
+    demo.queue() # Enable queue for better handling of multiple requests
+    demo.launch()