diegocp01 committed on
Commit
fd1f204
·
verified ·
1 Parent(s): 16565e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -121
app.py CHANGED
@@ -1,22 +1,3 @@
1
- import os
2
- from openai import OpenAI
3
- from datetime import datetime
4
- import gradio as gr
5
- import time
6
- import openai # Redundant import
7
-
8
- # --- Constants ---
9
- # Default model might need to align with what client.responses.create supports
10
- DEFAULT_MODEL = "gpt-4.1" # As per your example
11
- MAX_HISTORY_LENGTH = 5 # History formatting will be manual and limited
12
-
13
- # --- API Key and Client Initialization ---
14
- API_KEY = os.getenv("OPENAI_API_KEY")
15
- if not API_KEY:
16
- print("Error: OPENAI_API_KEY environment variable not set.")
17
- # Handle missing key appropriately (e.g., disable UI, raise error)
18
- client = OpenAI(api_key=API_KEY)
19
-
20
  # --- Helper Functions ---
21
 
22
  # !!! WARNING: This function is adapted to the requested format and LOSES features !!!
@@ -26,65 +7,92 @@ def get_openai_response_simplified(prompt, model=DEFAULT_MODEL, system_prompt=""
26
  NOTE: This is NON-STREAMING and handles history/system prompt crudely.
27
  Advanced parameters (temp, top_p etc.) are NOT supported by this structure.
28
  """
29
- today_day = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
 
 
30
 
31
- # --- Attempt to manually format history and system prompt into 'input' ---
32
  formatted_input = ""
33
- # Add system prompt if provided
34
  effective_system_prompt = f"Today's date is: {today_day}. {system_prompt}".strip()
35
  if effective_system_prompt:
36
- # How best to include system prompt? Prepend? Specific tags? Unknown.
37
  formatted_input += f"System: {effective_system_prompt}\n\n"
38
-
39
- # Add chat history (simple concatenation)
40
  if chat_history:
41
  for turn in chat_history:
42
  if len(turn) == 2 and turn[0] is not None and turn[1] is not None:
43
  formatted_input += f"User: {turn[0]}\nAssistant: {turn[1]}\n"
 
44
 
45
- # Add the current user prompt
46
- formatted_input += f"User: {prompt}\nAssistant:" # Prompt the model for the next turn
47
 
48
  try:
49
- # *** Using the requested client.responses.create format ***
50
- # NOTE: This assumes client.responses.create actually exists and works this way.
51
- # NOTE: Parameters like temperature, top_p, max_tokens are NOT included here
52
- # as they are not part of the provided example format.
 
 
53
  response = client.responses.create(
54
  model=model,
55
  input=formatted_input
56
  )
57
- # Assuming the response object has an 'output_text' attribute
58
- return response.output_text
59
-
60
- # Error handling might need adjustment based on how client.responses.create fails
 
 
 
 
 
 
 
 
 
 
61
  except openai.APIConnectionError as e:
62
- print(f"OpenAI API request failed: {e}")
63
  return f"Error: Could not connect to OpenAI API. {e}"
64
  except openai.RateLimitError as e:
65
- print(f"OpenAI API request failed: {e}")
66
  return f"Error: Rate limit exceeded. Please try again later. {e}"
67
  except openai.AuthenticationError as e:
68
- print(f"OpenAI API request failed: {e}")
69
  return f"Error: Authentication failed. Check your API key. {e}"
70
  except openai.APIStatusError as e:
71
- print(f"OpenAI API request failed: {e}")
72
  return f"Error: OpenAI API returned an error (Status: {e.status_code}). {e}"
73
  except AttributeError as e:
74
- print(f"Error accessing response or client method: {e}")
75
- return f"Error: The API call structure 'client.responses.create' or its response format might be incorrect or not available. {e}"
 
76
  except Exception as e:
77
- print(f"An unexpected error occurred: {e}")
78
  return f"An unexpected error occurred: {e}"
79
 
 
80
  # !!! WARNING: This update function is now NON-STREAMING !!!
81
  def update_ui_simplified(message, chat_history, model, system_prompt, history_length):
82
  """Updates the Gradio UI WITHOUT streaming."""
 
 
 
 
 
83
  if not message:
 
84
  return "", chat_history # Return original history if message is empty
85
 
 
 
 
 
 
 
 
86
  # Keep only the specified length of history for the *next* call
87
- history_for_api = chat_history[-int(history_length):] if history_length > 0 else []
 
88
 
89
  # Call the simplified, non-streaming function
90
  bot_response = get_openai_response_simplified(
@@ -93,105 +101,55 @@ def update_ui_simplified(message, chat_history, model, system_prompt, history_le
93
  system_prompt=system_prompt,
94
  chat_history=history_for_api # Pass the potentially trimmed history
95
  )
 
96
 
97
  # Append the user message and the *complete* bot response
 
 
 
98
  chat_history.append((message, bot_response))
 
99
 
100
  # Update UI only once with the full response
101
  # Always display history based on the slider length for visibility
102
- visible_history = chat_history[-int(history_length):] if history_length > 0 else []
 
 
103
  return "", visible_history # Clear input, return updated history
104
 
105
- # --- Gradio Interface (Modified for Simplified API Call) ---
106
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
107
- # Keep your Markdown, titles, etc.
108
- gr.Markdown("# Chat (Simplified API Demo)")
109
- gr.Markdown("---")
110
- gr.Markdown("Using a simplified, non-streaming API call structure.")
111
- gr.Markdown("---")
112
-
113
- # ... (rest of your Markdown) ...
114
-
115
- gr.Markdown("Chat below (Note: Responses will appear all at once): 👇")
116
-
117
- with gr.Row():
118
- with gr.Column(scale=4):
119
- chatbot = gr.Chatbot(
120
- label="Chat Window",
121
- show_label=False,
122
- avatar_images=(
123
- "https://cdn-icons-png.flaticon.com/512/1077/1077114.png", # User
124
- "https://cdn-icons-png.flaticon.com/512/8649/8649540.png" # AI
125
- ),
126
- render_markdown=True,
127
- height=500,
128
- bubble_full_width=False
129
- )
130
- msg = gr.Textbox(
131
- label="Your Message",
132
- placeholder="Type your message here and press Enter...",
133
- scale=4,
134
- show_label=False,
135
- container=False
136
- )
137
-
138
- # Accordion remains, but parameters might not be used by the simplified API call
139
- with gr.Accordion("Advanced Options (May Not Apply to Simplified API)", open=False):
140
- model_select = gr.Dropdown(
141
- label="Model",
142
- # Ensure gpt-4.1 is a valid choice if used
143
- choices=["gpt-4.1", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo-0125", "gpt-4o"],
144
- value=DEFAULT_MODEL,
145
- interactive=True
146
- )
147
- # These sliders are kept for UI, but won't be passed to the simplified API call
148
- temperature_slider = gr.Slider(label="Temperature (Not Used)", minimum=0.0, maximum=2.0, value=1.0, step=0.1, interactive=True)
149
- top_p_slider = gr.Slider(label="Top P (Not Used)", minimum=0.0, maximum=1.0, value=1.0, step=0.05, interactive=True)
150
- frequency_penalty_slider = gr.Slider(label="Frequency Penalty (Not Used)", minimum=-2.0, maximum=2.0, value=0.0, step=0.1, interactive=True)
151
- presence_penalty_slider = gr.Slider(label="Presence Penalty (Not Used)", minimum=-2.0, maximum=2.0, value=0.0, step=0.1, interactive=True)
152
- system_prompt_textbox = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3, interactive=True)
153
- history_length_slider = gr.Slider(label="Chat History Length (Affects Input & Display)", minimum=1, maximum=20, value=MAX_HISTORY_LENGTH, step=1, interactive=True)
154
-
155
-
156
- with gr.Row():
157
- clear = gr.Button("Clear Chat")
158
- send = gr.Button("Send Message", variant="primary")
159
-
160
-
161
- # --- Event Handlers (Using Simplified Functions) ---
162
- # Define inputs, excluding sliders not used by the simplified function
163
  inputs_simplified = [
164
  msg, chatbot, model_select, system_prompt_textbox,
165
- history_length_slider
166
  ]
167
- outputs = [msg, chatbot] # Outputs remain the same
168
 
169
- # Connect send button click
170
  send.click(
171
- update_ui_simplified, # Use the non-streaming UI update function
172
- inputs=inputs_simplified,
173
  outputs=outputs,
174
  queue=True
175
  )
176
-
177
- # Connect textbox submit (Enter key)
178
  msg.submit(
179
- update_ui_simplified, # Use the non-streaming UI update function
180
- inputs=inputs_simplified,
181
  outputs=outputs,
182
  queue=True
183
  )
184
-
185
- # Connect clear button
186
  clear.click(lambda: (None, []), None, outputs=[msg, chatbot], queue=False)
187
 
188
- gr.Examples(
189
- examples=["Tell me about the latest AI developments", "Write a short story about a friendly robot", "Explain black holes simply"],
190
- inputs=msg,
191
- label="Example Prompts"
192
- )
193
 
194
- # --- Launch ---
195
  if __name__ == "__main__":
196
- demo.queue()
197
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # --- Helper Functions ---
2
 
3
  # !!! WARNING: This function is adapted to the requested format and LOSES features !!!
 
7
  NOTE: This is NON-STREAMING and handles history/system prompt crudely.
8
  Advanced parameters (temp, top_p etc.) are NOT supported by this structure.
9
  """
10
+ print("--- Entering get_openai_response_simplified ---") # DEBUG
11
+ print(f"Received prompt: {prompt}") # DEBUG
12
+ print(f"Received model: {model}") # DEBUG
13
+ print(f"Received history (first few): {chat_history[:2] if chat_history else 'None'}") # DEBUG
14
 
15
+ today_day = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
16
  formatted_input = ""
 
17
  effective_system_prompt = f"Today's date is: {today_day}. {system_prompt}".strip()
18
  if effective_system_prompt:
 
19
  formatted_input += f"System: {effective_system_prompt}\n\n"
 
 
20
  if chat_history:
21
  for turn in chat_history:
22
  if len(turn) == 2 and turn[0] is not None and turn[1] is not None:
23
  formatted_input += f"User: {turn[0]}\nAssistant: {turn[1]}\n"
24
+ formatted_input += f"User: {prompt}\nAssistant:"
25
 
26
+ print(f"Formatted Input for API: \n{formatted_input}\n---") # DEBUG
 
27
 
28
  try:
29
+ print("Attempting client.responses.create call...") # DEBUG
30
+ # Check if method seems to exist before calling (basic check)
31
+ if not (hasattr(client, 'responses') and hasattr(client.responses, 'create')):
32
+ print("ERROR: client.responses.create method NOT FOUND!") # DEBUG
33
+ return "Error: The API call structure 'client.responses.create' is not available in the OpenAI client."
34
+
35
  response = client.responses.create(
36
  model=model,
37
  input=formatted_input
38
  )
39
+ print(f"API call supposedly succeeded. Response object: {response}") # DEBUG
40
+
41
+ # Check if response has the expected attribute BEFORE accessing it
42
+ if hasattr(response, 'output_text'):
43
+ output = response.output_text
44
+ print(f"Extracted output_text: {output}") # DEBUG
45
+ print("--- Exiting get_openai_response_simplified (Success) ---") # DEBUG
46
+ return output
47
+ else:
48
+ print("ERROR: Response object does NOT have 'output_text' attribute.") # DEBUG
49
+ print("--- Exiting get_openai_response_simplified (Error) ---") # DEBUG
50
+ return "Error: API response format is unexpected (missing output_text)."
51
+
52
+ # Keep specific error handling
53
  except openai.APIConnectionError as e:
54
+ print(f"ERROR caught in get_openai_response_simplified: APIConnectionError: {e}") # DEBUG
55
  return f"Error: Could not connect to OpenAI API. {e}"
56
  except openai.RateLimitError as e:
57
+ print(f"ERROR caught in get_openai_response_simplified: RateLimitError: {e}") # DEBUG
58
  return f"Error: Rate limit exceeded. Please try again later. {e}"
59
  except openai.AuthenticationError as e:
60
+ print(f"ERROR caught in get_openai_response_simplified: AuthenticationError: {e}") # DEBUG
61
  return f"Error: Authentication failed. Check your API key. {e}"
62
  except openai.APIStatusError as e:
63
+ print(f"ERROR caught in get_openai_response_simplified: APIStatusError: {e}") # DEBUG
64
  return f"Error: OpenAI API returned an error (Status: {e.status_code}). {e}"
65
  except AttributeError as e:
66
+ # This might catch the error if the initial check fails or response has wrong structure
67
+ print(f"ERROR caught in get_openai_response_simplified: AttributeError: {e}") # DEBUG
68
+ return f"Error: Problem with API call structure or response format. {e}"
69
  except Exception as e:
70
+ print(f"ERROR caught in get_openai_response_simplified: Unexpected Exception: {e}") # DEBUG
71
  return f"An unexpected error occurred: {e}"
72
 
73
+
74
  # !!! WARNING: This update function is now NON-STREAMING !!!
75
  def update_ui_simplified(message, chat_history, model, system_prompt, history_length):
76
  """Updates the Gradio UI WITHOUT streaming."""
77
+ print("\n--- Entering update_ui_simplified ---") # DEBUG
78
+ print(f"Received message: {message}") # DEBUG
79
+ print(f"Current chat_history length: {len(chat_history)}") # DEBUG
80
+ print(f"Requested history_length: {history_length}") # DEBUG
81
+
82
  if not message:
83
+ print("Empty message received, returning.") # DEBUG
84
  return "", chat_history # Return original history if message is empty
85
 
86
+ # Ensure history_length is an int for slicing
87
+ try:
88
+ hist_len_int = int(history_length)
89
+ except ValueError:
90
+ print(f"Warning: Invalid history_length '{history_length}', defaulting to {MAX_HISTORY_LENGTH}")
91
+ hist_len_int = MAX_HISTORY_LENGTH
92
+
93
  # Keep only the specified length of history for the *next* call
94
+ history_for_api = chat_history[-hist_len_int:] if hist_len_int > 0 else []
95
+ print(f"History slice for API length: {len(history_for_api)}") # DEBUG
96
 
97
  # Call the simplified, non-streaming function
98
  bot_response = get_openai_response_simplified(
 
101
  system_prompt=system_prompt,
102
  chat_history=history_for_api # Pass the potentially trimmed history
103
  )
104
+ print(f"Received from API helper: {bot_response}") # DEBUG
105
 
106
  # Append the user message and the *complete* bot response
107
+ # Make sure chat_history is treated as a list
108
+ if chat_history is None:
109
+ chat_history = []
110
  chat_history.append((message, bot_response))
111
+ print(f"Appended to chat_history. New length: {len(chat_history)}") # DEBUG
112
 
113
  # Update UI only once with the full response
114
  # Always display history based on the slider length for visibility
115
+ visible_history = chat_history[-hist_len_int:] if hist_len_int > 0 else []
116
+ print(f"Visible history length: {len(visible_history)}") # DEBUG
117
+ print("--- Exiting update_ui_simplified ---") # DEBUG
118
  return "", visible_history # Clear input, return updated history
119
 
120
+ # --- Gradio Interface (Ensure Simplified Handlers are Used) ---
121
+ # ... (Keep the Gradio Blocks definition as in the previous example) ...
122
+
123
+ # --- Event Handlers (Check these carefully) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  inputs_simplified = [
125
  msg, chatbot, model_select, system_prompt_textbox,
126
+ history_length_slider # Make sure history_length_slider is included
127
  ]
128
+ outputs = [msg, chatbot]
129
 
 
130
  send.click(
131
+ update_ui_simplified, # Ensure this is the correct function
132
+ inputs=inputs_simplified, # Ensure this list matches the function args
133
  outputs=outputs,
134
  queue=True
135
  )
 
 
136
  msg.submit(
137
+ update_ui_simplified, # Ensure this is the correct function
138
+ inputs=inputs_simplified, # Ensure this list matches the function args
139
  outputs=outputs,
140
  queue=True
141
  )
 
 
142
  clear.click(lambda: (None, []), None, outputs=[msg, chatbot], queue=False)
143
 
144
+ # ... (Rest of the Gradio code and launch) ...
 
 
 
 
145
 
 
146
  if __name__ == "__main__":
147
+ # Make sure API key is loaded (add check if needed)
148
+ if not API_KEY:
149
+ print("FATAL ERROR: OPENAI_API_KEY is not set. The application cannot function.")
150
+ # Optionally, you could display this error in the Gradio UI as well
151
+ # and prevent the launch or disable input components.
152
+ else:
153
+ print("OpenAI API Key found.")
154
+ demo.queue()
155
+ demo.launch()