tdurzynski committed
Commit ecbd6bb · verified · 1 Parent(s): e02e6a1

Update app.py

Files changed (1)
  1. app.py +54 -53
app.py CHANGED
@@ -1,47 +1,46 @@
-### Importing Libraries
-
-import os
 import openai
 import gradio as gr
 from functools import partial
 
-
-### OpenAI Setup
-
-# Use environment variables for API key
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-
-# Validate API Key at runtime
-if not OPENAI_API_KEY:
-    raise ValueError("OPENAI_API_KEY is not set in the environment.")
-
-# Initialize OpenAI client
-openai.api_key = OPENAI_API_KEY
-
-# Common system prompt for models
-SYSTEM_PROMPT = "You are a helpful assistant."
-
-
 ### Utility Functions
+def validate_api_key(api_key):
+    """
+    Validates the OpenAI API key by making a test request.
+    Returns True if valid, False otherwise.
+    """
+    if not api_key:
+        return False
+
+    try:
+        client = openai.OpenAI(api_key=api_key)
+        # Make a minimal test request
+        client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "test"}],
+            max_tokens=5
+        )
+        return True
+    except Exception:
+        return False
 
-def call_openai_api(user_message, model, temperature=0.1, max_tokens=1024):
+def call_openai_api(user_message, api_key, model, temperature=0.1, max_tokens=1024):
     """
     Calls the OpenAI API and returns the response content.
-
     Parameters:
-    - user_message (str): The input message to process.
-    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4').
-    - temperature (float): Sampling temperature for the API.
-    - max_tokens (int): Maximum number of tokens to generate.
-
+    - user_message (str): The input message to process
+    - api_key (str): OpenAI API key
+    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4')
+    - temperature (float): Sampling temperature for the API
+    - max_tokens (int): Maximum number of tokens to generate
     Returns:
-    - str: The model's response content.
+    - str: The model's response content
     """
     try:
-        response = openai.chat.completions.create(
+        client = openai.OpenAI(api_key=api_key)
+        response = client.chat.completions.create(
             model=model,
             messages=[
-                {'role': 'system', 'content': SYSTEM_PROMPT},
+                {'role': 'system', 'content': "You are a helpful assistant."},
                 {'role': 'user', 'content': f"#### {user_message} ####"}
             ],
             temperature=temperature,
@@ -51,54 +50,56 @@ def call_openai_api(user_message, model, temperature=0.1, max_tokens=1024):
     except Exception as e:
         return f"Error: {str(e)}"
 
-
-def summarize(user_message):
-    """
-    Summarizes a given text using GPT-3.5 Turbo.
-    """
-    return call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
-
-
-def inference(user_message):
+def inference(user_message, api_key):
     """
     Generates responses from multiple models for comparison.
     """
-    gpt_35_response = call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
-    gpt_4_response = call_openai_api(user_message, model='gpt-4', temperature=0.1)
+    if not validate_api_key(api_key):
+        return "Invalid API Key. Please enter a valid OpenAI API key.", "Invalid API Key. Please enter a valid OpenAI API key."
+
+    gpt_35_response = call_openai_api(user_message, api_key, model='gpt-3.5-turbo', temperature=0.1)
+    gpt_4_response = call_openai_api(user_message, api_key, model='gpt-4', temperature=0.1)
     return gpt_35_response, gpt_4_response
 
-
 ### Gradio Interface
-
 def launch_demo():
     """
     Launches the Gradio interface for the application.
     """
     with gr.Blocks() as demo:
         gr.Markdown('<center><h1>OpenAI LLM Explorer</h1></center>')
-        gr.Markdown("Type your prompt below to compare results from multiple OpenAI models.")
-
+        gr.Markdown("Enter your OpenAI API key and prompt to compare results from multiple OpenAI models.")
+
+        # API Key input
+        api_key = gr.Textbox(
+            label='OpenAI API Key',
+            placeholder="Enter your OpenAI API key...",
+            type="password"  # Masks the API key
+        )
+
         # Input prompt
-        prompt = gr.Textbox(label='Prompt', lines=5, placeholder="Enter your prompt here...")
+        prompt = gr.Textbox(
+            label='Prompt',
+            lines=5,
+            placeholder="Enter your prompt here..."
+        )
 
         # Outputs for each model
         with gr.Row():
             gpt_35_output = gr.Textbox(label='GPT-3.5-Turbo Output', lines=6)
             gpt_4_output = gr.Textbox(label='GPT-4 Output', lines=6)
-
+
         # Button for generating outputs
        generate_btn = gr.Button("Generate", variant='primary')
-
+
         # Bind inputs and outputs
         generate_btn.click(
-            inference,
-            inputs=[prompt],
+            inference,
+            inputs=[prompt, api_key],
             outputs=[gpt_35_output, gpt_4_output]
         )
-
+
         demo.launch(share=True)
 
-
 if __name__ == "__main__":
     launch_demo()
-
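
For reference, the updated functions can also be exercised outside the Gradio UI. The snippet below is a minimal sketch and not part of the commit; it assumes the file above is saved as app.py on the import path and that an API key is available in the OPENAI_API_KEY environment variable (the app itself now collects the key through a password textbox instead).

# Minimal sketch: drive the new key-validated flow from a plain Python session.
import os

from app import validate_api_key, inference  # functions defined in the updated app.py

api_key = os.getenv("OPENAI_API_KEY", "")  # assumption: key supplied via environment

if validate_api_key(api_key):
    # inference() returns one response string per model: (gpt-3.5-turbo, gpt-4)
    gpt_35_answer, gpt_4_answer = inference("Summarize the change in one sentence.", api_key)
    print("GPT-3.5-Turbo:", gpt_35_answer)
    print("GPT-4:", gpt_4_answer)
else:
    print("No valid OpenAI API key found; set OPENAI_API_KEY before running.")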