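"""OpenAI LLM Explorer.

A Gradio demo that sends the same prompt to GPT-3.5-Turbo and GPT-4 and displays
the two responses side by side for comparison.

Requires the `openai` (>= 1.0) and `gradio` packages, e.g. `pip install openai gradio`.
"""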
import openai
import gradio as gr

### Utility Functions

def validate_api_key(api_key):
    """
    Validates the OpenAI API key by making a test request.
    Returns True if valid, False otherwise.
    """
    if not api_key:
        return False
    try:
        client = openai.OpenAI(api_key=api_key)
        # Make a minimal test request
        client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=5
        )
        return True
    except Exception:
        return False
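
# Note: a cheaper way to validate a key (a sketch; not what this demo uses) is to call a
# metadata endpoint instead of generating tokens, for example:
#
#     client = openai.OpenAI(api_key=api_key)
#     client.models.list()  # raises openai.AuthenticationError for a bad key
#
# This avoids spending completion tokens on every validation.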

def call_openai_api(user_message, api_key, model, temperature=0.1, max_tokens=1024):
    """
    Calls the OpenAI API and returns the response content.

    Parameters:
    - user_message (str): The input message to process
    - api_key (str): OpenAI API key
    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4')
    - temperature (float): Sampling temperature for the API
    - max_tokens (int): Maximum number of tokens to generate

    Returns:
    - str: The model's response content, or an error message on failure
    """
    try:
        client = openai.OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model=model,
            messages=[
                {'role': 'system', 'content': "You are a helpful assistant."},
                {'role': 'user', 'content': f"#### {user_message} ####"}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"

def inference(user_message, api_key):
    """
    Generates responses from multiple models for comparison.
    Returns a (gpt-3.5-turbo response, gpt-4 response) tuple.
    """
    if not validate_api_key(api_key):
        error_msg = "Invalid API Key. Please enter a valid OpenAI API key."
        return error_msg, error_msg
    gpt_35_response = call_openai_api(user_message, api_key, model='gpt-3.5-turbo', temperature=0.1)
    gpt_4_response = call_openai_api(user_message, api_key, model='gpt-4', temperature=0.1)
    return gpt_35_response, gpt_4_response
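
# Note: the two model calls in inference() run sequentially. As a possible optimization
# (a sketch, not part of the original demo), they could run concurrently, e.g. with
# concurrent.futures.ThreadPoolExecutor, since each call is independent, I/O-bound work.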

### Gradio Interface

def launch_demo():
    """
    Launches the Gradio interface for the application.
    """
    with gr.Blocks() as demo:
        gr.Markdown('<center><h1>OpenAI LLM Explorer</h1></center>')
        gr.Markdown("Enter your OpenAI API key and prompt to compare results from multiple OpenAI models.")

        # API Key input
        api_key = gr.Textbox(
            label='OpenAI API Key',
            placeholder="Enter your OpenAI API key...",
            type="password"  # Masks the API key
        )

        # Input prompt
        prompt = gr.Textbox(
            label='Prompt',
            lines=5,
            placeholder="Enter your prompt here..."
        )

        # Outputs for each model
        with gr.Row():
            gpt_35_output = gr.Textbox(label='GPT-3.5-Turbo Output', lines=6)
            gpt_4_output = gr.Textbox(label='GPT-4 Output', lines=6)

        # Button for generating outputs
        generate_btn = gr.Button("Generate", variant='primary')

        # Bind inputs and outputs
        generate_btn.click(
            inference,
            inputs=[prompt, api_key],
            outputs=[gpt_35_output, gpt_4_output]
        )
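
    # share=True serves the demo locally and also creates a temporary public Gradio link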
    demo.launch(share=True)


if __name__ == "__main__":
    launch_demo()