### Importing Libraries
import os

import gradio as gr
from openai import OpenAI
### OpenAI Setup
# Read the API key from the environment instead of hard-coding it
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Fail fast at startup if the key is missing
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY is not set in the environment.")

# Initialize the OpenAI client
client = OpenAI(api_key=OPENAI_API_KEY)

# Common system prompt shared by all model calls
SYSTEM_PROMPT = "You are a helpful assistant."
### Utility Functions
def call_openai_api(user_message, model, temperature=0.1, max_tokens=1024):
    """
    Calls the OpenAI chat completions API and returns the response content.

    Parameters:
    - user_message (str): The input message to process.
    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4').
    - temperature (float): Sampling temperature for the API.
    - max_tokens (int): Maximum number of tokens to generate.

    Returns:
    - str: The model's response content, or an error message on failure.
    """
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {'role': 'system', 'content': SYSTEM_PROMPT},
                # Delimit the user input so the model can tell it apart
                # from the system instructions
                {'role': 'user', 'content': f"#### {user_message} ####"}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
def summarize(user_message):
    """
    Summarizes the given text using GPT-3.5 Turbo.
    """
    # Prepend an explicit instruction so the model actually summarizes
    # rather than responding to the text directly
    return call_openai_api(
        f"Summarize the following text:\n{user_message}",
        model='gpt-3.5-turbo',
        temperature=0.1
    )
def inference(user_message):
    """
    Generates responses from multiple models for side-by-side comparison.
    """
    gpt_35_response = call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
    gpt_4_response = call_openai_api(user_message, model='gpt-4', temperature=0.1)
    return gpt_35_response, gpt_4_response
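
# These thin wrappers could equally be built with functools.partial if more
# models are added later; a sketch (not wired into the UI below):
#
#   from functools import partial
#   ask_gpt4 = partial(call_openai_api, model='gpt-4', temperature=0.1)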
### Gradio Interface
def launch_demo():
    """
    Launches the Gradio interface for the application.
    """
    with gr.Blocks() as demo:
        gr.Markdown('<center><h1>OpenAI LLM Explorer</h1></center>')
        gr.Markdown("Type your prompt below to compare results from multiple OpenAI models.")

        # Input prompt
        prompt = gr.Textbox(label='Prompt', lines=5, placeholder="Enter your prompt here...")

        # One output box per model, side by side
        with gr.Row():
            gpt_35_output = gr.Textbox(label='GPT-3.5-Turbo Output', lines=6)
            gpt_4_output = gr.Textbox(label='GPT-4 Output', lines=6)

        # Button that triggers generation
        generate_btn = gr.Button("Generate", variant='primary')

        # Bind the button click to the inference function
        generate_btn.click(
            inference,
            inputs=[prompt],
            outputs=[gpt_35_output, gpt_4_output]
        )

    demo.launch(share=True)
if __name__ == "__main__":
    launch_demo()
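
# To run locally (assuming this file is saved as app.py and the openai and
# gradio packages are installed):
#
#   export OPENAI_API_KEY="sk-..."
#   python app.py
#
# share=True additionally creates a temporary public Gradio link.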