import openai
import gradio as gr
from functools import partial


### Utility Functions

def validate_api_key(api_key):
    """
    Validates the OpenAI API key by making a test request.
    Returns True if valid, False otherwise.
    """
    if not api_key:
        return False
    try:
        client = openai.OpenAI(api_key=api_key)
        # Make a minimal test request
        client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=5
        )
        return True
    except Exception:
        return False


def call_openai_api(user_message, api_key, model, temperature=0.1, max_tokens=1024):
    """
    Calls the OpenAI API and returns the response content.

    Parameters:
    - user_message (str): The input message to process
    - api_key (str): OpenAI API key
    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4')
    - temperature (float): Sampling temperature for the API
    - max_tokens (int): Maximum number of tokens to generate

    Returns:
    - str: The model's response content
    """
    try:
        client = openai.OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model=model,
            messages=[
                {'role': 'system', 'content': "You are a helpful assistant."},
                {'role': 'user', 'content': f"#### {user_message} ####"}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"


def inference(user_message, api_key):
    """
    Generates responses from multiple models for comparison.
    """
    if not validate_api_key(api_key):
        return ("Invalid API Key. Please enter a valid OpenAI API key.",
                "Invalid API Key. Please enter a valid OpenAI API key.")
    gpt_35_response = call_openai_api(user_message, api_key, model='gpt-3.5-turbo', temperature=0.1)
    gpt_4_response = call_openai_api(user_message, api_key, model='gpt-4', temperature=0.1)
    return gpt_35_response, gpt_4_response


### Gradio Interface

def launch_demo():
    """
    Launches the Gradio interface for the application.
    """
    with gr.Blocks() as demo:
        gr.Markdown('