File size: 2,909 Bytes
c85e9e7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
### Importing Libraries

import os
import openai
import gradio as gr
from functools import partial


### OpenAI Setup

# Read the API key from the environment so no secret is hard-coded in the file.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Fail fast at import time: a missing key would otherwise surface later as an
# opaque API error inside call_openai_api's broad except.
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY is not set in the environment.")

# Configure the module-level client (legacy openai<1.0 style: openai.api_key).
openai.api_key = OPENAI_API_KEY

# System prompt shared by every model call made through call_openai_api.
SYSTEM_PROMPT = "You are a helpful assistant."


### Utility Functions

def call_openai_api(user_message, model, temperature=0.1, max_tokens=1024):
    """
    Send a single chat turn to the OpenAI API and return the reply text.

    The user message is wrapped in '#### ... ####' delimiters before being
    sent, and every request shares the module-level SYSTEM_PROMPT.

    Parameters:
    - user_message (str): The input message to process.
    - model (str): Model name (e.g., 'gpt-3.5-turbo', 'gpt-4').
    - temperature (float): Sampling temperature for the API.
    - max_tokens (int): Maximum number of tokens to generate.

    Returns:
    - str: The model's reply, or an "Error: ..." string on any failure
      (best-effort: exceptions are reported inline rather than raised).
    """
    chat_turns = [
        {'role': 'system', 'content': SYSTEM_PROMPT},
        {'role': 'user', 'content': f"#### {user_message} ####"},
    ]
    try:
        # Legacy (openai<1.0) ChatCompletion endpoint, as used file-wide.
        completion = openai.ChatCompletion.create(
            model=model,
            messages=chat_turns,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        reply = completion.choices[0].message.content
    except Exception as e:
        # Swallow all failures and surface them as text so the Gradio UI
        # can display the problem instead of crashing.
        reply = f"Error: {str(e)}"
    return reply


def summarize(user_message):
    """
    Summarize the given text with GPT-3.5 Turbo at a low temperature.

    Returns the model's reply string (or an "Error: ..." string on failure,
    per call_openai_api's contract).
    """
    summary = call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
    return summary


def inference(user_message):
    """
    Run the same prompt through GPT-3.5-Turbo and GPT-4 for comparison.

    Returns a pair of reply strings in that fixed order, matching the two
    output textboxes wired up in launch_demo.
    """
    replies = tuple(
        call_openai_api(user_message, model=model_name, temperature=0.1)
        for model_name in ('gpt-3.5-turbo', 'gpt-4')
    )
    return replies


### Gradio Interface

def launch_demo():
    """
    Build the Gradio comparison UI and launch it with a public share link.
    """
    with gr.Blocks() as app:
        # Page header and usage hint.
        gr.Markdown('<center><h1>OpenAI LLM Explorer</h1></center>')
        gr.Markdown("Type your prompt below to compare results from multiple OpenAI models.")

        # Single shared prompt box feeding both models.
        user_prompt = gr.Textbox(label='Prompt', lines=5, placeholder="Enter your prompt here...")

        # Side-by-side outputs, one per model (order matches inference()).
        with gr.Row():
            out_gpt_35 = gr.Textbox(label='GPT-3.5-Turbo Output', lines=6)
            out_gpt_4 = gr.Textbox(label='GPT-4 Output', lines=6)

        run_button = gr.Button("Generate", variant='primary')

        # Clicking runs inference() and fans its two replies out to the boxes.
        run_button.click(
            inference,
            inputs=[user_prompt],
            outputs=[out_gpt_35, out_gpt_4]
        )

    # share=True exposes a temporary public URL in addition to localhost.
    app.launch(share=True)


# Script entry point: start the demo only when run directly, not on import.
if __name__ == "__main__":
    launch_demo()