Create app.py
app.py
ADDED
@@ -0,0 +1,104 @@
### Importing Libraries

import os
import openai
import gradio as gr
from functools import partial

### OpenAI Setup

# Read the API key from an environment variable rather than hard-coding it
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Validate the API key at runtime
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY is not set in the environment.")

# Initialize the OpenAI client
openai.api_key = OPENAI_API_KEY

# Common system prompt shared by all models
SYSTEM_PROMPT = "You are a helpful assistant."
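# A minimal sketch of supplying the key when running outside Hugging Face Spaces
# (the exact shell command is an assumption about a local Unix-like environment,
# not part of this app; the key value is a placeholder):
#
#   export OPENAI_API_KEY="sk-..."
#   python app.py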

### Utility Functions

def call_openai_api(user_message, model, temperature=0.1, max_tokens=1024):
    """
    Calls the OpenAI API and returns the response content.

    Parameters:
    - user_message (str): The input message to process.
    - model (str): Model name (e.g. 'gpt-3.5-turbo', 'gpt-4').
    - temperature (float): Sampling temperature for the API.
    - max_tokens (int): Maximum number of tokens to generate.

    Returns:
    - str: The model's response content, or an error message on failure.
    """
    try:
        # Uses the legacy module-level ChatCompletion interface (openai < 1.0).
        response = openai.ChatCompletion.create(
            model=model,
            messages=[
                {'role': 'system', 'content': SYSTEM_PROMPT},
                # The user message is wrapped in '####' delimiters to mark its boundaries.
                {'role': 'user', 'content': f"#### {user_message} ####"}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
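# A minimal sketch of exercising the helper directly (e.g. from a Python REPL)
# before wiring it into the UI; the prompt text is only an illustration:
#
#   >>> call_openai_api("Summarize the plot of Hamlet in two sentences.",
#   ...                 model='gpt-3.5-turbo')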

def summarize(user_message):
    """
    Summarizes a given text using GPT-3.5 Turbo.
    """
    return call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
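# Note: summarize() is a thin convenience wrapper around call_openai_api(); it is
# not wired into the Gradio interface below and can be imported and called separately.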

def inference(user_message):
    """
    Generates responses from multiple models for comparison.
    """
    gpt_35_response = call_openai_api(user_message, model='gpt-3.5-turbo', temperature=0.1)
    gpt_4_response = call_openai_api(user_message, model='gpt-4', temperature=0.1)
    return gpt_35_response, gpt_4_response

### Gradio Interface

def launch_demo():
    """
    Launches the Gradio interface for the application.
    """
    with gr.Blocks() as demo:
        gr.Markdown('<center><h1>OpenAI LLM Explorer</h1></center>')
        gr.Markdown("Type your prompt below to compare results from multiple OpenAI models.")

        # Input prompt
        prompt = gr.Textbox(label='Prompt', lines=5, placeholder="Enter your prompt here...")

        # Outputs for each model
        with gr.Row():
            gpt_35_output = gr.Textbox(label='GPT-3.5-Turbo Output', lines=6)
            gpt_4_output = gr.Textbox(label='GPT-4 Output', lines=6)

        # Button for generating outputs
        generate_btn = gr.Button("Generate", variant='primary')

        # Bind inputs and outputs
        generate_btn.click(
            inference,
            inputs=[prompt],
            outputs=[gpt_35_output, gpt_4_output]
        )

    demo.launch(share=True)


if __name__ == "__main__":
    launch_demo()
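# Usage note: on Hugging Face Spaces (Gradio SDK) this file is started automatically
# as the app entry point; run locally, `python app.py` starts the Gradio server, and
# share=True additionally requests a temporary public gradio.live link.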