import os

import gradio as gr
import PyPDF2
from openai import OpenAI

# Initialize the OpenAI client ONCE at module load
client = OpenAI(api_key=os.getenv("OPENAI_KEY"))  # Note: corrected to OPENAI_KEY here


def read_pdf(file):
    """Extract plain text from an uploaded PDF, or return an empty string."""
    if file is None:
        return ""
    reader = PyPDF2.PdfReader(file)
    text = ""
    for page in reader.pages:
        # extract_text() can return None for image-only pages, so fall back to ""
        text += page.extract_text() or ""
    return text


def chat_with_openai(user_input, model_name, uploaded_pdf):
    # model_name comes directly from the dropdown selection
    pdf_text = read_pdf(uploaded_pdf) if uploaded_pdf else ""
    prompt = f"{pdf_text}\n\nUser Query: {user_input}" if pdf_text else user_input
    try:
        response = client.chat.completions.create(
            model=model_name,  # 🔥 use the selected model here
            messages=[{"role": "user", "content": prompt}],
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"


with gr.Blocks() as app:
    gr.Markdown("# 🔥 OpenAI Chat + PDF Analysis Tool")

    with gr.Row():
        model_selector = gr.Dropdown(
            choices=["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"],
            label="Select OpenAI Model",
            value="gpt-3.5-turbo",
        )
    with gr.Row():
        uploaded_pdf = gr.File(label="Upload a PDF (optional)", file_types=[".pdf"])
    with gr.Row():
        user_input = gr.Textbox(label="Your Prompt", placeholder="Ask anything...")
    with gr.Row():
        submit_btn = gr.Button("Submit")
    with gr.Row():
        response_output = gr.Textbox(label="OpenAI Response", lines=10, interactive=True)

    submit_btn.click(
        fn=chat_with_openai,
        inputs=[user_input, model_selector, uploaded_pdf],  # make sure model_selector is passed here
        outputs=response_output,
    )

app.launch()
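
# Usage sketch (assumptions: the script is saved as app.py and the OPENAI_KEY
# environment variable holds a valid OpenAI API key, as the code above expects):
#
#   export OPENAI_KEY="sk-..."
#   pip install gradio openai PyPDF2
#   python app.py
#
# Gradio then serves the app locally (by default at http://127.0.0.1:7860).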