Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ import torch
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import logging
+import feedparser
 
 # Set up logging
 logging.basicConfig(level=logging.DEBUG)
@@ -231,9 +232,6 @@ def chat_interface(user_input, history, web_search, decoding_strategy, temperatu
     # Ensure the tokenizer is accessible within the function scope
     global tokenizer
 
-    # Ensure the input is correctly formatted as a dictionary
-    user_input = {"text": user_input}
-
     # Perform model inference
     response = model_inference(
         user_prompt=user_input,
@@ -246,10 +244,10 @@ def chat_interface(user_input, history, web_search, decoding_strategy, temperatu
         tokenizer=tokenizer  # Pass tokenizer to the model_inference function
     )
 
-    # Update chat history
-    history.append([user_input
+    # Update the chat history with the new interaction
+    history.append([user_input, response])
 
-    # Return the updated history
+    # Return the updated history and the response
     return history, response
 
 # Define the Gradio interface components
@@ -257,7 +255,7 @@ interface = gr.Interface(
     fn=chat_interface,
     inputs=[
         gr.Textbox(label="User Input", placeholder="Type your message here..."),
-        gr.State([],
+        gr.State([]),  # Chat history
         gr.Checkbox(label="Perform Web Search"),
         gr.Radio(["Greedy", "Top P Sampling"], label="Decoding strategy"),
         gr.Slider(minimum=0.0, maximum=2.0, step=0.05, label="Sampling temperature", value=0.5),
@@ -265,11 +263,10 @@ interface = gr.Interface(
         gr.Slider(minimum=0.01, maximum=5.0, step=0.01, label="Repetition penalty", value=1),
         gr.Slider(minimum=0.01, maximum=0.99, step=0.01, label="Top P", value=0.9)
     ],
-    outputs=[
-
-
-
-    live=False
+    outputs=[gr.State([]), gr.Textbox(label="Assistant Response")],
+    live=True,
+    title="OpenGPT-4o-Chatty",
+    description="Chat with the AI and optionally perform web searches to enhance responses."
 )
 
 # Launch the Gradio interface
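The new feedparser import suggests the "Perform Web Search" checkbox is meant to pull context from RSS feeds, but this commit adds only the import, not the usage. Below is a minimal sketch of how it could be wired in; the helper name fetch_feed_snippets and the Google News feed URL are assumptions, not part of app.py.

# Sketch only: a possible feedparser-backed search helper. The helper
# name and feed URL are assumptions; the commit adds only the import.
from urllib.parse import quote_plus

import feedparser

def fetch_feed_snippets(query, max_entries=3):
    # Google News publishes search results as RSS, which
    # feedparser.parse() can fetch and parse straight from a URL.
    url = f"https://news.google.com/rss/search?q={quote_plus(query)}"
    feed = feedparser.parse(url)
    # Each entry carries a title and summary that chat_interface could
    # prepend to user_input when web_search is checked.
    return [f"{entry.title}: {entry.summary}" for entry in feed.entries[:max_entries]]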
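The history fix completes Gradio's explicit-state pattern: gr.State([]) feeds the previous history into chat_interface, and the function returns the updated list as its first output, matching the two declared outputs. Here is a self-contained sketch of that round trip, with model_inference stubbed out since its body is outside this diff.

# Minimal sketch of the gr.State round trip used above. The real
# model_inference lives elsewhere in app.py, so it is stubbed here.
import gradio as gr

def model_inference(user_prompt, **kwargs):
    # Placeholder standing in for the actual model call.
    return f"(model reply to: {user_prompt})"

def chat_interface(user_input, history):
    response = model_inference(user_prompt=user_input)
    # Append the [user, assistant] pair; gr.State carries it to the next call.
    history.append([user_input, response])
    # Two return values to match the two outputs: the state and the textbox.
    return history, response

demo = gr.Interface(
    fn=chat_interface,
    inputs=[gr.Textbox(label="User Input"), gr.State([])],
    outputs=[gr.State([]), gr.Textbox(label="Assistant Response")],
)

if __name__ == "__main__":
    demo.launch()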
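The decoding-strategy radio and the three sliders map naturally onto transformers generation arguments. model_inference itself is not shown in this diff, so the body below is an assumed mapping, not the Space's actual implementation.

# Sketch: mapping the UI controls to transformers generation kwargs.
# model_inference is not shown in this diff, so this body is an assumption;
# model and tokenizer are the AutoModelForCausalLM/AutoTokenizer pair
# loaded elsewhere in app.py.
def model_inference(user_prompt, decoding_strategy, temperature,
                    repetition_penalty, top_p, model, tokenizer):
    inputs = tokenizer(user_prompt, return_tensors="pt")
    kwargs = {"max_new_tokens": 256, "repetition_penalty": repetition_penalty}
    if decoding_strategy == "Top P Sampling":
        # Sampling path: temperature and nucleus (top-p) filtering apply.
        kwargs.update(do_sample=True, temperature=temperature, top_p=top_p)
    else:
        # "Greedy": deterministic argmax decoding; the sampling knobs are ignored.
        kwargs.update(do_sample=False)
    output_ids = model.generate(**inputs, **kwargs)
    # Drop the prompt tokens and decode only the newly generated text.
    new_tokens = output_ids[0, inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)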