import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
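# Load the tokenizer and model once at startup so every request reuses them.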
checkpoint = "WillHeld/olmo-raccoon" | |
device = "cuda" | |
tokenizer = AutoTokenizer.from_pretrained(checkpoint) | |
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) | |
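# ChatInterface calls this with the new message, the running history
# (a list of {"role": ..., "content": ...} dicts, per type="messages"),
# and the current values of the extra slider inputs.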
@spaces.GPU  # on ZeroGPU Spaces this requests a GPU per call; without it the `spaces` import is unused
def predict(message, history, temperature, top_p):
    history.append({"role": "user", "content": message})
    # Render the conversation with the model's chat template, ending with the
    # assistant prompt so the model continues as the assistant.
    input_text = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
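    # Sample a continuation; do_sample=True is what makes temperature and top_p take effect.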
    outputs = model.generate(
        inputs,
        max_new_tokens=1024,
        temperature=float(temperature),
        top_p=float(top_p),
        do_sample=True,
    )
    # Decode only the newly generated tokens, so the prompt and special tokens
    # (e.g. the end-of-text marker) don't leak into the chat reply.
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return response
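# UI: a chat window with the sampling controls exposed as extra inputs.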
with gr.Blocks() as demo:
    chatbot = gr.ChatInterface(
        predict,
        additional_inputs=[
            gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P"),
        ],
        type="messages",
    )
demo.launch()