# Hugging Face Space: simple AI chatbot demo (Gradio + Transformers)
import gradio as gr

from transformers import AutoModelForCausalLM, AutoTokenizer

# Model to load. GPT-2 is small enough to run on CPU; swap in any causal-LM
# checkpoint from the Hugging Face Hub (e.g. a Mistral variant) if you have
# the hardware for it.
model_name = "gpt2"

# Load tokenizer and model once at startup so every chat request reuses them.
# device_map="auto" places the model on GPU when one is available,
# falling back to CPU otherwise.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
# AI Chatbot function
def chat_with_ai(prompt: str) -> str:
    """Generate a text continuation of *prompt* with the loaded model.

    Args:
        prompt: The user's input text.

    Returns:
        The decoded model output (up to 100 new tokens). Note that causal
        LMs echo the prompt: the returned string begins with *prompt*
        followed by the generated continuation.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # device_map="auto" may have placed the model on GPU; the input tensors
    # must live on the same device or generate() raises a device mismatch.
    inputs = inputs.to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    # skip_special_tokens strips markers like <|endoftext|> from the reply.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Create a simple chatbot UI using Gradio: one text box in, one text box out.
interface = gr.Interface(
    fn=chat_with_ai,
    inputs="text",
    outputs="text",
    title="AI Chatbot",
)
# Start the local web server (blocks until the app is stopped).
interface.launch()