from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Load the fine-tuned model and tokenizer once at startup, not per
# request -- loading a 7B-parameter checkpoint is far too slow to
# repeat on every call.
model_name = "PygmalionAI/pygmalion-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

@app.get("/chat")
def chat(msg: str):
    # Tokenize the incoming message and generate a continuation.
    # Note that max_length=200 caps the total sequence length
    # (prompt plus reply) at 200 tokens.
    inputs = tokenizer(msg, return_tensors="pt")
    response = model.generate(**inputs, max_length=200)
    return {"response": tokenizer.decode(response[0], skip_special_tokens=True)}
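
# A quick in-process smoke test using FastAPI's built-in TestClient --
# a minimal sketch: the prompt string is just an illustrative example,
# and the __main__ guard keeps it from firing when a server imports
# this module. To serve over HTTP instead, run `uvicorn main:app`
# (assuming this file is named main.py) and query /chat?msg=... on
# port 8000.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)
    resp = client.get("/chat", params={"msg": "Hello, how are you?"})
    print(resp.json()["response"])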