Spaces:
Configuration error
Configuration error
Update app.py
Browse files
app.py
CHANGED
@@ -1,20 +1,21 @@
|
|
1 |
from fastapi import FastAPI
|
2 |
from pydantic import BaseModel
|
3 |
-
from transformers import
|
|
|
4 |
|
5 |
-
# FastAPI uygulaması başlat
|
6 |
app = FastAPI()
|
7 |
|
8 |
-
#
|
9 |
-
|
|
|
10 |
|
11 |
-
# API'ye gönderilecek istek yapısı
|
12 |
class Memory(BaseModel):
|
13 |
description: str
|
14 |
|
15 |
-
# POST endpoint: /generate
|
16 |
@app.post("/generate")
|
17 |
def generate(memory: Memory):
|
18 |
prompt = f"Soru üret: {memory.description}"
|
19 |
-
|
20 |
-
|
|
|
|
|
|
1 |
from fastapi import FastAPI
|
2 |
from pydantic import BaseModel
|
3 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
+
import torch
|
5 |
|
|
|
6 |
app = FastAPI()
|
7 |
|
8 |
+
# Load the model and tokenizer once at startup (module import time).
# No device placement is done here, so inference runs on CPU by default.
# NOTE(review): this downloads from the HF Hub on first run — startup will
# block until the weights are cached.
model = AutoModelForCausalLM.from_pretrained("memorease/memorease-quizgen")
tokenizer = AutoTokenizer.from_pretrained("memorease/memorease-quizgen")
|
11 |
|
|
|
12 |
class Memory(BaseModel):
    """Request body for POST /generate: a free-text memory description."""
    # Text used to build the generation prompt in generate().
    description: str
|
14 |
|
|
|
15 |
@app.post("/generate")
def generate(memory: Memory):
    """Generate a quiz question from a memory description.

    Builds a Turkish instruction prompt ("Soru üret: ..."), runs the causal
    LM, and returns only the newly generated text.

    Returns:
        dict: {"question": <decoded model continuation>}
    """
    prompt = f"Soru üret: {memory.description}"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128)
    # Inference only — disable autograd bookkeeping (saves memory/time on CPU).
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=64)
    # A causal LM's generate() output contains the prompt tokens followed by
    # the continuation. Decoding outputs[0] directly would echo the prompt
    # back to the client, so slice off the input length first.
    prompt_len = inputs["input_ids"].shape[-1]
    question = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return {"question": question}
|