Update app.py
app.py CHANGED
@@ -1,24 +1,30 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import torch
 import gradio as gr
-import json
 
-#
-
-memory_data = json.load(f)
+# LLaMA 2 Chat model
+model_id = "meta-llama/Llama-2-7b-chat-hf"
 
-
-
-
-
-
-
-
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16,  # can be float32 if running on CPU
+    device_map="auto"
+)
+
+def generate_question(memory):
+    prompt = f"[INST] You are a helpful assistant. Based on this memory, generate a question that would help the user recall more details:\n\nMemory: {memory}\n\nQuestion: [/INST]"
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # move inputs to the model's device
+    outputs = model.generate(**inputs, max_new_tokens=50)
+    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return result.split("[/INST]")[-1].strip()  # split on [/INST], not "Question:", since the echoed prompt itself contains "Question:"
 
-#
+# Interface
 iface = gr.Interface(
     fn=generate_question,
     inputs=gr.Textbox(label="Your Memory"),
     outputs=gr.Textbox(label="Generated Question"),
-    title="
+    title="LLaMA Chat Question Generator"
 )
 
 iface.launch()
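Note that meta-llama/Llama-2-7b-chat-hf is a gated checkpoint: the from_pretrained calls above will raise an authentication error unless the Space has been granted access to the model and supplies a token. A minimal sketch of the authentication step, assuming the token is stored in a Space secret named HF_TOKEN (the secret name is an assumption, not something this commit configures):

    import os
    from huggingface_hub import login

    # Log in before the from_pretrained calls; HF_TOKEN is a hypothetical
    # secret name that must be configured in the Space settings.
    login(token=os.environ["HF_TOKEN"])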
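The new import line also pulls in pipeline, which the committed code never uses. For reference, a sketch of the same generation step written against the pipeline API instead of calling model.generate directly; generate_question_pipe and the return_full_text setting are illustrative, not part of this commit:

    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

    def generate_question_pipe(memory):
        # Same LLaMA 2 [INST] prompt format as generate_question above.
        prompt = f"[INST] You are a helpful assistant. Based on this memory, generate a question that would help the user recall more details:\n\nMemory: {memory}\n\nQuestion: [/INST]"
        # return_full_text=False drops the echoed prompt from the output.
        out = generator(prompt, max_new_tokens=50, return_full_text=False)
        return out[0]["generated_text"].strip()

Either way, only one of the two approaches (the pipeline or the explicit tokenizer/model calls) is needed.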