memorease committed
Commit 7efac98 · verified · 1 Parent(s): 1fb3845

Update app.py

Files changed (1)
app.py +19 -13
app.py CHANGED
@@ -1,24 +1,30 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import torch
 import gradio as gr
-import json

-# Load the training data
-with open("memory_questions.json", "r") as f:
-    memory_data = json.load(f)
+# LLaMA 2 Chat model
+model_id = "meta-llama/Llama-2-7b-chat-hf"

-# Question prediction function
-def generate_question(memory: str):
-    # Match manually against the closest entry (for demonstration purposes)
-    for item in memory_data:
-        if item['description'].lower() in memory.lower():
-            return item['question']
-    return "What question does this memory bring to your mind?"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16,  # can be float32 if running on CPU
+    device_map="auto"
+)
+
+def generate_question(memory):
+    prompt = f"[INST] You are a helpful assistant. Based on this memory, generate a question that would help the user recall more details:\n\nMemory: {memory}\n\nQuestion: [/INST]"
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(**inputs, max_new_tokens=50)
+    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return result.split("Question:")[-1].strip()

-# Gradio UI
+# Interface
 iface = gr.Interface(
     fn=generate_question,
     inputs=gr.Textbox(label="Your Memory"),
     outputs=gr.Textbox(label="Generated Question"),
-    title="MemoRease Question Generator"
+    title="LLaMA Chat Question Generator"
 )

 iface.launch()
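
One caveat on the new generate_question, noted here as an editor's aside rather than as part of the commit: with device_map="auto" the model weights can land on a GPU while tokenizer(...) returns CPU tensors, in which case model.generate fails with a device mismatch. Below is a minimal sketch of a variant that moves the inputs onto the model's device; it assumes the tokenizer and model objects defined above, and the helper name generate_question_on_device is hypothetical.

def generate_question_on_device(memory: str) -> str:
    # Hypothetical variant (not in the commit): same [INST] prompt format,
    # but the tokenized inputs follow the model onto whatever device
    # device_map="auto" assigned it.
    prompt = (
        "[INST] You are a helpful assistant. Based on this memory, "
        "generate a question that would help the user recall more "
        f"details:\n\nMemory: {memory}\n\nQuestion: [/INST]"
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=50)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the last "Question:" marker, as the
    # commit's version does.
    return result.split("Question:")[-1].strip()

As a quick smoke test with an illustrative memory string, generate_question_on_device("We watched the fireworks from the rooftop last summer.") should return a single follow-up question.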