import gradio as gr
import json
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# 1. Embedding model for semantic similarity
embedder = SentenceTransformer("paraphrase-MiniLM-L3-v2")  # small and fast

# 2. LLM for question generation (TinyLlama)
llm_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(llm_model_id)
model = AutoModelForCausalLM.from_pretrained(llm_model_id)
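
# Optional sketch (an addition, not in the original Space): run the LLM on a
# GPU when one is available; torch is already imported above. The input_ids in
# generate_question() are moved to model.device, so CPU-only machines still work.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
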
# 3. Load memory-question data
with open("memory_questions.json", "r") as f:
    memory_data = json.load(f)
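# Illustrative shape of memory_questions.json (hypothetical records; only the
# "description" field is read by this app):
# [
#   {"description": "I visited my grandmother in Izmir last summer."},
#   {"description": "My first day at university."}
# ]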
memory_texts = [item['description'] for item in memory_data]
memory_embeddings = embedder.encode(memory_texts)
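# memory_embeddings is a NumPy array of shape (len(memory_data), 384);
# paraphrase-MiniLM-L3-v2 produces 384-dimensional sentence vectors.
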
# 4. Find the most relevant memory and generate a question with the LLM
def generate_question(user_memory):
    # (a) Find the most similar stored memory via cosine similarity
    user_embedding = embedder.encode([user_memory])
    similarities = cosine_similarity(user_embedding, memory_embeddings)[0]
    best_match_index = np.argmax(similarities)
    matched_memory = memory_data[best_match_index]['description']
    # (b) Build the prompt in TinyLlama's chat-tag style
    prompt = (
        "<|system|>You are a helpful assistant who asks clear, meaningful "
        "questions based on short memories."
        f"<|user|>Memory: {matched_memory}\n"
        "Generate a question that starts with What, Why, Who, When, or How."
        "<|assistant|>"
    )
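    # Alternative sketch (assumes transformers >= 4.34): let the tokenizer's
    # built-in chat template format the roles instead of a hand-written string.
    # messages = [
    #     {"role": "system", "content": "You are a helpful assistant ..."},
    #     {"role": "user", "content": f"Memory: {matched_memory}"},
    # ]
    # prompt = tokenizer.apply_chat_template(
    #     messages, tokenize=False, add_generation_prompt=True
    # )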
    # (c) Generate with the LLM (greedy decoding, up to 50 new tokens)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    output = model.generate(input_ids, max_new_tokens=50, do_sample=False)
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    # (d) Keep only the text generated after the assistant tag
    if "<|assistant|>" in result:
        result = result.split("<|assistant|>")[-1].strip()
    return result
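
# Quick smoke test (hypothetical input; uncomment to try without the UI):
# print(generate_question("I broke my arm while skiing when I was ten."))
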
# 5. Gradio UI
iface = gr.Interface(
    fn=generate_question,
    inputs=gr.Textbox(label="Your Memory"),
    outputs=gr.Textbox(label="Generated Question"),
    title="MemoRease – LLM-Enhanced Question Generator",
    description="Enter a memory. We'll find a similar one and generate a clear, meaningful question using TinyLlama.",
)
iface.launch()