import os

import gradio as gr
import numpy as np
from datasets import load_dataset, Dataset
from huggingface_hub import InferenceClient
from langchain_community.embeddings import HuggingFaceEmbeddings
from openai import OpenAI
from sklearn.neighbors import NearestNeighbors
# Configuration
# Sample questions:
# 1. What are the key components of an effective persona for prompt generation?
# 2. How can I create a persona that generates creative writing prompts?
# 3. What strategies can I use to make my persona-driven prompts more engaging?
DEFAULT_QUESTION = "Ask me anything in the context of persona-driven prompt generation..."
# Provider configuration (the API keys themselves -- OPENAI_API_KEY / HF_TOKEN --
# must already be set in your environment)
os.environ['OPENAI_BASE'] = "https://api.openai.com/v1"
os.environ['OPENAI_MODEL'] = "gpt-4"
os.environ['MODEL_PROVIDER'] = "huggingface"

model_provider = os.environ.get("MODEL_PROVIDER")

# Instantiate the client for openai v1.x
if model_provider.lower() == "openai":
    MODEL_NAME = os.environ['OPENAI_MODEL']
    client = OpenAI(
        base_url=os.environ.get("OPENAI_BASE"),
        api_key=os.environ.get("OPENAI_API_KEY")
    )
else:
    MODEL_NAME = "meta-llama/Llama-3.3-70B-Instruct"
    # Initialize the Hugging Face InferenceClient (serverless Inference API)
    hf_client = InferenceClient(
        model=MODEL_NAME,
        api_key=os.environ.get("HF_TOKEN")
    )
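
# Note: to exercise the OpenAI branch instead, remove (or override) the
# hard-coded MODEL_PROVIDER default above and export OPENAI_API_KEY before
# launching.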
# Load the Hugging Face dataset
dataset = load_dataset('tosin2013/persona-driven-prompt-generator', streaming=True)
dataset = Dataset.from_list(list(dataset['train']))
# Initialize embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Extract texts from the dataset
texts = dataset['input']
# Create embeddings for the texts
text_embeddings = embeddings.embed_documents(texts)
# Fit a nearest neighbor model
nn = NearestNeighbors(n_neighbors=5, metric='cosine')
nn.fit(np.array(text_embeddings))
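
# Note: with metric='cosine', scikit-learn falls back to brute-force search
# (its tree-based indexes do not support cosine), which is fine at this
# dataset's size. A quick sanity check of the index (hypothetical query,
# safe to delete):
# _, idx = nn.kneighbors([embeddings.embed_query("persona design")], n_neighbors=2)
# print([texts[i][:80] for i in idx[0]])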
def get_relevant_documents(query, k=5):
    """
    Retrieves the k most relevant documents to the query using cosine
    nearest-neighbor search over the precomputed embeddings.
    """
    query_embedding = embeddings.embed_query(query)
    distances, indices = nn.kneighbors([query_embedding], n_neighbors=k)
    relevant_docs = [texts[i] for i in indices[0]]
    return relevant_docs
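
# Example usage (hypothetical question; assumes the embeddings and index above
# have been built):
# docs = get_relevant_documents("How do I make persona prompts more engaging?", k=3)
# for doc in docs:
#     print(doc[:100])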
def generate_response(question, history):
    try:
        print(f"\n[LOG] Received question: {question}")

        # Retrieve documents relevant to the query
        relevant_docs = get_relevant_documents(question, k=3)
        print(f"[LOG] Retrieved {len(relevant_docs)} relevant documents")

        # Create the prompt for the LLM
        context = "\n".join(relevant_docs)
        prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer:"
        print(f"[LOG] Generated prompt: {prompt[:200]}...")  # Log first 200 chars of prompt

        if model_provider.lower() == "huggingface":
            print(f"[LOG] Using Hugging Face model (serverless): {MODEL_NAME}")
            messages = [
                {
                    "role": "system",
                    "content": "You are a helpful AI assistant. Answer the question based on the provided context."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            completion = hf_client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                max_tokens=500
            )
            response = completion.choices[0].message.content
            print(f"[LOG] Hugging Face response: {response[:200]}...")
        elif model_provider.lower() == "openai":
            print(f"[LOG] Using OpenAI model: {os.environ.get('OPENAI_MODEL')}")
            completion = client.chat.completions.create(
                model=os.environ.get("OPENAI_MODEL"),
                messages=[
                    {"role": "system", "content": "You are a helpful assistant. Answer the question based on the provided context."},
                    {"role": "user", "content": prompt},
                ]
            )
            response = completion.choices[0].message.content
            print(f"[LOG] OpenAI response: {response[:200]}...")  # Log first 200 chars of response
        else:
            raise ValueError(f"Unsupported MODEL_PROVIDER: {model_provider}")

        # Update chat history with the new message pair
        history.append((question, response))
        return history
    except Exception as e:
        error_msg = f"Error generating response: {str(e)}"
        print(f"[ERROR] {error_msg}")
        history.append((question, error_msg))
        return history
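
# Example invocation outside Gradio (hypothetical; `history` is a list of
# (user, assistant) tuples, matching gr.Chatbot's tuple message format):
# updated = generate_response("What makes a persona effective?", [])
# print(updated[-1][1])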
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(f"""
## Persona-Driven Prompt Generator QA Agent

**Current Model:** {MODEL_NAME}

The Custom Prompt Generator is a Python application that leverages Large Language Models (LLMs) and the LiteLLM library to dynamically generate personas, fetch knowledge sources, resolve conflicts, and produce tailored prompts. It is designed to assist with a variety of software development tasks by providing context-aware prompts based on user input and predefined personas.

Sample questions:
1. What are the key components of an effective persona for prompt generation?
2. How can I create a persona that generates creative writing prompts?
3. What are the main features of the persona generator?

Related repository: [persona-driven-prompt-generator](https://github.com/tosin2013/persona-driven-prompt-generator)
""")

    with gr.Row():
        chatbot = gr.Chatbot(label="Chat History")

    with gr.Row():
        question = gr.Textbox(
            label="Your Question",
            placeholder=DEFAULT_QUESTION
        )

    with gr.Row():
        submit_btn = gr.Button("Submit")
        clear_btn = gr.Button("Clear")

    # Event handlers
    submit_btn.click(
        generate_response,
        inputs=[question, chatbot],
        outputs=[chatbot]
    )
    clear_btn.click(
        lambda: (None, ""),
        inputs=[],
        outputs=[chatbot, question]
    )

if __name__ == "__main__":
    demo.launch()
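    # For a headless or container deployment one might instead use
    # (an assumption, not part of the original app):
    # demo.launch(server_name="0.0.0.0", server_port=7860)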