SeedOfEvil committed on
Commit 368407f · verified
1 Parent(s): aa4117e

Create app.py

Files changed (1)
app.py +34 -0
app.py ADDED
@@ -0,0 +1,34 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ # Load the large text-generation model on the GPU.
+ # Here we use EleutherAI/gpt-j-6B: https://huggingface.co/EleutherAI/gpt-j-6B
+ # Setting device=0 tells the pipeline to use GPU 0.
+ generator = pipeline("text-generation", model="EleutherAI/gpt-j-6B", device=0)
+
+ def expand_prompt(prompt, num_variants=5, max_length=100):
+     """
+     Given a basic prompt, generate `num_variants` expanded prompts using GPT-J-6B.
+     """
+     # Generate multiple completions with sampling enabled.
+     outputs = generator(prompt, max_length=max_length, num_return_sequences=num_variants, do_sample=True)
+     # Strip whitespace from each completion and join them with blank lines.
+     expanded = [out["generated_text"].strip() for out in outputs]
+     return "\n\n".join(expanded)
+
+ # Create the Gradio interface.
+ iface = gr.Interface(
+     fn=expand_prompt,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your basic prompt here...", label="Basic Prompt"),
+     outputs=gr.Textbox(lines=10, label="Expanded Prompts"),
+     title="Prompt Expansion Generator",
+     description=(
+         "Enter a basic prompt and receive 5 creative, expanded prompt variants. "
+         "This tool leverages the EleutherAI/gpt-j-6B model on an A100 GPU for fast, expressive prompt expansion. "
+         "Simply copy the output for use with your downstream image-generation pipeline."
+     )
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
+
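A quick way to exercise the new app locally is to import `expand_prompt` straight from app.py instead of going through the Gradio UI. This is a sketch, not part of the commit: it assumes the GPT-J-6B weights can be downloaded and that a CUDA device is available, and the prompt text and argument values are illustrative only.

from app import expand_prompt

# Request three sampled expansions of a short prompt (hypothetical test values).
variants = expand_prompt("a castle at sunset, digital art", num_variants=3, max_length=60)
print(variants)

Importing app.py builds the Interface but does not call launch(), since that call is guarded by the __main__ check, so the snippet only loads the model and runs the generation step.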