DemahAlmutairi committed on
Commit
f350335
·
verified ·
1 Parent(s): 0765a71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -3,7 +3,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
  import spaces
5
 
6
- # Load models for English and Arabic
7
  def load_model(model_name):
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
  model = AutoModelForCausalLM.from_pretrained(
@@ -19,12 +18,12 @@ def load_model(model_name):
19
  tokenizer=tokenizer,
20
  return_full_text=False,
21
  max_new_tokens=500,
22
- do_sample=True # Changed to enable sampling for more creative outputs
23
  )
24
  return generator
25
 
26
  @spaces.GPU
27
- def generate_text(model_name, prompt):
28
  generator = load_model(model_name)
29
  messages = [{"role": "user", "content": prompt}]
30
  output = generator(messages)
@@ -34,17 +33,20 @@ def generate_text(model_name, prompt):
34
  demo = gr.Interface(
35
  fn=generate_text,
36
  inputs=[
37
- gr.Radio(choices=["microsoft/Phi-3-mini-4k-instruct", "ALLaM-AI/ALLaM-7B-Instruct-preview"], label="Select Language"),
38
- gr.Textbox(lines=2, placeholder="Enter your story prompt here...")
 
 
 
 
39
  ],
40
- outputs=gr.Textbox(label="Generated Story"),
41
- title="Kids Storyteller",
42
- description="Choose a language and enter a prompt to generate a fun story for kids!",
43
  examples=[
44
- ["microsoft/Phi-3-mini-4k-instruct", "Once upon a time in a magical forest..."],
45
- ["ALLaM-AI/ALLaM-7B-Instruct-preview", "في قديم الزمان في غابة سحرية..."]
46
  ]
47
  )
48
 
49
  demo.launch()
50
-
 
3
  import torch
4
  import spaces
5
 
 
6
  def load_model(model_name):
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
  model = AutoModelForCausalLM.from_pretrained(
 
18
  tokenizer=tokenizer,
19
  return_full_text=False,
20
  max_new_tokens=500,
21
+ do_sample=False
22
  )
23
  return generator
24
 
25
  @spaces.GPU
26
+ def generate_text(prompt, model_name):
27
  generator = load_model(model_name)
28
  messages = [{"role": "user", "content": prompt}]
29
  output = generator(messages)
 
33
  demo = gr.Interface(
34
  fn=generate_text,
35
  inputs=[
36
+ gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
37
+ gr.Dropdown(
38
+ choices=["Qwen/Qwen2.5-1.5B-Instruct","microsoft/Phi-3-mini-4k-instruct", "ALLaM-AI/ALLaM-7B-Instruct-preview"],
39
+ label="Choose Model",
40
+ value="ALLaM-AI/ALLaM-7B-Instruct-preview"
41
+ )
42
  ],
43
+ outputs=gr.Textbox(label="Generated Text"),
44
+ title="Text Generator",
45
+ description="Enter a prompt and generate text using one of the available models.",
46
  examples=[
47
+ ["Tell me a funny joke about chickens.", "microsoft/Phi-3-mini-4k-instruct"],
48
+ ["أخبرني نكتة مضحكة عن الدجاج.", "ALLaM-AI/ALLaM-7B-Instruct-preview"]
49
  ]
50
  )
51
 
52
  demo.launch()