akashmadisetty commited on
Commit
f664cc2
·
1 Parent(s): f679f19
Files changed (2) hide show
  1. app.py +120 -264
  2. testing_grok.py +205 -0
app.py CHANGED
@@ -25,82 +25,59 @@ def load_model(hf_token):
25
  """Load the model with the provided token"""
26
  global global_model, global_tokenizer, model_loaded, loaded_model_name
27
 
28
- # Initially assume tabs should be hidden until successful load
29
- initial_tabs_update = gr.Tabs.update(visible=False)
30
 
31
  if not hf_token:
32
  model_loaded = False
33
  loaded_model_name = "None"
34
- return "⚠️ Please enter your Hugging Face token to use the model.", initial_tabs_update
35
 
36
  try:
37
- # Try different model versions from smallest to largest
38
  model_options = [
39
- "google/gemma-2b-it",
40
- "google/gemma-7b-it",
41
- "google/gemma-2b",
42
- "google/gemma-7b",
43
- "TinyLlama/TinyLlama-1.1B-Chat-v1.0" # Fallback
44
  ]
45
-
46
- print(f"Attempting to load models with token starting with: {hf_token[:5]}...")
47
  loaded_successfully = False
48
  for model_name in model_options:
49
  try:
50
- print(f"\n--- Attempting to load model: {model_name} ---")
51
  is_gemma = "gemma" in model_name.lower()
52
  current_token = hf_token if is_gemma else None
53
-
54
  print("Loading tokenizer...")
55
- global_tokenizer = AutoTokenizer.from_pretrained(
56
- model_name,
57
- token=current_token
58
- )
59
- print("Tokenizer loaded successfully.")
60
-
61
- print(f"Loading model {model_name}...")
62
  global_model = AutoModelForCausalLM.from_pretrained(
63
- model_name,
64
- torch_dtype=torch.float16, # Using float16 for broader compatibility
65
- device_map="auto",
66
- token=current_token
67
  )
68
- print(f"Model {model_name} loaded successfully!")
69
-
70
  model_loaded = True
71
  loaded_model_name = model_name
72
  loaded_successfully = True
73
- tabs_update = gr.Tabs.update(visible=True)
74
- status_msg = f"✅ Model '{model_name}' loaded successfully!"
 
75
  if "tinyllama" in model_name.lower():
76
- status_msg = f"✅ Fallback model '{model_name}' loaded successfully! Limited capabilities compared to Gemma."
77
  return status_msg, tabs_update
78
-
79
- except ImportError as import_err:
80
- print(f"Import Error loading {model_name}: {import_err}. Check dependencies (e.g., bitsandbytes, accelerate).")
81
  continue
82
- except Exception as specific_e:
83
- print(f"Failed to load {model_name}: {specific_e}")
84
- if "401 Client Error" in str(specific_e) or "requires you to be logged in" in str(specific_e) and is_gemma:
85
- print("Authentication error likely. Check token and license agreement.")
86
  continue
87
-
88
  if not loaded_successfully:
89
- model_loaded = False
90
- loaded_model_name = "None"
91
- print("Could not load any model version.")
92
- return "❌ Could not load any model. Please check your token, license acceptance, dependencies, and network connection.", initial_tabs_update
93
-
94
  except Exception as e:
95
- model_loaded = False
96
- loaded_model_name = "None"
97
- error_msg = str(e)
98
- print(f"Error in load_model: {error_msg}")
99
- traceback.print_exc()
100
- if "401 Client Error" in error_msg or "requires you to be logged in" in error_msg :
101
- return "❌ Authentication failed. Check token/license.", initial_tabs_update
102
- else:
103
- return f"❌ Unexpected error during model loading: {error_msg}", initial_tabs_update
104
 
105
 
106
  def generate_prompt(task_type, **kwargs):
@@ -141,6 +118,7 @@ def generate_text(prompt, max_new_tokens=1024, temperature=0.7, top_p=0.9):
141
  global global_model, global_tokenizer, model_loaded, loaded_model_name
142
 
143
  print(f"\n--- Generating Text ---")
 
144
  print(f"Model: {loaded_model_name}")
145
  print(f"Params: max_new_tokens={max_new_tokens}, temp={temperature}, top_p={top_p}")
146
  print(f"Prompt (start): {prompt[:150]}...")
@@ -154,11 +132,9 @@ def generate_text(prompt, max_new_tokens=1024, temperature=0.7, top_p=0.9):
154
  chat_prompt = prompt # Default to raw prompt
155
  if loaded_model_name and ("it" in loaded_model_name.lower() or "instruct" in loaded_model_name.lower() or "chat" in loaded_model_name.lower()):
156
  if "gemma" in loaded_model_name.lower():
157
- # Use Gemma's specific format
158
  chat_prompt = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
159
  elif "tinyllama" in loaded_model_name.lower():
160
- # Use TinyLlama's chat format
161
- chat_prompt = f"<|system|>\nYou are a friendly chatbot.</s>\n<|user|>\n{prompt}</s>\n<|assistant|>\n"
162
  else: # Generic instruction format
163
  chat_prompt = f"User: {prompt}\nAssistant:"
164
 
@@ -168,11 +144,10 @@ def generate_text(prompt, max_new_tokens=1024, temperature=0.7, top_p=0.9):
168
 
169
  effective_max_new_tokens = min(int(max_new_tokens), 2048)
170
 
171
- # Handle potential None for eos_token_id
172
  eos_token_id = global_tokenizer.eos_token_id
173
  if eos_token_id is None:
174
  print("Warning: eos_token_id is None, using default 50256.")
175
- eos_token_id = 50256 # A common default EOS token ID
176
 
177
  generation_args = {
178
  "input_ids": inputs.input_ids,
@@ -181,9 +156,8 @@ def generate_text(prompt, max_new_tokens=1024, temperature=0.7, top_p=0.9):
181
  "do_sample": True,
182
  "temperature": float(temperature),
183
  "top_p": float(top_p),
184
- "pad_token_id": eos_token_id # Use determined EOS or default
185
  }
186
-
187
  print(f"Generation args: {generation_args}")
188
 
189
  with torch.no_grad():
@@ -201,15 +175,17 @@ def generate_text(prompt, max_new_tokens=1024, temperature=0.7, top_p=0.9):
201
  print(f"Generation error: {error_msg}")
202
  traceback.print_exc()
203
  if "CUDA out of memory" in error_msg:
204
- return f"❌ Error: CUDA out of memory. Try reducing 'Max New Tokens' or using a smaller model."
205
  elif "probability tensor contains nan" in error_msg or "invalid value encountered" in error_msg:
206
- return f"❌ Error: Generation failed (invalid probability). Try adjusting Temperature/Top-P or modifying the prompt."
207
  else:
208
  return f"❌ Error during text generation: {error_msg}"
209
 
 
210
  # --- UI Components & Layout ---
211
 
212
  def create_parameter_ui():
 
213
  with gr.Accordion("✨ Generation Parameters", open=False):
214
  with gr.Row():
215
  max_new_tokens = gr.Slider(minimum=64, maximum=2048, value=512, step=64, label="Max New Tokens", info="Max tokens to generate.")
@@ -224,6 +200,7 @@ lang_map = {"Python": "python", "JavaScript": "javascript", "Java": "java", "C++
224
  with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabilities Demo") as demo:
225
 
226
  # Header
 
227
  gr.Markdown(
228
  """
229
  <div style="text-align: center; margin-bottom: 20px;"><h1><span style="font-size: 1.5em;">🤖</span> Gemma Capabilities Demo</h1>
@@ -231,8 +208,10 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
231
  <p style="font-size: 0.9em;"><a href="https://huggingface.co/google/gemma-7b-it" target="_blank">[Accept Gemma License Here]</a></p></div>"""
232
  )
233
 
 
234
  # --- Authentication ---
235
- with gr.Group(): # Removed variant="panel"
 
236
  gr.Markdown("### 🔑 Authentication")
237
  with gr.Row():
238
  with gr.Column(scale=4):
@@ -242,12 +221,13 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
242
  auth_status = gr.Markdown("ℹ️ Enter token & click 'Load Model'. May take time.")
243
  gr.Markdown(
244
  "**Token Info:** Get from [HF Settings](https://huggingface.co/settings/tokens) (read access). Ensure Gemma license is accepted.",
245
- elem_id="token-info" # Optional ID for styling if needed later
246
  )
247
 
248
  # --- Main Content Tabs ---
 
249
  with gr.Tabs(elem_id="main_tabs", visible=False) as tabs:
250
-
251
  # --- Text Generation Tab ---
252
  with gr.TabItem("📝 Creative & Informational"):
253
  with gr.Row():
@@ -263,26 +243,20 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
263
  with gr.Group(visible=False) as custom_prompt_group:
264
  custom_prompt = gr.Textbox(label="Custom Prompt", placeholder="Enter full prompt...", lines=5)
265
  text_gen_params = create_parameter_ui()
266
- # Removed gr.Spacer
267
  generate_text_btn = gr.Button("Generate Text", variant="primary")
268
  with gr.Column(scale=1):
269
  gr.Markdown("#### Generated Output")
270
  text_output = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
271
 
272
- # Visibility logic
273
  def update_text_gen_visibility(choice):
274
  return { creative_options: gr.update(visible=choice == "Creative Writing"),
275
  info_options: gr.update(visible=choice == "Informational Writing"),
276
  custom_prompt_group: gr.update(visible=choice == "Custom Prompt") }
277
  text_gen_type.change(update_text_gen_visibility, text_gen_type, [creative_options, info_options, custom_prompt_group], queue=False)
278
 
279
- # Click handler
280
  def text_gen_click(gen_type, style, c_topic, fmt_type, i_topic, custom_pr, *params):
281
- task_map = {"Creative Writing": ("creative", {"style": style, "topic": c_topic}),
282
- "Informational Writing": ("informational", {"format_type": fmt_type, "topic": i_topic}),
283
- "Custom Prompt": ("custom", {"prompt": custom_pr})}
284
- task_type, kwargs = task_map.get(gen_type, ("custom", {"prompt": custom_pr}))
285
- # Apply safe_value inside handler where needed
286
  if task_type == "creative": kwargs = {"style": safe_value(style, "story"), "topic": safe_value(c_topic, "[topic]")}
287
  elif task_type == "informational": kwargs = {"format_type": safe_value(fmt_type, "article"), "topic": safe_value(i_topic, "[topic]")}
288
  else: kwargs = {"prompt": safe_value(custom_pr, "Write something.")}
@@ -290,14 +264,12 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
290
  return generate_text(final_prompt, *params)
291
  generate_text_btn.click(text_gen_click, [text_gen_type, style, creative_topic, format_type, info_topic, custom_prompt, *text_gen_params], text_output)
292
 
293
- # Examples
294
  gr.Examples( examples=[ ["Creative Writing", "poem", "sound of rain", "", "", "", 512, 0.7, 0.9],
295
  ["Informational Writing", "", "", "explanation", "photosynthesis", "", 768, 0.6, 0.9],
296
  ["Custom Prompt", "", "", "", "", "Dialogue: cat and dog discuss humans.", 512, 0.8, 0.95] ],
297
- inputs=[text_gen_type, style, creative_topic, format_type, info_topic, custom_prompt, *text_gen_params[:3]], # Pass UI elements
298
  outputs=text_output, label="Try examples...")
299
 
300
-
301
  # --- Brainstorming Tab ---
302
  with gr.TabItem("🧠 Brainstorming"):
303
  with gr.Row():
@@ -306,12 +278,10 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
306
  brainstorm_category = gr.Dropdown(["project", "business", "creative", "solution", "content", "feature", "product name"], label="Category", value="project")
307
  brainstorm_topic = gr.Textbox(label="Topic/Problem", placeholder="e.g., reducing plastic waste", value="unique mobile app ideas", lines=3)
308
  brainstorm_params = create_parameter_ui()
309
- # Removed gr.Spacer
310
  brainstorm_btn = gr.Button("Generate Ideas", variant="primary")
311
  with gr.Column(scale=1):
312
  gr.Markdown("#### Generated Ideas")
313
  brainstorm_output = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
314
-
315
  def brainstorm_click(category, topic, *params):
316
  prompt = generate_prompt("brainstorm", category=safe_value(category, "project"), topic=safe_value(topic, "ideas"))
317
  return generate_text(prompt, *params)
@@ -321,7 +291,6 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
321
  ["creative", "fantasy novel themes", 512, 0.85, 0.95] ],
322
  inputs=[brainstorm_category, brainstorm_topic, *brainstorm_params[:3]], outputs=brainstorm_output, label="Try examples...")
323
 
324
-
325
  # --- Code Tab ---
326
  with gr.TabItem("💻 Code"):
327
  with gr.Tabs():
@@ -332,249 +301,136 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabiliti
332
  code_lang_gen = gr.Dropdown(list(lang_map.keys())[:-1], label="Language", value="Python")
333
  code_task = gr.Textbox(label="Task", placeholder="e.g., function for factorial", value="Python class for calculator", lines=4)
334
  code_gen_params = create_parameter_ui()
335
- # Removed gr.Spacer
336
  code_gen_btn = gr.Button("Generate Code", variant="primary")
337
  with gr.Column(scale=1):
338
  gr.Markdown("#### Generated Code")
339
  code_output = gr.Code(label="Result", language="python", lines=25, interactive=False)
340
-
341
  def gen_code_click(lang, task, *params):
342
  prompt = generate_prompt("code_generate", language=safe_value(lang, "Python"), task=safe_value(task, "hello world"))
343
- result = generate_text(prompt, *params)
344
- # Basic code block extraction
345
- if "```" in result:
346
- parts = result.split("```")
347
- if len(parts) >= 2:
348
- block = parts[1]
349
- if '\n' in block: first_line, rest = block.split('\n', 1); return rest.strip() if first_line.strip().lower() == lang.lower() else block.strip()
350
- else: return block.strip()
351
  return result.strip()
352
  def update_gen_lang_display(lang): return gr.Code.update(language=lang_map.get(lang, "plaintext"))
353
  code_lang_gen.change(update_gen_lang_display, code_lang_gen, code_output, queue=False)
354
  code_gen_btn.click(gen_code_click, [code_lang_gen, code_task, *code_gen_params], code_output)
355
- gr.Examples([ ["JavaScript", "email validation regex function", 768, 0.6, 0.9],
356
- ["SQL", "select users > 30 yrs old", 512, 0.5, 0.8],
357
- ["HTML", "basic portfolio structure", 1024, 0.7, 0.9] ],
358
- inputs=[code_lang_gen, code_task, *code_gen_params[:3]], outputs=code_output, label="Try examples...")
359
 
360
  with gr.TabItem("Explain"):
361
  with gr.Row():
362
- with gr.Column(scale=1):
363
- gr.Markdown("#### Setup")
364
- code_lang_explain = gr.Dropdown(list(lang_map.keys()), label="Language", value="Python")
365
- code_to_explain = gr.Code(label="Code to Explain", language="python", lines=15)
366
- explain_code_params = create_parameter_ui()
367
- # Removed gr.Spacer
368
- explain_code_btn = gr.Button("Explain Code", variant="primary")
369
- with gr.Column(scale=1):
370
- gr.Markdown("#### Explanation")
371
- code_explanation = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
372
-
373
- def explain_code_click(lang, code, *params):
374
- code_content = safe_value(code['code'] if isinstance(code, dict) else code, "# Empty code")
375
- prompt = generate_prompt("code_explain", language=safe_value(lang, "code"), code=code_content)
376
- return generate_text(prompt, *params)
377
  def update_explain_lang_display(lang): return gr.Code.update(language=lang_map.get(lang, "plaintext"))
378
  code_lang_explain.change(update_explain_lang_display, code_lang_explain, code_to_explain, queue=False)
379
  explain_code_btn.click(explain_code_click, [code_lang_explain, code_to_explain, *explain_code_params], code_explanation)
380
 
381
-
382
  with gr.TabItem("Debug"):
383
  with gr.Row():
384
- with gr.Column(scale=1):
385
- gr.Markdown("#### Setup")
386
- code_lang_debug = gr.Dropdown(list(lang_map.keys()), label="Language", value="Python")
387
- code_to_debug = gr.Code(label="Buggy Code", language="python", lines=15, value="def avg(nums):\n # Potential div by zero\n return sum(nums)/len(nums)")
388
- debug_code_params = create_parameter_ui()
389
- # Removed gr.Spacer
390
- debug_code_btn = gr.Button("Debug Code", variant="primary")
391
- with gr.Column(scale=1):
392
- gr.Markdown("#### Debugging Analysis")
393
- debug_result = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
394
-
395
- def debug_code_click(lang, code, *params):
396
- code_content = safe_value(code['code'] if isinstance(code, dict) else code, "# Empty code")
397
- prompt = generate_prompt("code_debug", language=safe_value(lang, "code"), code=code_content)
398
- return generate_text(prompt, *params)
399
  def update_debug_lang_display(lang): return gr.Code.update(language=lang_map.get(lang, "plaintext"))
400
  code_lang_debug.change(update_debug_lang_display, code_lang_debug, code_to_debug, queue=False)
401
  debug_code_btn.click(debug_code_click, [code_lang_debug, code_to_debug, *debug_code_params], debug_result)
402
 
403
-
404
  # --- Comprehension Tab ---
405
  with gr.TabItem("📚 Comprehension"):
406
  with gr.Tabs():
407
  with gr.TabItem("Summarize"):
408
  with gr.Row():
409
- with gr.Column(scale=1):
410
- gr.Markdown("#### Setup")
411
- summarize_text = gr.Textbox(label="Text to Summarize", lines=15, placeholder="Paste long text...")
412
- summarize_params = create_parameter_ui()
413
- # Removed gr.Spacer
414
- summarize_btn = gr.Button("Summarize Text", variant="primary")
415
- with gr.Column(scale=1):
416
- gr.Markdown("#### Summary")
417
- summary_output = gr.Textbox(label="Result", lines=15, interactive=False, show_copy_button=True)
418
- def summarize_click(text, *params):
419
- prompt = generate_prompt("summarize", text=safe_value(text, "[empty text]"))
420
- # Adjust max tokens for summary specifically if needed
421
- p_list = list(params); p_list[0] = min(max(int(p_list[0]), 64), 512)
422
- return generate_text(prompt, *p_list)
423
  summarize_btn.click(summarize_click, [summarize_text, *summarize_params], summary_output)
424
-
425
-
426
  with gr.TabItem("Q & A"):
427
  with gr.Row():
428
- with gr.Column(scale=1):
429
- gr.Markdown("#### Setup")
430
- qa_text = gr.Textbox(label="Context Text", lines=10, placeholder="Paste text containing answer...")
431
- qa_question = gr.Textbox(label="Question", placeholder="Ask question about text...")
432
- qa_params = create_parameter_ui()
433
- # Removed gr.Spacer
434
- qa_btn = gr.Button("Get Answer", variant="primary")
435
- with gr.Column(scale=1):
436
- gr.Markdown("#### Answer")
437
- qa_output = gr.Textbox(label="Result", lines=10, interactive=False, show_copy_button=True)
438
- def qa_click(text, question, *params):
439
- prompt = generate_prompt("qa", text=safe_value(text, "[context]"), question=safe_value(question,"[question]"))
440
- p_list = list(params); p_list[0] = min(max(int(p_list[0]), 32), 256)
441
- return generate_text(prompt, *p_list)
442
  qa_btn.click(qa_click, [qa_text, qa_question, *qa_params], qa_output)
443
-
444
-
445
  with gr.TabItem("Translate"):
446
  with gr.Row():
447
- with gr.Column(scale=1):
448
- gr.Markdown("#### Setup")
449
- translate_text = gr.Textbox(label="Text to Translate", lines=8, placeholder="Enter text...")
450
- target_lang = gr.Dropdown(["French", "Spanish", "German", "Japanese", "Chinese", "Russian", "Arabic", "Hindi", "Portuguese", "Italian"], label="Translate To", value="French")
451
- translate_params = create_parameter_ui()
452
- # Removed gr.Spacer
453
- translate_btn = gr.Button("Translate Text", variant="primary")
454
- with gr.Column(scale=1):
455
- gr.Markdown("#### Translation")
456
- translation_output = gr.Textbox(label="Result", lines=8, interactive=False, show_copy_button=True)
457
- def translate_click(text, lang, *params):
458
- prompt = generate_prompt("translate", text=safe_value(text,"[text]"), target_lang=safe_value(lang,"French"))
459
- p_list = list(params); p_list[0] = max(int(p_list[0]), 64)
460
- return generate_text(prompt, *p_list)
461
  translate_btn.click(translate_click, [translate_text, target_lang, *translate_params], translation_output)
462
 
463
-
464
  # --- More Tasks Tab ---
465
  with gr.TabItem("🛠️ More Tasks"):
466
  with gr.Tabs():
467
- with gr.TabItem("Content Creation"):
468
  with gr.Row():
469
- with gr.Column(scale=1):
470
- gr.Markdown("#### Setup")
471
- content_type = gr.Dropdown(["blog post outline", "social media post (Twitter)", "social media post (LinkedIn)", "marketing email subject line", "product description", "press release intro"], label="Content Type", value="blog post outline")
472
- content_topic = gr.Textbox(label="Topic", value="sustainable travel tips", lines=2)
473
- content_audience = gr.Textbox(label="Audience", value="eco-conscious millennials")
474
- content_params = create_parameter_ui()
475
- # Removed gr.Spacer
476
- content_btn = gr.Button("Generate Content", variant="primary")
477
- with gr.Column(scale=1):
478
- gr.Markdown("#### Generated Content")
479
- content_output = gr.Textbox(label="Result", lines=20, interactive=False, show_copy_button=True)
480
- def content_click(c_type, topic, audience, *params):
481
- prompt = generate_prompt("content_creation", content_type=safe_value(c_type,"text"), topic=safe_value(topic,"[topic]"), audience=safe_value(audience,"[audience]"))
482
- return generate_text(prompt, *params)
483
  content_btn.click(content_click, [content_type, content_topic, content_audience, *content_params], content_output)
484
-
485
- with gr.TabItem("Email Drafting"):
486
- with gr.Row():
487
- with gr.Column(scale=1):
488
- gr.Markdown("#### Setup")
489
- email_type = gr.Dropdown(["job inquiry", "meeting request", "follow-up", "thank you", "support response", "sales outreach"], label="Email Type", value="meeting request")
490
- email_context = gr.Textbox(label="Context/Points", lines=5, value="Request meeting next week re: project X. Suggest Tue/Wed afternoon.")
491
- email_params = create_parameter_ui()
492
- # Removed gr.Spacer
493
- email_btn = gr.Button("Generate Draft", variant="primary")
494
- with gr.Column(scale=1):
495
- gr.Markdown("#### Generated Draft")
496
- email_output = gr.Textbox(label="Result", lines=20, interactive=False, show_copy_button=True)
497
- def email_click(e_type, context, *params):
498
- prompt = generate_prompt("email_draft", email_type=safe_value(e_type,"email"), context=safe_value(context,"[context]"))
499
- return generate_text(prompt, *params)
500
- email_btn.click(email_click, [email_type, email_context, *email_params], email_output)
501
-
502
- with gr.TabItem("Doc Editing"):
503
  with gr.Row():
504
- with gr.Column(scale=1):
505
- gr.Markdown("#### Setup")
506
- edit_text = gr.Textbox(label="Text to Edit", lines=10, placeholder="Paste text...")
507
- edit_type = gr.Dropdown(["improve clarity", "fix grammar/spelling", "make concise", "make formal", "make casual", "simplify"], label="Improve For", value="improve clarity")
508
- edit_params = create_parameter_ui()
509
- # Removed gr.Spacer
510
- edit_btn = gr.Button("Edit Text", variant="primary")
511
- with gr.Column(scale=1):
512
- gr.Markdown("#### Edited Text")
513
- edit_output = gr.Textbox(label="Result", lines=10, interactive=False, show_copy_button=True)
514
- def edit_click(text, e_type, *params):
515
- prompt = generate_prompt("document_edit", text=safe_value(text,"[text]"), edit_type=safe_value(e_type,"clarity"))
516
- p_list = list(params); input_tokens = len(safe_value(text,"").split()); p_list[0] = max(int(p_list[0]), input_tokens + 64)
517
- return generate_text(prompt, *p_list)
518
  edit_btn.click(edit_click, [edit_text, edit_type, *edit_params], edit_output)
519
-
520
- with gr.TabItem("Classification"):
521
- with gr.Row():
522
- with gr.Column(scale=1):
523
- gr.Markdown("#### Setup")
524
- classify_text = gr.Textbox(label="Text to Classify", lines=8, value="Sci-fi movie explores AI consciousness.")
525
- classify_categories = gr.Textbox(label="Categories (comma-sep)", value="Tech, Entertainment, Science, Politics")
526
- classify_params = create_parameter_ui()
527
- # Removed gr.Spacer
528
- classify_btn = gr.Button("Classify Text", variant="primary")
529
- with gr.Column(scale=1):
530
- gr.Markdown("#### Classification")
531
- classify_output = gr.Textbox(label="Predicted Category", lines=2, interactive=False, show_copy_button=True)
532
- def classify_click(text, cats, *params):
533
- prompt = generate_prompt("classify", text=safe_value(text,"[text]"), categories=safe_value(cats,"cat1, cat2"))
534
- p_list = list(params); p_list[0] = min(max(int(p_list[0]), 16), 128)
535
- raw = generate_text(prompt, *p_list)
536
- # Basic post-processing attempt
537
- lines = raw.split('\n'); last = lines[-1].strip(); possible = [c.strip().lower() for c in cats.split(',')]; return last if last.lower() in possible else raw
538
- classify_btn.click(classify_click, [classify_text, classify_categories, *classify_params], classify_output)
539
-
540
- with gr.TabItem("Data Extraction"):
541
- with gr.Row():
542
- with gr.Column(scale=1):
543
- gr.Markdown("#### Setup")
544
- extract_text = gr.Textbox(label="Source Text", lines=10, value="Order #123 by Jane ([email protected]). Total: $99. Shipped: 123 Main St.")
545
- extract_data_points = gr.Textbox(label="Data Points (comma-sep)", value="order num, name, email, total, address")
546
- extract_params = create_parameter_ui()
547
- # Removed gr.Spacer
548
- extract_btn = gr.Button("Extract Data", variant="primary")
549
- with gr.Column(scale=1):
550
- gr.Markdown("#### Extracted Data")
551
- extract_output = gr.Textbox(label="Result (JSON or Key-Value)", lines=10, interactive=False, show_copy_button=True)
552
- def extract_click(text, points, *params):
553
- prompt = generate_prompt("data_extract", text=safe_value(text,"[text]"), data_points=safe_value(points,"info"))
554
- return generate_text(prompt, *params)
555
- extract_btn.click(extract_click, [extract_text, extract_data_points, *extract_params], extract_output)
556
 
557
 
558
  # --- Authentication Handler & Footer ---
559
- footer_status = gr.Markdown(f"...", elem_id="footer-status-md") # Placeholder
560
 
 
561
  def handle_auth(token):
562
- yield "⏳ Authenticating & loading model...", gr.Tabs.update(visible=False)
563
- status_message, tabs_update = load_model(token)
564
- yield status_message, tabs_update
 
 
565
 
 
566
  def update_footer_status(status_text): # Updates footer based on global state
567
- return gr.Markdown.update(value=f"""
 
568
  <hr><div style="text-align: center; font-size: 0.9em; color: #777;">
569
  <p>Powered by Hugging Face 🤗 Transformers & Gradio. Model: <strong>{loaded_model_name if model_loaded else 'None'}</strong>.</p>
570
  <p>Review outputs carefully. Models may generate inaccurate information.</p></div>""")
571
 
572
- auth_button.click(handle_auth, hf_token, [auth_status, tabs], queue=True)
 
 
 
 
 
 
573
  # Update footer whenever auth status text changes
574
- auth_status.change(update_footer_status, auth_status, footer_status, queue=False)
 
 
 
 
 
575
  # Initial footer update on load
576
- demo.load(update_footer_status, auth_status, footer_status, queue=False)
 
 
 
 
 
577
 
578
 
579
  # --- Launch App ---
580
- demo.launch(share=False)
 
25
  """Load the model with the provided token"""
26
  global global_model, global_tokenizer, model_loaded, loaded_model_name
27
 
28
+ # --- FIX: Use gr.update() for visibility ---
29
+ initial_tabs_update = gr.update(visible=False) # Generic update targeted by outputs list
30
 
31
  if not hf_token:
32
  model_loaded = False
33
  loaded_model_name = "None"
34
+ return "⚠️ Please enter your Hugging Face token.", initial_tabs_update
35
 
36
  try:
 
37
  model_options = [
38
+ "google/gemma-2b-it", "google/gemma-7b-it",
39
+ "google/gemma-2b", "google/gemma-7b",
40
+ "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
 
 
41
  ]
42
+ print(f"Attempting loading with token: {hf_token[:5]}...")
 
43
  loaded_successfully = False
44
  for model_name in model_options:
45
  try:
46
+ print(f"\n--- Attempting: {model_name} ---")
47
  is_gemma = "gemma" in model_name.lower()
48
  current_token = hf_token if is_gemma else None
 
49
  print("Loading tokenizer...")
50
+ global_tokenizer = AutoTokenizer.from_pretrained(model_name, token=current_token)
51
+ print("Loading model...")
 
 
 
 
 
52
  global_model = AutoModelForCausalLM.from_pretrained(
53
+ model_name, torch_dtype=torch.float16,
54
+ device_map="auto", token=current_token
 
 
55
  )
56
+ print(f"Success: {model_name}")
 
57
  model_loaded = True
58
  loaded_model_name = model_name
59
  loaded_successfully = True
60
+ # --- FIX: Use gr.update() for visibility ---
61
+ tabs_update = gr.update(visible=True) # Generic update targeted by outputs list
62
+ status_msg = f"✅ Model '{model_name}' loaded!"
63
  if "tinyllama" in model_name.lower():
64
+ status_msg = f"✅ Fallback '{model_name}' loaded!"
65
  return status_msg, tabs_update
66
+ except ImportError as ie:
67
+ print(f"Import Error ({model_name}): {ie}. Check dependencies.")
 
68
  continue
69
+ except Exception as e:
70
+ print(f"Failed ({model_name}): {e}")
71
+ if "401" in str(e) or "logged in" in str(e) and is_gemma: print("Auth error likely.")
 
72
  continue
 
73
  if not loaded_successfully:
74
+ model_loaded = False; loaded_model_name = "None"
75
+ return "❌ Failed to load any model. Check token/license/deps/network.", initial_tabs_update
 
 
 
76
  except Exception as e:
77
+ model_loaded = False; loaded_model_name = "None"
78
+ print(f"Outer load error: {e}"); traceback.print_exc()
79
+ if "401" in str(e) or "logged in" in str(e): return "❌ Auth failed.", initial_tabs_update
80
+ else: return f" Unexpected load error: {e}", initial_tabs_update
 
 
 
 
 
81
 
82
 
83
  def generate_prompt(task_type, **kwargs):
 
118
  global global_model, global_tokenizer, model_loaded, loaded_model_name
119
 
120
  print(f"\n--- Generating Text ---")
121
+ # ... (rest of the function remains the same as the previous valid version) ...
122
  print(f"Model: {loaded_model_name}")
123
  print(f"Params: max_new_tokens={max_new_tokens}, temp={temperature}, top_p={top_p}")
124
  print(f"Prompt (start): {prompt[:150]}...")
 
132
  chat_prompt = prompt # Default to raw prompt
133
  if loaded_model_name and ("it" in loaded_model_name.lower() or "instruct" in loaded_model_name.lower() or "chat" in loaded_model_name.lower()):
134
  if "gemma" in loaded_model_name.lower():
 
135
  chat_prompt = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
136
  elif "tinyllama" in loaded_model_name.lower():
137
+ chat_prompt = f"<|system|>\nYou are a helpful assistant.</s>\n<|user|>\n{prompt}</s>\n<|assistant|>\n"
 
138
  else: # Generic instruction format
139
  chat_prompt = f"User: {prompt}\nAssistant:"
140
 
 
144
 
145
  effective_max_new_tokens = min(int(max_new_tokens), 2048)
146
 
 
147
  eos_token_id = global_tokenizer.eos_token_id
148
  if eos_token_id is None:
149
  print("Warning: eos_token_id is None, using default 50256.")
150
+ eos_token_id = 50256
151
 
152
  generation_args = {
153
  "input_ids": inputs.input_ids,
 
156
  "do_sample": True,
157
  "temperature": float(temperature),
158
  "top_p": float(top_p),
159
+ "pad_token_id": eos_token_id
160
  }
 
161
  print(f"Generation args: {generation_args}")
162
 
163
  with torch.no_grad():
 
175
  print(f"Generation error: {error_msg}")
176
  traceback.print_exc()
177
  if "CUDA out of memory" in error_msg:
178
+ return f"❌ Error: CUDA out of memory. Try reducing 'Max New Tokens' or use a smaller model."
179
  elif "probability tensor contains nan" in error_msg or "invalid value encountered" in error_msg:
180
+ return f"❌ Error: Generation failed (invalid probability). Adjust Temp/Top-P or prompt."
181
  else:
182
  return f"❌ Error during text generation: {error_msg}"
183
 
184
+
185
  # --- UI Components & Layout ---
186
 
187
  def create_parameter_ui():
188
+ # ... (function remains the same) ...
189
  with gr.Accordion("✨ Generation Parameters", open=False):
190
  with gr.Row():
191
  max_new_tokens = gr.Slider(minimum=64, maximum=2048, value=512, step=64, label="Max New Tokens", info="Max tokens to generate.")
 
200
  with gr.Blocks(theme=gr.themes.Soft(), fill_height=True, title="Gemma Capabilities Demo") as demo:
201
 
202
  # Header
203
+ # ... (remains the same) ...
204
  gr.Markdown(
205
  """
206
  <div style="text-align: center; margin-bottom: 20px;"><h1><span style="font-size: 1.5em;">🤖</span> Gemma Capabilities Demo</h1>
 
208
  <p style="font-size: 0.9em;"><a href="https://huggingface.co/google/gemma-7b-it" target="_blank">[Accept Gemma License Here]</a></p></div>"""
209
  )
210
 
211
+
212
  # --- Authentication ---
213
+ # ... (remains the same) ...
214
+ with gr.Group():
215
  gr.Markdown("### 🔑 Authentication")
216
  with gr.Row():
217
  with gr.Column(scale=4):
 
221
  auth_status = gr.Markdown("ℹ️ Enter token & click 'Load Model'. May take time.")
222
  gr.Markdown(
223
  "**Token Info:** Get from [HF Settings](https://huggingface.co/settings/tokens) (read access). Ensure Gemma license is accepted.",
224
+ elem_id="token-info"
225
  )
226
 
227
  # --- Main Content Tabs ---
228
+ # Define tabs instance first
229
  with gr.Tabs(elem_id="main_tabs", visible=False) as tabs:
230
+ # ... (All TabItem definitions remain the same as the previous working version) ...
231
  # --- Text Generation Tab ---
232
  with gr.TabItem("📝 Creative & Informational"):
233
  with gr.Row():
 
243
  with gr.Group(visible=False) as custom_prompt_group:
244
  custom_prompt = gr.Textbox(label="Custom Prompt", placeholder="Enter full prompt...", lines=5)
245
  text_gen_params = create_parameter_ui()
 
246
  generate_text_btn = gr.Button("Generate Text", variant="primary")
247
  with gr.Column(scale=1):
248
  gr.Markdown("#### Generated Output")
249
  text_output = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
250
 
 
251
def update_text_gen_visibility(choice):
    """Show only the option group that matches the selected generation mode."""
    mode_for_group = {
        creative_options: "Creative Writing",
        info_options: "Informational Writing",
        custom_prompt_group: "Custom Prompt",
    }
    return {group: gr.update(visible=(choice == mode))
            for group, mode in mode_for_group.items()}
255
  text_gen_type.change(update_text_gen_visibility, text_gen_type, [creative_options, info_options, custom_prompt_group], queue=False)
256
 
 
257
  def text_gen_click(gen_type, style, c_topic, fmt_type, i_topic, custom_pr, *params):
258
+ task_map = {"Creative Writing": ("creative", {}), "Informational Writing": ("informational", {}), "Custom Prompt": ("custom", {})}
259
+ task_type, kwargs = task_map.get(gen_type, ("custom", {}))
 
 
 
260
  if task_type == "creative": kwargs = {"style": safe_value(style, "story"), "topic": safe_value(c_topic, "[topic]")}
261
  elif task_type == "informational": kwargs = {"format_type": safe_value(fmt_type, "article"), "topic": safe_value(i_topic, "[topic]")}
262
  else: kwargs = {"prompt": safe_value(custom_pr, "Write something.")}
 
264
  return generate_text(final_prompt, *params)
265
  generate_text_btn.click(text_gen_click, [text_gen_type, style, creative_topic, format_type, info_topic, custom_prompt, *text_gen_params], text_output)
266
 
 
267
  gr.Examples( examples=[ ["Creative Writing", "poem", "sound of rain", "", "", "", 512, 0.7, 0.9],
268
  ["Informational Writing", "", "", "explanation", "photosynthesis", "", 768, 0.6, 0.9],
269
  ["Custom Prompt", "", "", "", "", "Dialogue: cat and dog discuss humans.", 512, 0.8, 0.95] ],
270
+ inputs=[text_gen_type, style, creative_topic, format_type, info_topic, custom_prompt, *text_gen_params[:3]],
271
  outputs=text_output, label="Try examples...")
272
 
 
273
  # --- Brainstorming Tab ---
274
  with gr.TabItem("🧠 Brainstorming"):
275
  with gr.Row():
 
278
  brainstorm_category = gr.Dropdown(["project", "business", "creative", "solution", "content", "feature", "product name"], label="Category", value="project")
279
  brainstorm_topic = gr.Textbox(label="Topic/Problem", placeholder="e.g., reducing plastic waste", value="unique mobile app ideas", lines=3)
280
  brainstorm_params = create_parameter_ui()
 
281
  brainstorm_btn = gr.Button("Generate Ideas", variant="primary")
282
  with gr.Column(scale=1):
283
  gr.Markdown("#### Generated Ideas")
284
  brainstorm_output = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
 
285
def brainstorm_click(category, topic, *params):
    """Generate brainstorming ideas for `topic` within the chosen `category`."""
    brainstorm_prompt = generate_prompt(
        "brainstorm",
        category=safe_value(category, "project"),
        topic=safe_value(topic, "ideas"),
    )
    return generate_text(brainstorm_prompt, *params)
 
291
  ["creative", "fantasy novel themes", 512, 0.85, 0.95] ],
292
  inputs=[brainstorm_category, brainstorm_topic, *brainstorm_params[:3]], outputs=brainstorm_output, label="Try examples...")
293
 
 
294
  # --- Code Tab ---
295
  with gr.TabItem("💻 Code"):
296
  with gr.Tabs():
 
301
  code_lang_gen = gr.Dropdown(list(lang_map.keys())[:-1], label="Language", value="Python")
302
  code_task = gr.Textbox(label="Task", placeholder="e.g., function for factorial", value="Python class for calculator", lines=4)
303
  code_gen_params = create_parameter_ui()
 
304
  code_gen_btn = gr.Button("Generate Code", variant="primary")
305
  with gr.Column(scale=1):
306
  gr.Markdown("#### Generated Code")
307
  code_output = gr.Code(label="Result", language="python", lines=25, interactive=False)
 
308
def gen_code_click(lang, task, *params):
    """Generate code for `task` in `lang`, returning just the code with fences stripped."""
    prompt = generate_prompt(
        "code_generate",
        language=safe_value(lang, "Python"),
        task=safe_value(task, "hello world"),
    )
    result = generate_text(prompt, *params)
    if "```" not in result:
        return result.strip()
    # Take the first fenced block; drop its leading language-tag line only
    # when the tag matches the selected language (case-insensitive).
    segments = result.split("```")
    block = segments[1] if len(segments) > 1 else ''
    if '\n' in block:
        first_line, remainder = block.split('\n', 1)
        if first_line.strip().lower() == lang.lower():
            return remainder.strip()
    return block.strip()
313
def update_gen_lang_display(lang):
    """Sync the code output's syntax highlighting with the selected language.

    Fix: component-class `.update()` (here `gr.Code.update`) was removed in
    Gradio 4; `gr.update()` is equivalent and works on both Gradio 3 and 4,
    matching the `gr.update(visible=...)` fix already applied elsewhere in
    this file.
    """
    return gr.update(language=lang_map.get(lang, "plaintext"))
314
  code_lang_gen.change(update_gen_lang_display, code_lang_gen, code_output, queue=False)
315
  code_gen_btn.click(gen_code_click, [code_lang_gen, code_task, *code_gen_params], code_output)
316
+ gr.Examples([["JS", "email validation", 768, 0.6, 0.9], ["SQL", "users > 30", 512, 0.5, 0.8], ["HTML", "portfolio", 1024, 0.7, 0.9]], [code_lang_gen, code_task, *code_gen_params[:3]], code_output, label="Try...") # Abbreviated examples
 
 
 
317
 
318
  with gr.TabItem("Explain"):
319
  with gr.Row():
320
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); code_lang_explain = gr.Dropdown(list(lang_map.keys()), label="Language", value="Python"); code_to_explain = gr.Code(label="Code to Explain", language="python", lines=15); explain_code_params = create_parameter_ui(); explain_code_btn = gr.Button("Explain Code", variant="primary")
321
+ with gr.Column(scale=1): gr.Markdown("#### Explanation"); code_explanation = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
322
def explain_code_click(lang, code, *params):
    """Ask the model to explain the submitted code snippet."""
    # gr.Code may deliver either a plain string or a {'code': ...} dict.
    raw_code = code['code'] if isinstance(code, dict) else code
    code_content = safe_value(raw_code, "#")
    prompt = generate_prompt(
        "code_explain",
        language=safe_value(lang, "code"),
        code=code_content,
    )
    return generate_text(prompt, *params)
 
 
 
 
 
 
 
 
 
 
 
 
323
def update_explain_lang_display(lang):
    """Sync the explain tab's code editor highlighting with the selected language.

    Fix: `gr.Code.update` (component-class update) was removed in Gradio 4;
    `gr.update()` is the forward-compatible equivalent, consistent with the
    `gr.update(visible=...)` fix already applied elsewhere in this file.
    """
    return gr.update(language=lang_map.get(lang, "plaintext"))
324
  code_lang_explain.change(update_explain_lang_display, code_lang_explain, code_to_explain, queue=False)
325
  explain_code_btn.click(explain_code_click, [code_lang_explain, code_to_explain, *explain_code_params], code_explanation)
326
 
 
327
  with gr.TabItem("Debug"):
328
  with gr.Row():
329
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); code_lang_debug = gr.Dropdown(list(lang_map.keys()), label="Language", value="Python"); code_to_debug = gr.Code(label="Buggy Code", language="python", lines=15, value="def avg(nums):\n # Potential div by zero\n return sum(nums)/len(nums)"); debug_code_params = create_parameter_ui(); debug_code_btn = gr.Button("Debug Code", variant="primary")
330
+ with gr.Column(scale=1): gr.Markdown("#### Debugging Analysis"); debug_result = gr.Textbox(label="Result", lines=25, interactive=False, show_copy_button=True)
331
def debug_code_click(lang, code, *params):
    """Ask the model to find and explain bugs in the submitted snippet."""
    # gr.Code may deliver either a plain string or a {'code': ...} dict.
    raw_code = code['code'] if isinstance(code, dict) else code
    code_content = safe_value(raw_code, "#")
    prompt = generate_prompt(
        "code_debug",
        language=safe_value(lang, "code"),
        code=code_content,
    )
    return generate_text(prompt, *params)
 
 
 
 
 
 
 
 
 
 
 
 
332
def update_debug_lang_display(lang):
    """Sync the debug tab's code editor highlighting with the selected language.

    Fix: `gr.Code.update` (component-class update) was removed in Gradio 4;
    `gr.update()` is the forward-compatible equivalent, consistent with the
    `gr.update(visible=...)` fix already applied elsewhere in this file.
    """
    return gr.update(language=lang_map.get(lang, "plaintext"))
333
  code_lang_debug.change(update_debug_lang_display, code_lang_debug, code_to_debug, queue=False)
334
  debug_code_btn.click(debug_code_click, [code_lang_debug, code_to_debug, *debug_code_params], debug_result)
335
 
 
336
  # --- Comprehension Tab ---
337
  with gr.TabItem("📚 Comprehension"):
338
  with gr.Tabs():
339
  with gr.TabItem("Summarize"):
340
  with gr.Row():
341
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); summarize_text = gr.Textbox(label="Text", lines=15, placeholder="Paste..."); summarize_params = create_parameter_ui(); summarize_btn = gr.Button("Summarize", variant="primary")
342
+ with gr.Column(scale=1): gr.Markdown("#### Summary"); summary_output = gr.Textbox(label="Result", lines=15, interactive=False, show_copy_button=True)
343
def summarize_click(text, *params):
    """Summarize `text`, clamping the token budget to a sensible summary length."""
    prompt = generate_prompt("summarize", text=safe_value(text, "[text]"))
    gen_args = list(params)
    # Summaries should be short: clamp max_new_tokens into [64, 512].
    gen_args[0] = min(max(int(gen_args[0]), 64), 512)
    return generate_text(prompt, *gen_args)
 
 
 
 
 
 
 
 
 
 
 
344
  summarize_btn.click(summarize_click, [summarize_text, *summarize_params], summary_output)
 
 
345
  with gr.TabItem("Q & A"):
346
  with gr.Row():
347
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); qa_text = gr.Textbox(label="Context", lines=10, placeholder="Paste context..."); qa_question = gr.Textbox(label="Question", placeholder="Ask..."); qa_params = create_parameter_ui(); qa_btn = gr.Button("Answer", variant="primary")
348
+ with gr.Column(scale=1): gr.Markdown("#### Answer"); qa_output = gr.Textbox(label="Result", lines=10, interactive=False, show_copy_button=True)
349
def qa_click(text, q, *params):
    """Answer question `q` from the supplied context, with a short token budget."""
    prompt = generate_prompt(
        "qa",
        text=safe_value(text, "[ctx]"),
        question=safe_value(q, "[q]"),
    )
    gen_args = list(params)
    # Answers should be brief: clamp max_new_tokens into [32, 256].
    gen_args[0] = min(max(int(gen_args[0]), 32), 256)
    return generate_text(prompt, *gen_args)
 
 
 
 
 
 
 
 
 
 
 
350
  qa_btn.click(qa_click, [qa_text, qa_question, *qa_params], qa_output)
 
 
351
  with gr.TabItem("Translate"):
352
  with gr.Row():
353
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); translate_text = gr.Textbox(label="Text", lines=8, placeholder="Enter text..."); target_lang = gr.Dropdown(["French", "Spanish", "German", "Japanese", "Chinese", "Russian", "Arabic", "Hindi", "Portuguese", "Italian"], label="To", value="French"); translate_params = create_parameter_ui(); translate_btn = gr.Button("Translate", variant="primary")
354
+ with gr.Column(scale=1): gr.Markdown("#### Translation"); translation_output = gr.Textbox(label="Result", lines=8, interactive=False, show_copy_button=True)
355
def translate_click(text, lang, *params):
    """Translate `text` into the selected target language."""
    prompt = generate_prompt(
        "translate",
        text=safe_value(text, "[text]"),
        target_lang=safe_value(lang, "French"),
    )
    gen_args = list(params)
    # Floor the token budget so translations aren't truncated mid-sentence.
    gen_args[0] = max(int(gen_args[0]), 64)
    return generate_text(prompt, *gen_args)
 
 
 
 
 
 
 
 
 
 
 
356
  translate_btn.click(translate_click, [translate_text, target_lang, *translate_params], translation_output)
357
 
 
358
  # --- More Tasks Tab ---
359
  with gr.TabItem("🛠️ More Tasks"):
360
  with gr.Tabs():
361
+ with gr.TabItem("Content"): # Abbreviated names for brevity
362
  with gr.Row():
363
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); content_type = gr.Dropdown(["blog outline", "tweet", "linkedin post", "email subject", "product desc", "press release intro"], label="Type", value="blog outline"); content_topic = gr.Textbox(label="Topic", value="sustainable travel", lines=2); content_audience = gr.Textbox(label="Audience", value="millennials"); content_params = create_parameter_ui(); content_btn = gr.Button("Generate", variant="primary")
364
+ with gr.Column(scale=1): gr.Markdown("#### Result"); content_output = gr.Textbox(lines=20, interactive=False, show_copy_button=True)
365
def content_click(t, top, aud, *p):
    """Generate content of the chosen type for a given topic and audience."""
    prompt = generate_prompt(
        "content_creation",
        content_type=safe_value(t, "text"),
        topic=safe_value(top, "[topic]"),
        audience=safe_value(aud, "[audience]"),
    )
    return generate_text(prompt, *p)
 
 
 
 
 
 
 
 
 
 
 
366
  content_btn.click(content_click, [content_type, content_topic, content_audience, *content_params], content_output)
367
+ with gr.TabItem("Email"):
368
+ with gr.Row():
369
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); email_type = gr.Dropdown(["job inquiry", "meeting request", "follow-up", "thank you", "support reply", "sales outreach"], label="Type", value="meeting request"); email_context = gr.Textbox(label="Context", lines=5, value="Meet next week re: project X. Tue/Wed PM?"); email_params = create_parameter_ui(); email_btn = gr.Button("Generate", variant="primary")
370
+ with gr.Column(scale=1): gr.Markdown("#### Draft"); email_output = gr.Textbox(lines=20, interactive=False, show_copy_button=True)
371
def email_click(t, ctx, *p):
    """Draft an email of the chosen type from the provided context."""
    prompt = generate_prompt(
        "email_draft",
        email_type=safe_value(t, "email"),
        context=safe_value(ctx, "[context]"),
    )
    return generate_text(prompt, *p)
372
+ email_btn.click(email_click, [email_type, email_context, *email_params], email_output)
373
+ with gr.TabItem("Edit"):
 
 
 
 
 
 
 
 
 
 
 
 
374
  with gr.Row():
375
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); edit_text = gr.Textbox(label="Text", lines=10, placeholder="Paste..."); edit_type = gr.Dropdown(["clarity", "grammar/spelling", "concise", "formal", "casual", "simplify"], label="Improve For", value="clarity"); edit_params = create_parameter_ui(); edit_btn = gr.Button("Edit", variant="primary")
376
+ with gr.Column(scale=1): gr.Markdown("#### Edited"); edit_output = gr.Textbox(lines=10, interactive=False, show_copy_button=True)
377
def edit_click(txt, et, *p):
    """Rewrite `txt` toward the chosen improvement goal (clarity, grammar, ...)."""
    prompt = generate_prompt(
        "document_edit",
        text=safe_value(txt, "[text]"),
        edit_type=safe_value(et, "clarity"),
    )
    gen_args = list(p)
    # The edit must be able to reproduce the whole text: budget at least the
    # original word count plus headroom.
    gen_args[0] = max(int(gen_args[0]), len(safe_value(txt, "").split()) + 64)
    return generate_text(prompt, *gen_args)
 
 
 
 
 
 
 
 
 
 
 
378
  edit_btn.click(edit_click, [edit_text, edit_type, *edit_params], edit_output)
379
+ with gr.TabItem("Classify"):
380
+ with gr.Row():
381
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); classify_text = gr.Textbox(label="Text", lines=8, value="Sci-fi movie explores AI."); classify_categories = gr.Textbox(label="Categories", value="Tech, Entertainment, Science"); classify_params = create_parameter_ui(); classify_btn = gr.Button("Classify", variant="primary")
382
+ with gr.Column(scale=1): gr.Markdown("#### Category"); classify_output = gr.Textbox(lines=2, interactive=False, show_copy_button=True)
383
def classify_click(txt, cats, *p):
    """Classify `txt` into one of the comma-separated `cats`.

    Clamps max_new_tokens to [16, 128] (a label needs few tokens). If the
    model's final output line matches one of the requested categories, only
    that label is returned; otherwise the raw output is returned so the user
    can inspect it.
    """
    # Bug fix: the original split the *raw* `cats` argument, so a None/empty
    # categories box raised AttributeError even though `safe_value` had been
    # applied for the prompt. Sanitize once and use it everywhere.
    categories = safe_value(cats, "c1,c2")
    prompt = generate_prompt(
        "classify",
        text=safe_value(txt, "[text]"),
        categories=categories,
    )
    gen_args = list(p)
    gen_args[0] = min(max(int(gen_args[0]), 16), 128)
    raw = generate_text(prompt, *gen_args)
    last_line = raw.split('\n')[-1].strip()
    known = [c.strip().lower() for c in categories.split(',')]
    return last_line if last_line.lower() in known else raw
384
+ classify_btn.click(classify_click, [classify_text, classify_categories, *classify_params], classify_output)
385
+ with gr.TabItem("Extract"):
386
+ with gr.Row():
387
+ with gr.Column(scale=1): gr.Markdown("#### Setup"); extract_text = gr.Textbox(label="Source", lines=10, value="Order #123 by Jane ([email protected]). Total: $99."); extract_data_points = gr.Textbox(label="Extract", value="order num, name, email, total"); extract_params = create_parameter_ui(); extract_btn = gr.Button("Extract", variant="primary")
388
+ with gr.Column(scale=1): gr.Markdown("#### Data"); extract_output = gr.Textbox(lines=10, interactive=False, show_copy_button=True)
389
def extract_click(txt, pts, *p):
    """Pull the requested data points out of the source text."""
    prompt = generate_prompt(
        "data_extract",
        text=safe_value(txt, "[text]"),
        data_points=safe_value(pts, "info"),
    )
    return generate_text(prompt, *p)
390
+ extract_btn.click(extract_click, [extract_text, extract_data_points, *extract_params], extract_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
391
 
392
 
393
  # --- Authentication Handler & Footer ---
394
+ footer_status = gr.Markdown("...", elem_id="footer-status-md") # Placeholder for footer
395
 
396
+ # Define authentication handler AFTER tabs is defined
397
def handle_auth(token):
    """Two-step auth flow: immediately show a busy message (tabs hidden),
    then yield the real load result once the model is ready."""
    # Step 1: instant feedback while the (slow) model load runs.
    yield "⏳ Authenticating & loading model...", gr.update(visible=False)
    # Step 2: load_model returns (status text, tabs visibility update).
    status_message, tabs_update = load_model(token)
    yield status_message, tabs_update
403
 
404
+ # Define footer update handler
405
def update_footer_status(status_text):
    """Rebuild the footer markdown from the current module-level model state.

    `status_text` is accepted only to satisfy the event wiring; the footer
    content is derived from the globals `model_loaded`/`loaded_model_name`.
    """
    model_label = loaded_model_name if model_loaded else 'None'
    footer_html = f"""
<hr><div style="text-align: center; font-size: 0.9em; color: #777;">
<p>Powered by Hugging Face 🤗 Transformers & Gradio. Model: <strong>{model_label}</strong>.</p>
<p>Review outputs carefully. Models may generate inaccurate information.</p></div>"""
    return gr.update(value=footer_html)
411
 
412
+ # Link button click to the handler
413
+ auth_button.click(
414
+ fn=handle_auth,
415
+ inputs=hf_token,
416
+ outputs=[auth_status, tabs], # Target auth_status and the tabs instance
417
+ queue=True
418
+ )
419
  # Update footer whenever auth status text changes
420
+ auth_status.change(
421
+ fn=update_footer_status,
422
+ inputs=auth_status, # Trigger based on auth_status text
423
+ outputs=footer_status, # Update the footer_status Markdown
424
+ queue=False
425
+ )
426
  # Initial footer update on load
427
+ demo.load(
428
+ fn=update_footer_status,
429
+ inputs=auth_status, # Use initial auth_status text
430
+ outputs=footer_status,
431
+ queue=False
432
+ )
433
 
434
 
435
  # --- Launch App ---
436
+ demo.queue().launch(share=False)
testing_grok.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import os
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM
5
+ from gradio.themes.utils import colors, sizes
6
+ from gradio.themes import Soft
7
+
8
+ # Define custom themes
9
class GemmaLightTheme(Soft):
    """Light variant: Gradio's Soft theme with blue/indigo accents."""

    def __init__(self):
        theme_kwargs = dict(
            primary_hue=colors.blue,
            secondary_hue=colors.indigo,
            neutral_hue=colors.gray,
            spacing_size=sizes.spacing_md,
            radius_size=sizes.radius_md,
            text_size=sizes.text_md,
        )
        super().__init__(**theme_kwargs)
19
+
20
class GemmaDarkTheme(Soft):
    """Dark variant: Soft theme with the same hues plus a navy palette."""

    def __init__(self):
        super().__init__(
            primary_hue=colors.blue,
            secondary_hue=colors.indigo,
            neutral_hue=colors.gray,
            spacing_size=sizes.spacing_md,
            radius_size=sizes.radius_md,
            text_size=sizes.text_md,
        )
        # Post-init overrides that darken the base Soft theme.
        overrides = {
            "name": "gemma_dark",
            "background_fill_primary": "#1F1F2E",
            "background_fill_secondary": "#2A2A3C",
            "border_color_primary": "#3A3A4C",
            "border_color_secondary": "#4A4A5C",
            "color_accent_soft": "#3B5FA3",
            "color_accent": "#4B82C4",
            "text_color": "#FFFFFF",
            "text_color_subdued": "#CCCCCC",
            "shadow_spread": "8px",
            "shadow_inset": "0px 1px 2px 0px rgba(0, 0, 0, 0.1) inset",
        }
        for attr, value in overrides.items():
            setattr(self, attr, value)
41
+
42
+ # [Previous code for helper functions, model loading, etc., remains unchanged]
43
+
44
+ # Create Gradio interface
45
+ with gr.Blocks(theme=GemmaLightTheme()) as demo:
46
+ # Header with theme toggle
47
+ with gr.Row(equal_height=True):
48
+ with gr.Column(scale=6):
49
+ gr.Markdown(
50
+ """
51
+ # 🤖 Gemma Capabilities Demo
52
+
53
+ This interactive demo showcases Google's Gemma model capabilities across different tasks.
54
+ """
55
+ )
56
+ with gr.Column(scale=1, min_width=150):
57
+ theme_toggle = gr.Radio(
58
+ ["Light", "Dark"],
59
+ value="Light",
60
+ label="Theme",
61
+ info="Switch between light and dark mode",
62
+ elem_id="theme_toggle"
63
+ )
64
+
65
+ # Add CSS and JavaScript for themes
66
+ gr.HTML("""
67
+ <style>
68
+ /* Base styles for better UI */
69
+ .gr-group {
70
+ box-shadow: 0 2px 5px rgba(0,0,0,0.1);
71
+ border-radius: 10px;
72
+ padding: 20px;
73
+ background-color: var(--block-background-fill);
74
+ }
75
+ .gr-tabs {
76
+ box-shadow: 0 2px 5px rgba(0,0,0,0.1);
77
+ border-radius: 10px;
78
+ overflow: hidden;
79
+ }
80
+ button.primary {
81
+ transition: transform 0.2s, box-shadow 0.2s;
82
+ }
83
+ button.primary:hover {
84
+ transform: translateY(-2px);
85
+ box-shadow: 0 4px 8px rgba(0,0,0,0.2);
86
+ }
87
+ textarea, .input-box {
88
+ font-size: 16px !important;
89
+ }
90
+
91
+ /* Dark theme overrides using Gradio variables */
92
+ .dark-theme {
93
+ --body-background-fill: #1F1F2E !important;
94
+ --block-background-fill: #2A2A3C !important;
95
+ --input-background-fill: #3A3A4C !important;
96
+ --button-primary-background-fill: #4B82C4 !important;
97
+ --button-secondary-background-fill: #3A3A4C !important;
98
+ --border-color-primary: #4A4A5C !important;
99
+ --body-text-color: #FFFFFF !important;
100
+ --block-label-text-color: #CCCCCC !important;
101
+ --input-text-color: #FFFFFF !important;
102
+ --button-primary-text-color: #FFFFFF !important;
103
+ --button-secondary-text-color: #FFFFFF !important;
104
+ --shadow: 0 1px 2px rgba(0,0,0,0.5) !important;
105
+ }
106
+ .dark-theme .code {
107
+ background-color: #2A2A3C !important;
108
+ color: #FFFFFF !important;
109
+ }
110
+ </style>
111
+
112
+ <script>
113
+ (function() {
114
+ function applyTheme() {
115
+ const themeToggle = document.getElementById('theme_toggle');
116
+ if (!themeToggle) return;
117
+ const inputs = themeToggle.querySelectorAll('input');
118
+ for (let input of inputs) {
119
+ if (input.checked && input.value === 'Dark') {
120
+ document.body.classList.add('dark-theme');
121
+ return;
122
+ }
123
+ }
124
+ document.body.classList.remove('dark-theme');
125
+ }
126
+
127
+ function setupObserver() {
128
+ const themeToggle = document.getElementById('theme_toggle');
129
+ if (!themeToggle) {
130
+ setTimeout(setupObserver, 100);
131
+ return;
132
+ }
133
+ applyTheme();
134
+ const observer = new MutationObserver((mutations) => {
135
+ mutations.forEach((mutation) => {
136
+ if (mutation.type === 'attributes' || mutation.type === 'childList') {
137
+ applyTheme();
138
+ }
139
+ });
140
+ });
141
+ observer.observe(themeToggle, { attributes: true, childList: true, subtree: true });
142
+ const inputs = themeToggle.querySelectorAll('input');
143
+ inputs.forEach(input => input.addEventListener('change', applyTheme));
144
+ }
145
+
146
+ if (document.readyState === 'loading') {
147
+ document.addEventListener('DOMContentLoaded', setupObserver);
148
+ } else {
149
+ setupObserver();
150
+ }
151
+ })();
152
+ </script>
153
+ """)
154
+
155
+ # Authentication Section
156
+ with gr.Group(elem_id="auth_box"):
157
+ gr.Markdown("## 🔑 Authentication")
158
+ with gr.Row(equal_height=True):
159
+ with gr.Column(scale=3):
160
+ hf_token = gr.Textbox(
161
+ label="Hugging Face Token",
162
+ placeholder="Enter your token here...",
163
+ type="password",
164
+ value=DEFAULT_HF_TOKEN,
165
+ info="Get your token from https://huggingface.co/settings/tokens"
166
+ )
167
+ with gr.Column(scale=1):
168
+ auth_button = gr.Button("Authenticate", variant="primary")
169
+ auth_status = gr.Markdown("Please authenticate to use the model.")
170
+
171
def authenticate(token):
    """Return the instant 'loading' status shown while auth_complete runs.

    `token` is unused here; the signature mirrors the click handler chain.
    """
    return "⏳ Loading model... Please wait, this may take a minute."
173
+
174
def auth_complete(token):
    """Perform the slow model load and surface its status message to the UI."""
    return load_model(token)
176
+
177
+ auth_button.click(fn=authenticate, inputs=[hf_token], outputs=[auth_status], queue=False).then(
178
+ fn=auth_complete, inputs=[hf_token], outputs=[auth_status]
179
+ )
180
+ gr.Markdown(
181
+ """
182
+ ### How to get a token:
183
+ 1. Go to [Hugging Face Token Settings](https://huggingface.co/settings/tokens)
184
+ 2. Create a new token with read access
185
+ 3. Accept the [Gemma model license](https://huggingface.co/google/gemma-3-4b-pt)
186
+ """
187
+ )
188
+
189
+ # [Remaining tabs and components remain unchanged for brevity]
190
+
191
+ # Footer
192
+ with gr.Group(elem_id="footer"):
193
+ gr.Markdown(
194
+ """
195
+ ## About Gemma
196
+ Gemma is a family of lightweight, state-of-the-art open models from Google...
197
+ [Learn more about Gemma](https://huggingface.co/google/gemma-3-4b-pt)
198
+ <div style="text-align: center; margin-top: 20px; padding: 10px;">
199
+ <p>© 2023 Gemma Capabilities Demo | Made with ❤️ using Gradio</p>
200
+ </div>
201
+ """
202
+ )
203
+
204
+ # Launch the app
205
+ demo.launch(share=False)