IITheLordII committed (verified)
Commit: 0d9a0ce · Parent: 8336ddb

Update demo_gradio.py


Replace with updated files from GitHub

Files changed (1)
  1. demo_gradio.py +11 -6
demo_gradio.py CHANGED
@@ -100,7 +100,7 @@ os.makedirs(outputs_folder, exist_ok=True)
 
 
 @torch.no_grad()
-def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache):
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
     total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
     total_latent_sections = int(max(round(total_latent_sections), 1))
 
@@ -295,7 +295,7 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
 
         output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
 
-        save_bcthw_as_mp4(history_pixels, output_filename, fps=30)
+        save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
 
         print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
 
@@ -315,7 +315,7 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
         return
 
 
-def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache):
+def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
     global stream
     assert input_image is not None, 'No input image!'
 
@@ -323,7 +323,7 @@ def process(input_image, prompt, n_prompt, seed, total_second_length, latent_win
 
     stream = AsyncStream()
 
-    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache)
+    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
 
     output_filename = None
 
@@ -385,13 +385,18 @@ with block:
 
                 gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
 
+                mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
+
         with gr.Column():
             preview_image = gr.Image(label="Next Latents", height=200, visible=False)
             result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
             gr.Markdown('Note that the ending actions will be generated before the starting actions due to the inverted sampling. If the starting action is not in the video, you just need to wait, and it will be generated later.')
             progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
             progress_bar = gr.HTML('', elem_classes='no-generating-animation')
-    ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache]
+
+    gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')
+
+    ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
     start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
     end_button.click(fn=end_process)
 
@@ -401,4 +406,4 @@ block.launch(
     server_port=args.port,
     share=args.share,
     inbrowser=args.inbrowser,
-)
+)
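
The functional change here is the new `mp4_crf` argument, threaded from the Gradio slider down to `save_bcthw_as_mp4` as a CRF (constant rate factor) setting for the MP4 encoder. The helper itself is not part of this diff; the snippet below is only a minimal sketch of what such a writer could look like, assuming a float tensor in [-1, 1] with shape (B, C, T, H, W) and using `torchvision.io.write_video` to pass the CRF option through to libx264. The function name, normalization, and batch handling are illustrative, not the repository's actual implementation.

```python
# Hypothetical sketch of an MP4 writer with a CRF knob (not the real save_bcthw_as_mp4).
import torch
from torchvision.io import write_video


def save_bcthw_as_mp4_sketch(x: torch.Tensor, path: str, fps: int = 30, crf: int = 16) -> None:
    # Assumption: x is a float tensor in [-1, 1] with shape (B, C, T, H, W).
    video = x[0]                                 # take the first batch item: (C, T, H, W)
    video = video.permute(1, 2, 3, 0)            # -> (T, H, W, C), the layout write_video expects
    video = ((video.clamp(-1, 1) + 1) * 127.5)   # map [-1, 1] -> [0, 255]
    video = video.round().to(torch.uint8).cpu()
    # With libx264, lower CRF means higher quality and larger files.
    write_video(path, video, fps=fps, video_codec="libx264", options={"crf": str(crf)})
```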
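The rest of the diff is plumbing: the new slider is appended to the `ips` list, so Gradio passes its value to `process`, which forwards it to `worker` via `async_run`. Below is a self-contained sketch of that wiring pattern, with illustrative component names rather than the ones from `demo_gradio.py`.

```python
# Minimal Gradio wiring sketch: appending a slider to the inputs list means its
# value arrives in the callback as an extra positional argument, which is why
# the callback signature must gain a matching parameter.
import gradio as gr


def process(prompt, mp4_crf):
    return f"prompt={prompt!r}, crf={int(mp4_crf)}"


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1)
    out = gr.Textbox(label="Result")
    btn = gr.Button("Start")
    ips = [prompt, mp4_crf]  # adding mp4_crf here mirrors the change to ips in the diff
    btn.click(fn=process, inputs=ips, outputs=out)

if __name__ == "__main__":
    demo.launch()
```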