seawolf2357 committed
Commit 3a8aec0 · verified · 1 Parent(s): f028eb9

Update app.py

Files changed (1)
  1. app.py +4 -7
app.py CHANGED
@@ -28,7 +28,6 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
     ]
     conditioning_frames = [load_image(img_file) for img_file in image_files]
 
-    # Ensure conditioning_frame_indices is a list of integers
     conditioning_frame_indices = eval(conditioning_frame_indices)
     controlnet_conditioning_scale = float(controlnet_conditioning_scale)
 
@@ -61,9 +60,9 @@ def generate_simple_video(prompt):
     frames = pipe(
         prompt,
         num_frames=64,
-        num_inference_steps=20,
-        guidance_scale=7.0,
-        decode_chunk_size=2,
+        num_inference_steps=50,  # Increased for higher quality
+        guidance_scale=10.0,  # Increased for stronger guidance
+        decode_chunk_size=1,  # Reduced for finer details
     ).frames[0]
 
     export_to_gif(frames, "simple_output.gif")
@@ -74,7 +73,7 @@ demo1 = gr.Interface(
     inputs=[
         gr.Textbox(label="Prompt", value="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
         gr.Textbox(label="Negative Prompt", value="low quality, worst quality, letterboxed"),
-        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25),
+        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=50),  # Increased default value
         gr.Textbox(label="Conditioning Frame Indices", value="[0, 8, 15]"),
         gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
     ],
@@ -93,7 +92,5 @@ demo2 = gr.Interface(
 
 demo = gr.TabbedInterface([demo1, demo2], ["Advanced Video Generation", "Simple Video Generation"])
 
-
-
 demo.launch()
 #demo.launch(server_name="0.0.0.0", server_port=7910)
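The second hunk raises the sampling settings for the simple tab: num_inference_steps goes from 20 to 50, guidance_scale from 7.0 to 10.0, and decode_chunk_size from 2 to 1. For context, a minimal sketch of how generate_simple_video reads after this commit (the pipe object, its construction, and the function's return value are not shown in this diff and are assumed here):

# Sketch only: `pipe` is assumed to be a diffusers text-to-video pipeline built
# elsewhere in app.py; that setup is not part of this diff.
from diffusers.utils import export_to_gif

def generate_simple_video(prompt):
    frames = pipe(
        prompt,
        num_frames=64,
        num_inference_steps=50,   # was 20 before this commit
        guidance_scale=10.0,      # was 7.0
        decode_chunk_size=1,      # was 2
    ).frames[0]
    export_to_gif(frames, "simple_output.gif")
    return "simple_output.gif"   # assumed return value for the Gradio output; not shown in the diff

More steps and a smaller decode chunk size make each call slower; the inline comments in the commit state the intent is higher output quality.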
 
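Separately, and unchanged by this commit, the first hunk shows conditioning_frame_indices being parsed with eval() on text coming straight from a Gradio textbox, which will execute arbitrary Python typed into that field. A hedged alternative sketch, using the standard library's ast.literal_eval (an illustrative helper, not something this commit adds):

import ast

def parse_frame_indices(raw):
    # Safely parse a textbox value like "[0, 8, 15]" without executing code.
    indices = ast.literal_eval(raw)
    if not isinstance(indices, (list, tuple)) or not all(isinstance(i, int) for i in indices):
        raise ValueError("Conditioning frame indices must be a list of integers, e.g. [0, 8, 15]")
    return list(indices)

# Possible usage inside generate_video, replacing the eval() call:
# conditioning_frame_indices = parse_frame_indices(conditioning_frame_indices)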