Dhan98 committed · verified
Commit 5d58ce5 · 1 Parent(s): 9f3cdd1

Update app.py

Files changed (1)
  1. app.py +19 -49
app.py CHANGED
@@ -1,72 +1,47 @@
 import streamlit as st
-import torch
 from transformers import pipeline
 from PIL import Image
 import numpy as np
 import tempfile
 import os
-from diffusers import VideoToVideoSDPipeline
-from diffusers.utils import export_to_video
+from modelscope.pipelines import pipeline as modelscope_pipeline
+from modelscope.outputs import OutputKeys
 
 def generate_video_from_image(image, duration_seconds=10, progress_bar=None):
     """
-    Generate a video from an image using VideoToVideoSDPipeline.
+    Generate a video from an image using ModelScope's video generation.
     """
     try:
         if progress_bar:
             progress_bar.progress(0.1, "Generating image caption...")
 
-        # Setup image captioning pipeline
-        captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+        # Setup image captioning
+        caption_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
 
         # Generate caption
-        caption = captioner(image)[0]['generated_text']
+        caption = caption_pipe(image)[0]['generated_text']
         st.write(f"Generated caption: *{caption}*")
 
         if progress_bar:
             progress_bar.progress(0.3, "Loading Video Generation model...")
 
-        # Initialize Video Generation pipeline
-        pipeline = VideoToVideoSDPipeline.from_pretrained(
-            "cerspense/zeroscope_v2_576w",
-            torch_dtype=torch.float16
-        ).to("cuda" if torch.cuda.is_available() else "cpu")
+        # Initialize video generation
+        video_pipe = modelscope_pipeline(
+            'text-to-video-synthesis',
+            model='damo/text-to-video-synthesis'
+        )
 
         if progress_bar:
-            progress_bar.progress(0.4, "Processing image...")
-
-        # Prepare image
-        if image.mode != "RGB":
-            image = image.convert("RGB")
-        image = image.resize((576, 320))  # Resize to model's expected size
-
-        if progress_bar:
-            progress_bar.progress(0.5, "Generating video frames...")
+            progress_bar.progress(0.5, "Generating video...")
 
         # Generate video
-        num_frames = duration_seconds * 8  # 8 FPS for this model
-        video_frames = pipeline(
-            image,
-            num_inference_steps=50,
-            num_frames=num_frames,
-            guidance_scale=7.5,
-            prompt=caption,
-        ).videos[0]
-
-        if progress_bar:
-            progress_bar.progress(0.8, "Creating final video...")
-
-        # Create temporary file for video
-        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
-            output_path = tmp_file.name
-
-        # Export video frames
-        export_to_video(video_frames, output_path, fps=8)
+        output = video_pipe(caption)
+        video_path = output[OutputKeys.OUTPUT_VIDEO]
 
         if progress_bar:
             progress_bar.progress(1.0, "Video generation complete!")
 
-        return output_path, caption
+        return video_path, caption
 
     except Exception as e:
         st.error(f"Error generating video: {str(e)}")
@@ -81,15 +56,11 @@ def main():
     The app will automatically generate a caption for your image and use it as inspiration for the video.
     """)
 
-    # Add warning about computational requirements
-    st.warning("Note: Video generation may take several minutes depending on the duration and available computing resources.")
+    st.info("Note: Video generation may take several minutes.")
 
     # File uploader
     uploaded_file = st.file_uploader("Choose an image", type=['png', 'jpg', 'jpeg'])
 
-    # Duration selector (adjusted for this model's capabilities)
-    duration = st.slider("Video duration (seconds)", min_value=1, max_value=15, value=5)
-
    if uploaded_file is not None:
        # Display uploaded image
        image = Image.open(uploaded_file)
@@ -103,7 +74,7 @@ def main():
        my_bar = st.progress(0, text=progress_text)
 
        # Generate video
-       video_path, caption = generate_video_from_image(image, duration, my_bar)
+       video_path, caption = generate_video_from_image(image, my_bar)
 
        if video_path and os.path.exists(video_path):
            # Read the video file
@@ -120,14 +91,13 @@
 
            # Display video
            st.video(video_bytes)
-
-           # Clean up temporary file
-           os.unlink(video_path)
        else:
            st.error("Failed to generate video. Please try again.")
 
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
+       st.error("Full error message for debugging:")
+       st.error(e)
 
 if __name__ == "__main__":
     main()
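
For context, the updated app.py strings together two off-the-shelf pipelines: a transformers image-to-text pipeline (BLIP) that captions the uploaded image, and a ModelScope text-to-video-synthesis pipeline that turns that caption into an MP4. Below is a minimal standalone sketch of that flow, not the committed code: it assumes the transformers and modelscope packages are installed, that the damo/text-to-video-synthesis weights can be downloaded, and it uses a placeholder image path; ModelScope's published example wraps the prompt in a {'text': ...} dict, which is the form used here.

# Minimal sketch of the caption -> video flow behind the updated app.py.
# Assumptions: transformers and modelscope are installed, the
# damo/text-to-video-synthesis weights are downloadable, and "input.jpg"
# is a placeholder path to a local test image.
from PIL import Image
from transformers import pipeline
from modelscope.pipelines import pipeline as modelscope_pipeline
from modelscope.outputs import OutputKeys

# 1. Caption the still image with BLIP.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
caption = captioner(Image.open("input.jpg"))[0]["generated_text"]
print("caption:", caption)

# 2. Turn the caption into a short clip with ModelScope's text-to-video pipeline.
video_pipe = modelscope_pipeline("text-to-video-synthesis", model="damo/text-to-video-synthesis")
result = video_pipe({"text": caption})

# The pipeline returns a dict keyed by OutputKeys; OUTPUT_VIDEO is the path
# to the generated .mp4 file.
print("video written to:", result[OutputKeys.OUTPUT_VIDEO])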