dream2589632147 committed on
Commit
44a4bd9
·
verified ·
1 Parent(s): acd25ee

Update app.py

Files changed (1)
  1. app.py +212 -126
app.py CHANGED
@@ -1,21 +1,34 @@
1
- import spaces
2
  import torch
 
 
3
  from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
4
  from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
5
  from diffusers.utils.export_utils import export_to_video
6
- import gradio as gr
7
- import tempfile
8
- import numpy as np
9
- from PIL import Image
10
- import random
11
- import gc
12
- from torchao.quantization import quantize_
13
- from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
14
 
15
  # ------------------------
16
- # Model settings
17
  # ------------------------
18
- MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
 
 
19
 
20
  MAX_DIM = 832
21
  MIN_DIM = 480
@@ -28,75 +41,17 @@ MAX_FRAMES_MODEL = 480
28
  MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
29
  MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
30
 
31
- # ------------------------
32
- # Load the model
33
- # ------------------------
34
- print("🔹 Loading model... Please wait, this may take a few minutes.")
35
-
36
- pipe = WanImageToVideoPipeline.from_pretrained(
37
- MODEL_ID,
38
- transformer=WanTransformer3DModel.from_pretrained(
39
- 'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
40
- subfolder='transformer',
41
- torch_dtype=torch.float16,
42
- device_map='cuda'
43
- ),
44
- transformer_2=WanTransformer3DModel.from_pretrained(
45
- 'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
46
- subfolder='transformer_2',
47
- torch_dtype=torch.float16,
48
- device_map='cuda'
49
- ),
50
- torch_dtype=torch.float16
51
- ).to('cuda')
52
-
53
- pipe.load_lora_weights(
54
- "Kijai/WanVideo_comfy",
55
- weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
56
- adapter_name="lightx2v"
57
- )
58
- pipe.load_lora_weights(
59
- "Kijai/WanVideo_comfy",
60
- weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
61
- adapter_name="lightx2v_2",
62
- load_into_transformer_2=True
63
- )
64
- pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
65
- pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
66
- pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
67
-
68
- # Do not unload the LoRA weights after fusing
69
-
70
- # ------------------------
71
- # Optional quantization (faster inference, lower memory)
72
- # ------------------------
73
- if torch.cuda.is_available():
74
- try:
75
- quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
76
- quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
77
- quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
78
- print("✅ Quantization applied successfully.")
79
- except Exception as e:
80
- print(f"⚠️ Quantization skipped due to: {e}")
81
-
82
- # ------------------------
83
- # Default prompts
84
- # ------------------------
85
  default_prompt_i2v = (
86
- "ultra realistic cinematic footage, perfectly preserved facial identity and body structure "
87
- "across all frames, stable anatomy and consistent body proportions, realistic muscle definition, "
88
- "natural motion flow and breathing dynamics, seamless motion continuity, photorealistic clothing "
89
- "preservation with accurate fabric movement and lighting response, consistent outfit color and texture, "
90
- "high-fidelity skin texture, detailed lighting and shadows"
91
  )
92
-
93
  default_negative_prompt = (
94
- "low quality, low resolution, poor lighting, underexposed, overexposed, noise, flickering, artifacts, "
95
- "stutter, inconsistent motion, broken motion, distorted face, changing face, unnatural anatomy"
96
  )
97
 
98
  # ------------------------
99
- # Image and video utilities
100
  # ------------------------
101
  def resize_image(image: Image.Image) -> Image.Image:
102
  width, height = image.size
@@ -107,106 +62,237 @@ def resize_image(image: Image.Image) -> Image.Image:
107
  MAX_AR = MAX_DIM / MIN_DIM
108
  MIN_AR = MIN_DIM / MAX_DIM
109
 
 
110
  if aspect_ratio > MAX_AR:
111
  crop_width = int(round(height * MAX_AR))
112
  left = (width - crop_width) // 2
113
- image = image.crop((left, 0, left + crop_width, height))
114
  elif aspect_ratio < MIN_AR:
115
  crop_height = int(round(width / MIN_AR))
116
  top = (height - crop_height) // 2
117
- image = image.crop((0, top, width, top + crop_height))
118
-
119
- if width > height:
120
- target_w = MAX_DIM
121
- target_h = int(round(target_w / aspect_ratio))
122
  else:
123
- target_h = MAX_DIM
124
- target_w = int(round(target_h * aspect_ratio))
 
 
 
 
125
 
 
126
  final_w = max(MIN_DIM, min(MAX_DIM, round(target_w / MULTIPLE_OF) * MULTIPLE_OF))
127
  final_h = max(MIN_DIM, min(MAX_DIM, round(target_h / MULTIPLE_OF) * MULTIPLE_OF))
128
- return image.resize((final_w, final_h), Image.LANCZOS)
129
 
130
 
131
  def get_num_frames(duration_seconds: float):
132
  return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
133
 
 
134
  # ------------------------
135
- # Generation process
136
  # ------------------------
137
- @spaces.GPU()
138
  def generate_video(
139
  input_image,
140
  prompt,
141
- steps=4,
142
  negative_prompt=default_negative_prompt,
143
  duration_seconds=3.5,
144
  guidance_scale=1.0,
145
  guidance_scale_2=1.0,
146
  seed=42,
147
  randomize_seed=False,
148
- progress=gr.Progress(track_tqdm=True)
149
  ):
150
- if input_image is None:
151
- raise gr.Error("⚠️ Please upload an input image first.")
152
-
153
- num_frames = get_num_frames(duration_seconds)
154
- current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
155
- resized_image = resize_image(input_image)
156
-
157
- with progress.tqdm(total=100) as pbar:
158
- pbar.set_description("🎬 Generating video...")
159
- output_frames_list = pipe(
160
- image=resized_image,
161
- prompt=prompt,
162
- negative_prompt=negative_prompt,
163
- height=resized_image.height,
164
- width=resized_image.width,
165
- num_frames=num_frames,
166
- guidance_scale=float(guidance_scale),
167
- guidance_scale_2=float(guidance_scale_2),
168
- num_inference_steps=int(steps),
169
- generator=torch.Generator(device="cuda").manual_seed(current_seed),
170
- ).frames[0]
171
-
172
- with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
173
- video_path = tmpfile.name
174
-
175
- export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
176
- torch.cuda.empty_cache()
177
- gc.collect()
178
- return video_path, current_seed
179
 
180
  # ------------------------
181
- # User interface
182
  # ------------------------
183
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
184
  gr.HTML("""
185
  <div style="text-align:center; padding:20px;">
186
- <h1 style="font-size: 2em;">Wan 2.2 Lightning Studio – AI Cinematic Video Generator</h1>
187
- <p style="opacity:0.8;">⚡ Powered by dream2589632147</p>
188
  </div>
189
- """)
190
 
191
  with gr.Row():
192
  with gr.Column(scale=1):
193
  input_image = gr.Image(label="🎞️ Input Image", type="pil")
194
  prompt = gr.Textbox(label="✨ Positive Prompt", value=default_prompt_i2v, lines=3)
195
  negative_prompt = gr.Textbox(label="🚫 Negative Prompt", value=default_negative_prompt, lines=3)
196
- duration = gr.Slider(MIN_DURATION, MAX_DURATION, value=3.5, step=0.1, label="🎬 Duration (seconds)")
197
  with gr.Accordion("⚙️ Advanced Settings", open=False):
198
  steps = gr.Slider(1, 30, value=6, step=1, label="Inference Steps")
199
  guidance_scale = gr.Slider(0.0, 10.0, value=1.0, step=0.5, label="Guidance Scale 1")
200
  guidance_scale_2 = gr.Slider(0.0, 10.0, value=1.0, step=0.5, label="Guidance Scale 2")
201
- seed = gr.Slider(0, MAX_SEED, value=42, step=1, label="Seed")
202
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
203
  generate_btn = gr.Button("🚀 Generate Cinematic Video", variant="primary")
204
 
205
  with gr.Column(scale=1):
206
  video_output = gr.Video(label="🎬 Generated Video Preview", autoplay=True)
207
- seed_output = gr.Textbox(label="🎲 Seed Used", interactive=False)
208
  download_btn = gr.File(label="⬇️ Download MP4")
209
 
 
210
  generate_btn.click(
211
  fn=generate_video,
212
  inputs=[input_image, prompt, steps, negative_prompt, duration,
@@ -214,7 +300,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
214
  outputs=[video_output, seed_output]
215
  )
216
 
217
- # Dark/light mode toggle button
218
  gr.HTML("""
219
  <script>
220
  const toggle = document.createElement('button');
@@ -232,7 +318,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
232
  </script>
233
  """)
234
 
235
- gr.Markdown("---\nMade with ❤️ using [Gradio](https://gradio.app) • Hosted on [Hugging Face Spaces](https://huggingface.co/spaces)")
236
 
237
  if __name__ == "__main__":
238
  demo.queue().launch()
 
1
+ # app.py — Modified for dream2589632147/Dream-wan2-2-faster-Pro
2
+ import os
3
+ import tempfile
4
+ import random
5
+ import gc
6
+ import traceback
7
+
8
+ import numpy as np
9
+ from PIL import Image
10
+
11
  import torch
12
+ import gradio as gr
13
+
14
  from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
15
  from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
16
  from diffusers.utils.export_utils import export_to_video
17
+
+ # Optional Hugging Face Spaces ZeroGPU support; the decorator below falls back to a no-op without it
+ try:
+ import spaces
+ except Exception:
+ spaces = None
+
+ # Optional quantization (wrapped safely)
19
+ try:
20
+ from torchao.quantization import quantize_
21
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
22
+ HAS_TORCHAO_QUANT = True
23
+ except Exception:
24
+ HAS_TORCHAO_QUANT = False
25
 
26
  # ------------------------
27
+ # Configuration
28
  # ------------------------
29
+ MODEL_ID = "dream2589632147/Dream-wan2-2-faster-Pro" # user's model
30
+ # If your actual transformer checkpoint differs, update the following IDs accordingly:
31
+ TRANSFORMER_BACKBONE = "cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers"
32
 
33
  MAX_DIM = 832
34
  MIN_DIM = 480
 
41
  MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
42
  MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
43
 
44
+ # default prompts (shortened for readability — replace with your full prompts)
45
  default_prompt_i2v = (
46
+ "ultra realistic cinematic footage, perfectly preserved facial identity and body structure across all frames,"
47
+ " stable anatomy and consistent body proportions, realistic skin, photorealistic lighting"
 
 
 
48
  )
 
49
  default_negative_prompt = (
50
+ "low quality, low resolution, poor lighting, noise, flicker, artifact, changing face, inconsistent anatomy"
 
51
  )
52
 
53
  # ------------------------
54
+ # Utility functions
55
  # ------------------------
56
  def resize_image(image: Image.Image) -> Image.Image:
57
  width, height = image.size
 
62
  MAX_AR = MAX_DIM / MIN_DIM
63
  MIN_AR = MIN_DIM / MAX_DIM
64
 
65
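+ # Center-crop when the aspect ratio falls outside the allowed [MIN_AR, MAX_AR] band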
+ image_to_resize = image
66
  if aspect_ratio > MAX_AR:
67
  crop_width = int(round(height * MAX_AR))
68
  left = (width - crop_width) // 2
69
+ image_to_resize = image.crop((left, 0, left + crop_width, height))
70
  elif aspect_ratio < MIN_AR:
71
  crop_height = int(round(width / MIN_AR))
72
  top = (height - crop_height) // 2
73
+ image_to_resize = image.crop((0, top, width, top + crop_height))
 
 
 
 
74
+ # compute the resize target in every branch (otherwise target_w/target_h are undefined after a crop)
+ if width > height:
+ target_w = MAX_DIM
+ target_h = int(round(target_w / aspect_ratio))
+ else:
+ target_h = MAX_DIM
+ target_w = int(round(target_h * aspect_ratio))
81
 
82
+ # ensure multiple-of constraint
83
  final_w = max(MIN_DIM, min(MAX_DIM, round(target_w / MULTIPLE_OF) * MULTIPLE_OF))
84
  final_h = max(MIN_DIM, min(MAX_DIM, round(target_h / MULTIPLE_OF) * MULTIPLE_OF))
85
+ return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
86
 
87
 
88
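  # Convert a duration in seconds into a frame count, clipped to the model's supported range (plus one)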
  def get_num_frames(duration_seconds: float):
89
  return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
90
 
91
+
92
  # ------------------------
93
+ # Load pipeline (wrapped in try/except to provide clear messages)
94
  # ------------------------
95
+ print("🔹 Loading pipeline. This can take a while...")
96
+
97
+ try:
98
+ # Use float16 for compatibility with most GPUs (H200 should be OK)
99
+ transformer_kwargs = {
100
+ "subfolder": "transformer",
101
+ "torch_dtype": torch.float16,
102
+ "device_map": "cuda"
103
+ }
104
+ transformer_2_kwargs = {
105
+ "subfolder": "transformer_2",
106
+ "torch_dtype": torch.float16,
107
+ "device_map": "cuda"
108
+ }
109
+
110
+ transformer = WanTransformer3DModel.from_pretrained(TRANSFORMER_BACKBONE, **transformer_kwargs)
111
+ transformer_2 = WanTransformer3DModel.from_pretrained(TRANSFORMER_BACKBONE, **transformer_2_kwargs)
112
+
113
+ pipe = WanImageToVideoPipeline.from_pretrained(
114
+ MODEL_ID,
115
+ transformer=transformer,
116
+ transformer_2=transformer_2,
117
+ torch_dtype=torch.float16,
118
+ ).to("cuda")
119
+ print("✅ Pipeline loaded successfully.")
120
+
121
+ # Attempt to load LoRA adapters if available — wrapped for safety
122
+ try:
123
+ pipe.load_lora_weights(
124
+ "Kijai/WanVideo_comfy",
125
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
126
+ adapter_name="lightx2v"
127
+ )
128
+ pipe.load_lora_weights(
129
+ "Kijai/WanVideo_comfy",
130
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
131
+ adapter_name="lightx2v_2",
132
+ load_into_transformer_2=True
133
+ )
134
+ pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
135
+ pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
136
+ pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
137
+ print("✅ LoRA adapters loaded and fused.")
138
+ except Exception as e:
139
+ print(f"⚠️ Could not load/fuse LoRA adapters: {e}")
140
+
141
+ # Optional quantization if torchao is installed and CUDA is available
142
+ if torch.cuda.is_available() and HAS_TORCHAO_QUANT:
143
+ try:
144
+ quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
145
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
146
+ quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
147
+ print("✅ Quantization applied.")
148
+ except Exception as e:
149
+ print(f"⚠️ Quantization skipped: {e}")
150
+ else:
151
+ if not HAS_TORCHAO_QUANT:
152
+ print("ℹ️ torchao.quantization not available; skipping quantization.")
153
+ else:
154
+ print("ℹ️ CUDA not available; skipping quantization.")
155
+
156
+ except Exception as e:
157
+ print("❌ Failed to initialize pipeline. Full traceback:")
158
+ traceback.print_exc()
159
+ # It's OK to keep running the app; generate_video will catch missing pipe and return an error to UI
160
+ pipe = None
161
+
162
+
163
+ # ------------------------
164
+ # Video generation function
165
+ # ------------------------
166
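+ # Apply the Spaces ZeroGPU decorator only if the `spaces` module is available; otherwise the function runs undecorated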
+ @spaces.GPU() if hasattr(globals().get("spaces", None), "GPU") else (lambda f: f)
167
  def generate_video(
168
  input_image,
169
  prompt,
170
+ steps=6,
171
  negative_prompt=default_negative_prompt,
172
  duration_seconds=3.5,
173
  guidance_scale=1.0,
174
  guidance_scale_2=1.0,
175
  seed=42,
176
  randomize_seed=False,
177
+ progress=gr.Progress()  # Gradio progress tracker, injected automatically
178
  ):
179
+ """
180
+ Returns: (video_path_for_preview, seed_used)
181
+ """
182
+ try:
183
+ if pipe is None:
184
+ return gr.update(value=None), "Error: pipeline not initialized on backend."
185
+
186
+ if input_image is None:
187
+ raise gr.Error("Please upload an input image.")
188
+
189
+ # Prepare
190
+ num_frames = get_num_frames(duration_seconds)
191
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
192
+ resized_image = resize_image(input_image.convert("RGB"))
193
+
194
+ # gr.Progress is not a context manager; report progress by calling it with a fraction in [0, 1]
+ progress(0, desc="Starting generation...")
198
+
199
+ # Stage 1 — generate frames
200
+ progress(0.1, desc="Running diffusion pipeline (prepare)...")
201
+ gen = torch.Generator(device="cuda").manual_seed(current_seed)
202
+ # Call pipeline (this is the heavy op)
203
+ progress(0.2, desc="Generating frames (this may take a while)...")
204
+ result = pipe(
205
+ image=resized_image,
206
+ prompt=prompt,
207
+ negative_prompt=negative_prompt,
208
+ height=resized_image.height,
209
+ width=resized_image.width,
210
+ num_frames=num_frames,
211
+ guidance_scale=float(guidance_scale),
212
+ guidance_scale_2=float(guidance_scale_2),
213
+ num_inference_steps=int(steps),
214
+ generator=gen,
215
+ )
216
+
217
+ # result.frames shape depends on implementation; we expect list-like of frames
218
+ frames_list = None
219
+ try:
220
+ frames_list = result.frames[0]
221
+ except Exception:
222
+ # fallback: the result may be a plain list or expose its frames differently
223
+ if hasattr(result, "frames"):
224
+ frames_list = result.frames
225
+ else:
226
+ frames_list = result # last resort
227
+
228
+ if frames_list is None:
229
+ raise RuntimeError("Pipeline returned no frames.")
230
+
231
+ progress(0.7, desc="Encoding frames to MP4...")
232
+
233
+ # Save to temp file
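+ # delete=False keeps the temp file on disk so Gradio can serve it after the handler returns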
234
+ with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
235
+ video_path = tmpfile.name
236
+
237
+ export_to_video(frames_list, video_path, fps=FIXED_FPS)
238
+
239
+ progress(0.95, desc="Finalizing and cleaning memory...")
240
+ # cleanup
241
+ try:
242
+ torch.cuda.synchronize()
243
+ except Exception:
244
+ pass
245
+ torch.cuda.empty_cache()
246
+ gc.collect()
247
+
248
+ progress(1.0, desc="Done!")
249
+
250
+ # Return path for gr.Video and the seed used (seed as string)
251
+ return video_path, str(current_seed)
252
+
253
+ except gr.Error as ge:
254
+ # expected user-facing error
255
+ return None, f"Input error: {ge}"
256
+
257
+ except Exception as e:
258
+ # log full traceback server-side
259
+ traceback_str = traceback.format_exc()
260
+ print("Error during generation:\n", traceback_str)
261
+ # return error message to UI (do not leak sensitive internals)
262
+ return None, f"Generation failed: {e}"
263
+
264
 
265
  # ------------------------
266
+ # Gradio UI
267
  # ------------------------
268
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
269
  gr.HTML("""
270
  <div style="text-align:center; padding:20px;">
271
+ <h1 style="font-size: 1.6em;">Dream Wan2.2 Video Generator (wan2-2-faster-Pro)</h1>
272
+ <p style="opacity:0.8;">Model: {}</p>
273
  </div>
274
+ """.format(MODEL_ID))
275
 
276
  with gr.Row():
277
  with gr.Column(scale=1):
278
  input_image = gr.Image(label="🎞️ Input Image", type="pil")
279
  prompt = gr.Textbox(label="✨ Positive Prompt", value=default_prompt_i2v, lines=3)
280
  negative_prompt = gr.Textbox(label="🚫 Negative Prompt", value=default_negative_prompt, lines=3)
281
+ duration = gr.Slider(MIN_DURATION, MAX_DURATION, value=3.5, step=0.1, label="Duration (seconds)")
282
  with gr.Accordion("⚙️ Advanced Settings", open=False):
283
  steps = gr.Slider(1, 30, value=6, step=1, label="Inference Steps")
284
  guidance_scale = gr.Slider(0.0, 10.0, value=1.0, step=0.5, label="Guidance Scale 1")
285
  guidance_scale_2 = gr.Slider(0.0, 10.0, value=1.0, step=0.5, label="Guidance Scale 2")
286
+ seed = gr.Number(value=42, label="Seed", precision=0)
287
+ randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
288
  generate_btn = gr.Button("🚀 Generate Cinematic Video", variant="primary")
289
 
290
  with gr.Column(scale=1):
291
  video_output = gr.Video(label="🎬 Generated Video Preview", autoplay=True)
292
+ seed_output = gr.Textbox(label="🎲 Seed Used / Status", interactive=False)
293
  download_btn = gr.File(label="⬇️ Download MP4")
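  # NOTE: download_btn is not wired to generate_btn below; the video preview's built-in download control can be used instead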
294
 
295
+ # Wire up the button: outputs -> (video preview, seed/status)
296
  generate_btn.click(
297
  fn=generate_video,
298
  inputs=[input_image, prompt, steps, negative_prompt, duration,
 
300
  outputs=[video_output, seed_output]
301
  )
302
 
303
+ # Toggle theme script (kept from your original)
304
  gr.HTML("""
305
  <script>
306
  const toggle = document.createElement('button');
 
318
  </script>
319
  """)
320
 
321
+ gr.Markdown("---\nMade with ❤️ using Gradio • Hosted on Spaces")
322
 
323
  if __name__ == "__main__":
324
  demo.queue().launch()