dream2589632147 committed
Commit 88079b8 · verified · 1 Parent(s): 97608ef

Update app.py

Files changed (1)
  1. app.py +27 -28
app.py CHANGED
@@ -36,24 +36,33 @@ MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
 # LOAD PIPELINE
 # =========================================================
 print("Loading pipeline...")
+
+# 1. Load the components without device_map="cuda" to avoid the ZeroGPU conflict
+transformer = WanTransformer3DModel.from_pretrained(
+    MODEL_ID,
+    subfolder="transformer",
+    torch_dtype=torch.bfloat16,
+    token=HF_TOKEN
+)
+
+transformer_2 = WanTransformer3DModel.from_pretrained(
+    MODEL_ID,
+    subfolder="transformer_2",
+    torch_dtype=torch.bfloat16,
+    token=HF_TOKEN
+)
+
+# 2. Assemble the pipeline
 pipe = WanImageToVideoPipeline.from_pretrained(
     MODEL_ID,
-    transformer=WanTransformer3DModel.from_pretrained(
-        MODEL_ID,
-        subfolder="transformer",
-        torch_dtype=torch.bfloat16,
-        device_map="cuda",
-        token=HF_TOKEN
-    ),
-    transformer_2=WanTransformer3DModel.from_pretrained(
-        MODEL_ID,
-        subfolder="transformer_2",
-        torch_dtype=torch.bfloat16,
-        device_map="cuda",
-        token=HF_TOKEN
-    ),
+    transformer=transformer,
+    transformer_2=transformer_2,
     torch_dtype=torch.bfloat16,
-).to("cuda")
+)
+
+# 3. Move the model to CUDA once, here
+print("Moving pipeline to CUDA...")
+pipe = pipe.to("cuda")
 
 # =========================================================
 # LOAD LORA ADAPTERS
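The new comment drops device_map="cuda" to avoid conflicting with ZeroGPU, which attaches the GPU per request rather than at import time. A minimal sketch of how the inference entry point is usually decorated on a ZeroGPU Space, assuming app.py imports the spaces package; the signature and body of generate_video below are illustrative, not the implementation in this commit.

import spaces  # assumed: the ZeroGPU helper package available on Hugging Face Spaces

@spaces.GPU(duration=120)  # duration (seconds) is an assumed value, not taken from this commit
def generate_video(input_image, prompt, steps):
    # Illustrative body: the GPU is attached only for this call, so the pipeline
    # can be assembled at import time without device_map="cuda".
    result = pipe(
        image=input_image,
        prompt=prompt,
        num_inference_steps=int(steps),
    )
    return result.frames[0]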
@@ -80,6 +89,7 @@ pipe.unload_lora_weights()
 # QUANTIZATION & AOT OPTIMIZATION
 # =========================================================
 print("Applying quantization...")
+# Quantization now runs after the move to the GPU to ensure compatibility
 quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
 quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
 quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
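The quantize_ calls in this hunk use torchao's config-style API. A short sketch of the imports they rely on and of the ordering the new comment describes (quantize only after the pipeline has been moved to the GPU), assuming a recent torchao release that exposes these config classes.

# Assumed imports near the top of app.py (torchao config-style quantization API).
from torchao.quantization import (
    quantize_,
    Int8WeightOnlyConfig,
    Float8DynamicActivationFloat8WeightConfig,
)

# Per the comment above: move the pipeline to the GPU first, then quantize in place.
pipe = pipe.to("cuda")
quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())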
@@ -275,19 +285,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
 
-    gr.Examples(
-        examples=[
-            [
-                "wan_i2v_input.JPG",
-                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
-                4,
-            ],
-        ],
-        inputs=[input_image_component, prompt_input, steps_slider],
-        outputs=[video_output, seed_input],
-        fn=generate_video,
-        cache_examples=False
-    )
+    # Note: the Examples section can be removed if the errors persist or if the example image has not been uploaded
+    # gr.Examples(...)
 
     # --- BOTTOM ADVERTISEMENT BANNER ---
     gr.HTML("""
 