Commit 10919c8 · reload model
Parent(s): ffb1bd6
app.py CHANGED
@@ -354,14 +354,15 @@ def ui(GPU_memory_mode, scheduler_dict, config_path, compile_dit, weight_dtype):
 
         with gr.Column(variant="panel"):
             # Hide model selection
-
+            local_model_dir = os.path.join("models", "Wan2.1-T2V-14B")
+            diffusion_transformer_dropdown, _ = create_model_checkpoints(controller, visible=False, default_model=local_model_dir)
 
             # Use snapshot download for the VideoCoF repo to get all weights (including safetensors)
             try:
                 from huggingface_hub import snapshot_download, hf_hub_download
                 print("Downloading Wan2.1-T2V-14B weights...")
-
-                snapshot_download(repo_id=
+                hf_model_id = "Wan-AI/Wan2.1-T2V-14B"
+                snapshot_download(repo_id=hf_model_id, local_dir=local_model_dir, local_dir_use_symlinks=False)
 
             os.makedirs("models/Personalized_Model", exist_ok=True)
 
@@ -385,7 +386,7 @@ def ui(GPU_memory_mode, scheduler_dict, config_path, compile_dit, weight_dtype):
 
         # Preload heavy weights and LoRAs before launching the UI to avoid first-run latency.
         acc_lora_path = os.path.join("models", "Personalized_Model", "Wan2.1_Text_to_Video_14B_FusionX_LoRA.safetensors")
-        preload_models(controller,
+        preload_models(controller, local_model_dir, "videocof.safetensors", acc_lora_path)
 
         with gr.Column(variant="panel"):
             prompt_textbox, negative_prompt_textbox = create_prompts(prompt="Remove the young man with short black hair wearing black shirt on the left.")
@@ -498,7 +499,8 @@ def ui(GPU_memory_mode, scheduler_dict, config_path, compile_dit, weight_dtype):
 if __name__ == "__main__":
     from videox_fun.ui.controller import flow_scheduler_dict
 
-
+    # Keep everything on GPU to avoid offload hooks that break ZeroGPU pickling
+    GPU_memory_mode = "model_full_load_and_qfloat8"
     compile_dit = False
     weight_dtype = torch.bfloat16
     server_name = "0.0.0.0"
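The first hunk pins the checkpoint path to models/Wan2.1-T2V-14B and fills it with snapshot_download before the UI is built. Below is a minimal standalone sketch of that pattern; the ensure_wan_weights helper and its skip-if-already-present guard are illustrative assumptions, not code from this commit.

import os
from huggingface_hub import snapshot_download

def ensure_wan_weights(local_model_dir: str = os.path.join("models", "Wan2.1-T2V-14B"),
                       repo_id: str = "Wan-AI/Wan2.1-T2V-14B") -> str:
    # Mirror the whole repo (including the safetensors shards) into local_model_dir,
    # skipping the download when the directory already has content. Hypothetical
    # helper; the commit calls snapshot_download unconditionally inside a try block.
    if not os.path.isdir(local_model_dir) or not os.listdir(local_model_dir):
        snapshot_download(repo_id=repo_id, local_dir=local_model_dir)
    return local_model_dir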
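The comment added in the last hunk ("Keep everything on GPU to avoid offload hooks that break ZeroGPU pickling") refers to how ZeroGPU Spaces execute GPU work: the function decorated with spaces.GPU is handed off to a GPU worker, and a pipeline set up with CPU-offload hooks does not transfer cleanly, hence the switch to model_full_load_and_qfloat8. A rough sketch of that usage pattern follows, assuming a ZeroGPU runtime; TinyStandIn is a placeholder for the real Wan2.1 pipeline and is not part of app.py.

import spaces
import torch
from torch import nn

class TinyStandIn(nn.Module):
    # Placeholder model; the relevant point is that it is fully materialized at
    # import time, with no accelerate offload hooks attached.
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x):
        return self.proj(x)

model = TinyStandIn().to(torch.bfloat16)

@spaces.GPU(duration=60)  # the decorated call runs on a borrowed GPU worker
def generate(prompt: str) -> torch.Tensor:
    x = torch.zeros(1, 8, dtype=torch.bfloat16, device="cuda")
    return model.to("cuda")(x)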