{ "id": "9a6bb580-900f-4a5f-9233-12eb0fe41226", "revision": 0, "last_node_id": 318, "last_link_id": 133, "nodes": [ { "id": 51, "type": "CLIPVisionEncode", "pos": [ 124.79998016357422, 510.9447021484375 ], "size": [ 253.60000610351562, 78 ], "flags": {}, "order": 21, "mode": 0, "inputs": [ { "name": "clip_vision", "type": "CLIP_VISION", "link": 94 }, { "name": "image", "type": "IMAGE", "link": 109 } ], "outputs": [ { "name": "CLIP_VISION_OUTPUT", "type": "CLIP_VISION_OUTPUT", "slot_index": 0, "links": [ 107 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.36", "Node name for S&R": "CLIPVisionEncode", "widget_ue_connectable": {} }, "widgets_values": [ "none" ] }, { "id": 8, "type": "VAEDecode", "pos": [ 1044.422607421875, 43.79334259033203 ], "size": [ 210, 46 ], "flags": { "collapsed": true }, "order": 32, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 35 }, { "name": "vae", "type": "VAE", "link": 119 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "slot_index": 0, "links": [ 112 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.36", "Node name for S&R": "VAEDecode", "widget_ue_connectable": {} }, "widgets_values": [] }, { "id": 54, "type": "ModelSamplingSD3", "pos": [ 602.1815185546875, 376.4954833984375 ], "size": [ 219.82144165039062, 59.586307525634766 ], "flags": {}, "order": 30, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 115 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "slot_index": 0, "links": [ 111 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.36", "Node name for S&R": "ModelSamplingSD3", "widget_ue_connectable": {} }, "widgets_values": [ 1.0000000000000002 ] }, { "id": 111, "type": "ModelPatchTorchSettings", "pos": [ 511.2032470703125, -189.2046661376953 ], "size": [ 253.60000610351562, 58 ], "flags": {}, "order": 29, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 114 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 115 ] } ], 
"properties": { "cnr_id": "comfyui-kjnodes", "ver": "5dcda71011870278c35d92ff77a677ed2e538f2d", "Node name for S&R": "ModelPatchTorchSettings", "widget_ue_connectable": {} }, "widgets_values": [ true ], "color": "#2a363b", "bgcolor": "#3f5159" }, { "id": 49, "type": "CLIPVisionLoader", "pos": [ 120.06415557861328, -152.35841369628906 ], "size": [ 315, 58 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "CLIP_VISION", "type": "CLIP_VISION", "slot_index": 0, "links": [ 94 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.36", "Node name for S&R": "CLIPVisionLoader", "models": [ { "name": "clip_vision_h.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/clip_vision/clip_vision_h.safetensors?download=true", "directory": "clip_vision" } ], "widget_ue_connectable": {} }, "widgets_values": [ "clip_vision_h.safetensors" ], "color": "#223", "bgcolor": "#335" }, { "id": 108, "type": "PathchSageAttentionKJ", "pos": [ 510.80859375, -310.6372985839844 ], "size": [ 253.63636779785156, 58 ], "flags": {}, "order": 28, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 129 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 114 ] } ], "properties": { "cnr_id": "comfyui-kjnodes", "ver": "5dcda71011870278c35d92ff77a677ed2e538f2d", "Node name for S&R": "PathchSageAttentionKJ", "widget_ue_connectable": {} }, "widgets_values": [ "auto" ], "color": "#2a363b", "bgcolor": "#3f5159" }, { "id": 297, "type": "UnetLoaderGGUFDisTorchMultiGPU", "pos": [ -357.08233642578125, -1244.5355224609375 ], "size": [ 413.7545166015625, 156.38363647460938 ], "flags": {}, "order": 1, "mode": 4, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [] } ], "properties": { "cnr_id": "comfyui-multigpu", "ver": "1.7.3", "Node name for S&R": "UnetLoaderGGUFDisTorchMultiGPU", "widget_ue_connectable": {} }, "widgets_values": [ "wanvace\\Wan2.1_T2V_14B_FusionX_VACE-Q3_K_S.gguf", 
"cuda:0", 0.1, false, "" ], "color": "#322", "bgcolor": "#533" }, { "id": 299, "type": "MarkdownNote", "pos": [ -374.5340576171875, -1029.4305419921875 ], "size": [ 429.1710205078125, 183.4434356689453 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [], "title": "speed boost", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "## Just un-bypass the above node, and connect the output to where the current Unet Loader (GGUF) is connected then bypass that one." ], "color": "#432", "bgcolor": "#653" }, { "id": 298, "type": "MarkdownNote", "pos": [ 96.43013000488281, -1262.2916259765625 ], "size": [ 1014.274658203125, 420.11279296875 ], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [], "title": "Low vram", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "## ⬅️ Increase `virtual_vram_gb` to Simulate More VRAM\n\nUse this node instead of the standard **Unet Loader (GGUF)** if you're getting **out-of-memory (OOM)** errors.\n\n> ⚠️ **Note:** Higher values = more memory offloading = slower performance.\n\n---\n\n## ✅ Recommended Settings for 12GB and lower GPU (If you OOM)\n\n| **Field** | **Recommended Value** | **Description** |\n|---------------------------|------------------------------|---------------------------------------------------------------------------------|\n| `device` | `cuda:0` | Keep this set to use the primary GPU. |\n| `virtual_vram_gb` | `2.0` to `4.0` | Adds \"virtual\" VRAM by offloading to CPU. Start with 2.0 and increase as needed.|\n| `use_other_vram` | `true` | Enables fallback to CPU or additional GPU memory if available. |\n| `expert_mode_allocations`| *(leave blank unless advanced)* | Use only if you want manual control over layer/device allocation. 
|\n\n---\n" ], "color": "#322", "bgcolor": "#533" }, { "id": 301, "type": "MarkdownNote", "pos": [ 67.71233367919922, -767.65771484375 ], "size": [ 1097.5220947265625, 125.47974395751953 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [], "title": "FusionX Lora", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "## 📦 Main Model should NOT be a FusionX main model; instead, use the normal base Wan2.1 14B image-to-video model, since you're using the FusionX LoRA here.\n\n\n\n\n\n\n" ], "color": "#432", "bgcolor": "#653" }, { "id": 304, "type": "MarkdownNote", "pos": [ 1349.3433837890625, 846.6979370117188 ], "size": [ 586.1148681640625, 92.16458892822266 ], "flags": {}, "order": 5, "mode": 0, "inputs": [], "outputs": [], "title": "FusionX Lora", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "