Commit
·
bfbf793
0
Parent(s):
Initial commit
Browse files- .gitattributes +35 -0
- Final-000001.safetensors +3 -0
- Final-step00001000.safetensors +3 -0
- Final-step00002000.safetensors +3 -0
- Final-step00003000.safetensors +3 -0
- Final-step00004000.safetensors +3 -0
- Final-step00005000.safetensors +3 -0
- Final-step00006000.safetensors +3 -0
- Final-step00007000.safetensors +3 -0
- NoobAI Flux2VAE RF v0.1 Aesthetic Tune.safetensors +3 -0
- NoobAI Flux2VAE RF v0.1.safetensors +3 -0
- README.md +263 -0
- SDXL-FLUX2VAE-RF-Basic.json +488 -0
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
Final-000001.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9c052641c9348e7225b671c2acc6942c65694ac76c61cd431444bd35657bcdf1
|
| 3 |
+
size 6939151202
|
Final-step00001000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b3bc2ee696c83dc63d18ed7478ac510ba7db1c91115dafaaba44968d7963adb4
|
| 3 |
+
size 6939151202
|
Final-step00002000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f9b26c623ffbde28ab9c356478ffe4d0c64a76228d55b635646358d73f01db3e
|
| 3 |
+
size 6939151202
|
Final-step00003000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cdcefe02e943fb7ce032e34c39f6d19f9a81a97040f5b09e0ffb5f690758cb72
|
| 3 |
+
size 6939151202
|
Final-step00004000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c052f252d7e262f4ea8319c14614afec1f6c382b8cdad7eb6460b49612adef8d
|
| 3 |
+
size 6939151202
|
Final-step00005000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b22fd3b07a1a97ea8ea04e282067894d65e4bdc48cc8bfc4ad43e56bfde32238
|
| 3 |
+
size 6939151202
|
Final-step00006000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0d26e35763c993563b33fa35a44bd81b934eae9c993326c0a12ac6ee160da5b
|
| 3 |
+
size 6939151202
|
Final-step00007000.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9fcfc1fe8ea4eec9474c9029e059ecb78ac8de03090c2466ad746c6b38515877
|
| 3 |
+
size 6939151202
|
NoobAI Flux2VAE RF v0.1 Aesthetic Tune.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:288303e36a36c8d930fc941346c82e6658d189cfaeae791bf9c78431c5e5fbaf
|
| 3 |
+
size 6939151210
|
NoobAI Flux2VAE RF v0.1.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:309b8934c432b719010d892893a9b765e47148a23060faac845c5c5f2964baaf
|
| 3 |
+
size 6939151202
|
README.md
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: other
|
| 3 |
+
license_name: fair-ai-public-license-1.0-sd
|
| 4 |
+
license_link: https://freedevproject.org/faipl-1.0-sd/
|
| 5 |
+
base_model:
|
| 6 |
+
- CabalResearch/NoobAI-RectifiedFlow-Experimental
|
| 7 |
+
library_name: diffusers
|
| 8 |
+
---
|
| 9 |
+
## Model Details
|
| 10 |
+
|
| 11 |
+
Experimental Conversion of our [NoobAI-RF](https://huggingface.co/CabalResearch/NoobAI-RectifiedFlow-Experimental) model to Flux2 VAE.
|
| 12 |
+
|
| 13 |
+
<u>We have observed the model's ability to adapt to the Flux2 VAE, and current trends suggest that significant improvements are possible with bigger training, which potentially would allow it to compete with bigger models.
|
| 14 |
+
By supporting us you could make it a reality.</u>
|
| 15 |
+
|
| 16 |
+
More info on supporting us: [click me](https://huggingface.co/CabalResearch/NoobAI-Flux2VAE-RectifiedFlow#potential-future)
|
| 17 |
+
|
| 18 |
+
### Model Description
|
| 19 |
+
|
| 20 |
+
This is a native training of SDXL Unet in combination with Flux2 VAE. Essentially we've adapted previously 4 channel model to work with 32 complex channels of Flux 2. No adapters or tricks, fully native.
|
| 21 |
+
Danbooru dataset of NoobAI has been utilized for this.
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
|
| 29 |
+
Due to limited compute we were not able to fully converge it, expect output on the level of very early anime models. We hope community will find this interesting enough to support us.
|
| 30 |
+
We observe steady convergence throughout the whole training process, and believe that further training will result in a new standard for fast local anime generation.
|
| 31 |
+
|
| 32 |
+
Please take this model as a proof of concept, not as a final product.
|
| 33 |
+
|
| 34 |
+
We have used Rectified Flow for training, with staged approach for adaptation of Flux2 VAE.
|
| 35 |
+
Most of the knowledge seems to be preserved, but it is significantly weakened due to the completely new latent space.
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
- **Developed by:** Cabal Research (Bluvoll, Anzhc)
|
| 39 |
+
- **Funded by:** Community, Bluvoll
|
| 40 |
+
- **License:** [fair-ai-public-license-1.0-sd](https://freedevproject.org/faipl-1.0-sd/)
|
| 41 |
+
- **Finetuned from model:** [NoobAI-RF](https://huggingface.co/CabalResearch/NoobAI-RectifiedFlow-Experimental)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
## Bias and Limitations
|
| 45 |
+
|
| 46 |
+
Once again, we are limited in budget for this fundamental task. We have adapted enough to have it output somewhat acceptable images (closer to a theoretical NoobAI 0.1's knowledge using the Flux 2 VAE), but further progress would require large compute, as we are in territory where the model is seeing the new level of detail for the first time (as well as the old level of detail in a new way), and it is hard.
|
| 47 |
+
|
| 48 |
+
Most biases of official dataset will apply(Blue Archive, etc.).
|
| 49 |
+
|
| 50 |
+
Expect noise, fuzzy details, low performance in landscape aspect ratio, bad hands and generally issues with composition as a whole.
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
## Model Output Examples
|
| 54 |
+
|
| 55 |
+
One of the benefits we have achieved is color:
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
Due to being native flow model, it achieves strong colors, while not making them acidic, or otherwise unstable.
|
| 59 |
+
|
| 60 |
+
Generally, as already stated, expect at least some grain and fuzziness in all generations, as we have not converged to the juicy details yet.
|
| 61 |
+

|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
|
| 65 |
+

|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
|
| 75 |
+
Area it is currently relatively nice in is scenery:
|
| 76 |
+
|
| 77 |
+

|
| 78 |
+
|
| 79 |
+

|
| 80 |
+
|
| 81 |
+

|
| 82 |
+
|
| 83 |
+

|
| 84 |
+
|
| 85 |
+

|
| 86 |
+
|
| 87 |
+

|
| 88 |
+
|
| 89 |
+

|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
|
| 93 |
+
We also provide Aesthetic Tune, that improves details in general:
|
| 94 |
+
|
| 95 |
+

|
| 96 |
+
|
| 97 |
+

|
| 98 |
+
|
| 99 |
+
# Recommendations
|
| 100 |
+
|
| 101 |
+
### Inference
|
| 102 |
+
|
| 103 |
+
#### Comfy
|
| 104 |
+
|
| 105 |
+

|
| 106 |
+
|
| 107 |
+
(Workflow is available alongside model in repo)
|
| 108 |
+
We will provide a temporary ComfyUI fork, and hope it will be adapted in main repo:
|
| 109 |
+
**https://github.com/Anzhc/ComfyUI-sdxl-flux2vae-support**
|
| 110 |
+
|
| 111 |
+
Same as your normal inference, but with addition of SD3 sampling node, as this model is Flow-based.
|
| 112 |
+
|
| 113 |
+
Recommended Parameters:
|
| 114 |
+
**Sampler**: Euler, Euler A, DPM++ SDE, etc.
|
| 115 |
+
**Steps**: 20-28
|
| 116 |
+
**CFG**: 6-9
|
| 117 |
+
**Schedule**: Normal/Simple/SGM Uniform/Quadratic
|
| 118 |
+
**Positive Quality Tags**: `masterpiece, best quality`
|
| 119 |
+
**Negative Tags**: `worst quality, normal quality, bad anatomy`
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
#### A1111 WebUI
|
| 123 |
+
|
| 124 |
+
(All screenshots are repeating our RF release, as there is no difference in setup)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
Recommended WebUI: [ReForge](https://github.com/Panchovix/stable-diffusion-webui-reForge) - has native support for Flow models, and we've PR'd our native support for Flux2vae-based SDXL modification.
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
**How to use in ReForge**:
|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
(ignore Sigma max field at the top, this is not used in RF)
|
| 134 |
+
|
| 135 |
+
Support for RF in ReForge is being implemented through a built-in extension:
|
| 136 |
+
|
| 137 |
+

|
| 138 |
+
|
| 139 |
+
Set parameters to that, and you're good to go.
|
| 140 |
+
|
| 141 |
+
Flux2VAE does not currently have an appropriate high-quality preview method; please use the Approx Cheap option, which will allow you to see a simple PCA projection (ReForge).
|
| 142 |
+
|
| 143 |
+
Recommended Parameters:
|
| 144 |
+
**Sampler**: Euler A Comfy RF, Euler, DPM++ SDE Comfy, etc. **ALL VARIANTS MUST BE RF OR COMFY, IF AVAILABLE. In ComfyUI routing is automatic, but not in the case of WebUI.**
|
| 145 |
+
**Steps**: 20-28
|
| 146 |
+
**CFG**: 6-9
|
| 147 |
+
**Schedule**: Normal/Simple/SGM Uniform
|
| 148 |
+
**Positive Quality Tags**: `masterpiece, best quality`
|
| 149 |
+
**Negative Tags**: `worst quality, normal quality, bad anatomy`
|
| 150 |
+
|
| 151 |
+
**ADETAILER FIX FOR RF**:
|
| 152 |
+
By default, Adetailer discards Advanced Model Sampling extension, which breaks RF. You need to add AMS to this part of settings:
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
|
| 156 |
+
Add: `advanced_model_sampling_script,advanced_model_sampling_script_backported` to there.
|
| 157 |
+
|
| 158 |
+
If that does not work, go into adetailer extension, find args.py, open it, replace _builtin_scripts like this:
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
|
| 162 |
+
Here is a copypaste for easy copy:
|
| 163 |
+
```
|
| 164 |
+
_builtin_script = (
|
| 165 |
+
"advanced_model_sampling_script",
|
| 166 |
+
"advanced_model_sampling_script_backported",
|
| 167 |
+
"hypertile_script",
|
| 168 |
+
"soft_inpainting",
|
| 169 |
+
)
|
| 170 |
+
```
|
| 171 |
+
|
| 172 |
+
Or use my fork of Adetailer - https://github.com/Anzhc/aadetailer-reforge
|
| 173 |
+
|
| 174 |
+
## Training
|
| 175 |
+
|
| 176 |
+
### Model Composition
|
| 177 |
+
(Relative to base it's trained from)
|
| 178 |
+
|
| 179 |
+
Unet: Same
|
| 180 |
+
CLIP L: Same, Frozen
|
| 181 |
+
CLIP G: Same, Frozen
|
| 182 |
+
VAE: [Flux2 VAE](https://huggingface.co/black-forest-labs/FLUX.2-dev/tree/main/vae)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
### Training Details
|
| 186 |
+
(Main Stage Training)
|
| 187 |
+
|
| 188 |
+
**Samples seen**(unbatched steps): ~18.5 million samples seen
|
| 189 |
+
**Learning Rate**: 5e-5
|
| 190 |
+
**Effective Batch size**: 1472 (92 Batch Size * 2 Accumulation * 8 GPUs)
|
| 191 |
+
**Precision**: Full BF16
|
| 192 |
+
**Optimizer**: AdamW8bit with Kahan Summation
|
| 193 |
+
**Weight Decay**: 0.01
|
| 194 |
+
**Schedule**: Constant with warmup
|
| 195 |
+
**Timestep Sampling Strategy**: Logit-Normal -0.2 1.5 (sometimes referred to as Lognorm), Shift 2.5
|
| 196 |
+
**Text Encoders**: Frozen
|
| 197 |
+
**Keep Token**: False
|
| 198 |
+
**Tag Dropout**: 10%
|
| 199 |
+
**Uncond Dropout**: 10%
|
| 200 |
+
**Shuffle**: True
|
| 201 |
+
|
| 202 |
+
**VAE Conv Padding**: False
|
| 203 |
+
**VAE Shift**: 0.0760
|
| 204 |
+
**VAE Scale**: 0.6043
|
| 205 |
+
|
| 206 |
+
**Additional Features used**: Protected Tags, Cosine Optimal Transport.
|
| 207 |
+
|
| 208 |
+
#### Training Data
|
| 209 |
+
|
| 210 |
+
2 epochs of the original NoobAI dataset, including images up to October 2024, minus screencap data(was not shared).
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
### LoRA Training
|
| 214 |
+
|
| 215 |
+
The current stage is trainable, but it is hard to achieve accurate reproduction if the subject/content is dependent on small details, as the base model has not converged to them yet.
|
| 216 |
+
My current style training settings (Anzhc):
|
| 217 |
+
|
| 218 |
+
**Learning Rate**: tested up to **7.5e-4**
|
| 219 |
+
**Batch Size**: 144 (6 real * 24 accum), using SGA(Stochastic Gradient Accumulation) - without SGA I probably would lower accum to 4-8.
|
| 220 |
+
**Optimizer**: Adamw8bit with Kahan summation
|
| 221 |
+
**Schedule**: ReREX (Use REX for simplicity, or Cosine annealing)
|
| 222 |
+
**Precision**: Full BF16
|
| 223 |
+
**Weight Decay**: 0.02
|
| 224 |
+
**Timestep Sampling Strategy**: Logit-Normal(either 0.0 1.0, or -0.2 1.5), Shift 2.5
|
| 225 |
+
|
| 226 |
+
**Dim/Alpha/Conv/Alpha**: 24/24/24/24 (Lycoris/Locon)
|
| 227 |
+
|
| 228 |
+
**Text Encoders**: Frozen
|
| 229 |
+
|
| 230 |
+
**Optimal Transport**: True
|
| 231 |
+
|
| 232 |
+
**Expected Dataset Size**: 100 images (Can be even 10, but balance with repeats to roughly this target.)
|
| 233 |
+
**Epochs**: 50
|
| 234 |
+
|
| 235 |
+
### Hardware
|
| 236 |
+
|
| 237 |
+
Model was trained on cloud 8xH200 node.
|
| 238 |
+
|
| 239 |
+
### Software
|
| 240 |
+
|
| 241 |
+
Custom fork of [SD-Scripts](https://github.com/bluvoll/sd-scripts)(maintained by Bluvoll)
|
| 242 |
+
|
| 243 |
+
## Acknowledgements
|
| 244 |
+
|
| 245 |
+
### Special Thanks
|
| 246 |
+
|
| 247 |
+
**To a special supporter who single-handedly sponsored the whole run and preferred to stay anonymous**
|
| 248 |
+
|
| 249 |
+
---
|
| 250 |
+
|
| 251 |
+
# Support
|
| 252 |
+
If you wish to support our continuous effort of making waifus 0.2% better, you can do it here:
|
| 253 |
+
|
| 254 |
+
**https://ko-fi.com/bluvoll**
|
| 255 |
+
|
| 256 |
+
Crypto link pending.
|
| 257 |
+
|
| 258 |
+
# Potential future
|
| 259 |
+
|
| 260 |
+
**Expected Compute Needed**: We theorize that the model needs at the very least 20 epochs on full data, ideally 35 Epochs, each epoch was about 460 USD with the provider we use, at the very least each time we reach enough donations to train 2 epochs, we'll resume and train more. If we have enough donations we will update the dataset to most recent data.
|
| 261 |
+
Why not do this now? Caching with the Flux 2 VAE takes a whopping 15 hours and roughly 20 TB, since each latent is 2 MB, which in itself costs 180 USD of compute time.
|
| 262 |
+
|
| 263 |
+
We are working on further improvements to pipeline and components at the moment of release of this model, and have plans to upgrade this arch more.
|
SDXL-FLUX2VAE-RF-Basic.json
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "e310ee90-0242-4171-94a9-e2082f9a0e50",
|
| 3 |
+
"revision": 0,
|
| 4 |
+
"last_node_id": 10,
|
| 5 |
+
"last_link_id": 17,
|
| 6 |
+
"nodes": [
|
| 7 |
+
{
|
| 8 |
+
"id": 8,
|
| 9 |
+
"type": "PreviewImage",
|
| 10 |
+
"pos": [
|
| 11 |
+
1200,
|
| 12 |
+
450
|
| 13 |
+
],
|
| 14 |
+
"size": [
|
| 15 |
+
400,
|
| 16 |
+
580
|
| 17 |
+
],
|
| 18 |
+
"flags": {},
|
| 19 |
+
"order": 7,
|
| 20 |
+
"mode": 0,
|
| 21 |
+
"inputs": [
|
| 22 |
+
{
|
| 23 |
+
"name": "images",
|
| 24 |
+
"type": "IMAGE",
|
| 25 |
+
"link": 12
|
| 26 |
+
}
|
| 27 |
+
],
|
| 28 |
+
"outputs": [],
|
| 29 |
+
"properties": {
|
| 30 |
+
"cnr_id": "comfy-core",
|
| 31 |
+
"ver": "0.3.63",
|
| 32 |
+
"Node name for S&R": "PreviewImage",
|
| 33 |
+
"ue_properties": {
|
| 34 |
+
"widget_ue_connectable": {},
|
| 35 |
+
"input_ue_unconnectable": {},
|
| 36 |
+
"version": "7.2.2"
|
| 37 |
+
}
|
| 38 |
+
},
|
| 39 |
+
"widgets_values": [],
|
| 40 |
+
"shape": 1
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"id": 7,
|
| 44 |
+
"type": "VAEDecode",
|
| 45 |
+
"pos": [
|
| 46 |
+
1050,
|
| 47 |
+
450
|
| 48 |
+
],
|
| 49 |
+
"size": [
|
| 50 |
+
140,
|
| 51 |
+
46
|
| 52 |
+
],
|
| 53 |
+
"flags": {
|
| 54 |
+
"collapsed": false
|
| 55 |
+
},
|
| 56 |
+
"order": 6,
|
| 57 |
+
"mode": 0,
|
| 58 |
+
"inputs": [
|
| 59 |
+
{
|
| 60 |
+
"name": "samples",
|
| 61 |
+
"type": "LATENT",
|
| 62 |
+
"link": 10
|
| 63 |
+
},
|
| 64 |
+
{
|
| 65 |
+
"name": "vae",
|
| 66 |
+
"type": "VAE",
|
| 67 |
+
"link": 11
|
| 68 |
+
}
|
| 69 |
+
],
|
| 70 |
+
"outputs": [
|
| 71 |
+
{
|
| 72 |
+
"name": "IMAGE",
|
| 73 |
+
"type": "IMAGE",
|
| 74 |
+
"links": [
|
| 75 |
+
12
|
| 76 |
+
]
|
| 77 |
+
}
|
| 78 |
+
],
|
| 79 |
+
"properties": {
|
| 80 |
+
"cnr_id": "comfy-core",
|
| 81 |
+
"ver": "0.3.63",
|
| 82 |
+
"Node name for S&R": "VAEDecode",
|
| 83 |
+
"ue_properties": {
|
| 84 |
+
"widget_ue_connectable": {},
|
| 85 |
+
"input_ue_unconnectable": {},
|
| 86 |
+
"version": "7.2.2"
|
| 87 |
+
}
|
| 88 |
+
},
|
| 89 |
+
"shape": 1
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"id": 9,
|
| 93 |
+
"type": "CheckpointLoaderSimple",
|
| 94 |
+
"pos": [
|
| 95 |
+
-100,
|
| 96 |
+
930
|
| 97 |
+
],
|
| 98 |
+
"size": [
|
| 99 |
+
380,
|
| 100 |
+
110
|
| 101 |
+
],
|
| 102 |
+
"flags": {},
|
| 103 |
+
"order": 0,
|
| 104 |
+
"mode": 0,
|
| 105 |
+
"inputs": [],
|
| 106 |
+
"outputs": [
|
| 107 |
+
{
|
| 108 |
+
"name": "MODEL",
|
| 109 |
+
"type": "MODEL",
|
| 110 |
+
"links": [
|
| 111 |
+
17
|
| 112 |
+
]
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"name": "CLIP",
|
| 116 |
+
"type": "CLIP",
|
| 117 |
+
"links": [
|
| 118 |
+
4,
|
| 119 |
+
5
|
| 120 |
+
]
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"name": "VAE",
|
| 124 |
+
"type": "VAE",
|
| 125 |
+
"links": [
|
| 126 |
+
11
|
| 127 |
+
]
|
| 128 |
+
}
|
| 129 |
+
],
|
| 130 |
+
"properties": {
|
| 131 |
+
"cnr_id": "comfy-core",
|
| 132 |
+
"ver": "0.3.63",
|
| 133 |
+
"Node name for S&R": "CheckpointLoaderSimple",
|
| 134 |
+
"ue_properties": {
|
| 135 |
+
"widget_ue_connectable": {},
|
| 136 |
+
"input_ue_unconnectable": {},
|
| 137 |
+
"version": "7.2.2"
|
| 138 |
+
}
|
| 139 |
+
},
|
| 140 |
+
"widgets_values": [
|
| 141 |
+
"NoobAI Flux2VAE RF v0.1.safetensors"
|
| 142 |
+
],
|
| 143 |
+
"shape": 1
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"id": 1,
|
| 147 |
+
"type": "EmptyLatentImage",
|
| 148 |
+
"pos": [
|
| 149 |
+
10,
|
| 150 |
+
780
|
| 151 |
+
],
|
| 152 |
+
"size": [
|
| 153 |
+
270,
|
| 154 |
+
110
|
| 155 |
+
],
|
| 156 |
+
"flags": {},
|
| 157 |
+
"order": 1,
|
| 158 |
+
"mode": 0,
|
| 159 |
+
"inputs": [],
|
| 160 |
+
"outputs": [
|
| 161 |
+
{
|
| 162 |
+
"name": "LATENT",
|
| 163 |
+
"type": "LATENT",
|
| 164 |
+
"links": [
|
| 165 |
+
9
|
| 166 |
+
]
|
| 167 |
+
}
|
| 168 |
+
],
|
| 169 |
+
"properties": {
|
| 170 |
+
"cnr_id": "comfy-core",
|
| 171 |
+
"ver": "0.3.63",
|
| 172 |
+
"Node name for S&R": "EmptyLatentImage",
|
| 173 |
+
"ue_properties": {
|
| 174 |
+
"widget_ue_connectable": {},
|
| 175 |
+
"input_ue_unconnectable": {},
|
| 176 |
+
"version": "7.2.2"
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
"widgets_values": [
|
| 180 |
+
832,
|
| 181 |
+
1216,
|
| 182 |
+
1
|
| 183 |
+
],
|
| 184 |
+
"shape": 1
|
| 185 |
+
},
|
| 186 |
+
{
|
| 187 |
+
"id": 3,
|
| 188 |
+
"type": "ModelSamplingSD3",
|
| 189 |
+
"pos": [
|
| 190 |
+
60,
|
| 191 |
+
680
|
| 192 |
+
],
|
| 193 |
+
"size": [
|
| 194 |
+
220,
|
| 195 |
+
60
|
| 196 |
+
],
|
| 197 |
+
"flags": {},
|
| 198 |
+
"order": 2,
|
| 199 |
+
"mode": 0,
|
| 200 |
+
"inputs": [
|
| 201 |
+
{
|
| 202 |
+
"name": "model",
|
| 203 |
+
"type": "MODEL",
|
| 204 |
+
"link": 17
|
| 205 |
+
}
|
| 206 |
+
],
|
| 207 |
+
"outputs": [
|
| 208 |
+
{
|
| 209 |
+
"name": "MODEL",
|
| 210 |
+
"type": "MODEL",
|
| 211 |
+
"links": [
|
| 212 |
+
6
|
| 213 |
+
]
|
| 214 |
+
}
|
| 215 |
+
],
|
| 216 |
+
"properties": {
|
| 217 |
+
"cnr_id": "comfy-core",
|
| 218 |
+
"ver": "0.3.63",
|
| 219 |
+
"Node name for S&R": "ModelSamplingSD3",
|
| 220 |
+
"ue_properties": {
|
| 221 |
+
"widget_ue_connectable": {},
|
| 222 |
+
"input_ue_unconnectable": {},
|
| 223 |
+
"version": "7.2.2"
|
| 224 |
+
}
|
| 225 |
+
},
|
| 226 |
+
"widgets_values": [
|
| 227 |
+
3
|
| 228 |
+
],
|
| 229 |
+
"shape": 1
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"id": 5,
|
| 233 |
+
"type": "CLIPTextEncode",
|
| 234 |
+
"pos": [
|
| 235 |
+
290,
|
| 236 |
+
890
|
| 237 |
+
],
|
| 238 |
+
"size": [
|
| 239 |
+
400,
|
| 240 |
+
150
|
| 241 |
+
],
|
| 242 |
+
"flags": {},
|
| 243 |
+
"order": 4,
|
| 244 |
+
"mode": 0,
|
| 245 |
+
"inputs": [
|
| 246 |
+
{
|
| 247 |
+
"name": "clip",
|
| 248 |
+
"type": "CLIP",
|
| 249 |
+
"link": 5
|
| 250 |
+
}
|
| 251 |
+
],
|
| 252 |
+
"outputs": [
|
| 253 |
+
{
|
| 254 |
+
"name": "CONDITIONING",
|
| 255 |
+
"type": "CONDITIONING",
|
| 256 |
+
"links": [
|
| 257 |
+
8
|
| 258 |
+
]
|
| 259 |
+
}
|
| 260 |
+
],
|
| 261 |
+
"properties": {
|
| 262 |
+
"cnr_id": "comfy-core",
|
| 263 |
+
"ver": "0.3.63",
|
| 264 |
+
"Node name for S&R": "CLIPTextEncode",
|
| 265 |
+
"ue_properties": {
|
| 266 |
+
"widget_ue_connectable": {},
|
| 267 |
+
"input_ue_unconnectable": {},
|
| 268 |
+
"version": "7.2.2"
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
"widgets_values": [
|
| 272 |
+
"worst quality,low quality,bokeh,blur,blurry,depth of field"
|
| 273 |
+
],
|
| 274 |
+
"shape": 1
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"id": 4,
|
| 278 |
+
"type": "CLIPTextEncode",
|
| 279 |
+
"pos": [
|
| 280 |
+
290,
|
| 281 |
+
650
|
| 282 |
+
],
|
| 283 |
+
"size": [
|
| 284 |
+
400,
|
| 285 |
+
200
|
| 286 |
+
],
|
| 287 |
+
"flags": {},
|
| 288 |
+
"order": 3,
|
| 289 |
+
"mode": 0,
|
| 290 |
+
"inputs": [
|
| 291 |
+
{
|
| 292 |
+
"name": "clip",
|
| 293 |
+
"type": "CLIP",
|
| 294 |
+
"link": 4
|
| 295 |
+
}
|
| 296 |
+
],
|
| 297 |
+
"outputs": [
|
| 298 |
+
{
|
| 299 |
+
"name": "CONDITIONING",
|
| 300 |
+
"type": "CONDITIONING",
|
| 301 |
+
"links": [
|
| 302 |
+
7
|
| 303 |
+
]
|
| 304 |
+
}
|
| 305 |
+
],
|
| 306 |
+
"properties": {
|
| 307 |
+
"cnr_id": "comfy-core",
|
| 308 |
+
"ver": "0.3.63",
|
| 309 |
+
"Node name for S&R": "CLIPTextEncode",
|
| 310 |
+
"ue_properties": {
|
| 311 |
+
"widget_ue_connectable": {},
|
| 312 |
+
"input_ue_unconnectable": {},
|
| 313 |
+
"version": "7.2.2"
|
| 314 |
+
}
|
| 315 |
+
},
|
| 316 |
+
"widgets_values": [
|
| 317 |
+
"masterpiece,best quality,1girl,upper body, collared shirt"
|
| 318 |
+
],
|
| 319 |
+
"shape": 1
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"id": 6,
|
| 323 |
+
"type": "KSampler",
|
| 324 |
+
"pos": [
|
| 325 |
+
700,
|
| 326 |
+
450
|
| 327 |
+
],
|
| 328 |
+
"size": [
|
| 329 |
+
340,
|
| 330 |
+
590
|
| 331 |
+
],
|
| 332 |
+
"flags": {},
|
| 333 |
+
"order": 5,
|
| 334 |
+
"mode": 0,
|
| 335 |
+
"inputs": [
|
| 336 |
+
{
|
| 337 |
+
"name": "model",
|
| 338 |
+
"type": "MODEL",
|
| 339 |
+
"link": 6
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"name": "positive",
|
| 343 |
+
"type": "CONDITIONING",
|
| 344 |
+
"link": 7
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"name": "negative",
|
| 348 |
+
"type": "CONDITIONING",
|
| 349 |
+
"link": 8
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"name": "latent_image",
|
| 353 |
+
"type": "LATENT",
|
| 354 |
+
"link": 9
|
| 355 |
+
}
|
| 356 |
+
],
|
| 357 |
+
"outputs": [
|
| 358 |
+
{
|
| 359 |
+
"name": "LATENT",
|
| 360 |
+
"type": "LATENT",
|
| 361 |
+
"links": [
|
| 362 |
+
10
|
| 363 |
+
]
|
| 364 |
+
}
|
| 365 |
+
],
|
| 366 |
+
"properties": {
|
| 367 |
+
"cnr_id": "comfy-core",
|
| 368 |
+
"ver": "0.3.63",
|
| 369 |
+
"Node name for S&R": "KSampler",
|
| 370 |
+
"ue_properties": {
|
| 371 |
+
"widget_ue_connectable": {},
|
| 372 |
+
"input_ue_unconnectable": {},
|
| 373 |
+
"version": "7.2.2"
|
| 374 |
+
}
|
| 375 |
+
},
|
| 376 |
+
"widgets_values": [
|
| 377 |
+
619828979874,
|
| 378 |
+
"fixed",
|
| 379 |
+
28,
|
| 380 |
+
7,
|
| 381 |
+
"euler_ancestral",
|
| 382 |
+
"simple",
|
| 383 |
+
1
|
| 384 |
+
],
|
| 385 |
+
"shape": 1
|
| 386 |
+
}
|
| 387 |
+
],
|
| 388 |
+
"links": [
|
| 389 |
+
[
|
| 390 |
+
4,
|
| 391 |
+
9,
|
| 392 |
+
1,
|
| 393 |
+
4,
|
| 394 |
+
0,
|
| 395 |
+
"CLIP"
|
| 396 |
+
],
|
| 397 |
+
[
|
| 398 |
+
5,
|
| 399 |
+
9,
|
| 400 |
+
1,
|
| 401 |
+
5,
|
| 402 |
+
0,
|
| 403 |
+
"CLIP"
|
| 404 |
+
],
|
| 405 |
+
[
|
| 406 |
+
6,
|
| 407 |
+
3,
|
| 408 |
+
0,
|
| 409 |
+
6,
|
| 410 |
+
0,
|
| 411 |
+
"MODEL"
|
| 412 |
+
],
|
| 413 |
+
[
|
| 414 |
+
7,
|
| 415 |
+
4,
|
| 416 |
+
0,
|
| 417 |
+
6,
|
| 418 |
+
1,
|
| 419 |
+
"CONDITIONING"
|
| 420 |
+
],
|
| 421 |
+
[
|
| 422 |
+
8,
|
| 423 |
+
5,
|
| 424 |
+
0,
|
| 425 |
+
6,
|
| 426 |
+
2,
|
| 427 |
+
"CONDITIONING"
|
| 428 |
+
],
|
| 429 |
+
[
|
| 430 |
+
9,
|
| 431 |
+
1,
|
| 432 |
+
0,
|
| 433 |
+
6,
|
| 434 |
+
3,
|
| 435 |
+
"LATENT"
|
| 436 |
+
],
|
| 437 |
+
[
|
| 438 |
+
10,
|
| 439 |
+
6,
|
| 440 |
+
0,
|
| 441 |
+
7,
|
| 442 |
+
0,
|
| 443 |
+
"LATENT"
|
| 444 |
+
],
|
| 445 |
+
[
|
| 446 |
+
11,
|
| 447 |
+
9,
|
| 448 |
+
2,
|
| 449 |
+
7,
|
| 450 |
+
1,
|
| 451 |
+
"VAE"
|
| 452 |
+
],
|
| 453 |
+
[
|
| 454 |
+
12,
|
| 455 |
+
7,
|
| 456 |
+
0,
|
| 457 |
+
8,
|
| 458 |
+
0,
|
| 459 |
+
"IMAGE"
|
| 460 |
+
],
|
| 461 |
+
[
|
| 462 |
+
17,
|
| 463 |
+
9,
|
| 464 |
+
0,
|
| 465 |
+
3,
|
| 466 |
+
0,
|
| 467 |
+
"MODEL"
|
| 468 |
+
]
|
| 469 |
+
],
|
| 470 |
+
"groups": [],
|
| 471 |
+
"config": {},
|
| 472 |
+
"extra": {
|
| 473 |
+
"ds": {
|
| 474 |
+
"scale": 1.1167815779425083,
|
| 475 |
+
"offset": [
|
| 476 |
+
255.55317442518452,
|
| 477 |
+
-140.87822926306436
|
| 478 |
+
]
|
| 479 |
+
},
|
| 480 |
+
"workflowRendererVersion": "LG",
|
| 481 |
+
"frontendVersion": "1.32.9",
|
| 482 |
+
"VHS_latentpreview": false,
|
| 483 |
+
"VHS_latentpreviewrate": 0,
|
| 484 |
+
"VHS_MetadataImage": true,
|
| 485 |
+
"VHS_KeepIntermediate": true
|
| 486 |
+
},
|
| 487 |
+
"version": 0.4
|
| 488 |
+
}
|