Skip to content

Commit e90e9b7

Browse files
comfyanonymous and blob42
authored and committed
Add cheap latent preview for flux 2. (#10907)
Thank you to the person who calculated them. You saved me a percent of my time.
1 parent 8402c87 commit e90e9b7

File tree

4 files changed

+61
-6
lines changed

4 files changed

+61
-6
lines changed

comfy/cli_args.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ def __call__(self, parser, namespace, values, option_string=None):
9090
# Device/platform tuning flags (module-level argparse setup in comfy/cli_args.py).
parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
# Added in this commit: cap on the system RAM comfy reports, in GB (0 means use the real total).
parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
9394

9495
class LatentPreviewMethod(enum.Enum):
9596
NoPreviews = "none"

comfy/latent_formats.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ class LatentFormat:
66
latent_dimensions = 2
77
latent_rgb_factors = None
88
latent_rgb_factors_bias = None
9+
latent_rgb_factors_reshape = None
910
taesd_decoder_name = None
1011

1112
def process_in(self, latent):
@@ -181,6 +182,45 @@ def process_out(self, latent):
181182
class Flux2(LatentFormat):
182183
latent_channels = 128
183184

185+
def __init__(self):
    """Initialize cheap RGB-preview coefficients for Flux 2 latents.

    ``latent_rgb_factors``/``latent_rgb_factors_bias`` give a linear map
    from latent channels to RGB for fast previews.  Flux 2 packs its
    latent as 128 channels (32 channels x a 2x2 spatial group — see
    ``latent_channels = 128`` on this class), so
    ``latent_rgb_factors_reshape`` first unpacks to 32 channels at twice
    the spatial resolution before the RGB projection is applied.
    """
    # One (R, G, B) weight row per unpacked latent channel (32 rows).
    self.latent_rgb_factors = [
        [0.0058, 0.0113, 0.0073],
        [0.0495, 0.0443, 0.0836],
        [-0.0099, 0.0096, 0.0644],
        [0.2144, 0.3009, 0.3652],
        [0.0166, -0.0039, -0.0054],
        [0.0157, 0.0103, -0.0160],
        [-0.0398, 0.0902, -0.0235],
        [-0.0052, 0.0095, 0.0109],
        [-0.3527, -0.2712, -0.1666],
        [-0.0301, -0.0356, -0.0180],
        [-0.0107, 0.0078, 0.0013],
        [0.0746, 0.0090, -0.0941],
        [0.0156, 0.0169, 0.0070],
        [-0.0034, -0.0040, -0.0114],
        [0.0032, 0.0181, 0.0080],
        [-0.0939, -0.0008, 0.0186],
        [0.0018, 0.0043, 0.0104],
        [0.0284, 0.0056, -0.0127],
        [-0.0024, -0.0022, -0.0030],
        [0.1207, -0.0026, 0.0065],
        [0.0128, 0.0101, 0.0142],
        [0.0137, -0.0072, -0.0007],
        [0.0095, 0.0092, -0.0059],
        [0.0000, -0.0077, -0.0049],
        [-0.0465, -0.0204, -0.0312],
        [0.0095, 0.0012, -0.0066],
        [0.0290, -0.0034, 0.0025],
        [0.0220, 0.0169, -0.0048],
        [-0.0332, -0.0457, -0.0468],
        [-0.0085, 0.0389, 0.0609],
        [-0.0076, 0.0003, -0.0043],
        [-0.0111, -0.0460, -0.0614],
    ]
    self.latent_rgb_factors_bias = [-0.0329, -0.0718, -0.0851]

    def unpack(t):
        # (B, 128, H, W) -> (B, 32, 2, 2, H, W) -> (B, 32, H, 2, W, 2) -> (B, 32, 2H, 2W)
        batch, height, width = t.shape[0], t.shape[-2], t.shape[-1]
        grouped = t.reshape(batch, 32, 2, 2, height, width)
        return grouped.permute(0, 1, 4, 2, 5, 3).reshape(batch, 32, height * 2, width * 2)

    self.latent_rgb_factors_reshape = unpack
223+
184224
def process_in(self, latent):
    # Flux 2 latents take no scale/shift on the way into the model; identity.
    return latent
186226

comfy/model_management.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
192192
if dev is None:
193193
dev = get_torch_device()
194194

195-
if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
196-
mem_total = psutil.virtual_memory().total
195+
if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
196+
mem_total = 0
197+
if args.total_ram != 0:
198+
mem_total = args.total_ram * 1024 * 1024
199+
else:
200+
mem_total = psutil.virtual_memory().total
197201
mem_total_torch = mem_total
198202
else:
199203
if directml_enabled:
@@ -236,8 +240,15 @@ def mac_version():
236240
return None
237241

238242
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
239-
total_ram = psutil.virtual_memory().total / (1024 * 1024)
240-
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
243+
244+
# Report total system RAM in MB, honoring the new --total-ram override.
# (Dead store `total_ram = 0` removed: both branches assign unconditionally.)
if args.total_ram != 0:
    total_ram = args.total_ram * 1024  # --total-ram is given in GB; convert to MB
else:
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
241252

242253
try:
243254
logging.info("pytorch version: {}".format(torch_version))

latent_preview.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,13 +37,16 @@ def decode_latent_to_preview(self, x0):
3737

3838

3939
class Latent2RGBPreviewer(LatentPreviewer):
40-
def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None):
40+
def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None, latent_rgb_factors_reshape=None):
    """Build the latent->RGB projection used for cheap previews.

    latent_rgb_factors: per-channel (R, G, B) weight rows; stored
        transposed to shape (3, channels) on CPU.
    latent_rgb_factors_bias: optional (R, G, B) bias, or None.
    latent_rgb_factors_reshape: optional callable applied to the latent
        before projection (e.g. to unpack packed channels), or None.
    """
    weights = torch.tensor(latent_rgb_factors, device="cpu")
    self.latent_rgb_factors = weights.transpose(0, 1)
    if latent_rgb_factors_bias is None:
        self.latent_rgb_factors_bias = None
    else:
        self.latent_rgb_factors_bias = torch.tensor(latent_rgb_factors_bias, device="cpu")
    self.latent_rgb_factors_reshape = latent_rgb_factors_reshape
4546

4647
def decode_latent_to_preview(self, x0):
48+
if self.latent_rgb_factors_reshape is not None:
49+
x0 = self.latent_rgb_factors_reshape(x0)
4750
self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device)
4851
if self.latent_rgb_factors_bias is not None:
4952
self.latent_rgb_factors_bias = self.latent_rgb_factors_bias.to(dtype=x0.dtype, device=x0.device)
@@ -85,7 +88,7 @@ def get_previewer(device, latent_format):
8588

8689
if previewer is None:
8790
if latent_format.latent_rgb_factors is not None:
88-
previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias)
91+
previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias, latent_format.latent_rgb_factors_reshape)
8992
return previewer
9093

9194
def prepare_callback(model, steps, x0_output_dict=None):

0 commit comments

Comments (0)