Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
40 commits
Select commit Hold shift + click to select a range
6e33ee3
debug error
strint Oct 16, 2025
fa19dd4
debug offload
strint Oct 16, 2025
f40e00c
add detail debug
strint Oct 16, 2025
2b22296
add debug log
strint Oct 16, 2025
c1eac55
add debug log
strint Oct 16, 2025
9352987
add log
strint Oct 16, 2025
a207301
rm useless log
strint Oct 16, 2025
71b23d1
rm useless log
strint Oct 16, 2025
e5ff6a1
refine log
strint Oct 16, 2025
5c3c6c0
add debug log of cpu load
strint Oct 17, 2025
6583cc0
debug load mem
strint Oct 17, 2025
49597bf
load remains mmap
strint Oct 17, 2025
21ebcad
debug free mem
strint Oct 20, 2025
4ac827d
unload partial
strint Oct 20, 2025
e9e1d2f
add mmap tensor
strint Oct 20, 2025
4956178
fix log
strint Oct 20, 2025
8aeebbf
fix to
strint Oct 20, 2025
05c2518
refact mmap
strint Oct 20, 2025
2f0d566
refine code
strint Oct 21, 2025
2d010f5
refine code
strint Oct 21, 2025
fff56de
fix format
strint Oct 21, 2025
08e094e
use native mmap
strint Oct 21, 2025
8038393
lazy rm file
strint Oct 21, 2025
98ba311
add env
strint Oct 21, 2025
f3c673d
Merge branch 'master' of https://github.com/siliconflow/ComfyUI into …
strint Oct 22, 2025
aab0e24
fix MMAP_MEM_THRESHOLD_GB default
strint Oct 23, 2025
58d28ed
no limit for offload size
strint Oct 23, 2025
c312733
refine log
strint Oct 23, 2025
dc7c77e
better partial unload
strint Oct 23, 2025
5c5fbdd
debug mmap
strint Nov 17, 2025
d28093f
Merge branch 'master' into refine_offload
doombeaker Nov 26, 2025
96c7f18
Merge branch 'master' into refine_offload
doombeaker Nov 27, 2025
7733d51
try fix flux2 (#9)
strint Dec 4, 2025
211fa31
Merge branch 'master' into refine_offload
doombeaker Dec 8, 2025
1122cd0
allow offload quant (#10)
strint Dec 9, 2025
532eb01
rm comment
strint Dec 9, 2025
a511d0d
Merge remote-tracking branch 'upstream/master' into refine_offload
yiquanfeng Dec 12, 2025
f61871b
Merge branch 'master' into refine_offload
doombeaker Dec 12, 2025
407dab1
merge master
strint Dec 22, 2025
cdb08d2
Offload merge (#12)
strint Jan 6, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,8 @@ Python 3.14 works but you may encounter issues with the torch compile node. The

Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12

torch 2.4 and above is supported but some features might only work on newer versions. We generally recommend using the latest major version of pytorch unless it is less than 2 weeks old.

### Instructions:

Git clone this repo.
Expand Down
4 changes: 2 additions & 2 deletions app/model_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ async def get_model_folders(request):
@routes.get("/experiment/models/{folder}")
async def get_all_models(request):
folder = request.match_info.get("folder", None)
if not folder in folder_paths.folder_names_and_paths:
if folder not in folder_paths.folder_names_and_paths:
return web.Response(status=404)
files = self.get_model_file_list(folder)
return web.json_response(files)
Expand All @@ -55,7 +55,7 @@ async def get_model_preview(request):
path_index = int(request.match_info.get("path_index", None))
filename = request.match_info.get("filename", None)

if not folder_name in folder_paths.folder_names_and_paths:
if folder_name not in folder_paths.folder_names_and_paths:
return web.Response(status=404)

folders = folder_paths.folder_names_and_paths[folder_name]
Expand Down
19 changes: 19 additions & 0 deletions comfy/clip_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,25 @@
from comfy.ldm.modules.attention import optimized_attention_for_device
import comfy.ops

def clip_preprocess(image, size=224, mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), crop=True):
    """Preprocess an image batch for a CLIP-style vision encoder.

    Args:
        image: Tensor of shape (batch, height, width, channels) with values
            assumed in [0, 1] — TODO confirm against callers. A 4th (alpha)
            channel, if present, is dropped.
        size: Target square side length expected by the vision model.
        mean: Per-channel normalization means (defaults are the CLIP constants).
        std: Per-channel normalization stds (defaults are the CLIP constants).
        crop: If True, resize keeping aspect ratio so the short side equals
            `size`, then center-crop; otherwise squash directly to (size, size).

    Returns:
        Tensor of shape (batch, 3, size, size), normalized per channel.
    """
    # NOTE: defaults are tuples (not lists) to avoid the mutable-default-argument
    # pitfall; torch.tensor accepts either, so behavior is unchanged.
    image = image[:, :, :, :3] if image.shape[3] > 3 else image  # drop alpha
    mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
    std = torch.tensor(std, device=image.device, dtype=image.dtype)
    image = image.movedim(-1, 1)  # NHWC -> NCHW
    if not (image.shape[2] == size and image.shape[3] == size):
        if crop:
            # Scale so the shorter side matches `size`, preserving aspect ratio.
            scale = (size / min(image.shape[2], image.shape[3]))
            scale_size = (round(scale * image.shape[2]), round(scale * image.shape[3]))
        else:
            scale_size = (size, size)

        image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True)
        # Center-crop to (size, size); a no-op when scale_size already equals size.
        h = (image.shape[2] - size)//2
        w = (image.shape[3] - size)//2
        image = image[:,:,h:h+size,w:w+size]
    # Quantize to 8-bit levels (round-trip through [0, 255]) before normalizing,
    # matching reference preprocessing that goes through uint8 images.
    image = torch.clip((255. * image), 0, 255).round() / 255.0
    return (image - mean.view([3,1,1])) / std.view([3,1,1])

class CLIPAttention(torch.nn.Module):
def __init__(self, embed_dim, heads, dtype, device, operations):
super().__init__()
Expand Down
22 changes: 2 additions & 20 deletions comfy/clip_vision.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace
import os
import torch
import json
import logging

Expand All @@ -17,24 +16,7 @@ def __getitem__(self, key):
def __setitem__(self, key, item):
setattr(self, key, item)

def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
image = image[:, :, :, :3] if image.shape[3] > 3 else image
mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
std = torch.tensor(std, device=image.device, dtype=image.dtype)
image = image.movedim(-1, 1)
if not (image.shape[2] == size and image.shape[3] == size):
if crop:
scale = (size / min(image.shape[2], image.shape[3]))
scale_size = (round(scale * image.shape[2]), round(scale * image.shape[3]))
else:
scale_size = (size, size)

image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True)
h = (image.shape[2] - size)//2
w = (image.shape[3] - size)//2
image = image[:,:,h:h+size,w:w+size]
image = torch.clip((255. * image), 0, 255).round() / 255.0
return (image - mean.view([3,1,1])) / std.view([3,1,1])
clip_preprocess = comfy.clip_model.clip_preprocess # Prevent some stuff from breaking, TODO: remove eventually

IMAGE_ENCODERS = {
"clip_vision_model": comfy.clip_model.CLIPVisionModelProjection,
Expand Down Expand Up @@ -73,7 +55,7 @@ def get_sd(self):

def encode_image(self, image, crop=True):
comfy.model_management.load_model_gpu(self.patcher)
pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float()
pixel_values = comfy.clip_model.clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float()
out = self.model(pixel_values=pixel_values, intermediate_output='all' if self.return_all_hidden_states else -2)

outputs = Output()
Expand Down
6 changes: 6 additions & 0 deletions comfy/context_windows.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,12 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
audio_cond = cond_value.cond
if audio_cond.ndim > 1 and audio_cond.size(1) == x_in.size(self.dim):
new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(audio_cond, device, dim=1))
# Handle vace_context (temporal dim is 3)
elif cond_key == "vace_context" and hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
vace_cond = cond_value.cond
if vace_cond.ndim >= 4 and vace_cond.size(3) == x_in.size(self.dim):
sliced_vace = window.get_tensor(vace_cond, device, dim=3, retain_index_list=self.cond_retain_index_list)
new_cond_item[cond_key] = cond_value._copy_with(sliced_vace)
# if has cond that is a Tensor, check if needs to be subset
elif hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
if (self.dim < cond_value.cond.ndim and cond_value.cond.size(self.dim) == x_in.size(self.dim)) or \
Expand Down
3 changes: 2 additions & 1 deletion comfy/hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -527,7 +527,8 @@ def prepare_current_keyframe(self, curr_t: float, transformer_options: dict[str,
if self._current_keyframe.get_effective_guarantee_steps(max_sigma) > 0:
break
# if eval_c is outside the percent range, stop looking further
else: break
else:
break
# update steps current context is used
self._current_used_steps += 1
# update current timestep this was performed on
Expand Down
3 changes: 3 additions & 0 deletions comfy/k_diffusion/sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,9 @@ def get_ancestral_step(sigma_from, sigma_to, eta=1.):

def default_noise_sampler(x, seed=None):
if seed is not None:
if x.device == torch.device("cpu"):
seed += 1

generator = torch.Generator(device=x.device)
generator.manual_seed(seed)
else:
Expand Down
3 changes: 3 additions & 0 deletions comfy/latent_formats.py
Original file line number Diff line number Diff line change
Expand Up @@ -407,6 +407,9 @@ def __init__(self):

self.latent_rgb_factors_bias = [-0.0571, -0.1657, -0.2512]

class LTXAV(LTXV):
pass

class HunyuanVideo(LatentFormat):
latent_channels = 16
latent_dimensions = 3
Expand Down
2 changes: 1 addition & 1 deletion comfy/ldm/chroma_radiance/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,7 @@ def radiance_get_override_params(self, overrides: dict) -> ChromaRadianceParams:
bad_keys = tuple(
k
for k, v in overrides.items()
if type(v) != type(getattr(params, k)) and (v is not None or k not in nullable_keys)
if not isinstance(v, type(getattr(params, k))) and (v is not None or k not in nullable_keys)
)
if bad_keys:
e = f"Invalid value(s) in transformer_options chroma_radiance_options: {', '.join(bad_keys)}"
Expand Down
3 changes: 2 additions & 1 deletion comfy/ldm/hunyuan_video/upsampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@
import torch.nn.functional as F
from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, VideoConv3d
from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm
import model_management, model_patcher
import model_management
import model_patcher

class SRResidualCausalBlock3D(nn.Module):
def __init__(self, channels: int):
Expand Down
Loading
Loading