2 changes: 1 addition & 1 deletion app/logger.py
@@ -24,7 +24,7 @@ def write(self, data):
with self._lock:
self._logs_since_flush.append(entry)

- # Simple handling for cr to overwrite the last output if it isnt a full line
+ # Simple handling for cr to overwrite the last output if it isn't a full line
# else logs just get full of progress messages
if isinstance(data, str) and data.startswith("\r") and not logs[-1]["m"].endswith("\n"):
logs.pop()
2 changes: 1 addition & 1 deletion comfy/cli_args.py
@@ -136,7 +136,7 @@ class LatentPreviewMethod(enum.Enum):

parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")

parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")

class PerformanceFeature(enum.Enum):
2 changes: 1 addition & 1 deletion comfy/context_windows.py
@@ -133,7 +133,7 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
if isinstance(cond_item, torch.Tensor):
# check that tensor is the expected length - x.size(0)
if self.dim < cond_item.ndim and cond_item.size(self.dim) == x_in.size(self.dim):
- # if so, it's subsetting time - tell controls the expected indeces so they can handle them
+ # if so, it's subsetting time - tell controls the expected indices so they can handle them
actual_cond_item = window.get_tensor(cond_item)
resized_actual_cond[key] = actual_cond_item.to(device)
else:
2 changes: 1 addition & 1 deletion comfy/hooks.py
@@ -93,7 +93,7 @@ def __init__(self, hook_type: EnumHookType=None, hook_ref: _HookRef=None, hook_i
self.hook_scope = hook_scope
'''Scope of where this hook should apply in terms of the conds used in sampling run.'''
self.custom_should_register = default_should_register
- '''Can be overriden with a compatible function to decide if this hook should be registered without the need to override .should_register'''
+ '''Can be overridden with a compatible function to decide if this hook should be registered without the need to override .should_register'''

@property
def strength(self):
6 changes: 3 additions & 3 deletions comfy/ldm/ace/lyric_encoder.py
@@ -136,11 +136,11 @@ def forward(
class PositionwiseFeedForward(torch.nn.Module):
"""Positionwise feed forward layer.

- FeedForward are appied on each position of the sequence.
+ FeedForward are applied on each position of the sequence.
The output dim is same with the input dim.

Args:
- idim (int): Input dimenstion.
+ idim (int): Input dimension.
hidden_units (int): The number of hidden units.
dropout_rate (float): Dropout rate.
activation (torch.nn.Module): Activation function
@@ -758,7 +758,7 @@ def extend_pe(self, x: torch.Tensor):
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
- # Suppose `i` means to the position of query vecotr and `j` means the
+ # Suppose `i` means to the position of query vector and `j` means the
# position of key vector. We use position relative positions when keys
# are to the left (i>j) and negative relative positions otherwise (i<j).
pe_positive = torch.zeros(x.size(1), self.d_model)
2 changes: 1 addition & 1 deletion comfy/ldm/cascade/stage_a.py
@@ -60,7 +60,7 @@ class VectorQuantize(nn.Module):
def __init__(self, embedding_size, k, ema_decay=0.99, ema_loss=False):
"""
Takes an input of variable size (as long as the last dimension matches the embedding size).
- Returns one tensor containing the nearest neigbour embeddings to each of the inputs,
+ Returns one tensor containing the nearest neighbour embeddings to each of the inputs,
with the same size as the input, vq and commitment components for the loss as a touple
in the second output and the indices of the quantized vectors in the third:
quantized, (vq_loss, commit_loss), indices
2 changes: 1 addition & 1 deletion comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py
@@ -120,7 +120,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
time_factor = time_factor.item()
x = x.repeat_interleave(int(time_factor), dim=2)
# TODO(freda): Check if this causes temporal inconsistency.
- # Shoule reverse the order of the following two ops,
+ # Should reverse the order of the following two ops,
# better perf and better temporal smoothness.
x = self.conv(x)
return x[..., int(time_factor - 1) :, :, :]
2 changes: 1 addition & 1 deletion comfy/ldm/cosmos/position_embedding.py
@@ -178,7 +178,7 @@ def __init__(
):
"""
Args:
- interpolation (str): we curretly only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
+ interpolation (str): we currently only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
"""
del kwargs # unused
super().__init__()
4 changes: 2 additions & 2 deletions comfy/ldm/hunyuan3d/vae.py
@@ -118,7 +118,7 @@ def sample_points_and_latents(self, point_cloud: torch.Tensor, features: torch.T
take the fourier embeddings for both input and query pc

Mental Note: FPS-sampled points (query_pc) act as latent tokens that attend to and learn from the broader context in input_pc.
- Goal: get a smaller represenation (query_pc) to represent the entire scence structure by learning from a broader subset (input_pc).
+ Goal: get a smaller representation (query_pc) to represent the entire scence structure by learning from a broader subset (input_pc).
More computationally efficient.

Features are additional information for each point in the cloud
@@ -193,7 +193,7 @@ def sample_points_and_latents(self, point_cloud: torch.Tensor, features: torch.T
query = torch.cat([query, query_features], dim = -1)
data = torch.cat([data, input_features], dim = -1)

- # don't return pc_info to avoid unnecessary memory usuage
+ # don't return pc_info to avoid unnecessary memory usage
return query.view(B, -1, query.shape[-1]), data.view(B, -1, data.shape[-1])

def forward(self, point_cloud: torch.Tensor, features: torch.Tensor):
2 changes: 1 addition & 1 deletion comfy/ldm/util.py
@@ -24,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
try:
draw.text((0, 0), lines, fill="black", font=font)
except UnicodeEncodeError:
logging.warning("Cant encode string for logging. Skipping.")
logging.warning("Can't encode string for logging. Skipping.")

txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
txts.append(txt)
2 changes: 1 addition & 1 deletion comfy/sd.py
@@ -527,7 +527,7 @@ def estimate_memory(shape, dtype, num_layers = 16, kv_cache_multiplier = 2):
self.latent_dim = 2
self.output_channels = 3
else:
logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
self.first_stage_model = None
return
else:
2 changes: 1 addition & 1 deletion comfy_api/latest/__init__.py
@@ -80,7 +80,7 @@ class ComfyExtension(ABC):
async def on_load(self) -> None:
"""
Called when an extension is loaded.
- This should be used to initialize any global resources neeeded by the extension.
+ This should be used to initialize any global resources needed by the extension.
"""

@abstractmethod
2 changes: 1 addition & 1 deletion comfy_api_nodes/nodes_kling.py
@@ -173,7 +173,7 @@ def is_valid_image_response(response: KlingVirtualTryOnResponse) -> bool:


def validate_prompts(prompt: str, negative_prompt: str, max_length: int) -> bool:
"""Verifies that the positive prompt is not empty and that neither promt is too long."""
"""Verifies that the positive prompt is not empty and that neither prompt is too long."""
if not prompt:
raise ValueError("Positive prompt is empty")
if len(prompt) > max_length:
2 changes: 1 addition & 1 deletion comfy_api_nodes/nodes_recraft.py
@@ -89,7 +89,7 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co

The OpenAI client that Recraft uses has a bizarre way of serializing lists:

- It does NOT keep track of indeces of each list, so for background_color, that must be serialized as:
+ It does NOT keep track of indices of each list, so for background_color, that must be serialized as:
'background_color[rgb][]' = [0, 0, 255]
where the array is assigned to a key that has '[]' at the end, to signal it's an array.

2 changes: 1 addition & 1 deletion comfy_extras/nodes_camera_trajectory.py
@@ -114,7 +114,7 @@ def ray_condition(K, c2w, H, W, device):
rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2) # B, V, 3, HW
rays_o = c2w[..., :3, 3] # B, V, 3
rays_o = rays_o[:, :, None].expand_as(rays_d) # B, V, 3, HW
- # c2w @ dirctions
+ # c2w @ directions
rays_dxo = torch.cross(rays_o, rays_d)
plucker = torch.cat([rays_dxo, rays_d], dim=-1)
plucker = plucker.reshape(B, c2w.shape[1], H, W, 6) # B, V, H, W, 6
@@ -297,7 +297,7 @@ def dynamic_dependency_cycle(self, input1, input2):
mix1 = g.node("TestLazyMixImages", image1=input1, mask=mask.out(0))
mix2 = g.node("TestLazyMixImages", image1=mix1.out(0), image2=input2, mask=mask.out(0))

- # Create the cyle
+ # Create the cycle
mix1.set_input("image2", mix2.out(0))

return {