From 1b0771357c2dda22b119eab5f947c6b7494f9c8a Mon Sep 17 00:00:00 2001
From: Marcel Petrick
Date: Wed, 1 Oct 2025 15:59:09 +0200
Subject: [PATCH] fix(docs): correct typos in comments and strings found during code review

Non-functional changes only:
- Fixed minor spelling mistakes in comments
- Corrected typos in user-facing strings

No variables, logic, or functional code was modified.

Signed-off-by: Marcel Petrick
---
 app/logger.py | 2 +-
 comfy/cli_args.py | 2 +-
 comfy/context_windows.py | 2 +-
 comfy/hooks.py | 2 +-
 comfy/ldm/ace/lyric_encoder.py | 6 +++---
 comfy/ldm/cascade/stage_a.py | 2 +-
 comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py | 2 +-
 comfy/ldm/cosmos/position_embedding.py | 2 +-
 comfy/ldm/hunyuan3d/vae.py | 4 ++--
 comfy/ldm/util.py | 2 +-
 comfy/sd.py | 2 +-
 comfy_api/latest/__init__.py | 2 +-
 comfy_api_nodes/nodes_kling.py | 2 +-
 comfy_api_nodes/nodes_recraft.py | 2 +-
 comfy_extras/nodes_camera_trajectory.py | 2 +-
 .../execution/testing_nodes/testing-pack/specific_tests.py | 2 +-
 16 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/app/logger.py b/app/logger.py
index 3d26d98fe280..c81c0ff894a1 100644
--- a/app/logger.py
+++ b/app/logger.py
@@ -24,7 +24,7 @@ def write(self, data):
         with self._lock:
             self._logs_since_flush.append(entry)
 
-            # Simple handling for cr to overwrite the last output if it isnt a full line
+            # Simple handling for cr to overwrite the last output if it isn't a full line
             # else logs just get full of progress messages
             if isinstance(data, str) and data.startswith("\r") and not logs[-1]["m"].endswith("\n"):
                 logs.pop()
diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index cc1f12482e9f..4751caef067b 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -136,7 +136,7 @@ class LatentPreviewMethod(enum.Enum):
 parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")
-parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
+parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
 parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
 
 class PerformanceFeature(enum.Enum):
diff --git a/comfy/context_windows.py b/comfy/context_windows.py
index 041f380f9140..26ff8f85ce80 100644
--- a/comfy/context_windows.py
+++ b/comfy/context_windows.py
@@ -133,7 +133,7 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
             if isinstance(cond_item, torch.Tensor):
                 # check that tensor is the expected length - x.size(0)
                 if self.dim < cond_item.ndim and cond_item.size(self.dim) == x_in.size(self.dim):
-                    # if so, it's subsetting time - tell controls the expected indeces so they can handle them
+                    # if so, it's subsetting time - tell controls the expected indices so they can handle them
                     actual_cond_item = window.get_tensor(cond_item)
                     resized_actual_cond[key] = actual_cond_item.to(device)
                 else:
diff --git a/comfy/hooks.py b/comfy/hooks.py
index 9d0731072902..0aa019e91a30 100644
--- a/comfy/hooks.py
+++ b/comfy/hooks.py
@@ -93,7 +93,7 @@ def __init__(self, hook_type: EnumHookType=None, hook_ref: _HookRef=None, hook_i
         self.hook_scope = hook_scope
         '''Scope of where this hook should apply in terms of the conds used in sampling run.'''
         self.custom_should_register = default_should_register
-        '''Can be overriden with a compatible function to decide if this hook should be registered without the need to override .should_register'''
+        '''Can be overridden with a compatible function to decide if this hook should be registered without the need to override .should_register'''
 
     @property
     def strength(self):
diff --git a/comfy/ldm/ace/lyric_encoder.py b/comfy/ldm/ace/lyric_encoder.py
index ff4359b26e8d..63edbbf05b69 100644
--- a/comfy/ldm/ace/lyric_encoder.py
+++ b/comfy/ldm/ace/lyric_encoder.py
@@ -136,11 +136,11 @@ def forward(
 class PositionwiseFeedForward(torch.nn.Module):
     """Positionwise feed forward layer.
 
-    FeedForward are appied on each position of the sequence.
+    FeedForward are applied on each position of the sequence.
     The output dim is same with the input dim.
 
     Args:
-        idim (int): Input dimenstion.
+        idim (int): Input dimension.
         hidden_units (int): The number of hidden units.
         dropout_rate (float): Dropout rate.
         activation (torch.nn.Module): Activation function
@@ -758,7 +758,7 @@ def extend_pe(self, x: torch.Tensor):
             if self.pe.dtype != x.dtype or self.pe.device != x.device:
                 self.pe = self.pe.to(dtype=x.dtype, device=x.device)
             return
-        # Suppose `i` means to the position of query vecotr and `j` means the
+        # Suppose `i` means to the position of query vector and `j` means the
         # position of key vector. We use position relative positions when keys
         # are to the left (i>j) and negative relative positions otherwise (i<j).
diff --git a/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py b/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py
--- a/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py
+++ b/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py
@@ ... @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
                 time_factor = time_factor.item()
             x = x.repeat_interleave(int(time_factor), dim=2)
             # TODO(freda): Check if this causes temporal inconsistency.
-            # Shoule reverse the order of the following two ops,
+            # Should reverse the order of the following two ops,
             # better perf and better temporal smoothness.
             x = self.conv(x)
             return x[..., int(time_factor - 1) :, :, :]
diff --git a/comfy/ldm/cosmos/position_embedding.py b/comfy/ldm/cosmos/position_embedding.py
index c925811d4c22..7037740a268e 100644
--- a/comfy/ldm/cosmos/position_embedding.py
+++ b/comfy/ldm/cosmos/position_embedding.py
@@ -178,7 +178,7 @@ def __init__(
     ):
         """
         Args:
-            interpolation (str): we curretly only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
+            interpolation (str): we currently only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
         """
         del kwargs # unused
         super().__init__()
diff --git a/comfy/ldm/hunyuan3d/vae.py b/comfy/ldm/hunyuan3d/vae.py
index 76094482700f..e23630cfa34c 100644
--- a/comfy/ldm/hunyuan3d/vae.py
+++ b/comfy/ldm/hunyuan3d/vae.py
@@ -118,7 +118,7 @@ def sample_points_and_latents(self, point_cloud: torch.Tensor, features: torch.T
         take the fourier embeddings for both input and query pc
 
         Mental Note: FPS-sampled points (query_pc) act as latent tokens that attend to and learn from the broader context in input_pc.
-        Goal: get a smaller represenation (query_pc) to represent the entire scence structure by learning from a broader subset (input_pc).
+        Goal: get a smaller representation (query_pc) to represent the entire scene structure by learning from a broader subset (input_pc).
         More computationally efficient.
 
         Features are additional information for each point in the cloud
@@ -193,7 +193,7 @@ def sample_points_and_latents(self, point_cloud: torch.Tensor, features: torch.T
             query = torch.cat([query, query_features], dim = -1)
             data = torch.cat([data, input_features], dim = -1)
 
-        # don't return pc_info to avoid unnecessary memory usuage
+        # don't return pc_info to avoid unnecessary memory usage
        return query.view(B, -1, query.shape[-1]), data.view(B, -1, data.shape[-1])
 
     def forward(self, point_cloud: torch.Tensor, features: torch.Tensor):
diff --git a/comfy/ldm/util.py b/comfy/ldm/util.py
index 30b4b4721056..4ebe7adbd81f 100644
--- a/comfy/ldm/util.py
+++ b/comfy/ldm/util.py
@@ -24,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
         try:
             draw.text((0, 0), lines, fill="black", font=font)
         except UnicodeEncodeError:
-            logging.warning("Cant encode string for logging. Skipping.")
+            logging.warning("Can't encode string for logging. Skipping.")
 
         txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
         txts.append(txt)
diff --git a/comfy/sd.py b/comfy/sd.py
index 2df340739f4e..9c6bcd02bfb2 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -527,7 +527,7 @@ def estimate_memory(shape, dtype, num_layers = 16, kv_cache_multiplier = 2):
                 self.latent_dim = 2
                 self.output_channels = 3
             else:
-                logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
+                logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
                 self.first_stage_model = None
                 return
         else:
diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py
index 2cee65aa9fb2..fe8995c1133b 100644
--- a/comfy_api/latest/__init__.py
+++ b/comfy_api/latest/__init__.py
@@ -80,7 +80,7 @@ class ComfyExtension(ABC):
     async def on_load(self) -> None:
         """
         Called when an extension is loaded.
-        This should be used to initialize any global resources neeeded by the extension.
+        This should be used to initialize any global resources needed by the extension.
         """
 
     @abstractmethod
diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index 5f55b2cc9b49..8a72c892dca0 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -173,7 +173,7 @@ def is_valid_image_response(response: KlingVirtualTryOnResponse) -> bool:
 
 
 def validate_prompts(prompt: str, negative_prompt: str, max_length: int) -> bool:
-    """Verifies that the positive prompt is not empty and that neither promt is too long."""
+    """Verifies that the positive prompt is not empty and that neither prompt is too long."""
     if not prompt:
         raise ValueError("Positive prompt is empty")
     if len(prompt) > max_length:
diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py
index c8516b368208..f0061cb7bf13 100644
--- a/comfy_api_nodes/nodes_recraft.py
+++ b/comfy_api_nodes/nodes_recraft.py
@@ -89,7 +89,7 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
 
     The OpenAI client that Recraft uses has a bizarre way of serializing lists:
 
-    It does NOT keep track of indeces of each list, so for background_color, that must be serialized as:
+    It does NOT keep track of indices of each list, so for background_color, that must be serialized as:
     'background_color[rgb][]' = [0, 0, 255]
     where the array is assigned to a key that has '[]' at the end, to signal it's an array.
diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py
index eb7ef363cb3e..9b40386decbf 100644
--- a/comfy_extras/nodes_camera_trajectory.py
+++ b/comfy_extras/nodes_camera_trajectory.py
@@ -114,7 +114,7 @@ def ray_condition(K, c2w, H, W, device):
     rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2)  # B, V, 3, HW
     rays_o = c2w[..., :3, 3]  # B, V, 3
     rays_o = rays_o[:, :, None].expand_as(rays_d)  # B, V, 3, HW
-    # c2w @ dirctions
+    # c2w @ directions
     rays_dxo = torch.cross(rays_o, rays_d)
     plucker = torch.cat([rays_dxo, rays_d], dim=-1)
     plucker = plucker.reshape(B, c2w.shape[1], H, W, 6)  # B, V, H, W, 6
diff --git a/tests/execution/testing_nodes/testing-pack/specific_tests.py b/tests/execution/testing_nodes/testing-pack/specific_tests.py
index 4f8f01ae4a48..a23969ff1990 100644
--- a/tests/execution/testing_nodes/testing-pack/specific_tests.py
+++ b/tests/execution/testing_nodes/testing-pack/specific_tests.py
@@ -297,7 +297,7 @@ def dynamic_dependency_cycle(self, input1, input2):
         mix1 = g.node("TestLazyMixImages", image1=input1, mask=mask.out(0))
         mix2 = g.node("TestLazyMixImages", image1=mix1.out(0), image2=input2, mask=mask.out(0))
 
-        # Create the cyle
+        # Create the cycle
         mix1.set_input("image2", mix2.out(0))
 
         return {