
Commit c4d3760

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 2a022ad commit c4d3760

2 files changed: +2 −6 lines changed


litgpt/lora.py

Lines changed: 1 addition & 3 deletions

@@ -238,9 +238,7 @@ def __init__(
        if r > 0 and any(enable_lora):
            self.lora_A = nn.Parameter(torch.empty((r * sum(enable_lora), in_features)))  # (4, 128)
            # qkv_shapes will be used to split a tensor with weights correctly
-           self.qkv_shapes = [
-               s for s, e in zip(self._all_qkv_shapes, enable_lora) if e
-           ]
+           self.qkv_shapes = [s for s, e in zip(self._all_qkv_shapes, enable_lora) if e]
            self.lora_B = nn.Parameter(torch.empty(sum(self.qkv_shapes), r))  # (256, 2))
            # Notes about shapes above
            # - self.lora_A has shape (4, 128): 4 because rank is 2 and LoRA is applied only to two matrices;
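For context on the reformatted line: a minimal, self-contained sketch of what the list comprehension does, using hypothetical numbers chosen to match the shape comments in the hunk (r=2, in_features=128, LoRA enabled on two of the three matrices; the per-matrix sizes in all_qkv_shapes are assumed, not taken from litgpt).

    import torch
    import torch.nn as nn

    in_features = 128
    r = 2
    enable_lora = (True, False, True)   # hypothetical: LoRA on the Q and V projections only
    all_qkv_shapes = [128, 128, 128]    # assumed per-matrix output sizes for Q, K, V

    # Keep only the output sizes of the matrices LoRA is applied to.
    qkv_shapes = [s for s, e in zip(all_qkv_shapes, enable_lora) if e]  # -> [128, 128]

    lora_A = nn.Parameter(torch.empty((r * sum(enable_lora), in_features)))  # (4, 128)
    lora_B = nn.Parameter(torch.empty((sum(qkv_shapes), r)))                 # (256, 2)

    print(lora_A.shape, lora_B.shape)  # torch.Size([4, 128]) torch.Size([256, 2])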

tests/test_lora.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -354,9 +354,7 @@ def test_lora_gpt_query_groups_merge_and_forward_no_exception(n_query_groups, ap
354354
)
355355
def test_lora_qkv_linear_compare_conv1d(head_size, n_head, enable_lora):
356356
C = 12
357-
layer = LoRAQKVLinear(
358-
C, head_size=head_size, n_head=n_head, n_query_groups=n_head, r=2, enable_lora=enable_lora
359-
)
357+
layer = LoRAQKVLinear(C, head_size=head_size, n_head=n_head, n_query_groups=n_head, r=2, enable_lora=enable_lora)
360358
x = torch.randn((1, 1, C))
361359
a = F.linear(x, layer.lora_A).transpose(-2, -1) # after_A
362360
b = layer.lora_B.data.unsqueeze(-1)
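Background for the test touched above: it compares a grouped conv1d path against per-group linear ops. Below is a small sketch of that equivalence in isolation; the tensor sizes are made up and this is not the test itself, only the general identity that a grouped 1x1 convolution equals an independent linear map per channel group.

    import torch
    import torch.nn.functional as F

    groups = 2
    x = torch.randn(1, 4, 7)  # (batch, in_channels, length)
    w = torch.randn(6, 2, 1)  # (out_channels, in_channels // groups, kernel_size=1)

    # Grouped 1x1 convolution over the feature dimension.
    out_conv = F.conv1d(x, w, groups=groups)

    # The same result, computed group by group with F.linear.
    x_groups = x.chunk(groups, dim=1)  # split input channels per group
    w_groups = w.chunk(groups, dim=0)  # split output filters per group
    out_linear = torch.cat(
        [
            F.linear(xg.transpose(-2, -1), wg.squeeze(-1)).transpose(-2, -1)
            for xg, wg in zip(x_groups, w_groups)
        ],
        dim=1,
    )

    assert torch.allclose(out_conv, out_linear, atol=1e-6)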
