
Commit 5999fdc

Apply RoPE for SWA layers only.
1 parent: a671e53

1 file changed: +20 -7 lines changed


src/llama.cpp

Lines changed: 20 additions & 7 deletions
@@ -14857,12 +14857,13 @@ struct llm_build_context {
         struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
 
         // sliding window switch pattern
-        const int32_t n_layer_switch = 4;
+        const int32_t sliding_window_pattern = 4;
 
         for (int il = 0; il < n_layer; ++il) {
             // three layers sliding window attention (window size 4096) and ROPE
             // fourth layer uses global attention without positional embeddings
-            struct ggml_tensor * KQ_mask_l = (il % n_layer_switch < (n_layer_switch - 1)) ? KQ_mask_swa : KQ_mask;
+            const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
+            struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
 
             // norm
             cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM, cb, il);
@@ -14871,6 +14872,9 @@ struct llm_build_context {
 
             // self-attention
             {
+                // rope freq factors for 128k context
+                struct ggml_tensor * rope_factors = build_rope_factors(il);
+
                 // compute Q and K and RoPE them
                 struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
                 cb(Qcur, "Qcur", il);
@@ -14893,15 +14897,24 @@
                     cb(Vcur, "Vcur", il);
                 }
 
-                Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
+                if (is_sliding) {
+                    Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
                         n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor,
                         beta_fast, beta_slow);
-                cb(Qcur, "Qcur", il);
+                    cb(Qcur, "Qcur", il);
 
-                Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
-                    nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
+                    Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                        rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
                         attn_factor, beta_fast, beta_slow);
-                cb(Kcur, "Kcur", il);
+                    cb(Kcur, "Kcur", il);
+                } else {
+                    // For non-sliding layers, just reshape without applying RoPE
+                    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                    cb(Qcur, "Qcur", il);
+
+                    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                    cb(Kcur, "Kcur", il);
+                }
 
                 cur = llm_build_kv(ctx0, lctx, kv_self, gf, model.layers[il].wo, model.layers[il].bo, Kcur, Vcur, Qcur,
                     KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f / sqrtf(float(n_embd_head)), cb, il);
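For reference, a minimal standalone sketch (plain C++, no ggml) of the layer pattern the diff implements: within every group of sliding_window_pattern = 4 layers, the first three use the sliding-window mask and get RoPE, while the fourth uses the global mask and no positional embedding. The layer count below is a hypothetical value for illustration.

    #include <cstdio>

    int main() {
        const int n_layer = 8;                 // hypothetical layer count, for illustration only
        const int sliding_window_pattern = 4;  // same constant as in the diff

        for (int il = 0; il < n_layer; ++il) {
            // layers 0..2 of each group of 4: SWA + RoPE; layer 3: global, no RoPE
            const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
            std::printf("layer %2d: %s\n", il,
                        is_sliding ? "SWA mask + RoPE" : "global mask, no RoPE");
        }
        return 0;
    }

Hoisting is_sliding into a named flag lets the mask selection and the RoPE branch share a single condition instead of repeating the modulo test, so the two can never drift apart.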

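The rope_factors tensor is passed as the freq-factors argument of ggml_rope_ext ("rope freq factors for 128k context"). As a rough illustration, assuming ggml's convention of dividing each dimension pair's base angle by its factor, the sketch below shows how larger factors slow the rotation of the affected dimensions; the factor values, n_rot, and position are invented for illustration, and the real tensor comes from build_rope_factors(il).

    #include <cmath>
    #include <cstdio>

    int main() {
        const float freq_base  = 10000.0f;  // common RoPE base; model-specific in practice
        const int   n_rot      = 8;         // toy number of rotated dimensions
        const int   pos        = 42;        // toy token position
        // hypothetical per-dimension-pair factors (real ones: build_rope_factors(il))
        const float factors[4] = { 1.0f, 1.0f, 4.0f, 8.0f };

        for (int i = 0; i < n_rot / 2; ++i) {
            float theta = pos * std::pow(freq_base, -2.0f * i / n_rot);
            theta /= factors[i];  // larger factor => slower rotation => longer effective wavelength
            std::printf("dim pair %d: angle = %.4f rad\n", i, theta);
        }
        return 0;
    }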