From 3f8bd3ee283485219d635dfbe408a8d73b81669e Mon Sep 17 00:00:00 2001
From: Prarthana Bhattacharyya
Date: Sun, 26 Oct 2025 13:25:50 +0000
Subject: [PATCH] Fix in-place masking for AKT to prevent data leakage

Change masked_fill to masked_fill_ (the in-place variant) on line 273 to
prevent data leakage from future time steps. The non-in-place version
returns a new tensor and leaves the original unmodified, so the masking
has no effect when the result is not assigned back to the variable.

Fixes #52
---
 EduKTM/AKT/AKTNet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/EduKTM/AKT/AKTNet.py b/EduKTM/AKT/AKTNet.py
index 1155a48..1764b0a 100644
--- a/EduKTM/AKT/AKTNet.py
+++ b/EduKTM/AKT/AKTNet.py
@@ -270,7 +270,7 @@ def attention(q, k, v, d_k, mask, dropout, zero_pad, gamma=None):
         total_effect = torch.clamp(torch.clamp((dist_scores * gamma).exp(), min=1e-5), max=1e5)
         scores = scores * total_effect
 
-    scores.masked_fill(mask == 0, -1e23)
+    scores.masked_fill_(mask == 0, -1e23)
     scores = F.softmax(scores, dim=-1)
     if zero_pad:
         pad_zero = torch.zeros(bs, head, 1, seqlen).to(device)
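
Note: a minimal standalone sketch of the behavior difference, not part of
the patch; the toy tensor shapes and causal mask below are made up for
illustration:

    import torch

    scores = torch.randn(1, 1, 3, 3)
    # Toy causal mask: upper-triangular "future" positions are disallowed.
    mask = torch.tril(torch.ones(3, 3, dtype=torch.bool))

    out = scores.masked_fill(mask == 0, -1e23)  # returns a NEW tensor
    print(torch.equal(out, scores))             # False: scores is unchanged

    scores.masked_fill_(mask == 0, -1e23)       # modifies scores in place
    print(torch.equal(out, scores))             # True: now they match

An equivalent non-in-place fix would be
scores = scores.masked_fill(mask == 0, -1e23); the patch keeps the in-place
form, which matches the surrounding code and avoids an extra allocation.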