summaryrefslogtreecommitdiff
path: root/kernel/bpf
diff options
context:
space:
mode:
authorAlan Maguire <alan.maguire@oracle.com>2025-02-05 17:00:59 +0000
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2025-02-27 04:34:16 -0800
commit787d556a3de447e70964a4bdeba9196f62a62b1e (patch)
tree3b0f2c7ff2753f7be78ae6dcc126ab1c49a754ed /kernel/bpf
parent4dba79c1e7aad6620bbb707b6c4459380fd90860 (diff)
downloadlinux-787d556a3de447e70964a4bdeba9196f62a62b1e.tar.gz
linux-787d556a3de447e70964a4bdeba9196f62a62b1e.tar.bz2
linux-787d556a3de447e70964a4bdeba9196f62a62b1e.zip
bpf: Fix softlockup in arena_map_free on 64k page kernel
[ Upstream commit 517e8a7835e8cfb398a0aeb0133de50e31cae32b ] On an aarch64 kernel with CONFIG_PAGE_SIZE_64KB=y, arena_htab tests cause a segmentation fault and soft lockup. The same failure is not observed with 4k pages on aarch64. It turns out arena_map_free() is calling apply_to_existing_page_range() with the address returned by bpf_arena_get_kern_vm_start(). If this address is not page-aligned the code ends up calling apply_to_pte_range() with that unaligned address causing soft lockup. Fix it by rounding up GUARD_SZ to PAGE_SIZE << 1 so that the division by 2 in bpf_arena_get_kern_vm_start() returns a page-aligned value. Fixes: 317460317a02 ("bpf: Introduce bpf_arena.") Reported-by: Colm Harrington <colm.harrington@oracle.com> Suggested-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Alan Maguire <alan.maguire@oracle.com> Link: https://lore.kernel.org/r/20250205170059.427458-1-alan.maguire@oracle.com Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/arena.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 8caf56a308d9..eac5d1edefe9 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -39,7 +39,7 @@
*/
/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
-#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
+#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
struct bpf_arena {