| author | Jiayuan Chen <mrpre@163.com> | 2025-02-14 17:18:21 +0800 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2025-04-10 14:44:26 +0200 |
| commit | 1a86ae57b2600e5749f5f674e9d4296ac00c69a8 (patch) | |
| tree | 8daa4049dd668583c2cd2514649a4259feecde17 /kernel | |
| parent | 409160e7dbc5f7649de4c1e1b8990341d872c239 (diff) | |
bpf: Fix array bounds error with may_goto
[ Upstream commit 6ebc5030e0c5a698f1dd9a6684cddf6ccaed64a0 ]
may_goto uses an additional 8 bytes on the stack, which causes the index
into the interpreters[] array, computed from stack_size, to go out of
bounds.
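
To make the arithmetic concrete, here is a minimal userspace sketch (not kernel code; it assumes the in-tree constants, namely MAX_BPF_STACK = 512 and an interpreters[] table with one entry per 32-byte stack-size bucket, i.e. 16 entries; interp_idx is a hypothetical helper mirroring the kernel's round_up(stack_size, 32) / 32 - 1 computation) showing how the extra 8 bytes push the computed index one slot past the end of the table:

```c
#include <stdio.h>

#define MAX_BPF_STACK 512
/* One interpreter per 32-byte stack bucket: 32, 64, ..., 512 -> 16 entries */
#define NUM_INTERPRETERS (MAX_BPF_STACK / 32)

/* Hypothetical helper: same computation as round_up(stack_size, 32) / 32 - 1 */
static unsigned int interp_idx(unsigned int stack_size)
{
	return (stack_size + 31) / 32 - 1;
}

int main(void)
{
	unsigned int ok  = interp_idx(MAX_BPF_STACK);     /* 15: last valid slot  */
	unsigned int bad = interp_idx(MAX_BPF_STACK + 8); /* 16: one past the end */

	printf("idx(%d) = %u, idx(%d) = %u, table size = %d\n",
	       MAX_BPF_STACK, ok, MAX_BPF_STACK + 8, bad, NUM_INTERPRETERS);
	return 0;
}
```

With a 520-byte stack the index comes out to 16, while the last valid slot is 15.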
1. If a BPF program is rewritten, re-evaluate the stack size. For non-JIT
cases, reject loading directly.
2. For non-JIT cases, computing interpreters[idx] may still yield an
out-of-bounds index, so bounds-check the access and just warn about it
(this guarded-lookup pattern is sketched after this list).
3. For jit_requested cases, executing bpf_func also needs to trigger the
warning, so move the definition of __bpf_prog_ret0_warn out of the
CONFIG_BPF_JIT_ALWAYS_ON #ifdef block.
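
Points 2 and 3 boil down to a guarded table lookup. As a rough standalone illustration (a simplified sketch of the pattern with mocked-up types and stub functions, not the kernel change itself; the real fix appears in the diff below), the selector falls back to a warning stub instead of reading past the table:

```c
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

typedef unsigned int (*bpf_func_t)(const void *ctx);

/* Stand-in for __bpf_prog_ret0_warn: if this ever runs, a program that
 * should never hit the interpreter was about to be interpreted. */
static unsigned int ret0_warn(const void *ctx)
{
	(void)ctx;
	fprintf(stderr, "warning stub executed\n");
	return 0;
}

static unsigned int interp(const void *ctx)
{
	(void)ctx;
	return 1;
}

int main(void)
{
	bpf_func_t interpreters[16];
	bpf_func_t bpf_func;
	unsigned int idx = 16; /* out-of-bounds index from a 520-byte stack */
	int jit_requested = 0;

	for (unsigned int i = 0; i < ARRAY_SIZE(interpreters); i++)
		interpreters[i] = interp;

	/* Guarded selection: index the table only when idx is in range and
	 * no JIT was requested; otherwise install the warning stub. */
	if (!jit_requested && idx < ARRAY_SIZE(interpreters))
		bpf_func = interpreters[idx];
	else
		bpf_func = ret0_warn;

	return (int)bpf_func(NULL);
}
```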
Reported-by: syzbot+d2a2c639d03ac200a4f1@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/bpf/0000000000000f823606139faa5d@google.com/
Fixes: 011832b97b311 ("bpf: Introduce may_goto instruction")
Signed-off-by: Jiayuan Chen <mrpre@163.com>
Link: https://lore.kernel.org/r/20250214091823.46042-2-mrpre@163.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/bpf/core.c | 19 |
| -rw-r--r-- | kernel/bpf/verifier.c | 7 |
2 files changed, 22 insertions, 4 deletions
```diff
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index da729cbbaeb9..a0200fbbace9 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
 	insn->code = BPF_JMP | BPF_CALL_ARGS;
 }
 #endif
-#else
+#endif
+
 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 					 const struct bpf_insn *insn)
 {
 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-	 * is not working properly, so warn about it!
+	 * is not working properly, or interpreter is being used when
+	 * prog->jit_requested is not 0, so warn about it!
 	 */
 	WARN_ON_ONCE(1);
 	return 0;
 }
-#endif
 
 bool bpf_prog_map_compatible(struct bpf_map *map,
 			     const struct bpf_prog *fp)
@@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
 
-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+	/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
+	 * But for non-JITed programs, we don't need bpf_func, so no bounds
+	 * check needed.
+	 */
+	if (!fp->jit_requested &&
+	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+		fp->bpf_func = interpreters[idx];
+	} else {
+		fp->bpf_func = __bpf_prog_ret0_warn;
+	}
 #else
 	fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 60611df77957..c6f3b5f4ff2b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21897,6 +21897,13 @@ next_insn:
 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
 			subprogs[cur_subprog].stack_extra = stack_depth_extra;
+
+			stack_depth = subprogs[cur_subprog].stack_depth;
+			if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
+				verbose(env, "stack size %d(extra %d) is too large\n",
+					stack_depth, stack_depth_extra);
+				return -EINVAL;
+			}
 			cur_subprog++;
 			stack_depth = subprogs[cur_subprog].stack_depth;
 			stack_depth_extra = 0;
```
