may_goto uses an additional 8 bytes on the stack, which causes the
interpreters[] array to go out of bounds when calculating index by
stack_size.

1. If a BPF program is rewritten, re-evaluate the stack size. For non-JIT
cases, reject loading directly.

2. For non-JIT cases, calculating interpreters[idx] may still cause an
out-of-bounds array access; add a bounds check and just warn about it.

3. For jit_requested cases, executing bpf_func must also trigger a
warning, so move the definition of the function __bpf_prog_ret0_warn
out of the CONFIG_BPF_JIT_ALWAYS_ON macro guard.

Reported-by: syzbot+d2a2c639d03ac200a...@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/bpf/0000000000000f823606139fa...@google.com/
Fixes: 011832b97b311 ("bpf: Introduce may_goto instruction")
Signed-off-by: Jiayuan Chen <mr...@163.com>
---
 kernel/bpf/core.c     | 19 +++++++++++++++----
 kernel/bpf/verifier.c |  7 +++++++
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index da729cbbaeb9..a0200fbbace9 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
        insn->code = BPF_JMP | BPF_CALL_ARGS;
 }
 #endif
-#else
+#endif
+
 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
                                         const struct bpf_insn *insn)
 {
        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-        * is not working properly, so warn about it!
+        * is not working properly, or interpreter is being used when
+        * prog->jit_requested is not 0, so warn about it!
         */
        WARN_ON_ONCE(1);
        return 0;
 }
-#endif
 
 bool bpf_prog_map_compatible(struct bpf_map *map,
                             const struct bpf_prog *fp)
@@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+       u32 idx = (round_up(stack_depth, 32) / 32) - 1;
 
-       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+       /* may_goto may cause stack size > 512, leading to idx out-of-bounds.
+        * But for non-JITed programs, we don't need bpf_func, so no bounds
+        * check needed.
+        */
+       if (!fp->jit_requested &&
+           !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+               fp->bpf_func = interpreters[idx];
+       } else {
+               fp->bpf_func = __bpf_prog_ret0_warn;
+       }
 #else
        fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9971c03adfd5..fcd302904ba0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21882,6 +21882,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                if (subprogs[cur_subprog + 1].start == i + delta + 1) {
                        subprogs[cur_subprog].stack_depth += stack_depth_extra;
                        subprogs[cur_subprog].stack_extra = stack_depth_extra;
+
+                       stack_depth = subprogs[cur_subprog].stack_depth;
+                       if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
+                               verbose(env, "stack size %d(extra %d) is too large\n",
+                                       stack_depth, stack_depth_extra);
+                               return -EINVAL;
+                       }
                        cur_subprog++;
                        stack_depth = subprogs[cur_subprog].stack_depth;
                        stack_depth_extra = 0;
-- 
2.47.1


Reply via email to