On 26/03/25 8:04 pm, Hari Bathini wrote:
The JIT-compiled code for an ldimm instruction can be anywhere between
1 and 5 instructions long, depending on the value being loaded.
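
As a rough illustration (a simplified, hypothetical sketch, not the
kernel's actual PPC_LI64/PPC_LI_ADDR expansion logic), the instruction
count grows with how much of the 64-bit value has to be materialized:

   /* Hypothetical helper: estimate how many ppc64 instructions a
    * load-immediate of 'imm' needs. Small sign-extendable values fit
    * in one instruction; an arbitrary 64-bit pointer can need up to
    * five. The real expansion also has 3- and 4-instruction cases
    * when some 16-bit halves of the value are zero.
    */
   static int ldimm64_insn_count(long imm)
   {
           if (imm >= -32768L && imm < 32768L)                /* li */
                   return 1;
           if (imm >= -2147483648L && imm < 2147483648L)      /* lis + ori */
                   return 2;
           return 5;                  /* lis + ori + sldi + oris + ori */
   }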

arch_bpf_trampoline_size() provides the JIT size of the BPF trampoline
before the buffer for JIT'ing it is allocated. The BPF trampoline JIT
code has ldimm instructions that need to load the value of the pointer
to struct bpf_tramp_image. But this pointer value is not the same when
calling arch_bpf_trampoline_size() and arch_prepare_bpf_trampoline().
So, the size arrived at using arch_bpf_trampoline_size() can differ
from the size needed in arch_prepare_bpf_trampoline(). When the number
of instructions emitted for these ldimm sequences in
arch_bpf_trampoline_size() is less than the number emitted during the
actual JIT compile of the trampoline, the below warning is produced:

   WARNING: CPU: 8 PID: 204190 at arch/powerpc/net/bpf_jit_comp.c:981 __arch_prepare_bpf_trampoline.isra.0+0xd2c/0xdcc

which is:

   /* Make sure the trampoline generation logic doesn't overflow */
   if (image && WARN_ON_ONCE(&image[ctx->idx] >
                        (u32 *)rw_image_end - BPF_INSN_SAFETY)) {

Fix this by passing NULL as the first argument to the
__arch_prepare_bpf_trampoline() call from arch_bpf_trampoline_size(),
to differentiate it from the call made by arch_prepare_bpf_trampoline(),
and ensure the maximum possible number of instructions is emitted in
arch_bpf_trampoline_size() for ldimm instructions that load a different
value during the actual JIT compile of the BPF trampoline.
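
For reference, the substitute value used in the patch below when im is
NULL, ~(1UL << (BITS_PER_LONG - 1)), is 0x7fffffffffffffff on 64-bit,
so every 16-bit half of it is non-zero and PPC_LI_ADDR has to emit its
longest sequence. A standalone sketch (not kernel code, assumes 64-bit
longs) to see this:

   #include <stdio.h>

   #define BITS_PER_LONG (sizeof(unsigned long) * 8)

   int main(void)
   {
           unsigned long v = ~(1UL << (BITS_PER_LONG - 1));

           /* Prints 0x7fffffffffffffff; the halfwords are 0xffff, 0xffff,
            * 0xffff, 0x7fff - none of them zero, so no shorter form of the
            * load-immediate sequence applies.
            */
           printf("sentinel = 0x%lx\n", v);
           for (int i = 0; i < 4; i++)
                   printf("halfword %d = 0x%lx\n", i, (v >> (16 * i)) & 0xffff);
           return 0;
   }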

Fixes: d243b62b7bd3 ("powerpc64/bpf: Add support for bpf trampolines")
Reported-by: Venkat Rao Bagalkote <venka...@linux.ibm.com>
Closes: https://lore.kernel.org/all/6168bfc8-659f-4b5a-a6fb-90a916dde...@linux.ibm.com/
Cc: sta...@vger.kernel.org # v6.13+
Signed-off-by: Hari Bathini <hbath...@linux.ibm.com>
---

* Removed a redundant '/' accidentally added in a comment and resending.

  arch/powerpc/net/bpf_jit_comp.c | 29 +++++++++++++++++++++++------
  1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2991bb171a9b..c94717ccb2bd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -833,7 +833,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                 EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL));
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
-                PPC_LI_ADDR(_R3, (unsigned long)im);
+                /*
+                 * Emit maximum possible instructions while getting the size of
+                 * bpf trampoline to ensure trampoline JIT code doesn't overflow.
+                 */
+                PPC_LI_ADDR(_R3, im ? (unsigned long)im :
+                                (unsigned long)(~(1UL << (BITS_PER_LONG - 1))));
                 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
                                                  (unsigned long)__bpf_tramp_enter);
                 if (ret)
@@ -889,7 +894,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                         bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
 
                 /* Reserve space to patch branch instruction to skip fexit progs */
-                im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
+                if (im)
+                        im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
                 EMIT(PPC_RAW_NOP());
         }
@@ -912,8 +918,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                 }
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
-                im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
-                PPC_LI_ADDR(_R3, im);
+                if (im)
+                        im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
+                /*
+                 * Emit maximum possible instructions while getting the size of
+                 * bpf trampoline to ensure trampoline JIT code doesn't overflow.
+                 */
+                PPC_LI_ADDR(_R3, im ? (unsigned long)im :
+                                (unsigned long)(~(1UL << (BITS_PER_LONG - 1))));
                 ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
                                                  (unsigned long)__bpf_tramp_exit);
                 if (ret)
@@ -972,7 +984,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
                              struct bpf_tramp_links *tlinks, void *func_addr)
 {
-        struct bpf_tramp_image im;
         void *image;
         int ret;
@@ -988,7 +999,13 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
         if (!image)
                 return -ENOMEM;
 
-        ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
+        /*
+         * Pass NULL as bpf_tramp_image pointer to differentiate the intent to get the
+         * buffer size for trampoline here. This differentiation helps in accounting for
+         * maximum possible instructions if the JIT code size is likely to vary during
+         * the actual JIT compile of the trampoline.
+         */
+        ret = __arch_prepare_bpf_trampoline(NULL, image, image + PAGE_SIZE, image,
                                             m, flags, tlinks, func_addr);
         bpf_jit_free_exec(image);


Tested this patch by applying it on the mainline kernel and running the tests 5 times; the issue is not seen. Hence, the reported issue is fixed.

Head commit on which this patch was applied: 1e26c5e28ca5821a824e90dd359556f5e9e7b89f.

Please add the below tag.

Tested-by: Venkat Rao Bagalkote <venka...@linux.ibm.com>

Regards,

Venkat.

