On 02/04/2024 at 12:58, Hari Bathini wrote:
> Currently, the BPF JIT code on powerpc assumes all BPF functions and
> helpers to be part of kernel text. This is false for the kfunc case, as
> function addresses can be module addresses as well. So, ensure module
> addresses are supported to enable kfunc support.
> 
> Emit instructions based on whether the function address is a kernel
> text address or a module address, so that the optimized instruction
> sequence is retained for the kernel text case.
> 
> Also, as BPF programs are always at module addresses and a BPF helper
> can be within the kernel address space as well, relative addressing
> often fails with an "out of range of pcrel address" error. Use the
> unoptimized instruction sequence for both kernel and module addresses
> to work around this when PCREL addressing is used.
> 
> With module addresses supported, override bpf_jit_supports_kfunc_call()
> to enable kfunc support. Since module address offsets can be more than
> 32 bits long on PPC64, override bpf_jit_supports_far_kfunc_call() to
> enable 64-bit pointers.
> 
> Signed-off-by: Hari Bathini <hbath...@linux.ibm.com>
> ---
> 
> * Changes in v3:
>    - Retained optimized instruction sequence when function address is
>      a core kernel address as suggested by Naveen.
>    - Used unoptimized instruction sequence for PCREL addressing to
>      avoid out-of-range errors for core kernel function addresses.
>    - Folded patch that adds support for kfunc calls with patch that
>      enables/advertises this support as suggested by Naveen.
> 
> 
>   arch/powerpc/net/bpf_jit_comp.c   | 10 +++++++
>   arch/powerpc/net/bpf_jit_comp64.c | 48 ++++++++++++++++++++-----------
>   2 files changed, 42 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index 0f9a21783329..dc7ffafd7441 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -359,3 +359,13 @@ void bpf_jit_free(struct bpf_prog *fp)
>   
>       bpf_prog_unlock_free(fp);
>   }
> +
> +bool bpf_jit_supports_kfunc_call(void)
> +{
> +     return true;
> +}
> +
> +bool bpf_jit_supports_far_kfunc_call(void)
> +{
> +     return IS_ENABLED(CONFIG_PPC64) ? true : false;

You don't need the true/false; the following is enough:

        return IS_ENABLED(CONFIG_PPC64);
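
IS_ENABLED() already evaluates to 0 or 1, so the implicit conversion to
bool does the rest.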

> +}
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 7f62ac4b4e65..ec3adf715c55 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -207,24 +207,14 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
>       unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
>       long reladdr;
>   
> -     if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
> +     /*
> +      * With the introduction of the kfunc feature, BPF helpers can be part
> +      * of kernel text as well as module text.
> +      */
> +     if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
>               return -EINVAL;
>   
> -     if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
> -             reladdr = func_addr - CTX_NIA(ctx);
> -
> -             if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
> -                     pr_err("eBPF: address of %ps out of range of pcrel address.\n",
> -                             (void *)func);
> -                     return -ERANGE;
> -             }
> -             /* pla r12,addr */
> -             EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
> -             EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
> -             EMIT(PPC_RAW_MTCTR(_R12));
> -             EMIT(PPC_RAW_BCTR());
> -
> -     } else {
> +     if (core_kernel_text(func_addr) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
>               reladdr = func_addr - kernel_toc_addr();
>               if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
>                       pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
> @@ -235,6 +225,32 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
>               EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
>               EMIT(PPC_RAW_MTCTR(_R12));
>               EMIT(PPC_RAW_BCTRL());
> +     } else {
> +             if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
> +                     /* func points to the function descriptor */
> +                     PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
> +                     /* Load actual entry point from function descriptor */
> +                     EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
> +                     /* ... and move it to CTR */
> +                     EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
> +                     /*
> +                      * Load TOC from function descriptor at offset 8.
> +                      * We can clobber r2 since we get called through a
> +                      * function pointer (so caller will save/restore r2)
> +                      * and since we don't use a TOC ourself.
> +                      */
> +                     EMIT(PPC_RAW_LD(2, bpf_to_ppc(TMP_REG_2), 8));
> +                     EMIT(PPC_RAW_BCTRL());
> +             } else {
> +                     /* We can clobber r12 */
> +                     PPC_LI64(12, func);
> +                     EMIT(PPC_RAW_MTCTR(12));
> +                     EMIT(PPC_RAW_BCTRL());
> +#ifndef CONFIG_PPC_KERNEL_PCREL

Why not use IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)?
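
Something like this (untested sketch) would keep both branches visible to
the compiler:

        if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
                /* Restore kernel TOC */
                EMIT(PPC_RAW_LD(2, 13, offsetof(struct paca_struct, kernel_toc)));
        }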

> +                     /* Restore kernel TOC */
> +                     EMIT(PPC_RAW_LD(2, 13, offsetof(struct paca_struct, kernel_toc)));
> +#endif
> +             }
>       }
>   
>       return 0;
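
For context: with a non-PCREL kernel, the fast path retained above boils
down to a TOC-relative sequence along these lines (a sketch; the
PPC_RAW_ADDIS() line falls between the two quoted hunks):

        EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
        EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
        EMIT(PPC_RAW_MTCTR(_R12));
        EMIT(PPC_RAW_BCTRL());

i.e. addis/addi materialize r12 relative to the kernel TOC, and
mtctr/bctrl perform the indirect call.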
