After the previous patches, the verifier has marked those instructions that
really need zero extension on dst_reg.

It is then up to each back-end to decide how to use this information to
eliminate unnecessary zero-extension code-gen during JIT compilation.

One approach is:
  1. The verifier inserts explicit zero extension for those instructions
     that need it.
  2. JIT back-ends do NOT generate zero extension for sub-register
     writes any more.

The good thing about this approach is that it requires no major change to
the JIT back-end interface, so all back-ends get this optimization.
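
For example (a sketch using the insn macros from include/linux/filter.h;
BPF_ZEXT is the new opcode introduced earlier in this series), step 1 turns
a marked 32-bit write into:

  /* 32-bit write whose dst_reg the verifier marked as needing zero
   * extension:
   */
  BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1),

  /* after patching, a BPF_ZEXT on the same dst_reg follows, clearing
   * its high 32 bits:
   */
  BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1),
  BPF_ALU32_IMM(BPF_ZEXT, BPF_REG_0, 0),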

However, only back-ends that lack hardware zero extension want this
optimization. Back-ends like x86_64 and AArch64 have hardware support, so
zext insertion should be disabled for them.

This patch introduces a new target hook, "bpf_jit_hardware_zext", which
defaults to true, meaning the underlying hardware does zero extension
implicitly, so zext insertion by the verifier is disabled. Once a back-end
overrides this hook to return false, the verifier will insert BPF_ZEXT to
clear the high 32 bits of definitions when necessary.
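
For example, a back-end without hardware zero extension could override the
weak default from kernel/bpf/core.c like this (a minimal sketch; the real
override would live in the arch's JIT code):

  bool bpf_jit_hardware_zext(void)
  {
          return false;   /* ask the verifier to insert BPF_ZEXT */
  }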

Offload targets do not use this native target hook; instead, they can get
the optimization results through bpf_prog_offload_ops.finalize.
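
A sketch of how an offload driver could consume the analysis results from
its finalize callback; example_offload_finalize() and example_record_zext()
are hypothetical names, but zext_dst is the insn aux flag set by the
previous patches:

  static int example_offload_finalize(struct bpf_verifier_env *env)
  {
          struct bpf_insn_aux_data *aux = env->insn_aux_data;
          int i;

          for (i = 0; i < env->prog->len; i++)
                  if (aux[i].zext_dst)
                          /* insn i writes a sub-register that needs its
                           * high 32 bits cleared on this target
                           */
                          example_record_zext(env, i);
          return 0;
  }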

Reviewed-by: Jakub Kicinski <jakub.kicin...@netronome.com>
Signed-off-by: Jiong Wang <jiong.w...@netronome.com>
---
 include/linux/bpf.h    |  1 +
 include/linux/filter.h |  1 +
 kernel/bpf/core.c      |  8 ++++++++
 kernel/bpf/verifier.c  | 40 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11a5fb9..cf3c3f3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -373,6 +373,7 @@ struct bpf_prog_aux {
        u32 id;
        u32 func_cnt; /* used by non-func prog as the number of func progs */
        u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+       bool verifier_zext; /* Zero extensions have been inserted by verifier. */
        bool offload_requested;
        struct bpf_prog **func;
        void *jit_data; /* JIT specific data. arch dependent */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fb0edad..8750657 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -821,6 +821,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
+bool bpf_jit_hardware_zext(void);
 bool bpf_helper_changes_pkt_data(void *func);
 
 static inline bool bpf_dump_raw_ok(void)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ee8703d..9754346 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2095,6 +2095,14 @@ bool __weak bpf_helper_changes_pkt_data(void *func)
        return false;
 }
 
+/* Return TRUE if the target hardware of JIT will do zero extension to high bits
+ * when writing to the low 32-bit of one register. Otherwise, return FALSE.
+ */
+bool __weak bpf_jit_hardware_zext(void)
+{
+       return true;
+}
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b75913c..66aaaa0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7648,6 +7648,37 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
        return 0;
 }
 
+static int opt_subreg_zext_lo32(struct bpf_verifier_env *env)
+{
+       struct bpf_insn_aux_data *aux = env->insn_aux_data;
+       struct bpf_insn *insns = env->prog->insnsi;
+       int i, delta = 0, len = env->prog->len;
+       struct bpf_insn zext_patch[2];
+       struct bpf_prog *new_prog;
+
+       zext_patch[1] = BPF_ALU32_IMM(BPF_ZEXT, 0, 0);
+       for (i = 0; i < len; i++) {
+               int adj_idx = i + delta;
+               struct bpf_insn insn;
+
+               if (!aux[adj_idx].zext_dst)
+                       continue;
+
+               insn = insns[adj_idx];
+               zext_patch[0] = insn;
+               zext_patch[1].dst_reg = insn.dst_reg;
+               new_prog = bpf_patch_insn_data(env, adj_idx, zext_patch, 2);
+               if (!new_prog)
+                       return -ENOMEM;
+               env->prog = new_prog;
+               insns = new_prog->insnsi;
+               aux = env->insn_aux_data;
+               delta += 2;
+       }
+
+       return 0;
+}
+
 /* convert load instructions that access fields of a context type into a
  * sequence of instructions that access fields of the underlying structure:
  *     struct __sk_buff    -> struct sk_buff
@@ -8499,6 +8530,15 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (ret == 0)
                ret = fixup_bpf_calls(env);
 
+       /* do 32-bit optimization after insn patching is done so those patched
+        * insns could be handled correctly.
+        */
+       if (ret == 0 && !bpf_jit_hardware_zext() &&
+           !bpf_prog_is_dev_bound(env->prog->aux)) {
+               ret = opt_subreg_zext_lo32(env);
+               env->prog->aux->verifier_zext = !ret;
+       }
+
        if (ret == 0)
                ret = fixup_call_args(env);
 
-- 
2.7.4
