> From: Konstantin Ananyev [mailto:konstantin.anan...@intel.com]
> Sent: Wednesday, May 27, 2020 4:17 PM
> 
> Make the x86 JIT generate native code for the
> (BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD)
> instructions.
> 
> Signed-off-by: Konstantin Ananyev <konstantin.anan...@intel.com>
> ---

[...]

> +/*
> + * helper function, used by emit_ld_mbuf().
> + * generates code for 'fast_path':
> + * calculate the load offset and check whether it is inside
> + * the first packet segment.
> + */
> +static void
> +emit_ldmb_fast_path(struct bpf_jit_state *st, const uint32_t rg[EBPF_REG_7],
> +     uint32_t sreg, uint32_t mode, uint32_t sz, uint32_t imm,
> +     const int32_t ofs[LDMB_OFS_NUM])
> +{
> +     /* make R2 contain *off* value */
> +
> +     if (sreg != rg[EBPF_REG_2]) {
> +             emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K,
> +                     rg[EBPF_REG_2], imm);
> +             if (mode == BPF_IND)
> +                     emit_alu_reg(st, EBPF_ALU64 | BPF_ADD | BPF_X,
> +                             sreg, rg[EBPF_REG_2]);
> +     } else
> +             /* BPF_IND with sreg == R2 */
> +             emit_alu_imm(st, EBPF_ALU64 | BPF_ADD | BPF_K,
> +                     rg[EBPF_REG_2], imm);
> +
> +     /* R3 = mbuf->data_len */
> +     emit_ld_reg(st, BPF_LDX | BPF_MEM | BPF_H,
> +             rg[EBPF_REG_6], rg[EBPF_REG_3],
> +             offsetof(struct rte_mbuf, data_len));
> +
> +     /* R3 = R3 - R2 */
> +     emit_alu_reg(st, EBPF_ALU64 | BPF_SUB | BPF_X,
> +             rg[EBPF_REG_2], rg[EBPF_REG_3]);
> +
> +     /* JSLT R3, <sz> <slow_path> */
> +     emit_cmp_imm(st, EBPF_ALU64, rg[EBPF_REG_3], sz);
> +     emit_abs_jcc(st, BPF_JMP | EBPF_JSLT | BPF_K, ofs[LDMB_SLP_OFS]);
> +
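
If I read the emitted sequence right, the fast-path check above is
equivalent to something like the C below (ldmb_fast_path_check and the
is_ind flag are names I made up for illustration; only the mbuf field
and the register roles come from the patch):

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * illustrative C equivalent of the emitted fast-path check;
 * returns non-zero when the slow path has to be taken.
 */
static inline int
ldmb_fast_path_check(const struct rte_mbuf *mb, uint64_t sreg_val,
	int is_ind, uint32_t imm, uint32_t sz)
{
	uint64_t off;
	int64_t rem;

	/* R2 = off: imm for BPF_ABS, sreg + imm for BPF_IND */
	off = is_ind ? sreg_val + imm : imm;

	/* R3 = mbuf->data_len; R3 -= R2 */
	rem = (int64_t)mb->data_len - (int64_t)off;

	/* signed JSLT: an off beyond data_len makes rem negative */
	return rem < (int64_t)sz;
}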

[...]

> +
> +/*
> + * emit code for BPF_ABS/BPF_IND load.
> + * generates the following construction:
> + * fast_path:
> + *   off = ins->sreg + ins->imm
> + *   if (mbuf->data_len - off < ins->opsz)
> + *      goto slow_path;
> + *   ptr = mbuf->buf_addr + mbuf->data_off + off;
> + *   goto fin_part;
> + * slow_path:
> + *   typeof(ins->opsz) buf; //allocate space on the stack
> + *   ptr = __rte_pktmbuf_read(mbuf, off, ins->opsz, &buf);
> + *   if (ptr == NULL)
> + *      goto exit_label;
> + * fin_part:
> + *   res = *(typeof(ins->opsz))ptr;
> + *   res = bswap(res);
> + */
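
To make the construction concrete, a rough C rendering of the BPF_W
(4-byte) case could look as below; __rte_pktmbuf_read() is the existing
mbuf helper the slow path calls, while ldmb_word and the err
out-parameter are illustrative only:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_mbuf.h>

static inline uint64_t
ldmb_word(const struct rte_mbuf *mb, uint32_t off, int *err)
{
	uint32_t buf;	/* stack space used by the slow path */
	const void *ptr;

	if ((int64_t)mb->data_len - (int64_t)off >= (int64_t)sizeof(buf)) {
		/* fast path: data is contiguous in the first segment */
		ptr = (const char *)mb->buf_addr + mb->data_off + off;
	} else {
		/* slow path: may gather from multiple segments into buf */
		ptr = __rte_pktmbuf_read(mb, off, sizeof(buf), &buf);
		if (ptr == NULL) {
			*err = 1;	/* JIT-ed code jumps to exit_label */
			return 0;
		}
	}

	*err = 0;
	/* fin_part: load and convert from network byte order */
	return rte_be_to_cpu_32(*(const uint32_t *)ptr);
}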

[...] 

The comparison for jumping to the slow path looks correct now: with the
signed JSLT, an off beyond data_len leaves R3 negative, so the slow path
is still taken.

I haven't reviewed it all in depth, but it certainly deserves an:

Acked-by: Morten Brørup <m...@smartsharesystems.com>
