Notice the compiler-generated ASM code layout was suboptimal. It assumed map enqueue errors were the likely case, which they shouldn't be. It also assumed that calling xdp_do_flush_map(), due to the map changing between packets, was the likely case, while that should be very unlikely.
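For reference, the kernel's likely()/unlikely() annotations boil down to GCC's __builtin_expect(), which steers the compiler's basic-block layout so the expected path stays fall-through and the cold path is moved out of the hot instruction stream. Below is a minimal user-space sketch of that mechanism; it is illustrative only (the macro definitions mirror but are not the actual include/linux/compiler.h code, and enqueue() is a made-up stand-in):

/*
 * Stand-alone sketch of the likely()/unlikely() branch hints,
 * built directly on __builtin_expect(). Illustrative only.
 */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical enqueue that only fails for a sentinel value. */
static int enqueue(int pkt)
{
	return pkt < 0 ? -1 : 0;
}

int main(void)
{
	int err = enqueue(42);

	/*
	 * Marking the error branch as unlikely tells the compiler the
	 * success path is the common case, so it keeps that path as
	 * straight-line code and places the error handling out of line.
	 */
	if (unlikely(err)) {
		fprintf(stderr, "enqueue failed: %d\n", err);
		return 1;
	}

	puts("enqueue ok");
	return 0;
}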
Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
---
 net/core/filter.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index c25eb36f1320..520f5e9e0b73 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3182,7 +3182,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 		struct bpf_dtab_netdev *dst = fwd;
 
 		err = dev_map_enqueue(dst, xdp, dev_rx);
-		if (err)
+		if (unlikely(err))
 			return err;
 		__dev_map_insert_ctx(map, index);
 		break;
@@ -3191,7 +3191,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 		struct bpf_cpu_map_entry *rcpu = fwd;
 
 		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
-		if (err)
+		if (unlikely(err))
 			return err;
 		__cpu_map_insert_ctx(map, index);
 		break;
@@ -3279,7 +3279,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 		err = -EINVAL;
 		goto err;
 	}
-	if (ri->map_to_flush && ri->map_to_flush != map)
+	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
 		xdp_do_flush_map();
 
 	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);