Tue, Jul 24, 2018 at 10:06:43PM CEST, pab...@redhat.com wrote:
>When mirred is invoked from the ingress path, and it wants to redirect
>the processed packet, it can now use the TC_ACT_REINJECT action,
>filling the tcf_result accordingly, and avoiding a per packet
>skb_clone().
>
>Overall this gives a ~10% improvement in forwarding performance for the
>TC S/W data path and TC S/W performances are now comparable to the
>kernel openvswitch datapath.
>
>v1 -> v2: use ACT_MIRRED instead of ACT_REDIRECT
>v2 -> v3: updated after action rename, fixed a typo into the commit
> message
>
>Signed-off-by: Paolo Abeni <pab...@redhat.com>
>---
> net/sched/act_mirred.c | 34 ++++++++++++++++++++++++----------
> 1 file changed, 24 insertions(+), 10 deletions(-)
>
>diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
>index eeb335f03102..368187312136 100644
>--- a/net/sched/act_mirred.c
>+++ b/net/sched/act_mirred.c
>@@ -25,6 +25,7 @@
> #include <net/net_namespace.h>
> #include <net/netlink.h>
> #include <net/pkt_sched.h>
>+#include <net/pkt_cls.h>
> #include <linux/tc_act/tc_mirred.h>
> #include <net/tc_act/tc_mirred.h>
>
>@@ -171,10 +172,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
> 			    struct tcf_result *res)
> {
> 	struct tcf_mirred *m = to_mirred(a);
>+	struct sk_buff *skb2 = skb;
> 	bool m_mac_header_xmit;
> 	struct net_device *dev;
>-	struct sk_buff *skb2;
> 	int retval, err = 0;
>+	bool want_ingress;
>+	bool is_redirect;
> 	int m_eaction;
> 	int mac_len;
>
>@@ -196,16 +199,19 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
> 		goto out;
> 	}
>
>-	skb2 = skb_clone(skb, GFP_ATOMIC);
>-	if (!skb2)
>-		goto out;
>+	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
>+	if (!skb_at_tc_ingress(skb) || !is_redirect) {
>+		skb2 = skb_clone(skb, GFP_ATOMIC);
>+		if (!skb2)
>+			goto out;
>+	}
>
> 	/* If action's target direction differs than filter's direction,
> 	 * and devices expect a mac header on xmit, then mac push/pull is
> 	 * needed.
> 	 */
>-	if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) &&
>-	    m_mac_header_xmit) {
>+	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
>+	if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
> 		if (!skb_at_tc_ingress(skb)) {
> 			/* caught at egress, act ingress: pull mac */
> 			mac_len = skb_network_header(skb) - skb_mac_header(skb);
>@@ -216,15 +222,23 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
> 		}
> 	}
>
>+	skb2->skb_iif = skb->dev->ifindex;
>+	skb2->dev = dev;
>+
> 	/* mirror is always swallowed */
>-	if (tcf_mirred_is_act_redirect(m_eaction)) {
>+	if (is_redirect) {
> 		skb2->tc_redirected = 1;
> 		skb2->tc_from_ingress = skb2->tc_at_ingress;
>+
>+		/* let's the caller reinject the packet, if possible */
>+		if (skb_at_tc_ingress(skb)) {
I probably missed something. Why only on ingress?

The patch looks good to me.

>+			res->ingress = want_ingress;
>+			res->qstats = this_cpu_ptr(m->common.cpu_qstats);
>+			return TC_ACT_REINJECT;
>+		}
> 	}
>
>-	skb2->skb_iif = skb->dev->ifindex;
>-	skb2->dev = dev;
>-	if (!tcf_mirred_act_wants_ingress(m_eaction))
>+	if (!want_ingress)
> 		err = dev_queue_xmit(skb2);
> 	else
> 		err = netif_receive_skb(skb2);
>-- 
>2.17.1
>
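
For my own reference, this is roughly how I read the caller side that is
expected to consume TC_ACT_REINJECT. It assumes the companion core patch
that defines TC_ACT_REINJECT and the new ingress/qstats fields in struct
tcf_result; ingress_hook_sketch() and run_tc_classifiers() are names I
made up for the sketch, not code from this series:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

/* hypothetical classifier entry point, stands in for whatever the
 * core ingress path actually calls
 */
static int run_tc_classifiers(struct sk_buff *skb, struct tcf_result *res);

static struct sk_buff *ingress_hook_sketch(struct sk_buff *skb)
{
        struct tcf_result cl_res = {};

        switch (run_tc_classifiers(skb, &cl_res)) {
        case TC_ACT_OK:
                return skb;             /* continue local processing */
        case TC_ACT_REINJECT:
                /* mirred already retargeted skb (skb->dev, skb_iif) and
                 * filled cl_res, so the original skb can be re-run in the
                 * requested direction without any clone
                 */
                if (cl_res.ingress)
                        netif_receive_skb(skb);
                else
                        dev_queue_xmit(skb);
                return NULL;            /* skb consumed */
        default:
                kfree_skb(skb);
                return NULL;
        }
}

The cl_res.ingress use above matches what the hunk fills in just before
returning TC_ACT_REINJECT; cl_res.qstats is presumably there so the caller
can account a drop if the reinjection fails.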