This patch adds flow-based mirroring support to the mirror library.
Flow-based mirroring mirrors only the traffic that matches a flow
rule. Applications may either supply a custom callback that applies
their own flow rules, or use the MAC matching rules implemented by
the mirror library.
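
A minimal registration sketch follows. The field names match the
struct rte_mirror_param members referenced by this patch; the
registration argument order, buffer sizing, and MAC encoding are
illustrative assumptions, not the final API:

    /* Mirror Rx packets whose destination MAC matches target_mac */
    static rte_spinlock_t lock;             /* one lock per dst queue */
    static struct rte_mbuf *buf[32];        /* max_burst_size entries
                                             * per source queue */
    static uint64_t target_mac;             /* MAC in the low 6 bytes */
    static struct rte_mirror_param param = {
            .dst_port_id = 1,
            .dst_vlan_id = 100,
            .n_dst_queue = 1,
            .max_burst_size = 32,
            .extra_data = &target_mac,
    };

    param.locks = &lock;
    param.pkt_buf = buf;
    rte_spinlock_init(&lock);

    /* Argument order illustrative; omitting a tx_cb selects Rx mirroring */
    rte_mirror_offload_register(src_port, &param,
            rte_mirror_type_flow_mac, NULL);

With rte_mirror_type_flow_custom, param.custom_scan() is invoked per
packet in place of the MAC comparison.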

Signed-off-by: Liang-min Wang <liang-min.w...@intel.com>
Signed-off-by: Patrick Fu <patrick...@intel.com>
Signed-off-by: Timothy Miskell <timothy.misk...@intel.com>
---
 lib/librte_mirror/rte_mirror.c | 120 ++++++++++++++++++++++++++++++++++++
 1 file changed, 120 insertions(+)

diff --git a/lib/librte_mirror/rte_mirror.c b/lib/librte_mirror/rte_mirror.c
index d2c0d8eab..523ab37ff 100644
--- a/lib/librte_mirror/rte_mirror.c
+++ b/lib/librte_mirror/rte_mirror.c
@@ -196,6 +196,114 @@ mirror_pkt_update(struct rte_mbuf *pkt, uint16_t dst_vlan_id)
        rte_mbuf_refcnt_update(pkt, 1);
 }
 
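+/*
+ * Scan a burst for packets matching the mirror flow rule and transmit
+ * copies of the matches on the destination port.  When is_custom is
+ * set, matching is delegated to the application's custom_scan()
+ * callback; otherwise the MAC address at mac_offset is compared
+ * against the target address carried in extra_data.
+ */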
+static inline uint16_t
+mirror_flow_cb(uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+               void *user_params, bool is_custom, uint8_t mac_offset)
+{
+       struct rte_mirror_param *data = user_params;
+       uint16_t i, dst_qidx, match_count = 0;
+       uint16_t pkt_trans;
+       uint16_t dst_port_id = data->dst_port_id;
+       uint16_t dst_vlan_id = data->dst_vlan_id;
+       uint64_t target_addr = *((uint64_t *)data->extra_data);
+       struct rte_mbuf **pkt_buf = &data->pkt_buf[qidx * data->max_burst_size];
+       uint64_t *mac_addr = NULL;
+
+       if (nb_pkts == 0)
+               return 0;
+
+       if (nb_pkts > data->max_burst_size) {
+               MIRROR_LOG(ERR, "Per-flow batch size, %d, exceeds "
+                       "maximum limit, %d.\n", nb_pkts, data->max_burst_size);
+               /* uint16_t return cannot carry -EINVAL; pass burst through */
+               return nb_pkts;
+       }
+
+       if (unlikely(is_custom)) {
+               for (i = 0; i < nb_pkts; i++) {
+                       if (data->custom_scan(pkts[i], user_params)) {
+                               pkt_buf[match_count] = pkts[i];
+                               mirror_pkt_update(pkt_buf[match_count],
+                                               dst_vlan_id);
+                               match_count++;
+                       }
+               }
+       } else {
+               for (i = 0; i < nb_pkts; i++) {
+                       mac_addr =
+                               rte_pktmbuf_mtod_offset(pkts[i],
+                                               uint64_t *, mac_offset);
+                       if (is_mac_addr_match(target_addr, (*mac_addr))) {
+                               pkt_buf[match_count] = pkts[i];
+                               mirror_pkt_update(pkt_buf[match_count],
+                                               dst_vlan_id);
+                               match_count++;
+                       }
+               }
+       }
+
+       dst_qidx = (data->n_dst_queue > qidx) ? qidx : (data->n_dst_queue - 1);
+
+       rte_spinlock_lock(&data->locks[dst_qidx]);
+       pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx,
+                       pkt_buf, match_count);
+       rte_spinlock_unlock(&data->locks[dst_qidx]);
+
+       for (i = 0; i < match_count; i++)
+               pkt_buf[i]->ol_flags &= ~VLAN_INSERT_FLAG;
+
+       while (unlikely(pkt_trans < match_count)) {
+               rte_pktmbuf_free(pkt_buf[pkt_trans]);
+               pkt_trans++;
+       }
+
+       return nb_pkts;
+}
+
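+/*
+ * Rx/Tx callback wrappers: Rx callbacks take an extra max_pkts
+ * argument.  The MAC-based variants match the destination MAC on Rx
+ * and the source MAC on Tx.
+ */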
+static uint16_t
+mirror_rx_flow_custom_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+       struct rte_mbuf **pkts, uint16_t nb_pkts,
+       uint16_t maxi_pkts __rte_unused, void *user_params)
+{
+       return mirror_flow_cb(qidx, pkts, nb_pkts, user_params, true, 0);
+}
+
+static uint16_t
+mirror_tx_flow_custom_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+       struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+       return mirror_flow_cb(qidx, pkts, nb_pkts, user_params, true, 0);
+}
+
+static uint16_t
+mirror_rx_flow_mac_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+       struct rte_mbuf **pkts, uint16_t nb_pkts,
+       uint16_t maxi_pkts __rte_unused, void *user_params)
+{
+       return mirror_flow_cb(qidx, pkts, nb_pkts,
+                       user_params, false, DST_MAC_OFFSET);
+}
+
+static uint16_t
+mirror_tx_flow_mac_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+       struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+       return mirror_flow_cb(qidx, pkts, nb_pkts,
+                       user_params, false, SRC_MAC_OFFSET);
+}
+
 static inline uint16_t
 mirror_port_cb(uint16_t qidx, struct rte_mbuf **pkts,
        uint16_t nb_pkts, void *user_params)
@@ -278,6 +386,18 @@ rte_mirror_offload_register(uint16_t src_port,
                else
                        rx_fn = mirror_rx_port_cb;
                break;
+       case rte_mirror_type_flow_mac:
+               if (tx_cb)
+                       tx_fn = mirror_tx_flow_mac_cb;
+               else
+                       rx_fn = mirror_rx_flow_mac_cb;
+               break;
+       case rte_mirror_type_flow_custom:
+               if (tx_cb)
+                       tx_fn = mirror_tx_flow_custom_cb;
+               else
+                       rx_fn = mirror_rx_flow_custom_cb;
+               break;
        default:
                MIRROR_LOG(ERR, "Un-supported mirror offloading type!!!\n");
                return -ENOTSUP;
-- 
2.18.4
