Do macswap for four packets in the same loop iteration to squeeze more
CPU cycles.

Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 app/test-pmd/macswap_sse.h | 65 ++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 11 deletions(-)
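Note (not part of the commit message): the change is plain 4-way loop
unrolling: load, shuffle and store the Ethernet headers of four packets
back to back so the independent SSE operations of different packets can
overlap, then fall through to a one-at-a-time tail loop for the remaining
1-3 packets. Below is a minimal standalone sketch of the same pattern for
illustration only; macswap_unrolled and frames are hypothetical names, and
it assumes a simple array of pointers to frame headers (each at least 16
bytes long) instead of DPDK's rte_mbuf and offload-flag handling. Build
with SSSE3 enabled (e.g. -mssse3).

#include <stdint.h>
#include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

/*
 * Swap the destination and source MAC addresses of 'nb' Ethernet frames.
 * 'frames' stands in for the rte_mbuf handling done in the real testpmd
 * code; each entry points to the start of an Ethernet header.
 */
void
macswap_unrolled(uint8_t *frames[], int nb)
{
        /*
         * Shuffle mask: bytes 0..5 (dst MAC) and bytes 6..11 (src MAC)
         * are exchanged; bytes 12..15 (EtherType + first payload bytes)
         * are written back unchanged.
         */
        const __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
                                              5, 4, 3, 2,
                                              1, 0, 11, 10,
                                              9, 8, 7, 6);
        int i = 0;

        /*
         * Main loop: four frames per iteration, so the loads, shuffles
         * and stores of independent packets can overlap in the pipeline.
         */
        for (; i + 4 <= nb; i += 4) {
                __m128i a0 = _mm_loadu_si128((const __m128i *)frames[i]);
                __m128i a1 = _mm_loadu_si128((const __m128i *)frames[i + 1]);
                __m128i a2 = _mm_loadu_si128((const __m128i *)frames[i + 2]);
                __m128i a3 = _mm_loadu_si128((const __m128i *)frames[i + 3]);

                a0 = _mm_shuffle_epi8(a0, shfl_msk);
                a1 = _mm_shuffle_epi8(a1, shfl_msk);
                a2 = _mm_shuffle_epi8(a2, shfl_msk);
                a3 = _mm_shuffle_epi8(a3, shfl_msk);

                _mm_storeu_si128((__m128i *)frames[i], a0);
                _mm_storeu_si128((__m128i *)frames[i + 1], a1);
                _mm_storeu_si128((__m128i *)frames[i + 2], a2);
                _mm_storeu_si128((__m128i *)frames[i + 3], a3);
        }

        /* Tail loop: at most three leftover frames, one at a time. */
        for (; i < nb; i++) {
                __m128i a = _mm_loadu_si128((const __m128i *)frames[i]);
                a = _mm_shuffle_epi8(a, shfl_msk);
                _mm_storeu_si128((__m128i *)frames[i], a);
        }
}

Four is a reasonable unroll factor here: it gives enough independent work
to hide load latency while still using only a handful of xmm registers.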
diff --git a/app/test-pmd/macswap_sse.h b/app/test-pmd/macswap_sse.h
index d5b0f6a21..0649539c2 100644
--- a/app/test-pmd/macswap_sse.h
+++ b/app/test-pmd/macswap_sse.h
@@ -10,11 +10,12 @@ static inline void
 do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
 		struct rte_port *txp)
 {
-	struct ether_hdr *eth_hdr;
-	struct rte_mbuf *mb;
+	struct ether_hdr *eth_hdr[4];
+	struct rte_mbuf *mb[4];
 	uint64_t ol_flags;
 	int i;
-	__m128i addr;
+	int r;
+	__m128i addr0, addr1, addr2, addr3;
 	__m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
 					5, 4, 3, 2,
 					1, 0, 11, 10,
@@ -22,19 +23,61 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
 
 	ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
 
-	for (i = 0; i < nb; i++) {
-		if (likely(i < nb - 1))
+	i = 0;
+	r = nb;
+
+	while (r >= 4) {
+		mb[0] = pkts[i++];
+		eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct ether_hdr *);
+		addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+
+		mb[1] = pkts[i++];
+		eth_hdr[1] = rte_pktmbuf_mtod(mb[1], struct ether_hdr *);
+		addr1 = _mm_loadu_si128((__m128i *)eth_hdr[1]);
+
+
+		mb[2] = pkts[i++];
+		eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct ether_hdr *);
+		addr2 = _mm_loadu_si128((__m128i *)eth_hdr[2]);
+
+		mb[3] = pkts[i++];
+		eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct ether_hdr *);
+		addr3 = _mm_loadu_si128((__m128i *)eth_hdr[3]);
+
+		addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
+		addr1 = _mm_shuffle_epi8(addr1, shfl_msk);
+		addr2 = _mm_shuffle_epi8(addr2, shfl_msk);
+		addr3 = _mm_shuffle_epi8(addr3, shfl_msk);
+
+		_mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
+		_mm_storeu_si128((__m128i *)eth_hdr[1], addr1);
+		_mm_storeu_si128((__m128i *)eth_hdr[2], addr2);
+		_mm_storeu_si128((__m128i *)eth_hdr[3], addr3);
+
+		mbuf_field_set(mb[0], ol_flags, txp->tx_vlan_id,
+				txp->tx_vlan_id_outer);
+		mbuf_field_set(mb[1], ol_flags, txp->tx_vlan_id,
+				txp->tx_vlan_id_outer);
+		mbuf_field_set(mb[2], ol_flags, txp->tx_vlan_id,
+				txp->tx_vlan_id_outer);
+		mbuf_field_set(mb[3], ol_flags, txp->tx_vlan_id,
+				txp->tx_vlan_id_outer);
+		r -= 4;
+	}
+
+	for ( ; i < nb; i++) {
+		if (i < nb - 1)
 			rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *));
-		mb = pkts[i];
+		mb[0] = pkts[i];
 
-		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+		eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct ether_hdr *);
 
 		/* Swap dest and src mac addresses. */
-		addr = _mm_loadu_si128((__m128i *)eth_hdr);
-		addr = _mm_shuffle_epi8(addr, shfl_msk);
-		_mm_storeu_si128((__m128i *)eth_hdr, addr);
+		addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+		addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
+		_mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
 
-		mbuf_field_set(mb, ol_flags, txp->tx_vlan_id,
+		mbuf_field_set(mb[0], ol_flags, txp->tx_vlan_id,
 				txp->tx_vlan_id_outer);
 	}
 }
-- 
2.13.6