Add non-temporal stores for a few WQE fields to optimize the
data path. Define RTE_LIBRTE_MLX5_NT_STORE in the build
configuration to enable this optimization.

Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
---
 drivers/net/mlx5/meson.build     |   1 +
 drivers/net/mlx5/mlx5.c          |  17 ++
 drivers/net/mlx5/mlx5.h          |   4 +
 drivers/net/mlx5/mlx5_rxq.c      |   3 +
 drivers/net/mlx5/mlx5_rxtx.c     | 322 ++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.h     |   6 +
 drivers/net/mlx5/mlx5_rxtx_vec.h |  29 ++-
 drivers/net/mlx5/mlx5_txq.c      |   3 +
 meson_options.txt                |   2 +
 9 files changed, 378 insertions(+), 9 deletions(-)

diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 38e93fdc1..347ca6527 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -48,6 +48,7 @@ foreach option:cflags_options
        endif
 endforeach
 dpdk_conf.set('RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY', 
get_option('mlx5_ntload_tstore'))
+dpdk_conf.set('RTE_LIBRTE_MLX5_NT_STORE', get_option('mlx5_ntstore'))
 if get_option('buildtype').contains('debug')
        cflags += [ '-pedantic', '-DPEDANTIC' ]
 else
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a2796eaa5..01b25a109 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -164,6 +164,13 @@
 /* mprq_tstore_memcpy */
 #define MLX5_MPRQ_TSTORE_MEMCPY "mprq_tstore_memcpy"
 #endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+/* tx_wqe_field_ntstore */
+#define MLX5_TX_WQE_FIELD_NTSTORE "tx_wqe_field_ntstore"
+
+/* vec_rx_wqe_field_ntstore */
+#define MLX5_VEC_RX_WQE_FIELD_NTSTORE "vec_rx_wqe_field_ntstore"
+#endif
 
 /*
  * Device parameter to configure the total data buffer size for a single
@@ -1631,6 +1638,12 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
 #ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
        } else if (strcmp(MLX5_MPRQ_TSTORE_MEMCPY, key) == 0) {
                config->mprq_tstore_memcpy = tmp;
+#endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       } else if (strcmp(MLX5_TX_WQE_FIELD_NTSTORE, key) == 0) {
+               config->tx_wqe_field_ntstore = tmp;
+       } else if (strcmp(MLX5_VEC_RX_WQE_FIELD_NTSTORE, key) == 0) {
+               config->vec_rx_wqe_field_ntstore = tmp;
 #endif
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
@@ -1694,6 +1707,10 @@ mlx5_args(struct mlx5_dev_config *config, struct 
rte_devargs *devargs)
                MLX5_DECAP_EN,
 #ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
                MLX5_MPRQ_TSTORE_MEMCPY,
+#endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               MLX5_TX_WQE_FIELD_NTSTORE,
+               MLX5_VEC_RX_WQE_FIELD_NTSTORE,
 #endif
                NULL,
        };
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1eb305650..9d192465f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -237,6 +237,10 @@ struct mlx5_dev_config {
 #ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
        unsigned int mprq_tstore_memcpy:1;
 #endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       unsigned int tx_wqe_field_ntstore:1;
+       unsigned int vec_rx_wqe_field_ntstore:1;
+#endif
 };
 
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index c8db59a12..69ad9ab8c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1382,6 +1382,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, 
uint16_t desc,
                tmpl->irq = 1;
 #ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
        tmpl->rxq.mprq_tstore_memcpy = config->mprq_tstore_memcpy;
+#endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       tmpl->rxq.vec_rx_wqe_field_ntstore = config->vec_rx_wqe_field_ntstore;
 #endif
        mprq_stride_nums = config->mprq.stride_num_n ?
                config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index f59e30d82..76bf20b6f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -214,6 +214,301 @@ static void *memcpy_aligned_rx_tstore_16B(void *dst, void 
*src, int len)
 }
 #endif
 
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+static void *amd_memcpy(void *dest, const void *src, size_t size)
+{
+       asm goto (
+       "movq   %0, %%rsi\n\t"
+       "movq   %1, %%rdi\n\t"
+       "movq   %2, %%rdx\n\t"
+       "movq    %%rdi, %%rax\n\t"
+       "cmp     $32, %%rdx\n\t"
+       "jb      less_vec\n\t"
+       "cmp     $(32 * 2), %%rdx\n\t"
+       "ja      more_2x_vec\n\t"
+       "vmovdqu   (%%rsi), %%ymm0\n\t"
+       "vmovdqu   -32(%%rsi,%%rdx), %%ymm1\n\t"
+       "vmovdqu   %%ymm0, (%%rdi)\n\t"
+       "vmovdqu   %%ymm1, -32(%%rdi,%%rdx)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+       "less_vec:\n\t"
+       /* Less than 1 VEC.  */
+       "cmpb    $32, %%dl\n\t"
+       "jae     between_32_63\n\t"
+       "cmpb    $16, %%dl\n\t"
+       "jae     between_16_31\n\t"
+       "cmpb    $8, %%dl\n\t"
+       "jae     between_8_15\n\t"
+       "cmpb    $4, %%dl\n\t"
+       "jae     between_4_7\n\t"
+       "cmpb    $1, %%dl\n\t"
+       "ja      between_2_3\n\t"
+       "jb      1f\n\t"
+       "movzbl  (%%rsi), %%ecx\n\t"
+       "movb    %%cl, (%%rdi)\n\t"
+       "1:\n\t"
+       "jmp %l[done]\n\t"
+       "between_32_63:\n\t"
+       /* From 32 to 63.  No branch when size == 32.  */
+       "vmovdqu (%%rsi), %%ymm0\n\t"
+       "vmovdqu -32(%%rsi,%%rdx), %%ymm1\n\t"
+       "vmovdqu %%ymm0, (%%rdi)\n\t"
+       "vmovdqu %%ymm1, -32(%%rdi,%%rdx)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+       /* From 16 to 31.  No branch when size == 16.  */
+       "between_16_31:\n\t"
+       "vmovdqu (%%rsi), %%xmm0\n\t"
+       "vmovdqu -16(%%rsi,%%rdx), %%xmm1\n\t"
+       "vmovdqu %%xmm0, (%%rdi)\n\t"
+       "vmovdqu %%xmm1, -16(%%rdi,%%rdx)\n\t"
+       "jmp %l[done]\n\t"
+       "between_8_15:\n\t"
+       /* From 8 to 15.  No branch when size == 8.  */
+       "movq    -8(%%rsi,%%rdx), %%rcx\n\t"
+       "movq    (%%rsi), %%rsi\n\t"
+       "movq    %%rcx, -8(%%rdi,%%rdx)\n\t"
+       "movq    %%rsi, (%%rdi)\n\t"
+       "jmp %l[done]\n\t"
+       "between_4_7:\n\t"
+       /* From 4 to 7.  No branch when size == 4.  */
+       "movl    -4(%%rsi,%%rdx), %%ecx\n\t"
+       "movl    (%%rsi), %%esi\n\t"
+       "movl    %%ecx, -4(%%rdi,%%rdx)\n\t"
+       "movl    %%esi, (%%rdi)\n\t"
+       "jmp %l[done]\n\t"
+       "between_2_3:\n\t"
+       /* From 2 to 3.  No branch when size == 2.  */
+       "movzwl  -2(%%rsi,%%rdx), %%ecx\n\t"
+       "movzwl  (%%rsi), %%esi\n\t"
+       "movw    %%cx, -2(%%rdi,%%rdx)\n\t"
+       "movw    %%si, (%%rdi)\n\t"
+       "jmp %l[done]\n\t"
+       "more_2x_vec:\n\t"
+       /* More than 2 * VEC and there may be overlap between destination */
+       /* and source.  */
+       "cmpq    $(32 * 8), %%rdx\n\t"
+       "ja      more_8x_vec\n\t"
+       "cmpq    $(32 * 4), %%rdx\n\t"
+       "jb      last_4x_vec\n\t"
+       /* Copy from 4 * VEC to 8 * VEC, inclusively. */
+       "vmovdqu   (%%rsi), %%ymm0\n\t"
+       "vmovdqu   32(%%rsi), %%ymm1\n\t"
+       "vmovdqu   (32 * 2)(%%rsi), %%ymm2\n\t"
+       "vmovdqu   (32 * 3)(%%rsi), %%ymm3\n\t"
+       "vmovdqu   -32(%%rsi,%%rdx), %%ymm4\n\t"
+       "vmovdqu   -(32 * 2)(%%rsi,%%rdx), %%ymm5\n\t"
+       "vmovdqu   -(32 * 3)(%%rsi,%%rdx), %%ymm6\n\t"
+       "vmovdqu   -(32 * 4)(%%rsi,%%rdx), %%ymm7\n\t"
+       "vmovdqu   %%ymm0, (%%rdi)\n\t"
+       "vmovdqu   %%ymm1, 32(%%rdi)\n\t"
+       "vmovdqu   %%ymm2, (32 * 2)(%%rdi)\n\t"
+       "vmovdqu   %%ymm3, (32 * 3)(%%rdi)\n\t"
+       "vmovdqu   %%ymm4, -32(%%rdi,%%rdx)\n\t"
+       "vmovdqu   %%ymm5, -(32 * 2)(%%rdi,%%rdx)\n\t"
+       "vmovdqu   %%ymm6, -(32 * 3)(%%rdi,%%rdx)\n\t"
+       "vmovdqu   %%ymm7, -(32 * 4)(%%rdi,%%rdx)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+       "last_4x_vec:\n\t"
+       /* Copy from 2 * VEC to 4 * VEC. */
+       "vmovdqu   (%%rsi), %%ymm0\n\t"
+       "vmovdqu   32(%%rsi), %%ymm1\n\t"
+       "vmovdqu   -32(%%rsi,%%rdx), %%ymm2\n\t"
+       "vmovdqu   -(32 * 2)(%%rsi,%%rdx), %%ymm3\n\t"
+       "vmovdqu   %%ymm0, (%%rdi)\n\t"
+       "vmovdqu   %%ymm1, 32(%%rdi)\n\t"
+       "vmovdqu   %%ymm2, -32(%%rdi,%%rdx)\n\t"
+       "vmovdqu   %%ymm3, -(32 * 2)(%%rdi,%%rdx)\n\t"
+       "vzeroupper\n\t"
+       "nop:\n\t"
+       "jmp %l[done]\n\t"
+       "more_8x_vec:\n\t"
+       "cmpq    %%rsi, %%rdi\n\t"
+       "ja      more_8x_vec_backward\n\t"
+       /* Source == destination is less common.  */
+       "je      nop\n\t"
+       /* Load the first VEC and last 4 * VEC to support overlapping 
addresses.  */
+       "vmovdqu   (%%rsi), %%ymm4\n\t"
+       "vmovdqu   -32(%%rsi, %%rdx), %%ymm5\n\t"
+       "vmovdqu   -(32 * 2)(%%rsi, %%rdx), %%ymm6\n\t"
+       "vmovdqu   -(32 * 3)(%%rsi, %%rdx), %%ymm7\n\t"
+       "vmovdqu   -(32 * 4)(%%rsi, %%rdx), %%ymm8\n\t"
+       /* Save start and stop of the destination buffer.  */
+       "movq    %%rdi, %%r11\n\t"
+       "leaq    -32(%%rdi, %%rdx), %%rcx\n\t"
+       /* Align destination for aligned stores in the loop.  Compute */
+       /* how much destination is misaligned.  */
+       "movq    %%rdi, %%r8\n\t"
+       "andq    $(32 - 1), %%r8\n\t"
+       /* Get the negative of offset for alignment.  */
+       "subq    $32, %%r8\n\t"
+       /* Adjust source.  */
+       "subq    %%r8, %%rsi\n\t"
+       /* Adjust destination which should be aligned now.  */
+       "subq    %%r8, %%rdi\n\t"
+       /* Adjust length.  */
+       "addq    %%r8, %%rdx\n\t"
+       /* Check non-temporal store threshold.  */
+       "cmpq    $(1024*1024), %%rdx\n\t"
+       "ja      large_forward\n\t"
+       "loop_4x_vec_forward:\n\t"
+       /* Copy 4 * VEC a time forward.  */
+       "vmovdqu   (%%rsi), %%ymm0\n\t"
+       "vmovdqu   32(%%rsi), %%ymm1\n\t"
+       "vmovdqu   (32 * 2)(%%rsi), %%ymm2\n\t"
+       "vmovdqu   (32 * 3)(%%rsi), %%ymm3\n\t"
+       "addq    $(32 * 4), %%rsi\n\t"
+       "subq    $(32 * 4), %%rdx\n\t"
+       "vmovdqa   %%ymm0, (%%rdi)\n\t"
+       "vmovdqa   %%ymm1, 32(%%rdi)\n\t"
+       "vmovdqa   %%ymm2, (32 * 2)(%%rdi)\n\t"
+       "vmovdqa   %%ymm3, (32 * 3)(%%rdi)\n\t"
+       "addq    $(32 * 4), %%rdi\n\t"
+       "cmpq    $(32 * 4), %%rdx\n\t"
+       "ja      loop_4x_vec_forward\n\t"
+       /* Store the last 4 * VEC.  */
+       "vmovdqu   %%ymm5, (%%rcx)\n\t"
+       "vmovdqu   %%ymm6, -32(%%rcx)\n\t"
+       "vmovdqu   %%ymm7, -(32 * 2)(%%rcx)\n\t"
+       "vmovdqu   %%ymm8, -(32 * 3)(%%rcx)\n\t"
+       /* Store the first VEC.  */
+       "vmovdqu   %%ymm4, (%%r11)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+       "more_8x_vec_backward:\n\t"
+       /* Load the first 4*VEC and last VEC to support overlapping addresses.*/
+       "vmovdqu   (%%rsi), %%ymm4\n\t"
+       "vmovdqu   32(%%rsi), %%ymm5\n\t"
+       "vmovdqu   (32 * 2)(%%rsi), %%ymm6\n\t"
+       "vmovdqu   (32 * 3)(%%rsi), %%ymm7\n\t"
+       "vmovdqu   -32(%%rsi,%%rdx), %%ymm8\n\t"
+       /* Save stop of the destination buffer.  */
+       "leaq    -32(%%rdi, %%rdx), %%r11\n\t"
+       /* Align destination end for aligned stores in the loop.  Compute */
+       /* how much destination end is misaligned.  */
+       "leaq    -32(%%rsi, %%rdx), %%rcx\n\t"
+       "movq    %%r11, %%r9\n\t"
+       "movq    %%r11, %%r8\n\t"
+       "andq    $(32 - 1), %%r8\n\t"
+       /* Adjust source.  */
+       "subq    %%r8, %%rcx\n\t"
+       /* Adjust the end of destination which should be aligned now.  */
+       "subq    %%r8, %%r9\n\t"
+       /* Adjust length.  */
+       "subq    %%r8, %%rdx\n\t"
+        /* Check non-temporal store threshold.  */
+       "cmpq    $(1024*1024), %%rdx\n\t"
+       "ja      large_backward\n\t"
+       "loop_4x_vec_backward:\n\t"
+       /* Copy 4 * VEC a time backward.  */
+       "vmovdqu   (%%rcx), %%ymm0\n\t"
+       "vmovdqu   -32(%%rcx), %%ymm1\n\t"
+       "vmovdqu   -(32 * 2)(%%rcx), %%ymm2\n\t"
+       "vmovdqu   -(32 * 3)(%%rcx), %%ymm3\n\t"
+       "subq    $(32 * 4), %%rcx\n\t"
+       "subq    $(32 * 4), %%rdx\n\t"
+       "vmovdqa   %%ymm0, (%%r9)\n\t"
+       "vmovdqa   %%ymm1, -32(%%r9)\n\t"
+       "vmovdqa   %%ymm2, -(32 * 2)(%%r9)\n\t"
+       "vmovdqa   %%ymm3, -(32 * 3)(%%r9)\n\t"
+       "subq    $(32 * 4), %%r9\n\t"
+       "cmpq    $(32 * 4), %%rdx\n\t"
+       "ja      loop_4x_vec_backward\n\t"
+       /* Store the first 4 * VEC. */
+       "vmovdqu   %%ymm4, (%%rdi)\n\t"
+       "vmovdqu   %%ymm5, 32(%%rdi)\n\t"
+       "vmovdqu   %%ymm6, (32 * 2)(%%rdi)\n\t"
+       "vmovdqu   %%ymm7, (32 * 3)(%%rdi)\n\t"
+       /* Store the last VEC. */
+       "vmovdqu   %%ymm8, (%%r11)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+
+       "large_forward:\n\t"
+       /* Don't use non-temporal store if there is overlap between */
+       /* destination and source since destination may be in cache */
+       /* when source is loaded. */
+       "leaq    (%%rdi, %%rdx), %%r10\n\t"
+       "cmpq    %%r10, %%rsi\n\t"
+       "jb      loop_4x_vec_forward\n\t"
+       "loop_large_forward:\n\t"
+       /* Copy 4 * VEC a time forward with non-temporal stores.  */
+       "prefetcht0 (32*4*2)(%%rsi)\n\t"
+       "prefetcht0 (32*4*2 + 64)(%%rsi)\n\t"
+       "prefetcht0 (32*4*3)(%%rsi)\n\t"
+       "prefetcht0 (32*4*3 + 64)(%%rsi)\n\t"
+       "vmovdqu   (%%rsi), %%ymm0\n\t"
+       "vmovdqu   32(%%rsi), %%ymm1\n\t"
+       "vmovdqu   (32 * 2)(%%rsi), %%ymm2\n\t"
+       "vmovdqu   (32 * 3)(%%rsi), %%ymm3\n\t"
+       "addq    $(32*4), %%rsi\n\t"
+       "subq    $(32*4), %%rdx\n\t"
+       "vmovntdq  %%ymm0, (%%rdi)\n\t"
+       "vmovntdq  %%ymm1, 32(%%rdi)\n\t"
+       "vmovntdq  %%ymm2, (32 * 2)(%%rdi)\n\t"
+       "vmovntdq  %%ymm3, (32 * 3)(%%rdi)\n\t"
+       "addq    $(32*4), %%rdi\n\t"
+       "cmpq    $(32*4), %%rdx\n\t"
+       "ja      loop_large_forward\n\t"
+       "sfence\n\t"
+       /* Store the last 4 * VEC.  */
+       "vmovdqu   %%ymm5, (%%rcx)\n\t"
+       "vmovdqu   %%ymm6, -32(%%rcx)\n\t"
+       "vmovdqu   %%ymm7, -(32 * 2)(%%rcx)\n\t"
+       "vmovdqu   %%ymm8, -(32 * 3)(%%rcx)\n\t"
+       /* Store the first VEC.  */
+       "vmovdqu   %%ymm4, (%%r11)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]\n\t"
+       "large_backward:\n\t"
+       /* Don't use non-temporal store if there is overlap between */
+       /* destination and source since destination may be in cache */
+       /* when source is loaded.  */
+       "leaq    (%%rcx, %%rdx), %%r10\n\t"
+       "cmpq    %%r10, %%r9\n\t"
+       "jb      loop_4x_vec_backward\n\t"
+       "loop_large_backward:\n\t"
+       /* Copy 4 * VEC a time backward with non-temporal stores. */
+       "prefetcht0 (-32 * 4 * 2)(%%rcx)\n\t"
+       "prefetcht0 (-32 * 4 * 2 - 64)(%%rcx)\n\t"
+       "prefetcht0 (-32 * 4 * 3)(%%rcx)\n\t"
+       "prefetcht0 (-32 * 4 * 3 - 64)(%%rcx)\n\t"
+       "vmovdqu   (%%rcx), %%ymm0\n\t"
+       "vmovdqu   -32(%%rcx), %%ymm1\n\t"
+       "vmovdqu   -(32 * 2)(%%rcx), %%ymm2\n\t"
+       "vmovdqu   -(32 * 3)(%%rcx), %%ymm3\n\t"
+       "subq    $(32*4), %%rcx\n\t"
+       "subq    $(32*4), %%rdx\n\t"
+       "vmovntdq  %%ymm0, (%%r9)\n\t"
+       "vmovntdq  %%ymm1, -32(%%r9)\n\t"
+       "vmovntdq  %%ymm2, -(32 * 2)(%%r9)\n\t"
+       "vmovntdq  %%ymm3, -(32 * 3)(%%r9)\n\t"
+       "subq    $(32 * 4), %%r9\n\t"
+       "cmpq    $(32 * 4), %%rdx\n\t"
+       "ja      loop_large_backward\n\t"
+       "sfence\n\t"
+       /* Store the first 4 * VEC.  */
+       "vmovdqu   %%ymm4, (%%rdi)\n\t"
+       "vmovdqu   %%ymm5, 32(%%rdi)\n\t"
+       "vmovdqu   %%ymm6, (32 * 2)(%%rdi)\n\t"
+       "vmovdqu   %%ymm7, (32 * 3)(%%rdi)\n\t"
+       /* Store the last VEC.  */
+       "vmovdqu   %%ymm8, (%%r11)\n\t"
+       "vzeroupper\n\t"
+       "jmp %l[done]"
+       :
+       : "r"(src), "r"(dest), "r"(size)
+       : "rax", "rcx", "rdx", "rdi", "rsi", "r8", "r9", "r10", "r11", "r12", 
"ymm0",
+       "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "memory"
+       : done
+       );
+done:
+       return dest;
+}
+#endif
+
 /**
  * Build a table to translate Rx completion flags to packet type.
  *
@@ -2419,6 +2714,9 @@ mlx5_tx_request_completion(struct mlx5_txq_data 
*__rte_restrict txq,
 {
        uint16_t head = txq->elts_head;
        unsigned int part;
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       register uint32_t flags;
+#endif
 
        part = MLX5_TXOFF_CONFIG(INLINE) ?
               0 : loc->pkts_sent - loc->pkts_copy;
@@ -2432,9 +2730,20 @@ mlx5_tx_request_completion(struct mlx5_txq_data 
*__rte_restrict txq,
                txq->elts_comp = head;
                if (MLX5_TXOFF_CONFIG(INLINE))
                        txq->wqe_comp = txq->wqe_ci;
-               /* Request unconditional completion on last WQE. */
-               last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
-                                           MLX5_COMP_MODE_OFFSET);
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               if (txq->tx_wqe_field_ntstore) {
+                       flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+                                       MLX5_COMP_MODE_OFFSET);
+                       _mm_stream_si32(((void *)(uintptr_t)&last->cseg.flags),
+                                       flags);
+               } else {
+#endif
+                       /* Request unconditional completion on last WQE. */
+                       last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+                                       MLX5_COMP_MODE_OFFSET);
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               }
+#endif
                /* Save elts_head in dedicated free on completion queue. */
 #ifdef RTE_LIBRTE_MLX5_DEBUG
                txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
@@ -3162,7 +3471,12 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict 
txq,
        part = (uint8_t *)txq->wqes_end - pdst;
        part = RTE_MIN(part, len);
        do {
-               rte_memcpy(pdst, buf, part);
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               if (likely(txq->tx_wqe_field_ntstore))
+                       amd_memcpy(pdst, buf, part);
+               else
+#endif
+                       rte_memcpy(pdst, buf, part);
                len -= part;
                if (likely(!len)) {
                        pdst += part;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index a8ea1a795..f1e59a881 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -156,6 +156,9 @@ struct mlx5_rxq_data {
 #ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
        unsigned int mprq_tstore_memcpy:1;
 #endif
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       unsigned int vec_rx_wqe_field_ntstore:1;
+#endif
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -256,6 +259,9 @@ struct mlx5_txq_data {
        int32_t ts_offset; /* Timestamp field dynamic offset. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
        struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       unsigned int tx_wqe_field_ntstore:1;
+#endif
 #ifndef RTE_ARCH_64
        rte_spinlock_t *uar_lock;
        /* UAR access lock required for 32bit implementations */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index a8d6c4f41..413f863ba 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -86,6 +86,10 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, 
uint16_t n)
        volatile struct mlx5_wqe_data_seg *wq =
                &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
        unsigned int i;
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       register uint64_t buf_addr2;
+       register uint32_t lkey_t;
+#endif
 
        MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
        MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
@@ -107,11 +111,26 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, 
uint16_t n)
                 * impact the performance.
                 */
                buf_addr = elts[i]->buf_addr;
-               wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
-                                             RTE_PKTMBUF_HEADROOM);
-               /* If there's only one MR, no need to replace LKey in WQE. */
-               if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
-                       wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               if (rxq->vec_rx_wqe_field_ntstore) {
+                       buf_addr2 = 
(uint64_t)rte_cpu_to_be_64((uintptr_t)buf_addr +
+                                                              
RTE_PKTMBUF_HEADROOM);
+                       _mm_stream_si64(((void *)(uintptr_t)&wq[i].addr), 
buf_addr2);
+                       /* If there's only one MR, no need to replace LKey in 
WQE. */
+                       if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) 
> 1)) {
+                               lkey_t = (uint32_t)mlx5_rx_mb2mr(rxq, elts[i]);
+                               _mm_stream_si32(((void 
*)(uintptr_t)&wq[i].lkey), lkey_t);
+                       }
+               } else {
+#endif
+                       wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
+                                       RTE_PKTMBUF_HEADROOM);
+                       /* If there's only one MR, no need to replace LKey in 
WQE. */
+                       if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) 
> 1))
+                               wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+               }
+#endif
        }
        rxq->rq_ci += n;
        /* Prevent overflowing into consumed mbufs. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1bb667d46..cba675f53 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1565,6 +1565,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                                            DEV_TX_OFFLOAD_UDP_TNL_TSO);
        bool vlan_inline;
        unsigned int temp;
+#ifdef RTE_LIBRTE_MLX5_NT_STORE
+       txq_ctrl->txq.tx_wqe_field_ntstore = config->tx_wqe_field_ntstore;
+#endif
 
        if (config->txqs_inline == MLX5_ARG_UNSET)
                txqs_inline =
diff --git a/meson_options.txt b/meson_options.txt
index a4bc565d2..21c31d57b 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -32,6 +32,8 @@ option('max_numa_nodes', type: 'integer', value: 4,
        description: 'maximum number of NUMA nodes supported by EAL')
 option('mlx5_ntload_tstore', type: 'boolean', value: false,
        description: 'to enable optimized MPRQ in RX datapath')
+option('mlx5_ntstore', type: 'boolean', value: false,
+       description: 'to enable optimized MLX5 TX/RX datapath')
 option('enable_trace_fp', type: 'boolean', value: false,
        description: 'enable fast path trace points.')
 option('tests', type: 'boolean', value: true,
-- 
2.25.1

Reply via email to