[dpdk-dev] [PATCH v4 1/3] test/ring: ring perf test case enhancement

2019-01-01 Thread Gavin Hu
From: Joyce Kong 

Run ring perf test on all available cores to really verify MPMC operations.
The old way of running on a pair of cores is not enough for MPMC rings. We
used this test case for ring optimization and it was really helpful for
measuring the ring performance in multi-core environment.

Suggested-by: Gavin Hu 
Signed-off-by: Joyce Kong 
Reviewed-by: Ruifeng Wang 
Reviewed-by: Honnappa Nagarahalli 
Reviewed-by: Dharmik Thakkar 
Reviewed-by: Ola Liljedahl 
Reviewed-by: Gavin Hu 
---
 test/test/test_ring_perf.c | 82 --
 1 file changed, 79 insertions(+), 3 deletions(-)

diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index ebb3939..01c6937 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -9,7 +9,7 @@
 #include 
 #include 
 #include 
-
+#include 
 #include "test.h"
 
 /*
@@ -20,6 +20,7 @@
  *  * Empty ring dequeue
  *  * Enqueue/dequeue of bursts in 1 threads
  *  * Enqueue/dequeue of bursts in 2 threads
+ *  * Enqueue/dequeue of bursts in all available threads
  */
 
 #define RING_NAME "RING_PERF"
@@ -248,9 +249,80 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring 
*r,
}
 }
 
+static rte_atomic32_t synchro;
+static uint64_t queue_count[RTE_MAX_LCORE];
+
+#define TIME_MS 100
+
+static int
+load_loop_fn(void *p)
+{
+   uint64_t time_diff = 0;
+   uint64_t begin = 0;
+   uint64_t hz = rte_get_timer_hz();
+   uint64_t lcount = 0;
+   const unsigned int lcore = rte_lcore_id();
+   struct thread_params *params = p;
+   void *burst[MAX_BURST] = {0};
+
+   /* wait synchro for slaves */
+   if (lcore != rte_get_master_lcore())
+   while (rte_atomic32_read(&synchro) == 0)
+   rte_pause();
+
+   begin = rte_get_timer_cycles();
+   while (time_diff < hz * TIME_MS / 1000) {
+   rte_ring_mp_enqueue_bulk(params->r, burst, params->size, NULL);
+   rte_ring_mc_dequeue_bulk(params->r, burst, params->size, NULL);
+   lcount++;
+   time_diff = rte_get_timer_cycles() - begin;
+   }
+   queue_count[lcore] = lcount;
+   return 0;
+}
+
+static int
+run_on_all_cores(struct rte_ring *r)
+{
+   uint64_t total = 0;
+   unsigned int i, c;
+   struct thread_params param;
+
+   memset(¶m, 0, sizeof(struct thread_params));
+   for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
+   printf("\nBulk enq/dequeue count on size %u\n", bulk_sizes[i]);
+   param.size = bulk_sizes[i];
+   param.r = r;
+
+   /* clear synchro and start slaves */
+   rte_atomic32_set(&synchro, 0);
+   if (rte_eal_mp_remote_launch(load_loop_fn,
+   ¶m, SKIP_MASTER) < 0)
+   return -1;
+
+   /* start synchro and launch test on master */
+   rte_atomic32_set(&synchro, 1);
+   load_loop_fn(¶m);
+
+   rte_eal_mp_wait_lcore();
+
+   RTE_LCORE_FOREACH(c) {
+   printf("Core [%u] count = %"PRIu64"\n",
+   c, queue_count[c]);
+   total += queue_count[c];
+   }
+
+   printf("Total count (size: %u): %"PRIu64"\n", bulk_sizes[i],
+   total);
+   }
+
+   return 0;
+}
+
 /*
- * Test function that determines how long an enqueue + dequeue of a single item
- * takes on a single lcore. Result is for comparison with the bulk enq+deq.
+ * Test function that determines how long an enqueue + dequeue of a single
+ * item takes on a single lcore. Result is for comparison with the bulk
+ * enq+deq.
  */
 static void
 test_single_enqueue_dequeue(struct rte_ring *r)
@@ -394,6 +466,10 @@ test_ring_perf(void)
printf("\n### Testing using two NUMA nodes ###\n");
run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
}
+
+   printf("\n### Testing using all slave nodes ###\n");
+   run_on_all_cores(r);
+
rte_ring_free(r);
return 0;
 }
-- 
2.7.4



[dpdk-dev] [PATCH v4 2/3] ring: add reset api to flush the ring when not in use

2019-01-01 Thread Gavin Hu
Currently, the flush is done by dequeuing the ring in a while loop. It is
much simpler to flush the queue by resetting the head and tail indices.

Signed-off-by: Gavin Hu 
Reviewed-by: Ruifeng Wang 
Reviewed-by: Honnappa Nagarahalli 
---
 lib/librte_ring/rte_ring.h   | 20 
 lib/librte_ring/rte_ring_version.map |  7 +++
 2 files changed, 27 insertions(+)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index af5444a..2830300 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -671,6 +671,26 @@ rte_ring_dequeue(struct rte_ring *r, void **obj_p)
 }
 
 /**
+ * Flush a ring.
+ *
+ * This function flush all the elements in a ring
+ *
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * @warning
+ * Make sure the ring is not in use while calling this function.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ */
+static inline void __rte_experimental
+rte_ring_reset(struct rte_ring *r)
+{
+   r->prod.head = r->cons.head = 0;
+   r->prod.tail = r->cons.tail = 0;
+}
+
+/**
  * Return the number of entries in a ring.
  *
  * @param r
diff --git a/lib/librte_ring/rte_ring_version.map 
b/lib/librte_ring/rte_ring_version.map
index d935efd..581d9ca 100644
--- a/lib/librte_ring/rte_ring_version.map
+++ b/lib/librte_ring/rte_ring_version.map
@@ -17,3 +17,10 @@ DPDK_2.2 {
rte_ring_free;
 
 } DPDK_2.0;
+
+EXPERIMENTAL {
+global:
+
+   rte_ring_reset;
+
+};
-- 
2.7.4



[dpdk-dev] [PATCH v4 3/3] hash: flush the rings instead of dequeuing one by one

2019-01-01 Thread Gavin Hu
Within rte_hash_reset, using a while loop to dequeue entries one by
one from the ring, while not using them at all, wastes cycles.
This patch simply flushes the ring by resetting the head and tail
indices, which saves CPU cycles.

Fixes: b26473ff8f4a ("hash: add reset function")
Fixes: 75706568a7eb ("hash: add extendable bucket feature")
Cc: sta...@dpdk.org

Signed-off-by: Gavin Hu 
Reviewed-by: Honnappa Nagarahalli 
---
 lib/librte_hash/Makefile  |  2 +-
 lib/librte_hash/meson.build   |  3 +++
 lib/librte_hash/rte_cuckoo_hash.c | 11 ---
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
index c8c435d..5669d83 100644
--- a/lib/librte_hash/Makefile
+++ b/lib/librte_hash/Makefile
@@ -6,7 +6,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 # library name
 LIB = librte_hash.a
 
-CFLAGS += -O3
+CFLAGS += -O3 -DALLOW_EXPERIMENTAL_API
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
 LDLIBS += -lrte_eal -lrte_ring
 
diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
index efc06ed..ebf70de 100644
--- a/lib/librte_hash/meson.build
+++ b/lib/librte_hash/meson.build
@@ -14,3 +14,6 @@ headers = files('rte_cmp_arm64.h',
 
 sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c')
 deps += ['ring']
+
+# rte ring reset is not yet part of stable API
+allow_experimental_apis = true
diff --git a/lib/librte_hash/rte_cuckoo_hash.c 
b/lib/librte_hash/rte_cuckoo_hash.c
index c01489b..4b08049 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -559,7 +559,6 @@ __hash_rw_reader_unlock(const struct rte_hash *h)
 void
 rte_hash_reset(struct rte_hash *h)
 {
-   void *ptr;
uint32_t tot_ring_cnt, i;
 
if (h == NULL)
@@ -570,16 +569,14 @@ rte_hash_reset(struct rte_hash *h)
memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
*h->tbl_chng_cnt = 0;
 
-   /* clear the free ring */
-   while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
-   continue;
+   /* reset the free ring */
+   rte_ring_reset(h->free_slots);
 
-   /* clear free extendable bucket ring and memory */
+   /* flush free extendable bucket ring and memory */
if (h->ext_table_support) {
memset(h->buckets_ext, 0, h->num_buckets *
sizeof(struct rte_hash_bucket));
-   while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
-   continue;
+   rte_ring_reset(h->free_ext_bkts);
}
 
/* Repopulate the free slots ring. Entry zero is reserved for key 
misses */
-- 
2.7.4



[dpdk-dev] [PATCH v4 0/3] add rte ring reset api and use it to flush a ring by hash

2019-01-01 Thread Gavin Hu
V4: Include the ring perf test case enhancement patch in the series.

V3: Allow experimental API for meson build

V2: Fix the coding style issue(commit message line too long)

V1: To flush a ring not in use, dequeuing its entries one by one wastes cpu
cycles. The patch just resets the head and tail indices to save cpu
cycles.

Gavin Hu (2):
  ring: add reset api to flush the ring when not in use
  hash: flush the rings instead of dequeuing one by one

Joyce Kong (1):
  test/ring: ring perf test case enhancement

 lib/librte_hash/Makefile |  2 +-
 lib/librte_hash/meson.build  |  3 ++
 lib/librte_hash/rte_cuckoo_hash.c| 11 ++---
 lib/librte_ring/rte_ring.h   | 20 +
 lib/librte_ring/rte_ring_version.map |  7 +++
 test/test/test_ring_perf.c   | 82 ++--
 6 files changed, 114 insertions(+), 11 deletions(-)

-- 
2.7.4



Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter

2019-01-01 Thread Zhang, Qi Z
HI Wei:

> -Original Message-
> From: Zhao1, Wei
> Sent: Tuesday, December 25, 2018 1:45 PM
> To: dev@dpdk.org
> Cc: sta...@dpdk.org; Lu, Wenzhuo ; Zhang, Qi Z
> ; Peng, Yuan ; Zhao1, Wei
> 
> Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> There is need for users to use flexible byte filter on x550.
> x550 support IP mode and tunnel mode flexible byte filter.
> tunnel mode flexible byte filter is used for vxlan packets and so on. It can 
> be
> used combined with FDIR tunnel filter.
> By now, ixgbe PMD flow parer code do not support tunnel mode flexible byte
> filter for x550, So I have to enable it in function 
> ixgbe_parse_fdir_filter_tunnel().
> Although IP mode flexible byte filter parser is support in function
> ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0 ingress
> pattern raw pattern is 0xab / end actions queue index 3 / end" need to be
> support, so parser code also need change a little.
> This patch enable all of these feature.

I would suggest to divide this patch into 3 patches
1. more accurate input set mask setup (include all changes in ixgbe_fdir.c)
2. support flexbyte without IP layer (include changes in 
ixgbe_parse_fdir_filter_normal)
3. support flexbyte in tunnel mode (include changes in 
ixgbe_parse_fdir_flilter_tunnel)

Btw, please make sure to sync the acceptable flow pattern/action description 
above function ixgbe_parse_fdir_filter_x to their new behaviors

Thanks
Qi


Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter

2019-01-01 Thread Zhao1, Wei
Hi, qi  

> -Original Message-
> From: Zhang, Qi Z
> Sent: Wednesday, January 2, 2019 9:19 AM
> To: Zhao1, Wei ; dev@dpdk.org
> Cc: sta...@dpdk.org; Lu, Wenzhuo ; Peng, Yuan
> 
> Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> HI Wei:
> 
> > -Original Message-
> > From: Zhao1, Wei
> > Sent: Tuesday, December 25, 2018 1:45 PM
> > To: dev@dpdk.org
> > Cc: sta...@dpdk.org; Lu, Wenzhuo ; Zhang, Qi Z
> > ; Peng, Yuan ; Zhao1, Wei
> > 
> > Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> >
> > There is need for users to use flexible byte filter on x550.
> > x550 support IP mode and tunnel mode flexible byte filter.
> > tunnel mode flexible byte filter is used for vxlan packets and so on.
> > It can be used combined with FDIR tunnel filter.
> > By now, ixgbe PMD flow parer code do not support tunnel mode flexible
> > byte filter for x550, So I have to enable it in function
> ixgbe_parse_fdir_filter_tunnel().
> > Although IP mode flexible byte filter parser is support in function
> > ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0
> > ingress pattern raw pattern is 0xab / end actions queue index 3 / end"
> > need to be support, so parser code also need change a little.
> > This patch enable all of these feature.
> 
> I would suggest to divide this patch into 3 patches 1. more accurate input set
> mask setup (include all changes in ixgbe_fdir.c) 2. support flexbyte without 
> IP
> layer (include changes in ixgbe_parse_fdir_filter_normal) 3. support flexbyte
> in tunnel mode (include changes in ixgbe_parse_fdir_flilter_tunnel)
> 
> Btw, please make sure to sync the acceptable flow pattern/action description
> above function ixgbe_parse_fdir_filter_x to their new behaviors

There will be 4 patch in this set, because there is other for flow_cmdline 
parser, is that ok?

> 
> Thanks
> Qi


Re: [dpdk-dev] [PATCH] libs/power: fix the resource leaking issue

2019-01-01 Thread Yao, Lei A



> -Original Message-
> From: Ma, Liang J
> Sent: Friday, December 28, 2018 7:33 PM
> To: Hunt, David 
> Cc: dev@dpdk.org; Burakov, Anatoly ; Yao, Lei
> A ; Ma, Liang J 
> Subject: [PATCH] libs/power: fix the resource leaking issue
> 
> Fixes: e6c6dc0f96c8 ("power: add p-state driver compatibility")
> Coverity issue: 328528
> 
> Also add the missing functionality of enable/disable turbo
> 
> Signed-off-by: Liang Ma 
Reviewed-by: Lei Yao 
Tested-by: Lei Yao 
This patch has been tested based on 19.02-rc1 code. 
> ---
>  lib/librte_power/power_pstate_cpufreq.c | 34
> -
>  1 file changed, 33 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/librte_power/power_pstate_cpufreq.c
> b/lib/librte_power/power_pstate_cpufreq.c
> index 411d0eb..cb226a5 100644
> --- a/lib/librte_power/power_pstate_cpufreq.c
> +++ b/lib/librte_power/power_pstate_cpufreq.c
> @@ -160,6 +160,10 @@ power_init_for_setting_freq(struct
> pstate_power_info *pi)
>   pi->lcore_id);
> 
>   f_max = fopen(fullpath_max, "rw+");
> +
> + if (f_max == NULL)
> + fclose(f_min);
> +
>   FOPEN_OR_ERR_RET(f_max, -1);
> 
>   pi->f_cur_min = f_min;
> @@ -214,7 +218,13 @@ set_freq_internal(struct pstate_power_info *pi,
> uint32_t idx)
>   /* Turbo is available and enabled, first freq bucket is sys max freq */
>   if (pi->turbo_available && pi->turbo_enable && (idx == 0))
>   target_freq = pi->sys_max_freq;
> - else
> + else if (pi->turbo_available && (!pi->turbo_enable) && (idx == 0)) {
> +
> + RTE_LOG(ERR, POWER, "Turbo is off, frequency can't be
> scaled up more %u\n",
> + pi->lcore_id);
> + return -1;
> +
> + } else
>   target_freq = pi->freqs[idx];
> 
>   /* Decrease freq, the min freq should be updated first */
> @@ -394,6 +404,10 @@ power_get_available_freqs(struct
> pstate_power_info *pi)
>   FOPEN_OR_ERR_RET(f_min, ret);
> 
>   f_max = fopen(fullpath_max, "r");
> +
> + if (f_max == NULL)
> + fclose(f_min);
> +
>   FOPEN_OR_ERR_RET(f_max, ret);
> 
>   s_min = fgets(buf_min, sizeof(buf_min), f_min);
> @@ -726,6 +740,14 @@ power_pstate_enable_turbo(unsigned int lcore_id)
>   return -1;
>   }
> 
> + /* Max may have changed, so call to max function */
> + if (power_pstate_cpufreq_freq_max(lcore_id) < 0) {
> + RTE_LOG(ERR, POWER,
> + "Failed to set frequency of lcore %u to max\n",
> + lcore_id);
> + return -1;
> + }
> +
>   return 0;
>  }
> 
> @@ -744,6 +766,16 @@ power_pstate_disable_turbo(unsigned int lcore_id)
> 
>   pi->turbo_enable = 0;
> 
> + if ((pi->turbo_available) && (pi->curr_idx <= 1)) {
> + /* Try to set freq to max by default coming out of turbo */
> + if (power_pstate_cpufreq_freq_max(lcore_id) < 0) {
> + RTE_LOG(ERR, POWER,
> + "Failed to set frequency of lcore %u to
> max\n",
> + lcore_id);
> + return -1;
> + }
> + }
> +
> 
>   return 0;
>  }
> --
> 2.7.5



Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter

2019-01-01 Thread Zhang, Qi Z



> -Original Message-
> From: Zhao1, Wei
> Sent: Wednesday, January 2, 2019 9:54 AM
> To: Zhang, Qi Z ; dev@dpdk.org
> Cc: sta...@dpdk.org; Lu, Wenzhuo ; Peng, Yuan
> 
> Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> Hi, qi
> 
> > -Original Message-
> > From: Zhang, Qi Z
> > Sent: Wednesday, January 2, 2019 9:19 AM
> > To: Zhao1, Wei ; dev@dpdk.org
> > Cc: sta...@dpdk.org; Lu, Wenzhuo ; Peng, Yuan
> > 
> > Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte
> > filter
> >
> > HI Wei:
> >
> > > -Original Message-
> > > From: Zhao1, Wei
> > > Sent: Tuesday, December 25, 2018 1:45 PM
> > > To: dev@dpdk.org
> > > Cc: sta...@dpdk.org; Lu, Wenzhuo ; Zhang, Qi Z
> > > ; Peng, Yuan ; Zhao1, Wei
> > > 
> > > Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> > >
> > > There is need for users to use flexible byte filter on x550.
> > > x550 support IP mode and tunnel mode flexible byte filter.
> > > tunnel mode flexible byte filter is used for vxlan packets and so on.
> > > It can be used combined with FDIR tunnel filter.
> > > By now, ixgbe PMD flow parer code do not support tunnel mode
> > > flexible byte filter for x550, So I have to enable it in function
> > ixgbe_parse_fdir_filter_tunnel().
> > > Although IP mode flexible byte filter parser is support in function
> > > ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0
> > > ingress pattern raw pattern is 0xab / end actions queue index 3 / end"
> > > need to be support, so parser code also need change a little.
> > > This patch enable all of these feature.
> >
> > I would suggest to divide this patch into 3 patches 1. more accurate
> > input set mask setup (include all changes in ixgbe_fdir.c) 2. support
> > flexbyte without IP layer (include changes in
> > ixgbe_parse_fdir_filter_normal) 3. support flexbyte in tunnel mode
> > (include changes in ixgbe_parse_fdir_flilter_tunnel)
> >
> > Btw, please make sure to sync the acceptable flow pattern/action
> > description above function ixgbe_parse_fdir_filter_x to their new
> > behaviors
> 
> There will be 4 patch in this set, because there is other for flow_cmdline 
> parser,
> is that ok?

it's better to separate patch 4, it's for cmdline enhancement, not driver 
related, it can be reviewed, merged independently. 
> 
> >
> > Thanks
> > Qi


Re: [dpdk-dev] [PATCH v3]/driver/raw/ifpga_rawdev: fix a memory leak bug in ifpga

2019-01-01 Thread Zhang, Tianfei


> -Original Message-
> From: Pei, Andy
> Sent: Tuesday, December 25, 2018 10:02 PM
> To: dev@dpdk.org
> Cc: Xu, Rosen ; Zhang, Tianfei
> ; Pei, Andy 
> Subject: [PATCH v3]/driver/raw/ifpga_rawdev: fix a memory leak bug in ifpga
> 
> When ifpga_rawdev_create() allocate memory for a new rawdev, the
> original code allocate redundant memory for adapter, which is a member of
> the rawdev.
> What is actually necessary is the adapter to be initialized, not memory
> allocated.
> 
> What is different in v3 from v2 is that the adapter is no longer need to be
> freed.
> 
> fixes:ef1e8ede3da5
> cc: rosen...@intel.com
> cc: tianfei.zh...@intel.com

It looks good for me, Acked-by: Tianfei zhang 



Re: [dpdk-dev] [PATCH] net/ice: fixed CRC strip issue

2019-01-01 Thread Zhang, Qi Z



> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Qiming Yang
> Sent: Tuesday, December 25, 2018 11:21 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming 
> Subject: [dpdk-dev] [PATCH] net/ice: fixed CRC strip issue
> 
> Fixes: 50370662b727 ("net/ice: support device and queue ops")
> 
> Signed-off-by: Qiming Yang 

Acked-by: Qi Zhang 

Applied to dpdk-next-net-intel.

Thanks
Qi


[dpdk-dev] [PATCH v1] examples/l3fwd: enable hash multi lookup for ARM

2019-01-01 Thread Ruifeng Wang
The compile option for hash_multi_lookup was broken, which prevented the
feature from being enabled on Arm.
This patch sets the hash_multi_lookup method as the default, and makes
sequential lookup optional.

In test of 8192 flows with 128-byte packets, throughput increased by
25.6% after enabling hash_multi_lookup.

Fixes: 52c97adc1f0f ("examples/l3fwd: fix exact match performance")
Cc: tomaszx.kula...@intel.com

Signed-off-by: Ruifeng Wang 
Reviewed-by: Gavin Hu 
Reviewed-by: Phil Yang 
Tested-by: Ruifeng Wang 
---
 examples/l3fwd/l3fwd.h | 4 
 1 file changed, 4 deletions(-)

diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index c962deac3..063b80018 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -11,10 +11,6 @@
 
 #define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
 
-#if !defined(NO_HASH_MULTI_LOOKUP) && defined(RTE_MACHINE_CPUFLAG_NEON)
-#define NO_HASH_MULTI_LOOKUP 1
-#endif
-
 #define MAX_PKT_BURST 32
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
-- 
2.17.1



[dpdk-dev] [PATCH v6] app/testpmd: add IFPGA AFU register access function

2019-01-01 Thread Rosen Xu
Currently, the register read/write feature of testpmd is only for PCI
devices, but more and more IFPGA-based AFU devices need this feature to
access registers. This patch adds support for it.

Signed-off-by: Rosen Xu 
Acked-by: Bernard Iremonger 

v5 updates:
===
 - Added Macro to fix compile dependency of ifpga for testpmd
---
 app/test-pmd/config.c  | 253 -
 app/test-pmd/testpmd.h |  64 +
 2 files changed, 315 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index b9e5dd9..5600ef5 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -866,7 +866,50 @@ void print_valid_ports(void)
printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
return 1;
 }
+#ifdef RTE_LIBRTE_IFPGA_BUS
+static int
+port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
+{
+   const struct rte_pci_device *pci_dev;
+   const struct rte_bus *bus;
+   uint64_t len;
+   const struct rte_afu_device *afu_dev;
 
+   if (reg_off & 0x3) {
+   printf("Port register offset 0x%X not aligned on a 4-byte "
+  "boundary\n",
+  (unsigned int)reg_off);
+   return 1;
+   }
+
+   if (!ports[port_id].dev_info.device) {
+   printf("Invalid device\n");
+   return 0;
+   }
+
+   bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
+   if (bus && !strcmp(bus->name, "pci")) {
+   pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
+   len = pci_dev->mem_resource[0].len;
+   } else if (bus && !strcmp(bus->name, "ifpga")) {
+   afu_dev = RTE_DEV_TO_AFU(ports[port_id].dev_info.device);
+   len = afu_dev->mem_resource[0].len;
+   } else {
+   printf("Not a PCI or AFU device\n");
+   return 1;
+   }
+
+   if (reg_off >= len) {
+   printf("Port %d: register offset %u (0x%X) out of port "
+  "PCI or AFU device "
+  "resource (length=%"PRIu64")\n",
+  port_id, (unsigned int)reg_off,
+   (unsigned int)reg_off, len);
+   return 1;
+   }
+   return 0;
+}
+#else
 static int
 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
 {
@@ -903,7 +946,7 @@ void print_valid_ports(void)
}
return 0;
 }
-
+#endif
 static int
 reg_bit_pos_is_invalid(uint8_t bit_pos)
 {
@@ -923,6 +966,212 @@ void print_valid_ports(void)
printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
 }
 
+#ifdef RTE_LIBRTE_IFPGA_BUS
+void
+port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
+{
+   uint32_t reg_v;
+   const struct rte_bus *bus;
+
+   if (port_id_is_invalid(port_id, ENABLED_WARN))
+   return;
+   if (port_reg_off_is_invalid(port_id, reg_off))
+   return;
+   if (reg_bit_pos_is_invalid(bit_x))
+   return;
+
+   bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
+   if (bus && !strcmp(bus->name, "pci")) {
+   reg_v = port_id_pci_reg_read(port_id, reg_off);
+   } else if (bus && !strcmp(bus->name, "ifpga")) {
+   reg_v = port_id_afu_reg_read(port_id, reg_off);
+   } else {
+   printf("Not a PCI or AFU device\n");
+   return;
+   }
+   display_port_and_reg_off(port_id, (unsigned int)reg_off);
+   printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
+}
+
+void
+port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
+  uint8_t bit1_pos, uint8_t bit2_pos)
+{
+   uint32_t reg_v;
+   uint8_t  l_bit;
+   uint8_t  h_bit;
+   const struct rte_bus *bus;
+
+   if (port_id_is_invalid(port_id, ENABLED_WARN))
+   return;
+   if (port_reg_off_is_invalid(port_id, reg_off))
+   return;
+   if (reg_bit_pos_is_invalid(bit1_pos))
+   return;
+   if (reg_bit_pos_is_invalid(bit2_pos))
+   return;
+   if (bit1_pos > bit2_pos)
+   l_bit = bit2_pos, h_bit = bit1_pos;
+   else
+   l_bit = bit1_pos, h_bit = bit2_pos;
+
+   bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
+   if (bus && !strcmp(bus->name, "pci")) {
+   reg_v = port_id_pci_reg_read(port_id, reg_off);
+   } else if (bus && !strcmp(bus->name, "ifpga")) {
+   reg_v = port_id_afu_reg_read(port_id, reg_off);
+   } else {
+   printf("Not a PCI or AFU device\n");
+   return;
+   }
+   reg_v >>= l_bit;
+   if (h_bit < 31)
+   reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
+   display_port_and_reg_off(port_id, (unsigned int)reg_off);
+   printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
+  ((h_bit - l_bit) / 4) + 1, (unsigned in

Re: [dpdk-dev] [PATCH v5] app/testpmd: add IFPGA AFU register access function

2019-01-01 Thread Xu, Rosen
Hi Ferruh,

I have added Macro to identify the dependency of ifpga, and checked it ok in my 
v6 patch.
Pls review, thanks a lot.

> -Original Message-
> From: Yigit, Ferruh
> Sent: Thursday, December 20, 2018 22:23
> To: Xu, Rosen ; Iremonger, Bernard
> ; dev@dpdk.org
> Cc: Lu, Wenzhuo ; Wu, Jingjing
> 
> Subject: Re: [PATCH v5] app/testpmd: add IFPGA AFU register access function
> 
> On 12/20/2018 10:48 AM, Xu, Rosen wrote:
> > Thanks a lot Ferruh.
> 
> Sorry for the confusion Rosen, I dropped it back because of the dependency
> it creates to ifpga, and a put a comment to the patch.
> 
> Also this gives meson build error, fyi.
> 
> >
> >> -Original Message-
> >> From: Yigit, Ferruh
> >> Sent: Thursday, December 20, 2018 16:59
> >> To: Iremonger, Bernard ; Xu, Rosen
> >> ; dev@dpdk.org
> >> Cc: Lu, Wenzhuo ; Wu, Jingjing
> >> 
> >> Subject: Re: [PATCH v5] app/testpmd: add IFPGA AFU register access
> >> function
> >>
> >> On 12/18/2018 6:12 PM, Iremonger, Bernard wrote:
>  -Original Message-
>  From: Xu, Rosen
>  Sent: Tuesday, December 18, 2018 11:30 AM
>  To: dev@dpdk.org
>  Cc: Lu, Wenzhuo ; Wu, Jingjing
>  ; Iremonger, Bernard
>  ; Xu, Rosen ;
>  Yigit, Ferruh 
>  Subject: [PATCH v5] app/testpmd: add IFPGA AFU register access
>  function
> 
>  Currently register read/write of testpmd is only for PCI device,
>  but more and more IFPGA based AFU devices need this feature to
>  access registers, this patch will add support for it.
> 
>  Signed-off-by: Rosen Xu 
> >>>
> >>> Acked-by: Bernard Iremonger 
> >>>
> >>
> >> Applied to dpdk-next-net/master, thanks.



Re: [dpdk-dev] [PATCH v3]/driver/raw/ifpga_rawdev: fix a memory leak bug in ifpga

2019-01-01 Thread Xu, Rosen
Hi,

> -Original Message-
> From: Pei, Andy
> Sent: Tuesday, December 25, 2018 22:02
> To: dev@dpdk.org
> Cc: Xu, Rosen ; Zhang, Tianfei
> ; Pei, Andy 
> Subject: [PATCH v3]/driver/raw/ifpga_rawdev: fix a memory leak bug in ifpga
> 
> When ifpga_rawdev_create() allocate memory for a new rawdev, the original
> code allocate redundant memory for adapter, which is a member of the
> rawdev.
> What is actually necessary is the adapter to be initialized, not memory
> allocated.
> 
> What is different in v3 from v2 is that the adapter is no longer need to be
> freed.
> 
> fixes:ef1e8ede3da5
> cc: rosen...@intel.com
> cc: tianfei.zh...@intel.com

Acked-by: Rosen Xu 


[dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter

2019-01-01 Thread Wei Zhao
There is a need for users to use the flexible byte filter on x550.
This patch enables it.

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao 
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 -
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
/* flex byte mask */
if (info->mask.flex_bytes_mask == 0)
fdirm |= IXGBE_FDIRM_FLEX;
+   if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+   fdirm |= IXGBE_FDIRM_L3P;
 
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
/* mask VM pool and DIPv6 since there are currently not supported
 * mask FLEX byte, it will be set in flex_conf
 */
-   uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-IXGBE_FDIRM_FLEX;
+   uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
uint32_t fdiripv6m;
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
return -EINVAL;
}
 
+   /* flex byte mask */
+   if (info->mask.flex_bytes_mask == 0)
+   fdirm |= IXGBE_FDIRM_FLEX;
+
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
fdiripv6m = ((u32)0xU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
uint8_t j;
-
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
if (!pattern) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 * value. So, we need not do anything for the not provided fields later.
 */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-   memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-   rule->mask.vlan_tci_mask = 0;
-   rule->mask.flex_bytes_mask = 0;
+   memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
/**
 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-   item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+   item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+   item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
 
raw_mask = item->mask;
+   rule->b_mask = TRUE;
 
/* check mask */
if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
 
raw_spec = item->spec;
+   rule->b_spec = TRUE;
 
/* check spec */
if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr 
*attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
+   const struct rte_flow_item_raw *raw_mask;
+   const struct rte_flow_item_raw *raw_spec;
uint32_t j;
 
if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr 
*attr,
 * value. So, we need not do anything for the not provided fields later.
 */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-   memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-   rule->mask.vlan_tci_mask = 0;
+   memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
/**
 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr 
*attr,
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
it

[dpdk-dev] [PATCH] net/ixgbe: fix MAT enable for VF in multicast

2019-01-01 Thread Wei Zhao
In the ixgbe PMD code, all VFs are set with the bit IXGBE_VMOLR_ROMPE,
which makes every VF accept packets that match the MTA table.
If some VF updates IXGBE_MTA in the function ixgbe_vf_set_multicast,
then all VFs will receive packets from those addresses.
So there is a need to set the VMOLR register bit ROMPE only after this
VF has been given a multicast address. If this bit is set while the PF
host is doing initialization, this VF will receive multicast packets with
addresses written in the MTA table. Align with the ixgbe PF kernel 5.3.7
code to fix this bug.

Fixes: 00e30184daa0 ("ixgbe: add PF support")

Signed-off-by: Wei Zhao 
---
 drivers/net/ixgbe/ixgbe_pf.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 4b833ff..0f4b96b 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -351,7 +351,7 @@ ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
-   vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
+   vmolr |= (IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 
@@ -503,6 +503,7 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t 
vf, uint32_t *msgbuf)
const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
uint32_t reg_val;
int i;
+   u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
/* Disable multicast promiscuous first */
ixgbe_disable_vf_mc_promisc(dev, vf);
@@ -525,6 +526,9 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t 
vf, uint32_t *msgbuf)
IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
}
 
+   vmolr |= IXGBE_VMOLR_ROMPE;
+   IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
return 0;
 }
 
-- 
2.7.5



Re: [dpdk-dev] [PATCH v2 1/3] net/mlx5: fix shared counter allocation logic

2019-01-01 Thread Mordechay Haimovsky
Hi,
 Every counter that is created (shared or not) is added to the counters list.
Therefore every counter destined for removal (i.e. ref_count == 0) should also
be removed from this list.

What am I missing ?

Moti

> -Original Message-
> From: Slava Ovsiienko
> Sent: Saturday, December 29, 2018 10:13 PM
> To: Mordechay Haimovsky ; dev@dpdk.org
> Cc: Mordechay Haimovsky ; sta...@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/mlx5: fix shared counter
> allocation logic
> 
> Moti, don't you forget to update flow_verbs_counter_release() ?
> Only shared counters should be removed from the list.
> 
> WBR,
> Slava
> 
> > -Original Message-
> > From: dev  On Behalf Of Mordechay Haimovsky
> > Sent: Friday, December 28, 2018 0:20
> > To: dev@dpdk.org
> > Cc: Mordechay Haimovsky ; sta...@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2 1/3] net/mlx5: fix shared counter
> > allocation logic
> >
> > This commit fixes the logic for searching and allocating a shared
> > counter in mlx5_flow_verbs.
> > Now only the shared counters in the counters list are checked for a
> > match and not all the counters as before.
> >
> > Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Moti Haimovsky 
> > ---
> > v2:
> > * Modified commit header
> > ---
> >  drivers/net/mlx5/mlx5_flow_verbs.c | 14 +++---
> >  1 file changed, 7 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c
> > b/drivers/net/mlx5/mlx5_flow_verbs.c
> > index 81ec59d..409e1cd 100644
> > --- a/drivers/net/mlx5/mlx5_flow_verbs.c
> > +++ b/drivers/net/mlx5/mlx5_flow_verbs.c
> > @@ -121,13 +121,13 @@
> > struct mlx5_flow_counter *cnt;
> > int ret;
> >
> > -   LIST_FOREACH(cnt, &priv->flow_counters, next) {
> > -   if (!cnt->shared || cnt->shared != shared)
> > -   continue;
> > -   if (cnt->id != id)
> > -   continue;
> > -   cnt->ref_cnt++;
> > -   return cnt;
> > +   if (shared) {
> > +   LIST_FOREACH(cnt, &priv->flow_counters, next) {
> > +   if (cnt->shared && cnt->id == id) {
> > +   cnt->ref_cnt++;
> > +   return cnt;
> > +   }
> > +   }
> > }
> > cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
> > if (!cnt) {
> > --
> > 1.8.3.1



Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter

2019-01-01 Thread Zhao1, Wei
There was an error in sending this patch; please ignore it.

> -Original Message-
> From: Zhao1, Wei
> Sent: Wednesday, January 2, 2019 2:31 PM
> To: dev@dpdk.org
> Cc: sta...@dpdk.org; Wu, Jingjing ; Zhao1, Wei
> 
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> There is need for users to use flexible byte filter on x550.
> This patch enable it.
> 
> Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
> Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")
> 
> Signed-off-by: Wei Zhao 
> ---
>  drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
>  drivers/net/ixgbe/ixgbe_flow.c | 274 --
> ---
>  2 files changed, 195 insertions(+), 88 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
> index e559f0f..deb9a21 100644
> --- a/drivers/net/ixgbe/ixgbe_fdir.c
> +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> @@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
>   /* flex byte mask */
>   if (info->mask.flex_bytes_mask == 0)
>   fdirm |= IXGBE_FDIRM_FLEX;
> + if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask ==
> 0)
> + fdirm |= IXGBE_FDIRM_L3P;
> 
>   IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> 
> @@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
>   /* mask VM pool and DIPv6 since there are currently not supported
>* mask FLEX byte, it will be set in flex_conf
>*/
> - uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
> -  IXGBE_FDIRM_FLEX;
> + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
>   uint32_t fdiripv6m;
>   enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
>   uint16_t mac_mask;
> @@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
>   return -EINVAL;
>   }
> 
> + /* flex byte mask */
> + if (info->mask.flex_bytes_mask == 0)
> + fdirm |= IXGBE_FDIRM_FLEX;
> +
>   IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> 
>   fdiripv6m = ((u32)0xU << IXGBE_FDIRIP6M_DIPM_SHIFT); diff --
> git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index
> f0fafeb..dc210c5 100644
> --- a/drivers/net/ixgbe/ixgbe_flow.c
> +++ b/drivers/net/ixgbe/ixgbe_flow.c
> @@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>   const struct rte_flow_item_raw *raw_mask;
>   const struct rte_flow_item_raw *raw_spec;
>   uint8_t j;
> -
>   struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> 
> +
>   if (!pattern) {
>   rte_flow_error_set(error, EINVAL,
>   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> @@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>* value. So, we need not do anything for the not provided fields
> later.
>*/
>   memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> - memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
> - rule->mask.vlan_tci_mask = 0;
> - rule->mask.flex_bytes_mask = 0;
> + memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
> 
>   /**
>* The first not void item should be
> @@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>   item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
>   item->type != RTE_FLOW_ITEM_TYPE_TCP &&
>   item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> - item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
> + item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
> + item->type != RTE_FLOW_ITEM_TYPE_RAW) {
>   memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
>   rte_flow_error_set(error, EINVAL,
>   RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>   }
> 
>   raw_mask = item->mask;
> + rule->b_mask = TRUE;
> 
>   /* check mask */
>   if (raw_mask->relative != 0x1 ||
> @@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>   }
> 
>   raw_spec = item->spec;
> + rule->b_spec = TRUE;
> 
>   /* check spec */
>   if (raw_spec->relative != 0 ||
> @@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>   const struct rte_flow_item_eth *eth_mask;
>   const struct rte_flow_item_vlan *vlan_spec;
>   const struct rte_flow_item_vlan *vlan_mask;
> + const struct rte_flow_item_raw *raw_mask;
> + const struct rte_flow_item_raw *raw_spec;
>   uint32_t j;
> 
>   if (!pattern) {
> @@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>* value. So, we need not do anything for the not provided fields
> later.
>*/
>   memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -  

[dpdk-dev] [PATCH 2/2] net/i40e: support VXLAN-GPE classification

2019-01-01 Thread Qiming Yang
Added the VXLAN-GPE tunnel filter, with support for the filter-to-queue action.

Signed-off-by: Qiming Yang 
---
 app/test-pmd/cmdline.c | 4 +++-
 drivers/net/i40e/i40e_ethdev.c | 3 +++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 51c7fac..7b7cb12 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8704,6 +8704,8 @@ cmd_tunnel_filter_parsed(void *parsed_result,
 
if (!strcmp(res->tunnel_type, "vxlan"))
tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+   else if (!strcmp(res->tunnel_type, "vxlan-gpe"))
+   tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
else if (!strcmp(res->tunnel_type, "nvgre"))
tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_NVGRE;
else if (!strcmp(res->tunnel_type, "ipingre"))
@@ -8753,7 +8755,7 @@ cmdline_parse_token_ipaddr_t cmd_tunnel_filter_ip_value =
ip_value);
 cmdline_parse_token_string_t cmd_tunnel_filter_tunnel_type =
TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
-   tunnel_type, "vxlan#nvgre#ipingre");
+   tunnel_type, "vxlan#nvgre#ipingre#vxlan-gpe");
 
 cmdline_parse_token_string_t cmd_tunnel_filter_filter_type =
TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1dd04e6..8eb2a02 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7690,6 +7690,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
case RTE_TUNNEL_TYPE_IP_IN_GRE:
tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
break;
+   case RTE_TUNNEL_TYPE_VXLAN_GPE:
+   tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
+   break;
default:
/* Other tunnel types is not supported. */
PMD_DRV_LOG(ERR, "tunnel type is not supported.");
-- 
2.9.5



[dpdk-dev] [PATCH 1/2] net/i40e: add support for VXLAN-GPE

2019-01-01 Thread Qiming Yang
Enable the i40e driver to recognize the new packet type VXLAN-GPE.
Added inner IP/TCP/UDP checksum and RSS support for VXLAN-GPE
packets.

Signed-off-by: Qiming Yang 
---
 app/test-pmd/cmdline.c   |  6 --
 drivers/net/i40e/i40e_ethdev.c   | 13 +
 lib/librte_ethdev/rte_eth_ctrl.h |  1 +
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 3ddc3e0..51c7fac 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8877,6 +8877,8 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
} else if (!strcmp(res->tunnel_type, "geneve")) {
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+   } else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
+   tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
} else {
printf("Invalid tunnel type\n");
return;
@@ -8911,7 +8913,7 @@ cmdline_parse_token_string_t 
cmd_config_tunnel_udp_port_action =
 "add#rm");
 cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_type =
TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, tunnel_type,
-"vxlan#geneve");
+"vxlan#geneve#vxlan-gpe");
 cmdline_parse_token_num_t cmd_config_tunnel_udp_port_value =
TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, udp_port,
  UINT16);
@@ -8919,7 +8921,7 @@ cmdline_parse_token_num_t 
cmd_config_tunnel_udp_port_value =
 cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = {
.f = cmd_cfg_tunnel_udp_port_parsed,
.data = NULL,
-   .help_str = "port config  udp_tunnel_port add|rm vxlan|geneve 
",
+   .help_str = "port config  udp_tunnel_port add|rm 
vxlan|geneve|vxlan-gpe ",
.tokens = {
(void *)&cmd_config_tunnel_udp_port_port,
(void *)&cmd_config_tunnel_udp_port_config,
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8dc1a4a..1dd04e6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8338,7 +8338,7 @@ i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
 }
 
 static int
-i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
 {
int  idx, ret;
uint8_t filter_idx;
@@ -8361,7 +8361,7 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
return -ENOSPC;
}
 
-   ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+   ret =  i40e_aq_add_udp_tunnel(hw, port, udp_type,
&filter_idx, NULL);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
@@ -8429,9 +8429,13 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
-   ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+   ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
+ I40E_AQC_TUNNEL_TYPE_VXLAN);
break;
-
+   case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
+ I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
+break;
case RTE_TUNNEL_TYPE_GENEVE:
case RTE_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
@@ -8460,6 +8464,7 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
+   case RTE_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
break;
case RTE_TUNNEL_TYPE_GENEVE:
diff --git a/lib/librte_ethdev/rte_eth_ctrl.h b/lib/librte_ethdev/rte_eth_ctrl.h
index 5ea8ae2..b341634 100644
--- a/lib/librte_ethdev/rte_eth_ctrl.h
+++ b/lib/librte_ethdev/rte_eth_ctrl.h
@@ -229,6 +229,7 @@ enum rte_eth_tunnel_type {
RTE_TUNNEL_TYPE_NVGRE,
RTE_TUNNEL_TYPE_IP_IN_GRE,
RTE_L2_TUNNEL_TYPE_E_TAG,
+   RTE_TUNNEL_TYPE_VXLAN_GPE,
RTE_TUNNEL_TYPE_MAX,
 };
 
-- 
2.9.5