[dpdk-dev] [PATCH v4] app/testpmd: add option ring-bind-lcpu to bind Q with CPU

2018-01-10 Thread wei . guo . simon
From: Simon Guo 

Currently the rx/tx queue is allocated from the buffer pool on socket of:
- port's socket if --port-numa-config specified
- or ring-numa-config setting per port

All of the above "bind" a port's queues to a single socket per port
configuration. But better performance can actually be achieved if one
port's queues are spread across multiple NUMA nodes, with each rx/tx
queue allocated on the socket local to the lcore that polls it.

This patch adds a new option "--ring-bind-lcpu" (no parameter). With
this, testpmd can utilize the PCI-e bus bandwidth on other NUMA
nodes.

When --port-numa-config or --ring-numa-config option is specified, this
--ring-bind-lcpu option will be suppressed.

Test result:
64-byte packets, running on PowerPC with a Mellanox CX-4 card,
single port (100G), with 8 cores, fwd mode:
- Without this patch: 52.5 Mpps throughput
- With this patch: 66 Mpps throughput
  (~25% improvement)

Signed-off-by: Simon Guo 
---
 app/test-pmd/parameters.c |  14 +-
 app/test-pmd/testpmd.c| 112 +-
 app/test-pmd/testpmd.h|   7 +++
 3 files changed, 101 insertions(+), 32 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 304b98d..1dba92e 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -104,6 +104,10 @@
   "(flag: 1 for RX; 2 for TX; 3 for RX and TX).\n");
printf("  --socket-num=N: set socket from which all memory is allocated 
"
   "in NUMA mode.\n");
+   printf("  --ring-bind-lcpu: "
+   "specify TX/RX rings will be allocated on local socket of lcpu."
+   "It will be ignored if ring-numa-config or port-numa-config is 
used. "
+   "As a result, it allows one port binds to multiple NUMA 
nodes.\n");
printf("  --mbuf-size=N: set the data size of mbuf to N bytes.\n");
printf("  --total-num-mbufs=N: set the number of mbufs to be allocated "
   "in mbuf pools.\n");
@@ -544,6 +548,7 @@
{ "interactive",0, 0, 0 },
{ "cmdline-file",   1, 0, 0 },
{ "auto-start", 0, 0, 0 },
+   { "ring-bind-lcpu", 0, 0, 0 },
{ "eth-peers-configfile",   1, 0, 0 },
{ "eth-peer",   1, 0, 0 },
 #endif
@@ -676,6 +681,10 @@
stats_period = n;
break;
}
+   if (!strcmp(lgopts[opt_idx].name, "ring-bind-lcpu")) {
+   ring_bind_lcpu |= RBL_BIND_LOCAL_MASK;
+   break;
+   }
if (!strcmp(lgopts[opt_idx].name,
"eth-peers-configfile")) {
if (init_peer_eth_addrs(optarg) != 0)
@@ -739,11 +748,14 @@
if (parse_portnuma_config(optarg))
rte_exit(EXIT_FAILURE,
   "invalid port-numa configuration\n");
+   ring_bind_lcpu |= RBL_PORT_NUMA_MASK;
}
-   if (!strcmp(lgopts[opt_idx].name, "ring-numa-config"))
+   if (!strcmp(lgopts[opt_idx].name, "ring-numa-config")) {
if (parse_ringnuma_config(optarg))
rte_exit(EXIT_FAILURE,
   "invalid ring-numa configuration\n");
+   ring_bind_lcpu |= RBL_RING_NUMA_MASK;
+   }
if (!strcmp(lgopts[opt_idx].name, "socket-num")) {
n = atoi(optarg);
if (!new_socket_id((uint8_t)n)) {
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 9414d0e..e9e89d0 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -68,6 +68,9 @@
 uint8_t interactive = 0;
 uint8_t auto_start = 0;
 uint8_t tx_first;
+
+uint8_t ring_bind_lcpu;
+
 char cmdline_filename[PATH_MAX] = {0};
 
 /*
@@ -1410,6 +1413,43 @@ static int eth_event_callback(portid_t port_id,
return 1;
 }
 
+static int find_local_socket(queueid_t qi, int is_rxq)
+{
+   /*
+* try to find the local socket with following logic:
+* 1) Find the correct stream for the queue;
+* 2) Find the correct lcore for the stream;
+* 3) Find the correct socket for the lcore;
+*/
+   int i, j, socket = NUMA_NO_CONFIG;
+
+   /* find the stream based on queue number */
+   for (i = 0; i < nb_fwd_streams; i++) {
+   if (is_rxq) {
+   if (fwd_streams[i]->rx_queue == qi)
+   break;
+   } else {
+   if (fwd_streams[i]->tx_queue == qi)
+   break;
+

Re: [dpdk-dev] [PATCH] build: add support for detecting march on ARM

2018-01-10 Thread Pavan Nikhilesh
On Mon, Jan 08, 2018 at 05:05:47PM +, Bruce Richardson wrote:
> On Sat, Dec 30, 2017 at 10:07:54PM +0530, Pavan Nikhilesh wrote:
> > Added support for detecting march and mcpu by reading midr_el1 register.
> > The implementer, primary part number values read can be used to figure
> > out the underlying arm cpu.
> >
> > Signed-off-by: Pavan Nikhilesh 
> > ---
> >
> >  The current method used for reading MIDR_EL1 form userspace might not be
> >  reliable and can be easily modified by updating config/arm/machine.py.
> >
> >  More info on midr_el1 can be found at
> >  
> > http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0500g/BABFEABI.html
> >
> >  This patch depends on http://dpdk.org/dev/patchwork/patch/32410/
> >
>
> I had intended that patch to just be a prototype to start adding ARM
> support - have you considered taking that and rolling in these changes
> into that as a part of a set?

Agreed, I will merge the patches as a patchset in v2.

>
> >  config/arm/machine.py  | 18 ++
> >  config/arm/meson.build | 20 
> >  config/meson.build |  3 ++-
> >  drivers/meson.build|  2 +-
> >  examples/meson.build   |  2 +-
> >  lib/meson.build|  2 +-
> >  meson.build|  2 +-
> >  7 files changed, 44 insertions(+), 5 deletions(-)
> >  create mode 100755 config/arm/machine.py
> >
> > diff --git a/config/arm/machine.py b/config/arm/machine.py
> > new file mode 100755
> > index 0..3c6e7b6a7
> > --- /dev/null
> > +++ b/config/arm/machine.py
> > @@ -0,0 +1,18 @@
> > +#!/usr/bin/python
> > +import pprint
> > +pp = pprint
> > +
> > +ident = []
> > +fname = '/sys/devices/system/cpu/cpu0/regs/identification/midr_el1'
> > +with open(fname) as f:
> > +content = f.read()
> > +
> > +midr_el1 = (int(content.rstrip('\n'), 16))
> > +
> > +ident.append(hex((midr_el1 >> 24) & 0xFF))  # Implementer
> > +ident.append(hex((midr_el1 >> 20) & 0xF))   # Variant
> > +ident.append(hex((midr_el1 >> 16) & 0XF))   # Architecture
> > +ident.append(hex((midr_el1 >> 4) & 0xFFF))  # Primary Part number
> > +ident.append(hex(midr_el1 & 0xF))   # Revision
> > +
> > +print(' '.join(ident))
> > diff --git a/config/arm/meson.build b/config/arm/meson.build
> > index 250958415..f6ae69c21 100644
> > --- a/config/arm/meson.build
> > +++ b/config/arm/meson.build
> > @@ -41,3 +41,23 @@ else
> >  endif
> >  dpdk_conf.set('RTE_CACHE_LINE_SIZE', 128)
> >  dpdk_conf.set('RTE_FORCE_INTRINSICS', 1)
> > +
> > +detect_vendor = find_program(join_paths(meson.current_source_dir(),
> > +   'machine.py'))
> > +cmd = run_command(detect_vendor.path())
> > +if cmd.returncode() != 0
> > +   message('Unable to read midr_el1')
> > +else
> > +   cmd_output = cmd.stdout().strip().split(' ')
> > +   message('midr_el1 output: \n' + 'Implementor ' + cmd_output[0] +
> > +   ' Variant ' + cmd_output[1] + ' Architecture ' +
> > +   cmd_output[2] + ' Primary Part number ' + cmd_output[3]
> > +   + ' Revision ' + cmd_output[4])
> > +   if cmd_output[0] == '0x43'
> > +   message('Implementor : Cavium')
> > +   dpdk_conf.set('RTE_MACHINE', 'thunderx')
> > +   machine_arg = []
> > +   machine_arg += '-march=' + 'armv8-a+crc+crypto'
> > +   machine_arg += '-mcpu=' + 'thunderx'
> > +   endif
> > +endif
>
> Should the call to the script and return code not be dependent on the
> current value of machine i.e. only if it's "native"? If the user has
> specified a "machine" type as part of the meson configuration
> parameters, you should not override it. Similarly in the cross-build
> case, the machine type is taken from the cross-build file itself.
>

Yes, only in the native build scenario should the script and its result be used.
I will modify that in v2.

> > diff --git a/config/meson.build b/config/meson.build
> > index 86e978fb1..fe8104676 100644
> > --- a/config/meson.build
> > +++ b/config/meson.build
> > @@ -8,7 +8,8 @@ else
> > machine = get_option('machine')
> >  endif
> >  dpdk_conf.set('RTE_MACHINE', machine)
> > -machine_arg = '-march=' + machine
> > +machine_arg = []
> > +machine_arg += '-march=' + machine
> >
>
> I was confused initially as to why this change, but now I realise it's
> due to the fact that for the thunderx build you need both an -march and
> an -mcpu flag. I think it might be better to make this change as a
> separate patch and rename the variable from "machine_arg" to
> "machine_args" to make it clearer it's an array.

I was thinking of having a dpdk_machine_args in base meson.build parallel to
dpdk_extra_ldflags and friends.

> Alternatively, we can have separate variables for march flag and mcpu
> flag. [Does cpu-type need to be a configuration parameter for ARM
> platforms, in the non-cross-build case?]
>

Initially, while experimenting with meson, I did try out splitting the march and
mcpu flags but that resulted in extra overhead, i.e. filtering out mcpu when
it's unused.
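
For comparison, the same MIDR_EL1 field decoding could be written in C roughly
as below. This is a sketch only: the sysfs path and bit layout follow the
script and the ARM documentation linked above, and none of it is DPDK API.

#include <stdio.h>
#include <stdlib.h>

/* Decode the MIDR_EL1 fields used by the detection script. */
int main(void)
{
	const char *path =
	    "/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
	FILE *f = fopen(path, "r");
	unsigned long long midr;

	if (f == NULL) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%llx", &midr) != 1) {
		fprintf(stderr, "unable to parse midr_el1\n");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("Implementer 0x%llx Variant 0x%llx Architecture 0x%llx "
	       "Part 0x%llx Revision 0x%llx\n",
	       (midr >> 24) & 0xff,  /* implementer (0x43 = Cavium above) */
	       (midr >> 20) & 0xf,   /* variant */
	       (midr >> 16) & 0xf,   /* architecture */
	       (midr >> 4) & 0xfff,  /* primary part number */
	       midr & 0xf);          /* revision */
	return 0;
}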

[dpdk-dev] [PATCH 0/8] net/ixgbe: update base code

2018-01-10 Thread Qiming Yang
The patchset updates the ixgbe base code to the latest CID drop
cid-ixgbe.2018.01.02.tar.gz. A couple of issues are fixed, including:
SECRX_RDY polling frequency and semaphore timeout, PHY initialization
code not cleaned up, uninitialized padding, PHY init failures, SW
resource bits sync and some compile issues for GCC 7. It also changes
the method for flash read, adds x553 SGMII 10/100Mbps support and
common FW version functions. The patchset also covers a couple of code
cleanups and function comment updates.

Qiming Yang (8):
  net/ixgbe/base: add common FW version functions
  net/ixgbe/base: increasing timeout
  net/ixgbe/base: x550 related bug fix
  net/ixgbe/base: changed method for flash read
  net/ixgbe/base: clear sync register during init
  net/ixgbe/base: update function comments
  net/ixgbe/base: added media type fiber fixed
  net/ixgbe/base: update README file

 drivers/net/ixgbe/base/README|   2 +-
 drivers/net/ixgbe/base/ixgbe_82598.c |   3 +-
 drivers/net/ixgbe/base/ixgbe_82599.c |  31 --
 drivers/net/ixgbe/base/ixgbe_api.c   |  18 +++-
 drivers/net/ixgbe/base/ixgbe_common.c| 179 ---
 drivers/net/ixgbe/base/ixgbe_common.h|   6 ++
 drivers/net/ixgbe/base/ixgbe_dcb.c   |  10 +-
 drivers/net/ixgbe/base/ixgbe_dcb_82598.c |  22 +++-
 drivers/net/ixgbe/base/ixgbe_dcb_82599.c |  25 -
 drivers/net/ixgbe/base/ixgbe_hv_vf.c |  17 +++
 drivers/net/ixgbe/base/ixgbe_mbx.c   |  26 +
 drivers/net/ixgbe/base/ixgbe_phy.c   |  19 +++-
 drivers/net/ixgbe/base/ixgbe_type.h  |  61 +--
 drivers/net/ixgbe/base/ixgbe_vf.c|   7 +-
 drivers/net/ixgbe/base/ixgbe_x540.c  |  27 +++--
 drivers/net/ixgbe/base/ixgbe_x550.c  | 150 +-
 16 files changed, 418 insertions(+), 185 deletions(-)

-- 
2.9.4



[dpdk-dev] [PATCH 2/8] net/ixgbe/base: increasing timeout

2018-01-10 Thread Qiming Yang
Increased the SECRX_RDY polling frequency (4000 polls of 10 us instead of
40 polls of 1000 us, keeping the same ~40 ms budget) and the semaphore timeout.
Fixed the FWSM.PT check in ixgbe_mng_present().

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_common.c | 4 ++--
 drivers/net/ixgbe/base/ixgbe_x540.c   | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index 717af6e..4c950f1 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -3347,7 +3347,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 
mask)
  **/
 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
 {
-#define IXGBE_MAX_SECRX_POLL 40
+#define IXGBE_MAX_SECRX_POLL 4000
 
int i;
int secrxreg;
@@ -3364,7 +3364,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
break;
else
/* Use interrupt-safe sleep just in case */
-   usec_delay(1000);
+   usec_delay(10);
}
 
/* For informational purposes only */
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c 
b/drivers/net/ixgbe/base/ixgbe_x540.c
index 0e51813..ef29a9b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -779,6 +779,9 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 
mask)
 
swmask |= swi2c_mask;
fwmask |= swi2c_mask << 2;
+   if (hw->mac.type >= ixgbe_mac_X550)
+   timeout = 1000;
+
for (i = 0; i < timeout; i++) {
/* SW NVM semaphore bit is used for access to all
 * SW_FW_SYNC bits (not just NVM)
-- 
2.9.4



[dpdk-dev] [PATCH 1/8] net/ixgbe/base: add common FW version functions

2018-01-10 Thread Qiming Yang
Added common functions for getting the OEM product version,
option ROM version, and ETrack id.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_common.c | 111 ++
 drivers/net/ixgbe/base/ixgbe_common.h |   6 ++
 drivers/net/ixgbe/base/ixgbe_type.h   |  47 --
 3 files changed, 158 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index 7f85713..717af6e 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -4983,6 +4983,117 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct 
ixgbe_hw *hw)
return IXGBE_NOT_IMPLEMENTED;
 }
 
+/**
+ *  ixgbe_get_orom_version - Return option ROM from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  if valid option ROM version, nvm_ver->or_valid set to true
+ *  else nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+   struct ixgbe_nvm_version *nvm_ver)
+{
+   u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+   nvm_ver->or_valid = false;
+   /* Option Rom may or may not be present.  Start with pointer */
+   hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+   /* make sure offset is valid */
+   if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
+   return;
+
+   hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+   hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+   /* option rom exists and is valid */
+   if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+   eeprom_cfg_blkl == NVM_VER_INVALID ||
+   eeprom_cfg_blkh == NVM_VER_INVALID)
+   return;
+
+   nvm_ver->or_valid = true;
+   nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+   nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+   (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+   nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ *  ixgbe_get_oem_prod_version - Return OEM Product version
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  if valid OEM product version, nvm_ver->oem_valid set to true
+ *  else nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+   struct ixgbe_nvm_version *nvm_ver)
+{
+   u16 rel_num, prod_ver, mod_len, cap, offset;
+
+   nvm_ver->oem_valid = false;
+   hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+   /* Return if offset to OEM Product Version block is invalid */
+   if (offset == 0x0 && offset == NVM_INVALID_PTR)
+   return;
+
+   /* Read product version block */
+   hw->eeprom.ops.read(hw, offset, &mod_len);
+   hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+   /* Return if OEM product version block is invalid */
+   if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+   (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+   return;
+
+   hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+   hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+   /* Return if version is invalid */
+   if ((rel_num | prod_ver) == 0x0 ||
+   rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+   return;
+
+   nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+   nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+   nvm_ver->oem_release = rel_num;
+   nvm_ver->oem_valid = true;
+}
+
+/**
+ *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  word read errors will return 0x
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
+{
+   u16 etk_id_l, etk_id_h;
+
+   if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+   etk_id_l = NVM_VER_INVALID;
+   if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+   etk_id_h = NVM_VER_INVALID;
+
+   /* The word order for the version format is determined by high order
+* word bit 15.
+*/
+   if ((etk_id_h & NVM_ETK_VALID) == 0) {
+   nvm_ver->etk_id = etk_id_h;
+   nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+   } else {
+   nvm_ver->etk_id = etk_id_l;
+   nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+   }
+}
+
 
 /**
  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h 
b/drivers/net/ixgbe/base/ixgbe_common.h
index 903f34d..fd35dcc 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -183,6 +183,12 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
 s32 ix
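
A hypothetical caller of the three helpers above might format a printable
firmware string along these lines. The field names mirror the
ixgbe_nvm_version fields filled in by the patch; the local struct,
fw_version_str() and the output format are invented for illustration.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of the fields filled in by ixgbe_get_orom_version(),
 * ixgbe_get_oem_prod_version() and ixgbe_get_etk_id() above. */
struct nvm_version_view {
	bool or_valid, oem_valid;
	unsigned int or_major, or_build, or_patch;
	unsigned int oem_major, oem_minor, oem_release;
	uint32_t etk_id;
};

/* Prefer the OEM product version, fall back to the option ROM
 * version, and always append the ETrack id. */
static void fw_version_str(const struct nvm_version_view *v,
			   char *buf, size_t len)
{
	if (v->oem_valid)
		snprintf(buf, len, "%u.%u.%u, etrack 0x%08" PRIx32,
			 v->oem_major, v->oem_minor, v->oem_release,
			 v->etk_id);
	else if (v->or_valid)
		snprintf(buf, len, "orom %u.%u.%u, etrack 0x%08" PRIx32,
			 v->or_major, v->or_build, v->or_patch, v->etk_id);
	else
		snprintf(buf, len, "etrack 0x%08" PRIx32, v->etk_id);
}

int main(void)
{
	struct nvm_version_view v = {
		.oem_valid = true, .oem_major = 6, .oem_minor = 1,
		.oem_release = 25, .etk_id = 0x800003e7,
	};
	char buf[64];

	fw_version_str(&v, buf, sizeof(buf));
	printf("firmware: %s\n", buf);
	return 0;
}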

[dpdk-dev] [PATCH 3/8] net/ixgbe/base: x550 related bug fix

2018-01-10 Thread Qiming Yang
Added an error code for when PHY init fails.
Cleaned up the PHY initialization code.
Fixed the erroneous use of uninitialized padding.
Added x553 SGMII 10/100Mbps support.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_common.c |  19 +++--
 drivers/net/ixgbe/base/ixgbe_x550.c   | 144 ++
 2 files changed, 54 insertions(+), 109 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index 4c950f1..50b9b46 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -4246,10 +4246,17 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 
ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_10_X550EM_A:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+#ifdef PREBOOT_SUPPORT
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
-   hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+   hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L ||
+   hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+   hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
*speed = IXGBE_LINK_SPEED_10_FULL;
-   }
+#else
+   if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+   hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+   *speed = IXGBE_LINK_SPEED_10_FULL;
+#endif /* PREBOOT_SUPPORT */
break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -4669,10 +4676,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, 
u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
-   fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-   (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
+   fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+   (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
@@ -5165,8 +5172,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
return false;
 
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
-   fwsm &= IXGBE_FWSM_MODE_MASK;
-   return fwsm == IXGBE_FWSM_FW_MODE_PT;
+
+   return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
 }
 
 /**
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c 
b/drivers/net/ixgbe/base/ixgbe_x550.c
index 9862391..c55bc5c 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -336,98 +336,6 @@ STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit address of PHY register to read
- * @dev_type: always unused
- * @phy_data: Pointer to read data from PHY register
- */
-STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
-u32 dev_type, u16 *phy_data)
-{
-   u32 i, data, command;
-   UNREFERENCED_1PARAMETER(dev_type);
-
-   /* Setup and write the read command */
-   command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
- IXGBE_MSCA_MDI_COMMAND;
-
-   IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-   /* Check every 10 usec to see if the access completed.
-* The MDI Command bit will clear when the operation is
-* complete
-*/
-   for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-   usec_delay(10);
-
-   command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-   if (!(command & IXGBE_MSCA_MDI_COMMAND))
-   break;
-   }
-
-   if (command & IXGBE_MSCA_MDI_COMMAND) {
-   ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "PHY read command did not complete.\n");
-   return IXGBE_ERR_PHY;
-   }
-
-   /* Read operation is complete.  Get the data from MSRWD */
-   data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
-   data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
-   *phy_data = (u16)data;
-
-   return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @dev_type: always unused
- * @phy_data: Data to write to the PHY register
- */
-STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
- u32 dev_type, u16 phy_data)
-{
-   u32 i, command;
-   UNREFERENCED_1PARAMETER(dev_type);
-
-   /* Put the data in the MDI single read and wri

[dpdk-dev] [PATCH 5/8] net/ixgbe/base: clear sync register during init

2018-01-10 Thread Qiming Yang
Cleared SW resource bits in the synchronization register and updated
the host interface resource bit error case for X540.
Fixed compiler warnings for gcc 7.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_82599.c  | 10 +++---
 drivers/net/ixgbe/base/ixgbe_common.c |  4 ++--
 drivers/net/ixgbe/base/ixgbe_x540.c   | 24 ++--
 3 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c 
b/drivers/net/ixgbe/base/ixgbe_82599.c
index d9d11a8..d382a60 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1739,15 +1739,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 
switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x:
-   /* mask VLAN ID, fall through to mask VLAN priority */
+   /* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
+   /* fall through */
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
break;
case 0xE000:
-   /* mask VLAN ID only, fall through */
+   /* mask VLAN ID only */
fdirm |= IXGBE_FDIRM_VLANID;
+   /* fall through */
case 0xEFFF:
/* no VLAN fields masked */
break;
@@ -1758,8 +1760,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 
switch (input_mask->formatted.flex_bytes & 0x) {
case 0x:
-   /* Mask Flex Bytes, fall through */
+   /* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
+   /* fall through */
case 0x:
break;
default:
@@ -2024,6 +2027,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw 
*hw,
DEBUGOUT(" Error on src/dst port\n");
return IXGBE_ERR_CONFIG;
}
+   /* fall through */
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index bbb04de..9ca6ebf 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -264,7 +264,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val != IXGBE_SUCCESS)
goto out;
 
-   /* only backplane uses autoc so fall though */
+   /* fall through - only backplane uses autoc */
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -4750,7 +4750,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int 
num_pb, u32 headroom,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-   /* Fall through to configure remaining packet buffers */
+   /* fall through - configure remaining packet buffers */
case PBA_STRATEGY_EQUAL:
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
for (; i < num_pb; i++)
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c 
b/drivers/net/ixgbe/base/ixgbe_x540.c
index ef29a9b..716664b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -807,14 +807,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 
mask)
msec_delay(5);
}
 
-   /* Failed to get SW only semaphore */
-   if (swmask == IXGBE_GSSR_SW_MNG_SM) {
-   ERROR_REPORT1(IXGBE_ERROR_POLLING,
-"Failed to get SW only semaphore");
-   DEBUGOUT("Failed to get SW only semaphore, returning 
IXGBE_ERR_SWFW_SYNC\n");
-   return IXGBE_ERR_SWFW_SYNC;
-   }
-
/* If the resource is not released by the FW/HW the SW can assume that
 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
 * of the requested resource(s) while ignoring the corresponding FW/HW
@@ -839,7 +831,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 
mask)
 */
if (swfw_sync & swmask) {
u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
-   IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+   IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+   IXGBE_GSSR_SW_MNG_SM;
 
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
@@ -973,14 +966,25 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct 
ixgbe_hw *hw)
  **/
 void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
 {
+   u32 rmask;
+
/* First try to grab the semaphore but we don't need to bother
- 
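
The gcc 7 part of this patch concerns -Wimplicit-fallthrough (enabled by
-Wextra): the warning is silenced only when a comment containing "fall
through" sits immediately before the next case label, which is why the
comments above were split and moved. A minimal standalone illustration,
not taken from the ixgbe code:

#include <stdio.h>

/* gcc 7 accepts the deliberate fall through below only because a
 * "fall through" comment sits immediately before the next case label.
 * The bit values 0x1/0x2 are purely illustrative. */
static unsigned int vlan_mask_bits(unsigned int vlan_id)
{
	unsigned int fdirm = 0;

	switch (vlan_id) {
	case 0xFFFF:
		fdirm |= 0x1;	/* mask VLAN ID */
		/* fall through */
	case 0x0FFF:
		fdirm |= 0x2;	/* mask VLAN priority */
		break;
	default:
		break;
	}
	return fdirm;
}

int main(void)
{
	printf("0x%x\n", vlan_mask_bits(0xFFFF)); /* prints 0x3 */
	return 0;
}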

[dpdk-dev] [PATCH 4/8] net/ixgbe/base: changed method for flash read

2018-01-10 Thread Qiming Yang
Stored the host interface read result in two bytes instead of one byte,
and added definitions for the LESM module structure.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_common.c | 22 +++---
 drivers/net/ixgbe/base/ixgbe_type.h   | 10 ++
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index 50b9b46..bbb04de 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -4584,10 +4584,11 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, 
u32 *buffer,
 u32 length, u32 timeout, bool return_data)
 {
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
-   u16 dword_len;
+   struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
u16 buf_len;
s32 status;
u32 bi;
+   u32 dword_len;
 
DEBUGFUNC("ixgbe_host_interface_command");
 
@@ -4617,8 +4618,23 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, 
u32 *buffer,
IXGBE_LE32_TO_CPUS(&buffer[bi]);
}
 
-   /* If there is any thing in data position pull it in */
-   buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+   /*
+* If there is anything in the data position, pull it in.
+* The Read Flash command requires reading the buffer length
+* from two bytes instead of one byte.
+*/
+   if (resp->cmd == 0x30) {
+   for (; bi < dword_len + 2; bi++) {
+   buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ bi);
+   IXGBE_LE32_TO_CPUS(&buffer[bi]);
+   }
+   buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
+ & 0xF00) | resp->buf_len;
+   hdr_size += (2 << 2);
+   } else {
+   buf_len = resp->buf_len;
+   }
if (!buf_len)
goto rel_out;
 
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h 
b/drivers/net/ixgbe/base/ixgbe_type.h
index 66a79dd..e614c10 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -2458,6 +2458,16 @@ enum {
 #define IXGBE_FW_LESM_PARAMETERS_PTR   0x2
 #define IXGBE_FW_LESM_STATE_1  0x1
 #define IXGBE_FW_LESM_STATE_ENABLED0x8000 /* LESM Enable bit */
+#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK0x1F
+#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12
+#define IXGBE_FW_LESM_STATE0_10G_ENABLED   0x6FFF
+#define IXGBE_FW_LESM_STATE1_10G_ENABLED   0x4FFF
+#define IXGBE_FW_LESM_STATE0_10G_DISABLED  0x0FFF
+#define IXGBE_FW_LESM_STATE1_10G_DISABLED  0x2FFF
+#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET  0x2
+#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET  0x3
+#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET  0x6
+#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET  0x7
 #define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR  0x4
 #define IXGBE_FW_PATCH_VERSION_4   0x7
 #define IXGBE_FCOE_IBA_CAPS_BLK_PTR0x33 /* iSCSI/FCOE block */
-- 
2.9.4



[dpdk-dev] [PATCH 6/8] net/ixgbe/base: update function comments

2018-01-10 Thread Qiming Yang
This patch mainly adds/removes comments for function
parameters that were missing or no longer needed.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_82598.c |  3 ++-
 drivers/net/ixgbe/base/ixgbe_82599.c | 14 +++---
 drivers/net/ixgbe/base/ixgbe_api.c   | 16 +++-
 drivers/net/ixgbe/base/ixgbe_common.c| 13 ++---
 drivers/net/ixgbe/base/ixgbe_dcb.c   | 10 --
 drivers/net/ixgbe/base/ixgbe_dcb_82598.c | 22 +-
 drivers/net/ixgbe/base/ixgbe_dcb_82599.c | 25 +
 drivers/net/ixgbe/base/ixgbe_hv_vf.c | 17 +
 drivers/net/ixgbe/base/ixgbe_mbx.c   |  4 ++--
 drivers/net/ixgbe/base/ixgbe_phy.c   | 19 ---
 drivers/net/ixgbe/base/ixgbe_vf.c|  7 ---
 drivers/net/ixgbe/base/ixgbe_x550.c  | 10 +-
 12 files changed, 132 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c 
b/drivers/net/ixgbe/base/ixgbe_82598.c
index d64abb2..ee7ce2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -548,6 +548,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 /**
  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
  *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Configures link settings based on values in the ixgbe_hw struct.
  *  Restarts the link.  Performs autonegotiation if needed.
@@ -1205,7 +1206,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 
byte_offset,
  *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset at address 0xA2
- *  @eeprom_data: value read
+ *  @sff8472_data: value read
  *
  *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
  **/
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c 
b/drivers/net/ixgbe/base/ixgbe_82599.c
index d382a60..110b114 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -265,7 +265,7 @@ s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool 
*locked, u32 *reg_val)
 /**
  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
  * @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
  * @locked: bool to indicate whether the SW/FW lock was already taken by
  *   previous proc_autoc_read_82599.
  *
@@ -1367,6 +1367,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, 
u32 fdirctrl)
 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
bool cloud_mode)
 {
+   UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
 
/*
@@ -1455,7 +1456,8 @@ do { \
 
 /**
  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- *  @stream: input bitstream to compute the hash on
+ *  @input: input bitstream to compute the hash on
+ *  @common: compressed common input dword
  *
  *  This function is almost identical to the function above but contains
  *  several optimizations such as unwinding all of the loops, letting the
@@ -1594,7 +1596,7 @@ do { \
 
 /**
  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- *  @atr_input: input bitstream to compute the hash on
+ *  @input: input bitstream to compute the hash on
  *  @input_mask: mask for the input bitstream
  *
  *  This function serves two main purposes.  First it applies the input_mask
@@ -1695,6 +1697,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
u32 fdirm = IXGBE_FDIRM_DIPv6;
u32 fdirtcpm;
u32 fdirip6m;
+   UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
 
/*
@@ -1871,6 +1874,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw 
*hw,
u32 addr_low, addr_high;
u32 cloud_type = 0;
s32 err;
+   UNREFERENCED_1PARAMETER(cloud_mode);
 
DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
if (!cloud_mode) {
@@ -1995,6 +1999,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw 
*hw,
  *  @input_mask: mask for the input bitstream
  *  @soft_id: software index for the filters
  *  @queue: queue index to direct traffic to
+ *  @cloud_mode: unused
  *
  *  Note that the caller to this function must lock before calling, since the
  *  hardware writes must be protected from one another.
@@ -2005,6 +2010,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw 
*hw,
u16 soft_id, u8 queue, bool cloud_mode)
 {
s32 err = IXGBE_ERR_CONFIG;
+   UNREFERENCED_1PARAMETER(cloud_mode);
 
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
 
@@ -2514,6 +2520,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
  *  ixgbe_read_i2c_byte_82599 - Reads 8 bi

[dpdk-dev] [PATCH 8/8] net/ixgbe/base: update README file

2018-01-10 Thread Qiming Yang
Update README file to specify the version of CID drop.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 8c833b4..70fdfe7 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
 ===
 
 This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2017.05.16 released by the team which develop
+cid-ixgbe.2018.01.02.tar.gz released by the team which develop
 basic drivers for any ixgbe NIC. The sub-directory of base/
 contains the original source package.
 This driver is valid for the product(s) listed below
-- 
2.9.4



[dpdk-dev] [PATCH 7/8] net/ixgbe/base: added media type fiber fixed

2018-01-10 Thread Qiming Yang
Added the media type ixgbe_media_type_fiber_fixed and its handling.

Signed-off-by: Qiming Yang 
---
 drivers/net/ixgbe/base/ixgbe_82599.c  |  7 +++
 drivers/net/ixgbe/base/ixgbe_api.c|  2 ++
 drivers/net/ixgbe/base/ixgbe_common.c |  6 ++
 drivers/net/ixgbe/base/ixgbe_mbx.c| 22 --
 drivers/net/ixgbe/base/ixgbe_type.h   |  4 +++-
 5 files changed, 18 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c 
b/drivers/net/ixgbe/base/ixgbe_82599.c
index 110b114..2621721 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -87,6 +87,9 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
mac->ops.set_rate_select_speed =
   ixgbe_set_hard_rate_select_speed;
+   if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+   mac->ops.set_rate_select_speed =
+  ixgbe_set_soft_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -564,6 +567,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct 
ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
media_type = ixgbe_media_type_fiber_qsfp;
break;
+   case IXGBE_DEV_ID_82599_BYPASS:
+   media_type = ixgbe_media_type_fiber_fixed;
+   hw->phy.multispeed_fiber = true;
+   break;
default:
media_type = ixgbe_media_type_unknown;
break;
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c 
b/drivers/net/ixgbe/base/ixgbe_api.c
index 4870a15..e50c104 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -178,6 +178,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_LS:
+   case IXGBE_DEV_ID_82599_BYPASS:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
@@ -192,6 +193,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+   case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
hw->mvals = ixgbe_mvals_X540;
break;
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index e54ba6a..3bff4a7 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -167,6 +167,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
 
switch (hw->phy.media_type) {
+   case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
/* flow control autoneg black list */
@@ -200,6 +201,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+   case IXGBE_DEV_ID_X540_BYPASS:
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -265,6 +267,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
goto out;
 
/* fall through - only backplane uses autoc */
+   case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3124,6 +3127,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+   case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -5263,6 +5267,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw 
*hw,
 
/* Set the module link speed */
switch (hw->phy.media_type) {
+   case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_10GB_FULL);
@@ -5313,6 +5318,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw 
*hw,
 
/* Set the module link speed */
switch (hw->phy.media_type) {
+   case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
 

[dpdk-dev] [RFC v3] Compression API in DPDK :SW ZLIB PMD

2018-01-10 Thread Verma, Shally
Hi Fiona

We are planning to implement a ZLIB-based SW PMD internally to proof-of-concept
the DPDK compression RFC v3 API spec. However, we would like to check
if you're working on something similar in parallel and, if yes, what your
development roadmap/strategy is, so that we could see whether we could leverage a
joint effort.
Depending upon your feedback, we can see if we can have a common repo for
joint development or send it as an RFC patch.

Let me know your opinion on same.

Thanks
Shally



Re: [dpdk-dev] [PATCH v1] app/testpmd: support command echo in CLI batch loading

2018-01-10 Thread Lu, Wenzhuo
Hi Xueming,

> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Xueming Li
> Sent: Tuesday, December 26, 2017 10:26 PM
> Cc: Xueming Li ; Wu, Jingjing
> ; dev@dpdk.org; Olivier MATZ
> ; Burakov, Anatoly 
> Subject: [dpdk-dev] [PATCH v1] app/testpmd: support command echo in CLI
> batch loading
> 
> Use first bit of verbose_level to enable CLI echo of batch loading.
After this patch, the first bit of verbose_level is ambiguous. It can still 
enable/disable the log print. 
Is it by design?


Re: [dpdk-dev] [PATCH 0/8] net/ixgbe: update base code

2018-01-10 Thread Lu, Wenzhuo
Hi,


> -Original Message-
> From: Yang, Qiming
> Sent: Thursday, January 11, 2018 12:05 AM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo ; Dai, Wei ;
> Yang, Qiming 
> Subject: [PATCH 0/8] net/ixgbe: update base code
> 
> The patchset update ixgbe base code base on latest CID drop cid-
> ixgbe.2018.01.02.tar.gz, couple issues are fixed, include:
> SECRX_RDY polling frequency and semaphore timeout, PHY initialization
> code not cleanup, uninitialized padding, PHY init fails, SW resource bits sync
> and some compile issues for GCC7. And also changed method for flash read,
> added x553 SGMII 10/100Mbps support and common FW version functions.
> The patchset also covers couple code clean and function comments update.
> 
> Qiming Yang (8):
>   net/ixgbe/base: add common FW version functions
>   net/ixgbe/base: increasing timeout
>   net/ixgbe/base: x550 related bug fix
>   net/ixgbe/base: changed method for flash read
>   net/ixgbe/base: clear sync register during init
>   net/ixgbe/base: update function comments
>   net/ixgbe/base: added media type fiber fixed
>   net/ixgbe/base: update README file
> 
>  drivers/net/ixgbe/base/README|   2 +-
>  drivers/net/ixgbe/base/ixgbe_82598.c |   3 +-
>  drivers/net/ixgbe/base/ixgbe_82599.c |  31 --
>  drivers/net/ixgbe/base/ixgbe_api.c   |  18 +++-
>  drivers/net/ixgbe/base/ixgbe_common.c| 179
> ---
>  drivers/net/ixgbe/base/ixgbe_common.h|   6 ++
>  drivers/net/ixgbe/base/ixgbe_dcb.c   |  10 +-
>  drivers/net/ixgbe/base/ixgbe_dcb_82598.c |  22 +++-
> drivers/net/ixgbe/base/ixgbe_dcb_82599.c |  25 -
>  drivers/net/ixgbe/base/ixgbe_hv_vf.c |  17 +++
>  drivers/net/ixgbe/base/ixgbe_mbx.c   |  26 +
>  drivers/net/ixgbe/base/ixgbe_phy.c   |  19 +++-
>  drivers/net/ixgbe/base/ixgbe_type.h  |  61 +--
>  drivers/net/ixgbe/base/ixgbe_vf.c|   7 +-
>  drivers/net/ixgbe/base/ixgbe_x540.c  |  27 +++--
>  drivers/net/ixgbe/base/ixgbe_x550.c  | 150 +-
>  16 files changed, 418 insertions(+), 185 deletions(-)
> 
> --
> 2.9.4
Series-Acked-by: Wenzhuo Lu 


Re: [dpdk-dev] [PATCH v2 2/2] app/testpmd: fix invalid txq number setting

2018-01-10 Thread Dai, Wei
> -Original Message-
> From: Yang, Qiming
> Sent: Wednesday, January 10, 2018 2:38 PM
> To: Dai, Wei ; Lu, Wenzhuo ;
> Wu, Jingjing ; Peng, Yuan ;
> Ananyev, Konstantin 
> Cc: dev@dpdk.org; sta...@dpdk.org; Dai, Wei 
> Subject: RE: [dpdk-dev] [PATCH v2 2/2] app/testpmd: fix invalid txq number
> setting
> 
> I think the name bak is a little bit confusing; what do you think about just
> using nb_txq_backup/nb_rxq_backup?
> And I think there's no need to break the patch into two patches; they fix the
> same thing and the amount of code is not large.

I will follow Konstantin's guidance for the v3 patch set.
By the way, I think 2 patches are much clearer and keep each one very simple
for others to review, and for the maintainer's convenience.

> 
> > -Original Message-
> > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Wei Dai
> > Sent: Wednesday, January 10, 2018 12:14 PM
> > To: Lu, Wenzhuo ; Wu, Jingjing
> > ; Peng, Yuan ; Ananyev,
> > Konstantin 
> > Cc: dev@dpdk.org; sta...@dpdk.org; Dai, Wei 
> > Subject: [dpdk-dev] [PATCH v2 2/2] app/testpmd: fix invalid txq number
> > setting
> >
> > If an invalid TX queue is configured from testpmd command like "port
> > config all txq number", the global variable txq is updated by this
> > invalid value. It may cause testpmd crash.
> > This patch restores its last correct value when an invalid txq number
> > configured is detected.
> >
> > Fixes: ce8d561418d4 ("app/testpmd: add port configuration settings")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Wei Dai 
> > ---
> >  app/test-pmd/cmdline.c |  2 ++
> >  app/test-pmd/testpmd.c | 12 +---  app/test-pmd/testpmd.h |
> 1
> > +
> >  3 files changed, 12 insertions(+), 3 deletions(-)
> >
> > diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> > a5a1d57..26dd81a 100644
> > --- a/app/test-pmd/cmdline.c
> > +++ b/app/test-pmd/cmdline.c
> > @@ -1527,6 +1527,8 @@ cmd_config_rx_tx_parsed(void *parsed_result,
> > printf("Warning: Either rx or tx queues should be non 
> > zero\n");
> > return;
> > }
> > +   /* backup last correct nb_txq */
> > +   nb_txq_bak = nb_txq;
> > nb_txq = res->value;
> > }
> > else if (!strcmp(res->name, "rxd")) { diff --git
> > a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > efafc24..8b49d96 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -190,6 +190,7 @@ queueid_t nb_rxq = 1; /**< Number of RX queues
> per
> > port. */  queueid_t nb_txq = 1; /**< Number of TX queues per port. */
> >
> >  queueid_t nb_rxq_bak = 1; /**< Backup of last correct number of RX
> > queues */
> > +queueid_t nb_txq_bak = 1; /**< Backup of last correct number of TX
> > +queues */
> >
> >  /*
> >   * Configurable number of RX/TX ring descriptors.
> > @@ -721,8 +722,12 @@ init_fwd_streams(void)
> > }
> > if (nb_txq > port->dev_info.max_tx_queues) {
> > printf("Fail: nb_txq(%d) is greater than "
> > -   "max_tx_queues(%d)\n", nb_txq,
> > -   port->dev_info.max_tx_queues);
> > +   "max_tx_queues(%d), restored to backup "
> > +   "txq number(%d)\n", nb_txq,
> > +   port->dev_info.max_tx_queues,
> > +   nb_txq_bak);
> > +   /* restored to last correct nb_txq */
> > +   nb_txq = nb_txq_bak;
> > return -1;
> > }
> > if (numa_support) {
> > @@ -744,8 +749,9 @@ init_fwd_streams(void)
> > }
> > }
> >
> > -   /* backup the correct nb_rxq */
> > +   /* backup the correct nb_rxq and nb_txq */
> > nb_rxq_bak = nb_rxq;
> > +   nb_txq_bak = nb_txq;
> >
> > q = RTE_MAX(nb_rxq, nb_txq);
> > if (q == 0) {
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > 6f7932d..bca93c1 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -393,6 +393,7 @@ extern queueid_t nb_rxq;  extern queueid_t
> nb_txq;
> >
> >  extern queueid_t nb_rxq_bak;
> > +extern queueid_t nb_txq_bak;
> >
> >  extern uint16_t nb_rxd;
> >  extern uint16_t nb_txd;
> > --
> > 2.7.5



Re: [dpdk-dev] [PATCH v1] app/testpmd: support command echo in CLI batch loading

2018-01-10 Thread Xueming(Steven) Li

> -Original Message-
> From: Lu, Wenzhuo [mailto:wenzhuo...@intel.com]
> Sent: Wednesday, January 10, 2018 4:36 PM
> To: Xueming(Steven) Li 
> Cc: Wu, Jingjing ; dev@dpdk.org; Olivier MATZ
> ; Burakov, Anatoly 
> Subject: RE: [dpdk-dev] [PATCH v1] app/testpmd: support command echo in
> CLI batch loading
> 
> Hi Xueming,
> 
> > -Original Message-
> > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Xueming Li
> > Sent: Tuesday, December 26, 2017 10:26 PM
> > Cc: Xueming Li ; Wu, Jingjing
> > ; dev@dpdk.org; Olivier MATZ
> > ; Burakov, Anatoly 
> > Subject: [dpdk-dev] [PATCH v1] app/testpmd: support command echo in
> > CLI batch loading
> >
> > Use first bit of verbose_level to enable CLI echo of batch loading.
> After this patch, the first bit of verbose_level is ambiguous. It can
> still enable/disable the log print.
> Is it by design?
You are correct, there is some code in testpmd simply testing verbose > 0.
How about changing all those tests to: verbose & 1? I have another patchset
using other bits of verbose...
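
For illustration only, treating verbose_level as a bit mask rather than a plain
level could look like the sketch below; the bit assignments and helper names
are hypothetical and not taken from the patch under discussion.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical interpretation of verbose_level as a bit mask: one bit
 * keeps the existing packet-log verbosity, another enables echoing of
 * commands loaded from a CLI batch file. */
#define VERBOSE_LOG_BIT  0x1
#define VERBOSE_ECHO_BIT 0x2

static uint16_t verbose_level = VERBOSE_LOG_BIT | VERBOSE_ECHO_BIT;

static int verbose_log_enabled(void) { return verbose_level & VERBOSE_LOG_BIT; }
static int cli_echo_enabled(void)    { return verbose_level & VERBOSE_ECHO_BIT; }

int main(void)
{
	if (cli_echo_enabled())
		printf("testpmd> show port info all\n"); /* echo batch line */
	if (verbose_log_enabled())
		printf("port 0: received 1 packets\n");  /* normal verbose log */
	return 0;
}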


[dpdk-dev] [PATCH v3 0/2] app/testpmd: fix invalid rxq and txq number setting

2018-01-10 Thread Wei Dai
If an invalid number of RX or TX queues is configured from a testpmd
command like "port config all rxq number" or "port config all txq number",
the global variable rxq or txq is updated by the invalid input.
This can cause a testpmd crash. For example, if the maximum number of
RX or TX queues is 4, testpmd will crash after running the commands
"port config all rxq 5", "port config all txq 5" and "start" in sequence.

These 2 patches keep the last correct rxq and txq: if an invalid input
is detected, it is rejected so the previous value is retained and the crash
is avoided.

Fixes: ce8d561418d4 ("app/testpmd: add port configuration settings")
Cc: sta...@dpdk.org

Signed-off-by: Wei Dai 

---
v3: follow the guidance from Konstantin to use functions to check the
input rxq and txq instead of adding new global variables.

v2: fix a bug in v1


Wei Dai (2):
  app/testpmd: fix invalid rxq number setting
  app/testpmd: fix invalid txq number setting

 app/test-pmd/cmdline.c|  4 ++
 app/test-pmd/parameters.c | 13 ---
 app/test-pmd/testpmd.c| 94 +++
 app/test-pmd/testpmd.h|  5 +++
 4 files changed, 110 insertions(+), 6 deletions(-)

-- 
2.7.5



[dpdk-dev] [PATCH v3 2/2] app/testpmd: fix invalid txq number setting

2018-01-10 Thread Wei Dai
If an invalid TX queue number is configured from a testpmd command
like "port config all txq number", the global variable txq
is updated by this invalid value. It may cause a testpmd crash.
This patch keeps the last correct value when an invalid
txq number is detected.

Fixes: ce8d561418d4 ("app/testpmd: add port configuration settings")
Cc: sta...@dpdk.org

Signed-off-by: Wei Dai 
---
 app/test-pmd/cmdline.c|  2 ++
 app/test-pmd/parameters.c |  6 +++---
 app/test-pmd/testpmd.c| 47 +++
 app/test-pmd/testpmd.h|  2 ++
 4 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f0623b1..6619cb8 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1527,6 +1527,8 @@ cmd_config_rx_tx_parsed(void *parsed_result,
printf("Warning: Either rx or tx queues should be non 
zero\n");
return;
}
+   if (check_nb_txq(res->value) != 0)
+   return;
nb_txq = res->value;
}
else if (!strcmp(res->name, "rxd")) {
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index eac1826..6b5925d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -932,12 +932,12 @@ launch_args_parse(int argc, char** argv)
}
if (!strcmp(lgopts[opt_idx].name, "txq")) {
n = atoi(optarg);
-   if (n >= 0 && n <= (int) MAX_QUEUE_ID)
+   if (n >= 0 && check_nb_txq((queueid_t)n) == 0)
nb_txq = (queueid_t) n;
else
rte_exit(EXIT_FAILURE, "txq %d invalid 
- must be"
- " >= 0 && <= %d\n", n,
- (int) MAX_QUEUE_ID);
+ " >= 0 && <= %u\n", n,
+ get_allowed_nb_txq(&pid));
}
if (!nb_rxq && !nb_txq) {
rte_exit(EXIT_FAILURE, "Either rx or tx queues 
should "
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 1203b17..fb8bb48 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -587,6 +587,53 @@ check_nb_rxq(queueid_t rxq)
return 0;
 }
 
+/*
+ * Get the allowed maximum number of TX queues.
+ * *pid returns the port id which has the minimal value of
+ * max_tx_queues among all ports.
+ */
+
+queueid_t
+get_allowed_nb_txq(portid_t *pid)
+{
+   queueid_t allowed_max_txq = MAX_QUEUE_ID;
+   portid_t pi;
+   struct rte_eth_dev_info dev_info;
+
+   RTE_ETH_FOREACH_DEV(pi) {
+   rte_eth_dev_info_get(pi, &dev_info);
+   if (dev_info.max_tx_queues < allowed_max_txq) {
+   allowed_max_txq = dev_info.max_tx_queues;
+   *pid = pi;
+   }
+   }
+   return allowed_max_txq;
+}
+
+/*
+ * Check whether the input txq is valid or not.
+ * If the input txq is not greater than the maximum number
+ * of TX queues of any port, it is valid.
+ * If valid, return 0, else return -1.
+ */
+int
+check_nb_txq(queueid_t txq)
+{
+   queueid_t allowed_max_txq;
+   portid_t pid;
+
+   allowed_max_txq = get_allowed_nb_txq(&pid);
+   if (txq > allowed_max_txq) {
+   printf("Fail: input txq (%u) can't be greater "
+  "than max_tx_queues (%u) of port %u\n",
+  txq,
+  allowed_max_txq,
+  pid);
+   return -1;
+   }
+   return 0;
+}
+
 static void
 init_config(void)
 {
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 1e38f43..b848364 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -702,6 +702,8 @@ int new_socket_id(unsigned int socket_id);
 
 queueid_t get_allowed_nb_rxq(portid_t *pid);
 int check_nb_rxq(queueid_t rxq);
+queueid_t get_allowed_nb_txq(portid_t *pid);
+int check_nb_txq(queueid_t txq);
 
 /*
  * Work-around of a compilation error with ICC on invocations of the
-- 
2.7.5



[dpdk-dev] [PATCH v3 1/2] app/testpmd: fix invalid rxq number setting

2018-01-10 Thread Wei Dai
If an invalid RX queue number is configured from a testpmd command
like "port config all rxq number", the global variable rxq
is updated by this invalid value. It may cause a testpmd crash.
This patch keeps the last correct value when an invalid
rxq number is detected.

Fixes: ce8d561418d4 ("app/testpmd: add port configuration settings")
Cc: sta...@dpdk.org

Signed-off-by: Wei Dai 
---
 app/test-pmd/cmdline.c|  2 ++
 app/test-pmd/parameters.c |  7 ---
 app/test-pmd/testpmd.c| 47 +++
 app/test-pmd/testpmd.h|  3 +++
 4 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 5b2e2ef..f0623b1 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1518,6 +1518,8 @@ cmd_config_rx_tx_parsed(void *parsed_result,
printf("Warning: Either rx or tx queues should be non 
zero\n");
return;
}
+   if (check_nb_rxq(res->value) != 0)
+   return;
nb_rxq = res->value;
}
else if (!strcmp(res->name, "txq")) {
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 304b98d..eac1826 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -536,6 +536,7 @@ launch_args_parse(int argc, char** argv)
int n, opt;
char **argvopt;
int opt_idx;
+   portid_t pid;
enum { TX, RX };
 
static struct option lgopts[] = {
@@ -922,12 +923,12 @@ launch_args_parse(int argc, char** argv)
rss_hf = ETH_RSS_UDP;
if (!strcmp(lgopts[opt_idx].name, "rxq")) {
n = atoi(optarg);
-   if (n >= 0 && n <= (int) MAX_QUEUE_ID)
+   if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
nb_rxq = (queueid_t) n;
else
rte_exit(EXIT_FAILURE, "rxq %d invalid 
- must be"
- " >= 0 && <= %d\n", n,
- (int) MAX_QUEUE_ID);
+ " >= 0 && <= %u\n", n,
+ get_allowed_nb_rxq(&pid));
}
if (!strcmp(lgopts[opt_idx].name, "txq")) {
n = atoi(optarg);
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 9414d0e..1203b17 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -540,6 +540,53 @@ check_socket_id(const unsigned int socket_id)
return 0;
 }
 
+/*
+ * Get the allowed maximum number of RX queues.
+ * *pid returns the port id which has the minimal value of
+ * max_rx_queues among all ports.
+ */
+
+queueid_t
+get_allowed_nb_rxq(portid_t *pid)
+{
+   queueid_t allowed_max_rxq = MAX_QUEUE_ID;
+   portid_t pi;
+   struct rte_eth_dev_info dev_info;
+
+   RTE_ETH_FOREACH_DEV(pi) {
+   rte_eth_dev_info_get(pi, &dev_info);
+   if (dev_info.max_rx_queues < allowed_max_rxq) {
+   allowed_max_rxq = dev_info.max_rx_queues;
+   *pid = pi;
+   }
+   }
+   return allowed_max_rxq;
+}
+
+/*
+ * Check whether the input rxq is valid or not.
+ * If the input rxq is not greater than the maximum number
+ * of RX queues of any port, it is valid.
+ * If valid, return 0, else return -1.
+ */
+int
+check_nb_rxq(queueid_t rxq)
+{
+   queueid_t allowed_max_rxq;
+   portid_t pid;
+
+   allowed_max_rxq = get_allowed_nb_rxq(&pid);
+   if (rxq > allowed_max_rxq) {
+   printf("Fail: input rxq (%u) can't be greater "
+  "than max_rx_queues (%u) of port %u\n",
+  rxq,
+  allowed_max_rxq,
+  pid);
+   return -1;
+   }
+   return 0;
+}
+
 static void
 init_config(void)
 {
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 2a266fd..1e38f43 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -700,6 +700,9 @@ enum print_warning {
 int port_id_is_invalid(portid_t port_id, enum print_warning warning);
 int new_socket_id(unsigned int socket_id);
 
+queueid_t get_allowed_nb_rxq(portid_t *pid);
+int check_nb_rxq(queueid_t rxq);
+
 /*
  * Work-around of a compilation error with ICC on invocations of the
  * rte_be_to_cpu_16() function.
-- 
2.7.5



[dpdk-dev] [PATCH v4 00/11] convert testpmd to new ethdev offloads API

2018-01-10 Thread Shahaf Shuler
This series is to convert testpmd application to the new offloads API [1].

on v4:
 - Enable FAST_FREE Tx offload by default if the device supports it.
 - Fix commit log for the VLAN configuration patch.

on v3:
 - Remove warn prints on bad offloads configuration from application.

on v2:
 - Split the patchset into multiple patches to help with the review.
 - Removed wrong comments.
 - Removed redundant parentheses.
 - Fixed port print parameters.
 - Introduced a new method to check if single port is stopped.
 - Cleanup for internal Tx offloads flags.

[1] http://dpdk.org/ml/archives/dev/2017-October/077329.html

Shahaf Shuler (11):
  app/testpmd: fix port configuration print
  app/testpmd: convert to new Ethdev Rx offloads API
  app/testpmd: support check of single port stop
  app/testpmd: convert to new Ethdev Tx offloads API
  app/testpmd: fix flowgen forwarding ol flags
  app/testpmd: cleanup internal Tx offloads flags field
  app/testpmd: add command line option for Tx offloads
  app/testpmd: remove txqflags
  app/testpmd: enforce offloads caps
  app/testpmd: adjust on the flight VLAN configuration
  app/testpmd: enable fast free Tx offload by default

 app/test-pmd/cmdline.c  | 501 +--
 app/test-pmd/config.c   | 177 +---
 app/test-pmd/csumonly.c |  40 +-
 app/test-pmd/flowgen.c  |  12 +-
 app/test-pmd/macfwd.c   |   8 +-
 app/test-pmd/macswap.c  |   8 +-
 app/test-pmd/parameters.c   |  59 +--
 app/test-pmd/testpmd.c  |  58 +--
 app/test-pmd/testpmd.h  |  26 +-
 app/test-pmd/txonly.c   |   8 +-
 doc/guides/testpmd_app_ug/run_app.rst   |  17 +-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  11 -
 12 files changed, 522 insertions(+), 403 deletions(-)

-- 
2.12.0



[dpdk-dev] [PATCH v4 03/11] app/testpmd: support check of single port stop

2018-01-10 Thread Shahaf Shuler
This patch adds support for checking if a single port is stopped.
Currently there is a function to check only all ports at once.

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/testpmd.c | 16 
 app/test-pmd/testpmd.h |  1 +
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6785b095f..77154ef3b 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -1394,15 +1394,23 @@ all_ports_started(void)
 }
 
 int
+port_is_stopped(portid_t port_id)
+{
+   struct rte_port *port = &ports[port_id];
+
+   if ((port->port_status != RTE_PORT_STOPPED) &&
+   (port->slave_flag == 0))
+   return 0;
+   return 1;
+}
+
+int
 all_ports_stopped(void)
 {
portid_t pi;
-   struct rte_port *port;
 
RTE_ETH_FOREACH_DEV(pi) {
-   port = &ports[pi];
-   if ((port->port_status != RTE_PORT_STOPPED) &&
-   (port->slave_flag == 0))
+   if (!port_is_stopped(pi))
return 0;
}
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 1639d27e7..ab74d39ce 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -682,6 +682,7 @@ void reset_port(portid_t pid);
 void attach_port(char *identifier);
 void detach_port(portid_t port_id);
 int all_ports_stopped(void);
+int port_is_stopped(portid_t port_id);
 int port_is_started(portid_t port_id);
 void pmd_test_exit(void);
 void fdir_get_infos(portid_t port_id);
-- 
2.12.0



[dpdk-dev] [PATCH v4 04/11] app/testpmd: convert to new Ethdev Tx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

Convert the application to use the new API.

This patch requires the port to be stopped when configuring the Tx
offloads, because the PMD must be aware of the offload changes in the
device and queue configuration.
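
A minimal sketch (not from the patch) of the resulting flow, assuming the
standard ethdev calls rte_eth_dev_stop/configure/start; per-queue re-setup
may also be needed, as testpmd does through its reconfig helpers.

    #include <rte_ethdev.h>

    /* Sketch: Tx offloads are part of the device configuration, so they
     * can only be changed through a stop/configure/start cycle. */
    static int
    apply_tx_offloads(uint16_t port_id, struct rte_eth_conf *conf,
                      uint16_t nb_rxq, uint16_t nb_txq, uint64_t tx_offloads)
    {
            int ret;

            rte_eth_dev_stop(port_id);           /* cannot change on a running port */
            conf->txmode.offloads = tx_offloads; /* new-API per-port Tx offload bitmask */
            ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
            if (ret < 0)
                    return ret;
            /* queue setup would be redone here if queue parameters changed */
            return rte_eth_dev_start(port_id);
    }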

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/cmdline.c | 90 ++---
 app/test-pmd/config.c  | 55 ++-
 app/test-pmd/testpmd.c |  3 ++
 3 files changed, 124 insertions(+), 24 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d8c73a9b1..58125839a 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3439,7 +3439,14 @@ cmd_tx_vlan_set_parsed(void *parsed_result,
 {
struct cmd_tx_vlan_set_result *res = parsed_result;
 
+   if (!port_is_stopped(res->port_id)) {
+   printf("Please stop port %d first\n", res->port_id);
+   return;
+   }
+
tx_vlan_set(res->port_id, res->vlan_id);
+
+   cmd_reconfig_device_queue(res->port_id, 1, 1);
 }
 
 cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan =
@@ -3486,7 +3493,14 @@ cmd_tx_vlan_set_qinq_parsed(void *parsed_result,
 {
struct cmd_tx_vlan_set_qinq_result *res = parsed_result;
 
+   if (!port_is_stopped(res->port_id)) {
+   printf("Please stop port %d first\n", res->port_id);
+   return;
+   }
+
tx_qinq_set(res->port_id, res->vlan_id, res->vlan_id_outer);
+
+   cmd_reconfig_device_queue(res->port_id, 1, 1);
 }
 
 cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_tx_vlan =
@@ -3592,7 +3606,14 @@ cmd_tx_vlan_reset_parsed(void *parsed_result,
 {
struct cmd_tx_vlan_reset_result *res = parsed_result;
 
+   if (!port_is_stopped(res->port_id)) {
+   printf("Please stop port %d first\n", res->port_id);
+   return;
+   }
+
tx_vlan_reset(res->port_id);
+
+   cmd_reconfig_device_queue(res->port_id, 1, 1);
 }
 
 cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan =
@@ -3685,11 +3706,16 @@ cmd_csum_parsed(void *parsed_result,
struct cmd_csum_result *res = parsed_result;
int hw = 0;
uint16_t mask = 0;
+   uint64_t csum_offloads = 0;
 
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
printf("invalid port %d\n", res->port_id);
return;
}
+   if (!port_is_stopped(res->port_id)) {
+   printf("Please stop port %d first\n", res->port_id);
+   return;
+   }
 
if (!strcmp(res->mode, "set")) {
 
@@ -3698,22 +3724,34 @@ cmd_csum_parsed(void *parsed_result,
 
if (!strcmp(res->proto, "ip")) {
mask = TESTPMD_TX_OFFLOAD_IP_CKSUM;
+   csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
} else if (!strcmp(res->proto, "udp")) {
mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM;
+   csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
} else if (!strcmp(res->proto, "tcp")) {
mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM;
+   csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
} else if (!strcmp(res->proto, "sctp")) {
mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM;
+   csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
} else if (!strcmp(res->proto, "outer-ip")) {
mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM;
+   csum_offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
}
 
-   if (hw)
+   if (hw) {
ports[res->port_id].tx_ol_flags |= mask;
-   else
+   ports[res->port_id].dev_conf.txmode.offloads |=
+   csum_offloads;
+   } else {
ports[res->port_id].tx_ol_flags &= (~mask);
+   ports[res->port_id].dev_conf.txmode.offloads &=
+   (~csum_offloads);
+   }
}
csum_show(res->port_id);
+
+   cmd_reconfig_device_queue(res->port_id, 1, 1);
 }
 
 cmdline_parse_token_string_t cmd_csum_csum =
@@ -3837,15 +3875,24 @@ cmd_tso_set_parsed(void *parsed_result,
 
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
+   if (!port_is_stopped(res->port_id)) {
+   printf("Please stop port %d first\n", res->port_id);
+   return;
+   }
 
if (!strcmp(res->mode, "set"))
ports[res->port_id].tso_segsz = res->tso_segsz;
 
-   if (ports[res->port_id].tso_segsz == 0)
+   if (ports[res->port_id].tso_segsz == 0) {
+   ports[res->port_id].dev_conf.txmode.offloads &=
+   

[dpdk-dev] [PATCH v4 01/11] app/testpmd: fix port configuration print

2018-01-10 Thread Shahaf Shuler
The print of the port configuration was based only on the configuration
of the first port.

Fixes: f2c5125a686a ("app/testpmd: use default Rx/Tx port configuration")
Cc: pablo.de.lara.gua...@intel.com
Cc: sta...@dpdk.org

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/config.c | 48 +-
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 86ca3aaef..387fefbaa 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1656,33 +1656,45 @@ fwd_lcores_config_display(void)
 void
 rxtx_config_display(void)
 {
-   printf("  %s packet forwarding%s - CRC stripping %s - "
-  "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
+   portid_t pid;
+
+   printf("  %s packet forwarding%s packets/burst=%d\n",
+  cur_fwd_eng->fwd_mode_name,
   retry_enabled == 0 ? "" : " with retry",
-  rx_mode.hw_strip_crc ? "enabled" : "disabled",
   nb_pkt_per_burst);
 
if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
printf("  packet len=%u - nb packet segments=%d\n",
(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
 
-   struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
-   struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;
-
printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
   nb_fwd_lcores, nb_fwd_ports);
-   printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
-  nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
-   printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
-  rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
-  rx_conf->rx_thresh.wthresh);
-   printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
-  nb_txq, nb_txd, tx_conf->tx_free_thresh);
-   printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
-  tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
-  tx_conf->tx_thresh.wthresh);
-   printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
-  tx_conf->tx_rs_thresh, tx_conf->txq_flags);
+
+   RTE_ETH_FOREACH_DEV(pid) {
+   struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf;
+   struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf;
+
+   printf("  port %d:\n", (unsigned int)pid);
+   printf("  CRC stripping %s\n",
+   ports[pid].dev_conf.rxmode.hw_strip_crc ?
+   "enabled" : "disabled");
+   printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
+   nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
+   printf("  RX threshold registers: pthresh=%d hthresh=%d "
+  " wthresh=%d\n",
+   rx_conf->rx_thresh.pthresh,
+   rx_conf->rx_thresh.hthresh,
+   rx_conf->rx_thresh.wthresh);
+   printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
+   nb_txq, nb_txd, tx_conf->tx_free_thresh);
+   printf("  TX threshold registers: pthresh=%d hthresh=%d "
+  " wthresh=%d\n",
+   tx_conf->tx_thresh.pthresh,
+   tx_conf->tx_thresh.hthresh,
+   tx_conf->tx_thresh.wthresh);
+   printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
+   tx_conf->tx_rs_thresh, tx_conf->txq_flags);
+   }
 }
 
 void
-- 
2.12.0



[dpdk-dev] [PATCH v4 02/11] app/testpmd: convert to new Ethdev Rx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

Convert the application to use the new API. There are no functional
changes, only a simple conversion of the flags.
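
For context, a small sketch (not part of the patch) of what the conversion
means for the Rx configuration; the old bit-fields are shown only as
comments.

    #include <rte_ethdev.h>

    /* Illustration: the same Rx configuration with the old rte_eth_rxmode
     * bit-fields (commented out) and with the new offload bitmask. */
    static void
    rx_offloads_new_api(struct rte_eth_conf *conf)
    {
            /* old API:
             *   conf->rxmode.hw_ip_checksum = 1;
             *   conf->rxmode.jumbo_frame = 1;
             */
            conf->rxmode.ignore_offload_bitfield = 1;
            conf->rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                    DEV_RX_OFFLOAD_JUMBO_FRAME;
    }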

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/cmdline.c| 51 +++---
 app/test-pmd/config.c | 36 +++--
 app/test-pmd/parameters.c | 32 +-
 app/test-pmd/testpmd.c| 19 +++-
 4 files changed, 77 insertions(+), 61 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f71d96301..d8c73a9b1 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1577,6 +1577,7 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
__attribute__((unused)) void *data)
 {
struct cmd_config_max_pkt_len_result *res = parsed_result;
+   uint64_t rx_offloads = rx_mode.offloads;
 
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
@@ -1594,14 +1595,16 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
 
rx_mode.max_rx_pkt_len = res->value;
if (res->value > ETHER_MAX_LEN)
-   rx_mode.jumbo_frame = 1;
+   rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
-   rx_mode.jumbo_frame = 0;
+   rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
} else {
printf("Unknown parameter\n");
return;
}
 
+   rx_mode.offloads = rx_offloads;
+
init_port_config();
 
cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
@@ -1703,6 +1706,7 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
__attribute__((unused)) void *data)
 {
struct cmd_config_rx_mode_flag *res = parsed_result;
+   uint64_t rx_offloads = rx_mode.offloads;
 
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
@@ -1711,48 +1715,48 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
 
if (!strcmp(res->name, "crc-strip")) {
if (!strcmp(res->value, "on"))
-   rx_mode.hw_strip_crc = 1;
+   rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
else if (!strcmp(res->value, "off"))
-   rx_mode.hw_strip_crc = 0;
+   rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
else {
printf("Unknown parameter\n");
return;
}
} else if (!strcmp(res->name, "scatter")) {
-   if (!strcmp(res->value, "on"))
-   rx_mode.enable_scatter = 1;
-   else if (!strcmp(res->value, "off"))
-   rx_mode.enable_scatter = 0;
-   else {
+   if (!strcmp(res->value, "on")) {
+   rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+   } else if (!strcmp(res->value, "off")) {
+   rx_offloads &= ~DEV_RX_OFFLOAD_SCATTER;
+   } else {
printf("Unknown parameter\n");
return;
}
} else if (!strcmp(res->name, "rx-cksum")) {
if (!strcmp(res->value, "on"))
-   rx_mode.hw_ip_checksum = 1;
+   rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
else if (!strcmp(res->value, "off"))
-   rx_mode.hw_ip_checksum = 0;
+   rx_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
else {
printf("Unknown parameter\n");
return;
}
} else if (!strcmp(res->name, "rx-timestamp")) {
if (!strcmp(res->value, "on"))
-   rx_mode.hw_timestamp = 1;
+   rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
else if (!strcmp(res->value, "off"))
-   rx_mode.hw_timestamp = 0;
+   rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
else {
printf("Unknown parameter\n");
return;
}
} else if (!strcmp(res->name, "hw-vlan")) {
if (!strcmp(res->value, "on")) {
-   rx_mode.hw_vlan_filter = 1;
-   rx_mode.hw_vlan_strip  = 1;
+   rx_offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+   DEV_RX_OFFLOAD_VLAN_STRIP);
}
else if (!strcmp(res->value, "off")) {
-   rx_mode.hw_vlan_filter = 0;
-   rx_mode.hw_vlan_strip  = 0;
+   rx_offloads &= ~(DEV_RX_OFFLOAD_VLAN_FILTER |
+   DEV_RX_OFFLOAD_VLAN

[dpdk-dev] [PATCH v4 05/11] app/testpmd: fix flowgen forwarding ol flags

2018-01-10 Thread Shahaf Shuler
The mbuf ol_flags were taken directly from the testpmd internal enumeration,
leading to incorrect values.

Only the insertion offload flags are addressed, since by application
design the checksum flags are used only with csum forwarding.
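
The intended mapping, as a standalone sketch (the flag names are the ones
used in the hunk below; the helper itself is hypothetical):

    #include <rte_mbuf.h>

    #include "testpmd.h"

    /* Translate the internal TESTPMD_TX_OFFLOAD_* bits into mbuf PKT_TX_*
     * flags instead of copying the raw enumeration into ol_flags. */
    static uint64_t
    tx_ol_flags_to_mbuf_flags(uint16_t tx_ol_flags)
    {
            uint64_t ol_flags = 0;

            if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
                    ol_flags |= PKT_TX_VLAN_PKT;
            if (tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
                    ol_flags |= PKT_TX_QINQ_PKT;
            if (tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
                    ol_flags |= PKT_TX_MACSEC;
            return ol_flags;
    }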

Fixes: e9e23a617eb8 ("app/testpmd: add flowgen forwarding engine")
Cc: cchempara...@tilera.com
Cc: sta...@dpdk.org

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/flowgen.c | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index acf9af941..46478fc3a 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -123,7 +123,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
struct ipv4_hdr *ip_hdr;
struct udp_hdr *udp_hdr;
uint16_t vlan_tci, vlan_tci_outer;
-   uint16_t ol_flags;
+   uint64_t ol_flags = 0;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t nb_pkt;
@@ -151,7 +151,13 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
mbp = current_fwd_lcore()->mbp;
vlan_tci = ports[fs->tx_port].tx_vlan_id;
vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
-   ol_flags = ports[fs->tx_port].tx_ol_flags;
+
+   if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+   ol_flags |= PKT_TX_VLAN_PKT;
+   if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+   ol_flags |= PKT_TX_QINQ_PKT;
+   if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+   ol_flags |= PKT_TX_MACSEC;
 
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
pkt = rte_mbuf_raw_alloc(mbp);
-- 
2.12.0



[dpdk-dev] [PATCH v4 06/11] app/testpmd: cleanup internal Tx offloads flags field

2018-01-10 Thread Shahaf Shuler
The tx_ol_flags field was used to control the set of Tx offloads.
After the conversion to the new Ethdev Tx offloads API it is no longer
needed, as the offload configuration is stored in the
ethdev structs.

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/cmdline.c  | 49 
 app/test-pmd/config.c   |  4 
 app/test-pmd/csumonly.c | 40 ++--
 app/test-pmd/flowgen.c  |  8 +---
 app/test-pmd/macfwd.c   |  8 +---
 app/test-pmd/macswap.c  |  8 +---
 app/test-pmd/testpmd.h  | 22 +---
 app/test-pmd/txonly.c   |  8 +---
 8 files changed, 59 insertions(+), 88 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 58125839a..858482174 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3653,45 +3653,45 @@ static void
 csum_show(int port_id)
 {
struct rte_eth_dev_info dev_info;
-   uint16_t ol_flags;
+   uint64_t tx_offloads;
 
-   ol_flags = ports[port_id].tx_ol_flags;
+   tx_offloads = ports[port_id].dev_conf.txmode.offloads;
printf("Parse tunnel is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) ? "on" : "off");
+   (ports[port_id].parse_tunnel) ? "on" : "off");
printf("IP checksum offload is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) ? "hw" : "sw");
+   (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
printf("UDP checksum offload is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+   (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
printf("TCP checksum offload is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+   (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
printf("SCTP checksum offload is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+   (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
-   (ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ? "hw" : "sw");
+   (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(port_id, &dev_info);
-   if ((ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) &&
+   if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
printf("Warning: hardware IP checksum enabled but not "
"supported by port %d\n", port_id);
}
-   if ((ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) &&
+   if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
printf("Warning: hardware UDP checksum enabled but not "
"supported by port %d\n", port_id);
}
-   if ((ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) &&
+   if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
printf("Warning: hardware TCP checksum enabled but not "
"supported by port %d\n", port_id);
}
-   if ((ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
+   if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
printf("Warning: hardware SCTP checksum enabled but not "
"supported by port %d\n", port_id);
}
-   if ((ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) &&
+   if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
printf("Warning: hardware outer IP checksum enabled but not "
"supported by port %d\n", port_id);
@@ -3705,7 +3705,6 @@ cmd_csum_parsed(void *parsed_result,
 {
struct cmd_csum_result *res = parsed_result;
int hw = 0;
-   uint16_t mask = 0;
uint64_t csum_offloads = 0;
 
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
@@ -3723,28 +3722,21 @@ cmd_csum_parsed(void *parsed_result,
hw = 1;
 
if (!strcmp(res->proto, "ip")) {
-   mask = TESTPMD_TX_OFFLOAD_IP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
} else if (!strcmp(res->proto, "udp")) {
-   mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
} else if (!strcmp(res->proto, "tcp")) {
-   mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM;
csum_offlo

[dpdk-dev] [PATCH v4 07/11] app/testpmd: add command line option for Tx offloads

2018-01-10 Thread Shahaf Shuler
This patch adds a command line option to set a hexadecimal bitmask for
the ports' Tx offload flags.
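
As an illustration of how such a bitmask could be composed (assuming the
standard DEV_TX_OFFLOAD_* bit values, e.g. 0x2 for IPv4 and 0x8 for TCP
checksum, which would give --tx-offloads=0x000a):

    #include <rte_ethdev.h>

    /* Example mask: request IPv4 and TCP checksum Tx offload. */
    static uint64_t
    example_tx_offload_mask(void)
    {
            return DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
    }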

Signed-off-by: Shahaf Shuler 
---
 app/test-pmd/parameters.c | 17 +++--
 app/test-pmd/testpmd.c|  4 
 app/test-pmd/testpmd.h|  2 ++
 doc/guides/testpmd_app_ug/run_app.rst |  5 +
 4 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 263651cba..58889420f 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -99,7 +99,7 @@ usage(char* progname)
   "--rss-ip | --rss-udp | "
   "--rxpt= | --rxht= | --rxwt= | --rxfreet= | "
   "--txpt= | --txht= | --txwt= | --txfreet= | "
-  "--txrst= | --txqflags= ]\n",
+  "--txrst= | --txqflags= | --tx-offloads ]\n",
   progname);
 #ifdef RTE_LIBRTE_CMDLINE
printf("  --interactive: run in interactive mode.\n");
@@ -216,6 +216,7 @@ usage(char* progname)
   "disable print of designated event or all of them.\n");
printf("  --flow-isolate-all: "
   "requests flow API isolated mode on all ports at initialization 
time.\n");
+   printf("  --tx-offloads=0x: hexadecimal bitmask of TX queue 
offloads\n");
 }
 
 #ifdef RTE_LIBRTE_CMDLINE
@@ -566,8 +567,9 @@ launch_args_parse(int argc, char** argv)
char **argvopt;
int opt_idx;
enum { TX, RX };
-   /* Default Rx offloads for all ports. */
+   /* Default offloads for all ports. */
uint64_t rx_offloads = rx_mode.offloads;
+   uint64_t tx_offloads = tx_mode.offloads;
 
static struct option lgopts[] = {
{ "help",   0, 0, 0 },
@@ -645,6 +647,7 @@ launch_args_parse(int argc, char** argv)
{ "no-rmv-interrupt",   0, 0, 0 },
{ "print-event",1, 0, 0 },
{ "mask-event", 1, 0, 0 },
+   { "tx-offloads",1, 0, 0 },
{ 0, 0, 0, 0 },
};
 
@@ -1116,6 +1119,15 @@ launch_args_parse(int argc, char** argv)
rmv_interrupt = 0;
if (!strcmp(lgopts[opt_idx].name, "flow-isolate-all"))
flow_isolate_all = 1;
+   if (!strcmp(lgopts[opt_idx].name, "tx-offloads")) {
+   char *end = NULL;
+   n = strtoull(optarg, &end, 16);
+   if (n >= 0)
+   tx_offloads = (uint64_t)n;
+   else
+   rte_exit(EXIT_FAILURE,
+"tx-offloads must be >= 0\n");
+   }
if (!strcmp(lgopts[opt_idx].name, "print-event"))
if (parse_event_printing_config(optarg, 1)) {
rte_exit(EXIT_FAILURE,
@@ -1142,4 +1154,5 @@ launch_args_parse(int argc, char** argv)
 
/* Set offload configuration from command line parameters. */
rx_mode.offloads = rx_offloads;
+   tx_mode.offloads = tx_offloads;
 }
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 0087438bc..806548196 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -344,6 +344,8 @@ struct rte_eth_rxmode rx_mode = {
.ignore_offload_bitfield = 1,
 };
 
+struct rte_eth_txmode tx_mode;
+
 struct rte_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
.pballoc = RTE_FDIR_PBALLOC_64K,
@@ -604,6 +606,8 @@ init_config(void)
 
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
+   /* Apply default Tx configuration for all ports */
+   port->dev_conf.txmode = tx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
 
if (numa_support) {
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index c6baa1066..3e63edfa1 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -395,6 +395,8 @@ extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];
 extern struct rte_port *ports;
 
 extern struct rte_eth_rxmode rx_mode;
+extern struct rte_eth_txmode tx_mode;
+
 extern uint64_t rss_hf;
 
 extern queueid_t nb_rxq;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 4c0d2cede..fface6f58 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -509,3 +509,8 @@ The commandline options are:
 configured flow rules only (see flow command).
 
 Ports that do not support this mode are automatically discarded.
+
+*   ``--tx-offloads=0x``
+
+Set the hexadecimal bitmask of TX queue offloads.
+The default value is 0.
-- 
2.12.0



[dpdk-dev] [PATCH v4 08/11] app/testpmd: remove txqflags

2018-01-10 Thread Shahaf Shuler
Since testpmd now uses the new Ethdev offloads API and each of the Tx
offloads can be configured from the CLI or the command line,
the txqflags configuration is no longer needed.

Signed-off-by: Shahaf Shuler 
Acked-by: Nelio Laranjeiro 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/cmdline.c  | 69 
 app/test-pmd/config.c   |  7 +--
 app/test-pmd/parameters.c   | 14 +
 app/test-pmd/testpmd.c  |  8 ---
 app/test-pmd/testpmd.h  |  1 -
 doc/guides/testpmd_app_ug/run_app.rst   | 12 -
 doc/guides/testpmd_app_ug/testpmd_funcs.rst | 11 
 7 files changed, 3 insertions(+), 119 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 858482174..b4ef1d0eb 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3092,74 +3092,6 @@ cmdline_parse_inst_t cmd_set_txsplit = {
},
 };
 
-/* *** CONFIG TX QUEUE FLAGS *** */
-
-struct cmd_config_txqflags_result {
-   cmdline_fixed_string_t port;
-   cmdline_fixed_string_t config;
-   cmdline_fixed_string_t all;
-   cmdline_fixed_string_t what;
-   int32_t hexvalue;
-};
-
-static void cmd_config_txqflags_parsed(void *parsed_result,
-   __attribute__((unused)) struct cmdline *cl,
-   __attribute__((unused)) void *data)
-{
-   struct cmd_config_txqflags_result *res = parsed_result;
-
-   if (!all_ports_stopped()) {
-   printf("Please stop all ports first\n");
-   return;
-   }
-
-   if (strcmp(res->what, "txqflags")) {
-   printf("Unknown parameter\n");
-   return;
-   }
-
-   if (res->hexvalue >= 0) {
-   txq_flags = res->hexvalue;
-   } else {
-   printf("txqflags must be >= 0\n");
-   return;
-   }
-
-   init_port_config();
-
-   cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
-}
-
-cmdline_parse_token_string_t cmd_config_txqflags_port =
-   TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, port,
-"port");
-cmdline_parse_token_string_t cmd_config_txqflags_config =
-   TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, config,
-"config");
-cmdline_parse_token_string_t cmd_config_txqflags_all =
-   TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, all,
-"all");
-cmdline_parse_token_string_t cmd_config_txqflags_what =
-   TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, what,
-"txqflags");
-cmdline_parse_token_num_t cmd_config_txqflags_value =
-   TOKEN_NUM_INITIALIZER(struct cmd_config_txqflags_result,
-   hexvalue, INT32);
-
-cmdline_parse_inst_t cmd_config_txqflags = {
-   .f = cmd_config_txqflags_parsed,
-   .data = NULL,
-   .help_str = "port config all txqflags ",
-   .tokens = {
-   (void *)&cmd_config_txqflags_port,
-   (void *)&cmd_config_txqflags_config,
-   (void *)&cmd_config_txqflags_all,
-   (void *)&cmd_config_txqflags_what,
-   (void *)&cmd_config_txqflags_value,
-   NULL,
-   },
-};
-
 /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
 struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
@@ -15709,7 +15641,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
(cmdline_parse_inst_t *)&cmd_config_rss,
(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
-   (cmdline_parse_inst_t *)&cmd_config_txqflags,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
(cmdline_parse_inst_t *)&cmd_config_burst,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 29115e255..faccb84ca 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -417,7 +417,6 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
-   printf("\nTX flags: %#x", qinfo.conf.txq_flags);
printf("\nTX deferred start: %s",
(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
@@ -1714,10 +1713,8 @@ rxtx_config_display(void)
tx_conf->tx_thresh.pthresh,
tx_conf->tx_thresh.hthresh,
tx_conf->tx_thresh.wthresh);
-   printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32""
-  " - TXQ offloads=0x%"PRIx64"\n",
-

[dpdk-dev] [PATCH v4 10/11] app/testpmd: adjust on the flight VLAN configuration

2018-01-10 Thread Shahaf Shuler
Ethdev provides an API to configure VLAN offloads after the port
has been started, without reconfiguring the port or its queues.

In the current design of the application, when the Rx offloads are
changed (through the "port config all" CLI command) the port configuration
is overwritten, so the configuration made for the VLAN is lost.

This patch addresses the issue by configuring the Rx offloads of each
port separately instead of using the global Rx config. This
adjustment is required by the conversion of the application to the
new offloads API.
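
For reference, a sketch (not part of the patch) of the on-the-fly API
referred to above, using the generic ethdev VLAN offload calls:

    #include <rte_ethdev.h>

    /* Toggle VLAN stripping on a started port; the per-port Rx offload
     * state kept by the application must therefore not be overwritten
     * by the global config. */
    static int
    enable_vlan_strip_on_the_fly(uint16_t port_id)
    {
            int mask = rte_eth_dev_get_vlan_offload(port_id);

            if (mask < 0)
                    return mask;
            mask |= ETH_VLAN_STRIP_OFFLOAD;
            return rte_eth_dev_set_vlan_offload(port_id, mask);
    }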

Signed-off-by: Shahaf Shuler 
---
 app/test-pmd/cmdline.c | 217 +++-
 app/test-pmd/config.c  |  27 --
 app/test-pmd/testpmd.c |   2 +-
 3 files changed, 135 insertions(+), 111 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d1b8dab6e..00a229a41 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1577,34 +1577,38 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
__attribute__((unused)) void *data)
 {
struct cmd_config_max_pkt_len_result *res = parsed_result;
-   uint64_t rx_offloads = rx_mode.offloads;
+   portid_t pid;
 
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
}
 
-   if (!strcmp(res->name, "max-pkt-len")) {
-   if (res->value < ETHER_MIN_LEN) {
-   printf("max-pkt-len can not be less than %d\n",
-   ETHER_MIN_LEN);
+   RTE_ETH_FOREACH_DEV(pid) {
+   struct rte_port *port = &ports[pid];
+   uint64_t rx_offloads = port->dev_conf.rxmode.offloads;
+
+   if (!strcmp(res->name, "max-pkt-len")) {
+   if (res->value < ETHER_MIN_LEN) {
+   printf("max-pkt-len can not be less than %d\n",
+   ETHER_MIN_LEN);
+   return;
+   }
+   if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
+   return;
+
+   port->dev_conf.rxmode.max_rx_pkt_len = res->value;
+   if (res->value > ETHER_MAX_LEN)
+   rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+   else
+   rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+   port->dev_conf.rxmode.offloads = rx_offloads;
+   } else {
+   printf("Unknown parameter\n");
return;
}
-   if (res->value == rx_mode.max_rx_pkt_len)
-   return;
-
-   rx_mode.max_rx_pkt_len = res->value;
-   if (res->value > ETHER_MAX_LEN)
-   rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-   else
-   rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-   } else {
-   printf("Unknown parameter\n");
-   return;
}
 
-   rx_mode.offloads = rx_offloads;
-
init_port_config();
 
cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
@@ -1706,103 +1710,108 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
__attribute__((unused)) void *data)
 {
struct cmd_config_rx_mode_flag *res = parsed_result;
-   uint64_t rx_offloads = rx_mode.offloads;
+   portid_t pid;
 
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
}
 
-   if (!strcmp(res->name, "crc-strip")) {
-   if (!strcmp(res->value, "on"))
-   rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-   else if (!strcmp(res->value, "off"))
-   rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
-   else {
-   printf("Unknown parameter\n");
-   return;
-   }
-   } else if (!strcmp(res->name, "scatter")) {
-   if (!strcmp(res->value, "on")) {
-   rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-   } else if (!strcmp(res->value, "off")) {
-   rx_offloads &= ~DEV_RX_OFFLOAD_SCATTER;
+   RTE_ETH_FOREACH_DEV(pid) {
+   struct rte_port *port;
+   uint64_t rx_offloads;
+
+   port = &ports[pid];
+   rx_offloads = port->dev_conf.rxmode.offloads;
+   if (!strcmp(res->name, "crc-strip")) {
+   if (!strcmp(res->value, "on"))
+   rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+   else if (!strcmp(res->value, "off"))
+   rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+   else {
+

[dpdk-dev] [PATCH v4 11/11] app/testpmd: enable fast free Tx offload by default

2018-01-10 Thread Shahaf Shuler
Enable DEV_TX_OFFLOAD_MBUF_FAST_FREE in case the underlying device
supports it.

This preserves the previous offload configuration made according
to the PMD defaults.

Signed-off-by: Shahaf Shuler 
---
 app/test-pmd/testpmd.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index c1fb387fb..d6f69f5fa 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -339,7 +339,9 @@ struct rte_eth_rxmode rx_mode = {
.ignore_offload_bitfield = 1,
 };
 
-struct rte_eth_txmode tx_mode;
+struct rte_eth_txmode tx_mode = {
+   .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+};
 
 struct rte_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
@@ -605,6 +607,10 @@ init_config(void)
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
+   if (!(port->dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+   port->dev_conf.txmode.offloads &=
+   ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
if (numa_support) {
if (port_numa[pid] != NUMA_NO_CONFIG)
-- 
2.12.0



[dpdk-dev] [PATCH v4 09/11] app/testpmd: enforce offloads caps

2018-01-10 Thread Shahaf Shuler
In the current design it was possible for an offload to be set even though
the device does not support it; only a warning message was printed.

This is wrong behaviour, as an application should set only the offloads
reported in the capabilities of the device.

This patch adds verification of the offloads being set and makes sure
the offload configuration passed to the device always matches its
capabilities.
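
The enforcement pattern, as a minimal standalone sketch (the checks in the
hunks below follow the same shape; the helper name is hypothetical):

    #include <inttypes.h>
    #include <stdio.h>

    #include <rte_ethdev.h>

    /* Refuse an offload that the device does not report in its
     * capabilities, instead of only printing a warning. */
    static int
    request_tx_offload(uint16_t port_id, struct rte_eth_conf *conf,
                       uint64_t offload)
    {
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);
            if ((dev_info.tx_offload_capa & offload) != offload) {
                    printf("offload 0x%" PRIx64 " not supported by port %u\n",
                           offload, port_id);
                    return -1;
            }
            conf->txmode.offloads |= offload;
            return 0;
    }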

Signed-off-by: Shahaf Shuler 
Acked-by: Wenzhuo Lu 
---
 app/test-pmd/cmdline.c | 103 +---
 app/test-pmd/config.c  |  14 ++
 2 files changed, 92 insertions(+), 25 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b4ef1d0eb..d1b8dab6e 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3638,6 +3638,7 @@ cmd_csum_parsed(void *parsed_result,
struct cmd_csum_result *res = parsed_result;
int hw = 0;
uint64_t csum_offloads = 0;
+   struct rte_eth_dev_info dev_info;
 
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
printf("invalid port %d\n", res->port_id);
@@ -3648,21 +3649,53 @@ cmd_csum_parsed(void *parsed_result,
return;
}
 
+   rte_eth_dev_info_get(res->port_id, &dev_info);
if (!strcmp(res->mode, "set")) {
 
if (!strcmp(res->hwsw, "hw"))
hw = 1;
 
if (!strcmp(res->proto, "ip")) {
-   csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+   if (dev_info.tx_offload_capa &
+   DEV_TX_OFFLOAD_IPV4_CKSUM) {
+   csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+   } else {
+   printf("IP checksum offload is not supported "
+  "by port %u\n", res->port_id);
+   }
} else if (!strcmp(res->proto, "udp")) {
-   csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+   if (dev_info.tx_offload_capa &
+   DEV_TX_OFFLOAD_UDP_CKSUM) {
+   csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+   } else {
+   printf("UDP checksum offload is not supported "
+  "by port %u\n", res->port_id);
+   }
} else if (!strcmp(res->proto, "tcp")) {
-   csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+   if (dev_info.tx_offload_capa &
+   DEV_TX_OFFLOAD_TCP_CKSUM) {
+   csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+   } else {
+   printf("TCP checksum offload is not supported "
+  "by port %u\n", res->port_id);
+   }
} else if (!strcmp(res->proto, "sctp")) {
-   csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+   if (dev_info.tx_offload_capa &
+   DEV_TX_OFFLOAD_SCTP_CKSUM) {
+   csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+   } else {
+   printf("SCTP checksum offload is not supported "
+  "by port %u\n", res->port_id);
+   }
} else if (!strcmp(res->proto, "outer-ip")) {
-   csum_offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+   if (dev_info.tx_offload_capa &
+   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+   csum_offloads |=
+   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+   } else {
+   printf("Outer IP checksum offload is not "
+  "supported by port %u\n", res->port_id);
+   }
}
 
if (hw) {
@@ -3805,6 +3838,14 @@ cmd_tso_set_parsed(void *parsed_result,
if (!strcmp(res->mode, "set"))
ports[res->port_id].tso_segsz = res->tso_segsz;
 
+   rte_eth_dev_info_get(res->port_id, &dev_info);
+   if ((ports[res->port_id].tso_segsz != 0) &&
+   (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+   printf("Error: TSO is not supported by port %d\n",
+  res->port_id);
+   return;
+   }
+
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_TCP_TSO;
@@ -3881,24 +3922,25 @@ struct cmd_tunnel_tso_set_result {
portid_t port_id;
 };
 
-static void
+static struc

[dpdk-dev] [PATCH V9 2/5] eal: add uevent pass and process function

2018-01-10 Thread Jeff Guo
In order to handle uevents detected from the kernel side, add a uevent
process function. The hot plug event is used as an example to show how the
uevent mechanism passes and processes a uevent.

For uevent passing and processing, the functions below are added in the
Linux EAL dev layer. FreeBSD does not support uevent, so there they are
left as stubs with no implementation.
a.dev_uev_parse
b.dev_uev_receive
c.dev_uev_process
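
A rough, self-contained illustration (not the patch code) of what a
dev_uev_parse-style helper does with the NUL-separated "KEY=value" pairs
of a kernel uevent message:

    #include <stdio.h>
    #include <string.h>

    /* Walk the NUL-separated "KEY=value" fields of a uevent buffer and
     * extract the ACTION= and DEVNAME= values. */
    static void
    parse_uevent(const char *buf, size_t len,
                 char *action, char *devname, size_t out_len)
    {
            size_t i = 0;

            action[0] = '\0';
            devname[0] = '\0';
            while (i < len) {
                    const char *field = buf + i;

                    if (strncmp(field, "ACTION=", 7) == 0)
                            snprintf(action, out_len, "%s", field + 7);
                    else if (strncmp(field, "DEVNAME=", 8) == 0)
                            snprintf(devname, out_len, "%s", field + 8);
                    i += strlen(field) + 1; /* fields are NUL separated */
            }
    }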

Signed-off-by: Jeff Guo 
---
v9->v8:
split the patch set into small and explicit patches
---
 drivers/bus/pci/pci_common.c|  20 ++
 drivers/bus/vdev/vdev.c |  20 ++
 lib/librte_eal/common/eal_common_bus.c  |  28 
 lib/librte_eal/common/include/rte_bus.h |  36 ++
 lib/librte_eal/common/include/rte_dev.h |  21 ++
 lib/librte_eal/linuxapp/eal/eal_dev.c   | 120 +++-
 6 files changed, 243 insertions(+), 2 deletions(-)

diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 104fdf9..c4415a0 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -502,6 +502,25 @@ pci_find_device(const struct rte_device *start, 
rte_dev_cmp_t cmp,
return NULL;
 }
 
+static struct rte_device *
+pci_find_device_by_name(const struct rte_device *start,
+   rte_dev_cmp_name_t cmp_name,
+   const void *data)
+{
+   struct rte_pci_device *dev;
+
+   FOREACH_DEVICE_ON_PCIBUS(dev) {
+   if (start && &dev->device == start) {
+   start = NULL; /* starting point found */
+   continue;
+   }
+   if (cmp_name(dev->device.name, data) == 0)
+   return &dev->device;
+   }
+
+   return NULL;
+}
+
 static int
 pci_plug(struct rte_device *dev)
 {
@@ -528,6 +547,7 @@ struct rte_pci_bus rte_pci_bus = {
.scan = rte_pci_scan,
.probe = rte_pci_probe,
.find_device = pci_find_device,
+   .find_device_by_name = pci_find_device_by_name,
.plug = pci_plug,
.unplug = pci_unplug,
.parse = pci_parse,
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index fd7736d..cac2aa0 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -323,6 +323,25 @@ vdev_find_device(const struct rte_device *start, 
rte_dev_cmp_t cmp,
return NULL;
 }
 
+
+static struct rte_device *
+vdev_find_device_by_name(const struct rte_device *start,
+   rte_dev_cmp_name_t cmp_name,
+   const void *data)
+{
+   struct rte_vdev_device *dev;
+
+   TAILQ_FOREACH(dev, &vdev_device_list, next) {
+   if (start && &dev->device == start) {
+   start = NULL;
+   continue;
+   }
+   if (cmp_name(dev->device.name, data) == 0)
+   return &dev->device;
+   }
+   return NULL;
+}
+
 static int
 vdev_plug(struct rte_device *dev)
 {
@@ -339,6 +358,7 @@ static struct rte_bus rte_vdev_bus = {
.scan = vdev_scan,
.probe = vdev_probe,
.find_device = vdev_find_device,
+   .find_device_by_name = vdev_find_device_by_name,
.plug = vdev_plug,
.unplug = vdev_unplug,
.parse = vdev_parse,
diff --git a/lib/librte_eal/common/eal_common_bus.c b/lib/librte_eal/common/eal_common_bus.c
index 3e022d5..efd5539 100644
--- a/lib/librte_eal/common/eal_common_bus.c
+++ b/lib/librte_eal/common/eal_common_bus.c
@@ -51,6 +51,7 @@ rte_bus_register(struct rte_bus *bus)
RTE_VERIFY(bus->scan);
RTE_VERIFY(bus->probe);
RTE_VERIFY(bus->find_device);
+   RTE_VERIFY(bus->find_device_by_name);
/* Buses supporting driver plug also require unplug. */
RTE_VERIFY(!bus->plug || bus->unplug);
 
@@ -170,6 +171,14 @@ cmp_rte_device(const struct rte_device *dev1, const void 
*_dev2)
 }
 
 static int
+cmp_rte_device_name(const char *dev_name1, const void *_dev_name2)
+{
+   const char *dev_name2 = _dev_name2;
+
+   return strcmp(dev_name1, dev_name2);
+}
+
+static int
 bus_find_device(const struct rte_bus *bus, const void *_dev)
 {
struct rte_device *dev;
@@ -178,6 +187,25 @@ bus_find_device(const struct rte_bus *bus, const void 
*_dev)
return dev == NULL;
 }
 
+static struct rte_device *
+bus_find_device_by_name(const struct rte_bus *bus, const void *_dev_name)
+{
+   struct rte_device *dev;
+
+   dev = bus->find_device_by_name(NULL, cmp_rte_device_name, _dev_name);
+   return dev;
+}
+
+struct rte_device *
+
+rte_bus_find_device(const struct rte_bus *bus, const void *_dev_name)
+{
+   struct rte_device *dev;
+
+   dev = bus_find_device_by_name(bus, _dev_name);
+   return dev;
+}
+
 struct rte_bus *
 rte_bus_find_by_device(const struct rte_device *dev)
 {
diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h
inde

[dpdk-dev] [PATCH V9 1/5] eal: add uevent monitor api and callback func

2018-01-10 Thread Jeff Guo
This patch aims to add a general uevent mechanism in the EAL device layer
to enable monitoring of all Linux kernel object uevents. Users can use these
APIs to monitor and read out the device status info sent from the kernel
side and then handle it accordingly; for example, on a hotplug uevent type
the user can detach or attach the device. It also helps to implement smooth
fail-safe behaviour. (A usage sketch of these APIs is shown after the list
below.)

About uevent monitoring:
a: add an epoll instance to poll the netlink socket and monitor device
   uevents.
b: add the rte_eal_dev_event_type enum and the rte_eal_uevent struct.
c: add the APIs below in the rte EAL device layer:
   rte_dev_callback_register
   rte_dev_callback_unregister
   _rte_dev_callback_process
   rte_dev_monitor_start
   rte_dev_monitor_stop
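
Hypothetical usage of the APIs listed above (the names are those proposed
by this patch set, not an upstream release):

    #include <stddef.h>

    #include <rte_dev.h>

    /* Stub callback matching the signature used later in the series. */
    static int
    hotplug_cb(enum rte_dev_event_type type, void *cb_arg, void *ret_param)
    {
            (void)type;
            (void)cb_arg;
            (void)ret_param;
            return 0;
    }

    /* Register for hot-removal events on one device and start the
     * uevent monitor. */
    static int
    watch_device(struct rte_device *dev)
    {
            int ret;

            ret = rte_dev_callback_register(dev, RTE_DEV_EVENT_REMOVE,
                                            hotplug_cb, NULL);
            if (ret != 0)
                    return ret;
            return rte_dev_monitor_start();
    }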

Signed-off-by: Jeff Guo 
---
v9->v8:
split the patch set into small and explicit patches
---
 lib/librte_eal/bsdapp/eal/eal_dev.c|  36 
 .../bsdapp/eal/include/exec-env/rte_dev.h  |  39 
 lib/librte_eal/common/eal_common_dev.c | 150 ++
 lib/librte_eal/common/include/rte_dev.h|  98 +
 lib/librte_eal/linuxapp/eal/Makefile   |   3 +-
 lib/librte_eal/linuxapp/eal/eal_dev.c  | 223 +
 .../linuxapp/eal/include/exec-env/rte_dev.h|  39 
 7 files changed, 587 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_eal/bsdapp/eal/eal_dev.c
 create mode 100644 lib/librte_eal/bsdapp/eal/include/exec-env/rte_dev.h
 create mode 100644 lib/librte_eal/linuxapp/eal/eal_dev.c
 create mode 100644 lib/librte_eal/linuxapp/eal/include/exec-env/rte_dev.h

diff --git a/lib/librte_eal/bsdapp/eal/eal_dev.c b/lib/librte_eal/bsdapp/eal/eal_dev.c
new file mode 100644
index 000..7fdc2c0
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_dev.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "eal_thread.h"
+
+int
+rte_dev_monitor_start(void)
+{
+   return -1;
+}
+
+int
+rte_dev_monitor_stop(void)
+{
+   return -1;
+}
diff --git a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dev.h b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dev.h
new file mode 100644
index 000..70413b3
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dev.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_DEV_H_
+#error "don't include this file directly, please include generic "
+#endif
+
+#ifndef _RTE_LINUXAPP_DEV_H_
+#define _RTE_LINUXAPP_DEV_H_
+
+#include 
+
+#include 
+
+/**
+ * Start the device uevent monitoring.
+ *
+ * @param none
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+int
+rte_dev_monitor_start(void);
+
+/**
+ * Stop the device uevent monitoring .
+ *
+ * @param none
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+
+int
+rte_dev_monitor_stop(void);
+
+#endif /* _RTE_LINUXAPP_DEV_H_ */
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index dda8f58..24c410e 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -42,9 +42,32 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "eal_private.h"
 
+/* spinlock for device callbacks */
+static rte_spinlock_t rte_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/**
+ * The user application callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the event type.
+ */
+struct rte_eal_dev_callback {
+   TAILQ_ENTRY(rte_eal_dev_callback) next; /**< Callbacks list */
+   rte_eal_dev_cb_fn cb_fn;/**< Callback address */
+   void *cb_arg;   /**< Parameter for callback */
+   void *ret_param;/**< Return parameter */
+   enum rte_dev_event_type event;  /**< device event type */
+   uint32_t active;/**< Callback is executing */
+};
+
+/* A general callback for all new devices added onto the bus */
+static struct rte_eal_dev_callback *dev_add_cb;
+
 static int cmp_detached_dev_name(const struct rte_device *dev,
const void *_name)
 {
@@ -234,3 +257,130 @@ int rte_eal_hotplug_remove(const char *busname, const 
char *devname)
rte_eal_devargs_remove(busname, devname);
return ret;
 }
+
+int
+rte_dev_callback_register(struct rte_device *device,
+   enum rte_dev_event_type event,
+   rte_eal_dev_cb_fn cb_fn, void *cb_arg)
+{
+   struct rte_eal_dev_callback *user_cb;
+
+   if (!cb_fn || device == NULL)
+   return -EINVAL;
+
+   r

[dpdk-dev] [PATCH V9 0/5] add uevent mechanism in eal framework

2018-01-10 Thread Jeff Guo
So far, for hot plug in DPDK, we already have hot plug add/remove
APIs and a fail-safe driver to offload the fail-safe work from the app
user. But a general event API to detect all hotplug events for all drivers
is still missing; today the hotplug interrupt event differs between
devices and drivers, such as mlx4, PCI drivers and others.

Take the hot removal event as an example: not all PCI drivers expose the
remove interrupt, so in order to make the hot plug feature easy to use
for PCI drivers, something must be done to detect the remove event at the
kernel level and offer a new line of interrupt to user land.

Based on the uevent/kobject mechanism in the kernel, we can use it to
monitor the hot plug status of devices, covering not only uio/vfio PCI bus
devices but also others, such as cpu/usb/pci-express bus devices.

The idea is as below.

a.The uevent message from FD monitoring which will be useful:
remove@/devices/pci:80/:80:02.2/:82:00.0/:83:03.0/:84:00.2/uio/uio2
ACTION=remove
DEVPATH=/devices/pci:80/:80:02.2/:82:00.0/:83:03.0/:84:00.2/uio/uio2
SUBSYSTEM=uio
MAJOR=243
MINOR=2
DEVNAME=uio2
SEQNUM=11366

b.add a uevent monitoring mechanism:
add several general APIs to enable uevent monitoring (a minimal sketch of
the monitoring socket follows this list).

c.add a common uevent handler and a uevent failure handler:
a device uevent should be handled at the bus or device layer, and memory
read and write failures after hot removal should be handled correctly
before detaching.

d.show an example of how to use the uevent monitor:
enable uevent monitoring in testpmd or fail-safe to show the usage.
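
A minimal sketch of the kernel-side hookup described in (a) and (b): open
a NETLINK_KOBJECT_UEVENT socket, subscribe to the kernel uevent multicast
group and hand the fd to an epoll loop (details here are illustrative,
not the patch code):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>

    /* Open a netlink socket bound to the kernel uevent multicast group;
     * the returned fd can then be polled (e.g. with epoll) for uevents. */
    static int
    open_uevent_socket(void)
    {
            struct sockaddr_nl addr;
            int fd;

            fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
            if (fd < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.nl_family = AF_NETLINK;
            addr.nl_groups = 1; /* kernel uevent broadcast group */
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }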

patchset history:
v9->v8:
split the patch set into small and explicit patches

v8->v7:
1.use rte_service to replace pthread management.
2.fix define and copyright issues
3.fix some lock issues

v7->v6:
1.modify vdev part according to the vdev rework
2.re-define and split the func into common and bus specific code
3.fix some incorrect issues.
4.fix the system hang after sending packets.

v6->v5:
1.add hot plug policy: in eal, by default prepare the hot plug work for
all pci devices, then let the app decide which devices need
hot plug.
2.modify to manage event callbacks in each device.
3.fix some system hang issues when igb_uio is released.
4.modify the pci part to bus-pci based on the bus rework.
5.add hot plug policy in the app, showing an example of using a hotplug
list to decide which devices need hot plug.

v5->v4:
1.Move uevent monitor epolling from eal interrupt to eal device layer.
2.Redefine the eal device API for common, and distinguish between linux and bsd
3.Add a failure handler helper API in the bus layer. Add a function to find
a device by name.
4.Replace the individual fd bound to a single device with a common fd that
polls all devices.
5.Add registration of hot insertion monitoring and processing, add a
function to auto-bind the driver before the user adds the device.
6.Refine some coding style and typo issues.
7.add new callback to process hot insertion

v4->v3:
1.move uevent monitor api from eal interrupt to eal device layer.
2.create uevent type and struct in eal device.
3.move uevent handler for each driver to eal layer.
4.add uevent failure handler to process signal fault issue.
5.add example for request and use uevent monitoring in testpmd.

v3->v2:
1.refine some return error
2.refine the string searching logic to avoid memory issue

v2->v1:
1.remove the global hotplug_fd variable, add uevent_fd
in rte_intr_handle to let each pci device maintain its own fd,
to fix the dual device fd issue.
2.refine some typo errors.

Jeff Guo (5):
  eal: add uevent monitor api and callback func
  eal: add uevent pass and process function
  app/testpmd: use uevent to monitor hotplug
  pci_uio: add uevent hotplug failure handler in pci
  pci: add driver auto bind for hot insertion

 app/test-pmd/testpmd.c | 179 ++
 app/test-pmd/testpmd.h |   9 +
 drivers/bus/pci/bsd/pci.c  |  30 ++
 drivers/bus/pci/linux/pci.c|  87 +
 drivers/bus/pci/pci_common.c   |  43 +++
 drivers/bus/pci/pci_common_uio.c   |  28 ++
 drivers/bus/pci/private.h  |  12 +
 drivers/bus/pci/rte_bus_pci.h  |  25 ++
 drivers/bus/vdev/vdev.c|  36 ++
 lib/librte_eal/bsdapp/eal/eal_dev.c|  36 ++
 .../bsdapp/eal/include/exec-env/rte_dev.h  |  39 +++
 lib/librte_eal/common/eal_common_bus.c |  30 ++
 lib/librte_eal/common/eal_common_dev.c | 150 +
 lib/librte_eal/common/include/rte_bus.h|  71 
 lib/librte_eal/common/include/rte_dev.h| 119 +++
 lib/librte_eal/linuxapp/eal/Makefile   |   3 +-
 lib/librte_eal/linuxapp/eal/eal_dev.c  | 375 +
 .../linuxapp/eal/include/exec-env/rte_dev.h|  39 +++
 lib/librte_eal/linuxapp/igb_uio/igb

[dpdk-dev] [PATCH V9 3/5] app/testpmd: use uevent to monitor hotplug

2018-01-10 Thread Jeff Guo
Use testpmd as an example to show an application how to request and use
uevent monitoring to handle the hot removal event and the
hot insertion event.

Signed-off-by: Jeff Guo 
---
v9->v8:
split the patch set into small and explicit patches
---
 app/test-pmd/testpmd.c | 179 +
 app/test-pmd/testpmd.h |   9 +++
 2 files changed, 188 insertions(+)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 9414d0e..37c859a 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -373,6 +373,8 @@ uint8_t bitrate_enabled;
 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 
+static struct hotplug_request_list hp_list;
+
 /* Forward function declarations */
 static void map_port_queue_stats_mapping_registers(portid_t pi,
   struct rte_port *port);
@@ -380,6 +382,13 @@ static void check_all_ports_link_status(uint32_t 
port_mask);
 static int eth_event_callback(portid_t port_id,
  enum rte_eth_event_type type,
  void *param, void *ret_param);
+static int eth_uevent_callback(enum rte_dev_event_type type,
+ void *param, void *ret_param);
+static int eth_uevent_callback_register(portid_t pid);
+static int in_hotplug_list(const char *dev_name);
+
+static int hotplug_list_add(const char *dev_name,
+   enum rte_dev_event_type event);
 
 /*
  * Check if all the ports are started.
@@ -1729,6 +1738,32 @@ reset_port(portid_t pid)
printf("Done\n");
 }
 
+static int
+eth_uevent_callback_register(portid_t pid)
+{
+   int diag;
+   struct rte_eth_dev *dev;
+   enum rte_dev_event_type dev_event_type;
+
+   /* register the uevent callback */
+   dev = &rte_eth_devices[pid];
+   for (dev_event_type = RTE_DEV_EVENT_ADD;
+dev_event_type < RTE_DEV_EVENT_CHANGE;
+dev_event_type++) {
+   diag = rte_dev_callback_register(dev->device, dev_event_type,
+   eth_uevent_callback,
+   (void *)(intptr_t)pid);
+   if (diag) {
+   printf("Failed to setup uevent callback for"
+   " device event %d\n",
+   dev_event_type);
+   return -1;
+   }
+   }
+
+   return 0;
+}
+
 void
 attach_port(char *identifier)
 {
@@ -1745,6 +1780,8 @@ attach_port(char *identifier)
if (rte_eth_dev_attach(identifier, &pi))
return;
 
+   eth_uevent_callback_register(pi);
+
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
/* if socket_id is invalid, set to 0 */
if (check_socket_id(socket_id) < 0)
@@ -1756,6 +1793,8 @@ attach_port(char *identifier)
 
ports[pi].port_status = RTE_PORT_STOPPED;
 
+   hotplug_list_add(identifier, RTE_DEV_EVENT_REMOVE);
+
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
printf("Done\n");
 }
@@ -1782,6 +1821,9 @@ detach_port(portid_t port_id)
 
nb_ports = rte_eth_dev_count();
 
+   hotplug_list_add(rte_eth_devices[port_id].device->name,
+RTE_DEV_EVENT_ADD);
+
printf("Port '%s' is detached. Now total ports is %d\n",
name, nb_ports);
printf("Done\n");
@@ -1805,6 +1847,9 @@ pmd_test_exit(void)
close_port(pt_id);
}
}
+
+   rte_dev_monitor_stop();
+
printf("\nBye...\n");
 }
 
@@ -1889,6 +1934,49 @@ rmv_event_callback(void *arg)
dev->device->name);
 }
 
+static void
+rmv_uevent_callback(void *arg)
+{
+   char name[RTE_ETH_NAME_MAX_LEN];
+   uint8_t port_id = (intptr_t)arg;
+
+   rte_eal_alarm_cancel(rmv_uevent_callback, arg);
+
+   RTE_ETH_VALID_PORTID_OR_RET(port_id);
+   printf("removing port id:%u\n", port_id);
+
+   if (!in_hotplug_list(rte_eth_devices[port_id].device->name))
+   return;
+
+   stop_packet_forwarding();
+
+   stop_port(port_id);
+   close_port(port_id);
+   if (rte_eth_dev_detach(port_id, name)) {
+   RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
+   return;
+   }
+
+   nb_ports = rte_eth_dev_count();
+
+   printf("Port '%s' is detached. Now total ports is %d\n",
+   name, nb_ports);
+}
+
+static void
+add_uevent_callback(void *arg)
+{
+   char *dev_name = (char *)arg;
+
+   rte_eal_alarm_cancel(add_uevent_callback, arg);
+
+   if (!in_hotplug_list(dev_name))
+   return;
+
+   RTE_LOG(ERR, EAL, "add device: %s\n", dev_name);
+   attach_port(dev_name);
+}
+
 /* This function is used by the interrupt thread */
 static int
 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
@@ -1931,6 +2019,88 @@ eth_event_

[dpdk-dev] [PATCH V9 4/5] pci_uio: add uevent hotplug failure handler in pci

2018-01-10 Thread Jeff Guo
When a hot removal uevent of a device is detected, the device resources
become invalid. In order to avoid unexpected use of these resources, remap
the device resources to fake memory, so that the application keeps running
without hitting a system core dump.
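
The "fake memory" idea, as a minimal sketch (not the patch code): map
anonymous pages over the now-invalid BAR mapping with MAP_FIXED so that
stray accesses hit dummy memory instead of faulting.

    #include <stddef.h>
    #include <sys/mman.h>

    /* Replace an invalid device mapping with anonymous dummy pages at
     * the same virtual address. */
    static void *
    remap_as_dummy(void *bar_addr, size_t bar_len)
    {
            return mmap(bar_addr, bar_len, PROT_READ | PROT_WRITE,
                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }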

Signed-off-by: Jeff Guo 
---
v9->v8:
split the patch set into small and explicit patches
---
 drivers/bus/pci/bsd/pci.c | 23 
 drivers/bus/pci/linux/pci.c   | 34 ++
 drivers/bus/pci/pci_common.c  | 22 +++
 drivers/bus/pci/pci_common_uio.c  | 28 +
 drivers/bus/pci/private.h | 12 +++
 drivers/bus/pci/rte_bus_pci.h | 11 ++
 drivers/bus/vdev/vdev.c   |  9 +++-
 lib/librte_eal/common/eal_common_bus.c|  1 +
 lib/librte_eal/common/include/rte_bus.h   | 17 +++
 lib/librte_eal/linuxapp/eal/eal_dev.c | 35 ---
 lib/librte_eal/linuxapp/igb_uio/igb_uio.c |  6 ++
 lib/librte_pci/rte_pci.c  | 20 ++
 lib/librte_pci/rte_pci.h  | 17 +++
 13 files changed, 231 insertions(+), 4 deletions(-)

diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index 655b34b..d7165b9 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -97,6 +97,29 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
}
 }
 
+/* re-map pci device */
+int
+rte_pci_remap_device(struct rte_pci_device *dev)
+{
+   int ret;
+
+   if (dev == NULL)
+   return -EINVAL;
+
+   switch (dev->kdrv) {
+   case RTE_KDRV_NIC_UIO:
+   ret = pci_uio_remap_resource(dev);
+   break;
+   default:
+   RTE_LOG(DEBUG, EAL,
+   "  Not managed by a supported kernel driver, 
skipped\n");
+   ret = 1;
+   break;
+   }
+
+   return ret;
+}
+
 void
 pci_uio_free_resource(struct rte_pci_device *dev,
struct mapped_pci_resource *uio_res)
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 25f907e..7aa3079 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -116,6 +116,38 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
}
 }
 
+/* Map pci device */
+int
+rte_pci_remap_device(struct rte_pci_device *dev)
+{
+   int ret = -1;
+
+   if (dev == NULL)
+   return -EINVAL;
+
+   switch (dev->kdrv) {
+   case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+   /* no thing to do */
+#endif
+   break;
+   case RTE_KDRV_IGB_UIO:
+   case RTE_KDRV_UIO_GENERIC:
+   if (rte_eal_using_phys_addrs()) {
+   /* map resources for devices that use uio */
+   ret = pci_uio_remap_resource(dev);
+   }
+   break;
+   default:
+   RTE_LOG(DEBUG, EAL,
+   "  Not managed by a supported kernel driver, 
skipped\n");
+   ret = 1;
+   break;
+   }
+
+   return ret;
+}
+
 void *
 pci_find_max_end_va(void)
 {
@@ -357,6 +389,8 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr 
*addr)
rte_pci_add_device(dev);
}
 
+   dev->device.state = RTE_DEV_PARSED;
+   TAILQ_INIT(&(dev->device.uev_cbs));
return 0;
 }
 
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index c4415a0..3fbe9d7 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -282,6 +282,7 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
if (rc > 0)
/* positive value means driver doesn't support it */
continue;
+   dev->device.state = RTE_DEV_PROBED;
return 0;
}
return 1;
@@ -481,6 +482,7 @@ rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
 void
 rte_pci_remove_device(struct rte_pci_device *pci_dev)
 {
+   RTE_LOG(DEBUG, EAL, " rte_pci_remove_device for device list\n");
TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
 }
 
@@ -522,6 +524,25 @@ pci_find_device_by_name(const struct rte_device *start,
 }
 
 static int
+pci_remap_device(struct rte_device *dev)
+{
+   struct rte_pci_device *pdev;
+   int ret;
+
+   if (dev == NULL)
+   return -EINVAL;
+
+   pdev = RTE_DEV_TO_PCI(dev);
+
+   /* remap resources for devices that use igb_uio */
+   ret = rte_pci_remap_device(pdev);
+   if (ret != 0)
+   RTE_LOG(ERR, EAL, "failed to remap device %s",
+   dev->name);
+   return ret;
+}
+
+static int
 pci_plug(struct rte_device *dev)
 {
return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
@@ -552,6 +573,7 @@ struct rte_pci_bus rte_pci_bus = {
.unplug = pci_unplug,
 

[dpdk-dev] [PATCH V9 5/5] pci: add driver auto bind for hot insertion

2018-01-10 Thread Jeff Guo
Normally we bind the NIC driver before the application runs, so to bind
the driver automatically after the application has started, an auto-bind
function needs to be implemented. This benefits the hot insertion case:
when a hot insertion uevent of a device is detected, the driver is bound
automatically according to a user policy and the device is then attached,
letting the application keep running smoothly when hotplug events occur.

Signed-off-by: Jeff Guo 
---
v8->v9:
split the patch set into smaller, more explicit patches
---
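
For illustration, a minimal sketch of how the new hook could be driven from a
hotplug event handler; the handler and the "igb_uio" policy default are
assumptions, only rte_pci_dev_bind_driver() itself comes from this patch:

    /* On a hot-insertion uevent, bind the device to the driver chosen by
     * the user policy, then let the PCI bus probe/attach it as usual.
     */
    static void
    handle_hot_insertion(const char *pci_name)
    {
            if (rte_pci_dev_bind_driver(pci_name, "igb_uio") != 0) {
                    RTE_LOG(ERR, EAL, "auto bind failed for %s\n", pci_name);
                    return;
            }
            /* device is now ready to be attached by the hotplug code */
    }
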
 drivers/bus/pci/bsd/pci.c   |  7 +
 drivers/bus/pci/linux/pci.c | 53 +
 drivers/bus/pci/pci_common.c|  1 +
 drivers/bus/pci/rte_bus_pci.h   | 14 +
 drivers/bus/vdev/vdev.c |  9 ++
 lib/librte_eal/common/eal_common_bus.c  |  1 +
 lib/librte_eal/common/include/rte_bus.h | 18 +++
 lib/librte_eal/linuxapp/eal/eal_dev.c   |  7 +
 8 files changed, 110 insertions(+)

diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index d7165b9..2d1d24f 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -672,3 +672,10 @@ rte_pci_ioport_unmap(struct rte_pci_ioport *p)
 
return ret;
 }
+
+int
+rte_pci_dev_bind_driver(const char *dev_name, const char *drv_type)
+{
+   return -1;
+}
+
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 7aa3079..cec1489 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -859,3 +859,56 @@ rte_pci_ioport_unmap(struct rte_pci_ioport *p)
 
return ret;
 }
+
+int
+rte_pci_dev_bind_driver(const char *dev_name, const char *drv_type)
+{
+   char drv_bind_path[1024];
+   char drv_override_path[1024]; /* sysfs driver_override path */
+   int drv_override_fd = -1;
+   int drv_bind_fd = -1;
+
+   RTE_SET_USED(drv_type);
+
+   snprintf(drv_override_path, sizeof(drv_override_path),
+   "/sys/bus/pci/devices/%s/driver_override", dev_name);
+
+   /* specify the driver for a device by writing to driver_override */
+   drv_override_fd = open(drv_override_path, O_WRONLY);
+   if (drv_override_fd < 0) {
+   RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+   drv_override_path, strerror(errno));
+   goto err;
+   }
+
+   if (write(drv_override_fd, drv_type, strlen(drv_type)) < 0) {
+   RTE_LOG(ERR, EAL,
+   "Error: bind failed - Cannot write "
+   "driver %s to device %s\n", drv_type, dev_name);
+   goto err;
+   }
+
+   close(drv_override_fd);
+   drv_override_fd = -1;
+
+   snprintf(drv_bind_path, sizeof(drv_bind_path),
+   "/sys/bus/pci/drivers/%s/bind", drv_type);
+
+   /* do the bind by writing device to the specific driver  */
+   drv_bind_fd = open(drv_bind_path, O_WRONLY | O_APPEND);
+   if (drv_bind_fd < 0) {
+   RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+   drv_bind_path, strerror(errno));
+   goto err;
+   }
+
+   if (write(drv_bind_fd, dev_name, strlen(dev_name)) < 0)
+   goto err;
+
+   close(drv_bind_fd);
+   return 0;
+err:
+   if (drv_override_fd >= 0)
+   close(drv_override_fd);
+   if (drv_bind_fd >= 0)
+   close(drv_bind_fd);
+   return -1;
+}
+
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 3fbe9d7..54601a9 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -574,6 +574,7 @@ struct rte_pci_bus rte_pci_bus = {
.parse = pci_parse,
.get_iommu_class = rte_pci_get_iommu_class,
.remap_device = pci_remap_device,
+   .bind_driver = rte_pci_dev_bind_driver,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 65337eb..1662f3b 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -344,6 +344,20 @@ void rte_pci_ioport_read(struct rte_pci_ioport *p,
 void rte_pci_ioport_write(struct rte_pci_ioport *p,
const void *data, size_t len, off_t offset);
 
+/**
+ * It can be used to bind a device to a specific type of driver.
+ *
+ * @param dev_name
+ *  The device name.
+ * @param drv_type
+ *  The specific driver's type.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_pci_dev_bind_driver(const char *dev_name, const char *drv_type);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index c9cd369..773f6e0 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -349,6 +349,14 @@ vdev_remap_device(struct rte_device *dev)
 }
 
 static int
+vdev_bind_driver(const char *dev_name, const char *drv_type)
+{
+   RTE_SET_USED(dev_name);
+   RTE_SET_USED(drv_type);
+   return 0;
+}
+
+static i

[dpdk-dev] [PATCH v3 1/7] net/mlx5: change pkt burst select function prototype

2018-01-10 Thread Shahaf Shuler
Change the function prototype to return the function pointer of the
selected Tx/Rx burst function instead of assigning it directly to the
device context.

This change makes it possible to use these select functions to query which
burst function will be selected for a given device configuration.

Signed-off-by: Shahaf Shuler 
Acked-by: Nelio Laranjeiro 
---
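
As a usage note, a minimal sketch of what the new prototypes allow, i.e.
querying the selection without touching the device context (priv and dev are
assumed valid; the comparison target is only an example):

    /* Ask which Rx burst function the current configuration would select. */
    eth_rx_burst_t rx_burst = priv_select_rx_function(priv, dev);

    if (rx_burst == mlx5_rx_burst_vec)
            DEBUG("vectorized Rx would be selected");
    /* Assign only when actually (re)starting the device. */
    dev->rx_pkt_burst = rx_burst;
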
 drivers/net/mlx5/mlx5.c | 11 --
 drivers/net/mlx5/mlx5.h |  4 ++--
 drivers/net/mlx5/mlx5_ethdev.c  | 41 +---
 drivers/net/mlx5/mlx5_trigger.c |  4 ++--
 4 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cd66fe162..0192815f2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -712,8 +712,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct 
rte_pci_device *pci_dev)
err = -err;
goto error;
}
-   priv_dev_select_rx_function(priv, eth_dev);
-   priv_dev_select_tx_function(priv, eth_dev);
+   /*
+* Ethdev pointer is still required as input since
+* the primary device is not accessible from the
+* secondary process.
+*/
+   eth_dev->rx_pkt_burst =
+   priv_select_rx_function(priv, eth_dev);
+   eth_dev->tx_pkt_burst =
+   priv_select_tx_function(priv, eth_dev);
continue;
}
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b823..3e3259b55 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -206,8 +206,8 @@ void priv_dev_interrupt_handler_uninstall(struct priv *, 
struct rte_eth_dev *);
 void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
 int mlx5_set_link_down(struct rte_eth_dev *dev);
 int mlx5_set_link_up(struct rte_eth_dev *dev);
-void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
-void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
+eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
+eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
 
 /* mlx5_mac.c */
 
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 282ef241e..28183534a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1325,8 +1325,8 @@ priv_dev_set_link(struct priv *priv, struct rte_eth_dev 
*dev, int up)
err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
if (err)
return err;
-   priv_dev_select_tx_function(priv, dev);
-   priv_dev_select_rx_function(priv, dev);
+   dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+   dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
} else {
err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
if (err)
@@ -1386,32 +1386,36 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
  *   Pointer to private data structure.
  * @param dev
  *   Pointer to rte_eth_dev structure.
+ *
+ * @return
+ *   Pointer to selected Tx burst function.
  */
-void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_tx_burst_t
+priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev 
*dev)
 {
+   eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+
assert(priv != NULL);
-   assert(dev != NULL);
-   dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
-   dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+   tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
-   dev->tx_pkt_burst = mlx5_tx_burst_vec;
+   tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
-   dev->tx_pkt_burst = mlx5_tx_burst_empw;
+   tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
} else if (priv->mps && priv->txq_inline) {
-   dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+   tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
} else if (priv->mps) {
-   dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+   tx_pkt_burst = mlx5_tx_burst_mpw;
D

[dpdk-dev] [PATCH v3 3/7] net/mlx5: rename counter set in configuration

2018-01-10 Thread Shahaf Shuler
From: Nelio Laranjeiro 

counter_set is a counter used for flows when its support is available.
Rename it to flow counter to better reflect its purpose.

Signed-off-by: Nelio Laranjeiro 
---
 drivers/net/mlx5/mlx5.c  | 3 +--
 drivers/net/mlx5/mlx5.h  | 2 +-
 drivers/net/mlx5/mlx5_flow.c | 2 +-
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fdd4710f1..ca44a0a59 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -759,8 +759,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct 
rte_pci_device *pci_dev)
  (config.hw_csum_l2tun ? "" : "not "));
 
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-   config.counter_set_supported =
-   !!(device_attr.max_counter_sets);
+   config.flow_counter_en = !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
  cs_desc.counter_type, cs_desc.num_of_cs,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f0b2557..171b3a933 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -110,7 +110,7 @@ struct mlx5_dev_config {
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
unsigned int mps:2; /* Multi-packet send supported mode. */
unsigned int tunnel_en:1; /* Whether tunnel is supported. */
-   unsigned int counter_set_supported:1; /* Counter set is supported. */
+   unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is enabled. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ec179bd30..305b2ec01 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -778,7 +778,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
-  priv->config.counter_set_supported) {
+  priv->config.flow_counter_en) {
parser->count = 1;
} else {
goto exit_action_not_supported;
-- 
2.12.0



[dpdk-dev] [PATCH v3 2/7] net/mlx5: add device configuration structure

2018-01-10 Thread Shahaf Shuler
Move the device configuration and feature capabilities to their own
structure. This structure is filled by mlx5_pci_probe(); outside of that
function it should be treated as *read only*.

This configuration struct will be used during Tx/Rx queue setup to select
the queue parameters based on the user configuration and device
capabilities. In addition, it will be used by the burst selection functions
to decide on the best packet burst function to use.

Signed-off-by: Shahaf Shuler 
Signed-off-by: Nelio Laranjeiro 
---
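
For illustration, a sketch of the intended read-only use after probe time;
the surrounding logic is hypothetical, the field names follow the structure
added below:

    /* Queue setup consults the probe-time configuration, never writes it. */
    struct mlx5_dev_config *config = &priv->config;
    int use_vec_tx;

    use_vec_tx = config->tx_vec_en && (config->mps == MLX5_MPW_ENHANCED);
    (void)use_vec_tx; /* decision feeds the Tx queue/burst selection */
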
 drivers/net/mlx5/mlx5.c  | 178 +++--
 drivers/net/mlx5/mlx5.h  |  53 ++
 drivers/net/mlx5/mlx5_ethdev.c   |  26 ++---
 drivers/net/mlx5/mlx5_flow.c |   2 +-
 drivers/net/mlx5/mlx5_rxq.c  |  24 +++--
 drivers/net/mlx5/mlx5_rxtx_vec.c |  10 +-
 drivers/net/mlx5/mlx5_txq.c  | 182 ++
 drivers/net/mlx5/mlx5_vlan.c |   4 +-
 8 files changed, 248 insertions(+), 231 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0192815f2..fdd4710f1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,9 +94,6 @@
 /* Device parameter to enable hardware Rx vector. */
 #define MLX5_RX_VEC_EN "rx_vec_en"
 
-/* Default PMD specific parameter value. */
-#define MLX5_ARG_UNSET (-1)
-
 #ifndef HAVE_IBV_MLX5_MOD_MPW
 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -106,17 +103,6 @@
 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
 #endif
 
-struct mlx5_args {
-   int cqe_comp;
-   int txq_inline;
-   int txqs_inline;
-   int mps;
-   int mpw_hdr_dseg;
-   int inline_max_packet_sz;
-   int tso;
-   int tx_vec_en;
-   int rx_vec_en;
-};
 /**
  * Retrieve integer value from environment variable.
  *
@@ -399,7 +385,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
 static int
 mlx5_args_check(const char *key, const char *val, void *opaque)
 {
-   struct mlx5_args *args = opaque;
+   struct mlx5_dev_config *config = opaque;
unsigned long tmp;
 
errno = 0;
@@ -409,23 +395,23 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
-   args->cqe_comp = !!tmp;
+   config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
-   args->txq_inline = tmp;
+   config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
-   args->txqs_inline = tmp;
+   config->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
-   args->mps = !!tmp;
+   config->mps = !!tmp ? config->mps : 0;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
-   args->mpw_hdr_dseg = !!tmp;
+   config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
-   args->inline_max_packet_sz = tmp;
+   config->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
-   args->tso = !!tmp;
+   config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
-   args->tx_vec_en = !!tmp;
+   config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
-   args->rx_vec_en = !!tmp;
+   config->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -436,8 +422,8 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
 /**
  * Parse device parameters.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param config
+ *   Pointer to device configuration structure.
  * @param devargs
  *   Device arguments structure.
  *
@@ -445,7 +431,7 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
  *   0 on success, errno value on failure.
  */
 static int
-mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
@@ -473,7 +459,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs 
*devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
-mlx5_args_check, args);
+mlx5_args_check, config);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
@@ -487,38 +473,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs 
*devargs)
 static struct rte_pci_driver mlx5_driver;
 
 /**
- 

[dpdk-dev] [PATCH v3 0/7] convert mlx PMDs to new ethdev offloads API

2018-01-10 Thread Shahaf Shuler
This series converts the mlx4 and mlx5 PMDs to the new ethdev offloads API [1].

On v3:
 - address almost all of Adrien's comments on mlx4.

On v2:
 - New design to hold PMD specific args and combine
   them with offloads requested.
 - Fix missing IPV4 checksum flag on vector function selection.
 - Verify Txq flags ignore bit before checking for valid offloads
   configuration.
 - Removed strict offloads check from mlx4. 

[1] http://dpdk.org/ml/archives/dev/2017-October/077329.html
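
For reference, what the conversion means on the application side, moving from
the deprecated rxmode bit fields to the offloads bitmaps (a sketch; port_id,
nb_rxq and nb_txq are assumed to be defined by the application):

    struct rte_eth_conf conf = { 0 };
    int ret;

    /* Old style (deprecated): conf.rxmode.hw_ip_checksum = 1; etc. */
    conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                           DEV_RX_OFFLOAD_VLAN_STRIP;
    conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                           DEV_TX_OFFLOAD_TCP_CKSUM;
    ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
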

Nelio Laranjeiro (1):
  net/mlx5: rename counter set in configuration

Shahaf Shuler (6):
  net/mlx5: change pkt burst select function prototype
  net/mlx5: add device configuration structure
  net/mlx5: convert to new Tx offloads API
  net/mlx5: convert to new Rx offloads API
  net/mlx4: convert to new Tx offloads API
  net/mlx4: convert to new Rx offloads API

 doc/guides/nics/mlx5.rst |  15 +-
 drivers/net/mlx4/mlx4_ethdev.c   |  16 +--
 drivers/net/mlx4/mlx4_flow.c |   5 +-
 drivers/net/mlx4/mlx4_rxq.c  |  77 ++-
 drivers/net/mlx4/mlx4_rxtx.h |   3 +
 drivers/net/mlx4/mlx4_txq.c  |  69 -
 drivers/net/mlx5/mlx5.c  | 190 +
 drivers/net/mlx5/mlx5.h  |  57 +---
 drivers/net/mlx5/mlx5_ethdev.c   | 113 ---
 drivers/net/mlx5/mlx5_flow.c |   2 +-
 drivers/net/mlx5/mlx5_rxq.c  | 124 ++---
 drivers/net/mlx5/mlx5_rxtx.c |   6 +-
 drivers/net/mlx5/mlx5_rxtx.h |  10 +-
 drivers/net/mlx5/mlx5_rxtx_vec.c |  40 +++---
 drivers/net/mlx5/mlx5_rxtx_vec.h |  12 ++
 drivers/net/mlx5/mlx5_trigger.c  |   4 +-
 drivers/net/mlx5/mlx5_txq.c  | 254 +-
 drivers/net/mlx5/mlx5_vlan.c |   7 +-
 18 files changed, 661 insertions(+), 343 deletions(-)

-- 
2.12.0



[dpdk-dev] [PATCH v3 4/7] net/mlx5: convert to new Tx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API.

Signed-off-by: Shahaf Shuler 
Acked-by: Nelio Laranjeiro 
---
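
On the application side, the per-queue offloads are taken from conf->offloads
only when ETH_TXQ_FLAGS_IGNORE is set (otherwise the legacy txq_flags path
applies); a sketch, with port_id, queue_id, nb_desc and socket_id assumed to
be defined:

    struct rte_eth_txconf txconf = {
            .txq_flags = ETH_TXQ_FLAGS_IGNORE, /* use the new offloads field */
            .offloads = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM,
    };

    int ret = rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, socket_id,
                                     &txconf);
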
 doc/guides/nics/mlx5.rst | 15 +++
 drivers/net/mlx5/mlx5.c  | 18 ++--
 drivers/net/mlx5/mlx5.h  |  2 +-
 drivers/net/mlx5/mlx5_ethdev.c   | 37 
 drivers/net/mlx5/mlx5_rxtx.c |  6 ++-
 drivers/net/mlx5/mlx5_rxtx.h |  7 +--
 drivers/net/mlx5/mlx5_rxtx_vec.c | 32 +++---
 drivers/net/mlx5/mlx5_rxtx_vec.h | 12 ++
 drivers/net/mlx5/mlx5_txq.c  | 80 ---
 9 files changed, 142 insertions(+), 67 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 154db64d7..bdc2216c0 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -262,8 +262,9 @@ Run-time configuration
   Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
   in the same descriptor.
 
-  This option cannot be used in conjunction with ``tso`` below. When ``tso``
-  is set, ``txq_mpw_en`` is disabled.
+  This option cannot be used with certain offloads such as 
``DEV_TX_OFFLOAD_TCP_TSO,
+  DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, 
DEV_TX_OFFLOAD_VLAN_INSERT``.
+  When those offloads are requested the MPS send function will not be used.
 
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
@@ -284,17 +285,15 @@ Run-time configuration
 
   Effective only when Enhanced MPS is supported. The default value is 256.
 
-- ``tso`` parameter [int]
-
-  A nonzero value enables hardware TSO.
-  When hardware TSO is enabled, packets marked with TCP segmentation
-  offload will be divided into segments by the hardware. Disabled by default.
-
 - ``tx_vec_en`` parameter [int]
 
   A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
   global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
 
+  This option cannot be used with certain offloads such as 
``DEV_TX_OFFLOAD_TCP_TSO,
+  DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, 
DEV_TX_OFFLOAD_VLAN_INSERT``.
+  When those offloads are requested the MPS send function will not be used.
+
   Enabled by default on ConnectX-5.
 
 - ``rx_vec_en`` parameter [int]
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ca44a0a59..1c95f3520 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -85,9 +85,6 @@
 /* Device parameter to limit the size of inlining packet. */
 #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
 
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
 /* Device parameter to enable hardware Tx vector. */
 #define MLX5_TX_VEC_EN "tx_vec_en"
 
@@ -406,8 +403,6 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
config->inline_max_packet_sz = tmp;
-   } else if (strcmp(MLX5_TSO, key) == 0) {
-   config->tso = !!tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -440,7 +435,6 @@ mlx5_args(struct mlx5_dev_config *config, struct 
rte_devargs *devargs)
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
-   MLX5_TSO,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
NULL,
@@ -629,7 +623,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct 
rte_pci_device *pci_dev)
.cqe_comp = cqe_comp,
.mps = mps,
.tunnel_en = tunnel_en,
-   .tso = 0,
.tx_vec_en = 1,
.rx_vec_en = 1,
.mpw_hdr_dseg = 0,
@@ -793,10 +786,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct 
rte_pci_device *pci_dev)
 
priv_get_num_vfs(priv, &num_vfs);
config.sriov = (num_vfs || sriov);
-   if (config.tso)
-   config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
+   config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
if (config.tso)
config.tso_max_payload_sz =
device_attr_ex.tso_caps.max_tso;
@@ -805,10 +797,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct 
rte_pci_device *pci_dev)
  " (" MLX

[dpdk-dev] [PATCH v3 7/7] net/mlx4: convert to new Rx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

This commit adds support for the new Rx offloads API.

Signed-off-by: Shahaf Shuler 
---
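
The per-queue validation added here relies on an XOR check,
!((mandatory ^ requested) & supported): a queue configuration is rejected
only when a PMD-supported offload is set at one level (port or queue) but
not the other. A worked example using a flag from this patch:

    uint64_t mandatory = DEV_RX_OFFLOAD_VLAN_FILTER; /* port-level config  */
    uint64_t requested = 0;                          /* queue-level config */
    uint64_t supported = DEV_RX_OFFLOAD_VLAN_FILTER; /* from the PMD       */

    /* The bit differs between port and queue and is supported, so the
     * check evaluates to 0 and queue setup fails with ENOTSUP, as intended.
     */
    int valid = !((mandatory ^ requested) & supported);
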
 drivers/net/mlx4/mlx4_ethdev.c |  9 ++---
 drivers/net/mlx4/mlx4_flow.c   |  5 ++-
 drivers/net/mlx4/mlx4_rxq.c| 77 ++---
 drivers/net/mlx4/mlx4_rxtx.h   |  2 +
 4 files changed, 80 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 3602f0ad8..c80eab5a8 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -766,13 +766,10 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
-   info->rx_offload_capa = 0;
info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
-   if (priv->hw_csum) {
-   info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
-   }
+   info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv);
+   info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) |
+info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 69025da42..96a6a6fa7 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1232,7 +1232,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t 
vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1300,7 +1300,8 @@ mlx4_flow_internal(struct priv *priv, struct 
rte_flow_error *error)
};
struct ether_addr *rule_mac = ð_spec.dst;
rte_be16_t *rule_vlan =
-   priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+   (priv->dev->data->dev_conf.rxmode.offloads &
+DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56f..98ab1d266 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -663,6 +663,63 @@ mlx4_rxq_detach(struct rxq *rxq)
 }
 
 /**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_queue_offloads(struct priv *priv)
+{
+   uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+   if (priv->hw_csum)
+   offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+   return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(struct priv *priv)
+{
+   uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+   (void)priv;
+   return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param requested
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+   uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
+   uint64_t supported = mlx4_get_rx_port_offloads(priv);
+
+   return !((mandatory ^ requested) & supported);
+}
+
+/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -707,6 +764,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, 
uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
  (void *)dev, idx, desc);
+   if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
+   rte_errno = ENOTSUP;
+   ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_get_rx_port_offloads(priv) |
+  mlx4_get_rx_queue_offloads(priv)));
+   return -rte_errno;
+   }
if (idx >= dev->dat

[dpdk-dev] [PATCH v3 6/7] net/mlx4: convert to new Tx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API.

Signed-off-by: Shahaf Shuler 
---
 drivers/net/mlx4/mlx4_ethdev.c |  7 +---
 drivers/net/mlx4/mlx4_rxtx.h   |  1 +
 drivers/net/mlx4/mlx4_txq.c| 69 +++--
 3 files changed, 68 insertions(+), 9 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4f..3602f0ad8 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *info)
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
-   info->tx_offload_capa = 0;
+   info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
if (priv->hw_csum) {
-   info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
  DEV_RX_OFFLOAD_UDP_CKSUM |
  DEV_RX_OFFLOAD_TCP_CKSUM);
}
-   if (priv->hw_csum_l2tun)
-   info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index b93e2bcda..bff5ae43d 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -180,6 +180,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct 
rte_mbuf **pkts,
 
 /* mlx4_txq.c */
 
+uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
 int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index d651e4980..7664c3e1a 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -41,6 +41,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /* Verbs headers do not support -pedantic. */
 #ifdef PEDANTIC
@@ -182,6 +183,50 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct 
mlx4dv_obj *mlxdv)
 }
 
 /**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_tx_port_offloads(struct priv *priv)
+{
+   uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+   if (priv->hw_csum) {
+   offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+DEV_TX_OFFLOAD_UDP_CKSUM |
+DEV_TX_OFFLOAD_TCP_CKSUM);
+   }
+   if (priv->hw_csum_l2tun)
+   offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+   return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param requested
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+   uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
+   uint64_t supported = mlx4_get_tx_port_offloads(priv);
+
+   return !((mandatory ^ requested) & supported);
+}
+
+/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -229,9 +274,22 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, 
uint16_t desc,
};
int ret;
 
-   (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
  (void *)dev, idx, desc);
+   /*
+* Don't verify port offloads for application which
+* use the old API.
+*/
+   if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+   !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
+   rte_errno = ENOTSUP;
+   ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_get_tx_port_offloads(priv));
+   return -rte_errno;
+   }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -281,8 +339,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, 
uint16_t desc,
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 

[dpdk-dev] [PATCH v3 5/7] net/mlx5: convert to new Rx offloads API

2018-01-10 Thread Shahaf Shuler
Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

This commit adds support for the new Rx offloads API.

Signed-off-by: Shahaf Shuler 
Acked-by: Nelio Laranjeiro 
---
 drivers/net/mlx5/mlx5_ethdev.c |  23 +---
 drivers/net/mlx5/mlx5_rxq.c| 106 +++-
 drivers/net/mlx5/mlx5_rxtx.h   |   3 +
 drivers/net/mlx5/mlx5_vlan.c   |   3 +-
 4 files changed, 111 insertions(+), 24 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7b1b7aa0e..278a4dfc3 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -553,6 +553,10 @@ dev_configure(struct rte_eth_dev *dev)
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+   uint64_t supp_rx_offloads =
+   (mlx5_priv_get_rx_port_offloads(priv) |
+mlx5_priv_get_rx_queue_offloads(priv));
+   uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
ERROR("Some Tx offloads are not supported "
@@ -560,6 +564,12 @@ dev_configure(struct rte_eth_dev *dev)
  tx_offloads, supp_tx_offloads);
return ENOTSUP;
}
+   if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+   ERROR("Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, supp_rx_offloads);
+   return ENOTSUP;
+   }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 rss_hash_default_key_len)) {
@@ -671,15 +681,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
-   info->rx_offload_capa =
-   (config->hw_csum ?
-(DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
-0) |
-   (priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
-   DEV_RX_OFFLOAD_TIMESTAMP;
-
+   info->rx_queue_offload_capa =
+   mlx5_priv_get_rx_queue_offloads(priv);
+   info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+info->rx_queue_offload_capa);
info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 057156d84..950472754 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -213,6 +213,78 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 
 /**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+{
+   struct mlx5_dev_config *config = &priv->config;
+   uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+DEV_RX_OFFLOAD_TIMESTAMP |
+DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+   if (config->hw_fcs_strip)
+   offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+   if (config->hw_csum)
+   offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+DEV_RX_OFFLOAD_UDP_CKSUM |
+DEV_RX_OFFLOAD_TCP_CKSUM);
+   if (config->hw_vlan_strip)
+   offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+   return offloads;
+}
+
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @return
+ *   Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+{
+   uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+   return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param offloads
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+   uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+   uint64_t queue_supp_offloads =
+   mlx5_priv_get_rx_queue_offloads(priv);
+   uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
+
+   if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+   offloads)
+   return 0;
+   if (((port_offloads ^ offloads) &

[dpdk-dev] [PATCH] doc: minor updates to the enic guide

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

Fix typos, inconsistencies, duplicate text, and so on.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 doc/guides/nics/enic.rst | 52 +---
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index cb5ae1250..22df466b4 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -55,6 +55,9 @@ Configuration information
   - **CONFIG_RTE_LIBRTE_ENIC_DEBUG** (default n): Enables or disables debug
 logging within the ENIC PMD driver.
 
+  - **CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW** (default n): Enables or disables flow
+API related debug logging within the ENIC PMD driver.
+
 - **vNIC Configuration Parameters**
 
   - **Number of Queues**
@@ -66,7 +69,7 @@ Configuration information
 These values should be configured as follows:
 
 - The number of WQs should be greater or equal to the value of the
-  expected nb_tx_q parameter in the call to the
+  expected nb_tx_q parameter in the call to
   rte_eth_dev_configure()
 
 - The number of RQs configured in the vNIC should be greater or
@@ -88,7 +91,7 @@ Configuration information
   - **Size of Queues**
 
 Likewise, the number of receive and transmit descriptors are configurable 
on
-a per vNIC bases via the UCS Manager and should be greater than or equal to
+a per-vNIC basis via the UCS Manager and should be greater than or equal to
 the nb_rx_desc and   nb_tx_desc parameters expected to be used in the calls
 to rte_eth_rx_queue_setup() and rte_eth_tx_queue_setup() respectively.
 An application requesting more than the set size will be limited to that
@@ -101,7 +104,7 @@ Configuration information
 
 - *Note*: Since the introduction of Rx scatter, for performance
   reasons, this PMD uses two RQs on the vNIC per receive queue in
-  DPDK.  One RQ holds descriptors for the start of a packet the
+  DPDK.  One RQ holds descriptors for the start of a packet, and the
   second RQ holds the descriptors for the rest of the fragments of
   a packet.  This means that the nb_rx_desc parameter to
   rte_eth_rx_queue_setup() can be a greater than 4096.  The exact
@@ -110,7 +113,7 @@ Configuration information
 
   For example: If the mbuf size is 2048, and the MTU is 9000, then
   receiving a full size packet will take 5 descriptors, 1 from the
-  start of packet queue, and 4 from the second queue.  Assuming
+  start-of-packet queue, and 4 from the second queue.  Assuming
   that the RQ size was set to the maximum of 4096, then the
   application can specify up to 1024 + 4096 as the nb_rx_desc
   parameter to rte_eth_rx_queue_setup().
@@ -156,7 +159,7 @@ host to route intra-host VM traffic.
 
 Please refer to `Creating a Dynamic vNIC Connection Policy
 
`_
-for information on configuring SR-IOV Adapter policies using UCS manager.
+for information on configuring SR-IOV adapter policies using UCS manager.
 
 Once the policies are in place and the host OS is rebooted, VFs should be
 visible on the host, E.g.:
@@ -222,15 +225,15 @@ Generic Flow API is supported. The baseline support is:
 
 - **1200 series VICs**
 
-  5-tuple exact Flow support for 1200 series adapters. This allows:
+  5-tuple exact flow support for 1200 series adapters. This allows:
 
   - Attributes: ingress
   - Items: ipv4, ipv6, udp, tcp (must exactly match src/dst IP
-addresses and ports and all must be specified).
+addresses and ports and all must be specified)
   - Actions: queue and void
   - Selectors: 'is'
 
-- **1300 series VICS with Advanced filters disabled**
+- **1300 series VICS with advanced filters disabled**
 
   With advanced filters disabled, an IPv4 or IPv6 item must be specified
   in the pattern.
@@ -239,15 +242,15 @@ Generic Flow API is supported. The baseline support is:
   - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
   - Actions: queue and void
   - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
-  - In total, up to 64 bytes of mask is allowed across all haeders
+  - In total, up to 64 bytes of mask is allowed across all headers
 
-- **1300 series VICS with Advanced filters enabled**
+- **1300 series VICS with advanced filters enabled**
 
   - Attributes: ingress
   - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
   - Actions: queue, mark, flag and void
   - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
-  - In total, up to 64 bytes of mask is allowed across all haeders
+  - In total, up to 64 bytes of mask is allowed across all headers
 
 More features may be added in future firmware and new versions of th

[dpdk-dev] [PATCH] net/enic: do not set checksum unknown offload flag

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

PKT_RX_IP_CKSUM_UNKNOWN and PKT_RX_L4_CKSUM_UNKNOWN are zeros, so no
need to set them.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/enic_rxtx.c | 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 831c90a1c..a10d9bd72 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -273,10 +273,7 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct 
rte_mbuf *mbuf)
 
/* checksum flags */
if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
-   if (enic_cq_rx_desc_csum_not_calc(cqrd))
-   pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
-PKT_RX_L4_CKSUM_UNKNOWN);
-   else {
+   if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
 
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: use the new ethdev offloads API

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

The following commits deprecate the use of the offload bit fields
(e.g. header_split) in rte_eth_rxmode and txq_flags in rte_eth_txconf.

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

For enic, the required changes are mechanical. Use the new 'offloads'
field in rxmode instead of the bit fields. No changes are required
with respect to txq_flags, as enic does not use it at all.

Per-queue RX offload capabilities are not set, as all offloads are
per-port at the moment.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/enic_ethdev.c | 9 ++---
 drivers/net/enic/enic_main.c   | 6 --
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 669dbf336..59834f3c8 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -370,7 +370,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev 
*eth_dev, int mask)
ENICPMD_FUNC_TRACE();
 
if (mask & ETH_VLAN_STRIP_MASK) {
-   if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+   if (eth_dev->data->dev_conf.rxmode.offloads &
+   DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
@@ -407,13 +408,15 @@ static int enicpmd_dev_configure(struct rte_eth_dev 
*eth_dev)
}
 
if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
-   eth_dev->data->dev_conf.rxmode.header_split) {
+   (eth_dev->data->dev_conf.rxmode.offloads &
+DEV_RX_OFFLOAD_HEADER_SPLIT)) {
/* Enable header-data-split */
enic_set_hdr_split_size(enic,
eth_dev->data->dev_conf.rxmode.split_hdr_size);
}
 
-   enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
+   enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM);
ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
 
return ret;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 8af0ccd3c..bd85f344f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -634,7 +634,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
   RTE_PKTMBUF_HEADROOM);
 
-   if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+   if (enic->rte_dev->data->dev_conf.rxmode.offloads &
+   DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
@@ -1208,7 +1209,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* The easy case is when scatter is disabled. However if the MTU
 * becomes greater than the mbuf data size, packet drops will ensue.
 */
-   if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+   if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER)) {
eth_dev->data->mtu = new_mtu;
goto set_mtu_done;
}
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: fix L4 Rx ptype comparison

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

For non-UDP/TCP packets, enic may wrongly set PKT_RX_L4_CKSUM_BAD in
ol_flags. The comparison that checks if a packet is UDP or TCP assumes
that RTE_PTYPE_L4 values are bit flags, but they are not. For example,
the following evaluates to true because NONFRAG is 0x600 and UDP is
0x200, and causes the current code to think the packet is UDP.

!!(RTE_PTYPE_L4_NONFRAG & RTE_PTYPE_L4_UDP)

So, fix this by comparing the packet type against UDP and TCP
individually.

Fixes: 453d15059b58 ("net/enic: use new Rx checksum flags")
Cc: sta...@dpdk.org

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
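
A small worked example of the wrong comparison (the values match
rte_mbuf_ptype.h: RTE_PTYPE_L4_TCP is 0x100, UDP is 0x200, NONFRAG is 0x600):

    uint32_t l4 = RTE_PTYPE_L4_NONFRAG;                     /* 0x600 */

    /* Old check: 0x600 & (0x200 | 0x100) == 0x200, i.e. "true" */
    int wrong = !!(l4 & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP));
    /* New check: correctly false for NONFRAG */
    int right = (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP);
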
 drivers/net/enic/enic_rxtx.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index a3663d516..831c90a1c 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -285,7 +285,8 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct 
rte_mbuf *mbuf)
else
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
 
-   if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+   if (l4_flags == RTE_PTYPE_L4_UDP ||
+   l4_flags == RTE_PTYPE_L4_TCP) {
if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
else
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: remove remaining header-split code

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

Header splitting has been disabled at least since the following
commit. Remove the remaining code to avoid confusion.

commit 947d860c821f ("enic: improve Rx performance")

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/base/vnic_dev.c | 11 ---
 drivers/net/enic/base/vnic_dev.h |  2 --
 drivers/net/enic/enic.h  |  1 -
 drivers/net/enic/enic_ethdev.c   |  8 
 drivers/net/enic/enic_main.c |  5 -
 5 files changed, 27 deletions(-)

diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 9b25d219c..75388423b 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -78,7 +78,6 @@ struct vnic_dev {
enum vnic_proxy_type proxy;
u32 proxy_index;
u64 args[VNIC_DEVCMD_NARGS];
-   u16 split_hdr_size;
int in_reset;
struct vnic_intr_coal_timer_info intr_coal_timer_info;
void *(*alloc_consistent)(void *priv, size_t size,
@@ -251,16 +250,6 @@ unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring 
*ring,
return ring->size_unaligned;
 }
 
-void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size)
-{
-   vdev->split_hdr_size = size;
-}
-
-u16 vnic_get_hdr_split_size(struct vnic_dev *vdev)
-{
-   return vdev->split_hdr_size;
-}
-
 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
 {
memset(ring->descs, 0, ring->size);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index c9ca25b35..2e07a8535 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -120,8 +120,6 @@ unsigned long vnic_dev_get_res_type_len(struct vnic_dev 
*vdev,
 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
-void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size);
-u16 vnic_get_hdr_split_size(struct vnic_dev *vdev);
 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
char *z_name);
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e36ec385c..b8336ea4b 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -277,7 +277,6 @@ extern int enic_alloc_rq(struct enic *enic, uint16_t 
queue_idx,
uint16_t nb_desc, uint16_t free_thresh);
 extern int enic_set_rss_nic_cfg(struct enic *enic);
 extern int enic_set_vnic_res(struct enic *enic);
-extern void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size);
 extern int enic_enable(struct enic *enic);
 extern int enic_disable(struct enic *enic);
 extern void enic_remove(struct enic *enic);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 59834f3c8..24916312f 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -407,14 +407,6 @@ static int enicpmd_dev_configure(struct rte_eth_dev 
*eth_dev)
return ret;
}
 
-   if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
-   (eth_dev->data->dev_conf.rxmode.offloads &
-DEV_RX_OFFLOAD_HEADER_SPLIT)) {
-   /* Enable header-data-split */
-   enic_set_hdr_split_size(enic,
-   eth_dev->data->dev_conf.rxmode.split_hdr_size);
-   }
-
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
  DEV_RX_OFFLOAD_CHECKSUM);
ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index bd85f344f..ac0c4a19f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -98,11 +98,6 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, 
struct vnic_rq *rq)
}
 }
 
-void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
-{
-   vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
-}
-
 static void enic_free_wq_buf(struct vnic_wq_buf *buf)
 {
struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
-- 
2.12.0



[dpdk-dev] [PATCH] maintainers: update for enic

2018-01-10 Thread John Daley
Welcome Hyong Youb Kim.

Signed-off-by: John Daley 
---
 MAINTAINERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index d64cd243d..86eb7bb0d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -391,7 +391,7 @@ F: doc/guides/nics/features/cxgbe.ini
 
 Cisco enic
 M: John Daley 
-M: Nelson Escobar 
+M: Hyong Youb Kim 
 F: drivers/net/enic/
 F: doc/guides/nics/enic.rst
 F: doc/guides/nics/features/enic.ini
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: use dynamic log types

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

"pmd.enic.init" replaces CONFIG_RTE_LIBRTE_ENIC_DEBUG
"pmd.enic.flow" replaces CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
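
With dynamic log types the verbosity can be changed at run time instead of at
build time; a minimal sketch using only the API already shown in this patch
(how a caller obtains the log-type id is left out here):

    /* Raise the flow-API log type to DEBUG, e.g. while debugging rte_flow
     * rules, without rebuilding the PMD.
     */
    rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_DEBUG);
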
 config/common_base |  2 --
 doc/guides/nics/enic.rst   |  6 --
 drivers/net/enic/enic_compat.h |  2 ++
 drivers/net/enic/enic_ethdev.c | 26 --
 drivers/net/enic/enic_flow.c   | 11 ---
 5 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/config/common_base b/config/common_base
index 5ee18420c..918bf65d0 100644
--- a/config/common_base
+++ b/config/common_base
@@ -256,8 +256,6 @@ CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
 # Compile burst-oriented Cisco ENIC PMD driver
 #
 CONFIG_RTE_LIBRTE_ENIC_PMD=y
-CONFIG_RTE_LIBRTE_ENIC_DEBUG=n
-CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW=n
 
 #
 # Compile burst-oriented Netronome NFP PMD driver
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 22df466b4..4dffce1a6 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -52,12 +52,6 @@ Configuration information
   - **CONFIG_RTE_LIBRTE_ENIC_PMD** (default y): Enables or disables inclusion
 of the ENIC PMD driver in the DPDK compilation.
 
-  - **CONFIG_RTE_LIBRTE_ENIC_DEBUG** (default n): Enables or disables debug
-logging within the ENIC PMD driver.
-
-  - **CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW** (default n): Enables or disables flow
-API related debug logging within the ENIC PMD driver.
-
 - **vNIC Configuration Parameters**
 
   - **Number of Queues**
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index 1cb5686f8..4fb92dd9c 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -84,6 +84,8 @@
 #define dev_warning(x, args...) dev_printk(WARNING, args)
 #define dev_debug(x, args...) dev_printk(DEBUG, args)
 
+extern int enicpmd_logtype_flow;
+
 #define __le16 u16
 #define __le32 u32
 #define __le64 u64
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 24916312f..5132966d0 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -49,12 +49,14 @@
 #include "vnic_enet.h"
 #include "enic.h"
 
-#ifdef RTE_LIBRTE_ENIC_DEBUG
-#define ENICPMD_FUNC_TRACE() \
-   RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
-#else
-#define ENICPMD_FUNC_TRACE() (void)0
-#endif
+int enicpmd_logtype_init;
+int enicpmd_logtype_flow;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+   rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+   "%s" fmt "\n", __func__, ##args)
+
+#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 
 /*
  * The set of PCI devices this driver supports
@@ -66,6 +68,18 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
 };
 
+RTE_INIT(enicpmd_init_log);
+static void
+enicpmd_init_log(void)
+{
+   enicpmd_logtype_init = rte_log_register("pmd.enic.init");
+   if (enicpmd_logtype_init >= 0)
+   rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
+   enicpmd_logtype_flow = rte_log_register("pmd.enic.flow");
+   if (enicpmd_logtype_flow >= 0)
+   rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
+}
+
 static int
 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg)
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index a728d0777..190762eb6 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -42,15 +42,12 @@
 #include "vnic_dev.h"
 #include "vnic_nic.h"
 
-#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
 #define FLOW_TRACE() \
-   RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
+   rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
+   "%s()\n", __func__)
 #define FLOW_LOG(level, fmt, args...) \
-   RTE_LOG(level, PMD, fmt, ## args)
-#else
-#define FLOW_TRACE() do { } while (0)
-#define FLOW_LOG(level, fmt, args...) do { } while (0)
-#endif
+   rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
+   fmt "\n", ##args)
 
 /** Info about how to copy items into enic filters. */
 struct enic_items {
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: refill only the address of the RQ descriptor

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

Once the RQ descriptors are initialized (enic_alloc_rx_queue_mbufs),
their length_type does not change during normal RX
operations. rx_pkt_burst only needs to reset their address field for
newly allocated mbufs.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/enic_rxtx.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index a28834ea7..f27f3d443 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -327,7 +327,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
while (nb_rx < nb_pkts) {
volatile struct rq_enet_desc *rqd_ptr;
-   dma_addr_t dma_addr;
struct cq_desc cqd;
uint8_t packet_error;
uint16_t ciflags;
@@ -376,12 +375,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
/* Push descriptor for newly allocated mbuf */
nmb->data_off = RTE_PKTMBUF_HEADROOM;
-   dma_addr = (dma_addr_t)(nmb->buf_iova +
-   RTE_PKTMBUF_HEADROOM);
-   rq_enet_desc_enc(rqd_ptr, dma_addr,
-   (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
-   : RQ_ENET_TYPE_NOT_SOP),
-   nmb->buf_len - RTE_PKTMBUF_HEADROOM);
+   /*
+* Only the address needs to be refilled. length_type of the
+* descriptor is set during initialization
+* (enic_alloc_rx_queue_mbufs) and does not change.
+*/
+   rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
+   RTE_PKTMBUF_HEADROOM);
 
/* Fill in the rest of the mbuf */
seg_length = enic_cq_rx_desc_n_bytes(&cqd);
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: remove a couple unnecessary statements

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

No need to zero ol_flags as it is overwritten at the end of the
function. No need to check for EOP as the caller (enic_recv_pkts) has
already checked it.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/enic_rxtx.c | 10 +-
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index a10d9bd72..a28834ea7 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -230,17 +230,10 @@ static inline void
 enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 {
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-   uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
-   ciflags = enic_cq_rx_desc_ciflags(cqrd);
+   uint16_t bwflags, pkt_flags = 0, vlan_tci;
bwflags = enic_cq_rx_desc_bwflags(cqrd);
vlan_tci = enic_cq_rx_desc_vlan(cqrd);
 
-   mbuf->ol_flags = 0;
-
-   /* flags are meaningless if !EOP */
-   if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-   goto mbuf_flags_done;
-
/* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
@@ -292,7 +285,6 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct 
rte_mbuf *mbuf)
}
}
 
- mbuf_flags_done:
mbuf->ol_flags = pkt_flags;
 }
 
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: use BSD-3-Clause

2018-01-10 Thread John Daley
From: Hyong Youb Kim 

enic is currently using BSD-2-Clause, whereas the DPDK approved
license is BSD-3-Clause. So replace license text with BSD-3-Clause.

Remove LICENSE as it is redundant.

Signed-off-by: Hyong Youb Kim 
Reviewed-by: John Daley 
---
 drivers/net/enic/LICENSE  | 27 ---
 drivers/net/enic/Makefile | 32 ++--
 drivers/net/enic/base/cq_desc.h   | 33 ++---
 drivers/net/enic/base/cq_enet_desc.h  | 33 ++---
 drivers/net/enic/base/rq_enet_desc.h  | 33 ++---
 drivers/net/enic/base/vnic_cq.c   | 33 ++---
 drivers/net/enic/base/vnic_cq.h   | 33 ++---
 drivers/net/enic/base/vnic_dev.c  | 33 ++---
 drivers/net/enic/base/vnic_dev.h  | 33 ++---
 drivers/net/enic/base/vnic_devcmd.h   | 33 ++---
 drivers/net/enic/base/vnic_enet.h | 33 ++---
 drivers/net/enic/base/vnic_intr.c | 33 ++---
 drivers/net/enic/base/vnic_intr.h | 33 ++---
 drivers/net/enic/base/vnic_nic.h  | 33 ++---
 drivers/net/enic/base/vnic_resource.h | 33 ++---
 drivers/net/enic/base/vnic_rq.c   | 33 ++---
 drivers/net/enic/base/vnic_rq.h   | 33 ++---
 drivers/net/enic/base/vnic_rss.c  | 33 ++---
 drivers/net/enic/base/vnic_rss.h  | 32 ++--
 drivers/net/enic/base/vnic_stats.h| 33 ++---
 drivers/net/enic/base/vnic_wq.c   | 33 ++---
 drivers/net/enic/base/vnic_wq.h   | 33 ++---
 drivers/net/enic/base/wq_enet_desc.h  | 33 ++---
 drivers/net/enic/enic.h   | 33 ++---
 drivers/net/enic/enic_clsf.c  | 33 ++---
 drivers/net/enic/enic_compat.h| 33 ++---
 drivers/net/enic/enic_ethdev.c| 33 ++---
 drivers/net/enic/enic_flow.c  | 31 ++-
 drivers/net/enic/enic_main.c  | 33 ++---
 drivers/net/enic/enic_res.c   | 33 ++---
 drivers/net/enic/enic_res.h   | 33 ++---
 drivers/net/enic/enic_rxtx.c  | 31 ++-
 32 files changed, 62 insertions(+), 982 deletions(-)
 delete mode 100644 drivers/net/enic/LICENSE

diff --git a/drivers/net/enic/LICENSE b/drivers/net/enic/LICENSE
deleted file mode 100644
index 46a27a4e9..0
--- a/drivers/net/enic/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 5191db549..7c6c29cc0 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,34 +1,6 @@
-#
-# Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 # Copyright 2007 Nuova Systems, Inc.  All rights reserved.
-#
-# Copyright (c) 2014, Cisco Systems, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are pe

[dpdk-dev] [PATCH] net/enic: remove a conditional from the Tx path

2018-01-10 Thread John Daley
The VLAN insert flag and VLAN tag used in the VIC write descriptor
can be set unconditionally.

Signed-off-by: John Daley 
Reviewed-by: Hyong Youb Kim 
---
 drivers/net/enic/enic_rxtx.c | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 08d600ffb..e2002e136 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -490,8 +490,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
}
 
mss = 0;
-   vlan_id = 0;
-   vlan_tag_insert = 0;
+   vlan_id = tx_pkt->vlan_tci;
+   vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
bus_addr = (dma_addr_t)
   (tx_pkt->buf_iova + tx_pkt->data_off);
 
@@ -531,10 +531,6 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
}
}
 
-   if (ol_flags & PKT_TX_VLAN_PKT) {
-   vlan_tag_insert = 1;
-   vlan_id = tx_pkt->vlan_tci;
-   }
 
wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
 offload_mode, eop, eop, 0, vlan_tag_insert,
-- 
2.12.0



[dpdk-dev] [PATCH] net/enic: use TSO flags

2018-01-10 Thread John Daley
Depend on the tx_offload flags in the mbuf to determine the length
of the headers instead of looking into the packet itself.
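
For context, a minimal sketch (not part of this patch; the field and flag names
are from the standard rte_mbuf API) of what a TSO-capable application is now
expected to set before handing the mbuf to the driver:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Illustrative only: prepare an IPv4/TCP mbuf for TSO via the tx_offload fields. */
static void
prepare_tso_mbuf(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TCP_SEG;         /* ask the PMD to segment */
	m->l2_len = sizeof(struct ether_hdr);  /* Ethernet header */
	m->l3_len = sizeof(struct ipv4_hdr);   /* IPv4 header, no options */
	m->l4_len = sizeof(struct tcp_hdr);    /* TCP header, no options */
	m->tso_segsz = mss;                    /* payload bytes per segment */
	/* The driver now computes header_len = l2_len + l3_len + l4_len. */
}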

Signed-off-by: John Daley 
Reviewed-by: Hyong Youb Kim 
---
 drivers/net/enic/enic_rxtx.c | 56 ++--
 1 file changed, 2 insertions(+), 54 deletions(-)

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index b9358d34f..08d600ffb 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -105,59 +105,6 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
 }
 
-/* Find the offset to L5. This is needed by enic TSO implementation.
- * Return 0 if not a TCP packet or can't figure out the length.
- */
-static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
-{
-   struct ether_hdr *eh;
-   struct vlan_hdr *vh;
-   struct ipv4_hdr *ip4;
-   struct ipv6_hdr *ip6;
-   struct tcp_hdr *th;
-   uint8_t hdr_len;
-   uint16_t ether_type;
-
-   /* offset past Ethernet header */
-   eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
-   ether_type = eh->ether_type;
-   hdr_len = sizeof(struct ether_hdr);
-   if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
-   vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
-   ether_type = vh->eth_proto;
-   hdr_len += sizeof(struct vlan_hdr);
-   }
-
-   /* offset past IP header */
-   switch (rte_be_to_cpu_16(ether_type)) {
-   case ETHER_TYPE_IPv4:
-   ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
-   if (ip4->next_proto_id != IPPROTO_TCP)
-   return 0;
-   hdr_len += (ip4->version_ihl & 0xf) * 4;
-   break;
-   case ETHER_TYPE_IPv6:
-   ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
-   if (ip6->proto != IPPROTO_TCP)
-   return 0;
-   hdr_len += sizeof(struct ipv6_hdr);
-   break;
-   default:
-   return 0;
-   }
-
-   if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
-   return 0;
-
-   /* offset past TCP header */
-   th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
-   hdr_len += (th->data_off >> 4) * 4;
-
-   if (hdr_len > mbuf->pkt_len)
-   return 0;
-
-   return hdr_len;
-}
 
 static inline uint8_t
 enic_cq_rx_check_err(struct cq_desc *cqd)
@@ -556,7 +503,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
header_len = 0;
 
if (tso) {
-   header_len = tso_header_len(tx_pkt);
+   header_len = tx_pkt->l2_len + tx_pkt->l3_len +
+tx_pkt->l4_len;
 
/* Drop if non-TCP packet or TSO seg size is too big */
if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
-- 
2.12.0



Re: [dpdk-dev] [PATCH v2 0/2] AVX2 Vectorized Rx/Tx functions for i40e

2018-01-10 Thread Richardson, Bruce


> -Original Message-
> From: John Fastabend [mailto:john.fastab...@gmail.com]
> Sent: Tuesday, January 9, 2018 4:31 PM
> To: Richardson, Bruce; Zhang, Qi Z; Xing, Beilei
> Cc: dev@dpdk.org; Zhang, Helin; Yigit, Ferruh
> Subject: Re: [dpdk-dev] [PATCH v2 0/2] AVX2 Vectorized Rx/Tx functions for
> i40e
> 
> On 01/09/2018 06:32 AM, Bruce Richardson wrote:
> > This patch adds an AVX2 vectorized path to the i40e driver, based on
> > the existing SSE4.2 version. Using AVX2 instructions gives better
> > performance than the SSE version, though the percentage increase
> > depends on the exact settings used. For example:
> >
> 
> Hi Bruce,
> 
> Just curious, can you provide some hints on percent increase in at least
> some representative cases? I'm just trying to get a sense of if this is
> %5, 10%, 20%, more... I know mileage will vary depending on system, setup,
> configuration, etc.
> 

The best-case conditions to test under are in testpmd, as that is where any IO 
improvement will be most visible. As a ballpark figure though, on my system, while 
testing testpmd with both 16B and 32B descriptors (RX/TX ring sizes 1024/512), 
I saw a ~15% performance increase, and sometimes quite a bit higher, e.g. when 
testing with 16B descriptors and larger burst sizes. 
As you say, system, setup, and config all have an impact, so YMMV... :-).

/Bruce


Re: [dpdk-dev] [PATCH v3 00/24] net/i40e: update base code

2018-01-10 Thread Zhang, Helin


> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Xing, Beilei
> Sent: Wednesday, January 10, 2018 1:37 PM
> To: Zhang, Qi Z
> Cc: dev@dpdk.org; Wu, Jingjing
> Subject: Re: [dpdk-dev] [PATCH v3 00/24] net/i40e: update base code
> 
> 
> 
> > -Original Message-
> > From: Zhang, Qi Z
> > Sent: Wednesday, January 10, 2018 4:30 AM
> > To: Xing, Beilei 
> > Cc: dev@dpdk.org; Wu, Jingjing ; Zhang, Qi Z
> > 
> > Subject: [PATCH v3 00/24] net/i40e: update base code
> >
> > The patchset updates the i40e base code based on the latest CID drop
> > cid-i40e.2018.01.02.tar.gz. A couple of issues are fixed, including:
> > the NVM lock issue, LED blink issue, LLDP configuration read issue,
> > an unaligned data issue on non-x86 platforms, and some compile issues
> > with GCC 6.3. Also, a new AQ command that helps software get access to
> > DCB parameters is added, the AQ command for NVM update is enhanced to
> > support a preservation flag for X722 devices, and it also allows
> > retrieval of AdminQ events resulting from AdminQ commands sent to
> > firmware. Besides that, the patchset also covers a couple of code cleanups.
> >
> > v3:
> > - rebase to next-net-intel again
> >
> > v2:
> > - rebase to next-net-intel
> > - fix some typo
> > - merge code clean into single patch
> > - add missing fix line
> >
> >
> > Qi Zhang (24):
> >   net/i40e/base: add new PHY type
> >   net/i40e/base: add capability macros
> >   net/i40e/base: add (Q)SFP module memory access definitions
> >   net/i40e/base: release spinlock before function returns
> >   net/i40e/base: retry AQC to overcome IRCRead hangs
> >   net/i40e/base: add byte swaps in PHY register access
> >   net/i40e/base: add macro for 25G device
> >   net/i40e/base: code refactoring for LED blink
> >   net/i40e/base: add link speed convert function
> >   net/i40e/base: add AQ command for DCB parameters
> >   net/i40e/base: fix NVM lock
> >   net/i40e/base: code clean
> >   net/i40e/base: add NVM update preservation flags
> >   net/i40e/base: enable AQ event get in NVM update
> >   net/i40e/base: fix link LED blink
> >   net/i40e/base: add defines for flat NVM
> >   net/i40e: enhanced loopback AQ command
> >   net/i40e/base: add rearrange process AQ command
> >   net/i40e/base: add AQ critical error type
> >   net/i40e/base: fix compile issue for GCC 6.3
> >   net/i40e/base: fix reading LLDP configuration
> >   net/i40e/base: fix unaligned data issue
> >   net/i40e: rename a field
> >   net/i40e/base: update README file
> >
> >  drivers/net/i40e/base/README|   2 +-
> >  drivers/net/i40e/base/i40e_adminq.c |  44 ++--
> >  drivers/net/i40e/base/i40e_adminq.h |   3 -
> >  drivers/net/i40e/base/i40e_adminq_cmd.h |  47 +++-
> >  drivers/net/i40e/base/i40e_common.c | 388 +++
> >  drivers/net/i40e/base/i40e_dcb.c|  88 ++-
> >  drivers/net/i40e/base/i40e_devids.h |   3 +
> >  drivers/net/i40e/base/i40e_diag.c   |  17 +-
> >  drivers/net/i40e/base/i40e_hmc.c|   1 -
> >  drivers/net/i40e/base/i40e_nvm.c| 447 ++--
> > 
> >  drivers/net/i40e/base/i40e_prototype.h  |  49 +++-
> >  drivers/net/i40e/base/i40e_status.h |   1 +
> >  drivers/net/i40e/base/i40e_type.h   |  56 +++-
> >  drivers/net/i40e/base/virtchnl.h|  12 +-
> >  drivers/net/i40e/i40e_ethdev.c  |   4 +-
> >  drivers/net/i40e/i40e_ethdev_vf.c   |   2 +-
> >  drivers/net/i40e/i40e_pf.c  |   4 +-
> >  17 files changed, 753 insertions(+), 415 deletions(-)
> >
> > --
> > 2.14.1
> 
> Acked-by: Beilei Xing 
Applied to dpdk-next-net-intel, with minor commit log changes. Thanks!

/Helin



Re: [dpdk-dev] [PATCH v7 1/2] eal: add uevent monitor for hot plug

2018-01-10 Thread Guo, Jia



On 1/9/2018 8:42 PM, Gaëtan Rivet wrote:

Hi Jeff,

On Tue, Jan 09, 2018 at 12:08:52PM +, Guo, Jia wrote:

Your comments about splitting it totally make sense, no doubt about that, but my
question is: if the API is split from the functionality, should the functional part
be left as a null implementation or a stub? Any other good idea or tip for that?


Please avoid top-posting on the mailing list; it is confusing when
reading a thread intertwined with inner-posted mails.

Regarding your issue, it is fine to propose a first skeleton API with
bare implementations, then progressively use your new functions where
relevant.

It is only necessary to ensure compilation is always possible between
each patch. The API itself need not be usable, as long as the patch
order remains coherent and meaningful for review.
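
To illustrate (the function name below is hypothetical, not from the series), a
first patch can add the API as a bare stub so every intermediate commit still
builds and links, with the real uevent handling filled in by later patches:

/* Hypothetical first-patch stub; callbacks are simply never invoked yet. */
int rte_dev_monitor_start(void);

int
rte_dev_monitor_start(void)
{
	/* No implementation yet; later patches add the uevent handling. */
	return 0;
}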

Otherwise, sorry about not doing a review earlier, I didn't think I knew
enough about uevent to provide useful comments. However after a quick
reading I may be able to provide a few remarks.

I will wait for your split before doing so.

Makes sense. A new patch set version has been sent, for your reference.

Best regards,
Jeff Guo


-Original Message-
From: Thomas Monjalon [mailto:tho...@monjalon.net]
Sent: Tuesday, January 9, 2018 7:45 PM
To: Guo, Jia 
Cc: Mordechay Haimovsky ; dev@dpdk.org; step...@networkplumber.org; Richardson, Bruce 
; Yigit, Ferruh ; gaetan.ri...@6wind.com; Ananyev, 
Konstantin ; shreyansh.j...@nxp.com; Wu, Jingjing ; 
Zhang, Helin ; Van Haaren, Harry 
Subject: Re: [dpdk-dev] [PATCH v7 1/2] eal: add uevent monitor for hot plug

09/01/2018 12:39, Guo, Jia:

So, how can I separate the patch into smaller patches: by using stubs or null 
implementations in the functions? I think we should consider whether that is an 
economical way now; could I instead explain it in more detail in the code, for those 
of you not very familiar with the background? I have sent v8, please check, thanks all.

The v8 is not split enough.
Please try to address all my comments.




Re: [dpdk-dev] [PATCH v7 1/2] eal: add uevent monitor for hot plug

2018-01-10 Thread Guo, Jia



On 1/9/2018 9:44 PM, Thomas Monjalon wrote:

09/01/2018 13:08, Guo, Jia:

Your comments about splitting it totally make sense, no doubt about that, but my
question is: if the API is split from the functionality, should the functional part
be left as a null implementation or a stub? Any other good idea or tip for that?

Yes when introducing the callback API first, there will be no
implementation, so the callbacks are not called.
If needed you can have some empty functions.
I think we all want to make review more effective in any good way, so 
the v9 patch set has been sent; please check.




Re: [dpdk-dev] [PATCH v3 1/2] net/tap: add eBPF instructions to TAP device

2018-01-10 Thread Pascal Mazon
Hi Ophir,

I have a few remarks.

- Why do you define ARRAY_SIZE in tap_flow.h while it's used only in
tap_bpf_insns.c?

- I really dislike having the BPF bytecode hardcoded in tap_bpf_insns.c.
  You don't provide the commands used to generate that bytecode.
  And you don't provide the way to translate bytecode into these insns[]
arrays.
  So we're just supposed to trust that these instructions are indeed
what they pretend to be.
  The process is not repeatable nor verifiable.
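
For reference, the insns[] arrays under discussion are just raw instruction
encodings; a hand-written illustration (not taken from the patch) of the smallest
possible program, one that always returns TC_ACT_OK (0), would be:

/* Illustrative only: "r0 = 0; exit" encoded as struct bpf_insn entries
 * (struct bpf_insn from linux/bpf.h, or the fallback in tap_bpf.h). */
static const struct bpf_insn return_ok_insns[] = {
	{ .code = 0xb7, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 }, /* BPF_MOV64_IMM(R0, 0) */
	{ .code = 0x95, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 }, /* BPF_EXIT_INSN() */
};

Documenting the exact clang invocation and the step that converts the resulting
ELF sections into such arrays would make the real programs reviewable.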
 

On 10/01/2018 08:06, Ophir Munk wrote:
> TAP PMD is required to support RSS queue mapping based on rte_flow API. An
> example usage for this requirement is failsafe transparent switching from a
> PCI device to a TAP device while continuing to redirect packets to the same RSS
> queues on both devices.
>
> TAP RSS implementation is based on eBPF programs sent to Linux kernel
> through BPF system calls and using netlink messages to reference the
> programs as part of traffic control commands.
>
> An eBPF program acts as a traffic control classifier or action. Each
> program is written in C code under a different ELF section name. Clang 3.7
> is used to compile the C code into eBPF-formatted object file. The ELF file
> is parsed and its sections (programs) can be downloaded to the kernel using
> BPF system call. The BPF system call parameters contain the array of eBPF
> instructions. This commit includes BPF classifier and action programs
> (tap_bpf_program.c) as reference and their corresponding arrays of eBPF
> instructions (tap_bpf_insns.c). The reference file does not take part in
> dpdk compilation. The details on how to generate new eBPF code will be
> presented in another commit.
>
> In a follow up commit TAP PMD will use the eBPF programs to implement RSS
> flow rules.
>
> TAP eBPF requires Linux version 4.9 configured with BPF. TAP PMD will
> successfully compile on systems with old or non-BPF configured kernels but
> RSS rules creation on TAP devices will not be supported.
>
> Signed-off-by: Ophir Munk 
> ---
>  drivers/net/tap/Makefile  |   11 +
>  drivers/net/tap/tap_bpf.h |   96 ++
>  drivers/net/tap/tap_bpf_insns.c   | 1845 
> +
>  drivers/net/tap/tap_bpf_program.c |  221 +
>  drivers/net/tap/tap_flow.h|5 +
>  5 files changed, 2178 insertions(+)
>  create mode 100644 drivers/net/tap/tap_bpf.h
>  create mode 100644 drivers/net/tap/tap_bpf_insns.c
>  create mode 100644 drivers/net/tap/tap_bpf_program.c
>
> diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
> index fd4195f..feaa5b7 100644
> --- a/drivers/net/tap/Makefile
> +++ b/drivers/net/tap/Makefile
> @@ -12,6 +12,9 @@ EXPORT_MAP := rte_pmd_tap_version.map
>  
>  LIBABIVER := 1
>  
> +# TAP_MAX_QUEUES must be a power of 2 as it will be used for masking */
> +TAP_MAX_QUEUES = 16
> +
>  CFLAGS += -O3
>  CFLAGS += -I$(SRCDIR)
>  CFLAGS += -I.
> @@ -20,6 +23,8 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
>  LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
>  LDLIBS += -lrte_bus_vdev
>  
> +CFLAGS += -DTAP_MAX_QUEUES=$(TAP_MAX_QUEUES)
> +
>  #
>  # all source are stored in SRCS-y
>  #
> @@ -27,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += rte_eth_tap.c
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_flow.c
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_netlink.c
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_tcmsgs.c
> +SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_bpf_insns.c
>  
>  include $(RTE_SDK)/mk/rte.lib.mk
>  
> @@ -53,6 +59,11 @@ tap_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
>   linux/pkt_cls.h \
>   enum TCA_FLOWER_KEY_VLAN_PRIO \
>   $(AUTOCONF_OUTPUT)
> + $Q sh -- '$<' '$@' \
> + HAVE_BPF_PROG_LOAD \
> + linux/bpf.h \
> + enum BPF_PROG_LOAD \
> + $(AUTOCONF_OUTPUT)
>  
>  # Create tap_autoconf.h or update it in case it differs from the new one.
>  
> diff --git a/drivers/net/tap/tap_bpf.h b/drivers/net/tap/tap_bpf.h
> new file mode 100644
> index 000..82775b7
> --- /dev/null
> +++ b/drivers/net/tap/tap_bpf.h
> @@ -0,0 +1,96 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2017 Mellanox Technologies, Ltd.
> + */
> +
> +#ifndef __TAP_BPF_H__
> +#define __TAP_BPF_H__
> +
> +#include 
> +
> +#ifdef HAVE_BPF_PROG_LOAD
> +#include 
> +#else
> +/* BPF_MAP_UPDATE_ELEM command flags */
> +#define  BPF_ANY 0 /* create a new element or update an existing */
> +
> +/* BPF architecture instruction struct */
> +struct bpf_insn {
> + __u8code;
> + __u8dst_reg:4;
> + __u8src_reg:4;
> + __s16   off;
> + __s32   imm; /* immediate value */
> +};
> +
> +/* BPF program types */
> +enum bpf_prog_type {
> + BPF_PROG_TYPE_UNSPEC,
> + BPF_PROG_TYPE_SOCKET_FILTER,
> + BPF_PROG_TYPE_KPROBE,
> + BPF_PROG_TYPE_SCHED_CLS,
> + BPF_PROG_TYPE_SCHED_ACT,
> +};
> +
> +/* BPF commands types */
> +enum bpf_cmd {
> + BPF_MAP_CREATE,
> + 

[dpdk-dev] [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deinitialization

2018-01-10 Thread Ravi Kumar
CCP PMD is a virtual crypto PMD which schedules all the
available actual hardware engines. The PMD creates a
linked list of all CCP engines which will be scheduled
in a round-robin fashion to the CPU core requesting crypto
operations.
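
A minimal sketch of that round-robin selection over a linked list (the names are
illustrative, not the driver's):

#include <sys/queue.h>
#include <stddef.h>

struct engine {
	TAILQ_ENTRY(engine) next;   /* link in the engine list */
};
TAILQ_HEAD(engine_list, engine);

/* Return the engine after 'last', wrapping back to the head of the list. */
static struct engine *
next_engine(struct engine_list *list, struct engine *last)
{
	struct engine *e = (last != NULL) ? TAILQ_NEXT(last, next) : NULL;

	return (e != NULL) ? e : TAILQ_FIRST(list);
}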

Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/Makefile  |   3 +
 drivers/crypto/ccp/ccp_dev.c | 787 +++
 drivers/crypto/ccp/ccp_dev.h | 310 ++
 drivers/crypto/ccp/ccp_pci.c | 262 
 drivers/crypto/ccp/ccp_pci.h |  53 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  55 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  82 
 drivers/crypto/ccp/rte_ccp_pmd.c | 151 ++-
 8 files changed, 1701 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 51c5e5b..5e58c31 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,5 +51,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 000..5af2b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,787 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+  uint32_t queue_size,
+  int socket_id)
+{
+   const struct rte_memzone *mz;
+   unsigned int memzone_flags = 0;
+   const struct rte_memseg *ms;
+
+   mz = rte_memzone_lookup(queue_name);
+   if (mz != 0)
+   return mz;
+
+   ms = rte_eal_get_physmem_layout();
+   switch (ms[0].hugepage_sz) {
+   case(RTE_PGSIZE_2M):
+   memzone_flags = RTE_MEMZONE_2MB;
+   break;
+   case(RTE_PGSIZE_1G):
+   memzone_flags = RTE_MEMZONE_1GB;
+   break;
+   case(RTE_PGSIZE_16M):
+   memzone_flags = RTE_MEMZONE_16MB;
+   break;
+   case(RTE_PGSIZE_16G):
+   memzone_flags = RTE_MEMZONE_16GB;
+   break;
+   default:
+   memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+   }
+
+   return rte_memzone_reserve_aligned(queue_name,
+  queue_size,
+  socket_id,
+  memzone_flags,
+  queue_size);
+}
+
+/* 

[dpdk-dev] [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 MAINTAINERS|  6 +++
 config/common_base |  5 +++
 doc/guides/rel_notes/release_18_02.rst |  5 +++
 drivers/crypto/Makefile|  1 +
 drivers/crypto/ccp/Makefile| 55 ++
 drivers/crypto/ccp/rte_ccp_pmd.c   | 62 ++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |  4 ++
 mk/rte.app.mk  |  2 +
 8 files changed, 140 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b51c2d0..e609244 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -594,6 +594,12 @@ M: Pablo de Lara 
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar 
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob 
 F: drivers/crypto/armv8/
diff --git a/config/common_base b/config/common_base
index e74febe..88826c8 100644
--- a/config/common_base
+++ b/config/common_base
@@ -557,6 +557,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/doc/guides/rel_notes/release_18_02.rst 
b/doc/guides/rel_notes/release_18_02.rst
index 24b67bb..42ebeeb 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -41,6 +41,11 @@ New Features
  Also, make sure to start the actual text at the margin.
  =
 
+* **Added a new crypto poll mode driver for AMD CCP devices.**
+
+  Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+  :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+  this new driver.
 
 API Changes
 ---
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd14..fe41edd 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -16,5 +16,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 000..51c5e5b
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,55 @@
+#
+#   Copyright(c) 2018 Advanced Micro Devices, Inc.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#  notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#  notice, this list of conditions and the following disclaimer in the
+#  documentation and/or other materials provided with the distribution.
+#  * Neither the name of the copyright holder nor the names of its
+#  contributors may be used to endorse or promote products derived from
+#  this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff 

[dpdk-dev] [PATCH v3 04/19] crypto/ccp: support session related crypto pmd ops

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/Makefile  |   3 +-
 drivers/crypto/ccp/ccp_crypto.c  | 229 +
 drivers/crypto/ccp/ccp_crypto.h  | 267 +++
 drivers/crypto/ccp/ccp_dev.h | 129 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  61 -
 5 files changed, 685 insertions(+), 4 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 5e58c31..5241465 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,8 +51,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 000..c365c0f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,229 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+   enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+   if (xform == NULL)
+   return res;
+   if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+   if (xform->next == NULL)
+   return CCP_CMD_AUTH;
+   else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+   return CCP_CMD_HASH_CIPHER;
+   }
+   if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+   if (xform->next == NULL)
+   return CCP_CMD_CIPHER;
+   else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+   return CCP_CMD_CIPHER_HASH;
+   }
+   if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+   return CCP_CMD_COMBINED;
+   return res;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+const struct rte_crypto_sym_xform *xform)
+{
+   const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+   cipher_xform = &xform->cipher;
+
+   /* set cipher direction */
+   if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+   sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+   else
+   sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+   /* set cipher key */
+   sess->cipher.key_length = cipher_xform->key.length;
+   rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+  cipher_xform->key.length);
+
+   /* set iv parameters */
+   sess->iv.offset = cipher_xform->iv.offset;
+   sess->iv.length = cipher_xform->iv.length;
+
+   switch (cipher_xform->algo) {
+   default:

[dpdk-dev] [PATCH v3 03/19] crypto/ccp: support basic pmd ops

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_dev.c |  9 ++
 drivers/crypto/ccp/ccp_dev.h |  9 ++
 drivers/crypto/ccp/ccp_pmd_ops.c | 61 +---
 drivers/crypto/ccp/ccp_pmd_private.h | 43 +
 4 files changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 5af2b49..57bccf4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -52,6 +52,15 @@
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+   struct ccp_private *priv = dev->data->dev_private;
+
+   priv->last_dev = TAILQ_FIRST(&ccp_list);
+   return 0;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index fe05bf0..b321530 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -104,6 +104,10 @@
 #define LSB_ITEM_SIZE   32
 #define SLSB_MAP_SIZE   (MAX_LSB_CNT * LSB_SIZE)
 
+/* General CCP Defines */
+
+#define CCP_SB_BYTES32
+
 /* bitmap */
 enum {
BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
@@ -299,6 +303,11 @@ high32_value(unsigned long addr)
return ((uint64_t)addr >> 32) & 0x0;
 }
 
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
 /**
  * Detect ccp platform and initialize all ccp devices
  *
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bc4120b..b6f8c48 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -28,18 +28,69 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include 
+
+#include 
 #include 
+#include 
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+   RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+  struct rte_cryptodev_config *config __rte_unused)
+{
+   return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+   return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+   return 0;
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+struct rte_cryptodev_info *dev_info)
+{
+   struct ccp_private *internals = dev->data->dev_private;
+
+   if (dev_info != NULL) {
+   dev_info->driver_id = dev->driver_id;
+   dev_info->feature_flags = dev->feature_flags;
+   dev_info->capabilities = ccp_pmd_capabilities;
+   dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+   dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+   }
+}
 
 struct rte_cryptodev_ops ccp_ops = {
-   .dev_configure  = NULL,
-   .dev_start  = NULL,
-   .dev_stop   = NULL,
-   .dev_close  = NULL,
+   .dev_configure  = ccp_pmd_config,
+   .dev_start  = ccp_pmd_start,
+   .dev_stop   = ccp_pmd_stop,
+   .dev_close  = ccp_pmd_close,
 
.stats_get  = NULL,
.stats_reset= NULL,
 
-   .dev_infos_get  = NULL,
+   .dev_infos_get  = ccp_pmd_info_get,
 
.queue_pair_setup   = NULL,
.queue_pair_release = NULL,
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h 
b/drivers/crypto/ccp/ccp_pmd_private.h
index f5b6061..d2283e8 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -60,13 +60,56 @@
 #define CCP_NB_MAX_DESCRIPTORS 1024
 #define CCP_MAX_BURST 64
 
+#include "ccp_dev.h"
+
 /* private data structure for each CCP crypto device */
 struct ccp_private {
unsigned int max_nb_qpairs; /**< Max number of queue pairs */
unsigned int max_nb_sessions;   /**< Max number of sessions */
uint8_t crypto_num_dev; /**< Number of working crypto devices */
+   struct ccp_device *last_dev;/**< Last working crypto device */
 };
 
+/* CCP batch info */
+struct ccp_batch_info {
+   struct rte_crypto_op *op[CCP_MAX_BURST];
+   /**< optable populated at enque time from app*/
+   int op_idx;
+   struct ccp_queue *cmd_q;
+   uint16_t opcnt;
+   /**< no. of crypto ops in batch*/
+   int desccnt;
+   /**< no. of ccp queue descriptors*/
+   uint32_t head_offset;
+   /**< ccp 

[dpdk-dev] [PATCH v3 05/19] crypto/ccp: support queue pair related pmd ops

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 149 +--
 1 file changed, 144 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ad0a670..a02aa6f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -82,6 +82,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
}
 }
 
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+   struct ccp_qp *qp;
+
+   if (dev->data->queue_pairs[qp_id] != NULL) {
+   qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+   rte_ring_free(qp->processed_pkts);
+   rte_mempool_free(qp->batch_mp);
+   rte_free(qp);
+   dev->data->queue_pairs[qp_id] = NULL;
+   }
+   return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+   struct ccp_qp *qp)
+{
+   unsigned int n = snprintf(qp->name, sizeof(qp->name),
+   "ccp_pmd_%u_qp_%u",
+   dev->data->dev_id, qp->id);
+
+   if (n > sizeof(qp->name))
+   return -1;
+
+   return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+   struct rte_ring *r;
+
+   r = rte_ring_lookup(qp->name);
+   if (r) {
+   if (r->size >= ring_size) {
+   CCP_LOG_INFO(
+   "Reusing ring %s for processed packets",
+   qp->name);
+   return r;
+   }
+   CCP_LOG_INFO(
+   "Unable to reuse ring %s for processed packets",
+qp->name);
+   return NULL;
+   }
+
+   return rte_ring_create(qp->name, ring_size, socket_id,
+   RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+const struct rte_cryptodev_qp_conf *qp_conf,
+int socket_id, struct rte_mempool *session_pool)
+{
+   struct ccp_private *internals = dev->data->dev_private;
+   struct ccp_qp *qp;
+   int retval = 0;
+
+   if (qp_id >= internals->max_nb_qpairs) {
+   CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+   qp_id, internals->max_nb_qpairs);
+   return (-EINVAL);
+   }
+
+   /* Free memory prior to re-allocation if needed. */
+   if (dev->data->queue_pairs[qp_id] != NULL)
+   ccp_pmd_qp_release(dev, qp_id);
+
+   /* Allocate the queue pair data structure. */
+   qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+   RTE_CACHE_LINE_SIZE, socket_id);
+   if (qp == NULL) {
+   CCP_LOG_ERR("Failed to allocate queue pair memory");
+   return (-ENOMEM);
+   }
+
+   qp->dev = dev;
+   qp->id = qp_id;
+   dev->data->queue_pairs[qp_id] = qp;
+
+   retval = ccp_pmd_qp_set_unique_name(dev, qp);
+   if (retval) {
+   CCP_LOG_ERR("Failed to create unique name for ccp qp");
+   goto qp_setup_cleanup;
+   }
+
+   qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+   qp_conf->nb_descriptors, socket_id);
+   if (qp->processed_pkts == NULL) {
+   CCP_LOG_ERR("Failed to create batch info ring");
+   goto qp_setup_cleanup;
+   }
+
+   qp->sess_mp = session_pool;
+
+   /* mempool for batch info */
+   qp->batch_mp = rte_mempool_create(
+   qp->name,
+   qp_conf->nb_descriptors,
+   sizeof(struct ccp_batch_info),
+   RTE_CACHE_LINE_SIZE,
+   0, NULL, NULL, NULL, NULL,
+   SOCKET_ID_ANY, 0);
+   if (qp->batch_mp == NULL)
+   goto qp_setup_cleanup;
+   memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+   return 0;
+
+qp_setup_cleanup:
+   dev->data->queue_pairs[qp_id] = NULL;
+   if (qp)
+   rte_free(qp);
+   return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+uint16_t queue_pair_id __rte_unused)
+{
+   return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+   uint16_t queue_pair_id __rte_unused)
+{
+   return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+   return dev->data->nb_queue_pairs;
+}
+
 static unsigned
 ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
@@ -147,11 +286,11 @@ struct rte_cryptodev_ops ccp_ops = {
 
.dev_infos_get  = ccp_p

[dpdk-dev] [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 360 +++
 drivers/crypto/ccp/ccp_crypto.h  |  35 
 drivers/crypto/ccp/ccp_dev.c |  27 +++
 drivers/crypto/ccp/ccp_dev.h |   9 +
 drivers/crypto/ccp/rte_ccp_pmd.c |  64 ++-
 5 files changed, 488 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c365c0f..c17e84f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -227,3 +227,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
}
return ret;
 }
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+   int count = 0;
+
+   switch (session->cipher.algo) {
+   default:
+   CCP_LOG_ERR("Unsupported cipher algo %d",
+   session->cipher.algo);
+   }
+   return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+   int count = 0;
+
+   switch (session->auth.algo) {
+   default:
+   CCP_LOG_ERR("Unsupported auth algo %d",
+   session->auth.algo);
+   }
+
+   return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+   int count = 0;
+
+   switch (session->aead_algo) {
+   default:
+   CCP_LOG_ERR("Unsupported aead algo %d",
+   session->aead_algo);
+   }
+   return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+   int count = 0;
+
+   switch (session->cmd_id) {
+   case CCP_CMD_CIPHER:
+   count = ccp_cipher_slot(session);
+   break;
+   case CCP_CMD_AUTH:
+   count = ccp_auth_slot(session);
+   break;
+   case CCP_CMD_CIPHER_HASH:
+   case CCP_CMD_HASH_CIPHER:
+   count = ccp_cipher_slot(session);
+   count += ccp_auth_slot(session);
+   break;
+   case CCP_CMD_COMBINED:
+   count = ccp_aead_slot(session);
+   break;
+   default:
+   CCP_LOG_ERR("Unsupported cmd_id");
+
+   }
+
+   return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q __rte_unused,
+ struct ccp_batch_info *b_info __rte_unused)
+{
+   int result = 0;
+   struct ccp_session *session;
+
+   session = (struct ccp_session *)get_session_private_data(
+op->sym->session,
+ccp_cryptodev_driver_id);
+
+   switch (session->cipher.algo) {
+   default:
+   CCP_LOG_ERR("Unsupported cipher algo %d",
+   session->cipher.algo);
+   return -ENOTSUP;
+   }
+   return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+   struct ccp_queue *cmd_q __rte_unused,
+   struct ccp_batch_info *b_info __rte_unused)
+{
+
+   int result = 0;
+   struct ccp_session *session;
+
+   session = (struct ccp_session *)get_session_private_data(
+op->sym->session,
+   ccp_cryptodev_driver_id);
+
+   switch (session->auth.algo) {
+   default:
+   CCP_LOG_ERR("Unsupported auth algo %d",
+   session->auth.algo);
+   return -ENOTSUP;
+   }
+
+   return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+   struct ccp_queue *cmd_q __rte_unused,
+   struct ccp_batch_info *b_info __rte_unused)
+{
+   int result = 0;
+   struct ccp_session *session;
+
+   session = (struct ccp_session *)get_session_private_data(
+op->sym->session,
+   ccp_cryptodev_driver_id);
+
+   switch (session->aead_algo) {
+   default:
+   CCP_LOG_ERR("Unsupported aead algo %d",
+   session->aead_algo);
+   return -ENOTSUP;
+   }
+   return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+  struct rte_crypto_op **op,
+  struct ccp_queue *cmd_q,
+  uint16_t nb_ops,
+  int slots_req)
+{
+   int i, result = 0;
+   struct ccp_batch_info *b_info;
+   struct ccp_session *session;
+
+   if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+   CCP_LOG_ERR("batch info allocation failed");
+   return 0;
+   }
+   /* populate batch info necessary for dequeue */
+   b_info->op_idx = 0;
+   b_info->lsb_buf_idx = 0;
+   b_info->desccnt = 0;
+   b_info->cmd_q = cmd_q;
+   b_info->lsb_buf_phys =
+ 

[dpdk-dev] [PATCH v3 07/19] crypto/ccp: support for RTE_CRYPTO_OP_SESSIONLESS

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index ed6ca5d..23d3af3 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -49,7 +49,7 @@ static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 {
struct ccp_session *sess = NULL;
 
@@ -61,6 +61,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct 
rte_crypto_op *op)
get_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
+   } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+   void *_sess;
+   void *_sess_private_data = NULL;
+
+   if (rte_mempool_get(qp->sess_mp, &_sess))
+   return NULL;
+   if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+   return NULL;
+
+   sess = (struct ccp_session *)_sess_private_data;
+
+   if (unlikely(ccp_set_session_parameters(sess,
+   op->sym->xform) != 0)) {
+   rte_mempool_put(qp->sess_mp, _sess);
+   rte_mempool_put(qp->sess_mp, _sess_private_data);
+   sess = NULL;
+   }
+   op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+   set_session_private_data(op->sym->session,
+ccp_cryptodev_driver_id,
+_sess_private_data);
}
 
return sess;
@@ -108,10 +129,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct 
rte_crypto_op **ops,
uint16_t nb_ops)
 {
struct ccp_qp *qp = queue_pair;
-   uint16_t nb_dequeued = 0;
+   uint16_t nb_dequeued = 0, i;
 
nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
 
+   /* Free session if a session-less crypto op */
+   for (i = 0; i < nb_dequeued; i++)
+   if (unlikely(ops[i]->sess_type ==
+RTE_CRYPTO_OP_SESSIONLESS)) {
+   rte_mempool_put(qp->sess_mp,
+   ops[i]->sym->session);
+   ops[i]->sym->session = NULL;
+   }
qp->qp_stats.dequeued_count += nb_dequeued;
 
return nb_dequeued;
-- 
2.7.4



[dpdk-dev] [PATCH v3 08/19] crypto/ccp: support stats related crypto pmd ops

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 34 --
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a02aa6f..d483a74 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -68,6 +68,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
 }
 
 static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+   int qp_id;
+
+   for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+   struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+   stats->enqueued_count += qp->qp_stats.enqueued_count;
+   stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+   stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+   stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+   }
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+   int qp_id;
+
+   for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+   struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+   memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+   }
+}
+
+static void
 ccp_pmd_info_get(struct rte_cryptodev *dev,
 struct rte_cryptodev_info *dev_info)
 {
@@ -281,8 +311,8 @@ struct rte_cryptodev_ops ccp_ops = {
.dev_stop   = ccp_pmd_stop,
.dev_close  = ccp_pmd_close,
 
-   .stats_get  = NULL,
-   .stats_reset= NULL,
+   .stats_get  = ccp_pmd_stats_get,
+   .stats_reset= ccp_pmd_stats_reset,
 
.dev_infos_get  = ccp_pmd_info_get,
 
-- 
2.7.4



[dpdk-dev] [PATCH v3 11/19] crypto/ccp: support 3des cipher algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 132 ++-
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 ++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index b097355..0660761 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,7 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 const struct rte_crypto_sym_xform *xform)
 {
const struct rte_crypto_cipher_xform *cipher_xform = NULL;
-   size_t i;
+   size_t i, j, x;
 
cipher_xform = &xform->cipher;
 
@@ -115,6 +115,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
sess->cipher.engine = CCP_ENGINE_AES;
break;
+   case RTE_CRYPTO_CIPHER_3DES_CBC:
+   sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+   sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+   sess->cipher.engine = CCP_ENGINE_3DES;
+   break;
default:
CCP_LOG_ERR("Unsupported cipher algo");
return -1;
@@ -137,6 +142,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
sess->cipher.key[i];
break;
+   case CCP_ENGINE_3DES:
+   if (sess->cipher.key_length == 16)
+   sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+   else if (sess->cipher.key_length == 24)
+   sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+   else {
+   CCP_LOG_ERR("Invalid cipher key length");
+   return -1;
+   }
+   for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+   for (i = 0; i < 8; i++)
+   sess->cipher.key_ccp[(8 + x) - i - 1] =
+   sess->cipher.key[i + x];
+   break;
default:
CCP_LOG_ERR("Invalid CCP Engine");
return -ENOTSUP;
@@ -280,6 +299,10 @@ ccp_cipher_slot(struct ccp_session *session)
count = 2;
/**< op + passthrough for iv */
break;
+   case CCP_CIPHER_ALGO_3DES_CBC:
+   count = 2;
+   /**< op + passthrough for iv */
+   break;
default:
CCP_LOG_ERR("Unsupported cipher algo %d",
session->cipher.algo);
@@ -478,6 +501,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
return 0;
 }
 
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+   struct ccp_queue *cmd_q,
+   struct ccp_batch_info *b_info)
+{
+   struct ccp_session *session;
+   union ccp_function function;
+   unsigned char *lsb_buf;
+   struct ccp_passthru pst;
+   struct ccp_desc *desc;
+   uint32_t tail;
+   uint8_t *iv;
+   phys_addr_t src_addr, dest_addr, key_addr;
+
+   session = (struct ccp_session *)get_session_private_data(
+op->sym->session,
+   ccp_cryptodev_driver_id);
+
+   iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+   switch (session->cipher.um.des_mode) {
+   case CCP_DES_MODE_CBC:
+   lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+   b_info->lsb_buf_idx++;
+
+   rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+  iv, session->iv.length);
+
+   pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+   pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+   pst.len = CCP_SB_BYTES;
+   pst.dir = 1;
+   pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+   pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+   ccp_perform_passthru(&pst, cmd_q);
+   break;
+   case CCP_DES_MODE_CFB:
+   case CCP_DES_MODE_ECB:
+   CCP_LOG_ERR("Unsupported DES cipher mode");
+   return -ENOTSUP;
+   }
+
+   src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+   if (unlikely(op->sym->m_dst != NULL))
+   dest_addr =
+   rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+  op->sym->cipher.data.offset);
+   else
+   dest_addr = src_addr;
+
+   key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+   desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+   memset(desc, 0, Q_DESC_SIZE

[dpdk-dev] [PATCH v3 10/19] crypto/ccp: support aes cipher algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 197 ++-
 drivers/crypto/ccp/ccp_crypto.h  |  13 +++
 drivers/crypto/ccp/ccp_dev.h |  53 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  60 
 4 files changed, 321 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c17e84f..b097355 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,6 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 const struct rte_crypto_sym_xform *xform)
 {
const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+   size_t i;
 
cipher_xform = &xform->cipher;
 
@@ -99,6 +100,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
sess->iv.length = cipher_xform->iv.length;
 
switch (cipher_xform->algo) {
+   case RTE_CRYPTO_CIPHER_AES_CTR:
+   sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+   sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+   sess->cipher.engine = CCP_ENGINE_AES;
+   break;
+   case RTE_CRYPTO_CIPHER_AES_ECB:
+   sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+   sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+   sess->cipher.engine = CCP_ENGINE_AES;
+   break;
+   case RTE_CRYPTO_CIPHER_AES_CBC:
+   sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+   sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+   sess->cipher.engine = CCP_ENGINE_AES;
+   break;
default:
CCP_LOG_ERR("Unsupported cipher algo");
return -1;
@@ -106,10 +122,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 
 
switch (sess->cipher.engine) {
+   case CCP_ENGINE_AES:
+   if (sess->cipher.key_length == 16)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+   else if (sess->cipher.key_length == 24)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+   else if (sess->cipher.key_length == 32)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+   else {
+   CCP_LOG_ERR("Invalid cipher key length");
+   return -1;
+   }
+   for (i = 0; i < sess->cipher.key_length ; i++)
+   sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+   sess->cipher.key[i];
+   break;
default:
CCP_LOG_ERR("Invalid CCP Engine");
return -ENOTSUP;
}
+   sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+   sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
return 0;
 }
 
@@ -235,6 +268,18 @@ ccp_cipher_slot(struct ccp_session *session)
int count = 0;
 
switch (session->cipher.algo) {
+   case CCP_CIPHER_ALGO_AES_CBC:
+   count = 2;
+   /**< op + passthrough for iv */
+   break;
+   case CCP_CIPHER_ALGO_AES_ECB:
+   count = 1;
+   /**cipher.algo);
@@ -297,10 +342,146 @@ ccp_compute_slot_count(struct ccp_session *session)
return count;
 }
 
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+struct ccp_queue *cmd_q)
+{
+   struct ccp_desc *desc;
+   union ccp_function function;
+
+   desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+   CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+   CCP_CMD_SOC(desc) = 0;
+   CCP_CMD_IOC(desc) = 0;
+   CCP_CMD_INIT(desc) = 0;
+   CCP_CMD_EOM(desc) = 0;
+   CCP_CMD_PROT(desc) = 0;
+
+   function.raw = 0;
+   CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+   CCP_PT_BITWISE(&function) = pst->bit_mod;
+   CCP_CMD_FUNCTION(desc) = function.raw;
+
+   CCP_CMD_LEN(desc) = pst->len;
+
+   if (pst->dir) {
+   CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+   CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+   CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+   CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+   CCP_CMD_DST_HI(desc) = 0;
+   CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+   if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+   CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+   } else {
+
+   CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+   CCP_CMD_SRC_HI(desc) = 0;
+   CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+   CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+   CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+   CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+   }
+
+   cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_P

[dpdk-dev] [PATCH v3 09/19] crypto/ccp: support ccp hwrng feature

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_dev.c | 20 
 drivers/crypto/ccp/ccp_dev.h | 11 +++
 2 files changed, 31 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index fee90e3..d8c0ab4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -88,6 +88,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
return NULL;
 }
 
+int
+ccp_read_hwrng(uint32_t *value)
+{
+   struct ccp_device *dev;
+
+   TAILQ_FOREACH(dev, &ccp_list, next) {
+   void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+   while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+   *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+   if (*value) {
+   dev->hwrng_retries = 0;
+   return 0;
+   }
+   }
+   dev->hwrng_retries = 0;
+   }
+   return -1;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index cfb3b03..a5c9ef3 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -47,6 +47,7 @@
 
 /**< CCP sspecific */
 #define MAX_HW_QUEUES   5
+#define CCP_MAX_TRNG_RETRIES   10
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG  0x000
@@ -223,6 +224,8 @@ struct ccp_device {
/**< protection for shared lsb region allocation */
int qidx;
/**< current queue index */
+   int hwrng_retries;
+   /**< retry counter for CCP TRNG */
 } __rte_cache_aligned;
 
 /**< CCP H/W engine related */
@@ -454,4 +457,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
 #endif /* _CCP_DEV_H_ */
-- 
2.7.4
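
For reference, ccp_read_hwrng() walks the probed CCP devices and retries the
TRNG_OUT_REG read up to CCP_MAX_TRNG_RETRIES times before giving up. A minimal
caller sketch (not part of the patch; it assumes ccp_probe_devices() has
already populated the device list):

#include <stdio.h>
#include <stdint.h>

#include "ccp_dev.h"

/* Drain a few words from the CCP TRNG and print them. */
static void dump_trng_words(void)
{
        uint32_t word;
        int i;

        for (i = 0; i < 4; i++) {
                if (ccp_read_hwrng(&word) == 0)
                        printf("TRNG word %d: 0x%08x\n", i, word);
                else
                        printf("TRNG gave no entropy within the retry budget\n");
        }
}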



[dpdk-dev] [PATCH v3 12/19] crypto/ccp: support aes-cmac auth algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 277 ++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 +++
 2 files changed, 295 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 0660761..6e593d8 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,8 @@
 #include 
 #include 
 #include 
+#include  /*sub key apis*/
+#include  /*sub key apis*/
 
 #include 
 #include 
@@ -74,6 +76,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
return res;
 }
 
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+   int i;
+   /* Shift block to left, including carry */
+   for (i = 0; i < bl; i++) {
+   k[i] = l[i] << 1;
+   if (i < bl - 1 && l[i + 1] & 0x80)
+   k[i] |= 1;
+   }
+   /* If MSB set fixup with R */
+   if (l[0] & 0x80)
+   k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+   const EVP_CIPHER *algo;
+   EVP_CIPHER_CTX *ctx;
+   unsigned char *ccp_ctx;
+   size_t i;
+   int dstlen, totlen;
+   unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+   unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+   unsigned char k1[AES_BLOCK_SIZE] = {0};
+   unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+   if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+   algo =  EVP_aes_128_cbc();
+   else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+   algo =  EVP_aes_192_cbc();
+   else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+   algo =  EVP_aes_256_cbc();
+   else {
+   CCP_LOG_ERR("Invalid CMAC type length");
+   return -1;
+   }
+
+   ctx = EVP_CIPHER_CTX_new();
+   if (!ctx) {
+   CCP_LOG_ERR("ctx creation failed");
+   return -1;
+   }
+   if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+   (unsigned char *)zero_iv) <= 0)
+   goto key_generate_err;
+   if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+   goto key_generate_err;
+   if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+   goto key_generate_err;
+   if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+   goto key_generate_err;
+
+   memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+   ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+   prepare_key(k1, dst, AES_BLOCK_SIZE);
+   for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+   *ccp_ctx = k1[i];
+
+   ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+  (2 * CCP_SB_BYTES) - 1);
+   prepare_key(k2, k1, AES_BLOCK_SIZE);
+   for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+   *ccp_ctx = k2[i];
+
+   EVP_CIPHER_CTX_free(ctx);
+
+   return 0;
+
+key_generate_err:
+   CCP_LOG_ERR("CMAC Init failed");
+   return -1;
+}
+
 /* configure session */
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -170,6 +250,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
   const struct rte_crypto_sym_xform *xform)
 {
const struct rte_crypto_auth_xform *auth_xform = NULL;
+   size_t i;
 
auth_xform = &xform->auth;
 
@@ -179,6 +260,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
else
sess->auth.op = CCP_AUTH_OP_VERIFY;
switch (auth_xform->algo) {
+   case RTE_CRYPTO_AUTH_AES_CMAC:
+   sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+   sess->auth.engine = CCP_ENGINE_AES;
+   sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+   sess->auth.key_length = auth_xform->key.length;
+   /**auth.ctx_len = CCP_SB_BYTES << 1;
+   sess->auth.offset = AES_BLOCK_SIZE;
+   sess->auth.block_size = AES_BLOCK_SIZE;
+   if (sess->auth.key_length == 16)
+   sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+   else if (sess->auth.key_length == 24)
+   sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+   else if (sess->auth.key_length == 32)
+   sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+   else {
+   CCP_LOG_ERR("Invalid CMAC key length");
+   return -1;
+   }
+   rte_memcpy(sess->auth.key, auth_xform->key.data,
+  sess->auth.key_length);
+   for (i = 0; i < sess->auth.key_length; i++)
+   sess->auth.key_ccp[ses
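
The prepare_key() helper above is the standard CMAC subkey step: shift the
block left by one bit and, if the original MSB was set, XOR the last byte with
the Rb constant (0x87 for the 16-byte AES block, 0x1b for an 8-byte block).
A small standalone illustration of the same derivation (names below are
illustrative, not from the patch); K1 is derived from L = AES-ECB(key, 0^128),
which generate_cmac_subkeys() obtains through OpenSSL's EVP interface, and K2
is derived from K1:

#include <stdint.h>

#define AES_BLOCK 16

static void cmac_subkey(uint8_t *k, const uint8_t *l)
{
        int i;

        /* Left shift by one bit across the whole block, including carry. */
        for (i = 0; i < AES_BLOCK; i++) {
                k[i] = (uint8_t)(l[i] << 1);
                if (i < AES_BLOCK - 1 && (l[i + 1] & 0x80))
                        k[i] |= 1;
        }
        /* If the MSB of the input was set, fix up with Rb. */
        if (l[0] & 0x80)
                k[AES_BLOCK - 1] ^= 0x87;
}

/* Usage: cmac_subkey(k1, l); cmac_subkey(k2, k1); the patch then stores both
 * subkeys byte-reversed into sess->auth.pre_compute for the CCP's LSB layout.
 */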

[dpdk-dev] [PATCH v3 14/19] crypto/ccp: support sha1 authentication algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 367 +++
 drivers/crypto/ccp/ccp_crypto.h  |  23 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  42 +
 3 files changed, 432 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4ced193..ace6bc2 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include 
 #include  /*sub key apis*/
 #include  /*sub key apis*/
 
@@ -52,6 +53,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+   SHA1_H4, SHA1_H3,
+   SHA1_H2, SHA1_H1,
+   SHA1_H0, 0x0U,
+   0x0U, 0x0U,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -76,6 +85,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
return res;
 }
 
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+   SHA_CTX ctx;
+
+   if (!SHA1_Init(&ctx))
+   return -EFAULT;
+   SHA1_Transform(&ctx, data_in);
+   rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+   return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+   uint8_t ipad[sess->auth.block_size];
+   uint8_t opad[sess->auth.block_size];
+   uint8_t *ipad_t, *opad_t;
+   uint32_t *hash_value_be32, hash_temp32[8];
+   int i, count;
+
+   opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+   hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+
+   /* considering key size is always equal to block size of algorithm */
+   for (i = 0; i < sess->auth.block_size; i++) {
+   ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+   opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+   }
+
+   switch (sess->auth.algo) {
+   case CCP_AUTH_ALGO_SHA1_HMAC:
+   count = SHA1_DIGEST_SIZE >> 2;
+
+   if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+   return -1;
+   for (i = 0; i < count; i++, hash_value_be32++)
+   *hash_value_be32 = hash_temp32[count - 1 - i];
+
+   hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+  + sess->auth.ctx_len);
+   if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+   return -1;
+   for (i = 0; i < count; i++, hash_value_be32++)
+   *hash_value_be32 = hash_temp32[count - 1 - i];
+   return 0;
+   default:
+   CCP_LOG_ERR("Invalid auth algo");
+   return -1;
+   }
+}
+
 /* prepare temporary keys K1 and K2 */
 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
 {
@@ -260,6 +322,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
else
sess->auth.op = CCP_AUTH_OP_VERIFY;
switch (auth_xform->algo) {
+   case RTE_CRYPTO_AUTH_SHA1:
+   sess->auth.engine = CCP_ENGINE_SHA;
+   sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+   sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+   sess->auth.ctx = (void *)ccp_sha1_init;
+   sess->auth.ctx_len = CCP_SB_BYTES;
+   sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+   break;
+   case RTE_CRYPTO_AUTH_SHA1_HMAC:
+   if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+   return -1;
+   sess->auth.engine = CCP_ENGINE_SHA;
+   sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+   sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+   sess->auth.ctx_len = CCP_SB_BYTES;
+   sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+   sess->auth.block_size = SHA1_BLOCK_SIZE;
+   sess->auth.key_length = auth_xform->key.length;
+   memset(sess->auth.key, 0, sess->auth.block_size);
+   memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+   rte_memcpy(sess->auth.key, auth_xform->key.data,
+  auth_xform->key.length);
+   if (generate_partial_hash(sess))
+   return -1;
+   break;
case RTE_CRYPTO_AUTH_AES_CMAC:
sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
sess->auth.engine = CCP_ENGINE_AES;
@@ -453,6 +540,13 @@ ccp_auth_slot(struct ccp_session *session)
int count = 0;
 
switch (session->auth.algo) {
+   case CCP_AUTH_ALGO_SHA1:
+   count = 3;
+   /**< op + lsb passthrough cpy to/from*/
+   break;
+   case CCP_AUTH_ALGO_SHA1_HMAC:
+   count = 6;
+   break;
case CCP_AUTH_ALGO_AES_CMAC:
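
The ipad/opad precompute above exploits HMAC(K, m) = H((K ^ opad) || H((K ^
ipad) || m)): since K ^ ipad and K ^ opad are each exactly one 64-byte SHA-1
block, their compression-function outputs can be computed once at session
setup (which is all partial_hash_sha1() does via SHA1_Init()/SHA1_Transform())
and handed to the CCP as the initial hash state, so the engine only ever
hashes the message and the inner digest. The resulting layout, worked out from
the code above:

/*
 * sess->auth.pre_compute after generate_partial_hash() for SHA1-HMAC:
 *
 *   [0 .. ctx_len - 1]             inner state = SHA1-compress(K ^ ipad)
 *   [ctx_len .. 2 * ctx_len - 1]   outer state = SHA1-compress(K ^ opad)
 *
 * Each state is SHA1_DIGEST_SIZE = 20 bytes = 5 x 32-bit words, written in
 * reversed word order to match the layout the CCP expects in its LSB.
 */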

[dpdk-dev] [PATCH v3 15/19] crypto/ccp: support sha2 family authentication algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 270 +++
 drivers/crypto/ccp/ccp_crypto.h  |  48 +++
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 
 3 files changed, 486 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index ace6bc2..31353ed 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -61,6 +61,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / 
sizeof(uint32_t)] = {
0x0U, 0x0U,
 };
 
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+   SHA224_H7, SHA224_H6,
+   SHA224_H5, SHA224_H4,
+   SHA224_H3, SHA224_H2,
+   SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+   SHA256_H7, SHA256_H6,
+   SHA256_H5, SHA256_H4,
+   SHA256_H3, SHA256_H2,
+   SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+   SHA384_H7, SHA384_H6,
+   SHA384_H5, SHA384_H4,
+   SHA384_H3, SHA384_H2,
+   SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+   SHA512_H7, SHA512_H6,
+   SHA512_H5, SHA512_H4,
+   SHA512_H3, SHA512_H2,
+   SHA512_H1, SHA512_H0,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -97,6 +125,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t 
*data_out)
return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+   SHA256_CTX ctx;
+
+   if (!SHA224_Init(&ctx))
+   return -EFAULT;
+   SHA256_Transform(&ctx, data_in);
+   rte_memcpy(data_out, &ctx,
+  SHA256_DIGEST_LENGTH);
+   return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+   SHA256_CTX ctx;
+
+   if (!SHA256_Init(&ctx))
+   return -EFAULT;
+   SHA256_Transform(&ctx, data_in);
+   rte_memcpy(data_out, &ctx,
+  SHA256_DIGEST_LENGTH);
+   return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+   SHA512_CTX ctx;
+
+   if (!SHA384_Init(&ctx))
+   return -EFAULT;
+   SHA512_Transform(&ctx, data_in);
+   rte_memcpy(data_out, &ctx,
+  SHA512_DIGEST_LENGTH);
+   return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+   SHA512_CTX ctx;
+
+   if (!SHA512_Init(&ctx))
+   return -EFAULT;
+   SHA512_Transform(&ctx, data_in);
+   rte_memcpy(data_out, &ctx,
+  SHA512_DIGEST_LENGTH);
+   return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -104,11 +180,13 @@ static int generate_partial_hash(struct ccp_session *sess)
uint8_t opad[sess->auth.block_size];
uint8_t *ipad_t, *opad_t;
uint32_t *hash_value_be32, hash_temp32[8];
+   uint64_t *hash_value_be64, hash_temp64[8];
int i, count;
 
opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+   hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
 
/* considering key size is always equal to block size of algorithm */
for (i = 0; i < sess->auth.block_size; i++) {
@@ -132,6 +210,66 @@ static int generate_partial_hash(struct ccp_session *sess)
for (i = 0; i < count; i++, hash_value_be32++)
*hash_value_be32 = hash_temp32[count - 1 - i];
return 0;
+   case CCP_AUTH_ALGO_SHA224_HMAC:
+   count = SHA256_DIGEST_SIZE >> 2;
+
+   if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+   return -1;
+   for (i = 0; i < count; i++, hash_value_be32++)
+   *hash_value_be32 = hash_temp32[count - 1 - i];
+
+   hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+  + sess->auth.ctx_len);
+   if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+   return -1;
+   for (i = 0; i < count; i++, hash_value_be32++)
+   *hash_value_be32 = hash_temp32[count - 1 - i];
+   return 0;
+   case CCP_AUTH_ALGO_SHA256_HMAC:
+   count = SHA256_DIGEST_SIZE >> 2;
+
+   if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+   return -1;
+   for (i = 0; i < count; i++, hash_value_be32++)
+   *hash_value_be32 = hash_temp32[count - 1 - i];
+
+   hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+  + sess->auth.ctx_len);
+   if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+
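
The SHA-384/SHA-512 HMAC branches are cut off above, but they presumably
follow the same pattern over 64-bit words, using hash_value_be64/hash_temp64
and the partial_hash_sha384()/partial_hash_sha512() helpers. A sketch of what
the SHA-512 case would look like under that assumption (inside ccp_crypto.c,
where the static helpers are visible):

static int sha512_hmac_precompute(struct ccp_session *sess,
                                  uint8_t *ipad, uint8_t *opad)
{
        uint64_t hash_temp64[8];
        uint64_t *hash_value_be64 = (uint64_t *)sess->auth.pre_compute;
        int i, count = SHA512_DIGEST_SIZE >> 3;     /* 8 x 64-bit words */

        if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
                return -1;
        for (i = 0; i < count; i++, hash_value_be64++)
                *hash_value_be64 = hash_temp64[count - 1 - i];

        hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute +
                                       sess->auth.ctx_len);
        if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
                return -1;
        for (i = 0; i < count; i++, hash_value_be64++)
                *hash_value_be64 = hash_temp64[count - 1 - i];
        return 0;
}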

[dpdk-dev] [PATCH v3 13/19] crypto/ccp: support aes-gcm aead algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c  | 240 ++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  30 +
 2 files changed, 266 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 6e593d8..4ced193 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -299,6 +299,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
   const struct rte_crypto_sym_xform *xform)
 {
const struct rte_crypto_aead_xform *aead_xform = NULL;
+   size_t i;
 
aead_xform = &xform->aead;
 
@@ -313,6 +314,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
sess->auth.op = CCP_AUTH_OP_VERIFY;
}
+   sess->aead_algo = aead_xform->algo;
sess->auth.aad_length = aead_xform->aad_length;
sess->auth.digest_length = aead_xform->digest_length;
 
@@ -321,10 +323,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
sess->iv.length = aead_xform->iv.length;
 
switch (aead_xform->algo) {
+   case RTE_CRYPTO_AEAD_AES_GCM:
+   sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+   sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+   sess->cipher.engine = CCP_ENGINE_AES;
+   if (sess->cipher.key_length == 16)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+   else if (sess->cipher.key_length == 24)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+   else if (sess->cipher.key_length == 32)
+   sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+   else {
+   CCP_LOG_ERR("Invalid aead key length");
+   return -1;
+   }
+   for (i = 0; i < sess->cipher.key_length; i++)
+   sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+   sess->cipher.key[i];
+   sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+   sess->auth.engine = CCP_ENGINE_AES;
+   sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+   sess->auth.ctx_len = CCP_SB_BYTES;
+   sess->auth.offset = 0;
+   sess->auth.block_size = AES_BLOCK_SIZE;
+   sess->cmd_id = CCP_CMD_COMBINED;
+   break;
default:
CCP_LOG_ERR("Unsupported aead algo");
return -ENOTSUP;
}
+   sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+   sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
return 0;
 }
 
@@ -447,10 +476,27 @@ ccp_aead_slot(struct ccp_session *session)
int count = 0;
 
switch (session->aead_algo) {
+   case RTE_CRYPTO_AEAD_AES_GCM:
+   break;
default:
CCP_LOG_ERR("Unsupported aead algo %d",
session->aead_algo);
}
+   switch (session->auth.algo) {
+   case CCP_AUTH_ALGO_AES_GCM:
+   count = 5;
+   /**
+* 1. Passthru iv
+* 2. Hash AAD
+* 3. GCTR
+* 4. Reload passthru
+* 5. Hash Final tag
+*/
+   break;
+   default:
+   CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+   session->auth.algo);
+   }
return count;
 }
 
@@ -873,6 +919,184 @@ ccp_perform_3des(struct rte_crypto_op *op,
return 0;
 }
 
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+   struct ccp_session *session;
+   union ccp_function function;
+   uint8_t *lsb_buf, *append_ptr, *iv;
+   struct ccp_passthru pst;
+   struct ccp_desc *desc;
+   uint32_t tail;
+   uint64_t *temp;
+   phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+   phys_addr_t digest_dest_addr;
+   int length, non_align_len, i;
+
+   session = (struct ccp_session *)get_session_private_data(
+op->sym->session,
+ccp_cryptodev_driver_id);
+   iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+   key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+   src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+   if (unlikely(op->sym->m_dst != NULL))
+   dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+   op->sym->cipher.data.offset);
+   else
+   dest_addr = src_addr;
+   append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+  session->auth.ctx_len);
+   di
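
For context, the transform this session-configure path consumes is the
standard rte_cryptodev symmetric AEAD xform; a rough sketch of how an
application might fill it in (the IV offset, nonce, tag and AAD lengths below
are illustrative choices, not requirements of the PMD):

#include <string.h>
#include <rte_crypto.h>

/* Per-op IV assumed to live right after the sym op, a common convention in
 * the DPDK crypto tests.
 */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void fill_aes_gcm_xform(struct rte_crypto_sym_xform *xform,
                               uint8_t *key, uint16_t key_len)
{
        memset(xform, 0, sizeof(*xform));
        xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
        xform->next = NULL;
        xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
        xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
        xform->aead.key.data = key;            /* 16/24/32 B -> AES-128/192/256 */
        xform->aead.key.length = key_len;
        xform->aead.iv.offset = IV_OFFSET;
        xform->aead.iv.length = 12;            /* typical GCM nonce length */
        xform->aead.digest_length = 16;
        xform->aead.aad_length = 8;
}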

[dpdk-dev] [PATCH v3 16/19] crypto/ccp: support sha3 family authentication algo

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 drivers/crypto/ccp/ccp_crypto.c   | 667 +-
 drivers/crypto/ccp/ccp_crypto.h   |  22 ++
 drivers/crypto/ccp/ccp_pmd_ops.c  | 168 +
 lib/librte_cryptodev/rte_crypto_sym.h |  17 +
 4 files changed, 873 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 31353ed..1290cdd 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -89,6 +89,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / 
sizeof(uint64_t)] = {
SHA512_H1, SHA512_H0,
 };
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+   (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+   uint64_t saved;
+   /**
+* The portion of the input message that we
+* didn't consume yet
+*/
+   union {
+   uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+   /* Keccak's state */
+   uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+   /**total 200 ctx size**/
+   };
+   unsigned int byteIndex;
+   /**
+* 0..7--the next byte after the set one
+* (starts from 0; 0--none are buffered)
+*/
+   unsigned int wordIndex;
+   /**
+* 0..24--the next word to integrate input
+* (starts from 0)
+*/
+   unsigned int capacityWords;
+   /**
+* the double size of the hash output in
+* words (e.g. 16 for Keccak 512)
+*/
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+   (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+   SHA3_CONST(0x0001UL), SHA3_CONST(0x8082UL),
+   SHA3_CONST(0x8000808aUL), SHA3_CONST(0x800080008000UL),
+   SHA3_CONST(0x808bUL), SHA3_CONST(0x8001UL),
+   SHA3_CONST(0x800080008081UL), SHA3_CONST(0x80008009UL),
+   SHA3_CONST(0x008aUL), SHA3_CONST(0x0088UL),
+   SHA3_CONST(0x80008009UL), SHA3_CONST(0x800aUL),
+   SHA3_CONST(0x8000808bUL), SHA3_CONST(0x808bUL),
+   SHA3_CONST(0x80008089UL), SHA3_CONST(0x80008003UL),
+   SHA3_CONST(0x80008002UL), SHA3_CONST(0x8080UL),
+   SHA3_CONST(0x800aUL), SHA3_CONST(0x8000800aUL),
+   SHA3_CONST(0x800080008081UL), SHA3_CONST(0x80008080UL),
+   SHA3_CONST(0x8001UL), SHA3_CONST(0x800080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+   1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+   18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+   10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+   14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -173,6 +241,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t 
*data_out)
return 0;
 }
 
+static void
+keccakf(uint64_t s[25])
+{
+   int i, j, round;
+   uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+   for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+   /* Theta */
+   for (i = 0; i < 5; i++)
+   bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+   s[i + 20];
+
+   for (i = 0; i < 5; i++) {
+   t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+   for (j = 0; j < 25; j += 5)
+   s[j + i] ^= t;
+   }
+
+   /* Rho Pi */
+   t = s[1];
+   for (i = 0; i < 24; i++) {
+   j = keccakf_piln[i];
+   bc[0] = s[j];
+   s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+   t = bc[0];
+   }
+
+   /* Chi */
+   for (j = 0; j < 25; j += 5) {
+   for (i = 0; i < 5; i++)
+   bc[i] = s[j + i];
+   for (i = 0; i < 5; i++)
+   s[j + i] ^= (~bc[(i + 1) % 5]) &
+   bc[(i + 2) % 5];
+   }
+
+   /* Iota */
+   s[0] ^= keccakf_rndc[round];
+   }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+   sha3_context *ctx = (sha3_context *) priv;
+
+   memset(ctx, 0, sizeof(*ctx));
+   ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+   sha3_context *ctx = (sha3_context *) priv;
+
+   memset(ctx, 0, sizeof(*ctx));
+   ctx->capacityWords = 2 * 2
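
Working the sponge sizing out of the init routines (the SHA3-384/512 variants
are truncated above but follow the same 2 * digest_bits formula): the state is
SHA3_KECCAK_SPONGE_WORDS = 1600/8/8 = 25 64-bit words, so

   SHA3-224: capacityWords = 2*224/64 = 7   ->  rate = 18 words = 144-byte block
   SHA3-256: capacityWords = 2*256/64 = 8   ->  rate = 17 words = 136-byte block
   SHA3-384: capacityWords = 2*384/64 = 12  ->  rate = 13 words = 104-byte block
   SHA3-512: capacityWords = 2*512/64 = 16  ->  rate =  9 words =  72-byte block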

[dpdk-dev] [PATCH v3 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo

2018-01-10 Thread Ravi Kumar
Auth operations can be performed on the CPU without offloading
to the CCP if CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH is enabled in
the DPDK configuration.

Signed-off-by: Ravi Kumar 
---
 config/common_base   |   1 +
 drivers/crypto/ccp/ccp_crypto.c  | 282 ++-
 drivers/crypto/ccp/ccp_crypto.h  |   5 +-
 drivers/crypto/ccp/ccp_pmd_ops.c |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
 5 files changed, 316 insertions(+), 5 deletions(-)

diff --git a/config/common_base b/config/common_base
index 88826c8..2974581 100644
--- a/config/common_base
+++ b/config/common_base
@@ -560,6 +560,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1290cdd..f916055 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -53,6 +53,12 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include 
+#include 
+#include 
+#endif
+
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
SHA1_H4, SHA1_H3,
@@ -786,6 +792,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
else
sess->auth.op = CCP_AUTH_OP_VERIFY;
switch (auth_xform->algo) {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+   case RTE_CRYPTO_AUTH_MD5_HMAC:
+   sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+   sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+   sess->auth.key_length = auth_xform->key.length;
+   sess->auth.block_size = MD5_BLOCK_SIZE;
+   memset(sess->auth.key, 0, sess->auth.block_size);
+   rte_memcpy(sess->auth.key, auth_xform->key.data,
+  auth_xform->key.length);
+   break;
+#endif
case RTE_CRYPTO_AUTH_SHA1:
sess->auth.engine = CCP_ENGINE_SHA;
sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -795,6 +812,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+   if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+   return -1;
+   sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+   sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+   sess->auth.block_size = SHA1_BLOCK_SIZE;
+   sess->auth.key_length = auth_xform->key.length;
+   memset(sess->auth.key, 0, sess->auth.block_size);
+   rte_memcpy(sess->auth.key, auth_xform->key.data,
+  auth_xform->key.length);
+#else
if (auth_xform->key.length > SHA1_BLOCK_SIZE)
return -1;
sess->auth.engine = CCP_ENGINE_SHA;
@@ -810,6 +838,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
   auth_xform->key.length);
if (generate_partial_hash(sess))
return -1;
+#endif
break;
case RTE_CRYPTO_AUTH_SHA224:
sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -820,6 +849,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+   if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+   return -1;
+   sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+   sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+   sess->auth.block_size = SHA224_BLOCK_SIZE;
+   sess->auth.key_length = auth_xform->key.length;
+   memset(sess->auth.key, 0, sess->auth.block_size);
+   rte_memcpy(sess->auth.key, auth_xform->key.data,
+  auth_xform->key.length);
+#else
if (auth_xform->key.length > SHA224_BLOCK_SIZE)
return -1;
sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
@@ -835,6 +875,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
   auth_xform->key.length);
if (generate_partial_hash(sess))
return -1;
+#endif
break;
case RTE_CRYPTO_AUTH_SHA3_224:
sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -869,6 +910,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+   if (auth_xform->key.
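
Both knobs live in config/common_base, so with the feature enabled the two
lines would read as below (the CPU-auth option is presumably only meaningful
when the CCP PMD itself is enabled):

   CONFIG_RTE_LIBRTE_PMD_CCP=y
   CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y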

[dpdk-dev] [PATCH v3 18/19] test/crypto: add test for AMD CCP crypto poll mode driver

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 test/test/test_cryptodev.c   | 161 +++
 test/test/test_cryptodev.h   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++--
 test/test/test_cryptodev_blockcipher.c   |   9 +-
 test/test/test_cryptodev_blockcipher.h   |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 4d7e5b1..17df676 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -336,6 +336,23 @@ testsuite_setup(void)
}
}
 
+   /* Create an CCP device if required */
+   if (gbl_driver_id == rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+   nb_devs = rte_cryptodev_device_count_by_driver(
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+   if (nb_devs < 1) {
+   ret = rte_vdev_init(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+   NULL);
+
+   TEST_ASSERT(ret == 0, "Failed to create "
+   "instance of pmd : %s",
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+   }
+   }
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
if (gbl_driver_id == rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1725,6 +1742,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+   struct crypto_testsuite_params *ts_params = &testsuite_params;
+   int status;
+
+   status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+   ts_params->op_mpool,
+   ts_params->session_mpool,
+   ts_params->valid_devs[0],
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+   BLKCIPHER_AES_CHAIN_TYPE);
+
+   TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+   struct crypto_testsuite_params *ts_params = &testsuite_params;
+   int status;
+
+   status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+   ts_params->op_mpool,
+   ts_params->session_mpool,
+   ts_params->valid_devs[0],
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+   BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+   TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+   return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1896,6 +1951,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+   struct crypto_testsuite_params *ts_params = &testsuite_params;
+   int status;
+
+   status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+   ts_params->op_mpool,
+   ts_params->session_mpool,
+   ts_params->valid_devs[0],
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+   BLKCIPHER_AUTHONLY_TYPE);
+
+   TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+   return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4971,6 +5045,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+   struct crypto_testsuite_params *ts_params = &testsuite_params;
+   int status;
+
+   status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+   ts_params->op_mpool,
+   ts_params->session_mpool,
+   ts_params->valid_devs[0],
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+   BLKCIPHER_3DES_CHAIN_TYPE);
+
+   TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+   struct crypto_testsuite_params *ts_params = &testsuite_params;
+   int status;
+
+   status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+   ts_params->op_mpool,
+   ts_params->session_mpool,
+   ts_params->valid_devs[0],
+   rte_cryptodev_driver_id_get(
+   RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+   BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+   TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+   return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
struct crypto_testsuite_params *ts_p

[dpdk-dev] [PATCH v3 19/19] doc: add document for AMD CCP crypto poll mode driver

2018-01-10 Thread Ravi Kumar
Signed-off-by: Ravi Kumar 
---
 doc/guides/cryptodevs/ccp.rst  | 127 +
 doc/guides/cryptodevs/features/ccp.ini |  57 +
 doc/guides/cryptodevs/features/default.ini |  12 +++
 doc/guides/cryptodevs/index.rst|   1 +
 4 files changed, 197 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 000..59b9281
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,127 @@
+.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+
+
+This code provides the initial implementation of the ccp poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD's cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+   AMD Cryptographic Co-processor (0x1456)
+   AMD Cryptographic Co-processor (0x1468)
+
+Features
+
+
+CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+
+
+To compile CCP PMD, it has to be enabled in the config/common_base file.
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication over the CPU with the
+cipher offloaded to the CCP. To enable this feature, enable the following in
+the configuration.
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP
+PMD stack.
+e.g. for the 0x1456 device::
+
+   cd to the top-level DPDK directory
+   modprobe uio
+   insmod ./build/kmod/igb_uio.ko
+   echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script.
+The following command assumes ``BFD`` of ``:09:00.2``::
+
+   cd to the top-level DPDK directory
+   ./usertools/dpdk-devbind.py -b igb_uio :09:00.2
+
+To verify real traffic, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+   sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+   --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+   --cipher_k

Re: [dpdk-dev] [PATCH v6 05/14] net/avf: enable link status update

2018-01-10 Thread Xing, Beilei


> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Wenzhuo Lu
> Sent: Wednesday, January 10, 2018 2:16 PM
> To: dev@dpdk.org
> Cc: Wu, Jingjing 
> Subject: [dpdk-dev] [PATCH v6 05/14] net/avf: enable link status update
> 
> From: Jingjing Wu 
> 
> Signed-off-by: Jingjing Wu 

Acked-by: Beilei Xing 


[dpdk-dev] [PATCH v3 10/19] net/dpaa: change Tx HW budget to 7

2018-01-10 Thread Hemant Agrawal
From: Nipun Gupta 

Change the Tx HW budget to 7 to best sync with the hardware.

Signed-off-by: Nipun Gupta 
Acked-by: Hemant Agrawal 
---
 drivers/net/dpaa/dpaa_ethdev.h | 2 +-
 drivers/net/dpaa/dpaa_rxtx.c   | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index f00a77a..1b36567 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -41,7 +41,7 @@
 #define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)
 
 /*Maximum number of slots available in TX ring*/
-#define MAX_TX_RING_SLOTS  8
+#define DPAA_TX_BURST_SIZE 7
 
 #ifndef VLAN_TAG_SIZE
 #define VLAN_TAG_SIZE   4 /** < Vlan Header Length */
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 630d7a5..565ca50 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -669,7 +669,7 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t 
nb_bufs)
struct rte_mbuf *mbuf, *mi = NULL;
struct rte_mempool *mp;
struct dpaa_bp_info *bp_info;
-   struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
+   struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
uint32_t frames_to_send, loop, i = 0;
uint16_t state;
int ret;
@@ -683,7 +683,8 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t 
nb_bufs)
DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
 
while (nb_bufs) {
-   frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
+   frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
+   DPAA_TX_BURST_SIZE : nb_bufs;
for (loop = 0; loop < frames_to_send; loop++, i++) {
mbuf = bufs[i];
if (RTE_MBUF_DIRECT(mbuf)) {
-- 
2.7.4
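
The new split is a plain clamp to the hardware budget: a burst of 20 mbufs,
for example, is now sent as 7 + 7 + 6 frames, whereas the old
"(nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs" expression only used the full
ring when at least 8 buffers were pending. An equivalent formulation of the
new computation (illustrative, not in the patch):

#include <rte_common.h>
#include "dpaa_ethdev.h"

static inline uint16_t dpaa_tx_burst_clamp(uint16_t nb_bufs)
{
        /* Same result as the ternary in dpaa_eth_queue_tx(). */
        return RTE_MIN(nb_bufs, (uint16_t)DPAA_TX_BURST_SIZE);
}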



[dpdk-dev] [PATCH v3 12/19] net/dpaa: optimize Rx path

2018-01-10 Thread Hemant Agrawal
From: Nipun Gupta 

Signed-off-by: Nipun Gupta 
Signed-off-by: Hemant Agrawal 
---
 drivers/net/dpaa/dpaa_rxtx.c | 48 
 drivers/net/dpaa/dpaa_rxtx.h |  2 +-
 2 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 148f265..98671fa 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -97,12 +97,6 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
 
switch (prs) {
-   case DPAA_PKT_TYPE_NONE:
-   m->packet_type = 0;
-   break;
-   case DPAA_PKT_TYPE_ETHER:
-   m->packet_type = RTE_PTYPE_L2_ETHER;
-   break;
case DPAA_PKT_TYPE_IPV4:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4;
@@ -111,6 +105,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6;
break;
+   case DPAA_PKT_TYPE_ETHER:
+   m->packet_type = RTE_PTYPE_L2_ETHER;
+   break;
case DPAA_PKT_TYPE_IPV4_FRAG:
case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
@@ -173,6 +170,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
break;
+   case DPAA_PKT_TYPE_NONE:
+   m->packet_type = 0;
+   break;
/* More switch cases can be added */
default:
dpaa_slow_parsing(m, prs);
@@ -183,12 +183,11 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf 
*m,
<< DPAA_PKT_L3_LEN_SHIFT;
 
/* Set the hash values */
-   m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
-   m->ol_flags = PKT_RX_RSS_HASH;
+   m->hash.rss = (uint32_t)(annot->hash);
/* All packets with Bad checksum are dropped by interface (and
 * corresponding notification issued to RX error queues).
 */
-   m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+   m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;
 
/* Check if Vlan is present */
if (prs & DPAA_PARSE_VLAN_MASK)
@@ -297,7 +296,7 @@ dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct 
qm_fd *fd_arr)
 }
 
 struct rte_mbuf *
-dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
+dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 {
struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
@@ -355,34 +354,31 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
return first_seg;
 }
 
-static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
-   uint32_t ifid)
+static inline struct rte_mbuf *
+dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 {
-   struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
struct rte_mbuf *mbuf;
-   void *ptr;
+   struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+   void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
uint8_t format =
(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
-   uint16_t offset =
-   (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
-   uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+   uint16_t offset;
+   uint32_t length;
 
DPAA_DP_LOG(DEBUG, " FD--->MBUF");
 
if (unlikely(format == qm_fd_sg))
return dpaa_eth_sg_to_mbuf(fd, ifid);
 
+   rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+
+   offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+   length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
/* Ignoring case when format != qm_fd_contig */
dpaa_display_frame(fd);
-   ptr = rte_dpaa_mem_ptov(fd->addr);
-   /* Ignoring case when ptr would be NULL. That is only possible incase
-* of a corrupted packet
-*/
 
mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
-   /* Prefetch the Parse results and packet data to L1 */
-   rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
-   rte_prefetch0((void *)((uint8_t *)ptr + offset));
 
mbuf->data_off = offset;
mbuf->data_len = length;
@@ -462,11 +458,11 @@ static struct rte_mbuf *dpaa_get_dmable_mbuf(struct 
rte_mbuf *mbuf,
if (!dpaa_mbuf)
return NULL;
 
-   memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+   memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + RTE_PKTMBUF_HEADROOM, (void *)
((uint

[dpdk-dev] [PATCH v3 15/19] net/dpaa: add support for loopback API

2018-01-10 Thread Hemant Agrawal
A PMD-specific API is being added as an EXPERIMENTAL API.

Signed-off-by: Hemant Agrawal 
---
 doc/api/doxy-api-index.md |  1 +
 doc/api/doxy-api.conf |  1 +
 drivers/net/dpaa/Makefile |  3 +++
 drivers/net/dpaa/dpaa_ethdev.c| 42 +++
 drivers/net/dpaa/rte_pmd_dpaa.h   | 39 
 drivers/net/dpaa/rte_pmd_dpaa_version.map |  8 ++
 6 files changed, 94 insertions(+)
 create mode 100644 drivers/net/dpaa/rte_pmd_dpaa.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 3492702..38314af 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -60,6 +60,7 @@ The public API headers are grouped by topics:
   [ixgbe]  (@ref rte_pmd_ixgbe.h),
   [i40e]   (@ref rte_pmd_i40e.h),
   [bnxt]   (@ref rte_pmd_bnxt.h),
+  [dpaa]   (@ref rte_pmd_dpaa.h),
   [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h)
 
 - **memory**:
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index b2cbe94..09e3232 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -33,6 +33,7 @@ INPUT   = doc/api/doxy-api-index.md \
   drivers/crypto/scheduler \
   drivers/net/bnxt \
   drivers/net/bonding \
+  drivers/net/dpaa \
   drivers/net/i40e \
   drivers/net/ixgbe \
   drivers/net/softnic \
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index e5f662f..b1fc5a0 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -34,4 +34,7 @@ LDLIBS += -lrte_mempool_dpaa
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
 
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h
+
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index de016ab..85ccea4 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -38,6 +38,7 @@
 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -84,6 +85,8 @@ static const struct rte_dpaa_xstats_name_off 
dpaa_xstats_strings[] = {
offsetof(struct dpaa_if_stats, tund)},
 };
 
+static struct rte_dpaa_driver rte_dpaa_pmd;
+
 static int
 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -707,6 +710,45 @@ static struct eth_dev_ops dpaa_devops = {
.fw_version_get   = dpaa_fw_version_get,
 };
 
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
+{
+   if (strcmp(dev->device->driver->name,
+  drv->driver.name))
+   return false;
+
+   return true;
+}
+
+static bool
+is_dpaa_supported(struct rte_eth_dev *dev)
+{
+   return is_device_supported(dev, &rte_dpaa_pmd);
+}
+
+int
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+{
+   struct rte_eth_dev *dev;
+   struct dpaa_if *dpaa_intf;
+
+   RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+   dev = &rte_eth_devices[port];
+
+   if (!is_dpaa_supported(dev))
+   return -ENOTSUP;
+
+   dpaa_intf = dev->data->dev_private;
+
+   if (on)
+   fman_if_loopback_enable(dpaa_intf->fif);
+   else
+   fman_if_loopback_disable(dpaa_intf->fif);
+
+   return 0;
+}
+
 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
 {
struct rte_eth_fc_conf *fc_conf;
diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h
new file mode 100644
index 000..9614be8
--- /dev/null
+++ b/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _PMD_DPAA_H_
+#define _PMD_DPAA_H_
+
+/**
+ * @file rte_pmd_dpaa.h
+ *
+ * NXP dpaa PMD specific functions.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include 
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Enable/Disable TX loopback
+ *
+ * @param port
+ *The port identifier of the Ethernet device.
+ * @param on
+ *1 - Enable TX loopback.
+ *0 - Disable TX loopback.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
+
+#endif /* _PMD_DPAA_H_ */
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map 
b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index a70bd19..d1f3ea4 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -2,3 +2,11 @@ DPDK_17.11 {
 
local: *;
 };
+
+EXPERIMENTAL {
+   global:
+
+   rte_pmd_dpaa_set_tx_loopback;
+
+ 
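
A minimal sketch of an application toggling the new API (only the failure
codes visible in the implementation above are checked; the port is assumed to
be a dpaa port):

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#include <rte_pmd_dpaa.h>

static int enable_dpaa_tx_loopback(uint8_t port_id)
{
        int ret = rte_pmd_dpaa_set_tx_loopback(port_id, 1);

        if (ret == -ENODEV)
                printf("port %u: invalid port id\n", port_id);
        else if (ret == -ENOTSUP)
                printf("port %u: not a dpaa port\n", port_id);
        return ret;
}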

[dpdk-dev] [PATCH v3 14/19] net/dpaa: add Rx queue count support

2018-01-10 Thread Hemant Agrawal
Signed-off-by: Hemant Agrawal 
---
 drivers/net/dpaa/dpaa_ethdev.c | 17 +
 1 file changed, 17 insertions(+)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 5d94af5..de016ab 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -513,6 +513,22 @@ static void dpaa_eth_tx_queue_release(void *txq 
__rte_unused)
PMD_INIT_FUNC_TRACE();
 }
 
+static uint32_t
+dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+   struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+   u32 frm_cnt = 0;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
+   RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+   rx_queue_id, frm_cnt);
+   }
+   return frm_cnt;
+}
+
 static int dpaa_link_down(struct rte_eth_dev *dev)
 {
PMD_INIT_FUNC_TRACE();
@@ -664,6 +680,7 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_setup   = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
+   .rx_queue_count   = dpaa_dev_rx_queue_count,
 
.flow_ctrl_get= dpaa_flow_ctrl_get,
.flow_ctrl_set= dpaa_flow_ctrl_set,
-- 
2.7.4
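
With the dev op wired up, applications reach it through the generic ethdev
call rte_eth_rx_queue_count(); a small illustrative check (the watermark is an
arbitrary example value):

#include <rte_ethdev.h>

/* Return non-zero when Rx queue 0 of the given port holds a large backlog. */
static int dpaa_rxq_backlog_high(uint16_t port_id)
{
        int cnt = rte_eth_rx_queue_count(port_id, 0);

        return cnt > 256;
}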



[dpdk-dev] [PATCH v3 13/19] bus/dpaa: query queue frame count support

2018-01-10 Thread Hemant Agrawal
Signed-off-by: Hemant Agrawal 
---
 drivers/bus/dpaa/base/qbman/qman.c| 22 ++
 drivers/bus/dpaa/include/fsl_qman.h   |  7 +++
 drivers/bus/dpaa/rte_bus_dpaa_version.map |  1 +
 3 files changed, 30 insertions(+)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c 
b/drivers/bus/dpaa/base/qbman/qman.c
index d8fb25a..ffb008e 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1722,6 +1722,28 @@ int qman_query_fq_np(struct qman_fq *fq, struct 
qm_mcr_queryfq_np *np)
return 0;
 }
 
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
+{
+   struct qm_mc_command *mcc;
+   struct qm_mc_result *mcr;
+   struct qman_portal *p = get_affine_portal();
+
+   mcc = qm_mc_start(&p->p);
+   mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+   qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+   while (!(mcr = qm_mc_result(&p->p)))
+   cpu_relax();
+   DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+
+   if (mcr->result == QM_MCR_RESULT_OK)
+   *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
+   else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+   return -ERANGE;
+   else if (mcr->result != QM_MCR_RESULT_OK)
+   return -EIO;
+   return 0;
+}
+
 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
 {
struct qm_mc_command *mcc;
diff --git a/drivers/bus/dpaa/include/fsl_qman.h 
b/drivers/bus/dpaa/include/fsl_qman.h
index fc00d8d..d769d50 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1616,6 +1616,13 @@ int qman_query_fq_has_pkts(struct qman_fq *fq);
 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
 
 /**
+ * qman_query_fq_frm_cnt - Queries fq frame count
+ * @fq: the frame queue object to be queried
+ * @frm_cnt: number of frames in the queue
+ */
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
+
+/**
  * qman_query_wq - Queries work queue lengths
  * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
  * to this software portal. Otherwise, query length of WQs in a
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map 
b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 4e3afda..212c75f 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -73,6 +73,7 @@ DPDK_18.02 {
qman_create_cgr;
qman_delete_cgr;
qman_modify_cgr;
+   qman_query_fq_frm_cnt;
qman_release_cgrid_range;
rte_dpaa_portal_fq_close;
rte_dpaa_portal_fq_init;
-- 
2.7.4
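
The return codes are worth spelling out: from the implementation above,
-ERANGE means the FQID was not valid, -EIO covers any other management-command
failure, and 0 means success with *frm_cnt filled in. A small caller sketch
(driver context, where the u32 typedef and fsl_qman.h are available):

#include <errno.h>
#include <fsl_qman.h>

/* Return the current frame count of an FQ, or 0 if the query fails. */
static u32 fq_depth_or_zero(struct qman_fq *fq)
{
        u32 frm_cnt = 0;

        if (qman_query_fq_frm_cnt(fq, &frm_cnt) != 0)
                return 0;
        return frm_cnt;
}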



[dpdk-dev] [PATCH v3 16/19] app/testpmd: add support for loopback config for dpaa

2018-01-10 Thread Hemant Agrawal
Signed-off-by: Hemant Agrawal 
---
 app/test-pmd/Makefile  | 4 
 app/test-pmd/cmdline.c | 7 +++
 2 files changed, 11 insertions(+)

diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 82b3481..34125e5 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -43,6 +43,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
 LDLIBS += -lrte_pmd_bond
 endif
 
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)
+LDLIBS += -lrte_pmd_dpaa
+endif
+
 ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
 LDLIBS += -lrte_pmd_ixgbe
 endif
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f71d963..32096aa 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -89,6 +89,9 @@
 #include 
 #include 
 #endif
+#ifdef RTE_LIBRTE_DPAA_PMD
+#include 
+#endif
 #ifdef RTE_LIBRTE_IXGBE_PMD
 #include 
 #endif
@@ -12620,6 +12623,10 @@ cmd_set_tx_loopback_parsed(
if (ret == -ENOTSUP)
ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on);
 #endif
+#ifdef RTE_LIBRTE_DPAA_PMD
+   if (ret == -ENOTSUP)
+   ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on);
+#endif
 
switch (ret) {
case 0:
-- 
2.7.4
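
Assuming the existing testpmd syntax for this command (the same one already
routed to the ixgbe/i40e/bnxt helpers above), a dpaa port can now be switched
from the interactive prompt:

   testpmd> set tx loopback 0 on
   testpmd> set tx loopback 0 off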



[dpdk-dev] [PATCH v3 17/19] bus/dpaa: add support for static queues

2018-01-10 Thread Hemant Agrawal
DPAA hardware supports two kinds of queues:
1. Pull mode queue - where one needs to regularly pull the packets.
2. Push mode queue - where the hardware pushes the packets to the queue. These
   are high performance queues, but limited in number.

This patch adds the driver support for push mode queues.

Signed-off-by: Sunil Kumar Kori 
Signed-off-by: Hemant Agrawal 
---
 drivers/bus/dpaa/base/qbman/qman.c| 64 +++
 drivers/bus/dpaa/base/qbman/qman.h|  4 +-
 drivers/bus/dpaa/include/fsl_qman.h   | 14 ++-
 drivers/bus/dpaa/rte_bus_dpaa_version.map |  4 ++
 4 files changed, 83 insertions(+), 3 deletions(-)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c 
b/drivers/bus/dpaa/base/qbman/qman.c
index ffb008e..7e285a5 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1051,6 +1051,70 @@ u16 qman_affine_channel(int cpu)
return affine_channels[cpu];
 }
 
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+void **bufs,
+struct qman_portal *p)
+{
+   const struct qm_dqrr_entry *dq;
+   struct qman_fq *fq;
+   enum qman_cb_dqrr_result res;
+   unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+   struct qm_dqrr_entry *shadow;
+#endif
+   unsigned int rx_number = 0;
+
+   do {
+   qm_dqrr_pvb_update(&p->p);
+   dq = qm_dqrr_current(&p->p);
+   if (unlikely(!dq))
+   break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+   /* If running on an LE system the fields of the
+* dequeue entry must be swapped. Because the
+* QMan HW will ignore writes, the DQRR entry is
+* copied and the index stored within the copy.
+*/
+   shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+   *shadow = *dq;
+   dq = shadow;
+   shadow->fqid = be32_to_cpu(shadow->fqid);
+   shadow->contextB = be32_to_cpu(shadow->contextB);
+   shadow->seqnum = be16_to_cpu(shadow->seqnum);
+   hw_fd_to_cpu(&shadow->fd);
+#endif
+
+   /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+   fq = get_fq_table_entry(dq->contextB);
+#else
+   fq = (void *)(uintptr_t)dq->contextB;
+#endif
+   /* Now let the callback do its stuff */
+   res = fq->cb.dqrr_dpdk_cb(NULL, p, fq, dq, &bufs[rx_number]);
+   rx_number++;
+   /* Interpret 'dq' from a driver perspective. */
+   /*
+* Parking isn't possible unless HELDACTIVE was set. NB,
+* FORCEELIGIBLE implies HELDACTIVE, so we only need to
+* check for HELDACTIVE to cover both.
+*/
+   DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+   (res != qman_cb_dqrr_park));
+   qm_dqrr_cdc_consume_1ptr(&p->p, dq, res == qman_cb_dqrr_park);
+   /* Move forward */
+   qm_dqrr_next(&p->p);
+   /*
+* Entry processed and consumed, increment our counter.  The
+* callback can request that we exit after consuming the
+* entry, and we also exit if we reach our processing limit,
+* so loop back only if neither of these conditions is met.
+*/
+   } while (likely(++limit < poll_limit));
+
+   return limit;
+}
+
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
 {
struct qman_portal *p = get_affine_portal();
diff --git a/drivers/bus/dpaa/base/qbman/qman.h 
b/drivers/bus/dpaa/base/qbman/qman.h
index a433369..4346d86 100644
--- a/drivers/bus/dpaa/base/qbman/qman.h
+++ b/drivers/bus/dpaa/base/qbman/qman.h
@@ -154,7 +154,7 @@ struct qm_eqcr {
 };
 
 struct qm_dqrr {
-   const struct qm_dqrr_entry *ring, *cursor;
+   struct qm_dqrr_entry *ring, *cursor;
u8 pi, ci, fill, ithresh, vbit;
 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
enum qm_dqrr_dmode dmode;
@@ -441,7 +441,7 @@ static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
 }
 
-static inline const struct qm_dqrr_entry *DQRR_INC(
+static inline struct qm_dqrr_entry *DQRR_INC(
const struct qm_dqrr_entry *e)
 {
return DQRR_CARRYCLEAR(e + 1);
diff --git a/drivers/bus/dpaa/include/fsl_qman.h 
b/drivers/bus/dpaa/include/fsl_qman.h
index d769d50..ad40d80 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1124,6 +1124,12 @@ typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct 
qman_portal *qm,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr);
 
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *e

[dpdk-dev] [PATCH v3 18/19] net/dpaa: integrate the support of push mode in PMD

2018-01-10 Thread Hemant Agrawal
Signed-off-by: Sunil Kumar Kori 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Nipun Gupta 
---
 doc/guides/nics/dpaa.rst   | 11 
 drivers/net/dpaa/dpaa_ethdev.c | 64 +-
 drivers/net/dpaa/dpaa_ethdev.h |  2 +-
 drivers/net/dpaa/dpaa_rxtx.c   | 34 ++
 drivers/net/dpaa/dpaa_rxtx.h   |  5 
 5 files changed, 108 insertions(+), 8 deletions(-)

diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
index a62f128..0a13996 100644
--- a/doc/guides/nics/dpaa.rst
+++ b/doc/guides/nics/dpaa.rst
@@ -290,6 +290,17 @@ state during application initialization:
   In case the application is configured to use lesser number of queues than
   configured above, it might result in packet loss (because of distribution).
 
+- ``DPAA_PUSH_QUEUES_NUMBER`` (default 4)
+
+  This defines the number of high performance queues to be used for ethdev Rx.
+  These queues use one private HW portal per queue configured, so they are
+  limited in the system. The first configured ethdev queues will be
+  automatically assigned from these high performance PUSH queues. Any queue
+  configuration beyond that will be standard Rx queues. The application can
+  choose to change their number if HW portals are limited.
+  The valid values are from '0' to '4'. The value shall be set to '0' if the
+  application wants to use eventdev with the DPAA device.
+
 
 Driver compilation and testing
 --
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 85ccea4..444c122 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -47,6 +47,14 @@
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
+/* At present we only allow up to 4 push mode queues - as each of these
+ * queues needs a dedicated portal and we are short of portals.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE   4
+
+static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_queue_idx; /* Queue index for queues in push mode */
+
 
 /* Per FQ Taildrop in frame count */
 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
@@ -434,6 +442,9 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
 {
struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
+   struct qm_mcc_initfq opts = {0};
+   u32 flags = 0;
+   int ret;
 
PMD_INIT_FUNC_TRACE();
 
@@ -469,13 +480,45 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
dpaa_intf->name, fd_offset,
fman_if_get_fdoff(dpaa_intf->fif));
}
-
+   /* check whether this queue can use push mode; no error check for now */
+   if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+   dpaa_push_queue_idx++;
+   opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+   opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+  QM_FQCTRL_CTXASTASHING |
+  QM_FQCTRL_PREFERINCACHE;
+   opts.fqd.context_a.stashing.exclusive = 0;
+   opts.fqd.context_a.stashing.annotation_cl =
+   DPAA_IF_RX_ANNOTATION_STASH;
+   opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+   opts.fqd.context_a.stashing.context_cl =
+   DPAA_IF_RX_CONTEXT_STASH;
+
+   /*Create a channel and associate given queue with the channel*/
+   qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+   opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+   opts.fqd.dest.channel = rxq->ch_id;
+   opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+   flags = QMAN_INITFQ_FLAG_SCHED;
+
+   /* Configure tail drop */
+   if (dpaa_intf->cgr_rx) {
+   opts.we_mask |= QM_INITFQ_WE_CGID;
+   opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+   opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+   }
+   ret = qman_init_fq(rxq, flags, &opts);
+   if (ret)
+   DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
+" ret: %d", rxq->fqid, ret);
+   rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb;
+   rxq->is_static = true;
+   }
dev->data->rx_queues[queue_idx] = rxq;
 
/* configure the CGR size as per the desc size */
if (dpaa_intf->cgr_rx) {
struct qm_mcc_initcgr cgr_opts = {0};
-   int ret;
 
/* Enable tail drop with cgr on this queue */
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
@@ -809,11 +852,8 @@ static int dpaa_rx_queue_init(struct

[dpdk-dev] [PATCH v3 19/19] bus/dpaa: support for enqueue frames of multiple queues

2018-01-10 Thread Hemant Agrawal
From: Akhil Goyal 
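
A hedged usage sketch (illustration only, not part of this patch): enqueue a
burst where each frame targets its own frame queue with the new
qman_enqueue_multi_fq(); the fqs[]/fds[] arrays and the retry policy are
assumptions for illustration.

#include <fsl_qman.h>

static int
tx_burst_multi_fq_sketch(struct qman_fq *fqs[], const struct qm_fd *fds,
                         int nb_frames)
{
        int sent = 0, loops = 0;

        /* qman_enqueue_multi_fq() returns how many frames fit into the
         * EQCR; retry a bounded number of times for the remainder.
         */
        while (sent < nb_frames && loops++ < 16)
                sent += qman_enqueue_multi_fq(&fqs[sent], &fds[sent],
                                              nb_frames - sent);
        return sent;
}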

Signed-off-by: Akhil Goyal 
Signed-off-by: Nipun Gupta 
Acked-by: Hemant Agrawal 
---
 drivers/bus/dpaa/base/qbman/qman.c| 66 +++
 drivers/bus/dpaa/include/fsl_qman.h   | 14 +++
 drivers/bus/dpaa/rte_bus_dpaa_version.map |  1 +
 3 files changed, 81 insertions(+)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c 
b/drivers/bus/dpaa/base/qbman/qman.c
index 7e285a5..e171356 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -2158,6 +2158,72 @@ int qman_enqueue_multi(struct qman_fq *fq,
return sent;
 }
 
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send)
+{
+   struct qman_portal *p = get_affine_portal();
+   struct qm_portal *portal = &p->p;
+
+   register struct qm_eqcr *eqcr = &portal->eqcr;
+   struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+   u8 i, diff, old_ci, sent = 0;
+
+   /* Update the available entries if no entry is free */
+   if (!eqcr->available) {
+   old_ci = eqcr->ci;
+   eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+   diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+   eqcr->available += diff;
+   if (!diff)
+   return 0;
+   }
+
+   /* try to send as many frames as possible */
+   while (eqcr->available && frames_to_send--) {
+   eq->fqid = fq[sent]->fqid_le;
+   eq->fd.opaque_addr = fd->opaque_addr;
+   eq->fd.addr = cpu_to_be40(fd->addr);
+   eq->fd.status = cpu_to_be32(fd->status);
+   eq->fd.opaque = cpu_to_be32(fd->opaque);
+
+   eq = (void *)((unsigned long)(eq + 1) &
+   (~(unsigned long)(QM_EQCR_SIZE << 6)));
+   eqcr->available--;
+   sent++;
+   fd++;
+   }
+   lwsync();
+
+   /* Write the verb for each recorded entry first so that the
+* cache-line flushes below can complete faster.
+*/
+   eq = eqcr->cursor;
+   for (i = 0; i < sent; i++) {
+   eq->__dont_write_directly__verb =
+   QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+   prev_eq = eq;
+   eq = (void *)((unsigned long)(eq + 1) &
+   (~(unsigned long)(QM_EQCR_SIZE << 6)));
+   if (unlikely((prev_eq + 1) != eq))
+   eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+   }
+
+   /* We need to flush all the lines but without load/store operations
+* between them.
+*/
+   eq = eqcr->cursor;
+   for (i = 0; i < sent; i++) {
+   dcbf(eq);
+   eq = (void *)((unsigned long)(eq + 1) &
+   (~(unsigned long)(QM_EQCR_SIZE << 6)));
+   }
+   /* Update cursor for the next call */
+   eqcr->cursor = eq;
+   return sent;
+}
+
 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
 struct qman_fq *orp, u16 orp_seqnum)
 {
diff --git a/drivers/bus/dpaa/include/fsl_qman.h 
b/drivers/bus/dpaa/include/fsl_qman.h
index ad40d80..0e3e4fe 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1703,6 +1703,20 @@ int qman_enqueue_multi(struct qman_fq *fq,
   const struct qm_fd *fd,
int frames_to_send);
 
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to the first frame descriptor to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but each frame descriptor
+ * can be enqueued to a different frame queue.
+ */
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send);
+
 typedef int (*qman_cb_precommit) (void *arg);
 
 /**
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map 
b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index ac455cd..64068de 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -73,6 +73,7 @@ DPDK_18.02 {
qman_alloc_pool_range;
qman_create_cgr;
qman_delete_cgr;
+   qman_enqueue_multi_fq;
qman_modify_cgr;
qman_oos_fq;
qman_portal_poll_rx;
-- 
2.7.4



Re: [dpdk-dev] [PATCH] fix multiple typos: 'the the ' => 'the '

2018-01-10 Thread Thomas Monjalon
15/12/2017 13:34, Thierry Herbelot:
> Repeated occurrences of 'the'.
> 
> The change was obtained using the following command:
> 
>   sed -i "s;the the ;the ;" `git grep -l "the "`
> 
> Signed-off-by: Thierry Herbelot 

Applied, thanks



Re: [dpdk-dev] [PATCH v4 1/4] lib/librte_flow_classify: remove table id parameter from apis

2018-01-10 Thread Thomas Monjalon
10/01/2018 10:54, Singh, Jasvinder:
> From: Thomas Monjalon [mailto:tho...@monjalon.net]
> > 19/12/2017 15:29, Jasvinder Singh:
> > > This patch removes table id parameter from all the flow classify apis
> > > to reduce the complexity alongwith some code cleanup.
> > >
> > > The validate api is exposed as public api to allow user to validate
> > > the flow before adding it to the classifier.
> > 
> > This patch does not compile alone.
> > Should we merge all patches together?
> 
> No, It should compile alone. On my system it doesn't show any error and 
> compilation goes successfully. Could you send me error log, etc ?

test/test/test_flow_classify.c:48:10: fatal error: too many arguments to
  function call, expected 6, have 7
NULL, NULL);
  ^~~~

I guess it is expected that test and example does not compile after patch 1.
That's why I suggest to merge all 4 patches.


Re: [dpdk-dev] [PATCH v4] kernel folder for Linux and BSD modules

2018-01-10 Thread Richardson, Bruce
> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Hemant Agrawal
> Sent: Wednesday, January 10, 2018 6:46 AM
> To: dev@dpdk.org; tho...@monjalon.net
> Cc: Yigit, Ferruh
> Subject: [dpdk-dev] [PATCH v4] kernel folder for Linux and BSD modules
> 
> This patch moves the kernel modules code from EAL to a common place.
>  - Separate the kernel module code from user space code.
> 
> Signed-off-by: Hemant Agrawal 
> ---
> v4: update the path in MAINTAINERS
> v3: move contigmem from bsdapp
> v2: rename kern to kernel, add freebsd modules as well
> 
Ran test builds on FreeBSD and modules still compile ok.

One minor comment is that I don't think "kernel" should go last on the list of 
subdirs, but should probably go between lib and drivers, with app being the 
last thing.

Tested-by: Bruce Richardson 


Re: [dpdk-dev] [PATCH v4 1/4] lib/librte_flow_classify: remove table id parameter from apis

2018-01-10 Thread Singh, Jasvinder


> -Original Message-
> From: Thomas Monjalon [mailto:tho...@monjalon.net]
> Sent: Wednesday, January 10, 2018 10:53 AM
> To: Singh, Jasvinder 
> Cc: dev@dpdk.org; Iremonger, Bernard ;
> Kovacevic, Marko 
> Subject: Re: [dpdk-dev] [PATCH v4 1/4] lib/librte_flow_classify: remove table
> id parameter from apis
> 
> 10/01/2018 10:54, Singh, Jasvinder:
> > From: Thomas Monjalon [mailto:tho...@monjalon.net]
> > > 19/12/2017 15:29, Jasvinder Singh:
> > > > This patch removes table id parameter from all the flow classify
> > > > apis to reduce the complexity alongwith some code cleanup.
> > > >
> > > > The validate api is exposed as public api to allow user to
> > > > validate the flow before adding it to the classifier.
> > >
> > > This patch does not compile alone.
> > > Should we merge all patches together?
> >
> > No, It should compile alone. On my system it doesn't show any error and
> compilation goes successfully. Could you send me error log, etc ?
> 
> test/test/test_flow_classify.c:48:10: fatal error: too many arguments to
>   function call, expected 6, have 7
> NULL, NULL);
>   ^~~~
> 
> I guess it is expected that test and example does not compile after patch 1.
> That's why I suggest to merge all 4 patches.

Ok, I will send revised patch with all merged into one.

Thanks,
Jasvinder



[dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support

2018-01-10 Thread Pavan Nikhilesh
Use the event Rx adapter for packet Rx instead of explicit producer logic.
Use the service run-iteration function for granular control instead of a
dedicated service lcore.
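
As a hedged illustration of that dispatch (the adapter and queue setup,
which this patch does in init_rx_adapter(), are omitted): each lcore that
owns the Rx role looks up the adapter's service and runs one iteration of
it. The helper below is a sketch, not code from this patch; rx_adapter_id
and rx_single mirror the fields used here.

#include <stdbool.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>

static void
run_rx_adapter_once(uint8_t rx_adapter_id, bool rx_single)
{
        uint32_t service_id;

        /* Look up the service backing the Rx adapter... */
        if (rte_event_eth_rx_adapter_service_id_get(rx_adapter_id,
                                                    &service_id) != 0)
                return;

        /* ...and run a single iteration on the calling lcore; serialize
         * the call when more than one lcore may enter concurrently.
         */
        rte_service_run_iter_on_app_lcore(service_id, !rx_single);
}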

Signed-off-by: Pavan Nikhilesh 
---

 v2 Changes:
  - split work function into delay cycles and excange_mac
  - add option to configure mempool size
  - remove prod_data structure (Gage)
  - simplify locks used while calling producer and scheduler (Gage)

 examples/eventdev_pipeline_sw_pmd/main.c | 168 +++
 1 file changed, 80 insertions(+), 88 deletions(-)

diff --git a/examples/eventdev_pipeline_sw_pmd/main.c 
b/examples/eventdev_pipeline_sw_pmd/main.c
index 2e9a6d208..111dcb0ea 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,26 +46,19 @@
 #include 
 #include 
 #include 
+#include 
 #include 

 #define MAX_NUM_STAGES 8
 #define BATCH_SIZE 16
 #define MAX_NUM_CORE 64

-struct prod_data {
-   uint8_t dev_id;
-   uint8_t port_id;
-   int32_t qid;
-   unsigned int num_nic_ports;
-} __rte_cache_aligned;
-
 struct cons_data {
uint8_t dev_id;
uint8_t port_id;
uint8_t release;
 } __rte_cache_aligned;

-static struct prod_data prod_data;
 static struct cons_data cons_data;

 struct worker_data {
@@ -75,10 +68,9 @@ struct worker_data {

 struct fastpath_data {
volatile int done;
-   uint32_t rx_lock;
uint32_t tx_lock;
-   uint32_t sched_lock;
uint32_t evdev_service_id;
+   uint32_t rxadptr_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -106,6 +98,7 @@ struct config_data {
unsigned int worker_cq_depth;
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
+   uint8_t rx_adapter_id;
 };

 static struct config_data cdata = {
@@ -206,64 +199,21 @@ consumer(void)
return 0;
 }

-static int
-producer(void)
-{
-   static uint8_t eth_port;
-   struct rte_mbuf *mbufs[BATCH_SIZE+2];
-   struct rte_event ev[BATCH_SIZE+2];
-   uint32_t i, num_ports = prod_data.num_nic_ports;
-   int32_t qid = prod_data.qid;
-   uint8_t dev_id = prod_data.dev_id;
-   uint8_t port_id = prod_data.port_id;
-   uint32_t prio_idx = 0;
-
-   const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
-   if (++eth_port == num_ports)
-   eth_port = 0;
-   if (nb_rx == 0) {
-   rte_pause();
-   return 0;
-   }
-
-   for (i = 0; i < nb_rx; i++) {
-   ev[i].flow_id = mbufs[i]->hash.rss;
-   ev[i].op = RTE_EVENT_OP_NEW;
-   ev[i].sched_type = cdata.queue_type;
-   ev[i].queue_id = qid;
-   ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
-   ev[i].sub_event_type = 0;
-   ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
-   ev[i].mbuf = mbufs[i];
-   RTE_SET_USED(prio_idx);
-   }
-
-   const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
-   if (nb_tx != nb_rx) {
-   for (i = nb_tx; i < nb_rx; i++)
-   rte_pktmbuf_free(mbufs[i]);
-   }
-
-   return 0;
-}
-
 static inline void
 schedule_devices(unsigned int lcore_id)
 {
-   if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
-   rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
-   producer();
-   rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
+   if (fdata->rx_core[lcore_id]) {
+   rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
+   !fdata->rx_single);
}

-   if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
-   rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
-   rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
+   if (fdata->sched_core[lcore_id]) {
+   rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
+   !fdata->sched_single);
if (cdata.dump_dev_signal) {
rte_event_dev_dump(0, stdout);
cdata.dump_dev_signal = 0;
}
-   rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
}

if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
@@ -566,6 +516,70 @@ parse_app_args(int argc, char **argv)
}
 }

+static inline void
+init_rx_adapter(uint16_t nb_ports)
+{
+   int i;
+   int ret;
+   uint8_t evdev_id = 0;
+   struct rte_event_dev_info dev_info;
+
+   ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+   struct rte_event_port_conf rx_p_conf = {
+   .dequeue_depth = 8,
+   .enqueue_depth = 8,
+   .new_event_threshold = 1200,
+   };
+
+   if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+   rx_p_conf.dequeue_d

Re: [dpdk-dev] [PATCH v4 1/4] lib/librte_flow_classify: remove table id parameter from apis

2018-01-10 Thread Thomas Monjalon
10/01/2018 11:59, Singh, Jasvinder:
> From: Thomas Monjalon [mailto:tho...@monjalon.net]
> > 10/01/2018 10:54, Singh, Jasvinder:
> > > From: Thomas Monjalon [mailto:tho...@monjalon.net]
> > > > 19/12/2017 15:29, Jasvinder Singh:
> > > > > This patch removes table id parameter from all the flow classify
> > > > > apis to reduce the complexity alongwith some code cleanup.
> > > > >
> > > > > The validate api is exposed as public api to allow user to
> > > > > validate the flow before adding it to the classifier.
> > > >
> > > > This patch does not compile alone.
> > > > Should we merge all patches together?
> > >
> > > No, It should compile alone. On my system it doesn't show any error and
> > compilation goes successfully. Could you send me error log, etc ?
> > 
> > test/test/test_flow_classify.c:48:10: fatal error: too many arguments to
> >   function call, expected 6, have 7
> > NULL, NULL);
> >   ^~~~
> > 
> > I guess it is expected that test and example does not compile after patch 1.
> > That's why I suggest to merge all 4 patches.
> 
> Ok, I will send revised patch with all merged into one.

I can merge them myself, it is fine.


[dpdk-dev] [PATCH v2 04/15] examples/eventdev: add generic worker pipeline

2018-01-10 Thread Pavan Nikhilesh
Rename existing pipeline as generic worker pipeline.

Signed-off-by: Pavan Nikhilesh 
---

 v2 Changes:
 - Add SPDX licence tags

 examples/eventdev_pipeline_sw_pmd/Makefile |   1 +
 examples/eventdev_pipeline_sw_pmd/main.c   | 440 +
 .../eventdev_pipeline_sw_pmd/pipeline_common.h |  53 +++
 .../pipeline_worker_generic.c  | 398 +++
 4 files changed, 466 insertions(+), 426 deletions(-)
 create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c

diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile 
b/examples/eventdev_pipeline_sw_pmd/Makefile
index de4e22c88..5e30556fb 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -42,6 +42,7 @@ APP = eventdev_pipeline_sw_pmd

 # all source are stored in SRCS-y
 SRCS-y := main.c
+SRCS-y += pipeline_worker_generic.c

 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c 
b/examples/eventdev_pipeline_sw_pmd/main.c
index 2c919b7fa..295c8b692 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -68,179 +68,6 @@ eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
} while (_sent != unsent);
 }

-static int
-consumer(void)
-{
-   const uint64_t freq_khz = rte_get_timer_hz() / 1000;
-   struct rte_event packets[BATCH_SIZE];
-
-   static uint64_t received;
-   static uint64_t last_pkts;
-   static uint64_t last_time;
-   static uint64_t start_time;
-   unsigned int i, j;
-   uint8_t dev_id = cons_data.dev_id;
-   uint8_t port_id = cons_data.port_id;
-
-   uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
-   packets, RTE_DIM(packets), 0);
-
-   if (n == 0) {
-   for (j = 0; j < rte_eth_dev_count(); j++)
-   rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
-   return 0;
-   }
-   if (start_time == 0)
-   last_time = start_time = rte_get_timer_cycles();
-
-   received += n;
-   for (i = 0; i < n; i++) {
-   uint8_t outport = packets[i].mbuf->port;
-   rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
-   packets[i].mbuf);
-
-   packets[i].op = RTE_EVENT_OP_RELEASE;
-   }
-
-   if (cons_data.release) {
-   uint16_t nb_tx;
-
-   nb_tx = rte_event_enqueue_burst(dev_id, port_id, packets, n);
-   while (nb_tx < n)
-   nb_tx += rte_event_enqueue_burst(dev_id, port_id,
-packets + nb_tx,
-n - nb_tx);
-   }
-
-   /* Print out mpps every 1<22 packets */
-   if (!cdata.quiet && received >= last_pkts + (1<<22)) {
-   const uint64_t now = rte_get_timer_cycles();
-   const uint64_t total_ms = (now - start_time) / freq_khz;
-   const uint64_t delta_ms = (now - last_time) / freq_khz;
-   uint64_t delta_pkts = received - last_pkts;
-
-   printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
-   "avg %.3f mpps [current %.3f mpps]\n",
-   received,
-   total_ms,
-   received / (total_ms * 1000.0),
-   delta_pkts / (delta_ms * 1000.0));
-   last_pkts = received;
-   last_time = now;
-   }
-
-   cdata.num_packets -= n;
-   if (cdata.num_packets <= 0)
-   fdata->done = 1;
-
-   return 0;
-}
-
-static inline void
-schedule_devices(unsigned int lcore_id)
-{
-   if (fdata->rx_core[lcore_id]) {
-   rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
-   !fdata->rx_single);
-   }
-
-   if (fdata->sched_core[lcore_id]) {
-   rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
-   !fdata->sched_single);
-   if (cdata.dump_dev_signal) {
-   rte_event_dev_dump(0, stdout);
-   cdata.dump_dev_signal = 0;
-   }
-   }
-
-   if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
-   rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
-   consumer();
-   rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
-   }
-}
-
-static inline void
-work(struct rte_mbuf *m)
-{
-   struct ether_hdr *eth;
-   struct ether_addr addr;
-
-   /* change mac addresses on packet (to use mbuf data) */
-   /*
-* FIXME Swap mac address properly and also handle the
-* case for both odd and even number of stages that the
-* addresses end up the same at the end of the pipeline
-*/
-   eth = r

[dpdk-dev] [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args

2018-01-10 Thread Pavan Nikhilesh
Each eventdev pipeline needs to allow a different combination of cmdline
arguments based on the pipeline type.

Signed-off-by: Pavan Nikhilesh 
---

 v2 Changes:
 - remove redundant split in printf

 examples/eventdev_pipeline_sw_pmd/main.c   | 16 +++-
 .../eventdev_pipeline_sw_pmd/pipeline_common.h |  4 ++
 .../pipeline_worker_generic.c  | 43 ++
 3 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/examples/eventdev_pipeline_sw_pmd/main.c 
b/examples/eventdev_pipeline_sw_pmd/main.c
index 295c8b692..9e6061643 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -253,17 +253,11 @@ parse_app_args(int argc, char **argv)
}
}

-   if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
-   sched_lcore_mask == 0 || tx_lcore_mask == 0) {
-   printf("Core part of pipeline was not assigned any cores. "
-   "This will stall the pipeline, please check core masks "
-   "(use -h for details on setting core masks):\n"
-   "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
-   "\n\tworkers: %"PRIu64"\n",
-   rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
-   worker_lcore_mask);
-   rte_exit(-1, "Fix core masks\n");
-   }
+   cdata.worker_lcore_mask = worker_lcore_mask;
+   cdata.sched_lcore_mask = sched_lcore_mask;
+   cdata.rx_lcore_mask = rx_lcore_mask;
+   cdata.tx_lcore_mask = tx_lcore_mask;
+
if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
usage();

diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h 
b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 379ba9d4b..9e1f5e9f0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -82,6 +82,10 @@ struct config_data {
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
+   uint64_t worker_lcore_mask;
+   uint64_t rx_lcore_mask;
+   uint64_t tx_lcore_mask;
+   uint64_t sched_lcore_mask;
 };

 struct port_link {
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c 
b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index d2bc6d355..d1b0e1db1 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -385,6 +385,48 @@ init_rx_adapter(uint16_t nb_ports)
cdata.rx_adapter_id);
 }

+static void
+generic_opt_check(void)
+{
+   int i;
+   int ret;
+   uint32_t cap = 0;
+   uint8_t rx_needed = 0;
+   struct rte_event_dev_info eventdev_info;
+
+   memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+   rte_event_dev_info_get(0, &eventdev_info);
+
+   for (i = 0; i < rte_eth_dev_count(); i++) {
+   ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+   if (ret)
+   rte_exit(EXIT_FAILURE,
+   "failed to get event rx adapter capabilities");
+   rx_needed |=
+   !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+   }
+
+   if (cdata.worker_lcore_mask == 0 ||
+   (rx_needed && cdata.rx_lcore_mask == 0) ||
+   cdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0
+   && !(eventdev_info.event_dev_cap &
+   RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+   printf("Core part of pipeline was not assigned any cores. "
+   "This will stall the pipeline, please check core masks "
+   "(use -h for details on setting core masks):\n"
+   "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+   "\n\tworkers: %"PRIu64"\n",
+   cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+   cdata.sched_lcore_mask,
+   cdata.worker_lcore_mask);
+   rte_exit(-1, "Fix core masks\n");
+   }
+
+   if (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
+   memset(fdata->sched_core, 0,
+   sizeof(unsigned int) * MAX_NUM_CORE);
+}
+
 void
 set_worker_generic_setup_data(struct setup_data *caps, bool burst)
 {
@@ -395,4 +437,5 @@ set_worker_generic_setup_data(struct setup_data *caps, bool 
burst)
caps->adptr_setup = init_rx_adapter;
caps->scheduler = schedule_devices;
caps->evdev_setup = setup_eventdev_generic;
+   caps->check_opt = generic_opt_check;
 }
--
2.15.1



[dpdk-dev] [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline

2018-01-10 Thread Pavan Nikhilesh
Add a framework to support capability-based pipelines.
Based on the capabilities of the event device and the probed ethernet
devices, the optimal pipeline configuration can be chosen.
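
For illustration, a minimal hedged sketch of how these hooks might be wired
up at init time; the call order and the use of
set_worker_generic_setup_data() (added later in this series) are
assumptions, not code from this patch:

#include <stdbool.h>
#include "pipeline_common.h"

static void
pipeline_setup_sketch(struct fastpath_data *fdata, uint16_t nb_ports,
                      struct cons_data *cons, struct worker_data *workers,
                      bool burst)
{
        /* Pick an implementation and populate the function-pointer table;
         * the generic worker setup helper comes from a later patch in
         * this series.
         */
        set_worker_generic_setup_data(&fdata->cap, burst);

        /* Drive the rest of the initialization through the selected
         * hooks; the worker/consumer/scheduler hooks are invoked later
         * from the per-lcore main loop.
         */
        fdata->cap.check_opt();
        fdata->cap.evdev_setup(cons, workers);
        fdata->cap.adptr_setup(nb_ports);
}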

Signed-off-by: Pavan Nikhilesh 
---
 examples/eventdev_pipeline_sw_pmd/pipeline_common.h | 17 +
 1 file changed, 17 insertions(+)

diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h 
b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 1dbc01f16..00721ea94 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -34,6 +34,22 @@ struct worker_data {
uint8_t port_id;
 } __rte_cache_aligned;
 
+typedef int (*worker_loop)(void *);
+typedef int (*consumer_loop)(void);
+typedef void (*schedule_loop)(unsigned int);
+typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
+typedef void (*rx_adapter_setup)(uint16_t nb_ports);
+typedef void (*opt_check)(void);
+
+struct setup_data {
+   worker_loop worker;
+   consumer_loop consumer;
+   schedule_loop scheduler;
+   eventdev_setup evdev_setup;
+   rx_adapter_setup adptr_setup;
+   opt_check check_opt;
+};
+
 struct fastpath_data {
volatile int done;
uint32_t tx_lock;
@@ -47,6 +63,7 @@ struct fastpath_data {
unsigned int sched_core[MAX_NUM_CORE];
unsigned int worker_core[MAX_NUM_CORE];
struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+   struct setup_data cap;
 } __rte_cache_aligned;
 
 struct config_data {
-- 
2.15.1


